serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
4,501
#include "includes.h"
// Slot indices for a 13-entry stencil-coefficient layout (center, then Z/X/Y
// rings 1..4). NOTE(review): none of these are referenced inside this kernel;
// presumably they are shared with other kernels via includes.h — confirm.
#define C0 0
#define CZ1 1
#define CX1 2
#define CY1 3
#define CZ2 4
#define CX2 5
#define CY2 6
#define CZ3 7
#define CX3 8
#define CY3 9
#define CZ4 10
#define CX4 11
#define CY4 12

// Injects an 8-tap time-interpolated source term into the wavefield p.
//   id       : base time-sample index into each source trace
//   ii       : base index into the interpolation-weight table _tableS
//   p        : wavefield, updated in place at the injection points
//   _tableS  : interpolation weights (8 consecutive entries used from ii)
//   _sourceV : source traces, _ntSrc samples per source, stored back to back
//   _locsS   : flat wavefield index of each source point
//   _dir     : injection direction/sign (scales the contribution)
//   _jt      : time-step subdivision factor (contribution divided by it)
//   _ntSrc   : number of samples per source trace
// One thread per source point. NOTE(review): there is no bounds guard on ix,
// so the launch configuration must cover exactly the length of _locsS —
// confirm at the call site.
__global__ void inject_Source(int id, int ii, float *p, float *_tableS, float *_sourceV, int *_locsS, int _dir, int _jt, int _ntSrc){
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    // (float)_dir/_jt scales an 8-point temporal interpolation of the trace.
    p[_locsS[ix]]+=(float)_dir/_jt * (
        _tableS[ii + 0]*_sourceV[_ntSrc*ix+id]+
        _tableS[ii + 1]*_sourceV[_ntSrc*ix+id+1]+
        _tableS[ii + 2]*_sourceV[_ntSrc*ix+id+2]+
        _tableS[ii + 3]*_sourceV[_ntSrc*ix+id+3]+
        _tableS[ii + 4]*_sourceV[_ntSrc*ix+id+4]+
        _tableS[ii + 5]*_sourceV[_ntSrc*ix+id+5]+
        _tableS[ii + 6]*_sourceV[_ntSrc*ix+id+6]+
        _tableS[ii + 7]*_sourceV[_ntSrc*ix+id+7] );
}
4,502
#include "stdio.h"
#define N 10

/* Element-wise sum of two length-N int arrays: c[i] = a[i] + b[i]. */
void add(int *a, int *b, int *c) {
    for (int tID = 0; tID < N; tID++) {
        c[tID] = a[tID] + b[tID];
    }
}

/* Fills a with 0..N-1 and b with ones, adds them, and prints each sum. */
int main() {
    int a[N], b[N], c[N];

    /* Fill Arrays */
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = 1;
    }

    add(a, b, c);

    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    return 0;
}
4,503
#define PI 3.141592653589793238462643
#define blocDim 256
#define powOfTwo 4
#define timerCount 10
// Type-generic min() via a GNU C statement expression (each arg evaluated once).
#define min(a,b) \
  ({ __typeof__ (a) _a = (a); \
     __typeof__ (b) _b = (b); \
     _a < _b ? _a : _b; })

// One block per (k, l) pair computes a weighted sum of x[start .. start+k]
// (a quadrature-style rule: left endpoint weight 1, point 1 weight deltaX/2
// — or deltaX/4 when k==2 —, interior points weight deltaX, point k-1 weight
// 3/4*deltaX, right endpoint weight deltaX/4), reduced in shared memory and
// stored to b[blockPos]. M is unused here.
// NOTE(review): launch must guarantee k = blockIdx.x+2 and l = blockIdx.y
// index within x/b — confirm at the launch site.
__global__ void guts(const long int N, const long int M,const double deltaX,const double * x,double * b)
{
  __shared__ double thisBlock[blocDim];   // per-thread partials for the reduction
  __shared__ long int k,l,blockPos,start;//,fullNeeded,needed;
  double FuncTemp = 0;
  long int threadPos;
  // Thread 0 derives the block's parameters from its grid coordinates.
  if(threadIdx.x==0){
    k = blockIdx.x+2;       // number of samples minus one handled by this block
    l = blockIdx.y;         // row index
    start = l*(N+1);        // offset of row l in x
    blockPos = k+(N+1)*l;   // output slot in b
    //fullNeeded = k+1 + (k+1)%2;
    //needed = min(fullNeeded,blocDim);
  }
  thisBlock[threadIdx.x] = 0;
  __syncthreads();          // publish k/l/start/blockPos to the whole block
  // Block-stride accumulation over sample positions 0..k with per-position weights.
  threadPos = threadIdx.x;
  while(threadPos<=k){
    if(threadPos==0){
      FuncTemp += x[start+threadPos];                 // left endpoint, weight 1
    }else if(threadPos==1){
      if(k==2){
        FuncTemp += deltaX*x[start+threadPos]/4.0;    // k==2: index 1 is also next-to-last
      }else{
        FuncTemp += deltaX*x[start+threadPos]/2.0;
      }
    }else if(threadPos<(k-1)){
      FuncTemp += deltaX*x[start+threadPos];          // interior points
    }else if(threadPos==(k-1)){
      FuncTemp += 3.0/4.0*deltaX*x[start+threadPos];  // next-to-last point
    }else if(threadPos==k){
      FuncTemp += deltaX*x[start+threadPos]/4.0;      // right endpoint
    }else{
      FuncTemp += 0;
    }
    threadPos += blockDim.x;
  }
  thisBlock[threadIdx.x] = FuncTemp;
  __syncthreads();
  // Shared-memory tree reduction; blocDim (256) is a power of two so every
  // halving step is exact and all threads reach each barrier.
  for(int i=blocDim/2;i>0;i=i/2){
    if(threadIdx.x<i){
      thisBlock[threadIdx.x] += thisBlock[threadIdx.x+i];
    }
    __syncthreads();
  }
  if(threadIdx.x==0){
    b[blockPos] = thisBlock[0];
  }
}
4,504
// CS 87 - Final Project
// Maria-Elena Solano
//
// This utility simply counts how many CPU cores are in this node
// (NOTE(review): despite the wording above, what is actually reported is a
// GPU figure: # of SMs x # of warp schedulers per SM on the last CUDA device.)
//
#include <stdio.h>         // C's standard I/O library
#include <stdlib.h>        // C's standard library
#include <stdint.h>        // C's exact width int types
#include <unistd.h>        // C's POSIX API
#include <cuda_runtime.h>  // CUDA runtime library

// macro/constant definitions
#define cuda_try(X)     ((X) != cudaSuccess)
#define perror_out(X)   perror(X), fflush(stderr)
#define stderr_out(...) fprintf(stderr, __VA_ARGS__), \
                        fflush(stderr)
#define print_out(...)  printf(__VA_ARGS__), fflush(stdout)

// simple helper container (used in how_many_warp_schedulers_per_SM)
typedef struct sm_to_ws{
  int sm;   // packed architecture version: (major << 4) + minor
  int ws;   // warp schedulers per SM for that architecture
} sm_to_ws_t;

void print_count();
void print_count_verbose();
int how_many_warp_schedulers_per_SM(int arch_major_ver, int arch_minor_ver);

// Entry point: -v selects the verbose calculation; otherwise the bare count.
int main(int argc, char** argv){
  int ret;  // return value from getopt

  // Greedily read all the command line options provided.
  while((ret = getopt(argc, argv, "v")) != -1){
    switch(ret){
      // If option -v, display the full calculation instead
      case 'v':{
        goto verbose;
      }
    }
  }

  // Print the number of cores
  print_count();

  // And return
  goto done;

 verbose:
  // If 'verbose' mode, display the model, and the full calculation
  print_count_verbose();

 done:
  exit(EXIT_SUCCESS);
}

// This function prints out the number of independent blocks in the GPU.
// On any error (or when no device is present) it prints 0.
//
void print_count(){
  cudaError_t ret;     // return value of CUDA calls
  int dev, devs;       // number of devices
  cudaDeviceProp pr;   // device properties
  uint32_t p;          // number of concurrent blocks

  // Count how many devices are there. If err or no devices, set p=0 and print.
  if(cuda_try(ret = cudaGetDeviceCount(&devs)) || devs == 0){
    stderr_out("cudaGetDeviceCount error: %s\n", cudaGetErrorString(ret));
    p = 0;
    goto print;
  }

  // Get the device properties of the last device
  dev = devs - 1;
  cudaSetDevice(dev);
  cudaGetDeviceProperties(&pr, dev);

  // Compute the number of concurrent blocks according to the formula
  //
  //   # of SMs x # of warp schedulers per SM
  //
  p=pr.multiProcessorCount*how_many_warp_schedulers_per_SM(pr.major,pr.minor);

 print:
  print_out("%u\n", p);
  return;
}

// This function prints out the number of independent blocks in the GPU,
// showing how the calculation was made: that is, # of SMs x # of warp sche-
// dulers per SM available in the given model.
//
void print_count_verbose(){
  cudaError_t ret;     // return value of CUDA calls
  int dev, devs;       // number of devices
  cudaDeviceProp pr;   // device properties

  // Count how many devices are there. If err, return.
  if(cuda_try(ret = cudaGetDeviceCount(&devs))){
    stderr_out("cudaGetDeviceCount error: %s\n", cudaGetErrorString(ret));
    return;
  }

  // If no devices, notify the user and return
  if(devs == 0){
    print_out(" (No CUDA-enabled GPUs in this machine.)\n");
    return;
  }

  // Get the device properties of the last device
  dev = devs - 1;
  cudaSetDevice(dev);
  cudaGetDeviceProperties(&pr, dev);

  // Show the calculation
  //
  //   # of SMs x # of warp schedulers per SM
  //
  print_out(" (%u SMs x %u warp schedulers per SM)\n",
    pr.multiProcessorCount,
    how_many_warp_schedulers_per_SM(pr.major, pr.minor));

  return;
}

// This function determines how many warp schedulers per SM are there in the
// GPU given its major and minor architectural version.
//
// Adapted from helper_cuda.h in the CUDA SDK:
// (/usr/local/cuda/samples/common/inc/helper_cuda.h).
//
// major, minor: major and minor architecture version
//
// returns: number of warp schedulers per SM (0 if the version is unknown)
//
int how_many_warp_schedulers_per_SM(int major, int minor){
  int i;
  sm_to_ws_t t[13];  // Lookup table keyed by 0xMm (major nibble, minor nibble)

  // Tesla architecture (1 warp scheduler per SM)
  t[0] .sm = 0x10; t[0] .ws = 1;  // Tesla (SM 1.0) G80 class
  t[1] .sm = 0x11; t[1] .ws = 1;  // Tesla (SM 1.1) G8X class
  t[2] .sm = 0x12; t[2] .ws = 1;  // Tesla (SM 1.2) G9X class
  t[3] .sm = 0x13; t[3] .ws = 1;  // Tesla (SM 1.3) GT200 class
  // Fermi architecture (2 warp schedulers per SM)
  t[4] .sm = 0x20; t[4] .ws = 2;  // Fermi (SM 2.0) GF100 class
  t[5] .sm = 0x21; t[5] .ws = 2;  // Fermi (SM 2.1) GF10x class
  // Kepler architecture (4 warp schedulers per SM)
  t[6] .sm = 0x30; t[6] .ws = 4;  // Kepler (SM 3.0) GK10x class
  t[7] .sm = 0x32; t[7] .ws = 4;  // Kepler (SM 3.2) GK10x class
  t[8] .sm = 0x35; t[8] .ws = 4;  // Kepler (SM 3.5) GK11x class
  t[9] .sm = 0x37; t[9] .ws = 4;  // Kepler (SM 3.7) GK21x class
  // Maxwell architecture (4 warp schedulers per SM)
  t[10].sm = 0x50; t[10].ws = 4;  // Maxwell (SM 5.0) GM10x class
  t[11].sm = 0x52; t[11].ws = 4;  // Maxwell (SM 5.2) GM20x class
  // Unknown architecture
  t[12].sm =   -1; t[12].ws = -1; // Unknown

  for(i=0; i<13; i++){
    if(t[i].sm == ((major << 4) + minor)){
      return t[i].ws;
    }
  }
  return 0;
}
4,505
// %%cu
// as data type is int, sum might overflow (depending on rand(), but the seq and parallel answers are still equal, or change int to long long (too lazy sorry))
#include<iostream>
#include<vector>
#include<cstdlib>
#include<cstdio>
#include<limits>
#include<cuda.h>
#define THREADS_PER_BLOCK 256
using namespace std;

// Sequential reference: computes and times sum, max, min and the truncated
// integer average of v, printing each result to stdout.
void seq(vector<int>v){
    clock_t st = clock();
    int sum = 0;
    for(int i=0;i<v.size();i++){
        sum+=v[i];
    }
    clock_t en = clock();
    cout<<"Sequential Sum: "<<sum<<"\n";
    cout<<"Time taken: "<<double(en-st)/CLOCKS_PER_SEC<<"\n";
    st= clock();
    int maxx=-1e9;
    for(int i=0;i<v.size();i++){
        maxx=max(maxx,v[i]);
    }
    en = clock();
    cout<<"Sequential Maximum: "<<maxx<<"\n";
    cout<<"Time taken: "<<double(en-st)/CLOCKS_PER_SEC<<"\n";
    st = clock();
    int minn=1e9;
    for(int i=0;i<v.size();i++){
        minn=min(minn,v[i]);
    }
    en = clock();
    cout<<"Sequential Minimum: "<<minn<<"\n";
    cout<<"Time taken: "<<double(en-st)/CLOCKS_PER_SEC<<"\n";
    st = clock();
    sum = 0;
    for(int i=0;i<v.size();i++){
        sum+=v[i];
    }
    sum=sum/(int)v.size();   // truncated average, matching the parallel path
    en = clock();
    cout<<"Sequential Average: "<<sum<<"\n";
    cout<<"Time taken: "<<double(en-st)/CLOCKS_PER_SEC<<"\n";
}

// One reduction step: each block reduces up to blockDim.x elements of arr_in
// into arr_out[blockIdx.x] in shared memory.
//   option 0/3 -> sum (average divides on the host at the end)
//   option 1   -> maximum (padding -INT_MAX), option 2 -> minimum (padding INT_MAX)
// Out-of-range threads load the operation's identity so the tree reduction
// (blockDim.x must be a power of two) stays uniform.
__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
    int ind = threadIdx.x;
    int dim = blockDim.x;
    extern __shared__ int shared_mem[];
    int actual_ind = blockIdx.x*blockDim.x + ind;
    if(actual_ind < sz){
        shared_mem[ind] = arr_in[actual_ind];
    }else{
        if(option == 0 || option == 3)
            shared_mem[ind] = 0;
        else if(option == 1){//maximum
            shared_mem[ind] = -INT_MAX;
        }else{//minimum
            shared_mem[ind] = INT_MAX;
        }
    }
    __syncthreads();
    // Tree reduction over shared memory; barrier outside the divergent if.
    for(int i=dim/2 ; i > 0 ; i=i/2){
        if(ind<i){
            if(option == 0 || option == 3)
                shared_mem[ind]+=shared_mem[ind+i];
            else if(option == 1){
                shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
            }else{
                shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
            }
        }
        __syncthreads();
    }
    arr_out[blockIdx.x]=shared_mem[0];
}

// Parallel version: repeatedly launches `calculate`, shrinking the array by a
// factor of THREADS_PER_BLOCK per pass, until a single block produces the
// final value. NOTE(review): device buffers are freed and re-allocated every
// pass, and the clock() timing therefore includes those allocations.
void parll(vector<int>v){
    int *h_in, *h_out, *d_in, *d_out;
    h_in = new int[v.size()];
    copy(v.begin(),v.end(),h_in);
    string opt[] = {"Sum", "Maximum", "Minimum", "Average"};
    h_out = new int[1];
    int actual_size = v.size();
    for(int option = 0; option<4; option++){
        cudaMalloc(&d_in,sizeof(int)*v.size());
        cudaMalloc(&d_out,sizeof(int)*v.size());
        cudaMemcpy(d_in,h_in,sizeof(int)*v.size(),cudaMemcpyHostToDevice);
        int d_in_size = v.size();
        int num_blocks = 1+(d_in_size-1)/THREADS_PER_BLOCK;   // ceil-div
        clock_t st = clock();
        while(num_blocks>1){
            calculate<<<num_blocks,THREADS_PER_BLOCK,sizeof(int)*THREADS_PER_BLOCK>>>(d_in,d_out,d_in_size,option);
            // Shrink: the num_blocks partial results become the next input.
            d_in_size = num_blocks;
            cudaFree(d_in);
            cudaMalloc(&d_in,sizeof(int)*d_in_size);
            cudaMemcpy(d_in,d_out,sizeof(int)*d_in_size,cudaMemcpyDeviceToDevice);
            cudaFree(d_out);
            num_blocks = 1+(num_blocks-1)/THREADS_PER_BLOCK;
            cudaMalloc(&d_out,sizeof(int)*num_blocks);
        }
        // Final pass: one block reduces the remaining <= THREADS_PER_BLOCK values.
        calculate<<<1,THREADS_PER_BLOCK,sizeof(int)*THREADS_PER_BLOCK>>>(d_in,d_out,d_in_size,option);
        clock_t en = clock();
        cudaMemcpy(h_out,d_out,sizeof(int)*1,cudaMemcpyDeviceToHost);
        if(option==3){
            h_out[0] = h_out[0]/actual_size;   // average = sum / n (truncated)
        }
        cout<<"Parallel "<<opt[option]<<" : "<<h_out[0]<<"\n";
        cout<<"Time taken: "<<double(en-st)/CLOCKS_PER_SEC<<"\n";
        cudaFree(d_in);
        cudaFree(d_out);
    }
}

// Fills a 100M-element vector with values in [1, 1000] and runs both versions.
int main(){
    ios::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    vector<int>v;
    srand(time(NULL));
    for(int i=0;i<100000000;i++){
        v.push_back(1+rand()%1000);
    }
    seq(v);
    parll(v);
}
4,506
/*
 * Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws. Users and possessors of this source code
 * are hereby granted a nonexclusive, royalty-free license to use this code
 * in individual and commercial software.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users. This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 *
 * Any use of this source code in individual and commercial software must
 * include, in the user documentation and internal comments to the code,
 * the above Disclaimer and U.S. Government End Users Notice.
 */

// A simple macro to divide and round-up
#define DIVUP(A,B) ( (A)%(B) == 0 ? (A)/(B) : ((A) / (B) + 1) )
// macro to clamp to min & max value:
#define CLAMP(A,B,C) ( (A) < (B) ? (B) : (A) > (C) ? (C) : (A) )

// Here is the kernel which performs the rotate for every pixel in the output image
// Each thread adjusts FOUR packed 8-bit pixels (one uchar4) in place:
// new channel value = clamp((v + brightness) * contrast, 0, 255).
//   image  : pixel data viewed as uchar4 (4 single-byte pixels per element)
//   w, h   : width in single pixels, height in rows
//   pitch  : row pitch in bytes (>>2 converts it to a uchar4 stride)
__global__ void BrightnessContrastKernel(uchar4 * image, int w, int h, int pitch, float brightness, float contrast)
{
 // compute the x & y coordinates in the image the current pixel must process.
 // the image data is passed in as a uchar4, so this x coordinate
 // points 4-pixel groups
 int x = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
 int y = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
 if(x < (w>>2) && y < h) // w/4 because we are processing 4 pixels per thread
 {
  float temp;
  // read in the value to modify
  uchar4 pixel = image[__umul24(y,pitch>>2) + x];
  // Pixel 1
  temp = ((float)pixel.x + brightness) * contrast;
  pixel.x = CLAMP(temp,0.0f,255.0f);
  // Pixel 2
  temp = ((float)pixel.y + brightness) * contrast;
  pixel.y = CLAMP(temp,0.0f,255.0f);
  // Pixel 3
  temp = ((float)pixel.z + brightness) * contrast;
  pixel.z = CLAMP(temp,0.0f,255.0f);
  // Pixel 4
  temp = ((float)pixel.w + brightness) * contrast;
  pixel.w = CLAMP(temp,0.0f,255.0f);
  // write the new pixel value back to the image data
  image[__umul24(y,pitch>>2) + x] = pixel;
 }
}

// This function will adjust the brightness and contrast an image
// Host wrapper: launches the kernel over the whole image and blocks until it
// completes, returning the synchronization status.
// NOTE(review): cudaThreadSynchronize() is deprecated in modern CUDA
// (cudaDeviceSynchronize() is the replacement) — kept unchanged here.
extern "C" cudaError_t AdjustBrightnessContrast(unsigned char * image, int width, int height, int pitch, float brightness_adjust, float contrast_adjust)
{
 // we need to create a 2-d thread block and a 2-d grid
 // of blocks. Lets just make the blocks 16x14.
 // Process four pixels / thread
 dim3 BlockSz(16,14,1);
 dim3 GridSz(DIVUP(width,BlockSz.x*4),DIVUP(height,BlockSz.y),1);
 // Now launch the kernel to do the adjustment
 BrightnessContrastKernel<<<GridSz,BlockSz>>>((uchar4*)image,width,height,pitch,brightness_adjust,contrast_adjust);
 // just wait here for the kernel to complete
 cudaError_t err = cudaThreadSynchronize();
 return(err);
}
4,507
#include <stdio.h>
#include "cuda.h"

// Abort-on-error check for the most recent CUDA call or kernel launch.
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

extern "C" {

// Element-wise a[i] += b[i] for i in [0, n); extra threads are guarded out.
__global__ void simple_add(float* a, float* b, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n) {
        a[i] = a[i] + b[i];
    }
}

// Launches simple_add on pointers that must already be device-accessible
// (device or managed memory); performs no host<->device copies.
// NOTE(review): 10 blocks x 1 thread only covers n <= 10 — confirm callers.
void cuda_kernel_no_copy(float* a, float* b, int n)
{
    const int THREADS_PER_BLOCK = 1;
    const int NUMBER_OF_BLOCKS = 10;
    cudaDeviceSynchronize();
    simple_add<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(a, b, n);
    cudaDeviceSynchronize();
    cudaCheckErrors("cuda error");
}

// Copies a and b to temporary device buffers, runs simple_add there, and
// copies the result back into a (b is left unchanged on the host).
// Fix: the temporaries are now released — the original leaked d_a and d_b
// on every call.
void cuda_kernel_with_copy(float* a, float* b, int n)
{
    const int THREADS_PER_BLOCK = 1;
    const int NUMBER_OF_BLOCKS = 10;
    float* d_a;
    float* d_b;
    cudaMalloc(&d_a, n*sizeof(float));
    cudaMalloc(&d_b, n*sizeof(float));
    cudaMemcpy(d_a, a, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    simple_add<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, n);
    cudaDeviceSynchronize();
    cudaMemcpy(a, d_a, n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_a);   // leak fix: free the device temporaries
    cudaFree(d_b);
    cudaCheckErrors("cuda error");
}

};
4,508
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>

// Element-wise C = A + B, one thread per element within a single block.
// (Name kept for caller compatibility: despite "OnHost", this is a device
// kernel.) Fix: added the idx < N guard so a launch whose thread count
// exceeds N cannot write out of bounds.
__global__ void sumArraysOnHost(float *A, float *B, float *C, const int N){
    int idx = threadIdx.x;
    if (idx < N) {
        C[idx] = A[idx] + B[idx];
    }
}

// Fills ip with `size` pseudo-random floats in [0, 25.5].
// NOTE(review): reseeding from time() on every call means two calls within
// the same second produce identical data.
void initialData(float *ip, int size){
    // Generate different seed for random number
    time_t t;
    srand((unsigned int) time(&t));
    for(int i=0; i<size; i++){
        ip[i] = (float)(rand() &0xFF) /10.0f;
    }
}

// Adds two random 1024-element vectors on the GPU and prints a sample.
int main(int argc, char **argv){
    int nElem = 1024;
    size_t nBytes = nElem * sizeof(float);

    float *h_A, *h_B, *h_C;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    initialData(h_A, nElem);
    initialData(h_B, nElem);

    float *d_A, *d_B, *d_C;
    cudaMalloc((float **)&d_A, nBytes);
    cudaMalloc((float **)&d_B, nBytes);
    cudaMalloc((float **)&d_C, nBytes);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    // Single block: the kernel indexes by threadIdx.x only, so nElem must not
    // exceed the per-block thread limit (1024 here).
    sumArraysOnHost<<<1, nElem>>>(d_A, d_B, d_C, nElem);
    cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost);

    for(int i=0; i<5; i++){
        printf("A: %f \n", h_A[i]);
        printf("B: %f \n", h_B[i]);
        printf("C: %f \n", h_C[i]);
    }

    free(h_A);
    free(h_B);
    free(h_C);
    // Fix: release device memory BEFORE resetting the device. The original
    // called cudaDeviceReset() first, which destroys the context and makes
    // the subsequent cudaFree calls operate on invalid allocations.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaDeviceReset();
}
4,509
#define NUMTHREADS 16
#define THREADWORK 32

// Kendall-style correlation between sample bx of `a` and sample by of `b`
// (each sample is `sampleSize` consecutive floats). A 16x16 thread block
// counts "concordant" pairs (here: both strictly greater, both strictly
// smaller, or both tied), reduces the per-thread counts in shared memory,
// and writes tau = (concordant - discordant) / (n*(n-1)/2) to
// results[by*na + bx]. NOTE(review): nb is unused; na is only used as the
// row stride of results — confirm grid dims equal (na, nb) at the call site.
__global__ void gpuKendall(const float * a, size_t na, const float * b, size_t nb, size_t sampleSize, double * results)
{
    size_t i, j, tests, tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, rowa = bx * sampleSize, rowb = by * sampleSize;
    float discordant, concordant = 0.f, numer, denom;
    __shared__ float threadSums[NUMTHREADS*NUMTHREADS];
    // Each thread counts over a strided subset of the j > i pair triangle.
    for(i = tx; i < sampleSize; i += NUMTHREADS) {
        for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
            // 1 if the pair moves the same way in both samples (or ties in both).
            tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
                + ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
                + ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
            concordant = concordant + (float)tests;
        }
    }
    threadSums[tx*NUMTHREADS+ty] = concordant;
    __syncthreads();
    // First reduce along ty within each tx row...
    for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
        if(ty < i)
            threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
        __syncthreads();
    }
    // ...then reduce the row totals along tx.
    for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
        if((tx < i) && (ty == 0))
            threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
        __syncthreads();
    }
    // Thread (0,0) turns the concordant count into the final statistic.
    if((tx == 0) && (ty == 0)) {
        concordant = threadSums[0];
        denom = (float)sampleSize;
        denom = (denom * (denom - 1.f)) / 2.f;   // total number of pairs
        discordant = denom - concordant;
        numer = concordant - discordant;
        results[by*na+bx] = ((double)numer)/((double)denom);
    }
}
4,510
#include "includes.h"

// Naive matrix multiply C = A * B for N x N row-major int matrices
// (N is expected to be defined in includes.h). Launch with one block per
// output row (blockIdx.x) and one thread per column (threadIdx.x).
// Fix: the dot product is accumulated in a register and stored once —
// the original read-modify-wrote C[i*N+j] in global memory on every k
// iteration, issuing 2N redundant global accesses per element.
__global__ void mul(int * A, int * B, int * C){
    int i = blockIdx.x;    // output row
    int j = threadIdx.x;   // output column
    int acc = 0;           // register accumulator for the dot product
    for (int k = 0; k < N; k++){
        acc += A[i * N + k] * B[k * N + j];
    }
    C[i * N + j] = acc;
}
4,511
#include "includes.h"

// Forward-warps one source pixel along the optical-flow field (u, v), scaled
// by time_scale, splatting its value into dst with a 1x1 point-spread
// function via atomicAdd (multiple sources may land on the same target).
//   u, v         : flow components, row stride flow_stride
//   src, dst     : images, row stride image_stride
//   w, h         : image width/height in pixels
// One thread per source pixel; out-of-image threads return early, and
// targets warped outside the image are discarded.
__global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;   // column
    int i = threadIdx.y + blockDim.y * blockIdx.y;   // row
    if (i >= h || j >= w) return;
    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;
    float u_ = u[flow_row_offset + j];
    float v_ = v[flow_row_offset + j];
    //bottom left corner of target pixel
    float cx = u_ * time_scale + (float)j + 1.0f;
    float cy = v_ * time_scale + (float)i + 1.0f;
    // pixel containing bottom left corner
    int tx = __float2int_rn (cx);   // round-to-nearest
    int ty = __float2int_rn (cy);
    float value = src[image_row_offset + j];
    // fill pixel — atomicAdd because several sources can warp to one target
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        atomicAdd (dst + ty * image_stride + tx, value);
    }
}
4,512
#include "includes.h"

// Block-wise sum reduction of g_in[0..size) into g_out[blockIdx.x].
// Each thread first accumulates NUM_LOAD grid-strided elements into
// registers (NUM_LOAD is expected from includes.h), then the block reduces
// its partials through shared memory. A second launch (or host pass) over
// g_out is required to obtain the final total.
// NOTE(review): blockDim.x is assumed to be a power of two for the tree
// reduction, and the dynamic shared allocation must be
// blockDim.x * sizeof(float) — confirm at the launch site.
__global__ void reduction_kernel(float *g_out, float *g_in, unsigned int size)
{
    unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ float s_data[];
    // cumulates input with grid-stride loop and save to share memory
    float input[NUM_LOAD] = {0.f};
    for (int i = idx_x; i < size; i += blockDim.x * gridDim.x * NUM_LOAD)
    {
        // Each of the NUM_LOAD lanes reads one stride apart; out-of-range
        // reads contribute the additive identity 0.
        for (int step = 0; step < NUM_LOAD; step++)
            input[step] += (i + step * blockDim.x * gridDim.x < size) ? g_in[i + step * blockDim.x * gridDim.x] : 0.f;
    }
    // Collapse the per-lane partials into a single register value.
    for (int i = 1; i < NUM_LOAD; i++)
        input[0] += input[i];
    s_data[threadIdx.x] = input[0];
    __syncthreads();
    // do reduction (shared-memory tree; barrier outside the divergent if)
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (threadIdx.x < stride)
            s_data[threadIdx.x] += s_data[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
    {
        g_out[blockIdx.x] = s_data[0];
    }
}
4,513
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;

/* example for thread divergence */
// Teaching kernel: each thread loads one double into a register, adds a long
// arithmetic series, and writes it back. Swap in one of the commented-out
// conditions to compare no divergence, intra-warp divergence (t%32 < 16),
// and inter-warp branching (t < 512). With `if (true)` the else branch is
// intentionally dead code.
__global__ void diverge(int n, double *data)
{
    int nt = blockDim.x;
    int b = blockIdx.x;
    int t = threadIdx.x;
    // data is read to register
    double local = data[b*nt + t];
    // (*) modify if statement for divergence
    if (true) { // no divergence
    //if (t%32 < 16) { // divergence in warp execution
    //if (t < 512) { // divergence not in warp execution
        for (int i=0; i<1000; i++)
            local += (double)i*(double)(i+1);
    } else {
        for (int i=1000; i<2000; i++)
            local += (double)i*(double)(i+1);
    }
    // computed result is written back to global memory
    data[b*nt + t] = local;
}

// Times one launch of `diverge` over 8M doubles (8192 blocks x 1024 threads)
// with gettimeofday, then prints the elapsed time and a sample output value.
// NOTE(review): sTime is set but never used.
int main()
{
    time_t sTime = time(NULL);
    struct timeval tt1, tt2;
    int ms;
    double fms;
    int n = 8388608;
    double *data = (double*) malloc(n * sizeof(double));
    for (int i=0; i<n; i++) {
        data[i] = 0;
    }
    double *data_dev;
    cudaMalloc((void**) &data_dev, n * sizeof(double));
    cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
    dim3 nBlocks(8192,1,1);
    dim3 nThreads(1024,1,1);
    cudaThreadSynchronize();
    gettimeofday( &tt1, NULL );
    diverge <<< nBlocks, nThreads >>>(n, data_dev);
    cudaThreadSynchronize();   // wait for the async launch before stopping the clock
    gettimeofday( &tt2, NULL );
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms)/1000000.0;
    cout << "Comp time = " << fms << endl;
    cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
    cudaFree(data_dev);
    cout << "data[n-1] = " << data[n-1] << endl;
    free(data);
}
4,514
#include "includes.h"

// Applies a logarithm element-wise: output[id] = log(input[id]) where
// type selects the base (1 = natural, 2 = base-2, 3 = base-10). Any other
// type value leaves output[id] untouched. Supports a 2-D grid of 1-D blocks;
// threads past `size` do nothing.
__global__ void LogarithmicFunctionKernel(float* input, float* output, int size, const int type)
{
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into one element index.
    const int id = blockDim.x * blockIdx.y * gridDim.x
                 + blockDim.x * blockIdx.x
                 + threadIdx.x;
    if (id >= size)
        return;

    if (type == 1)
        output[id] = logf(input[id]);
    else if (type == 2)
        output[id] = log2f(input[id]);
    else if (type == 3)
        output[id] = log10f(input[id]);
}
4,515
#include<iostream>
#include<cuda.h>
using namespace std;

// Device kernel: a[i] = i*i, one block per element (launched <<<N,1>>>).
__global__ void assign(int *a,int N){ //using gpu to assign value for a
    int i=blockIdx.x;
    if(i<N)
        a[i]=i*i;
}

// Fills two device arrays with squares on the GPU, copies them back, and
// prints both.
int main(){
    const int N=10;
    int *a,*dev_a,*b,*dev_b;
    a=new int[N];
    b=new int[N];
    cudaMalloc(&dev_a,N*sizeof(int));
    cudaMalloc(&dev_b,N*sizeof(int));
    assign<<<N,1>>>(dev_a,N);
    assign<<<N,1>>>(dev_b,N);
    cudaMemcpy(a,dev_a,N*sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(b,dev_b,N*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i=0;i<N;i++)
        cout<<a[i]<<endl;
    for(int i=0;i<N;i++)
        cout<<b[i]<<endl;
    cudaFree(dev_a);
    cudaFree(dev_b);
    // Fix: release the host arrays too — the original leaked both new[]
    // allocations.
    delete[] a;
    delete[] b;
    return 0;
}
4,516
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

const int W = 40;
const int H = 12;

// Renders a fixed ASCII picture into odata: one thread per cell; the last
// column of every row except the final one becomes '\n'.
__global__ void draw(char *odata)
{
    const char tmp_str[H][W] = {
        ":::::::::::::::::::::::::::::::::::::::",
        ": :",
        ": :",
        ": :",
        ": :",
        ": #### <| :",
        ": ###### | :",
        ": ######## | :",
        ": ########## | :",
        ": ############ | :",
        ": ############## # :",
        ":::::::::::::::::::::::::::::::::::::::"
    };
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < W && y < H) {
        char c;
        if (x == W - 1 && y != H - 1)
            c = '\n';
        else
            c = tmp_str[y][x];
        odata[y * W + x] = c;
    }
}

// Fills the buffer on the device and prints it as a C string.
int main(void)
{
    char *h_data, *d_data;
    const int strlen = W*H;
    size_t strsize = strlen * sizeof(char);
    // Fix: allocate one extra byte for a terminating NUL — the original
    // passed a non-terminated W*H buffer to printf("%s"), reading past the
    // end of the allocation.
    h_data = (char *)malloc(strsize + 1);
    memset(h_data, 0, strlen + 1);
    cudaMalloc((void **)&d_data, strsize);
    cudaMemcpy(d_data, h_data, strsize, cudaMemcpyHostToDevice);
    dim3 blocksize = dim3(16, 12, 1);
    dim3 nblock = dim3((W - 1) / 16 + 1, (H - 1) / 12 + 1, 1);
    draw <<<nblock, blocksize>>>(d_data);
    cudaMemcpy(h_data, d_data, strlen, cudaMemcpyDeviceToHost);
    h_data[strlen] = '\0';   // re-terminate after the device overwrote the buffer
    printf("%s", h_data);
    free(h_data);
    cudaFree(d_data);
}
4,517
#include <iostream>
#include <fstream>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
#define PI 3.14159265

// Placeholder loader (unused; the data files are read directly in main).
__host__ void readdata(double **ArrayX,double **ArrayF,double *Arrayq)
{
}

// sinc(x) = sin(x)/x with the removable singularity at x == 0 handled.
__device__ double sinc(double x)
{
    if(x==0)
        return 1;
    else
        return sin(x)/x;
}

//since max number of threads is 1024 per block which is smaller than 214*214
//so separate the calculation,214 x blocks and 214 threads for each of 60 y blocks
//by which I can use shared memory to do reducation
// For each (j = blockIdx.x, k = blockIdx.y):
//   output_first[j + 214*k] = sum_i F[i,k]*F[j,k]*sinc(q[k]*dist[i,j]).
__global__ void cal_output_first(double *ArrayF,double *Arrayq,double *dist,double *output_first)
{
    int i,j,k;
    i=threadIdx.x;
    j=blockIdx.x;
    k=blockIdx.y;
    __shared__ double sdata[214];
    int tid=threadIdx.x;
    // The grid/block are launched oversized (256-wide); extra threads and
    // blocks do no work.
    bool active = (threadIdx.x<214&&blockIdx.y<60&&blockIdx.x<214);
    if(active)
    {
        sdata[tid]=ArrayF[i+214*k]*ArrayF[j+214*k]*sinc(Arrayq[k]*dist[i+214*j]);
    }
    // Fix: barrier hoisted out of the divergent branch. In the original,
    // threads with tid >= 214 never reached __syncthreads(), which is
    // undefined behaviour.
    __syncthreads();
    if(active && tid==0)
    {
        double sum=0;
        for(int m=0;m<214;m++)
        {
            sum+=sdata[m];
        }
        // Fix: index with stride 214, not blockDim.x (=256). The original
        // wrote at blockIdx.x + blockIdx.y*blockDim.x, which both ran past
        // the 214*60 output buffer and mismatched the stride-214 layout
        // that cal_output_second reads.
        output_first[blockIdx.x+blockIdx.y*214]=sum;
    }
}

//calculate sum of remaining 214 value for outputq[60]
//214 threads and 60 x blocks
// outputQ[k] = sum_j output_first[j + 214*k].
__global__ void cal_output_second(double *output_first,double *outputQ)
{
    int i,k;
    i=threadIdx.x;
    k=blockIdx.x;
    __shared__ double sdata[214];
    int tid=threadIdx.x;
    bool active = (threadIdx.x<214&&blockIdx.x<60);
    if(active)
    {
        sdata[tid]=output_first[i+214*k];
    }
    // Fix: barrier moved outside the divergent branch (see cal_output_first).
    __syncthreads();
    if(active && tid==0)
    {
        double sum=0;
        for(int m=0;m<214;m++)
        {
            sum+=sdata[m];
        }
        outputQ[blockIdx.x]=sum;
    }
}

// Pairwise Euclidean distances between the 214 3-D points in ArrayX
// (stored planar: x[0..213], then y[0..213], then z[0..213]).
__global__ void cal_dist(double *ArrayX,double *dist)
{
    int i,j;
    i=threadIdx.x;
    j=blockIdx.x;
    if(i<214&&j<214)
    {
        double temp1,temp2,temp3;
        temp1=(ArrayX[i]-ArrayX[j])*(ArrayX[i]-ArrayX[j]);
        temp2=(ArrayX[i+214]-ArrayX[j+214])*(ArrayX[i+214]-ArrayX[j+214]);
        temp3=(ArrayX[i+428]-ArrayX[j+428])*(ArrayX[i+428]-ArrayX[j+428]);
        dist[i+214*j]=sqrt(temp1+temp2+temp3);
        //printf("temp is %d",temp);
    }
}

// Reads the input arrays from text files, runs the three kernels in
// sequence, prints outputQ, and reports the CUDA-event elapsed time.
int main()
{
    cudaEvent_t start = 0;
    cudaEvent_t stop = 0;
    float time = 0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    double ArrayF[214*60],Arrayq[60],ArrayX[214*3],outputQ[60];
    int i,j;
    int sizeF=214*60,sizeq=60,sizeX=214*3;
    int sizeQ=60,sizedist=214*214;
    ifstream F("dataF.txt"),q("dataq.txt"),X("dataX.txt");
    for(j=0;j<3;j++)
        for(i=0;i<214;i++)
        {
            X>>ArrayX[i+j*214];
        }
    for(j=0;j<60;j++)
        for(i=0;i<214;i++)
        {
            F>>ArrayF[i+214*j];
        }
    for(j=0;j<60;j++)
    {
        q>>Arrayq[j];
        //cout<<Arrayq[1][j]<<endl;
    }
    //calculate distance dist[214*214]
    int dsizeX=sizeX*sizeof(double);
    int dsizedist=sizedist*sizeof(double);
    int dsizeF=sizeF*sizeof(double);
    int dsizeq=sizeq*sizeof(double);
    int dsizeQ=sizeQ*sizeof(double);
    int dsize_first=214*60*sizeof(double);
    double *d_dist,*d_ArrayX,*d_ArrayF,*d_Arrayq,*d_outputQ;
    double *d_output_first;
    //cudaEventRecord(start,0);
    cudaMalloc((void**)&d_dist,dsizedist);
    cudaMalloc((void**)&d_ArrayX,dsizeX);
    cudaMalloc((void**)&d_ArrayF,dsizeF);
    cudaMalloc((void**)&d_Arrayq,dsizeq);
    cudaMalloc((void**)&d_outputQ,dsizeQ);
    //allocate memory for output_first
    cudaMalloc((void**)&d_output_first,dsize_first);
    cudaMemcpy(d_ArrayX,&ArrayX,dsizeX,cudaMemcpyHostToDevice);
    cudaMemcpy(d_ArrayF,&ArrayF,dsizeF,cudaMemcpyHostToDevice);
    cudaMemcpy(d_Arrayq,&Arrayq,dsizeq,cudaMemcpyHostToDevice);
    cudaMemcpy(d_outputQ,&outputQ,dsizeQ,cudaMemcpyHostToDevice);
    // Oversized launch dims; kernels guard with the 214/60 bounds checks.
    dim3 DimGrid1(256,1,1);
    dim3 DimBlock1(256, 1,1);
    dim3 DimGrid2(256,64,1);
    dim3 DimBlock2(256, 1,1);
    dim3 DimGrid3(64,1,1);
    dim3 DimBlock3(256, 1,1);
    cudaEventRecord(start,0);
    cal_dist<<<DimGrid1,DimBlock1>>>(d_ArrayX,d_dist);
    //cudaEventRecord(stop,0);
    //cudaEventSynchronize(stop);
    cudaThreadSynchronize();
    cal_output_first<<<DimGrid2,DimBlock2>>>(d_ArrayF,d_Arrayq,d_dist,d_output_first);
    //output_first size 214*60
    //calculate output
    cal_output_second<<<DimGrid3,DimBlock3>>>(d_output_first,d_outputQ);
    //cudaMemcpy(&outputQ,d_outputQ,dsizeQ,cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaMemcpy(&outputQ,d_outputQ,dsizeQ,cudaMemcpyDeviceToHost);
    cudaFree(d_dist);
    cudaFree(d_ArrayX);
    cudaFree(d_ArrayF);
    cudaFree(d_Arrayq);
    cudaFree(d_outputQ);
    //Free memory for output_first
    cudaFree(d_output_first);
    for(i=0;i<60;i++)
    {
        cout<<outputQ[i]<<endl;
    }
    /*
    for(j=0;j<214;j++)
    {
        for(i=0;i<214;i++)
            cout<<i<<","<<j<<"__"<<dist[i+j*214]<<endl;
        cout<<endl;
    }
    */
    cudaEventElapsedTime(&time,start,stop);
    cout<<"Time for the kernel: "<<time<<endl;
    return EXIT_SUCCESS;
}
4,518
#include "includes.h"

// Accumulates a downsampled intensity "cube" from a 2-D image: each thread
// owns one downsampled cell (i, j) and loops over its scale_xy x scale_xy
// patch of source pixels. For every in-bounds source pixel with value k,
// the pixel is binned into depth slice floor(k / scale_eps), adding k to
// dev_cube_wi (weighted intensity) and 1 to dev_cube_w (weight/count) at
// that slice. NOTE(review): floorf(k / scale_eps) must stay below
// dimensions_down.z for the cube indexing to be in bounds — confirm that
// the caller sizes the cube for the image's value range.
__global__ void cubefilling_loop(const float* image, float *dev_cube_wi, float *dev_cube_w, const dim3 image_size, int scale_xy, int scale_eps, dim3 dimensions_down)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < dimensions_down.x && j < dimensions_down.y)
    {
        // Base (x, y) offset of this cell in the downsampled cube.
        size_t cube_idx_1 = i + dimensions_down.x*j;
        #pragma unroll
        for (int ii = 0; ii < scale_xy; ii++)
        {
            #pragma unroll
            for (int jj = 0; jj < scale_xy; jj++)
            {
                size_t i_idx = scale_xy*i + ii;   // source pixel column
                size_t j_idx = scale_xy*j + jj;   // source pixel row
                if (i_idx < image_size.x && j_idx < image_size.y)
                {
                    float k = image[i_idx + image_size.x*j_idx];
                    // Select the depth slice from the pixel's intensity.
                    size_t cube_idx_2 = cube_idx_1 + dimensions_down.x*dimensions_down.y*floorf(k / scale_eps);
                    dev_cube_wi[cube_idx_2] += k;
                    dev_cube_w[cube_idx_2] += 1.0f;
                }
            }
        }
    }
}
4,519
#include "includes.h"

// Magnitude |z| of a complex number stored as double2 (x = real, y = imag).
__device__ double complexMagnitude(double2 in){
    double re = in.x;
    double im = in.y;
    return sqrt(re*re + im*im);
}

// Linear global thread id for a 3-D grid of 3-D blocks: block-linear index
// times threads-per-block, plus the thread's linear offset inside its block.
__device__ unsigned int getGid3d3d(){
    int bid = blockIdx.x
            + blockIdx.y * gridDim.x
            + gridDim.x * gridDim.y * blockIdx.z;
    int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    int tidInBlock = threadIdx.x
                   + threadIdx.y * blockDim.x
                   + threadIdx.z * (blockDim.x * blockDim.y);
    return bid * threadsPerBlock + tidInBlock;
}

// Writes |in[gid]|^2 into out[gid].x and zeroes the imaginary part.
__global__ void complexMagnitudeSquared(double2 *in, double2 *out){
    int gid = getGid3d3d();
    double2 z = in[gid];
    out[gid].x = z.x*z.x + z.y*z.y;
    out[gid].y = 0;
}
4,520
#include "includes.h"

// Outlier-reset step for an Adasecant-style optimizer on a fully connected
// layer: one thread per current-layer neuron j. For each incoming weight
// (and for the neuron's bias), if the current gradient or gradient-curvature
// deviates from its running average by more than ~2 standard deviations, the
// corresponding "memory size" is reset to 2.2 (per the Adasecant method,
// http://arxiv.org/pdf/1412.7419v4.pdf). Neurons dropped out this pass
// (dropoutMaskPtr[j] truthy) are skipped entirely.
// Weight buffers are laid out with stride thisLayerSize per input neuron.
// NOTE(review): the two threshold expressions are not symmetric — the
// gradient test computes 2*sqrtf(var) - mean*mean while the curvature test
// computes 2*sqrtf(var - mean^2); presumably both were meant to be
// 2*sqrtf(var - mean^2). Left unchanged here — confirm against the papers
// referenced below before altering.
__global__ void FullyConnectedAdjustMemoryKernel(
    float *weightsGradPtr, float *biasGradPtr,
    float *weightGradCurvePtr, float *biasGradCurvePtr,
    float *avgWeightGradPtr, float *avgBiasGradPtr,
    float *avgWeightGradVarPtr, float *avgBiasGradVarPtr,
    float *avgWeightGradCurvePtr, float *avgBiasGradCurvePtr,
    float *avgWeightGradCurveVarPtr, float *avgBiasGradCurveVarPtr,
    float *weightMemorySizePtr, float *biasMemorySizePtr,
    float *dropoutMaskPtr,
    int prevLayerSize, int thisLayerSize
    )
{
    // i: prev. layer neuron id
    // j: current layer neuron id
    int i;
    int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
        + blockDim.x * blockIdx.x //blocks preceeding current block
        + threadIdx.x;

    if (j < thisLayerSize)
    {
        if (!dropoutMaskPtr[j])
        {
            int index = j;
            for (i = 0; i < prevLayerSize; i++)
            {
                // check for weight outliers
                if (
                    abs(weightsGradPtr[index] - avgWeightGradPtr[index]) > 2 * sqrtf(avgWeightGradVarPtr[index]) - avgWeightGradPtr[index] * avgWeightGradPtr[index]
                    ||
                    abs(weightGradCurvePtr[index] - avgWeightGradCurvePtr[index]) > 2 * sqrtf(avgWeightGradCurveVarPtr[index] - avgWeightGradCurveVarPtr[index] * avgWeightGradCurveVarPtr[index])
                    )
                    // TODO: test which one works best
                    //weightMemorySizePtr[index] += 1; // original method suggested in http://arxiv.org/pdf/1301.3764.pdf
                    weightMemorySizePtr[index] = 2.2f; // reset to 2.2 according to the Adasecant method in http://arxiv.org/pdf/1412.7419v4.pdf

                // Advance to this input neuron's weight for neuron j.
                index += thisLayerSize;
            }

            // check for bias outliers
            if (
                abs(biasGradPtr[j] - avgBiasGradPtr[j]) > 2 * sqrtf(avgBiasGradVarPtr[j]) - avgBiasGradPtr[j] * avgBiasGradPtr[j]
                ||
                abs(biasGradCurvePtr[j] - avgBiasGradCurvePtr[j]) > 2 * sqrtf(avgBiasGradCurveVarPtr[j] - avgBiasGradCurveVarPtr[j] * avgBiasGradCurveVarPtr[j])
                )
                // TODO: test which one works best
                //biasMemorySizePtr[j] += 1; // original method suggested in http://arxiv.org/pdf/1301.3764.pdf
                biasMemorySizePtr[j] = 2.2f; // reset to 2.2 according to the Adasecant method in http://arxiv.org/pdf/1412.7419v4.pdf
        }
    }
}
4,521
/* Pi - CUDA version 1 - uses integers for CUDA kernels
 * Author: Aaron Weeden, Shodor, May 2015 */
#include <stdio.h> /* fprintf() */
#include <iostream>
#include <float.h> /* DBL_EPSILON() */
#include <math.h> /* sqrt() */
#if OPENMP_ENABLED
#include <omp.h>
#endif

#define nthreads 1000

#if CUDA_ENABLED
/* Fix: number of blocks needed to cover n rectangles. The original wrote
 * ceil(n/nthreads), but n/nthreads is INTEGER division, so ceil() received
 * an already-truncated value and the last partial block was never launched
 * (leaving the tail of dev_areas uncomputed for n not a multiple of
 * nthreads). Promote to double before dividing. */
#define NUMBLOCKS(n) ((int)ceil((double)(n)/nthreads))
#define KERNEL(n) <<<NUMBLOCKS(n), nthreads>>>
#else
#define KERNEL(n)
#endif

/* Computes the area of each rectangle under the quarter circle
 * y = sqrt(1 - x^2): dev_areas[t] = width * height at x = t * width.
 * On CUDA this is one thread per rectangle (guarded); otherwise a serial
 * (optionally OpenMP-annotated) loop over all rectangles. */
#if CUDA_ENABLED
__global__
#endif
void calculateAreas(const int numRects, const double width, double *dev_areas) {
#if CUDA_ENABLED
    int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
    if(threadId >= numRects) {
        return;
    }
#elif OPENMP_ENABLED
#pragma omp parallel
#endif
#if !CUDA_ENABLED
    for(int threadId = 0;threadId < numRects;threadId++)
#endif
    {
        double x = threadId * width;
        double heightSq = 1 - (x*x);
        /* Clamp tiny/negative values so sqrt never sees a negative input. */
        double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
        dev_areas[threadId] = (width * height);
    }
}

/* Sums the per-rectangle areas (computed on the device when CUDA_ENABLED,
 * on the host otherwise) into *area — approximates pi/4 for numRects large. */
void calculateArea(const int numRects, double *area) {
    /* Allocate areas in host */
    double *areas = (double*)malloc(numRects * sizeof(double));
    double *dev_areas;
    int i = 0;
    cudaError_t err;

    /* Check for error in allocation*/
    if (areas == NULL) {
        fprintf(stderr, "malloc failed!\n");
    }

    /* Allocate areas in device */
    err = cudaMalloc((void**)&dev_areas, (numRects * sizeof(double)));
    /* Check for error in allocation in device*/
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
    }

#if CUDA_ENABLED
    calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), dev_areas);
    err = cudaMemcpy(areas, dev_areas, (numRects * sizeof(double)), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        /* Fix: this failure is a memcpy, not a malloc — report it as such. */
        fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    }
#else
    calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), areas);
#endif

    (*area) = 0.0;
    for (i = 0; i < numRects; i++) {
        (*area) += areas[i];
    }

    cudaFree(dev_areas);
    free(areas);
}
4,522
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>

/*
 * hostmultiply.cu
 * Copyright 2012 Guy Dickinson <guy.dickinson@nyu.edu>
 *
 * Written for "GPUs: Architecture and Programming"
 * Prof. M. Zahran, New York University
 *
 * Derived in part from code in "Programming Massively Parallel Processors:
 * A Hands-On Approach" by David Kirk and Wen-mei Hwu.
 */

// Threads per block dimension for the tiled kernel launch (16x16 = 256
// threads, legal on all devices; a width x width block exceeds the 1024
// thread limit for width > 32).
#define MATMUL_BLOCK_DIM 16

// Vanilla matrix multiplication on the host: P = M * N, all width x width,
// row-major. Reference implementation for checking the GPU result.
void matrixMulOnHost(float* M, float* N, float* P, int width) {
    for (int i = 0; i < width; ++i)
        for (int j = 0; j < width; ++j) {
            double sum = 0;
            for (int k = 0; k < width; ++k) {
                double a = M[i * width + k];
                double b = N[k * width + j];
                sum += a * b;
            }
            P[i * width + j] = sum;
        }
}

// Matrix Multiplication Kernel: each thread computes one element of Pd.
// Fixes two defects of the original:
//  - Nd was indexed Nd[ty * width + k] (a ROW of N, same as M's row), so the
//    kernel computed dot(M_row, N_row) instead of dot(M_row, N_col).
//  - Only threadIdx was used, so any width > 32 (block limit 1024 threads)
//    produced wrong/no results; blockIdx now contributes to the index.
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < width && col < width) {
        float pvalue = 0;
        for (int k = 0; k < width; ++k) {
            pvalue += Md[row * width + k] * Nd[k * width + col];
        }
        Pd[row * width + col] = pvalue;
    }
}

// M and N are matrices to be multiplied
// P is the result
void cudaMatrixMul(float* M, float* N, float* P, int width) {
    int size = width * width * sizeof(float);
    float* Md;
    float* Nd;
    float* Pd;

    // Transfer M and N to device memory
    cudaMalloc(&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc(&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);

    // Allocate P on the device
    cudaMalloc(&Pd, size);

    // Tiled launch: fixed 16x16 blocks, enough blocks to cover the matrix
    // (ceil-division), instead of a single width x width block.
    dim3 dimBlock(MATMUL_BLOCK_DIM, MATMUL_BLOCK_DIM);
    dim3 dimGrid((width + MATMUL_BLOCK_DIM - 1) / MATMUL_BLOCK_DIM,
                 (width + MATMUL_BLOCK_DIM - 1) / MATMUL_BLOCK_DIM);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);

    // Transfer P from device to host (implicitly synchronizes)
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    // Free device matrices
    cudaFree(Md);
    cudaFree(Pd);
    cudaFree(Nd);
}

// Times host vs. device multiplication for matrix widths 8..1024 (doubling)
// and prints the elapsed times and ratio.
void runTest(void) {
    int doublings = 8;
    int widths[doublings];
    for (int i = 1; i <= doublings; i++) {
        widths[i - 1] = pow(2, i + 2);
    }

    for (int i = 0; i < doublings; i++) {
        int width = widths[i];
        int size = width * width * sizeof(float);
        timeval serialStart, serialEnd;
        timeval parallelStart, parallelEnd;
        double serialElapsedTime;
        double parallelElapsedTime;

        float *m;
        float *n;
        float *p;
        m = (float*) malloc(size);
        n = (float*) malloc(size);
        p = (float*) malloc(size);
        for (int j = 0; j < width * width; j++) {
            n[j] = 0.0;
            m[j] = 1.0;
        }

        gettimeofday(&serialStart, NULL);
        matrixMulOnHost(m, n, p, width);
        gettimeofday(&serialEnd, NULL);

        gettimeofday(&parallelStart, NULL);
        cudaMatrixMul(m, n, p, width);
        gettimeofday(&parallelEnd, NULL);

        serialElapsedTime = (serialEnd.tv_sec - serialStart.tv_sec) * 1000.0;
        serialElapsedTime += (serialEnd.tv_usec - serialStart.tv_usec) / 1000.0;
        parallelElapsedTime = (parallelEnd.tv_sec - parallelStart.tv_sec) * 1000.0;
        parallelElapsedTime += (parallelEnd.tv_usec - parallelStart.tv_usec) / 1000.0;

        double speedup = (serialElapsedTime / parallelElapsedTime) * 100.0;
        printf("%d x %d: Serial: %f\t\tParallel %f\t(%f%% Speedup)\n",
               width, width, serialElapsedTime, parallelElapsedTime, speedup);

        free(m);
        free(n);
        free(p);  // was leaked every iteration
    }
}

int main(void) {
    runTest();
}
4,523
#include "includes.h"

// Elementwise (Hadamard) product: output[i] = m1[i] * m2[i] for all
// i < nThreads. Grid-stride loop, so any launch configuration covers the
// whole range.
__global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output)
{
	const int stride = blockDim.x * gridDim.x;
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	while (idx < nThreads)
	{
		output[idx] = m1[idx] * m2[idx];
		idx += stride;
	}
}
4,524
#include <iostream>

// Elementwise vector add: res[i] = x[i] + y[i] for i < n.
// Grid-stride loop, so any grid size covers the full range.
__global__ void add(
        const float *const x,
        const float *const y,
        float *const res,
        const int n
) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        res[i] = x[i] + y[i];
}

// CPU reference implementation of the same addition.
void add_serial(
        const float *const x,
        const float *const y,
        float *const res,
        const int n
) {
    for (int i = 0; i < n; ++i)
        res[i] = x[i] + y[i];
}

int main() {
    int N = 1 << 28; // 2^28 elements, ~1 GiB per array

    float *x = nullptr, *y = nullptr, *res = nullptr;

    // Three ~1 GiB managed allocations can easily fail on smaller GPUs;
    // the original ignored the return codes and would dereference null.
    if (cudaMallocManaged(&x, N * sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&y, N * sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&res, N * sizeof(float)) != cudaSuccess) {
        std::cerr << "cudaMallocManaged failed" << std::endl;
        return 1;
    }

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize; // ceil-division

    add <<< numBlocks, blockSize >>> (x, y, res, N);

    // surface launch-configuration errors before waiting on the kernel
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err)
                  << std::endl;
        return 1;
    }
    cudaDeviceSynchronize();

    std::cout << "Calc error...";
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(res[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    cudaFree(x);
    cudaFree(y);
    cudaFree(res);

    return 0;
}
4,525
//global memory
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>

int Max=16384;
int width=32;

// Matrix-vector product C = A * b, one thread per output row.
// The original launched a full 2D (Max/32 x Max/32) grid and discarded every
// thread with idx != idy, wasting ~Max^2 threads; a 1D launch with one thread
// per row does the same work.
__global__ void multi(double *A,double *b,double *C,const int Max){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row < Max) {
        double sum = 0.0;
        for (int k = 0; k < Max; k++) {
            sum += A[row * Max + k] * b[k];
        }
        C[row] = sum;
    }
}

int main(){
    printf("global memory:\n");
    double *A =(double *)malloc(Max * Max * sizeof(double)); //A
    double *b =(double *)malloc(Max * sizeof(double)); //b
    double *C =(double *)malloc(Max * sizeof(double)); //C
    // calloc zero-fills: the original malloc'd test_c and then += into
    // uninitialized memory, making the CPU reference (and the check) garbage.
    double *test_c=(double *)calloc(Max, sizeof(double)); //cpu_test
    int i,j;
    for(i=0;i<Max;i++){
        for(j=0;j<Max;j++){
            A[i*Max+j]=i-0.1*j+1;
        }
    }
    for(i=0;i<Max;i++){
        b[i]=log(sqrt(i*i-i+2));
        C[i]=0.0;
    }

    double *A_d,*b_d,*C_d;
    cudaMalloc((void **)&A_d,Max * Max * sizeof(double));
    cudaMalloc((void **)&b_d,Max *sizeof(double));
    cudaMalloc((void **)&C_d,Max *sizeof(double));

    clock_t start,end;
    start=clock();
    cudaMemcpy(A_d, A,Max*Max*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b,Max*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C,Max * sizeof(double), cudaMemcpyHostToDevice);

    // 1D launch: one thread per row, ceil-division for the block count
    int threads = width * width;              // 1024 threads per block
    int blocks = (Max + threads - 1) / threads;
    multi<<<blocks,threads>>>(A_d,b_d,C_d,Max);

    cudaMemcpy(C, C_d, Max * sizeof(double), cudaMemcpyDeviceToHost);
    end=clock();
    // cast before dividing: clock_t arithmetic was pure integer division
    double time=(double)(end-start)*1000.0/CLOCKS_PER_SEC;

    //cpu:
    clock_t start_c,end_c;
    start_c=clock();
    for (int i = 0; i < Max; ++i){
        for (int j = 0; j < Max; ++j) {
            test_c[i]+=A[i*Max+j]*b[j];
        }
    }
    end_c=clock();
    double time_C=(double)(end_c-start_c)*1000.0/CLOCKS_PER_SEC;

    printf("GPU TIME:%lf ms\n",time);
    printf("CPU TIME:%lf ms\n",time_C);

    //check result (compare at float precision, as the original did):
    bool flag = true;
    for (int i = 0; i < Max; ++i){
        float a=test_c[i];
        float b=C[i];
        if (a!=b) {
            flag = false;
        }
    }
    if (flag == true)
        printf("result correct\n");
    else{
        printf("result wrong\n");
    }

    cudaFree(A_d);
    cudaFree(b_d);
    cudaFree(C_d);
    free(A);
    free(b);
    free(C);
    free(test_c);  // was leaked
}
4,526
// Auto-generated-style CUDA driver: reverses/negates two 20-element arrays on
// the GPU using a generated tiling scheme. Left byte-identical apart from
// comments -- the tiling constants (Phi, loopUpperLimits, 19, 9) interlock and
// reordering them is not safe.
#define _NTHREAD 512   // max threads per block the generator assumes
#define _NBLOCK 65535  // max blocks per grid dimension the generator assumes
__global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int );
#define MIN(a,b) (((a)<(b))?(a):(b))
#include<cuda.h>
#include<stdio.h>
#include<stdlib.h>
int main()
{
    int x[20];
    int w[20],i,j,k;  // NOTE(review): j and k are declared but never used
    // seed both arrays with 0,2,4,...,38
    for(i=0;i<20;i++)
    {
        x[i]=2*i;
        w[i]=2*i;
    }
    int _SZ_w_1 = 20;
    int _SZ_x_1 = 20;
    int *_DEV_w;
    cudaMalloc((void**) &_DEV_w, sizeof(int)*_SZ_w_1);
    cudaMemcpy(_DEV_w, w, sizeof(int)*_SZ_w_1, cudaMemcpyHostToDevice);
    int *_DEV_x;
    cudaMalloc((void**) &_DEV_x, sizeof(int)*_SZ_x_1);
    cudaMemcpy(_DEV_x, x, sizeof(int)*_SZ_x_1, cudaMemcpyHostToDevice);
    // Generated launch-configuration logic: clamp the thread count to one
    // block if it fits, otherwise split into blocks and, beyond the grid
    // limit, into sequential "tiles" (_NUM_TILE kernel launches).
    int _NUM_THREADS = 20;
    float _NUM_BLOCKS=1;
    int _NUM_TILE=1;
    dim3 _THREADS(512);
    dim3 _BLOCKS(1);
    if(_NUM_THREADS < _NTHREAD)
    {
        _THREADS.x=_NUM_THREADS;
    }
    else
    {
        _THREADS.x=_NTHREAD;
        _NUM_BLOCKS=(_NUM_THREADS % _NTHREAD == 0)?(_NUM_THREADS/_NTHREAD):((_NUM_THREADS/_NTHREAD)+1);
        if(_NUM_BLOCKS<_NBLOCK)
            _BLOCKS.x=_NUM_BLOCKS;
        else
        {
            _BLOCKS.x=_NBLOCK;
            int temp=_NUM_BLOCKS;
            _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1);
        }
    }
    // Generated loop-strip bounds: Phi is the strip length (19), the loop's
    // upper limit is 9, so 9/19+1 == 1 pass over the iteration space.
    int ID_1, ID_2, START[1];
    int _CUDA_TILE;
    int Phi[1]={19};
    int loopUpperLimits[1]={9};
    for(ID_1=1;ID_1<=9/19+1;ID_1++)
    {
        for(ID_2=0;ID_2<1;ID_2++)
        {
            if(Phi[ID_2]>=0)
                START[ID_2]=(ID_1-1)*Phi[ID_2];
            else
                START[ID_2]=loopUpperLimits[ID_2]+(ID_1-1)*Phi[ID_2];
        }
        for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
        {
            // kernel covers loop indices [START, min(START+19, 9)]
            _AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_w, _SZ_w_1, _DEV_x, _SZ_x_1, START[0], MIN(START[0]+19, 9), _CUDA_TILE);
            cudaDeviceSynchronize();
        }
    }
    cudaMemcpy(w, _DEV_w, sizeof(int)*_SZ_w_1, cudaMemcpyDeviceToHost);
    cudaMemcpy(x, _DEV_x, sizeof(int)*_SZ_x_1, cudaMemcpyDeviceToHost);
    cudaFree(_DEV_w);
    cudaFree(_DEV_x);
    return 0;
}
// For loop indices i in [CUDA_L_i, CUDA_U_i] (here 0..9):
//   x[i] = -x[19-i];  w[i] = w[19-i];
// Writes touch indices 0..9 and reads touch 10..19, so the index sets are
// disjoint and the parallel threads do not race within one launch.
__global__ void _AFFINE_KERNEL(int* w,int _SZ_w_1,int* x,int _SZ_x_1,int CUDA_L_i,int CUDA_U_i, int _CUDA_TILE)
{
    // global index across tiles: each tile shifts by a full grid of threads
    int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x;
    if((CUDA_L_i<=i)&&(i<=CUDA_U_i)){
        x[1+i-1]=-x[20-1-i];
        w[1+i-1]=w[20-1-i];
    }}
4,527
/*
 * Carlos Roman Rivera - A01700820
 *
 * Programming Languages - Cuda Lab 1
 *
 */
#include <stdio.h>
#include <stdlib.h>

#define BLOCKS 1000
#define THREADS_PER_BLOCK 512
#define RECTANGLES 1000000

// Midpoint-rule integration of 4/(1+x^2) over [0,1] (= pi).
// Each thread accumulates the heights of its grid-stride subset of the n
// rectangles into its own slot r[id]; the host sums the slots and multiplies
// by the rectangle width. r must be zero-initialized before launch.
__global__ void gpuPi(double *r, double width, int n) {
    int idx = threadIdx.x + (blockIdx.x * blockDim.x); // Index to calculate.
    int id = idx;                                      // My array position.
    double mid, height;                                // Auxiliary variables.

    while (idx < n) {                       // Dont overflow array.
        mid = (idx + 0.5) * width;          // Midpoint rule: 0.5 (was 0.6).
        height = 4.0 / (1.0 + mid * mid);   // f(x) = 4 / (1 + x^2).
        r[id] += height;                    // Accumulate into my slot.
        idx += (blockDim.x * gridDim.x);    // Grid-stride update.
    }
}

int main() {
    double *pi;
    double *d_pi;
    double width;
    double result = 0;

    width = 1.0 / (double) RECTANGLES;

    int results = (BLOCKS * THREADS_PER_BLOCK); // Total threads.
    int size = results * sizeof(double);        // Size in bytes.

    pi = (double*) malloc(size);                // Memory on host.
    cudaMalloc((void**)&d_pi, size);            // Memory on device.

    // The kernel accumulates with +=, so the device buffer must start at
    // zero. The original copied UNINITIALIZED host memory here, corrupting
    // the sum with whatever malloc returned.
    cudaMemset(d_pi, 0, size);

    gpuPi<<<BLOCKS, THREADS_PER_BLOCK>>>(d_pi, width, RECTANGLES);

    cudaMemcpy(pi, d_pi, size, cudaMemcpyDeviceToHost); // Device to host.

    for (int i = 0; i < results; i++) { // Sum per-thread partials.
        result += pi[i];
    }
    result *= width; // Scale by rectangle width.

    printf("PI: %lf\n", result); // Display result.

    free(pi);        // Free host memory.
    cudaFree(d_pi);  // Free device memory.
    return 0;
}
4,528
//-------------------------------------GPU Implementation of KNN--------------------------------------------------
//---------------------------Train Data store in input.txt and Test data in test.txt------------------------------
#include<iostream>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<stdlib.h>
#include<stdio.h>
#include<thrust/sort.h>
#include<math.h>
#include<cuda.h>
using namespace std;

// Euclidean distance from one query point to every training row.
// Launched as k1<<<1, N>>>: exactly one thread per training row, so only
// threadIdx.x is needed. gres[row] receives the distance and gid[row] the
// row index (used later as the sort key payload).
// The feature loop starts at column 1 -- column 0 is presumably an id/label
// field, not a feature; confirm against the input file format.
__global__ void k1(float *gdata,float *gquery,float *gres,int *gid,int N,int count)
{
    const int row = threadIdx.x;
    gid[row] = row;

    float sumSq = 0;
    for (int col = 1; col < count; col++) {
        const float diff = gdata[row*count + col] - gquery[col];
        sumSq += diff * diff;
    }
    gres[row] = sqrt(sumSq);
}

//Calculating distances in parallel between all train point and test point .
//kernal launched with m*n threads
// All-pairs distance kernel: thread id encodes (test i, train j) as i*N+j.
// NOTE(review): dis is never zero-initialized before these atomicAdds --
// cudaMalloc does not zero memory, so the accumulated distances start from
// garbage. (The atomicAdd itself is also unnecessary: each thread owns
// dis[id] exclusively.)
__global__ void maxkernal(float *data,float *query,float *dis,int *gid,int N,int count){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int i = id/N;   // test-point index
    int j = id%N;   // training-point index
    // column 0 skipped -- presumably an id/label field, confirm input format
    for(int k=1;k<count;k++){
        atomicAdd(&dis[id],((data[j*count+k]-query[i*count+k])*(data[j*count+k]-query[i*count+k])));
    }
    gid[id] = id;
    dis[id] = sqrt(dis[id]);
}

// Accuracy calculation in parallel: one thread per test point, counts matches
// between predicted (s2) and true (s1) class ids into counter[0].
__global__ void Accuracy(int *s1,int *s2,int *counter){
    int id = threadIdx.x;
    int x = 1;
    if(s1[id]==s2[id]){
        atomicAdd(&counter[0],x);
    }
}

// Begin of the main function
int main(){
    //Reading the train points
    int k=15;      // number of neighbours to vote
    int N=135;     // training rows in input.txt
    int count=0;   // columns per row, counted from the commas of line 1
    FILE *fp;
    string s[N];   // class label text per training row
    fp = fopen("input.txt","r");
    char ch = ' ';
    while(ch!='\n'){
        ch = getc(fp);
        if(ch==','){
            count++;
        }
    }
    float *data = (float *)malloc(N*count*sizeof(float));
    for(int i=0;i<N;i++){
        for(int j=0;j<count;j++){
            fscanf(fp,"%f",&data[i*count+j]);
            ch = fgetc(fp);  // consume the separator
        }
        // remainder of the line is the class label string
        char c;
        c = fgetc(fp);
        while(c!='\n'){
            s[i] += c;
            c = fgetc(fp);
        }
    }
    fclose(fp);
    float *gdata,*gres,*res;
    int *id,*gid;
    int *fclass;
    //Reading the test point
    FILE *op;
    int m=15;        // number of test rows in test.txt
    string s1[m];    // test label text
    int gsres[m];    // predicted class id per test row (host stack array)
    float *query,*gquery;
    float *query2d = (float *)malloc(m*count*sizeof(float));
    fclass = (int *)malloc(m*sizeof(int));   // true class id per test row
    op = fopen("test.txt","r");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float ms = 0;
    // Basic KNN: one kernel launch per test point.
    // NOTE(review): gquery/gdata/gres/gid are cudaMalloc'd and query/res/id
    // malloc'd on EVERY iteration but only freed once after the loop -- all
    // but the last allocation leak.
    for(int i=0;i<m;i++){
        query = (float *)malloc(count*sizeof(float));
        for(int j=0;j<count;j++){
            fscanf(op,"%f",&query[j]);
            query2d[i*count+j] = query[j];
            ch = fgetc(op);
        }
        char c;
        c = fgetc(op);
        while(c!='\n'){
            s1[i] += c;
            c = fgetc(op);
        }
        // map label text to a numeric class id for the Accuracy kernel
        if(s1[i]=="Iris-setosa"){
            fclass[i] = 1;
        }
        if(s1[i]=="Iris-versicolor"){
            fclass[i] = 2;
        }
        if(s1[i]=="Iris-virginica"){
            fclass[i] = 3;
        }
        float milliseconds = 0;
        cudaEventRecord(start,0);
        cudaMalloc(&gquery,count*sizeof(float));
        cudaMalloc(&gdata,N*count*sizeof(float));
        cudaMalloc(&gres,N*sizeof(float));
        cudaMalloc(&gid,N*sizeof(int));
        res = (float *)malloc(N*sizeof(float));
        id = (int *)malloc(N*sizeof(int));
        cudaMemcpy(gdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
        cudaMemcpy(gquery,query,count*sizeof(float),cudaMemcpyHostToDevice);
        //Launching one test point to all train point kernal
        k1<<<1,N>>>(gdata,gquery,gres,gid,N,count);
        cudaMemcpy(res,gres,N*sizeof(float),cudaMemcpyDeviceToHost);
        cudaMemcpy(id,gid,N*sizeof(int),cudaMemcpyDeviceToHost);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&milliseconds, start, stop);
        ms += milliseconds;
        // sort training rows by distance; id carries the row indices along
        thrust::sort_by_key(res, res + N, id);
        int count1,count2,count3;
        count1 = count2 = count3 = 0;
        //voting process of K closest neighbour
        for(int j=0;j<k;j++){
            if(s[id[j]]=="Iris-setosa"){
                count1++;
            }
            if(s[id[j]]=="Iris-versicolor"){
                count2++;
            }
            if(s[id[j]]=="Iris-virginica"){
                count3++;
            }
        }
        // majority vote -> predicted class id (ties resolve to the later class)
        if(count1>count2){
            if(count1>count3){
                gsres[i] = 1;
            }
            else{
                gsres[i] = 3;
            }
        }
        else{
            if(count2>count3){
                gsres[i] = 2;
            }
            else{
                gsres[i] = 3;
            }
        }
    }
    // Accuracy of the basic KNN, computed on the GPU
    int *gclass,*ggsres,*gcounter;
    int counter[1];
    counter[0] = 0;
    cudaMalloc(&gclass,m*sizeof(int));
    cudaMalloc(&ggsres,m*sizeof(int));
    cudaMalloc(&gcounter,1*sizeof(int));
    cudaMemcpy(gclass,fclass,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(ggsres,gsres,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gcounter,counter,1*sizeof(int),cudaMemcpyHostToDevice);
    // Accuracy calculation
    Accuracy<<<1,m>>>(gclass,ggsres,gcounter);
    cudaMemcpy(counter,gcounter,1*sizeof(int),cudaMemcpyDeviceToHost);
    float acc = counter[0]*100;
    acc = acc/m;
    printf("Basic KNN Time taken in %f millisecond\n",ms);
    cout<<"Accuracy of KNN "<<acc<<"%"<<"\n";
    // prediction on random points
    srand(time(0));
    float *points = (float *)malloc(count*sizeof(float));
    for(int j=0;j<count;j++){
        if(j<count-1){
            points[j] = rand()%8;
        }
        else{
            points[j] = rand()%3;
        }
    }
    cout<<"\n";
    float *dis,*ggdata;
    float *gpoint,*gdis;
    int *gidd;
    int *idd;
    cudaMalloc(&gpoint,count*sizeof(float));
    cudaMalloc(&ggdata,N*count*sizeof(float));
    cudaMalloc(&gdis,N*sizeof(float));
    cudaMalloc(&gidd,N*sizeof(int));
    dis = (float *)malloc(N*sizeof(float));
    idd = (int *)malloc(N*sizeof(int));
    cudaMemcpy(ggdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(gpoint,points,count*sizeof(float),cudaMemcpyHostToDevice);
    //Launching one test point to all train point kernal
    // NOTE(review): this passes gdata (the buffer from the LAST loop
    // iteration above), not the freshly filled ggdata. It only works because
    // gdata happens to still hold the same training data -- verify intent.
    k1<<<1,N>>>(gdata,gpoint,gdis,gidd,N,count);
    cudaMemcpy(dis,gdis,N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(idd,gidd,N*sizeof(int),cudaMemcpyDeviceToHost);
    thrust::sort_by_key(dis, dis + N, idd);
    int count1,count2,count3;
    count1 = count2 = count3 = 0;
    //voting process of K closest neighbour
    for(int i=0;i<k;i++){
        if(s[idd[i]]=="Iris-setosa"){
            count1++;
        }
        if(s[idd[i]]=="Iris-versicolor"){
            count2++;
        }
        if(s[idd[i]]=="Iris-virginica"){
            count3++;
        }
    }
    //Deciding on voting result
    string prediction;
    if(count1>count2){
        if(count1>count3){
            prediction = "Iris-setosa";
        }
        else{
            prediction = "Iris-virginica";
        }
    }
    else{
        if(count2>count3){
            prediction = "Iris-versicolor";
        }
        else{
            prediction = "Iris-virginica";
        }
    }
    cout<<"prediction Result "<<prediction<<"\n";
    // More parallelism
    //One more Knn implementation
    // Max-parallel variant: one m*N-thread launch computes all test/train
    // distances at once (see maxkernal zero-init NOTE above).
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    float milliseconds1 = 0;
    cudaEventRecord(start1,0);
    int *id2d,*gid2d;
    int *mres = (int *)malloc(m*sizeof(int));
    float *gquery2d,*gdatam,*gdist,*dist;
    cudaMalloc(&gquery2d,m*count*sizeof(float));
    cudaMemcpy(gquery2d,query2d,m*count*sizeof(float),cudaMemcpyHostToDevice);
    cudaMalloc(&gdatam,N*count*sizeof(float));
    cudaMemcpy(gdatam,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
    dist = (float *)malloc(m*N*sizeof(float));
    cudaMalloc(&gdist,m*N*sizeof(float));
    id2d = (int *)malloc(m*N*sizeof(int));
    cudaMalloc(&gid2d,m*N*sizeof(int));
    //Distance calculation of KNN through all train and all test points in parallel
    //launching M*N threads
    maxkernal<<<m,N>>>(gdatam,gquery2d,gdist,gid2d,N,count);
    cudaMemcpy(dist,gdist,m*N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(id2d,gid2d,m*N*sizeof(int),cudaMemcpyDeviceToHost);
    cudaEventRecord(stop1,0);
    cudaEventSynchronize(stop1);
    cudaEventElapsedTime(&milliseconds1, start1, stop1);
    // host-side vote per test point on the i-th row of the distance matrix
    // NOTE(review): distance/index malloc'd each iteration, never freed
    for(int i=0;i<m;i++){
        float *distance = (float *)malloc(N*sizeof(float));
        int *index = (int *)malloc(N*sizeof(int));
        for(int j=0;j<N;j++){
            distance[j] = dist[i*N+j];
            index[j] = id2d[i*N+j];
        }
        //Sorting the K nearest neighbour.
        thrust::sort_by_key(distance, distance + N, index);
        int count1,count2,count3;
        //voting for K nearest neighbour
        count1 = count2 = count3 = 0;
        for(int j=0;j<k;j++){
            int p = index[j]%N;   // global thread id -> training row
            if(s[p]=="Iris-setosa"){
                count1++;
            }
            if(s[p]=="Iris-versicolor"){
                count2++;
            }
            if(s[p]=="Iris-virginica"){
                count3++;
            }
        }
        if(count1>count2){
            if(count1>count3){
                mres[i] = 1;
            }
            else{
                mres[i] = 3;
            }
        }
        else{
            if(count2>count3){
                mres[i] = 2;
            }
            else{
                mres[i] = 3;
            }
        }
    }
    // Accuracy calculation.
    int *ggclass,*gggsres,*ggcounter;
    int ccounter[1];
    ccounter[0] = 0;
    cudaMalloc(&ggclass,m*sizeof(int));
    cudaMalloc(&gggsres,m*sizeof(int));
    cudaMalloc(&ggcounter,1*sizeof(int));
    cudaMemcpy(ggclass,fclass,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gggsres,mres,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(ggcounter,ccounter,1*sizeof(int),cudaMemcpyHostToDevice);
    Accuracy<<<1,m>>>(ggclass,gggsres,ggcounter);
    cudaMemcpy(ccounter,ggcounter,1*sizeof(int),cudaMemcpyDeviceToHost);
    float aacc = ccounter[0]*100;
    aacc = aacc/m;
    printf("Time taken %f\n",milliseconds1);
    // NOTE(review): prints acc (basic-KNN accuracy) -- aacc, just computed
    // above, looks intended here.
    cout<<"Accuracy of KNN after Max Parallelism "<<acc<<"%"<<"\n";
    //Free gpu variables
    // NOTE(review): cudaFree(gsres) passes a HOST stack array to cudaFree --
    // it returns an (ignored) error; gsres should not be in this list.
    cudaFree(ggclass);
    cudaFree(gggsres);
    cudaFree(ggcounter);
    cudaFree(gquery2d);
    cudaFree(gdatam);
    cudaFree(gdis);
    cudaFree(gdist);
    cudaFree(gid);
    cudaFree(gid2d);
    cudaFree(gpoint);
    cudaFree(gquery);
    cudaFree(gdata);
    cudaFree(gcounter);
    cudaFree(gclass);
    cudaFree(gsres);
    cudaFree(gres);
    cudaFree(gidd);
    cudaFree(ggdata);
    //Free Cpu variables
    // NOTE(review): query/res/id free only the final loop iteration's buffers
    free(data);
    free(fclass);
    free(res);
    free(id);
    free(query);
    free(query2d);
    free(points);
    free(idd);
    free(dis);
    free(id2d);
    free(mres);
    free(dist);
    //---------------------------++++++++++++++++++++++++----------------------------
    cudaDeviceSynchronize();
    return 0;
}
4,529
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>

// Event-based GPU timing helpers (create/start/end destroy the events).
#define TIMER_CREATE(t)               \
  cudaEvent_t t##_start, t##_end;     \
  cudaEventCreate(&t##_start);        \
  cudaEventCreate(&t##_end);

#define TIMER_START(t)                \
  cudaEventRecord(t##_start);         \
  cudaEventSynchronize(t##_start);    \

#define TIMER_END(t)                             \
  cudaEventRecord(t##_end);                      \
  cudaEventSynchronize(t##_end);                 \
  cudaEventElapsedTime(&t, t##_start, t##_end);  \
  cudaEventDestroy(t##_start);                   \
  cudaEventDestroy(t##_end);

#define TILE_SIZE 16
#define CUDA_TIMING

unsigned char *input_gpu;
unsigned char *output_gpu;
unsigned int *histogram;

// Wall-clock milliseconds (monotonic).
double CLOCK() {
  struct timespec t;
  clock_gettime(CLOCK_MONOTONIC, &t);
  return (t.tv_sec * 1000) + (t.tv_nsec * 1e-6);
}

/*******************************************************/
/* Cuda Error Function                                 */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    exit(-1);
  }
#endif
  return result;
}

// 256-bin histogram kernel: one thread per pixel of the TILE_SIZE-padded
// image, accumulating with global atomics. Row stride is
// gridDim.x * TILE_SIZE (the padded width).
// The original also dumped the histogram from thread 0 after __syncthreads();
// that barrier is block-scoped, so atomicAdds from other blocks were still in
// flight and the printed values were racy/partial -- removed; the host prints
// the final histogram after synchronizing.
__global__ void kernel(unsigned char *input, unsigned int *histogram) {
  int x = blockIdx.x * TILE_SIZE + threadIdx.x;
  int y = blockIdx.y * TILE_SIZE + threadIdx.y;
  int location = y * TILE_SIZE * gridDim.x + x;

  int myBin = input[location] % 256;  // values are 0..255 already; % is a no-op guard
  atomicAdd(&(histogram[myBin]), 1);
}

// Builds and prints the histogram of `data` (height x width, padded up to a
// multiple of TILE_SIZE for the launch).
// NOTE(review): the full padded buffer (size bytes) is copied from `data`, so
// the caller must have allocated `data` at the padded size -- confirm.
void histogram_gpu(unsigned char *data,
                   unsigned int height,
                   unsigned int width) {
  int gridXSize = 1 + ((width - 1) / TILE_SIZE);
  int gridYSize = 1 + ((height - 1) / TILE_SIZE);

  int XSize = gridXSize * TILE_SIZE;
  int YSize = gridYSize * TILE_SIZE;

  // Both are the same size (CPU/GPU).
  int size = XSize * YSize;

  // Allocate arrays in GPU memory
  checkCuda(cudaMalloc((void**)&input_gpu, size * sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&output_gpu, size * sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&histogram, 256 * sizeof(unsigned int)));

  checkCuda(cudaMemset(output_gpu, 0, size * sizeof(unsigned char)));
  checkCuda(cudaMemset(histogram, 0, 256 * sizeof(unsigned int)));

  // Copy data to GPU
  checkCuda(cudaMemcpy(input_gpu, data, size * sizeof(char),
                       cudaMemcpyHostToDevice));
  checkCuda(cudaDeviceSynchronize());

  // Execute algorithm
  dim3 dimGrid(gridXSize, gridYSize);
  dim3 dimBlock(TILE_SIZE, TILE_SIZE);

  // Kernel Call
#if defined(CUDA_TIMING)
  float Ktime;
  TIMER_CREATE(Ktime);
  TIMER_START(Ktime);
#endif

  kernel<<<dimGrid, dimBlock>>>(input_gpu, histogram);

  // 255/size was INTEGER division and always evaluated to 0
  double alpha = 255.0 / size;

  checkCuda(cudaDeviceSynchronize());
  printf("alpha: %.15f \n", alpha);

  // fetch and print the finished histogram on the host
  unsigned int *histogram2 = new unsigned int[256];
  checkCuda(cudaMemcpy(histogram2, histogram, 256 * sizeof(unsigned int),
                       cudaMemcpyDeviceToHost));
  int sum = 0;
  for (int i = 0; i < 256; i++) {
    printf("%d \n", histogram2[i]);
    sum += histogram2[i];
  }
  printf("real sum is: %d\n", sum);
  delete[] histogram2;  // was leaked

  checkCuda(cudaDeviceSynchronize());

#if defined(CUDA_TIMING)
  TIMER_END(Ktime);
  printf("Kernel Execution Time: %f ms\n", Ktime);
#endif

  // Retrieve results from the GPU (output_gpu is currently only the zeroed
  // placeholder buffer; the kernel writes histogram bins, not pixels)
  checkCuda(cudaMemcpy(data, output_gpu, size * sizeof(unsigned char),
                       cudaMemcpyDeviceToHost));

  // Free resources and end the program
  checkCuda(cudaFree(output_gpu));
  checkCuda(cudaFree(input_gpu));
  checkCuda(cudaFree(histogram));
}

// Same pipeline without timing/printing; used to warm up the GPU so the
// timed run is not charged for context/JIT startup.
void histogram_gpu_warmup(unsigned char *data,
                          unsigned int height,
                          unsigned int width) {
  int gridXSize = 1 + ((width - 1) / TILE_SIZE);
  int gridYSize = 1 + ((height - 1) / TILE_SIZE);

  int XSize = gridXSize * TILE_SIZE;
  int YSize = gridYSize * TILE_SIZE;

  // Both are the same size (CPU/GPU).
  int size = XSize * YSize;

  // Allocate arrays in GPU memory
  checkCuda(cudaMalloc((void**)&input_gpu, size * sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&output_gpu, size * sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&histogram, 256 * sizeof(unsigned int)));

  checkCuda(cudaMemset(output_gpu, 0, size * sizeof(unsigned char)));
  checkCuda(cudaMemset(histogram, 0, 256 * sizeof(unsigned int)));

  // Copy data to GPU
  checkCuda(cudaMemcpy(input_gpu, data, size * sizeof(char),
                       cudaMemcpyHostToDevice));
  checkCuda(cudaDeviceSynchronize());

  // Execute algorithm
  dim3 dimGrid(gridXSize, gridYSize);
  dim3 dimBlock(TILE_SIZE, TILE_SIZE);

  kernel<<<dimGrid, dimBlock>>>(input_gpu, histogram);
  checkCuda(cudaDeviceSynchronize());

  // Retrieve results from the GPU
  checkCuda(cudaMemcpy(data, output_gpu, size * sizeof(unsigned char),
                       cudaMemcpyDeviceToHost));

  // Free resources and end the program
  checkCuda(cudaFree(output_gpu));
  checkCuda(cudaFree(input_gpu));
  checkCuda(cudaFree(histogram));  // was leaked by the warmup path
}
4,530
#include "includes.h"

// Maps each value of `vals` (width x height) to a 4-stop heat-map color ramp
// and writes the RGBA result into `colored`.
// - Values are normalized into [0,1] over [minVal, maxVal] and clamped.
// - The ramp is piecewise-linear across four quarters; t is the position
//   within the current quarter.
// - NaN input marks "no data" and produces fully transparent black.
// Launched on a 2D grid of 2D blocks covering width x height (bounds guarded).
// All literals are float (0.0f/0.25f/...) to avoid silently promoting the
// arithmetic to double, which the original's bare 1.0/0.25 literals did.
__global__ void gpu_colorRampHeatMapUnsat(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal)
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;

    if (x >= width || y >= height) {
        return;
    }

    const int index = x + y*width;
    uchar4 & imgVal = colored[index];

    if (isnan(vals[index])) {
        imgVal = make_uchar4(0,0,0,0);
        return;
    }

    // clamp to [0,1]
    const float normVal = fmaxf(0.0f, fminf((vals[index] - minVal)/(maxVal - minVal), 1.0f));

    // position within the current quarter of the ramp; exactly 1 maps to the
    // end of the last segment rather than wrapping to 0 via fmodf
    const float t = normVal == 1.0f ? 1.0f : fmodf(normVal, 0.25f)*4.0f;

    // segment endpoints: a = start color, b = end color
    uchar3 a, b;
    if (normVal < 0.25f) {
        b = make_uchar3(32,191,139);
        a = make_uchar3(0x18,0x62,0x93);
    } else if (normVal < 0.5f) {
        b = make_uchar3(241,232,137);
        a = make_uchar3(32,191,139);
    } else if (normVal < 0.75f) {
        b = make_uchar3(198,132,63);
        a = make_uchar3(241,232,137);
    } else {
        b = make_uchar3(0xc0,0x43,0x36);
        a = make_uchar3(198,132,63);
    }

    // linear interpolation between the segment endpoints, opaque alpha
    imgVal = make_uchar4((1.0f-t)*a.x + t*b.x,
                         (1.0f-t)*a.y + t*b.y,
                         (1.0f-t)*a.z + t*b.z,
                         255);
}
4,531
#include "includes.h"

// Shifts `in` by `offset` positions into `out`: out[i] = in[i - offset],
// with zero fill wherever the source index falls outside [0, length).
// Grid-stride loop: correct for any launch configuration.
__global__ void gShift(float* out, const float* in, int length, int offset)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < length; i += stride)
    {
        const int src = i - offset;
        const bool inRange = (src >= 0) && (src < length);
        out[i] = inRange ? in[src] : 0;
    }
}
4,532
#include "includes.h"
#define MAX_VALUE 10

// SAXPY with an integer scale factor: Z[i] = A * X[i] + Y[i] for i < N.
// One thread per element; threads past the end of the arrays exit early.
__global__ void saxpy(float *X, float *Y, float *Z, int A, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    Z[idx] = A * X[idx] + Y[idx];
}
4,533
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <algorithm>
using namespace std;

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "hello.cuh"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>

// Benchmarks std::sort on the CPU against thrust::sort on the GPU over the
// same random data.  Returns 1 on completion (kept for caller compatibility).
//
// Fixes vs. the original:
//  - thrust::sort was called on raw *host* pointers, which runs thrust's host
//    backend, so "CUDA Thrust Sort" never touched the GPU.  The device buffer
//    dev_a was allocated and filled but unused.  We now wrap dev_a in a
//    thrust::device_ptr so the sort actually executes on the device.
//  - cudaDeviceSynchronize() before stopping the clock, since the device sort
//    completes asynchronously with respect to the host.
//  - The `a` and `b` host arrays were leaked; they are now delete[]'d.
int mainForSort()
{
    const int SIZE = 1000000;
    cout << "Begin : " << endl;
    srand(time(0));

    double* a = new double[SIZE];
    double* b = new double[SIZE];
    for (long i = 0; i < SIZE; i++)
        b[i] = a[i] = rand() % 100;

    double* dev_a = 0;
    cudaSetDevice(0);
    cudaMalloc((void**)&dev_a, sizeof(double) * SIZE);
    // Upload the unsorted data for the device-side sort.
    cudaMemcpy(dev_a, b, SIZE * sizeof(double), cudaMemcpyHostToDevice);

    clock_t start, finish;

    start = clock();
    sort(a, a + SIZE);
    finish = clock();
    cout << "By CPU STL Sort: " << (double)(finish - start) / CLOCKS_PER_SEC << endl;

    start = clock();
    thrust::device_ptr<double> d_begin(dev_a);
    thrust::sort(d_begin, d_begin + SIZE);   // dispatches to the CUDA backend
    cudaDeviceSynchronize();                 // wait so the timing is meaningful
    finish = clock();
    cout << "By CUDA Thrust Sort: " << (double)(finish - start) / CLOCKS_PER_SEC << endl;

    cudaFree(dev_a);
    delete[] a;
    delete[] b;
    return 1;
}
4,534
/******************************************************************************
 * PROGRAM: copyStruture
 * PURPOSE: This program is a test which test the ability to transfer multilevel
 * C++ structured data from host to device, modify them and transfer back.
 *
 * NAME: Vuong Pham-Duy.
 * College student.
 * Faculty of Computer Science and Technology.
 * Ho Chi Minh University of Technology, Viet Nam.
 * vuongpd95@gmail.com
 *
 * DATE: 5/10/2017
 ******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a CUDA error with its location and (by default) abort.
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", \
            cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* Structure *****************************************************************/
// Innermost level: a seed hit.
typedef struct {
    int64_t rbeg;
    int32_t qbeg, len;
    int score;
} mem_seed_t; // unaligned memory

// Middle level: a chain owning an array of seeds.
typedef struct {
    int n, m, first, rid;
    uint32_t w:29, kept:2, is_alt:1;
    float frac_rep;
    int64_t pos;
    mem_seed_t *seeds;
} mem_chain_t;

// Outermost level: a vector of chains.
typedef struct {
    size_t n, m;
    mem_chain_t *a;
} mem_chain_v;

// Pointer-free mirrors of the two outer levels so the whole structure can be
// shipped to the device as three contiguous arrays.
typedef struct {
    int n, m, first, rid;
    uint32_t w:29, kept:2, is_alt:1;
    float frac_rep;
    int64_t pos;
} flat_mem_chain_t;

typedef struct {
    size_t n, m;
} flat_mem_chain_v;

// Single-threaded check kernel: accumulates the `n` counters of the flattened
// arrays into *d_b so the host can verify the transfer.  `seeds`/`n_seeds`
// are accepted but unused here (kept for interface symmetry).
__global__ void func(int n, int n_a, int n_seeds, flat_mem_chain_v *chns, \
    flat_mem_chain_t *a, mem_seed_t *seeds, int *d_b)
{
    int i, j;
    *d_b = 0;
    for(i = 0; i < n; i++) {
        *d_b += chns[i].n;
        for(j = 0; j < n_a; j++) {
            *d_b += a[j].n;
        }
    }
}
/* Structure *****************************************************************/

int main(int argc, char *argv[])
{
    int b;
    int *d_b;
    gpuErrchk(cudaMalloc(&d_b, sizeof(int)));

    // Build the nested host structure: n groups x 10 chains x 10 seeds.
    // NOTE(review): only `n` (counts) and seed `score` are initialised; the
    // other fields (m, first, rid, ...) are copied below while still
    // indeterminate, as in the original — confirm if that matters upstream.
    int n, i, j, k;
    n = 10;
    mem_chain_v *chns;
    chns = (mem_chain_v*)malloc(sizeof(mem_chain_v) * n);
    for(i = 0; i < n; i++) {
        chns[i].n = 10;
        chns[i].a = (mem_chain_t*)malloc(\
            chns[i].n * sizeof(mem_chain_t));
        for(j = 0; j < chns[i].n; j++) {
            chns[i].a[j].n = 10;
            chns[i].a[j].seeds = (mem_seed_t*)malloc(\
                chns[i].a[j].n * sizeof(mem_seed_t));
            for(k = 0; k < chns[i].a[j].n; k++) {
                chns[i].a[j].seeds[k].score = i + j + k;
            }
        }
    }

    // Count totals so the flat arrays can be sized.
    int n_a, n_seeds;
    n_a = 0;
    n_seeds = 0;
    for(i = 0; i < n; i++) {
        n_a += chns[i].n;
        for(j = 0; j < chns[i].n; j++) {
            n_seeds += chns[i].a[j].n;
        }
    }

    flat_mem_chain_v *f_chns, *df_chns;
    flat_mem_chain_t *f_a, *df_a;
    mem_seed_t *seeds, *d_seeds;

    // Flattened host-side copies of the nested structure.
    f_chns = (flat_mem_chain_v*)malloc(n * sizeof(flat_mem_chain_v));
    f_a = (flat_mem_chain_t*)malloc(n_a * sizeof(flat_mem_chain_t));
    seeds = (mem_seed_t*)malloc(n_seeds * sizeof(mem_seed_t));

    // BUG FIX: the original only advanced acc_a / acc_seeds *after* the inner
    // loops, so every chain in a group overwrote f_a[acc_a] and every seed of
    // a chain overwrote seeds[acc_seeds], leaving most of both arrays
    // uninitialised.  The cursors now advance once per element copied.
    int acc_a, acc_seeds;
    acc_a = 0;
    acc_seeds = 0;
    for(i = 0; i < n; i++) {
        f_chns[i].n = chns[i].n;
        f_chns[i].m = chns[i].m;
        for(j = 0; j < chns[i].n; j++) {
            mem_chain_t *tmp;
            tmp = &chns[i].a[j];
            f_a[acc_a].n = tmp->n;
            f_a[acc_a].m = tmp->m;
            f_a[acc_a].first = tmp->first;
            f_a[acc_a].rid = tmp->rid;
            f_a[acc_a].w = tmp->w;
            f_a[acc_a].kept = tmp->kept;
            f_a[acc_a].is_alt = tmp->is_alt;
            f_a[acc_a].frac_rep = tmp->frac_rep;
            f_a[acc_a].pos = tmp->pos;
            acc_a++;
            for(k = 0; k < tmp->n; k++) {
                mem_seed_t *tmp0;
                tmp0 = &tmp->seeds[k];
                seeds[acc_seeds].rbeg = tmp0->rbeg;
                seeds[acc_seeds].qbeg = tmp0->qbeg;
                seeds[acc_seeds].len = tmp0->len;
                seeds[acc_seeds].score = tmp0->score;
                acc_seeds++;
            }
        }
    }

    // Copy the flattened structure to the device and run the check kernel.
    gpuErrchk(cudaMalloc(&df_chns, n * sizeof(flat_mem_chain_v)));
    gpuErrchk(cudaMalloc(&df_a, n_a * sizeof(flat_mem_chain_t)));
    gpuErrchk(cudaMalloc(&d_seeds, n_seeds * sizeof(mem_seed_t)));
    gpuErrchk(cudaMemcpy(df_chns, f_chns, n * sizeof(flat_mem_chain_v), \
        cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(df_a, f_a, n_a * sizeof(flat_mem_chain_t), \
        cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_seeds, seeds, n_seeds * sizeof(mem_seed_t), \
        cudaMemcpyHostToDevice));

    printf("n = %d, n_a = %d\n", n, n_a);
    func<<<1, 1>>>(n, n_a, n_seeds, df_chns, df_a, d_seeds, d_b);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(&b, d_b, sizeof(int), cudaMemcpyDeviceToHost));
    printf("d_b = %d\n", b);

    // Release host memory (the original leaked every host allocation).
    for(i = 0; i < n; i++) {
        for(j = 0; j < (int)chns[i].n; j++) free(chns[i].a[j].seeds);
        free(chns[i].a);
    }
    free(chns);
    free(f_chns);
    free(f_a);
    free(seeds);

    cudaFree(df_chns);
    cudaFree(df_a);
    cudaFree(d_seeds);
    cudaFree(d_b);
    return 0;
}
4,535
/* autor fredy m uaem desonses@gmail.com para mas comentarios */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <vector_types.h>
#include <device_launch_parameters.h>

#define N 20

/* matrix operations */

// GLOBAL: kernel called from the host, executed on the device.
// Element-wise sum over an NxN matrix laid out row-major: C = A + B.
// Expects a single block of N x N threads.
__global__ void suma(float *A, float *B, float *C)
{
    int columna = threadIdx.x;              // column index
    int fila = threadIdx.y;                 // row index
    int Id = columna + fila * blockDim.x;   // linear index
    C[Id] = A[Id] + B[Id];
}

// Each interior element of C becomes the sum of A's four direct neighbours
// (left, right, above, below); border elements are copied through unchanged.
__global__ void add(float *A, float *C)
{
    int columna = threadIdx.x;
    int fila = threadIdx.y;
    int Id = columna + fila * blockDim.x;
    int id1 = (columna - 1) + fila * blockDim.x;   // left neighbour
    int id2 = (columna + 1) + fila * blockDim.x;   // right neighbour
    int id3 = columna + (fila - 1) * blockDim.x;   // neighbour above
    int id4 = columna + (fila + 1) * blockDim.x;   // neighbour below
    if ((fila > 0 && fila < N - 1) && (columna > 0 && columna < N - 1)) {
        C[Id] = A[id1] + A[id2] + A[id3] + A[id4];
    } else {
        C[Id] = A[Id];
    }
}

// Synchronize, then report the last CUDA error (if any) with a context tag.
__host__ void check_CUDA_Error(const char *mensaje)
{
    cudaError_t error;
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
    }
}

int main(int argc, char** argv)
{
    // Host and device buffers.
    float *hst_A, *hst_B, *hst_C;
    float *dev_A, *dev_B, *dev_C;
    int size = N * N * sizeof(float);

    // Host allocations.
    hst_A = (float*)malloc(size);
    hst_B = (float*)malloc(size);
    hst_C = (float*)malloc(size);

    // Device allocations.
    cudaMalloc((void**)&dev_A, size);
    check_CUDA_Error("Error malloc dev_A!");
    cudaMalloc((void**)&dev_B, size);
    check_CUDA_Error("Error malloc dev_B!");
    cudaMalloc((void**)&dev_C, size);
    check_CUDA_Error("Error malloc dev_C!");

    // Initialise inputs with small random values.
    for (int i = 0; i < N*N; i++) {
        hst_A[i] = (float)(rand() % 5);
        hst_B[i] = (float)(rand() % 5);
    }

    // Upload inputs to the device.
    cudaMemcpy(dev_A, hst_A, size, cudaMemcpyHostToDevice);
    check_CUDA_Error("Error Memcpy hst_A To dev_A");
    cudaMemcpy(dev_B, hst_B, size, cudaMemcpyHostToDevice);
    check_CUDA_Error("Error Memcpy hst_B To dev_B");
    // BUG FIX: the original uploaded the *uninitialised* hst_C buffer here.
    // The `add` kernel writes every element of dev_C, so no upload is needed.

    // Launch configuration: one block of N x N threads.
    dim3 Nbloques(1);
    dim3 hilosB(N, N);

    ////// GPU TIMING ///////////////
    // Events to measure GPU execution time.
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Kernel launch: 2D NxN threads.
    //suma <<<Nbloques, hilosB >>> (dev_A, dev_B, dev_C);
    add<<<Nbloques, hilosB >>>(dev_A, dev_C);
    check_CUDA_Error("Error kernel");

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);   // GPU-CPU synchronization
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);      // release timing events (original leaked them)
    cudaEventDestroy(stop);
    ////// GPU TIMING ///////////////

    // Download the result.
    cudaMemcpy(hst_C, dev_C, size, cudaMemcpyDeviceToHost);
    check_CUDA_Error("Error Memcpy dev_C To hst_C");

    // Print the matrices.
    printf("A:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.2f\t", hst_A[j + i * N]);
        }
        printf("\n");
    }
    printf("B:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.2f\t", hst_B[j + i * N]);
        }
        printf("\n");
    }
    printf("C:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.2f\t", hst_C[j + i * N]);
        }
        printf("\n");
    }
    printf("\n\n");
    printf("> Tiempo de ejecucion: %f ms\n", elapsedTime);

    // BUG FIX: the original passed the *host* pointers to cudaFree() and never
    // released the device buffers. Free each allocation with its matching API.
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    free(hst_A);
    free(hst_B);
    free(hst_C);

    printf("\n pulsa INTRO parsa finalizar...");
    // fflush(stdin) removed: flushing an input stream is undefined behaviour.
    char tecla = getchar();
    (void)tecla;
    return 0;
}
4,536
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>

// Demo: a 2x2x2 grid of 2x2x2 blocks (4 threads per axis, 64 threads total);
// every thread prints its block/grid coordinates.
__global__ void print_threadIds()
{
    printf("blockIdx,x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}

int main()
{
    // Total threads along each dimension.
    const int nx = 4;
    const int ny = 4;
    const int nz = 4;

    // 2x2x2 threads per block; grid sized so blocks cover nx * ny * nz.
    dim3 block(2, 2, 2);
    dim3 grid(nx / block.x, ny / block.y, nz / block.z);

    print_threadIds<<<grid, block>>>();
    cudaDeviceSynchronize();   // wait for all device printf output

    cudaDeviceReset();
    return 0;
}
4,537
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>

// Print a readable message and abort when a CUDA call fails.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

// Device-side helper: plain integer addition.
__device__ int add(int a, int b)
{
    return a + b;
}

// Single-thread kernel: stores add(a, b) into *c.
__global__ void kernel(int a, int b, int* c)
{
    *c = add(a, b);
}

int main()
{
    int *dev_c;
    HANDLE_ERROR (cudaMalloc((void**)&dev_c, sizeof(int)));

    kernel<<<1, 1>>>(2, 7, dev_c);

    // Blocking copy also synchronizes with the kernel.
    int result;
    HANDLE_ERROR (cudaMemcpy(&result, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
    printf ("2 + 7 = %d\n", result);

    cudaFree(dev_c);
    return 0;
}
4,538
/* number of mathematical operations (only floating point)
operation flo/o total
+-* : 23 1 23
/ : 6 4 24
sqrt: 1 4 4
pow : 1 13 13
sum 64 */
// Body of a direct particle-particle (P2P) interaction step, meant to be
// expanded inside a loop over source particles.  Each source occupies 7
// consecutive floats in vecj (jj7 advances by 7 per particle); components 0-2
// are the position and component 6 is presumably the mass/charge weight —
// confirm against the caller.  rij holds the softened reciprocal distance
// (rsqrtf of |dx|^2 + eps), veck[0] accumulates the potential term and
// veck[1..3] the force/acceleration components.
// NOTE(review): no comments may go inside the macro body itself — a `//`
// comment would swallow the trailing line-continuation backslash.
#define G_P2P_KERNEL_CORE \
dxij=veci[0]-vecj[jj7 ];\
dyij=veci[1]-vecj[jj7+1];\
dzij=veci[2]-vecj[jj7+2];\
rij=rsqrtf(dxij*dxij+dyij*dyij+dzij*dzij+eps);\
rsij=pi14*vecj[jj7+6]*rij*rij*rij;\
veck[0]+=pi14*vecj[jj7+6]*rij;\
veck[1]-=dxij*rsij;\
veck[2]-=dyij*rsij;\
veck[3]-=dzij*rsij;\
jj7+=7;
4,539
extern "C"
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16

// 3x3 median filter over an integer image.  One thread per pixel; launch a 2D
// grid covering Image_Width x Image_Height.  Border pixels (and out-of-range
// threads) are skipped, so their Output_Image entries are left untouched.
__global__ void filter(int *Input_Image, int *Output_Image, int Image_Width, int Image_Height)
{
    int surround[9];   // the 3x3 neighbourhood, private to the thread
    int iterator;

    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;

    // Skip the one-pixel border: the 3x3 window would read out of bounds.
    if( (x >= (Image_Width - 1)) || (y >= Image_Height - 1) || (x == 0) || (y == 0)) return;

    // --- Fill array private to the threads
    iterator = 0;
    for (int r = x - 1; r <= x + 1; r++) {
        for (int c = y - 1; c <= y + 1; c++) {
            surround[iterator] = Input_Image[c*Image_Width+r];
            iterator++;
        }
    }

    // --- Partial selection sort: placing only the 5 smallest of the 9 values
    // is enough for surround[4] to end up holding the median.
    for (int i=0; i<5; ++i) {
        // --- Find the position of the minimum element
        int minval=i;
        for (int l=i+1; l<9; ++l) if (surround[l] < surround[minval]) minval=l;
        // --- Put found minimum element in its place
        int temp = surround[i];
        surround[i]=surround[minval];
        surround[minval]=temp;
    }

    // --- Pick the middle one
    Output_Image[(y*Image_Width)+x]=surround[4];
}
4,540
#include <iostream>
// #include <cstdio>
// #include <cstdlib>

#define BLOCK_NUM 2
#define THREAD_NUM 8
#define N 16

// Each thread writes (blockIdx.x + threadIdx.x) at its global index.
__global__ void addBlockIdxAndThreadIdx(int *output)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    output[gid] = blockIdx.x + threadIdx.x;
}

int main()
{
    // Managed buffer: addressable from both host and device.
    int *d_arr;
    cudaMallocManaged(&d_arr, N * sizeof(int));

    addBlockIdxAndThreadIdx<<<BLOCK_NUM, THREAD_NUM>>>(d_arr);
    cudaDeviceSynchronize();

    // Copy into a plain host array before printing.
    int h_arr[N];
    cudaMemcpy(&h_arr, d_arr, N * sizeof(int), cudaMemcpyDeviceToHost);

    // Space-separated values, newline after the last one.
    for (int i = 0; i < N; i++) {
        std::cout << h_arr[i] << (i + 1 < N ? " " : "\n");
    }

    cudaFree(d_arr);
    return 0;
}
4,541
// Copies A[i+1] into A[i] for every thread i in the block, except thread 5,
// which returns early and leaves A[5] untouched.  The caller must provide at
// least blockDim.x + 1 elements in A.
// NOTE(review): threads read A[i+1] while the neighbouring thread may be
// writing it; with no synchronization the observed value depends on
// scheduling — confirm this race is the intended behaviour of the test.
__global__ void test(float *A){
    int i = threadIdx.x;
    if (i == 5){
        return;
    }
    // Removed the original's unused locals `x` (2*4) and `y` (1-2): dead code.
    A[i] = A[i+1];
}
//__global__ void test1(float *A){
// int i = threadIdx.x;
// if (i != 5){
// A[i] = A[i+1];
// }else{
// return;
// }
//}
4,542
#include <cstdio>
#include <iostream>
#include <chrono>
#include <algorithm>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// Grid-stride loop over n elements.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// CUDA: use 512 threads per block
const int CAFFE_CUDA_NUM_THREADS = 512;

// Number of blocks needed to cover N elements (ceiling division).
inline int CAFFE_GET_BLOCKS(const int N) {
  return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

#define sign(x) ((x)>=0?1:-1)

// dy[i] = sign(dx[i]) via the branching macro.
__global__ void sign_gpu_v1(int num, float *dx, float *dy) {
  CUDA_KERNEL_LOOP(i, num){
    dy[i] = sign(dx[i]);
  }
}

// operate ((x>=0)*2-1) is three times faster than sign function
__global__ void sign_gpu_v2(int num, float *dx, float *dy) {
  CUDA_KERNEL_LOOP(i, num){
    dy[i] = (dx[i]>=0)*2-1;
  }
}

// dy[i] = |dx[i]| via std::abs.
__global__ void abs_gpu_v1(int num, float *dx, float *dy) {
  CUDA_KERNEL_LOOP(i, num){
    dy[i] = std::abs(dx[i]);
  }
}

// operate v2 is 2~3 times faster than sign function
__global__ void abs_gpu_v2(int num, float *dx, float *dy) {
  CUDA_KERNEL_LOOP(i, num){
    dy[i] = ((dx[i]>=0)*2-1)*dx[i];
  }
}

int main()
{
  int N = 1 << 20;
  std::cout<<"N:"<<N<<std::endl;

  // Host buffers and their device mirrors.
  float *hx, *hy, *dx, *dy;
  hx = new float[N];
  hy = new float[N];
  cudaMalloc(&dx, N*sizeof(float));
  cudaMalloc(&dy, N*sizeof(float));

  // Random values in roughly [-0.5, 0.5).
  for(int i = 0; i < N; i++){
    hx[i] = (rand()%100)/100.0-0.5;
    hy[i] = (rand()%100)/100.0-0.5;
  }
  cudaMemcpy(dx, hx, N*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dy, hy, N*sizeof(float), cudaMemcpyHostToDevice);

  std::chrono::time_point<std::chrono::system_clock> begin;
  std::chrono::time_point<std::chrono::system_clock> end;
  std::chrono::duration<double> elapsedTime;

  // Time version 1.
  begin = std::chrono::system_clock::now();
  //sign_gpu_v1<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, dx, dy);
  abs_gpu_v1<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, dx, dy);
  // BUG FIX: kernel launches are asynchronous — without this sync the timer
  // only measured launch overhead, not kernel execution.
  cudaDeviceSynchronize();
  end = std::chrono::system_clock::now();
  elapsedTime = end - begin;
  printf("Call version 1, Time: %.6lfs\n", elapsedTime.count());

  // Time version 2.
  begin = std::chrono::system_clock::now();
  //sign_gpu_v2<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, dx, dy);
  abs_gpu_v2<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, dx, dy);
  cudaDeviceSynchronize();
  end = std::chrono::system_clock::now();
  elapsedTime = end - begin;
  printf("Call version 2, Time: %.6lfs\n", elapsedTime.count());

  cudaMemcpy(hy, dy, N*sizeof(float), cudaMemcpyDeviceToHost);

  // Spot-check the first few results (newline added so lines don't run together).
  for (int i=0; i<10; i++){
    printf("a: %.2f, b: %.2f\n", hx[i], hy[i]);
  }

  delete[] hx;
  delete[] hy;
  cudaFree(dx);
  cudaFree(dy);
  return 0;
}
4,543
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Auto-generated floating-point stress kernel: folds a fixed chain of
// transcendental/arithmetic expressions into `comp` and prints the result.
// var_1 and var_2 are loop trip counts; the remaining vars feed the formulas.
// NOTE: generated code — the exact expression order and constants are
// intentional; do not "simplify" them.
__global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
  for (int i=0; i < var_1; ++i) {
    float tmp_1 = -1.5914E-43f;
    float tmp_2 = (-1.3808E-35f / ceilf((-1.0851E-37f * log10f((-1.0437E36f - (var_3 - (-1.9410E-41f * -1.4893E7f + var_4 / +1.7362E-35f)))))));
    float tmp_3 = (-0.0f / +1.4785E-8f + var_5);
    comp += tmp_3 - tmp_2 * tmp_1 / var_6 + -1.3380E-35f;
    // Inner loop shadows `i` intentionally (generated code).
    for (int i=0; i < var_2; ++i) {
      comp = var_7 * var_8;
    }
    if (comp == -0.0f / cosf(+1.0663E-37f + -1.8452E-42f - var_9 + logf(+1.5149E-36f / -1.0874E19f * logf(+1.7400E13f)))) {
      float tmp_4 = -1.3328E-36f;
      float tmp_5 = floorf((var_10 + (var_11 * (var_12 - ldexpf((+1.2927E-35f - (var_13 - (var_14 * +1.4363E-42f + var_15))), 2)))));
      comp += tmp_5 / tmp_4 / var_16 * (-1.9099E36f + var_17);
    }
    if (comp >= +1.7018E-44f + +1.3527E-37f) {
      comp += +1.7350E-42f + expf(tanhf((var_18 - +1.0340E-41f)));
      comp += atan2f((+0.0f / (var_19 + var_20 - +1.1250E34f / floorf(+1.6097E36f / fmodf(+1.2477E-8f, sqrtf(atan2f(+1.1544E-43f / +1.5844E-43f, -0.0f)))))), (+1.5745E-14f * floorf((+1.8944E-43f + -1.6229E-35f - var_21 - var_22))));
    }
  }
  printf("%.17g\n", comp);
}

// Returns a heap-allocated 10-element float array filled with v.
// Part of the generator's harness; unused by this particular test.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Parses 23 numbers from the command line and forwards them to the kernel,
// launched with a single thread.
int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  cudaDeviceSynchronize();
  return 0;
}
4,544
//##############################################################################################################################################################################################################// //Aquila - An Open-Source GPU-Accelerated Toolkit for Cognitive and Neuro-Robotics Research // // // //Copyright (c) <2012>, <Martin Peniak - www.martinpeniak.com> // //All rights reserved. // // // //Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: // // // // - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // // - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. // // // //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // //A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // //LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // //TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // // //The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted // //as representing official policies,either expressed or implied, of the FreeBSD Project. 
//                                                                                                                                                                                                              //
//##############################################################################################################################################################################################################//

#include <cuda.h>
#include <cuda_runtime.h>

/*!
 * \brief Forward pass: computes one (source activity x weight) product per thread into a buffer.
 * \note Used with forwardPassV21Kernel - faster version for networks of up to 1024 neurons.
 * \note 2D launch: idx (y) = destination neuron, j (x) = source neuron.
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] activity - activations
 * \param[in] input - input
 * \param[in] weight - weights
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] buffer - buffer used for storing new activations
 */
__global__ void forwardPassV2Kernel(int step, int sequenceOffset, float *activity, float *input, float *weight, int numNeurons, int numIONeurons, float *buffer)
{
    int idx = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;

    if(idx<numNeurons && j<numNeurons)
    {
        //input-output neurons read from the training sequence; hidden neurons read the previous step's activations
        if(j<numIONeurons)
        {
            buffer[(j*numNeurons)+idx] = input[sequenceOffset+(numIONeurons*(step-1))+j] * weight[(j*numNeurons)+idx];
        }
        else
        {
            buffer[(j*numNeurons)+idx] = activity[(numNeurons*(step-1))+j] * weight[(j*numNeurons)+idx];
        }
    }
}

/*!
 * \brief Forward pass: block-wide reduction of the products written by forwardPassV2Kernel, then neuron update.
 * \note Used with forwardPassV2Kernel. Faster version for networks of up to 1024 neurons.
 * \note One block per neuron (blockIdx.x); requires dynamic shared memory of blockDim.x floats and
 * \note blockDim.x to be a power of two with blockDim.x >= numNeurons — TODO confirm against launch site.
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] input - input
 * \param[in] buffer - per-connection products to be summed
 * \param[in] weight - weights (bias row at offset numNeurons*numNeurons)
 * \param[in] deltaT - delta-t values
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] potential - potentials
 * \param[out] previousPotential - previous potentials
 * \param[out] activity - activities
 * \param[out] error - errors
 */
__global__ void forwardPassV21Kernel(int step, int sequenceOffset, float *activity, float *input, float *buffer, float *potential, float *weight, float *previousPotential, float *error, int *deltaT, int numNeurons, int numIONeurons)
{
    extern __shared__ float sdata[];
    int tid = threadIdx.x;

    //load one partial product per thread; threads beyond numNeurons contribute zero
    //FIX: the original placed __syncthreads() inside divergent branches (if(tid<numNeurons) and
    //if(tid<i)), which is undefined behaviour; all barriers below are now reached by every thread
    if(tid < numNeurons)
    {
        sdata[tid] = buffer[(tid * numNeurons)+blockIdx.x];
    }
    else
    {
        sdata[tid] = 0.0f;
    }
    __syncthreads();

    //tree reduction in shared memory; the barrier is executed block-wide on every iteration
    for(int i = blockDim.x/2; i != 0; i /= 2)
    {
        if(tid < i)
        {
            sdata[tid] += sdata[tid + i];
        }
        __syncthreads();
    }

    //write result for this block to global mem
    if (tid == 0)
    {
        potential[blockIdx.x] = sdata[0];

        //add bias to the activity
        potential[blockIdx.x] += weight[(numNeurons*numNeurons)+blockIdx.x];

        //calculate current membrane potential taking delta-t value as well as the previous membrane potential into account
        potential[blockIdx.x] = ((1.0f-(1.0f/(float)deltaT[blockIdx.x])) * previousPotential[blockIdx.x]) + ((1.0f/(float)deltaT[blockIdx.x])*potential[blockIdx.x]);

        //save current membrane potential for the next time step where it will be used as the previous membrane potential
        previousPotential[blockIdx.x] = potential[blockIdx.x];

        //sigmoid activation
        activity[(numNeurons*step)+blockIdx.x] = 1.0f/(1.0f+__expf(-potential[blockIdx.x]));

        //save error
        if(blockIdx.x<numIONeurons)
        {
            error[(numNeurons*step)+blockIdx.x] = activity[(numNeurons*step)+blockIdx.x] - input[sequenceOffset+(numIONeurons*(step+1))+blockIdx.x];
        }
    }
}

/*!
 * \brief Forward pass.
* \note Slower version for larger networks (one thread per neuron; inner loop over all source neurons).
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] input - input
 * \param[in] weight - weights (bias row at offset numNeurons*numNeurons)
 * \param[in] deltaT - delta-t values
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] potential - potentials
 * \param[out] previousPotential - previous potentials
 * \param[out] activity - activities
 * \param[out] error - errors
 */
__global__ void forwardPassV1Kernel(int step, int sequenceOffset, float *activity, float *input, float *weight, float *previousPotential, float *error, float *potential, int *deltaT, int numNeurons, int numIONeurons)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float sum = 0.0;

    if(idx<numNeurons)
    {
        potential[idx] = 0.0;

        //calculates membrabe potential of a particular neuron given by the idx index
        //input-output neurons read from the training sequence, hidden neurons from previous activations
        for(int i=0; i<numNeurons; i++)
        {
            if(i<numIONeurons)
            {
                sum += input[sequenceOffset+(numIONeurons*(step-1))+i] * weight[(i*numNeurons)+idx];
            }
            else
            {
                sum += activity[(numNeurons*(step-1))+i] * weight[(i*numNeurons)+idx];
            }
        }

        //add bias to the activity
        sum += weight[(numNeurons*numNeurons)+idx];

        //calculate current membrane potential taking delta-t value as well as the previous membrane potential into account
        potential[idx] = ((1.0f-(1.0f/(float)deltaT[idx])) * previousPotential[idx]) + ((1.0f/(float)deltaT[idx])*sum);

        //save current membrane potential for the next time step where it will be used as the previous membrane potential
        previousPotential[idx] = potential[idx];

        //Sigmoid activation function
        activity[(numNeurons*step)+idx] = 1.0f/(1.0f+__expf(-potential[idx]));

        //save error (only input-output neurons have a training target)
        if(idx<numIONeurons)
        {
            error[(numNeurons*step)+idx] = activity[(numNeurons*step)+idx] - input[sequenceOffset+(numIONeurons*(step+1))+idx];
        }

        sum = 0.0;
    }
}

/*!
 * \brief Calculates deltas, deltas on weights and errors parts.
 * \note Slower version for larger networks over 1024 neurons.
* \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] input - input
 * \param[in] activity - activities
 * \param[in] error - errors
 * \param[in] individualError - error buffer (accumulated squared-error halves, one per IO neuron)
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[in] previousDelta - previous deltas
 * \param[out] delta - deltas
 * \param[out] deltaWeight - delta weights
 */
__global__ void backwardPassV1Kernel(int step, int sequenceOffset, int numNeurons, int numIONeurons, float *input, float *activity, float *delta, float *deltaWeight, float *previousDelta, float *error, float *individualError, int *deltaT, float *weight)
{
    //2D launch: idx (y) = destination IO neuron, j (x) = source neuron
    int idx = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;

    if(idx<numIONeurons && j<numNeurons)
    {
        //only one column of threads accumulates the squared error so it is counted once per neuron
        if(j==0)
        {
            individualError[idx] += ((input[sequenceOffset+(numIONeurons*(step+1))+idx] - activity[(numNeurons*step)+idx]) * (input[sequenceOffset+(numIONeurons*(step+1))+idx] - activity[(numNeurons*step)+idx]))/2.0f;
        }

        //NOTE(review): delta[idx] is written redundantly by every j-thread of the row; all writers
        //store the same value, so the race is benign, but only j==0 strictly needs to write it
        delta[idx] = error[(numNeurons*step)+idx] + (1.0f - (1.0f/deltaT[idx])) * previousDelta[idx];

        //accumulate the weight gradient only where a connection exists (zero weight means unconnected)
        if(weight[(j*numNeurons)+idx]!=0)
        {
            deltaWeight[(j*numNeurons)+idx] += (1.0f/deltaT[idx]) * delta[idx] * activity[(numNeurons*(step-1))+j];
        }
    }
}

/*!
 * \brief Calculates deltas on hidden neurons.
 * \note Slower version for larger networks over 1024 neurons.
* \param[in] step - current step
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-ouput neurons
 * \param[in] activity - activities
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[in] previousDelta - previous deltas
 * \param[out] delta - deltas
 */
__global__ void backwardPassV11Kernel(int step, int numNeurons, int numIONeurons, float *activity, float *delta, float *previousDelta, int *deltaT, float *weight)
{
    //one thread per hidden neuron (idx in [numIONeurons, numNeurons))
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int kroneckerDelta;

    if(idx<numNeurons && idx>=numIONeurons)
    {
        delta[idx] = 0.0;

        for(int j=0; j<numNeurons; j++)
        {
            //only propagate through existing connections (zero weight means unconnected)
            if(weight[(j*numNeurons)+idx]!=0)
            {
                //set Kronecker's delta
                if (idx==j)
                {
                    kroneckerDelta = 1;
                }
                else
                {
                    kroneckerDelta = 0;
                }

                //see bottom part of the equation 11 in Yamashita & Tani 2008 - SUM(k->N) of prev_hidden_delta * kroneckerDelta * (1-1/hidden-delta)
                //+ (1/delta-on-unit-k) * weight(ki) * derivative of activation of neuron i
                delta[idx] += previousDelta[j] * (kroneckerDelta * (1.0f - (1.0f/deltaT[idx])) + (1.0f/deltaT[j]) * weight[(idx*numNeurons)+j] * (activity[(numNeurons*step)+idx] * (1.0f - activity[(numNeurons*step)+idx])));
            }
        }
    }
}

/*!
 * \brief Loads the buffer with deltas on weights fractions calculate deltas, deltas on weights and errors parts.
 * \note Deltas on weights and errors are later summed by parallel reduction.
* \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] input - input
 * \param[in] activity - activities
 * \param[in] previousDelta - previous deltas
 * \param[in] error - errors
 * \param[in] individualError - error buffer
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[out] delta - deltas
 * \param[out] deltaWeight - delta weights
 * \param[out] buffer - buffer used for storing delta weights
 */
__global__ void backwardPassV2Kernel(int step, int sequenceOffset, int numNeurons, int numIONeurons, float *input, float *activity, float *delta, float *deltaWeight, float *previousDelta, float *error, float *individualError, int *deltaT, float *weight, float *buffer)
{
    //2D launch: idx (y) = destination neuron, j (x) = source neuron
    int idx = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int kroneckerDelta;

    if(idx<numNeurons && j<numNeurons)
    {
        if(idx<numIONeurons)
        {
            //only one column of threads accumulates the squared error so it is counted once per neuron
            if(j==0)
            {
                individualError[idx] += ((input[sequenceOffset+(numIONeurons*(step+1))+idx] - activity[(numNeurons*step)+idx]) * (input[sequenceOffset+(numIONeurons*(step+1))+idx] - activity[(numNeurons*step)+idx]))/2.0f;
            }

            delta[idx] = error[(numNeurons*step)+idx] + (1.0f - (1.0f/deltaT[idx])) * previousDelta[idx];

            if(weight[(j*numNeurons)+idx]!=0)
            {
                //see eqaution 10 in Yamashita & Tani 2008 - SUM(t) of (1/fast-unit-delta)*delta-output(t) * fast-neuron-activity(t-1)
                deltaWeight[(j*numNeurons)+idx] += (1.0f/deltaT[idx]) * delta[idx] * activity[(numNeurons*(step-1))+j];
            }
        }
        else //load the buffer with deltas on weights fractions
        {
            buffer[(j*numNeurons)+idx] = 0.0;

            //calculate delta on weight if the two neurons are connected
            if(weight[(j*numNeurons)+idx]!=0)
            {
                //set Kronecker's delta
                if (idx==j)
                {
                    kroneckerDelta = 1;
                }
                else
                {
                    kroneckerDelta = 0;
                }

                //see bottom part of the equation 11 in Yamashita & Tani 2008 - SUM(k->N) of prev_hidden_delta * kroneckerDelta * (1-1/hidden-delta)
                //+ (1/delta-on-unit-k) * weight(ki) * derivative of activation of neuron i
                buffer[(j*numNeurons)+idx] = previousDelta[j] * (kroneckerDelta * (1.0f - (1.0f/deltaT[idx])) + (1.0f/deltaT[j]) * weight[(idx*numNeurons)+j] * (activity[(numNeurons*step)+idx] * (1.0f - activity[(numNeurons*step)+idx])));
            }
        }
    }
}

/*!
 * \brief Calculates deltas on hidden neurons by reducing the fractions written by backwardPassV2Kernel.
 * \note Used together with backwardPassV2Kerne. Faster version for networks of up to 1024 neurons.
 * \note One block per hidden neuron; requires dynamic shared memory of blockDim.x floats and
 * \note blockDim.x to be a power of two with blockDim.x >= numNeurons — TODO confirm against launch site.
 * \param[in] input - input (fractions buffer)
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] output - output (deltas of hidden neurons)
 */
__global__ void backwardPassV21Kernel(float *input, float *output, int numNeurons, int numIONeurons)
{
    extern __shared__ float sdata[];
    int tid = threadIdx.x;

    //load one fraction per thread; threads beyond numNeurons contribute zero
    //FIX: the original placed __syncthreads() inside divergent branches (if(tid<numNeurons) and
    //if(tid<i)), which is undefined behaviour; all barriers below are now reached by every thread
    if(tid<numNeurons)
    {
        sdata[tid] = input[(tid * numNeurons)+(numIONeurons+blockIdx.x)];
    }
    else
    {
        sdata[tid] = 0.0f;
    }
    __syncthreads();

    //tree reduction in shared memory; the barrier is executed block-wide on every iteration
    for(int i = blockDim.x/2; i != 0; i /= 2)
    {
        if(tid<i)
        {
            sdata[tid] += sdata[tid+i];
        }
        __syncthreads();
    }

    //write result for this block to global memory
    if(tid==0)
    {
        output[numIONeurons+blockIdx.x] = sdata[0];
    }
}

/*!
 * \brief Calculates deltas on weights on hidden neurons and biases.
* \param[in] step - current step
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] activity - activities
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights (bias row at offset numNeurons*numNeurons)
 * \param[in] delta - deltas
 * \param[out] previousDelta - previous deltas
 * \param[out] deltaWeight - delta weights
 */
__global__ void backwardPassV3Kernel(int step, int numNeurons, int numIONeurons, float *activity, float *delta, float *previousDelta, float *deltaWeight, int *deltaT, float *weight)
{
    //2D launch: idx (y) = destination neuron, j (x) = source neuron
    int idx = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;

    if(j<numNeurons && idx<numNeurons)
    {
        //calculate delta on weights for those neurons that are directly connected to hidden neurons
        if(idx>=numIONeurons)
        {
            if(weight[(j*numNeurons)+idx]!=0)
            {
                //see eqaution 10 in Yamashita & Tani 2008 - SUM(t) of (1/fast-unit-delta)*delta-output(t) * fast-neuron-activity(t-1)
                deltaWeight[(j*numNeurons)+idx] += (1.0f/deltaT[idx]) * delta[idx] * activity[(numNeurons*(step-1))+j];
            }
        }

        //calculate deltas on bias weights connecting to each neuron (once per destination neuron)
        if(j==0)
        {
            deltaWeight[(numNeurons*numNeurons)+idx] += (1.0f/deltaT[idx]) * delta[idx];
            previousDelta[idx] = delta[idx];
        }
    }
}

/*!
 * \brief Updates weights with gradient descent plus momentum.
 * \param[in] learningRate - learning rate
 * \param[in] momentum - momentum
 * \param[in] numWeights - number of weights
 * \param[in] deltaWeight - delta weights
 * \param[out] previousDeltaWeight - previous delta weights
 * \param[out] weight - weights
 */
__global__ void updateWeightsKernel(float learningRate, float momentum, float *weight, float *deltaWeight, float *previousDeltaWeight, int numWeights)
{
    //flat index over a 2D grid of 1D blocks
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;

    if(idx<numWeights)
    {
        weight[idx] -= ((learningRate * deltaWeight[idx]) + (momentum * previousDeltaWeight[idx]));
        previousDeltaWeight[idx] = deltaWeight[idx];
    }
}

/*!
 * \brief Sums delta weights on the master device.
* \param[in] numWeights - number of weights
 * \param[in] peerDeltaWeight - delta weights from peer device
 * \param[out] masterDeltaWeight - delta weights from master device
 */
__global__ void sumDeltaWeightsP2PKernel(int numWeights, float *masterDeltaWeight, float *peerDeltaWeight)
{
    //flat index over a 2D grid of 1D blocks
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;

    if(idx<numWeights)
    {
        masterDeltaWeight[idx] += peerDeltaWeight[idx];
    }
}

/*!
 * \brief Modifies weights on the master device and copies to the peer device.
 * \param[in] numWeights - number of weights
 * \param[in] learningRate - learning rate
 * \param[in] momentum - momentum
 * \param[in] deltaWeight - delta weights
 * \param[out] previousDeltaWeight - previous delta weights
 * \param[out] masterWeight - weigths from master device
 * \param[out] peerWeight - weights from peer device
 */
__global__ void updateWeightsP2PKernel(int numWeights, float learningRate, float momentum, float *masterWeight, float *peerWeight, float *deltaWeight, float *previousDeltaWeight)
{
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;

    if(idx<numWeights)
    {
        //gradient-descent step with momentum, then mirror the result to the peer device's copy
        masterWeight[idx] -= ((learningRate * deltaWeight[idx]) + (momentum * previousDeltaWeight[idx]));
        peerWeight[idx] = masterWeight[idx];
        previousDeltaWeight[idx] = deltaWeight[idx];
    }
}

/*!
 * \brief Adds the peer device's scalar error into the master device's error.
 * \param[in] peerError - error from peer device
 * \param[out] masterError - error from master device
 */
__global__ void sumErrorP2PKernel(float *masterError, float *peerError)
{
    //single-value update; only one thread does the work
    if(threadIdx.x==0)
    {
        masterError[0] += peerError[0];
    }
}

/*!
 * \brief Sets the initial states for all the units on device.
 * \note Slow context units will be initialised with a specific value.
* \param[in] initState - initial state
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] numFastNeurons - number of fast neurons
 * \param[out] activity - activities
 */
__global__ void setInitStatesKernel(float initState, float *activity, int numNeurons, int numIONeurons, int numFastNeurons)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if(idx<numNeurons)
    {
        //NOTE(review): the number of slow-context units initialised with initState is hard-coded to 5
        //here — confirm this matches the network configuration elsewhere in the project
        if(idx>=numIONeurons+numFastNeurons && idx<numIONeurons+numFastNeurons+5)
        {
            activity[idx] = initState;
        }
        else
        {
            activity[idx] = 0.0;
        }
    }
}

/*!
 * \brief Resets delta and error parameters.
 * \param[in] numNeurons - number of neurons
 * \param[in] maxSequenceSteps - maximum number of sequence steps
 * \param[out] delta - deltas
 * \param[out] previousDelta - previous deltas
 * \param[out] potential - potentials
 * \param[out] previousPotential - previous potentials
 * \param[out] error - errors
 */
__global__ void resetParametersKernel(int numNeurons, int maxSequenceSteps, float *delta, float *previousDelta, float *potential, float *previousPotential, float *error)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;

    //error spans numNeurons*maxSequenceSteps entries; the other buffers span numNeurons entries
    if(idx<(numNeurons*maxSequenceSteps))
    {
        if(idx<numNeurons)
        {
            delta[idx] = 0.0;
            previousDelta[idx] = 0.0;
            potential[idx] = 0.0;
            previousPotential[idx] = 0.0;
        }
        error[idx] = 0.0;
    }
}

/*!
 * \brief Resets delta weights and errors.
 * \param[in] numWeights - number of weights
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] deltaWeight - delta weights
 * \param[out] individualError - error buffer
 */
__global__ void resetDeltaWeightsKernel(int numWeights, int numIONeurons, float *deltaWeight, float *individualError)
{
    //flat index over a 2D grid of 1D blocks
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;

    if(idx<numWeights)
    {
        deltaWeight[idx] = 0.0;
        if(idx<numIONeurons)
        {
            individualError[idx] = 0.0;
        }
    }
}

/*!
 * \brief Parallel reduction sum modified from NVIDIA SDK.
* \note Number of threads are not known at a compile time, however, we always stick to power of 2 sizes
 * \note so here we are using templates to allow compilation for all known size, which results in higher throughput.
 * \note NOTE(review): the final warp stage relies on pre-Volta implicit warp synchrony (volatile shared
 * \note memory, no __syncwarp) — verify behaviour on compute capability 7.0+ before reuse.
 * \param[in] input - input
 * \param[in] n - number of elements to sum
 * \param[in] nIsPow2 - determines if the number is of power of two
 * \param[out] output - output (one partial sum per block)
 */
template <unsigned int blockSize>
__global__ void reduceKernel(float *input, float *output, unsigned int n, bool nIsPow2)
{
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    float sum = 0.0;

    //perform first level of reduction, reading from global memory, writing to shared memory
    //we reduce multiple elements per thread. The number is determined by the number of active thread blocks (via gridDim).
    //More blocks will result in a larger gridSize and therefore fewer elements per thread
    while(i<n)
    {
        sum += input[i];
        //ensure we don't read out of bounds -- this is optimised away for powerOf2 sized arrays
        if (nIsPow2 || (i+blockSize)<n)
        {
            sum += input[i+blockSize];
        }
        i+=gridSize;
    }

    //each thread puts its local sum into shared memory
    sdata[tid] = sum;
    __syncthreads();

    //do reduction in shared mem; blockSize is a compile-time constant so unused stages compile away
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] = sum = sum + sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] = sum = sum + sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { sdata[tid] = sum = sum + sdata[tid + 64]; } __syncthreads(); }

    if(tid<32)
    {
        //now that we are using warp-synchronous programming (below) we need to declare our shared memory
        //volatile so that the compiler doesn't reorder stores to it and induce incorrect behavior
        volatile float* smem = sdata;
        if (blockSize>=64) { smem[tid] = sum = sum + smem[tid+32]; }
        if (blockSize>=32) { smem[tid] = sum = sum + smem[tid+16]; }
        if (blockSize>=16) { smem[tid] = sum = sum + smem[tid+8]; }
        if (blockSize>=8) { smem[tid] = sum = sum + smem[tid+4]; }
        if (blockSize>=4) { smem[tid] = sum = sum + smem[tid+2]; }
        if (blockSize>=2) { smem[tid] = sum = sum + smem[tid+1]; }
    }

    //write result for this block to global mem
    if(tid==0)
    {
        output[blockIdx.x] = sdata[0];
    }
}

/*!
 * \brief Wrapper for resetDeltaWeightsKernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] numWeights - number of weights
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] deltaWeight - delta weights
 * \param[out] individualError - error buffer
 */
void resetDeltaWeightsOnDevice(dim3 grid, dim3 block, cudaStream_t stream, int numWeights, int numIONeurons, float *deltaWeight, float *individualError)
{
    //asynchronous launch on the given stream
    resetDeltaWeightsKernel<<<grid,block,0,stream>>>(numWeights, numIONeurons, deltaWeight, individualError);
}

/*!
 * \brief Wrapper for setInitStatesKernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] initState - initial state
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] numFastNeurons - number of fast neurons
 * \param[out] activity - activities
 */
void setInitStatesOnDevice(dim3 grid, dim3 block, cudaStream_t stream, float initState, float *activity, int numNeurons, int numIONeurons, int numFastNeurons)
{
    setInitStatesKernel<<<grid,block,0,stream>>>(initState, activity, numNeurons, numIONeurons, numFastNeurons);
}

/*!
 * \brief Wrapper for resetParametersKernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] numNeurons - number of neurons
 * \param[in] maxSequenceSteps - maximum number of sequence steps
 * \param[out] delta - deltas
 * \param[out] previousDelta - previous deltas
 * \param[out] potential - potentials
 * \param[out] previousPotential - previous potentials
 * \param[out] error - errors
 */
void resetParametersOnDevice(dim3 grid, dim3 block, cudaStream_t stream, int numNeurons, int maxSequenceSteps, float *delta, float *previousDelta, float *potential, float *previousPotential, float *error)
{
    //asynchronous launch on the given stream
    resetParametersKernel<<<grid,block,0,stream>>>(numNeurons, maxSequenceSteps, delta, previousDelta, potential, previousPotential, error);
}

/*!
 * \brief Wrapper for updateWeightsKernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] learningRate - learning rate
 * \param[in] momentum - momentum
 * \param[in] numWeights - number of weights
 * \param[in] deltaWeight - delta weights
 * \param[out] previousDeltaWeight - previous delta weights
 * \param[out] weight - weights
 */
void updateWeightsOnDevice(dim3 grid, dim3 block, float learningRate, float momentum, float *weight, float *deltaWeight, float *previousDeltaWeight, int numWeights)
{
    //launched on the default stream (no stream parameter, unlike the other wrappers)
    updateWeightsKernel<<<grid,block>>>(learningRate, momentum, weight, deltaWeight, previousDeltaWeight, numWeights);
}

/*!
 * \brief Wrapper for forwardPassV1Kernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] input - input
 * \param[in] weight - weights
 * \param[in] deltaT - delta-t values
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] potential - potentials
 * \param[out] previousPotential - previous potentials
 * \param[out] activity - activities
 * \param[out] error - errors
 */
void forwardPassV1onDevice(dim3 grid, dim3 block, cudaStream_t stream, int step, int sequenceOffset, float *activity, float *input, float *weight, float *previousPotential, float *error, float *potential, int *deltaT, int numNeurons, int numIONeurons)
{
    //asynchronous launch on the given stream
    forwardPassV1Kernel<<<grid,block,0,stream>>>(step, sequenceOffset, activity, input, weight, previousPotential, error, potential, deltaT, numNeurons, numIONeurons);
}

/*!
 * \brief Wrapper for forwardPassV2Kernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] activity - activations
 * \param[in] input - input
 * \param[in] weight - weights
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] buffer - buffer used for storing new activations
 */
void forwardPassV2onDevice(dim3 grid, dim3 block, cudaStream_t stream, int step, int sequenceOffset, float *activity, float *input, float *weight, int numNeurons, int numIONeurons, float *buffer)
{
    forwardPassV2Kernel<<<grid,block,0,stream>>>(step, sequenceOffset, activity, input, weight, numNeurons, numIONeurons, buffer);
}

/*!
 * \brief Wrapper for forwardPassV21Kernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] smemSize - CUDA shared memory size (bytes of dynamic shared memory for the reduction)
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] activity - activations
 * \param[in] input - input
 * \param[in] buffer - per-connection products from forwardPassV2Kernel
 * \param[in] weight - weights
 * \param[in] deltaT - delta-t values
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] potential, previousPotential, error - updated by the kernel
 */
void forwardPassV21onDevice(dim3 grid, dim3 block, int smemSize, cudaStream_t stream, int step, int sequenceOffset, float *activity, float *input, float *buffer, float *potential, float *weight, float *previousPotential, float *error, int *deltaT, int numNeurons, int numIONeurons)
{
    //asynchronous launch with dynamic shared memory on the given stream
    forwardPassV21Kernel<<<grid,block,smemSize,stream>>>(step, sequenceOffset, activity, input, buffer, potential, weight, previousPotential, error, deltaT, numNeurons, numIONeurons);
}

/*!
 * \brief Wrapper for backwardPassV1Kernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] input - input
 * \param[in] activity - activities
 * \param[in] error - errors
 * \param[in] individualError - error buffer
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[in] previousDelta - previous deltas
 * \param[out] delta - deltas
 * \param[out] deltaWeight - delta weights
 */
void backwardPassV1onDevice(dim3 grid, dim3 block, cudaStream_t stream, int step, int sequenceOffset, int numNeurons, int numIONeurons, float *input, float *activity, float *delta, float *deltaWeight, float *previousDelta, float *error, float *individualError, int *deltaT, float *weight)
{
    //asynchronous launch on the given stream
    backwardPassV1Kernel<<<grid,block,0,stream>>>(step, sequenceOffset, numNeurons, numIONeurons, input, activity, delta, deltaWeight, previousDelta, error, individualError, deltaT, weight);
}

/*!
 * \brief Wrapper for backwardPassV11Kernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-ouput neurons
 * \param[in] activity - activities
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[in] previousDelta - previous deltas
 * \param[out] delta - deltas
 */
void backwardPassV11onDevice(dim3 grid, dim3 block, cudaStream_t stream, int step, int numNeurons, int numIONeurons, float *activity, float *delta, float *previousDelta, int *deltaT, float *weight)
{
    backwardPassV11Kernel<<<grid,block,0,stream>>>(step, numNeurons, numIONeurons, activity, delta, previousDelta, deltaT, weight);
}

/*!
 * \brief Wrapper for backwardPassV2Kernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] sequenceOffset - sequence offsets
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] input - input
 * \param[in] activity - activities
 * \param[in] previousDelta - previous deltas
 * \param[in] error - errors
 * \param[in] individualError - error buffer
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[out] delta - deltas
 * \param[out] deltaWeight - delta weights
 * \param[out] buffer - buffer used for storing delta weights
 */
void backwardPassV2onDevice(dim3 grid, dim3 block, cudaStream_t stream, int step, int sequenceOffset, int numNeurons, int numIONeurons, float *input, float *activity, float *delta, float *deltaWeight, float *previousDelta, float *error, float *individualError, int *deltaT, float *weight, float *buffer)
{
    //asynchronous launch on the given stream
    backwardPassV2Kernel<<<grid,block,0,stream>>>(step, sequenceOffset, numNeurons, numIONeurons, input, activity, delta, deltaWeight, previousDelta, error, individualError, deltaT, weight, buffer);
}

/*!
 * \brief Wrapper for backwardPassV21Kernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] smemSize - CUDA shared memory size (bytes of dynamic shared memory for the reduction)
 * \param[in] stream - CUDA stream
 * \param[in] input - input
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[out] output - output
 */
void backwardPassV21onDevice(dim3 grid, dim3 block, int smemSize, cudaStream_t stream, float *input, float *output, int numNeurons, int numIONeurons)
{
    backwardPassV21Kernel<<<grid,block,smemSize,stream>>>(input, output, numNeurons, numIONeurons);
}

/*!
 * \brief Wrapper for backwardPassV3Kernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] stream - CUDA stream
 * \param[in] step - current step
 * \param[in] numNeurons - number of neurons
 * \param[in] numIONeurons - number of input-output neurons
 * \param[in] activity - activities
 * \param[in] deltaT - delta-t values
 * \param[in] weight - weights
 * \param[in] delta - deltas
 * \param[out] previousDelta - previous deltas
 * \param[out] deltaWeight - delta weights
 */
void backwardPassV3onDevice(dim3 grid, dim3 block, cudaStream_t stream, int step, int numNeurons, int numIONeurons, float *activity, float *delta, float *previousDelta, float *deltaWeight, int *deltaT, float *weight)
{
    //asynchronous launch on the given stream
    backwardPassV3Kernel<<<grid,block,0,stream>>>(step, numNeurons, numIONeurons, activity, delta, previousDelta, deltaWeight, deltaT, weight);
}

/*!
 * \brief Wrapper for reduceKernel.
 * \note Dispatches on the runtime block size to the matching compile-time template instantiation;
 * \note sizes other than the listed powers of two fall through and launch nothing.
 * \param[in] size - number of threads per block (must be a power of two, 1..512)
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] smemSize - CUDA shared memory size
 * \param[in] stream - CUDA stream
 * \param[in] input - input
 * \param[in] n - number of elements to sum
 * \param[in] nIsPow2 - determines if the number is of power of two
 * \param[out] output - output
 */
void reduceOnDevice(int size, dim3 grid, dim3 block, int smemSize, cudaStream_t stream, float *input, float *output, unsigned int n, bool nIsPow2)
{
    switch(size)
    {
        case 512:
            reduceKernel<512><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 256:
            reduceKernel<256><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 128:
            reduceKernel<128><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 64:
            reduceKernel<64><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 32:
            reduceKernel<32><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 16:
            reduceKernel<16><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 8:
            reduceKernel<8><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 4:
            reduceKernel<4><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 2:
            reduceKernel<2><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
        case 1:
            reduceKernel<1><<<grid,block,smemSize,stream>>>(input, output, n, nIsPow2);
            break;
    }
}

/*!
 * \brief Wrapper for sumDeltaWeightsP2PKernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] numWeights - number of weights
 * \param[in] peerDeltaWeight - delta weights from peer device
 * \param[out] masterDeltaWeight - delta weights from master device
 */
void sumDeltaWeightsP2PonDevice(dim3 grid, dim3 block, int numWeights, float *masterDeltaWeight, float *peerDeltaWeight)
{
    //launched on the default stream
    sumDeltaWeightsP2PKernel<<<grid,block>>>(numWeights, masterDeltaWeight, peerDeltaWeight);
}

/*!
 * \brief Wrapper for updateWeightsP2PKernel.
 * \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] numWeights - number of weights
 * \param[in] learningRate - learning rate
 * \param[in] momentum - momentum
 * \param[in] deltaWeight - delta weights
 * \param[out] previousDeltaWeight - previous delta weights
 * \param[out] masterWeight - weigths from master device
 * \param[out] peerWeight - weights from peer device
 */
void updateWeightsP2PonDevice(dim3 grid, dim3 block, int numWeights, float learningRate, float momentum, float *masterWeight, float *peerWeight, float *deltaWeight, float *previousDeltaWeight)
{
    updateWeightsP2PKernel<<<grid,block>>>(numWeights, learningRate, momentum, masterWeight, peerWeight, deltaWeight, previousDeltaWeight);
}

/*!
 * \brief Wrapper for sumErrorP2PKernel.
* \param[in] grid - CUDA grid size
 * \param[in] block - CUDA block size
 * \param[in] peerError - error from peer device
 * \param[out] masterError - error from master device
 */
void sumErrorP2PonDevice(dim3 grid, dim3 block, float *masterError, float *peerError)
{
    //launched on the default stream; the kernel itself only uses a single thread
    sumErrorP2PKernel<<<grid,block>>>(masterError, peerError);
}
4,545
#include <cuda_runtime.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> #define TILE_WIDTH 16 #define MAX_MASK_WIDTH 128 int UI(int argc, char* argv[], int* jkl); void initData(int* data, int x, int y); void initMask(float* mask, int k); void showData(int* data, int len); void showMask(float* mask, int len); void convCPU(int* data, float* mask, float* conv_CPU, int x, int y, int k); void checkCUDAError(cudaError_t e); __global__ void convGPU(int* d_data, float* d_conv, int x, int y, int k); void resultCheck(float* result_CPU, float* result_GPU, int size); __constant__ float M[MAX_MASK_WIDTH * MAX_MASK_WIDTH]; int main(int argc, char* argv[]){ cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, 0); printf("Total constant memory: %zd \n", dev_prop.totalConstMem); // reset rand seed srand((unsigned)time(NULL)); clock_t start, finish; int total_time; // Go through UI first. // In UI section, only command with valid param can go to the next step. 
int UIStatus; int param[3]; UIStatus = UI(argc, argv, param); if (UIStatus != 0) { printf("\nApplication terminates.\n"); return 0; } // UI section ends // Initialize data array with int type const int x = param[0]; const int y = param[1]; const int k = param[2]; int* data = (int*)malloc(x * y * sizeof(int)); float* mask = (float*)malloc(k * k * sizeof(float)); initData(data, x, y); initMask(mask, k); showData(data, x * y); showMask(mask, k * k); printf("Done initializing data array.\n"); // Initialzing ends // CPU code for calculating convolution // Use this result to varify the kernel result later float* conv_CPU = (float*)calloc(x * y, sizeof(float)); float* conv_GPU = (float*)calloc(x * y, sizeof(float)); start = clock(); convCPU(data, mask, conv_CPU, x, y, k); finish = clock(); total_time = (int)(finish - start); showMask(conv_CPU, x * y); printf("Done convolution with CPU in %d miliseconds.\n", total_time); // Convolution calculating with CPU ends // Allocate device memory, copy data from host to device int* d_data; float *d_conv; checkCUDAError(cudaMalloc((int**)&d_data, x * y * sizeof(int))); checkCUDAError(cudaMalloc((float**)&d_conv, x * y * sizeof(float))); printf("Done allocating space in device."); checkCUDAError(cudaMemcpy(d_data, data, x * y * sizeof(int), cudaMemcpyHostToDevice)); checkCUDAError(cudaMemcpyToSymbol(M, mask, k * k * sizeof(float))); printf("\nDone copying memory from host to device."); // Done allocating, transfering and initializing // Initialize thread block and kernel grid dimensions dim3 threads(TILE_WIDTH, TILE_WIDTH); dim3 grid((int)ceil(1.0 * x / threads.x), (int)ceil(1.0 * y / threads.y)); printf("\nDone initializing block dimention and grid dimention."); // Done initializing thread block and kernel grid dimensions // launch CUDA device kernel start = clock(); convGPU<<< grid, threads >>>(d_data, d_conv, x, y, k); checkCUDAError(cudaDeviceSynchronize()); finish = clock(); total_time = (int)(finish - start); printf("\nDone 
matrix multiplication with GPU in %d miliseconds.\n", total_time); // Done CUDA device kernel // Copy results from device to host and free device memory checkCUDAError(cudaMemcpy(conv_GPU, d_conv, x * y * sizeof(float), cudaMemcpyDeviceToHost)); printf("conv_GPU:"); showMask(conv_GPU, x * y); checkCUDAError(cudaFree(d_conv)); checkCUDAError(cudaFree(d_data)); // Done copying results and freeing device memory // Check the result of the Calculated Matrix resultCheck(conv_CPU, conv_GPU, x * y); // Done result checking. return 0; } // UI for main function // return 0 means everything's fine, just continue; // return 1 means there's invalid input or '--help', terminate running. int UI(int argc, char* argv[], int* param) { if (argc == 2 && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0)) { printf("CUDA Programming Homework. Histogram Algorithm.\n"); printf("\nUsage: hist [OPTION]...\n"); printf("\nOptions:\n"); printf("%5s, %-10s %-50s\n", "-h", "--help", "Show helping information."); printf("%5s, %-10s %-50s\n", "-i", "--input", "Followed by 3 integers as input parameters."); printf("\nExamples:\n"); printf("hist -h\n"); printf(" Shows the helping information.\n"); printf("hist -i 1960 1080 9\n"); printf(" 1960 1080 represents the picture is 1960 * 1080, 9 means the 2D mask is 9 * 9\n"); return 1; } if (argc == 5 && (strcmp(argv[1], "-i") == 0 || strcmp(argv[1], "--input") == 0)) { int x = atoi(argv[2]); int y = atoi(argv[3]); int k = atoi(argv[4]); if (x <= 0 || y <= 0 || k <= 0) { printf("Invalid array length. The input values should be an integer greater than 0.\n"); return 1; } if (k % 2 == 0) { printf("Invalid k, k should be odd.\n"); return 1; } if (k > MAX_MASK_WIDTH) { printf("Invalid k, k is too big. Can't store it in texture memory.\n"); return 1; } else { printf("x: %d\n", x); printf("y: %d\n", y); printf("k: %d\n", k); param[0] = x; param[1] = y; param[2] = k; return 0; } } else { printf("Invalid command. 
Please check how to make valid command by '-h' or '--help'.\n"); return 1; } } void initData(int* data, int x, int y) { for (int i = 0; i < x * y; ++i) data[i] = rand() % 16; return; } void initMask(float* mask, int k) { float sum = 0.0f; for (int i = 0; i < k * k; ++i) { mask[i] = rand() % 100 / 100.0f; sum += mask[i]; } for (int i = 0; i < k * k; ++i) { mask[i] /= sum; } return; } void showData(int* data, int len) { printf("data:\n["); for (int i = 0; i < len && i < 10; ++i) { if (i != 0) printf(","); printf("%4d", data[i]); } if (len > 10) printf("..."); printf("]\n"); return; } void showMask(float* mask, int len) { printf("data:\n["); for (int i = 0; i < len && i < 10; ++i) { if (i != 0) printf(","); printf("%4f", mask[i]); } if (len > 10) printf("..."); printf("]\n"); return; } void convCPU(int* data, float* mask, float* conv_CPU, int x, int y, int k) { for (int i = 0; i < y; ++i) { for (int j = 0; j < x; ++j) { float sum = 0.0f; for (int m = 0; m < k; ++m) { for (int n = 0; n < k; ++n) { if (i + m - k / 2 >= 0 && i + m - k / 2 < y && j + n - k / 2 >= 0 && j + n - k / 2 < x) { sum += data[(i + m - k / 2) * x + (j + n - k / 2)] * mask[m * k + n]; } } } conv_CPU[i * x + j] = sum; } } return; } void checkCUDAError(cudaError_t e) { if (e == 0) return; printf("\nError: %s\n", cudaGetErrorName(e)); printf("%s\n", cudaGetErrorString(e)); exit(0); } __global__ void convGPU(int* d_data, float* d_conv, int x, int y, int k) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float data_ds[TILE_WIDTH][TILE_WIDTH]; if (ty < y && tx < x) { data_ds[threadIdx.y][threadIdx.x] = d_data[ty * x + tx]; __syncthreads(); float Pvalue = 0; for (int m = 0; m < k; ++m) { for (int n = 0; n < k; ++n) { int cur_row = ty + m - k / 2; int cur_col = tx + n - k / 2; if (cur_col >= 0 && cur_col < x && cur_row >= 0 && cur_row < y) { if (cur_col >= blockDim.x * blockIdx.x && cur_col < blockDim.x * (blockIdx.x + 1) && 
cur_row >= blockDim.y * blockIdx.y && cur_row < blockDim.y * (blockIdx.y + 1)) Pvalue += data_ds[threadIdx.y + m - k / 2][threadIdx.x + n - k / 2] * M[m * k + n]; else Pvalue += d_data[cur_row * x + cur_col] * M[m * k + n]; } } } d_conv[ty * x + tx] = Pvalue; } } void resultCheck(float* result_CPU, float* result_GPU, int size) { for (int i = 0; i < size; ++i) { if (result_CPU[i] * 1.001 <= result_GPU[i] || result_CPU[i] * 0.999 >= result_GPU[i]) { printf("\nResult check: Error!!!! Didn't pass."); return; } } printf("\nResult check: ---PASS---."); return; }
4,546
// #include "LinearSolver.hh"
// #include "table.hh"
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdlib>
using namespace std;

#define THREADS_PER_BLOCK 1024

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints a readable diagnostic and (optionally) aborts when a CUDA call fails.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Per-block partial dot product: each thread multiplies one pair of elements
// into shared memory; after the barrier, thread 0 sums the block's products
// and folds the partial sum into *dot with a single atomicAdd per block.
//
// NOTE: the kernel has no length parameter, so the host must launch exactly
// ceil(N / THREADS_PER_BLOCK) blocks over arrays zero-padded to a multiple of
// THREADS_PER_BLOCK (padding contributes 0*0 = 0 to the sum).
__global__ void gpu(int * a, int * b, int * dot)
{
    __shared__ int product[THREADS_PER_BLOCK];

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    product[threadIdx.x] = a[tid] * b[tid];

    // All of this block's products must be in shared memory before the scan.
    __syncthreads();

    if (threadIdx.x == 0) {
        int sum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; i++) {
            sum += product[i];
        }
        atomicAdd(dot, sum);
    }
}

int main(int argc, char * argv[])
{
    // Guard against a missing argument (the original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " N" << endl;
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        return 1;
    }

    // Round the problem size up to a whole number of blocks and zero-pad the
    // tail. The original launched N/THREADS_PER_BLOCK blocks, which launches
    // ZERO blocks for N < 1024 and silently drops the tail when N is not a
    // multiple of the block size.
    int nBlocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int paddedN = nBlocks * THREADS_PER_BLOCK;

    int *a, *b;
    int *d_a, *d_b;
    int dot, *d_dot;

    size_t size = paddedN * sizeof(int);
    gpuErrchk(cudaMalloc(&d_a, size));
    gpuErrchk(cudaMalloc(&d_b, size));
    gpuErrchk(cudaMalloc(&d_dot, sizeof(int)));

    a = new int[paddedN];
    b = new int[paddedN];
    dot = 0;
    for (int i = 0; i < paddedN; i++) {
        // Padding elements are zero so they do not change the dot product.
        a[i] = (i < N) ? 1 : 0;
        b[i] = (i < N) ? 1 : 0;
    }

    gpuErrchk(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_dot, &dot, sizeof(int), cudaMemcpyHostToDevice));

    gpu<<<nBlocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_dot);
    gpuErrchk(cudaGetLastError()); // catch launch-configuration errors

    gpuErrchk(cudaMemcpy(&dot, d_dot, sizeof(int), cudaMemcpyDeviceToHost));

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_dot);
    // was `delete a, b;` -- the comma operator freed only `a`, and with plain
    // delete instead of the delete[] that new[] requires.
    delete[] a;
    delete[] b;
}

// int main() {
// int tend = 1;
// int Nt = 10;
// int y0 = 1;
// int dt = tend/Nt;
// auto f = [] (int y) {return -1*pow(y, 3); };
// vector<int> t = linspace(0, tend, Nt+1);
// vector<int> y(Nt+1);
// y[0] = y0;
// vector<int> y_ex(Nt+1);
// y_ex[0] = y0;
// int tol = 1e-6;
// int maxiter = 100;
// for (int i=1; i<=Nt; i++) {
// int yi = y[i-1]; // int ylag = yi; // for (int j=0; j<maxiter; j++) { // y[i] = yi + dt * f(.5*(ylag + yi)); // if (abs(ylag - y[i]) < tol) break; // if (j == maxiter - 1) cout << "max iterations reached" << endl; // ylag = y[i]; // } // y_ex[i] = y0*pow(1/(1 + 2*pow(y0,2)*t[i]), .5); // } // Table table("out"); // table.addColumn(t); // table.addColumn(y); // table.addColumn(y_ex); // table.write(); // }
4,547
#include "includes.h"

/// Zeroes the Y component of v for the n entries starting at index ini.
/// One thread per entry; grids larger than n are guarded by the bounds check.
__global__ void KerResety(unsigned n, unsigned ini, float3 *v)
{
    const unsigned idx = blockDim.x * blockIdx.x + threadIdx.x; //-Number of particle.
    if (idx >= n) return;
    v[ini + idx].y = 0;
}
4,548
#include <stdio.h>

// Converts one frame of float RGB colors in [0.0,1.0] into packed 8-bit RGB
// bytes suitable for PNG output. Expects a 2D launch covering
// simWidth x simHeight, one thread per output pixel.
//
// colors    - device input, read at stride 4 (colors[4*quadIdx]) -- presumably
//             one float3 per quad vertex with the first vertex's color used
//             for the whole pixel; TODO confirm against the caller.
// pixelData - device output, 3 bytes (R,G,B) per pixel.
__global__ void FormPNGData(float3* colors, unsigned char* pixelData, unsigned int simWidth, unsigned int simHeight)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

    int quadIdx = x + simWidth*y;

    // Debug diagnostics (device printf): report indices that would fall
    // outside the color or pixel buffers. Note the accesses below are still
    // performed after the report.
    if (quadIdx * 4 >= (simWidth * simHeight * 4))
        printf("BAD: Out of colors[] boundary in FormPNGData, %d , max: %d\n", quadIdx*4, simWidth*simHeight*4);
    if (quadIdx*3 + 2 >= (simWidth * simHeight * 3))
        printf("BAD: Out of pixelData[] boundary in FormPNGData, %d, max: %d\n", quadIdx*3+2, simWidth*simHeight*3);

    // scale and map floating point pixel data [0.0,1.0] to unsigned char*
    // data in [0,255] for each RGB value. Size of device data
    // should match (3 * float3's == 4 * num pixelData)
    unsigned char r = (unsigned char)(colors[4 * quadIdx].x * (255));
    unsigned char g = (unsigned char)(colors[4 * quadIdx].y * (255));
    unsigned char b = (unsigned char)(colors[4 * quadIdx].z * (255));

    pixelData[3 * quadIdx + 0] = r;
    pixelData[3 * quadIdx + 1] = g;
    pixelData[3 * quadIdx + 2] = b;
}
4,549
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <sys/time.h> #include <inttypes.h> #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #define ERT_FLOP 2 #define ERT_TRIALS_MIN 1 #define ERT_WORKING_SET_MIN 1 #define GBUNIT (1024 * 1024 * 1024) #define REP2(S) S ; S #define REP4(S) REP2(S); REP2(S) #define REP8(S) REP4(S); REP4(S) #define REP16(S) REP8(S); REP8(S) #define REP32(S) REP16(S); REP16(S) #define REP64(S) REP32(S); REP32(S) #define REP128(S) REP64(S); REP64(S) #define REP256(S) REP128(S); REP128(S) #define REP512(S) REP256(S); REP256(S) #define KERNEL2(a,b,c) ((a) = (a)*(b) + (c)) #define KERNEL1(a,b,c) ((a) = (b) + (c)) void initialize(uint64_t nsize, double* __restrict__ A, double value) { uint64_t i; for (i = 0; i < nsize; ++i) { A[i] = value; } } void gpuKernel(uint64_t nsize, uint64_t ntrials, double* __restrict__ array, int* bytes_per_elem, int* mem_accesses_per_elem); __global__ void block_stride(uint64_t ntrials, uint64_t nsize, double *A) { uint64_t total_thr = gridDim.x * blockDim.x; uint64_t elem_per_thr = (nsize + (total_thr-1)) / total_thr; uint64_t blockOffset = blockIdx.x * blockDim.x; uint64_t start_idx = blockOffset + threadIdx.x; uint64_t end_idx = start_idx + elem_per_thr * total_thr; uint64_t stride_idx = total_thr; if (start_idx > nsize) { start_idx = nsize; } if (end_idx > nsize) { end_idx = nsize; } double alpha = 0.5; uint64_t i, j; for (j = 0; j < ntrials; ++j) { for (i = start_idx; i < end_idx; i += stride_idx) { double beta = 0.8; /* add 1 flop */ KERNEL1(beta,A[i],alpha); /* add 2 flops */ //KERNEL2(beta,A[i],alpha); /* add 4 flops */ //REP2(KERNEL2(beta,A[i],alpha)); /* add 8 flops */ //REP4(KERNEL2(beta,A[i],alpha)); /* add 16 flops */ //REP8(KERNEL2(beta,A[i],alpha)); /* add 32 flops */ //REP16(KERNEL2(beta,A[i],alpha)); /* add 64 flops */ //REP32(KERNEL2(beta,A[i],alpha)); /* add 128 flops */ //REP64(KERNEL2(beta,A[i],alpha)); /* add 256 flops */ 
//REP128(KERNEL2(beta,A[i],alpha)); /* add 512 flops */ //REP256(KERNEL2(beta,A[i],alpha)); /* add 1024 flops */ //REP512(KERNEL2(beta,A[i],alpha)); A[i] = beta; } alpha = alpha * (1 - 1e-8); } } int gpu_blocks = 512; int gpu_threads = 512; void gpuKernel(uint64_t nsize, uint64_t ntrials, double* __restrict__ A, int* bytes_per_elem, int* mem_accesses_per_elem) { *bytes_per_elem = sizeof(*A); *mem_accesses_per_elem = 2; //gpu_blocks = (nsize+1023)/1024; block_stride <<< gpu_blocks, gpu_threads>>> (ntrials, nsize, A); } double getTime() { double time; struct timeval tm; gettimeofday(&tm, NULL); time = tm.tv_sec + (tm.tv_usec / 1000000.0); return time; } int main(int argc, char *argv[]) { int rank = 0; int nprocs = 1; int nthreads = 1; int id = 0; uint64_t TSIZE = 1<<30; uint64_t PSIZE = TSIZE / nprocs; double * buf = (double *)malloc(PSIZE); if (buf == NULL) { fprintf(stderr, "Out of memory!\n"); return -1; } { id = 0; nthreads = 1; int num_gpus = 0; int gpu; int gpu_id; int numSMs; cudaGetDeviceCount(&num_gpus); if (num_gpus < 1) { fprintf(stderr, "No CUDA device detected.\n"); return -1; } for (gpu = 0; gpu < num_gpus; gpu++) { cudaDeviceProp dprop; cudaGetDeviceProperties(&dprop,gpu); /* printf("%d: %s\n",gpu,dprop.name); */ } cudaSetDevice(id % num_gpus); cudaGetDevice(&gpu_id); cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, gpu_id); uint64_t nsize = PSIZE / nthreads; nsize = nsize & (~(64-1)); nsize = nsize / sizeof(double); uint64_t nid = nsize * id ; // initialize small chunck of buffer within each thread initialize(nsize, &buf[nid], 1.0); double *d_buf; cudaMalloc((void **)&d_buf, nsize*sizeof(double)); cudaMemset(d_buf, 0, nsize*sizeof(double)); cudaDeviceSynchronize(); double startTime, endTime; uint64_t n,nNew; uint64_t t; int bytes_per_elem; int mem_accesses_per_elem; n = 1<<22; while (n <= nsize) { // working set - nsize uint64_t ntrials = nsize / n; if (ntrials < 1) ntrials = 1; //600 original for (t = 1; t <= 600; t = t + 1) { // 
working set - ntrials cudaMemcpy(d_buf, &buf[nid], n*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); if ((id == 0) && (rank==0)) { startTime = getTime(); } gpuKernel(n, t, d_buf, &bytes_per_elem, &mem_accesses_per_elem); cudaDeviceSynchronize(); if ((id == 0) && (rank == 0)) { endTime = getTime(); double seconds = (double)(endTime - startTime); uint64_t working_set_size = n * nthreads * nprocs; uint64_t total_bytes = t * working_set_size * bytes_per_elem * mem_accesses_per_elem; uint64_t total_flops = t * working_set_size * ERT_FLOP; printf("thread: %d\n", nthreads); // nsize; trials; microseconds; bytes; single thread bandwidth; total bandwidth printf("%12lld %12lld %15.3lf %12lld %12lld\n", working_set_size * bytes_per_elem, t, seconds * 1000000, total_bytes, total_flops); printf("BW: %15.3lf\n",total_bytes*1.0/seconds/1024/1024/1024); } // print cudaMemcpy(&buf[nid], d_buf, n*sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } // working set - ntrials nNew = 1.1 * n; if (nNew == n) { nNew = n+1; } n = nNew; // no break brfore break; } // working set - nsize cudaFree(d_buf); if (cudaGetLastError() != cudaSuccess) { printf("Last cuda error: %s\n",cudaGetErrorString(cudaGetLastError())); } cudaDeviceReset(); } // parallel region free(buf); printf("\n"); printf("META_DATA\n"); printf("FLOPS %d\n", ERT_FLOP); printf("GPU_BLOCKS %d\n", gpu_blocks); printf("GPU_THREADS %d\n", gpu_threads); return 0; }
4,550
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define SIZE 10

// Finds the MINIMUM of a[0..SIZE-1] and stores it in *c.
// (The kernel is named `max` for compatibility with existing call sites, but
// the comparison is `<`, i.e. it computes a minimum.)
//
// The original let every thread write *c concurrently -- a data race with an
// unpredictable winner. Here a single thread does the scan, which is
// race-free and trivially fast for SIZE elements.
__global__ void max(int *a , int *c)
{
    if (threadIdx.x == 0) {
        int m = a[0];
        for (int k = 1; k < SIZE; k++) {
            if (a[k] < m) {
                m = a[k];
            }
        }
        *c = m;
    }
}

int main()
{
    int i;
    srand(time(NULL)); // makes use of the computer's internal clock to control the choice of the seed

    int a[SIZE];
    int c;
    int *dev_a, *dev_c; // GPU / device parameters

    cudaMalloc((void **) &dev_a, SIZE*sizeof(int)); // assign memory to parameters on GPU from CUDA runtime API
    cudaMalloc((void **) &dev_c, sizeof(int));      // result is a single int, not an array

    printf( "Enter the elements:\n");
    for(int i=0;i<SIZE;i++)
    {
        printf( "\n");
        scanf("%d", &a[i]); // was scanf("%d", a[i]) -- missing &, undefined behavior
    }
    for( i = 0 ; i < SIZE ; i++)
    {
        printf("%d", a[i]); // echo the numbers
    }

    cudaMemcpy(dev_a , a, SIZE*sizeof(int),cudaMemcpyHostToDevice); // copy the array from CPU to GPU
    max<<<1,SIZE>>>(dev_a,dev_c); // call kernel function <<<number of blocks, number of threads>>>
    // was SIZE*sizeof(int): copying 10 ints into a single stack int is a
    // buffer overflow; only one int comes back.
    cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost);

    printf("\nmin = %d ",c);

    cudaFree(dev_a); // Free the allocated memory
    cudaFree(dev_c);
    printf("");
    return 0;
}
4,551
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

// Queries CUDA device 0 and prints its main hardware properties.
int main()
{
    cudaDeviceProp deviceProp;
    cudaError_t err = cudaGetDeviceProperties(&deviceProp, 0);
    if (err != cudaSuccess) {
        printf("cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Device name:%s\n", deviceProp.name);
    // totalGlobalMem / totalConstMem / sharedMemPerBlock are size_t; the old
    // %u / %d specifiers truncated or misread them on 64-bit builds.
    printf("Total global memory:%zu\n", deviceProp.totalGlobalMem);
    printf("Total constant memory:%zu\n", deviceProp.totalConstMem);
    printf("Shared memory per block:%zu\n", deviceProp.sharedMemPerBlock);
    printf("Registers per block:%d\n", deviceProp.regsPerBlock);
    printf("Warp size:%d\n", deviceProp.warpSize);
    printf("Max threads per block:%d\n", deviceProp.maxThreadsPerBlock);
    // was "Computer capabiliti:%d" with TWO int arguments -- a format/argument
    // mismatch that dropped the minor version; print major.minor properly.
    printf("Compute capability:%d.%d\n", deviceProp.major, deviceProp.minor);
    printf("Multiprocessor count:%d\n", deviceProp.multiProcessorCount);
    printf("Clock rate:%d\n", deviceProp.clockRate);
    printf("Memory clock rate:%d\n", deviceProp.memoryClockRate);
    printf("L2 cache:%d\n", deviceProp.l2CacheSize);
    printf("Memory bus width:%d\n", deviceProp.memoryBusWidth);
    printf("Max threads dimensions:x=%d,y=%d,z=%d\n",
           deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    system("pause");
    return 0;
}
4,552
// Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p1.cu -o assignment5-p1 // Execute: ./assignment5-p1 #include <cmath> #include <cstdint> #include <cuda.h> #include <iostream> #include <new> #include <sys/time.h> #define THRESHOLD (0.000001) #define SIZE1 8192 #define SIZE2 8200 #define ITER 100 using std::cerr; using std::cout; using std::endl; __global__ void kernel1(double* d_k1) { // TODO: Fill in int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < SIZE1 - 1){ for(int k = 0; k < ITER; k++){ for(int i = 1; i < SIZE1 - 1; i++){ d_k1[i*SIZE1 + (j+1)] = d_k1[(i-1)*SIZE1 + (j+1)] + d_k1[i*SIZE1 + (j+1)] + d_k1[(i+1)*SIZE1 + (j+1)]; } } } } __global__ void kernel2(double* d_k2) { // TODO: Fill in int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < SIZE2 - 1){ for(int k = 0; k < ITER; k++){ for(int i = 1; i < SIZE2 - 1; i++){ d_k2[i*SIZE2 + (j+1)] = d_k2[(i-1)*SIZE2 + (j+1)] + d_k2[i*SIZE2 + (j+1)] + d_k2[(i+1)*SIZE2 + (j+1)]; } } } } // 2-way unrolling __global__ void kernel3(double* d_k3) { // TODO: Fill in int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < SIZE2 - 1){ for(int k = 0; k < ITER; k++){ for(int i = 1; i < SIZE2 - 1; i += 2){ double w, x, y, z; w = d_k3[(i-1)*SIZE2 + (j+1)]; x = d_k3[(i)*SIZE2 + (j+1)]; y = d_k3[(i+1)*SIZE2 + (j+1)]; z = d_k3[(i+2)*SIZE2 + (j+1)]; x = w + x + y; y = x + y + z; d_k3[(i)*SIZE2 + (j+1)] = x; d_k3[(i+1)*SIZE2 + (j+1)] = y; } } } } __host__ void serial(double** h_ser) { for (int k = 0; k < ITER; k++) { for (int i = 1; i < (SIZE1 - 1); i++) { for (int j = 0; j < (SIZE1 - 1); j++) { h_ser[i][j + 1] = (h_ser[i - 1][j + 1] + h_ser[i][j + 1] + h_ser[i + 1][j + 1]); } } } } __host__ void check_result(double** w_ref, double** w_opt, uint64_t size) { double maxdiff = 0.0, this_diff = 0.0; int numdiffs = 0; for (uint64_t i = 0; i < size; i++) { for (uint64_t j = 0; j < size; j++) { this_diff = w_ref[i][j] - w_opt[i][j]; if (fabs(this_diff) > THRESHOLD) { numdiffs++; if (this_diff > maxdiff) maxdiff = this_diff; } } } 
if (numdiffs > 0) { cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff << endl; } else { cout << "No differences found between base and test versions\n"; } } __host__ double rtclock() { // Seconds struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) { cout << "Error return from gettimeofday: " << stat << "\n"; } return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } int main() { double** h_ser = new double*[SIZE1]; double** h_k1 = new double*[SIZE1]; double** h_k2 = new double*[SIZE2]; double** h_k3 = new double*[SIZE2]; for(int i = 0; i < SIZE1; i++){ h_ser[i] = new double[SIZE1]; } h_k1[0] = new double[SIZE1 * SIZE1]; h_k2[0] = new double[SIZE2 * SIZE2]; h_k3[0] = new double[SIZE2 * SIZE2]; for (int i = 1; i < SIZE1; i++) { h_k1[i] = h_k1[i-1] + SIZE1; } for (int i = 0; i < SIZE1; i++) { for (int j = 0; j < SIZE1; j++) { h_ser[i][j] = 1; h_k1[i][j] = 1; } } for (int i = 1; i < SIZE2; i++) { h_k2[i] = h_k2[i-1] + SIZE2; h_k3[i] = h_k3[i-1] + SIZE2; } for (int i = 0; i < SIZE2; i++) { for (int j = 0; j < SIZE2; j++) { h_k2[i][j] = 1; h_k3[i][j] = 1; } } double clkbegin = rtclock(); serial(h_ser); double clkend = rtclock(); double time = clkend - clkbegin; // seconds cout << "Serial code on CPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / time) << " GFLOPS; Time = " << time * 1000 << " msec" << endl; cudaError_t status; cudaEvent_t start, end; float k1_time, k2_time, k3_time; // ms double* d_k1; // TODO: Fill in size_t size = SIZE1 * SIZE1 * sizeof(double); dim3 threadsPerBlock(1024); dim3 numBlocks((SIZE1+threadsPerBlock.x-1)/threadsPerBlock.x); status = cudaMalloc(&d_k1, size); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_k1, h_k1[0], size, cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } 
kernel1<<<numBlocks, threadsPerBlock>>>(d_k1); status = cudaMemcpy(h_k1[0], d_k1, size, cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&k1_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); check_result(h_ser, h_k1, SIZE1); cout << "Kernel 1 on GPU: " << ((2.0 * SIZE1 * SIZE1 * ITER) / (k1_time * 1.0e-3)) << " GFLOPS; Time = " << k1_time << " msec" << endl; double* d_k2; // TODO: Fill in size = SIZE2 * SIZE2 * sizeof(double); threadsPerBlock = dim3(32); numBlocks = dim3((SIZE2+threadsPerBlock.x-1)/threadsPerBlock.x); status = cudaMalloc(&d_k2, size); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_k2, h_k2[0], size, cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } kernel2<<<numBlocks, threadsPerBlock>>>(d_k2); status = cudaMemcpy(h_k2[0], d_k2, size, cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&k2_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); cout << "Kernel 2 on GPU: " << ((2.0 * SIZE2 * SIZE2 * ITER) / (k2_time * 1.0e-3)) << " GFLOPS; Time = " << k2_time << " msec" << endl; // kernel 3 double* d_k3; // TODO: Fill in size = SIZE2 * SIZE2 * sizeof(double); threadsPerBlock = dim3(32); numBlocks = dim3((SIZE2+threadsPerBlock.x-1)/threadsPerBlock.x); status = cudaMalloc(&d_k3, size); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_k3, h_k3[0], size, cudaMemcpyHostToDevice); if (status != cudaSuccess) { 
fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } kernel3<<<numBlocks, threadsPerBlock>>>(d_k3); status = cudaMemcpy(h_k3[0], d_k3, size, cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&k3_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); // checking whether kernel2 and kernel3 produce same result check_result(h_k2, h_k3, SIZE2); cout << "Kernel 3 on GPU: " << ((2.0 * SIZE2 * SIZE2 * ITER) / (k3_time * 1.0e-3)) << " GFLOPS; Time = " << k3_time << " msec" << endl; cudaFree(d_k1); cudaFree(d_k2); cudaFree(d_k3); for(int i = 0; i < SIZE1; i++){ delete[] h_ser[i]; } delete[] h_k1[0]; delete[] h_ser; delete[] h_k1; delete[] h_k2[0]; delete[] h_k3[0]; delete[] h_k2; delete[] h_k3; return EXIT_SUCCESS; }
4,553
// Writes the grid width into a[gridDim.x]; with a <<<1,1>>> launch this
// stores 1 into a[1].
__global__ void kernel(int *a)
{
    int i = gridDim.x;
    a[i] = i;
}

int main(void)
{
    int *a;
    // The kernel writes a[gridDim.x] == a[1], so at least two ints are
    // required; the original allocated only 4 bytes (one int), making the
    // store an out-of-bounds device write.
    if (cudaMalloc(&a, 2 * sizeof(int)) != cudaSuccess) {
        return 1;
    }
    kernel<<<1,1>>>(a);
    cudaError_t err = cudaDeviceSynchronize();
    cudaFree(a); // release device memory on every path (was leaked)
    if (cudaSuccess != err) {
        return 1;
    }
    return 0;
}
4,554
#include "includes.h"

// Remaps each node's aggregate id after a round of aggregate merges and
// marks aggregates that absorbed a merge.
//
// size         - number of entries in `mis` (one thread per entry)
// mergeWith    - per-aggregate: id of the aggregate it merges into, or -1
//                if it is not merging
// offsets      - per-aggregate compaction offset subtracted from the id --
//                presumably a prefix count of ids removed by merges; TODO
//                confirm against the code that builds it
// mis          - in/out: per-node aggregate id, rewritten into the new id space
// splitsToMake - out: set to 1 for every aggregate that received a merge.
//                NOTE(review): written concurrently by many threads without
//                atomics, but every writer stores the same value (1), so the
//                race appears benign.
__global__ void MakeMerges_MarkSplits(int size, int* mergeWith, int* offsets, int* mis, int* splitsToMake)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
    {
        int currentAgg = mis[idx];
        int newAgg = mergeWith[currentAgg];
        // If the aggregate is not merging just apply offset
        if (newAgg == -1)
        {
            mis[idx] = currentAgg - offsets[currentAgg];
        }
        // The aggregate is merging find offset of aggregate merging with
        else
        {
            int newId = newAgg - offsets[newAgg];
            mis[idx] = newId;
            splitsToMake[newId] = 1; // flag the merged-into aggregate
        }
    }
}
4,555
//xfail:BOOGIE_ERROR
//--blockDim=512 --gridDim=64 --loop-unwind=2 --no-inline
//kernel.cu: error: possible write-write race on B

#include <cuda.h>
#include <stdio.h>
#include <assert.h>

#define N 2//512

extern "C" {

// GPUVerify regression test with an INTENTIONAL defect: every thread in the
// block writes the same shared slots B[0..N*2-1], so a write-write race on B
// is expected. The //xfail header above tells the test harness that the
// verifier must report this race -- do not "fix" the kernel.
__global__ void helloCUDA(float *A)
{
    __shared__ float B[256];

    for(int i = 0; i < N*2; i ++)
    {
        B[i] = A[i];
    }
}

}
4,556
/**
 *
 * @brief Add function GPU implementation
 *
 * @file addCU.cu
 * @author Guillermo Hernández
 * @date 16 Mar 2016
 *
 */

// System includes
#include <stdlib.h>
#include <stdio.h>

// CUDA runtime
#include <cuda_runtime.h>

/**
 * @brief Macro to check for CUDA errors
 *
 * If code!=cudaSuccess (0) it prints a message in stderr and makes the
 * enclosing function return 1.
 *
 * Fix: the body previously called cudaGetErrorString(err), referencing a
 * variable named `err` at the expansion site instead of the macro parameter
 * `code`; it only compiled because every caller happened to have a local
 * named `err`.
 *
 * @param code integer code returned by last CUDA-related function (cudaMalloc, cudaGetLastError,...)
 * @param msg a string describing the error
 */
#define checkError(code,msg) if (code != cudaSuccess) {\
        fprintf(stderr, msg);\
        fprintf(stderr,"(error code %s)\n",cudaGetErrorString(code));\
        return 1;\
    }

/**
 * @brief CUDA Kernel to calculate vector addition
 *
 * Kernel to computes the vector addition of @p A and @p B into @p C, all of them having @p n elements
 */
__global__ void vectorAdd(const float *A, const float *B, float *C, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
    {
        C[i] = A[i] + B[i];
    }
}

/**
 * @brief Host wrapper: computes h_C = h_A + h_B (n floats) on the GPU.
 *
 * Allocates device buffers, copies the inputs, launches vectorAdd, copies the
 * result back, and frees device memory. Returns 0 on success, 1 on any CUDA
 * error (reported to stderr by checkError).
 */
extern "C" int AddGPU(const float *h_A, const float *h_B,float *h_C, int n)
{
    // GPU implementation must wrap the call to the kernel
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    size_t size = n * sizeof(float);

    // Allocate the device input vectors
    float *d_A = NULL;
    err = cudaMalloc((void **) &d_A, size);
    checkError(err,"Failed to allocate device vector A");

    float *d_B = NULL;
    err = cudaMalloc((void **) &d_B, size);
    checkError(err,"Failed to allocate device vector B");

    float *d_C = NULL;
    err = cudaMalloc((void **) &d_C, size);
    checkError(err,"Failed to allocate device vector C");

    // Copy input to device memory
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    checkError(err,"Failed to copy vector A from host to device");
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    checkError(err,"Failed to copy vector B from host to device");

    // Launch the kernel: one thread per element, ceil-div grid.
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, n);
    err = cudaGetLastError();
    checkError(err,"Failed to launch vectorAdd kernel");

    // Copy the device result vector in device memory to the host result vector
    // in host memory.
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    checkError(err,"Failed to copy vector C from device to host");

    // Free device global memory
    err = cudaFree(d_A);
    checkError(err,"Failed to free device vector A");
    err = cudaFree(d_B);
    checkError(err,"Failed to free device vector B");
    err = cudaFree(d_C);
    checkError(err,"Failed to free device vector C");

    return 0;
}
4,557
#include "includes.h"

#define PI 3.141592653589793
#define BLOCKSIZE 1024

/** In-place scale: multiplies each of the first `size` elements of `dst` by `num`. */
__global__ void cuMultiplyNum(float *dst, float num, int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < size) {
        dst[idx] = dst[idx] * num;
    }
}
4,558
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #define BOX_SIZE 23000 /* size of the data box on one dimension */ /* descriptors for single atom in the tree */ typedef struct atomdesc { double x_pos; double y_pos; double z_pos; } atom; typedef struct hist_entry{ unsigned long long d_cnt; /* need a long long type as the count might be huge */ } bucket; bucket *histogram; /* list of all buckets in the histogram */ long long PDH_acnt; /* total number of data points */ int num_buckets; /* total number of buckets in the histogram */ double PDH_res; /* value of w */ atom *atom_list; /* list of all data points */ struct timezone Idunno; struct timeval startTime, endTime; double p2p_distanceOriginal(int ind1, int ind2) { double x1 = atom_list[ind1].x_pos; double x2 = atom_list[ind2].x_pos; double y1 = atom_list[ind1].y_pos; double y2 = atom_list[ind2].y_pos; double z1 = atom_list[ind1].z_pos; double z2 = atom_list[ind2].z_pos; return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2)); } __device__ double p2p_distance(atom *atom, int ind1, int ind2) { double x1 = atom[ind1].x_pos; double x2 = atom[ind2].x_pos; double y1 = atom[ind1].y_pos; double y2 = atom[ind2].y_pos; double z1 = atom[ind1].z_pos; double z2 = atom[ind2].z_pos; return sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2)); } double report_running_time() { long sec_diff, usec_diff; gettimeofday(&endTime, &Idunno); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff); return (double)(sec_diff*1.0 + usec_diff/1000000.0); } double report_running_time_gpu() { long sec_diff, usec_diff; gettimeofday(&endTime, &Idunno); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time 
for GPU version: %ld.%06ld\n", sec_diff, usec_diff); return (double)(sec_diff*1.0 + usec_diff/1000000.0); } int PDH_baselineOrginal() { int i, j, h_pos; double dist; for(i = 0; i < PDH_acnt; i++) { for(j = i+1; j < PDH_acnt; j++) { dist = p2p_distanceOriginal(i,j); h_pos = (int) (dist / PDH_res); histogram[h_pos].d_cnt++; } } return 0; } __global__ void PDH_baseline(bucket *histogram, atom *atom, double weight, int size) { int i, j; int position; double distance; // Add the thread Index with the block index and the dim x i = blockIdx.x * blockDim.x + threadIdx.x; j = i + 1; // Get the distance and then atomic add the position of the histogram with 1 for (int a = j; a < size; a++) { distance = p2p_distance(atom, i, a); printf("Distance: %lf\n", distance); position = (int) (distance / weight); printf("position: %d\n", position); atomicAdd( &histogram[position].d_cnt, 1); } } long long output_histogram(){ int i; long long total_cnt = 0; for(i=0; i< num_buckets; i++) { if(i%5 == 0) /* we print 5 buckets in a row */ printf("\n%02d: ", i); printf("%15lld ", histogram[i].d_cnt); total_cnt += histogram[i].d_cnt; /* we also want to make sure the total distance count is correct */ if(i == num_buckets - 1) printf("\n T:%lld \n", total_cnt); else printf("| "); } return total_cnt; } int main(int argc, char const *argv[]) { PDH_acnt = atoi(argv[1]); // Number of atoms PDH_res = atof(argv[2]); // Input Distance: W num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1; size_t histogramSize = sizeof(bucket)*num_buckets; size_t atomSize = sizeof(atom)*PDH_acnt; histogram = (bucket *)malloc(histogramSize); atom_list = (atom *)malloc(atomSize); bucket *d_histogram = NULL; atom *d_atom_list = NULL; double difference_time1, difference_time2; long long difference_t1, difference_t2; srand(1); /* generate data following a uniform distribution */ for(int i = 0; i < PDH_acnt; i++) { atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; atom_list[i].y_pos = ((double)(rand()) / 
RAND_MAX) * BOX_SIZE; atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE; } /* start counting time */ gettimeofday(&startTime, &Idunno); /* call CPU single thread version to compute the histogram */ PDH_baselineOrginal(); /* check the total running time */ difference_time1 = report_running_time(); /* print out the histogram */ difference_t1 = output_histogram(); // Allocate memory to GPU arrays and then copy the data from the CPU arrays cudaMalloc((void**) &d_histogram, histogramSize); cudaMalloc((void**) &d_atom_list, atomSize); cudaMemcpy(d_histogram, histogram, histogramSize, cudaMemcpyHostToDevice); cudaMemcpy(d_atom_list, atom_list, atomSize, cudaMemcpyHostToDevice); /* start counting time */ gettimeofday(&startTime, &Idunno); // Launch the kernal and perform calcualtions with the GPU PDH_baseline PDH_baseline <<<ceil(PDH_acnt/32), 32>>> (d_histogram, d_atom_list, PDH_res, PDH_acnt); cudaMemcpy(histogram, d_histogram, histogramSize, cudaMemcpyDeviceToHost); /* check the total running time */ difference_time2 = report_running_time_gpu(); // Print the histogram difference_t2 = output_histogram(); printf("\nDifference Of the two histograms.\n"); printf("Running time for CPU version: %lf\n", difference_time1); printf("Running time for GPU version: %lf\n", difference_time2); printf("Total distance for CPU version: %lld\n", difference_t1); printf("Total distance for GPU version: %lld\n", difference_t2); // Free the GPU(device) and the CPU(host) arrays cudaFree(d_histogram); cudaFree(d_atom_list); free(histogram); free(atom_list); return 0; }
4,559
#include <stdio.h> #define BLOCK_SIZE 2 int get_max_size (int a, int d) { int temp = a/d; if (a%d != 0) { temp = temp+1; } return temp; } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void fixup(unsigned int *input, unsigned int *aux, int len) { unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE; if (blockIdx.x > 0) { if (start + t < len) input[start + t] += aux[blockIdx.x ]; if (start + BLOCK_SIZE + t < len) input[start + BLOCK_SIZE + t] += aux[blockIdx.x ]; } } __global__ void scanPart1 (unsigned int* input, unsigned int* output, unsigned int* aux, int numElems) { extern __shared__ unsigned int sdata[]; //int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read int lastReadValue = 0; //Input Read if (start + myLocalId < numElems) { sdata[myLocalId] = input[start + myLocalId]; } else { sdata[myLocalId] = 0; } if (start + BLOCK_SIZE + myLocalId < numElems) { sdata[BLOCK_SIZE + myLocalId] = input[start + BLOCK_SIZE + myLocalId]; } else { sdata[BLOCK_SIZE + myLocalId] = 0; } __syncthreads(); lastReadValue = sdata[2*BLOCK_SIZE-1]; //Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE; stride <<= 1) { int index = (myLocalId + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE) sdata[index] += sdata[index - stride]; __syncthreads(); } if (myLocalId == 0) { sdata[2*BLOCK_SIZE-1] = 0; } __syncthreads(); // Post reduction for (stride = BLOCK_SIZE ; stride; stride >>= 1) { int index = (myLocalId + 1) * stride * 2 - 1; //if (index + stride < 2 * BLOCK_SIZE) if (index < 2 * BLOCK_SIZE) { // unsigned int temp = sdata[index+stride]; // sdata[index + stride] += sdata[index]; // 
sdata[index] = temp; unsigned int temp = sdata[index]; sdata[index] += sdata[index-stride]; sdata[index-stride] = temp; } __syncthreads(); } if (start + myLocalId < numElems) output[start + myLocalId] = sdata[myLocalId]; if (start + BLOCK_SIZE + myLocalId < numElems) output[start + BLOCK_SIZE + myLocalId] = sdata[BLOCK_SIZE + myLocalId]; if (myLocalId == 0 && aux!=NULL) aux[blockIdx.x] = sdata[2 * BLOCK_SIZE - 1] + lastReadValue; } __global__ void splitInput(int compareAndValue, unsigned int* input, unsigned int* output, int maxElements) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; if (myGlobalId >= maxElements) { return; } if(((input[myGlobalId] & compareAndValue)) > 0) { printf("%d. %d & %d is 0\n",myGlobalId,input[myGlobalId],compareAndValue); output[myGlobalId] = 0; } else { printf("%d. %d & %d is 1\n",myGlobalId,input[myGlobalId],compareAndValue); output[myGlobalId] = 1; } printf("%d. %d\n",myGlobalId,input[myGlobalId]); } __global__ void possibleLocations (unsigned int* input, unsigned int* input_scan, unsigned int* output, unsigned int numElems, unsigned int compareAndValue) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read int total = input_scan[numElems-1] + (((input[numElems-1] & compareAndValue) > 0)?0:1); printf("Total %d\n",total); if (myLocalId + start < numElems) { //output[myGlobalId] = myGlobalId - input_scan[myGlobalId] + total; output[start + myLocalId] = start + myLocalId - input_scan[start + myLocalId] + total; printf("%d. %d might go to %d\n",start + myLocalId,input[myLocalId + start], output[start + myLocalId]); } if (myLocalId + start + BLOCK_SIZE < numElems) { output[start + myLocalId + BLOCK_SIZE] = start + myLocalId + BLOCK_SIZE - input_scan[start + myLocalId + BLOCK_SIZE] + total ; printf("%d. 
%d might go to %d\n",start + myLocalId,input[myLocalId + start+BLOCK_SIZE], output[start + myLocalId + BLOCK_SIZE]); } } __global__ void finalLocations ( unsigned int* input, unsigned int* input_scan, unsigned int* input_vals, unsigned int* d_setOneIfOne, unsigned int* output, unsigned int numElems) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read if (myLocalId + start < numElems) { if (d_setOneIfOne[myLocalId + start] == 0) { output[input[myLocalId + start]] = input_vals[myLocalId + start]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start], input[myLocalId + start]); } else { output[input_scan[myLocalId + start]] = input_vals[myLocalId + start]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start], input_scan[myLocalId + start]); } } if (myLocalId + start + BLOCK_SIZE < numElems) { if (d_setOneIfOne[myLocalId + start + BLOCK_SIZE] == 0) { output[input[myLocalId + start + BLOCK_SIZE]] = input_vals[myLocalId + start + BLOCK_SIZE]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start+ BLOCK_SIZE], input[myLocalId + start +BLOCK_SIZE]); } else { output[input_scan[myLocalId + start + BLOCK_SIZE]] = input_vals[myLocalId + start + BLOCK_SIZE]; printf("%d. 
%d goes to %d\n", myGlobalId, input_vals[myLocalId + start+BLOCK_SIZE] , input_scan[myLocalId + start +BLOCK_SIZE]); } } } int main() { unsigned int h_inputVals[10] = {3, 4, 1, 2, 7, 6, 5, 0, 9, 8}; unsigned int numElems = 10; unsigned int h_bins[2]; int histo_size = sizeof(unsigned int)*2; unsigned int* d_inputVals; gpuErrchk(cudaMalloc(&d_inputVals, numElems*sizeof(numElems))); gpuErrchk(cudaMemcpy(d_inputVals, h_inputVals, numElems*sizeof(numElems), cudaMemcpyHostToDevice)); unsigned int* d_bins; gpuErrchk(cudaMalloc(&d_bins, histo_size)); unsigned int* d_setOneIfOne; unsigned int* d_possibleLocations; unsigned int* d_finalLocations; unsigned int* d_scan; unsigned int* h_scan; h_scan = (unsigned int*)malloc(numElems*sizeof(numElems)); gpuErrchk(cudaMalloc(&d_setOneIfOne, numElems*sizeof(numElems))); gpuErrchk(cudaMalloc(&d_scan, numElems*sizeof(numElems))); gpuErrchk(cudaMalloc(&d_possibleLocations, numElems*sizeof(numElems))); for (int i=0;i<10;i++) { printf("%d ", h_inputVals[i]); } printf("\n"); unsigned int* h_setOneIfOne; h_setOneIfOne = (unsigned int*)malloc(numElems*sizeof(numElems)); for (int i=0;i<4;i++) { gpuErrchk(cudaMalloc(&d_finalLocations, numElems*sizeof(numElems))); printf("Round %d\n",i); gpuErrchk(cudaMemset(d_bins, 0, histo_size)); gpuErrchk(cudaMemset(d_setOneIfOne,0, numElems*sizeof(numElems))); gpuErrchk(cudaMemset(d_scan,0, numElems*sizeof(numElems))); int compareAndValue = 1 << i; int numberThreadPerBlock = 512; dim3 blockDim_si(numberThreadPerBlock); dim3 gridDim_si(get_max_size(numElems,numberThreadPerBlock)); splitInput<<<gridDim_si,blockDim_si>>>(compareAndValue, d_inputVals, d_setOneIfOne, numElems); gpuErrchk(cudaMemcpy(h_setOneIfOne, d_setOneIfOne, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf("\n"); dim3 blockDim_sp(BLOCK_SIZE); dim3 gridDim_sp(get_max_size(numElems,2*BLOCK_SIZE)); unsigned int* d_aux; unsigned int* d_aux_scan; 
unsigned int* h_aux; gpuErrchk(cudaMalloc(&d_aux, get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int))); gpuErrchk(cudaMalloc(&d_aux_scan, get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int))); h_aux = (unsigned int*)malloc(get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int)); // gpuErrchk(cudaMemcpy(d_scan, d_setOneIfOne, numElems*sizeof(numElems), cudaMemcpyDeviceToDevice)); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); scanPart1<<<gridDim_sp,blockDim_sp,BLOCK_SIZE*2*sizeof(unsigned int)>>> (d_setOneIfOne,d_scan,d_aux,numElems); gpuErrchk(cudaMemcpy(h_scan, d_scan, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_scan[i]); h_scan[i] = 0; } printf("\n"); dim3 blockDim_sp2(get_max_size(numElems,2*BLOCK_SIZE)); gpuErrchk(cudaMemcpy(h_aux, d_aux, blockDim_sp2.x*sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i=0;i<blockDim_sp2.x;i++) { printf("%d ", h_aux[i]); h_aux[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - 1, Block - %d\n",blockDim_sp2.x); scanPart1<<<1,blockDim_sp2,BLOCK_SIZE*2*sizeof(unsigned int)>>>(d_aux,d_aux_scan,NULL,blockDim_sp2.x); gpuErrchk(cudaMemcpy(h_aux, d_aux_scan, blockDim_sp2.x*sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i=0;i<blockDim_sp2.x;i++) { printf("%d ", h_aux[i]); h_aux[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); fixup<<<gridDim_sp,blockDim_sp>>>(d_scan,d_aux_scan,numElems); gpuErrchk(cudaMemcpy(h_scan, d_scan, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_scan[i]); h_scan[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); possibleLocations<<<gridDim_sp,blockDim_sp>>>(d_inputVals,d_scan, d_possibleLocations, numElems, compareAndValue); gpuErrchk(cudaMemcpy(h_setOneIfOne, d_possibleLocations, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); printf 
("Possible Locations are \n"); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf ("\n"); finalLocations<<<gridDim_sp,blockDim_sp>>>(d_possibleLocations,d_scan,d_inputVals, d_setOneIfOne, d_finalLocations,numElems); cudaDeviceSynchronize(); gpuErrchk(cudaFree(d_inputVals)); d_inputVals = d_finalLocations; gpuErrchk(cudaMemcpy(h_setOneIfOne, d_finalLocations, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); printf ("\nFinal Positions are \n"); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf("\n******************************************\n"); //printf("Histogram Values - %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, compareAndValue); } gpuErrchk(cudaFree(d_bins)); gpuErrchk(cudaFree(d_setOneIfOne)); free(h_setOneIfOne); gpuErrchk(cudaFree(d_possibleLocations)); free(h_scan); return 0; }
4,560
#include "includes.h" __global__ void lots_of_float_compute(float *inputs, int N, size_t niters, float *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for ( ; tid < N; tid += nthreads) { size_t iter; float val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0f) - 101.0f; val = (val / 3.0f) + 102.0f; val = (val + 1.07f) - 103.0f; val = (val / 1.037f) + 104.0f; val = (val + 3.00f) - 105.0f; val = (val / 0.22f) + 106.0f; } outputs[tid] = val; } }
4,561
__global__ void vectorAdd1(const float *A, const float *B, float *C, int numElements) { /*@ requires 0 <= numElements; requires numElements < gridDim.x * blockDim.x; ensures \forall i; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { /*@ ghost int m; requires numElements == m * blockDim.x * gridDim.x; ensures \forall i [C[i]]; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */ int i = blockDim.x * blockIdx.x + threadIdx.x; /*@ loop invariant (\exists t : thread; active(t)) -> \forall t : thread; active(t); loop invariant i == loop_count * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; loop invariant \forall k [C[k]]; 0 <= k && k < blockDim.x * gridDim.x * loop_count -> C[k] == A[k] + B[k]; */ while (i < numElements) { C[i] = A[i] + B[i]; i += gridDim.x * blockDim.x; } } __global__ void vectorAdd_non_unif(const float *A, const float *B, float *C, int numElements) { /*@ requires numElements > 0; ensures \forall i; 0 <= i -> i < numElements -> C[i] == A[i] + B[i] */ int i = blockDim.x * blockIdx.x + threadIdx.x; /*@ loop invariant (loop_count - 1) * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x < numElements -> i == loop_count * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; loop invariant (loop_count - 1) * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x >= numElements -> i == (loop_count - 1) * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; loop invariant \forall k; 0 <= k && k < blockDim.x * gridDim.x * loop_count && k < numElements -> C[k] == A[k] + B[k]; */ while (i < numElements) { C[i] = A[i] + B[i]; i += gridDim.x * blockDim.x; } } __global__ void vectorAddDownward(const float *A, const float *B, float *C, int numElements) { /*@ ghost int m requires numElements == m * blockDim.x * gridDim.x; 
ensures \forall i [C[i]]; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */ int i = numElements - 1 - blockDim.x * blockIdx.x - threadIdx.x; /*@ loop invariant (\exists t : thread; active(t)) -> \forall t : thread; active(t); loop invariant i == numElements - 1 - loop_count * blockDim.x * gridDim.x - blockDim.x * blockIdx.x - threadIdx.x; loop invariant \forall k [C[k]]; numElements - blockDim.x * gridDim.x * loop_count <= k && k < numElements -> C[k] == A[k] + B[k]; */ while (0 <= i) { C[i] = A[i] + B[i]; i -= gridDim.x * blockDim.x; } }
4,562
#include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/generate.h> #include <thrust/random.h> #include <thrust/iterator/transform_iterator.h> #include <iostream> template <typename T> class rand_ { public: __host__ __device__ T operator() (){ thrust::default_random_engine rng; thrust::uniform_int_distribution<T> dist(1, 100); return dist(rng); } }; template <typename T> struct square : public thrust::unary_function<T, T> { __host__ __device__ T operator() (const T& x) const { return x*x; } }; int main() { int N = 1000; thrust::device_vector<int> v_device(N); thrust::generate(v_device.begin(), v_device.end(), rand_<int>()); thrust::make_transform_iterator(v_device.begin(), square<int>()); int sum = thrust::reduce(v_device.begin(), v_device.end(), 0, thrust::plus<int>()); std::cout << "sum : " << sum << std::endl; return 0; }
4,563
#include <stdio.h> #include <chrono> #include <random> #include <limits> void serial_add(float* A, float* B, float* C, int N) { for (int i = 0; i < N; i++) { C[i] = A[i] + B[i]; } } /** * @brief void vecAdd(float* A, float* B, float* C) * Adds elements of vectors A and B and stores them in vector D * @param A: vector addend * @param B: vector addend * @param C: pointer to where the result of A + B will be stored * @return C: pointer to address where A + B will be stored */ __global__ void vecAdd(float* A, float* B, float* C) { int i = threadIdx.x; C[i] = A[i] + B[i]; } int main() { int N = 10000000; float* A = (float*)malloc(N * sizeof(float)); float* B = (float*)malloc(N * sizeof(float)); float* C = (float*)malloc(N * sizeof(float)); for (int i = 0; i < N; i++) { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); float d = std::generate_canonical<float, std::numeric_limits<float>::digits>(generator); A[i] = d; } for (int i = 0; i < N; i++) { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); float d = std::generate_canonical<float, std::numeric_limits<float>::digits>(generator); B[i] = d; } float* dev_A; float* dev_B; float* dev_C; cudaMalloc((void**) &dev_A, N * sizeof(float)); cudaMalloc((void**) &dev_B, N * sizeof(float)); cudaMalloc((void**) &dev_C, N * sizeof(float)); cudaMemcpy(dev_A, A, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_B, B, N * sizeof(float), cudaMemcpyHostToDevice); auto start = std::chrono::high_resolution_clock::now(); vecAdd<<<1, N>>>(dev_A, dev_B, dev_C); cudaMemcpy(C, dev_C, N * sizeof(float), cudaMemcpyDeviceToHost); auto end = std::chrono::high_resolution_clock::now(); printf("GPU Compute Time: %u\n", std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 25.0); cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); for (int i = 0; i < N; i++) { unsigned seed = 
std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); float d = std::generate_canonical<float, std::numeric_limits<float>::digits>(generator); A[i] = d; } for (int i = 0; i < N; i++) { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); float d = std::generate_canonical<float, std::numeric_limits<float>::digits>(generator); B[i] = d; } start = std::chrono::high_resolution_clock::now(); serial_add(A, B, C, N); end = std::chrono::high_resolution_clock::now(); printf("CPU Compute Time: %u\n", std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 25.0); free(A); free(B); free(C); return 0; }
4,564
#include <stdio.h> int main(void) { int N; int *a, *b, *c; N = 10; a = (int*)malloc(N*sizeof(int)); b = (int*)malloc(N*sizeof(int)); cudaMalloc(&c, N*sizeof(int)); for (int i = 0; i < N; i++){ a[i] = i; } cudaMemcpy(c, a, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(b, c, N*sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < N; i++){ printf("%d ,", a[i]); } for (int i = 0; i < N; i++){ printf("%d ,", b[i]); } }
4,565
#include <cuda.h> #include <time.h> /* Size of a block */ #define BLOCK_X 32 #define BLOCK_Y 16 #define MANY 200 #define STOP 0 #define START 1 __global__ void kernadd (float* mout, float* min1, float *min2, int nx, int ny, size_t pitch) { int i, j, index; /* UP TO YOU edit line below so that the index is correctly evaluated */ i = blockDim.x * blockIdx.x + threadIdx.x; j = blockDim.y * blockIdx.y + threadIdx.y; index = i + j * pitch / sizeof(float); if ((i < nx) && (j < ny)) mout[index] = min1[index] + min2[index]; } /* extern "C" below is because this file follows C++ linking conventions */ /* whereas the companion C file (addition.c) follows C linking conventions */ /* which are different */ extern "C" void chrono (int kind, float *time); extern "C" float AddOnGpu(float* mat_out, float *mat_in1, float *mat_in2,int nx, int ny) { size_t pitch; /* Same pitch for all matrices, since they all have same size */ /* Matrix allocation on device */ float *mat_out_gpu, *mat_in1_gpu, *mat_in2_gpu; /* UP TO YOU : do the allocation below, using cudaMallocPitch ()*/ cudaMallocPitch (&mat_out_gpu, &pitch, sizeof(float) * nx, ny); cudaMallocPitch (&mat_in1_gpu, &pitch, sizeof(float) * nx, ny); cudaMallocPitch (&mat_in2_gpu, &pitch, sizeof(float) * nx, ny); /* The arguments mat_in1 and mat_in2 passed above are on the host. 
*/ /* UP TO YOU : write below the instructions to copy it to the device */ /* You'll need to google the function cudaMemcpy2D () */ cudaMemcpy2D(mat_in1_gpu,pitch,mat_in1,nx*sizeof(float),nx*sizeof(float),ny,cudaMemcpyDeviceToHost); cudaMemcpy2D(mat_in2_gpu,pitch,mat_in2,nx*sizeof(float),nx*sizeof(float),ny,cudaMemcpyDeviceToHost); /* Grid topology below */ /* A block is BLOCK_X threads wide by BLOCK_Y threads high */ dim3 block (BLOCK_X, BLOCK_Y); /* UP TO YOU : complete the number of blocks below */ int n1 = (nx+BLOCK_X-1)/BLOCK_X; int n2 = (nx+BLOCK_Y-1)/BLOCK_Y;; dim3 grid (n1,n2); int count; float time; chrono (START, &time); /* UP TO YOU : kernel invocation */ for (count = 0;count < MANY; count++){ kernadd <<< grid,block >>> (mat_out_gpu, mat_in1_gpu,mat_in2_gpu,nx,ny,pitch); cudaThreadSynchronize(); } chrono (STOP, &time); /* We now transfer back the matrix from the device to the host */ /* UP TO YOU : write cudaMemcpy2D () instruction below */ cudaMemcpy2D (mat_out,sizeof(float)*nx,mat_out_gpu,pitch,nx*sizeof(float),ny,cudaMemcpyDeviceToHost); /* free memory */ cudaFree(mat_out_gpu); cudaFree(mat_in1_gpu); cudaFree(mat_in2_gpu); return time/float(MANY); }
4,566
#include <stdio.h> #define STREAM_COUNT 3 #define ELEMENT_COUNT 300000 __global__ void kernel0() { float sum = 0.0; for (int x = 0; x < ELEMENT_COUNT; x++) { for (int y = 0; y < ELEMENT_COUNT; y++) { sum = sum + tan(0.1) * tan(0.1); } } } __global__ void kernel1() { float sum = 0.0; for (int x = 0; x < ELEMENT_COUNT; x++) { for (int y = 0; y < ELEMENT_COUNT; y++) { sum = sum + tan(0.1) * tan(0.1); } } } __global__ void kernel2() { float sum = 0.0; for (int x = 0; x < ELEMENT_COUNT; x++) { for (int y = 0; y < ELEMENT_COUNT; y++) { sum = sum + tan(0.1) * tan(0.1); } } } void warmUp(dim3 block, dim3 grid) { kernel0<<<grid,block>>>(); kernel1<<<grid,block>>>(); kernel2<<<grid,block>>>(); } void createArraysNullStream(dim3 block, dim3 grid) { } void createArrayNonNullStream(dim3 block, dim3 grid) { cudaStream_t* streams = (cudaStream_t*)malloc(STREAM_COUNT*sizeof(cudaStream_t)); for (int x = 0; x < STREAM_COUNT; x++) { cudaStreamCreate(&streams[x]); } for (int x = 0; x < STREAM_COUNT; x++) { kernel0<<<grid,block,0,streams[x]>>>(); kernel1<<<grid,block,0,streams[x]>>>(); kernel2<<<grid,block,0,streams[x]>>>(); } for (int x = 0; x < STREAM_COUNT; x++) { cudaStreamDestroy(streams[x]); } } int main(void) { dim3 block(1); dim3 grid(1); warmUp(block, grid); createArraysNullStream(block, grid); createArrayNonNullStream(block, grid); cudaDeviceReset(); }
4,567
#include <math.h> #include <stdio.h> #include <cuda_runtime.h> #define PI 3.141592 /* * Return number of hyperparameters */ __device__ int retNumParams(){ return 4; } /* * Compute the covariance matrix in column oriented vector format using X * The output matrix is upper triangular * Output matrix formula k(x,z) = sf^2 * exp( -2*sin^2( pi*||x-z||/p )/ell^2 ) */ __device__ void covMat(double *X, int nX, double *cM, double *lh){ double l = exp(lh[0]); double sf2 = exp(2 * lh[1]); double p = exp(lh[2]); double sn2 = exp(2 * lh[3]); int i, j; for(i = 0; i < nX; i++){ for(j = i; j < nX; j++){ double _int = fabs(X[i] - X[j]); cM[i + j * nX] = sf2 * exp(-2 * pow(sin((PI * _int) / p), 2) / pow(l, 2)); if(j == i) cM[i + j * nX] += sn2; } } } /* * Compute the covariance matrix in column oriented vector format using X * The output matrix is upper triangular * Output matrix formula k(x,z) = sf^2 * exp( -2*sin^2( pi*||x-z||/p )/ell^2 ) * same as above except doesn't add sn2 to main diagonal * Divides all elements by sn2 (when noise is high) and then adds an identity matrix */ __device__ void covMatSp(double *X, int nX, double *cM, double *lh){ double l = exp(lh[0]); double sf2 = exp(2 * lh[1]); double p = exp(lh[2]); double sn2 = exp(2 * lh[3]); for(int i = 0; i < nX; i++) { for(int j = i; j < nX; j++) { double _int = fabs(X[i] - X[j]); cM[i + j * nX] = sf2 * exp(-2 * pow(sin((PI * _int) / p), 2) / pow(l, 2)) / sn2; if (j == i) { cM[i + j * nX] += 1; } } } } /* * Compute the derivative of the covariance matrix in column oriented vector format using X with respect to hyperparameter #n * The output matrix is upper triangular */ __device__ void dcovMat(double *X, int nX, double *dcM, double *lh, int n){ double l = exp(lh[0]); double sf2 = exp(2*lh[1]); double p = exp(lh[2]); double sn2 = exp(2*lh[3]); int i, j; double tmp100; for(i = 0; i < nX; i++) { for(j = i; j < nX; j++) { double _int = fabs(X[i]-X[j]); switch(n){ case 1://log(l) dcM[i+j*nX] = 
(sf2*exp(-2*pow(sin((PI*_int)/p),2)/pow(l,2)))*4*(pow(sin((PI*_int)/p),2)/pow(l,2)); break; case 2://log(sf) dcM[i+j*nX] = 2*sf2*exp(-2*pow(sin((PI*_int)/p),2)/pow(l,2)); break; case 3://log(p) // dcM[i+j*nX] = sf2*exp(-2*pow(sin((PI*_int)/p),2)/pow(l,2))*(4*sin((PI*_int)/p))*((PI*_int)/(pow(p,2)*pow(l,2)))*(cos((PI*_int)/p)); tmp100 = (PI*_int)/p; dcM[i+j*nX]= ((sf2*4*tmp100)/(pow(l,2)))*exp((-2*pow(sin(tmp100),2))/pow(l,2))*sin(tmp100)*cos(tmp100); break; case 4://log(sn) if(i == j) dcM[i+j*nX] = 2*sn2; else dcM[i+j*nX] = 0; break; default: return; } } } } /* * Compute element wise cross-covariance matrix between a vector of training observations, X and a vector test observations, Y. * The output matrix cM is a nX x nY matrix in column oriented format */ __device__ void covMatCross(double *X, double *Y, int nX, int nY, double *cM, double *lh){ double l = exp(lh[0]); double sf2 = exp(2 * lh[1]); double p = exp(lh[2]); int i, j; for(j = 0; j < nY; j++) { for(i = 0; i < nX; i++) { double _int = fabs(X[i] - Y[j]); cM[i + j * nX] = sf2 * exp(-2 * pow(sin((PI * _int) / p), 2) / pow(l, 2)); } } }
4,568
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { comp = var_5 - (+1.4273E19f + -0.0f / (+1.8916E-37f / var_6)); float tmp_1 = -1.8916E36f - var_7 - ceilf((var_8 * (var_9 - ceilf((-1.5710E35f * -1.3049E-42f - (-1.5032E-43f + cosf(var_10 / (+1.1798E35f - var_11 * (var_12 / -0.0f))))))))); comp += tmp_1 / var_13 / sqrtf(atan2f(-1.3663E-36f, -1.5190E-37f - var_14 + (var_15 - (var_16 * +0.0f)))); comp += (var_17 / -1.0008E14f * (+1.8691E-37f - +1.2479E6f + (+1.4507E-35f / var_18))); if (comp >= +1.3965E-35f - sinf((-1.0602E-11f / (var_19 - var_20)))) { comp += (var_21 / (+1.9612E-37f / +1.6092E35f)); comp = (+1.0689E-24f * log10f(+1.1543E-44f)); float tmp_2 = (var_22 / -1.7166E-44f); comp = tmp_2 / (var_23 * +1.5281E-11f * tanhf(-1.2234E-36f * (var_24 + (var_25 - var_26)))); } for (int i=0; i < var_3; ++i) { comp += +0.0f - -1.1859E36f * (+0.0f - +1.5805E-43f); } for (int i=0; i < var_4; ++i) { comp = (var_27 + -1.4677E-35f); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); int tmp_5 = atoi(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = 
atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28); cudaDeviceSynchronize(); return 0; }
4,569
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

/* Abort with a readable message if a CUDA runtime call fails.
 * Fix: the original checked no CUDA return codes, so allocation or copy
 * failures would silently produce garbage output. */
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Element-wise vector add: one block per element (launched <<<N,1>>>),
// so blockIdx.x is the element index and no bounds guard is required
// as long as gridDim.x == element count.
__global__ void add(int *a, int *b, int *c)
{
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

// main
#define N 512
int main(void)
{
    int *a, *b, *c;          // host buffers
    int *d_a, *d_b, *d_c;    // device buffers
    int size = N * sizeof(int);
    int i;

    // Allocate memory in Host
    a = (int *) malloc (size);
    b = (int *) malloc (size);
    c = (int *) malloc (size);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Allocate memory in Device
    CUDA_CHECK(cudaMalloc ((void **) &d_a, size));
    CUDA_CHECK(cudaMalloc ((void **) &d_b, size));
    CUDA_CHECK(cudaMalloc ((void **) &d_c, size));

    // Initialize values (0 - 9)
    for(i = 0;i < N; i++)
    {
        a[i] = rand() % 10;
        b[i] = rand() % 10;
    }

    // Copy data from Host to Device
    CUDA_CHECK(cudaMemcpy (d_a, a, size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy (d_b, b, size, cudaMemcpyHostToDevice));

    // Execute — check for launch-configuration errors immediately
    add<<<N,1>>>(d_a, d_b, d_c);
    CUDA_CHECK(cudaGetLastError());

    // Copy result back to Host
    // Take note that cudaMemcpy is blocking and will be smart enough to wait
    // until the task at device completed
    CUDA_CHECK(cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost));

    // Display the outcome
    for(i=0;i<N;i++)
    {
        printf("[%3d]\t%2d + %2d = %2d\n", i, a[i], b[i], c[i]);
    }

    // Clean up at Host
    free (a);
    free (b);
    free (c);

    // Clean up at Device
    CUDA_CHECK(cudaFree (d_a));
    CUDA_CHECK(cudaFree (d_b));
    CUDA_CHECK(cudaFree (d_c));

    return 0;
}
4,570
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <iostream>

// Matrix multiplication (square, row-major):
// 1 2 * 5 6 = (1*5+2*7) (1*6+2*8)
// 3 4   7 8   (3*5+4*7) (3*6+4*8)

/* Abort with a readable message if a CUDA runtime call fails.
 * Fix: the original checked no CUDA return codes at all. */
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// One thread per output element. Fix: added a bounds guard so the kernel is
// also correct when `size` is not a multiple of the block dimension (the
// original relied on 1024 % 16 == 0; out-of-range threads would have written
// past the buffers for other sizes).
__global__ void matrixMul(int* m, int* n, int* p, int size)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= size || column >= size) return;

    int p_sum = 0;
    for (int i = 0; i < size; ++i)
    {
        p_sum += m[row * size + i] * n[i * size + column];
    }
    p[row * size + column] = p_sum;
}

// Sequential CPU reference; accumulates into p, so p must be zero-initialized.
void matrixMul_seq(int* m, int* n, int* p, int size)
{
    for (int i = 0; i < size; ++i)
    {
        for (int j = 0; j < size; ++j)
        {
            for (int k = 0; k < size; ++k)
            {
                p[i * size + j] += m[i * size + k] * n[k * size + j];
            }
        }
    }
}

int main()
{
    // C/C++ arrays are row-major; all matrices below are indexed as [i*n + j].
    int n = 1 << 10; // == 1024 or 2^10
    printf("Square matrix of size %d\n", n);

    // n*m = p
    // Host matrices
    int* h_m;
    int* h_n;
    int* h_p;
    int* h_p_seq;

    // Device matrices
    int* d_m;
    int* d_n;
    int* d_p;

    size_t bytes = n * n * sizeof(int);

    // Allocate memory on host side
    h_m = (int*)malloc(bytes);
    h_n = (int*)malloc(bytes);
    h_p = (int*)malloc(bytes);
    h_p_seq = (int*)malloc(bytes);
    if (!h_m || !h_n || !h_p || !h_p_seq) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Initialize matrices; the CPU reference accumulates, so zero h_p_seq.
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            h_m[i * n + j] = rand() % 1024;
            h_n[i * n + j] = rand() % 1024;
            h_p_seq[i * n + j] = 0;
        }
    }

    // Allocate memory on device side
    CUDA_CHECK(cudaMalloc(&d_m, bytes));
    CUDA_CHECK(cudaMalloc(&d_n, bytes));
    CUDA_CHECK(cudaMalloc(&d_p, bytes));

    // Copy data to the device
    CUDA_CHECK(cudaMemcpy(d_m, h_m, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_n, h_n, bytes, cudaMemcpyHostToDevice));

    int threads_per_block = 16;
    dim3 block_size(threads_per_block, threads_per_block);
    dim3 grid_size(n / block_size.x, n / block_size.y);
    printf("Grid size X: %d, Grid size Y: %d\n", grid_size.x, grid_size.y);
    printf("block_size X: %d, block_size Y: %d\n", block_size.x, block_size.y);

    // Benchmark phase (GPU)
    auto start = std::chrono::steady_clock::now();
    matrixMul<<<grid_size, block_size>>> (d_m, d_n, d_p, n);
    CUDA_CHECK(cudaGetLastError());        // catch launch-config errors
    CUDA_CHECK(cudaDeviceSynchronize());   // wait so the timing is meaningful
    auto end = std::chrono::steady_clock::now();
    auto usecs = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::milliseconds::period>>(end - start);
    std::cout << "Elapsed time: " << usecs.count() << " ms." << std::endl;

    // Benchmark phase (CPU reference)
    start = std::chrono::steady_clock::now();
    matrixMul_seq(h_m, h_n, h_p_seq, n);
    end = std::chrono::steady_clock::now();
    usecs = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::milliseconds::period>>(end - start);
    std::cout << "Elapsed time: " << usecs.count() << " ms." << std::endl;

    CUDA_CHECK(cudaMemcpy(h_p, d_p, bytes, cudaMemcpyDeviceToHost));

    // Verify GPU result against the CPU reference
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            if(h_p[n * i + j] != h_p_seq[n * i + j])
            {
                printf("ERROR!\n");
                printf("h_p: %d, h_p_seq: %d\n", h_p[n * i + j], h_p_seq[n * i + j]);
            }
        }
    }

    free(h_m);
    free(h_n);
    free(h_p);
    free(h_p_seq);

    CUDA_CHECK(cudaFree(d_m));
    CUDA_CHECK(cudaFree(d_n));
    CUDA_CHECK(cudaFree(d_p));

    return 0;
}
4,571
#include "includes.h"

// Max-pooling forward over per-point neighbour lists.
// Grid-stride over batches (blockIdx.x) and block-stride over the M*C
// (point, channel) pairs (threadIdx.x). For each output point m, the max of
// the input over its nnCount[b*M+m] neighbours (indices in nnIndex) is
// written to output, and the index of the winning input point to maxIndex.
// Entries with an empty neighbour list are left untouched, exactly as in the
// original implementation.
__global__ void max_pool3d_forward(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount, const float* input, float* output, int* maxIndex)
{
    for (int b = blockIdx.x; b < B; b += gridDim.x)
    {
        for (int mc = threadIdx.x; mc < M * C; mc += blockDim.x)
        {
            const int point   = mc / C;
            const int channel = mc % C;
            const int count   = nnCount[b * M + point];
            if (count <= 0) continue;  // no neighbours: do not write

            // Seed the running max with the first neighbour, then scan the rest.
            const int* nbrs = nnIndex + b * M * K + point * K;
            int   bestIdx = nbrs[0];
            float bestVal = input[b * N * C + bestIdx * C + channel];
            for (int k = 1; k < count; k++)
            {
                const int   cand = nbrs[k];
                const float val  = input[b * N * C + cand * C + channel];
                if (val > bestVal)
                {
                    bestVal = val;
                    bestIdx = cand;
                }
            }

            output[b * M * C + mc]   = bestVal;
            maxIndex[b * M * C + mc] = bestIdx;
        }
    }
}
4,572
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#define ITER 4000  /* default number of tracking iterations (overridable via -i=) */

/* One transfer-map component: length terms, each A[i] * x^x[i] * dx^dx[i] * ... */
typedef struct Map{
    int length;     /* number of polynomial terms */
    double *A;      /* term coefficients */
    int *x; int *dx; int *y; int *dy; int *delta; int *phi;  /* per-term exponents */
}Map;

/* Per-particle phase-space history: one array of `length` iterations per dimension. */
typedef struct Coefs{
    int length;
    double *x; double *dx; double *y; double *dy; double *delta; double *phi;
}Coefs;

/* Beam/particle constants read from the map-file header. */
typedef struct Vars{
    double mass; double momentum; double kinEn; double gamma; double beta;
    double mAnomalyG; double spinTuneGgamma; double lRefOrbit;
}Vars;

/* Count the number of term lines of the next map section of fp into *size.
 * The section either starts with the literal header line below, followed by
 * term lines up to a "------" separator, or is the "ALL COMPONENTS ZERO"
 * marker (then *size = 1, for a single identity term). Any other line
 * terminates the program.
 * NOTE(review): the fgets result inside the for-condition is not NULL-checked;
 * a truncated file would crash in strstr. */
void scanFile(FILE* fp, int *size){
    char* line = (char*)malloc(200*sizeof(char));
    line = fgets(line, 200, fp);
    if(strncmp(line, " I COEFFICIENT ORDER EXPONENTS", 46)!=0){
        if(strncmp(line, " ALL COMPONENTS ZERO ", 25)!=0){
            exit(EXIT_FAILURE);
        }else{
            *size=1;
        }
    }else{
        /* count lines until the dashed separator */
        for((*size)=0;!strstr((line = fgets(line, 200, fp)), "------");(*size)++);
    }
    free(line);
}

/* Allocate (zeroed) storage for p terms of map component m; p <= 0 leaves the
 * pointers unallocated (freeMap skips them via length). */
void mallocMap(Map *m, int p){
    (*m).length = p;
    if(p>0){
        (*m).A = (double*)calloc(p,sizeof(double));
        (*m).x = (int*)calloc(p,sizeof(int));
        (*m).dx = (int*)calloc(p,sizeof(int));
        (*m).y = (int*)calloc(p,sizeof(int));
        (*m).dy = (int*)calloc(p,sizeof(int));
        (*m).delta = (int*)calloc(p,sizeof(int));
        (*m).phi = (int*)calloc(p,sizeof(int));
    }
}

/* Release the arrays allocated by mallocMap (no-op for empty maps). */
void freeMap(Map *m){
    if((*m).length > 0){
        free((*m).A); free((*m).x); free((*m).dx); free((*m).y);
        free((*m).dy); free((*m).delta); free((*m).phi);
    }
}

/* Allocate p particles, each with zeroed per-dimension histories of `iter`
 * entries. Does nothing when iter <= 0. */
void mallocCoefs(Coefs **c, int iter, int p){
    if(iter>0){
        int i;
        (*c) = (Coefs*) malloc(p*sizeof(Coefs));
        for(i=0;i<p;i++){
            (*c)[i].length = iter;
            (*c)[i].x = (double*)calloc(iter,sizeof(double));
            (*c)[i].dx = (double*)calloc(iter,sizeof(double));
            (*c)[i].y = (double*)calloc(iter,sizeof(double));
            (*c)[i].dy = (double*)calloc(iter,sizeof(double));
            (*c)[i].delta = (double*)calloc(iter,sizeof(double));
            (*c)[i].phi = (double*)calloc(iter,sizeof(double));
        }
    }
}

/* Release everything allocated by mallocCoefs.
 * NOTE(review): uses (*c)[0].length as the "was allocated" flag, mirroring the
 * iter>0 guard in mallocCoefs. */
void freeCoefs(Coefs **c, int p){
    if((*c)[0].length > 0){
        int i;
        for(i=0;i<p;i++){
            free((*c)[i].x); free((*c)[i].dx); free((*c)[i].y);
            free((*c)[i].dy); free((*c)[i].delta); free((*c)[i].phi);
        }
        free(*c);
    }
}
/* Read one map component (nr = 0..5 selects x,dx,y,dy,delta,phi) from fp into m.
 * A section may instead be the "ALL COMPONENTS ZERO" marker, in which case the
 * component becomes the identity term for dimension nr. Term lines are parsed
 * until the dashed separator.
 * NOTE(review): the same NULL-unchecked fgets pattern as scanFile applies here. */
void readMap(FILE *fp, Map *m, int nr){
    char* line = (char*)malloc(200*sizeof(char));
    line = fgets(line, 200, fp);
    int dum1, dum2;
    if(strncmp(line, " I COEFFICIENT ORDER EXPONENTS", 46)!=0){
        if(strncmp(line, " ALL COMPONENTS ZERO ", 25)!=0){
            exit(EXIT_FAILURE);
        } else{
            /* identity map: coefficient 1.0 on this component's own dimension */
            (*m).A[0] = 1.0;
            (*m).x[0] = nr == 0?1:0;
            (*m).dx[0] = nr == 1?1:0;
            (*m).y[0] = nr == 2?1:0;
            (*m).dy[0] = nr == 3?1:0;
            (*m).delta[0] = nr == 4?1:0;
            (*m).phi[0] = nr == 5?1:0;
        }
    }
    for(int i=0;!strstr((line = fgets(line, 200, fp)), "------");i++){
        //TODO: read as chars instead of ints
        sscanf(line,"%d %lf %d %d %d %d %d %d %d", &dum1, &((*m).A[i]), &dum2, &((*m).x[i]), &((*m).dx[i]), &((*m).y[i]), &((*m).dy[i]), &((*m).delta[i]), &((*m).phi[i]) );
    }
    free(line);
}

/* Parse the fixed header of the map file into v; each line must match its
 * expected format exactly or the program exits. The final two checks only
 * verify the first character of the "P"/"A" header lines. */
void readVars(FILE *fp, Vars *v){
    char* line = (char*)malloc(200*sizeof(char));
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store muon mass\n");
    if (sscanf(line,"Muon Mass = %lf MeV/c^2",&((*v).mass)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store muon momentum\n");
    if (sscanf(line,"Muon Momentum = %lf MeV/c",&((*v).momentum)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store muon kin energy\n");
    if (sscanf(line,"Muon Kinetic Energy = %lf MeV",&((*v).kinEn)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store Muon gamma\n");
    if (sscanf(line,"Muon gamma = %lf",&((*v).gamma)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store Muon beta\n");
    if (sscanf(line,"Muon beta = %lf",&((*v).beta)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store Muon Anomaly G\n");
    if (sscanf(line,"Muon Anomaly G = %lf",&((*v).mAnomalyG)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    fprintf(stderr, "check and store Muon Spin Tune G.gamma\n");
    if (sscanf(line,"Muon Spin Tune G.gamma = %lf",&((*v).spinTuneGgamma)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    if (sscanf(line," L %lf",&((*v).lRefOrbit)) != 1) exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    if (line[1] !='P') exit(EXIT_FAILURE);
    line = fgets(line, 200, fp);
    if (line[1] !='A') exit(EXIT_FAILURE);
    free(line);
}

/* One pass of pairwise summation: fold nums[index+interval/2] into nums[index]
 * for every index that is a multiple of interval. */
void sumArrayHelper(double *nums, int length, int interval){
    int index = 0;
    int next = interval/2;
    do{
        if(next < length){
            nums[index] += nums[next];
        }
        index += interval;
        next += interval;
    } while (index < length);
}

/* Pairwise (tree) summation of nums[0..length-1]; destroys nums and returns
 * the total in nums[0]. Pairwise order reduces floating-point error vs. a
 * naive left-to-right sum. Returns 0 for empty input. */
double sumArray(double *nums, int length){
    if(length <= 0){
        return 0;
    }
    int interval = 2;
    while(interval < length*2){
        sumArrayHelper(nums, length, interval);
        interval *= 2;
    }
    return nums[0];
}

/* Count non-empty lines of the coefficients file into *count (one particle
 * per line). Exits on open failure or read error. */
void scanCoefs(char *fileName, int *count){
    char* line = (char*)malloc(200*sizeof(char));
    FILE *fp = fopen(fileName, "r");
    if( fp == NULL ){
        fprintf(stderr, "Error while opening the coefficients file: %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    for((*count)=0;fgets(line, 200, fp) != NULL;(*count)++){
        if( strncmp(line, "\n", 1)==0 || strncmp(line, "\0", 1)==0){
            (*count)--;  /* skip blank lines */
        }
    }
    if(!feof(fp)){
        fprintf(stderr, "Something was wrong with the coefficient file!");
        exit(EXIT_FAILURE);
    }
    fclose(fp);
    free(line);
}

/* Interactively read the 6 initial phase-space values for a single particle
 * into iteration 0 of c. */
void getCoefs(Coefs *c){
    fprintf(stderr, "Begin values of the 6 dimentions: ");
    scanf("%lf %lf %lf %lf %lf %lf", &((*c).x[0]), &((*c).dx[0]), &((*c).y[0]), &((*c).dy[0]), &((*c).delta[0]), &((*c).phi[0]) );
}

/* Read the initial values for `count` particles (one line of 6 doubles each)
 * from the coefficients file into iteration 0 of each Coefs entry. */
void readCoefs(Coefs **c, char *fileName, int count){
    FILE *fp = fopen(fileName, "r");
    if( fp == NULL ){
        fprintf(stderr, "Error while opening the coefficients file: %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    int i;
    for(i=0;i<count;i++){
        fscanf(fp, "%lf %lf %lf %lf %lf %lf", &((*c)[i].x[0]), &((*c)[i].dx[0]), &((*c)[i].y[0]), &((*c)[i].dy[0]), &((*c)[i].delta[0]), &((*c)[i].phi[0]) );
    }
    fclose(fp);
}

/* Evaluate map component m at iteration idx of particle c: each term is
 * A[i] * product over the 6 dimensions of (value ^ exponent); terms are
 * combined with the pairwise sumArray and stored in *newValue. */
void calcCoefs(Coefs *c, int idx, Map *m, double *newValue){
    double *nums = (double*)calloc((*m).length,sizeof(double));
    for(int i = 0; i < (*m).length; i++) {
        nums[i] = (*m).A[i] * pow((*c).x[idx],(*m).x[i]) * pow((*c).dx[idx],(*m).dx[i]) * pow((*c).y[idx],(*m).y[i]) * pow((*c).dy[idx],(*m).dy[i]) * pow((*c).delta[idx],(*m).delta[i]) * pow((*c).phi[idx],(*m).phi[i]);
    }
    *newValue = sumArray(nums, (*m).length);
    free(nums);
}

/* Tracking driver: parse CLI options, read the transfer map and initial
 * particle coordinates, iterate the map `iter` times per particle, and write
 * the 6-dimensional trajectory to stdout or file(s). */
int main(int argc, char **argv){
    char fileName[200] = "";
    char coefsFileName[200] = "";
    char outputFileName[200] = "";
    int separateFiles = 0;
    int xSize, dxSize, ySize, dySize, deltaSize, phiSize, argcCounter, particleCount = 1, iter = ITER;
    Map x, dx, y, dy, delta, phi;
    Coefs *c;
    Vars v;

    // Read the program arguments
    argcCounter = argc;
    while ((argcCounter > 1) && (argv[1][0] == '-')) {
        if(argv[1][2] == '=' && argv[1][3] != '\0'){
            /* key=value options */
            switch (argv[1][1]){
                case 'm': sprintf(fileName, "%s",&argv[1][3]); break;
                case 'c': sprintf(coefsFileName ,"%s",&argv[1][3]); break;
                case 'o': sprintf(outputFileName ,"%s",&argv[1][3]); break;
                case 'i': sscanf(&argv[1][3] ,"%d", &iter); break;
                default:
                    fprintf(stderr, "Wrong Argument: %s\n", argv[1]);
                    exit(EXIT_FAILURE);
            }
        }else{
            /* flag options; '-' case deliberately falls through to 'h' for "--help" */
            switch (argv[1][1]){
                case 's': separateFiles = 1; break;
                case '-':
                    if(!strstr(&argv[1][2], "help") || argv[1][6] != '\0'){
                        fprintf(stderr, "Wrong Argument: %s\n", argv[1]);
                        exit(EXIT_FAILURE);
                    }
                case 'h':
                    if(strstr(&argv[1][2], "help") || argv[1][2] == '\0'){
                        printf("Calculates a certain amount of steps of a charged particle in a inhomogeneous\nmagnetic field.\n\n");
                        printf("<executable> (-h|--help) | <executable> [-m=<mapFileName>] [-c=<coeffFileName>]\n[-o=<outputFileName> [-s]] [-i=<nr>]\n\n");
                        printf("-h, --help\t\t Display help\n");
                        printf("-m=<mapFileName>\t Set the map file to be <mapFileName>. If not set, it\n\t\t\t will be asked for in the program itself.\n");
                        printf("-c=<coeffFileName>\t Set the coefficients file to be <coeffFileName>. If\n\t\t\t not set, it will be asked for in the program itself.\n\t\t\t Note that the coefficients file supports multiple\n\t\t\t particles, while if the program is run without this\n\t\t\t file, it supports only one particle.\n");
                        printf("-o=<outputFileName>\t Set the output file to be <outputFileName>. If not\n\t\t\t set, it will default to stdout\n");
                        printf("-s\t\t\t Choose if you want one output file or (if applicable)\n\t\t\t multiple output files. Note that this parameter can\n\t\t\t only be set if an output file is set.\n");
                        printf("-i=<nr>\t\t\t Set the number of iterations to <nr>. If not set, it\n\t\t\t will default to 4000.\n");
                        exit(EXIT_SUCCESS);
                    }else{
                        fprintf(stderr, "Wrong Argument: %s\n", argv[1]);
                        exit(EXIT_FAILURE);
                    }
                    break;
                default:
                    fprintf(stderr, "Wrong Argument: %s\n", argv[1]);
                    exit(EXIT_FAILURE);
            }
        }
        ++argv; --argcCounter;
    }

    if(separateFiles == 1 && strncmp(outputFileName, "\0", 1)==0){
        fprintf(stderr, "-s shouldn't be used without setting an output file");
        exit(EXIT_FAILURE);
    }

    // if not set in argument, ask for file name of the map file
    if(strncmp(fileName, "\0", 1)==0){
        fprintf(stderr, "Filename of the map: ");
        scanf("%s", fileName);
    }

    // use the map file to gather the sizes of the 6 coefficients
    fprintf(stderr, "open file\n");
    FILE *scanFileP = fopen(fileName, "r");
    fprintf(stderr, "check if file is NULL\n");
    if( scanFileP == NULL ){
        fprintf(stderr, "Error while opening the map file: %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    fprintf(stderr, "Get map sizes\n");
    char* line = (char*)malloc(200*sizeof(char));
    /* skip the header until the " A " marker line */
    do{
        line = fgets(line, 200, scanFileP);
    }while(!strstr(line, " A "));
    free(line);
    scanFile(scanFileP, &xSize);
    scanFile(scanFileP, &dxSize);
    scanFile(scanFileP, &ySize);
    scanFile(scanFileP, &dySize);
    scanFile(scanFileP, &deltaSize);
    scanFile(scanFileP, &phiSize);
    fclose(scanFileP);
    fprintf(stderr, "\nmap sizes: %d %d %d %d %d %d\n", xSize, dxSize, ySize, dySize, deltaSize, phiSize);

    // allocate memory for the map
    fprintf(stderr, "\nmap x\n");     mallocMap(&x, xSize);
    fprintf(stderr, "map dx\n");      mallocMap(&dx, dxSize);
    fprintf(stderr, "map y\n");       mallocMap(&y, ySize);
    fprintf(stderr, "map dy\n");      mallocMap(&dy, dySize);
    fprintf(stderr, "map delta\n");   mallocMap(&delta, deltaSize);
    fprintf(stderr, "map phi\n");     mallocMap(&phi, phiSize);

    // read some variables and the map lines from the map file (second pass)
    fprintf(stderr, "open file\n");
    FILE *mapFileP = fopen(fileName, "r");
    fprintf(stderr, "check if file is NULL\n");
    if( mapFileP == NULL ){
        fprintf(stderr, "Error while opening the map file: %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    fprintf(stderr, "read vars");     readVars(mapFileP,&v);
    fprintf(stderr, "read x\n");      readMap(mapFileP, &x, 0);
    fprintf(stderr, "read dx\n");     readMap(mapFileP, &dx, 1);
    fprintf(stderr, "read y\n");      readMap(mapFileP, &y, 2);
    fprintf(stderr, "read dy\n");     readMap(mapFileP, &dy, 3);
    fprintf(stderr, "read delta\n");  readMap(mapFileP, &delta, 4);
    fprintf(stderr, "read phi\n");    readMap(mapFileP, &phi, 5);
    fclose(mapFileP);

    // allocate memory for the coefficients, then read the initial values
    // either from user input (single particle) or from the coefficients file
    if(strncmp(coefsFileName, "\0", 1)==0){
        fprintf(stderr, "map coefs\n");
        mallocCoefs(&c, iter, particleCount);
        fprintf(stderr, "read coefs\n");
        getCoefs(c);
    }else{
        scanCoefs(coefsFileName, &particleCount);
        fprintf(stderr, "map coefs\n");
        mallocCoefs(&c, iter, particleCount);
        fprintf(stderr, "read coefs\n");
        readCoefs(&c, coefsFileName, particleCount);
        fprintf(stderr, "Particle count: %d\n", particleCount);
    }

    // apply the map iter-1 times per particle (iteration 0 holds the input)
    for(int n = 0;n < particleCount;n++){
        for(int i = 0;i < iter-1;i++){
            calcCoefs(&c[n], i, &x, &(c[n].x[i+1]));
            calcCoefs(&c[n], i, &dx, &(c[n].dx[i+1]));
            calcCoefs(&c[n], i, &y, &(c[n].y[i+1]));
            calcCoefs(&c[n], i, &dy, &(c[n].dy[i+1]));
            calcCoefs(&c[n], i, &delta, &(c[n].delta[i+1]));
            calcCoefs(&c[n], i, &phi, &(c[n].phi[i+1]));
        }

        // show or save the coefficients
        FILE* outputFile;
        char fullOutputFileName[200] = "";
        /* NOTE: "!strncmp(...)==0" parses as "(!strncmp(...)) == 0", i.e. true
         * when the name is NON-empty — confusing but correct here. */
        if(!strncmp(outputFileName, "\0", 1)==0){
            if(separateFiles==1){
                sprintf(fullOutputFileName, "part%09d.%s", n+1, outputFileName);
            }else{
                sprintf(fullOutputFileName, "%s", outputFileName);
            }
            /* first particle truncates; later particles append unless writing
             * one file per particle */
            if(n==0){
                outputFile = fopen(fullOutputFileName, "w");
            }else{
                if(separateFiles==1){
                    outputFile = fopen(fullOutputFileName, "w");
                }else{
                    outputFile = fopen(fullOutputFileName, "a");
                }
            }
            if( outputFile == NULL ){
                fprintf(stderr, "Error while opening the output file: %s\n", fullOutputFileName);
                exit(EXIT_FAILURE);
            }
        }else{
            outputFile = stdout;
        }
        for(int i = 0;i < iter;i++){
            fprintf(outputFile, "%10.7f %10.7f %10.7f %10.7f %10.7f %10.7f\n", c[n].x[i], c[n].dx[i], c[n].y[i], c[n].dy[i], c[n].delta[i], c[n].phi[i]);
        }
        fprintf(outputFile, "\n");
        if(!strncmp(outputFileName, "\0", 1)==0){
            fclose(outputFile);
        }
    }

    // clean up the heap and tell that the computation is finished
    freeMap(&x); freeMap(&dx); freeMap(&y); freeMap(&dy); freeMap(&delta); freeMap(&phi);
    freeCoefs(&c, particleCount);
    fprintf(stderr, "Output is created. Press Enter to continue...\n");
    getchar();
    if(strncmp(coefsFileName, "\0", 1)==0){
        getchar();  /* consume the newline left by the interactive scanf */
    }
    return 0;
}
4,573
#include "constants.cuh"
#include "mandelbrot.cuh"

/**
 * Translate the coordinates from the block coordinates into their real and imaginary values in the zoom space
 * Launched on a SIZE x SIZE grid; one block per pixel.
 * @param r Data table for coordinate storage (real parts)
 * @param i Data table for coordinate storage (imaginary parts)
 */
__global__ void translate( double *r, double *i, const double width, const double height, const double offset_r, const double offset_i ) {
    /* pixel (x, y) stored at index y*SIZE + x; real varies with x, imag with y */
    r[blockIdx.y * SIZE + blockIdx.x] = (double(blockIdx.x) * width / double(SIZE)) - (width / 2.) + offset_r;
    i[blockIdx.y * SIZE + blockIdx.x] = (double(blockIdx.y) * height / double(SIZE)) - (height / 2.) + offset_i;
}

/*
 * Compute how much iterations are needed to escape the mandelbrot set
 * (escape radius 4, i.e. |z|^2 > 16, capped at `max` iterations).
 * NOTE(review): indexes r/i as [blockIdx.x*SIZE + blockIdx.y] while translate
 * writes [blockIdx.y*SIZE + blockIdx.x] — a transposed read. colorize uses the
 * same transposed index, so the pipeline is self-consistent but the rendered
 * image is transposed relative to translate's axes; confirm this is intended.
 */
__global__ void mandelbrot(int *iteration_table, const double *r, const double *i, const int max) {
    // Retrieve the coordinate from the pixel map
    double c_r = r[blockIdx.x * SIZE + blockIdx.y], c_i = i[blockIdx.x * SIZE + blockIdx.y];
    double temp_z_r, z_r = c_r;
    double temp_z_i, z_i = c_i;
    int iteration;
    for (iteration = 0; iteration < max && (z_r * z_r + z_i * z_i <= 16.); iteration++) {
        // z*z + c (temp variables avoid using the updated z_r in z_i's formula)
        temp_z_r = (z_r * z_r) - (z_i * z_i) + c_r;
        temp_z_i = (z_r * z_i * 2.0 ) + c_i;
        z_r = temp_z_r, z_i = temp_z_i;
    }
    iteration_table[blockIdx.x * SIZE + blockIdx.y] = iteration;
}

/**
 * Transform an integer table into their 3 component color [255, 255, 255]
 * using an HSV-style hue wheel: points inside the set (iteration == max_loop)
 * are black; others get a hue from the iteration count, cycling every
 * COLOR_LOOP iterations. RGB/cfmod/cabs and the *_FACTOR constants come from
 * the included .cuh headers.
 * @param pixel_table 3 * pixel numbers
 * @param iteration_table per-pixel escape iteration counts
 * @param max_loop iteration cap used by the mandelbrot kernel
 */
__global__ void colorize(unsigned char *pixel_table, const int *iteration_table, const int max_loop) {
    int iteration = iteration_table[blockIdx.x * SIZE + blockIdx.y];
    if (iteration == max_loop) {
        /* inside the set: black */
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, 0u, 0u, 0u)
        return;
    }
    /* hue in [0, 360), split into 6 sectors of 60 degrees */
    double h = cfmod(iteration * 360. / COLOR_LOOP, 360.);
    double hp = h / 60.0;
    double z = 1.0 - cabs(cfmod(hp, 2.) - 1.);
    if (hp < 1.0) {
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, F_FACTOR, C_FACTOR * z, Z_FACTOR)
    } else if (hp < 2.0) {
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, C_FACTOR * z, F_FACTOR, Z_FACTOR)
    } else if (hp < 3.0) {
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, Z_FACTOR, F_FACTOR, C_FACTOR * z)
    } else if (hp < 4.0) {
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, Z_FACTOR, C_FACTOR * z, F_FACTOR)
    } else if (hp < 5.0) {
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, C_FACTOR * z, Z_FACTOR, F_FACTOR)
    } else if (hp < 6.0) {
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, F_FACTOR, Z_FACTOR, C_FACTOR * z)
    } else {
        /* unreachable for h in [0,360), kept as a safe fallback */
        RGB(pixel_table, (blockIdx.x * SIZE + blockIdx.y) * 3, Z_FACTOR, Z_FACTOR, Z_FACTOR)
    }
}
4,574
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#define THREAD_NUM 96  /* threads per block; must match the kernel's reduction tree */

/**
  Recursive version, no CUDA.
  1. backtracking  2. bitmap
  14:          365596               0            00:00:00:00.44
  15:         2279184               0            00:00:00:02.81
  down/left/right are occupancy bitmasks for columns and both diagonals;
  `bit=(bm+1)&~bm` picks the lowest free column of the current row.
*/
long long nqInternal(int n,unsigned int left,unsigned int down,unsigned int right) {
    unsigned int msk=(1<<n)-1;
    if(down==msk){return 1;}  /* all columns filled: one solution */
    unsigned int bm=(left|down|right);
    if((bm&msk)==msk){return 0;}  /* no free square in this row */
    long long total=0;
    unsigned int bit=(bm+1)&~bm;
    while((bit&msk)!=0){
        total+=nqInternal(n,(left|bit)<<1,down|bit,(right|bit)>>1);
        bm|=bit;
        bit=(bm+1)&~bm;
    }
    return total;
}

/* Public entry point for the recursive solver. */
long long solve_nqueen_recursive(int n){
    return nqInternal(n,0,0,0);
}

/**
  Non-recursive version, no CUDA.
  1. backtracking  2. bitmap
  14:          365596               0            00:00:00:00.22
  15:         2279184               0            00:00:00:01.50
  Explores only first-row columns 0..(n+1)/2-1 and doubles the count by
  mirror symmetry; for odd n the middle column's count (uTotal bookkeeping)
  is handled separately so it is not doubled twice.
*/
long long solve_nqueen_nonRecursive(int n){
    unsigned int down[32];unsigned int left[32];unsigned int right[32];unsigned int bm[32];
    if(n<=0||n>32){return 0;}
    const unsigned int msk=(1<<n)-1;long long total=0;long long uTotal=0;
    int i=0;int j=0;unsigned int bit;
    down[0]=0;left[0]=0;right[0]=0;bm[0]=0;
    for(j=0;j<(n+1)/2;j++){
        bit=(1<<j);
        bm[0]|=bit;down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
        bm[1]=(down[1]|left[1]|right[1]);
        i=1;
        /* entering the middle column of an odd board: stash the doubled part */
        if(n%2==1&&j==(n+1)/2-1){uTotal=total;total=0;}
        while(i>0){
            if((bm[i]&msk)==msk){i--;}  /* row exhausted: backtrack */
            else{
                bit=((bm[i]+1)^bm[i])&~bm[i];  /* lowest free column */
                bm[i]|=bit;
                if((bit&msk)!=0){
                    if(i+1==n){total++;i--;}
                    else{
                        down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
                        bm[i+1]=(down[i+1]|left[i+1]|right[i+1]);
                        i++;
                    }
                }else{i--;}
            }
        }
    }
    if(n%2==0){return total*2;}
    else{return uTotal*2+total;}
}

/**
  CUDA non-recursive version: each thread receives one partial board state
  (the first `n-mark` rows fixed by the CPU iterator) and counts completions
  of the remaining `mark` rows.
  1. backtracking  2. bitmap
  14:          365596               0            00:00:00:00.08
  15:         2279184               0            00:00:00:00.49
  Per-thread stacks live in shared memory ([THREAD_NUM][10], so mark <= 10);
  per-block totals are combined with a shared-memory tree reduction into
  results[bid].
*/
__global__ void solve_nqueen_cuda_kernel_bt_bm( int n,int mark, unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight, unsigned int* results,int totalCond){
    const int tid=threadIdx.x,bid=blockIdx.x,idx=bid*blockDim.x+tid;
    __shared__ unsigned int down[THREAD_NUM][10],left[THREAD_NUM][10],right[THREAD_NUM][10], bitmap[THREAD_NUM][10],sum[THREAD_NUM];
    const unsigned int mask=(1<<n)-1;int total=0,i=0;unsigned int bit;
    if(idx<totalCond){
        /* load this thread's partial state; then run the same iterative
           backtracking loop as the host version, depth-limited to `mark` */
        down[tid][i]=totalDown[idx];
        left[tid][i]=totalLeft[idx];
        right[tid][i]=totalRight[idx];
        bitmap[tid][i]=down[tid][i]|left[tid][i]|right[tid][i];
        while(i>=0){
            if((bitmap[tid][i]&mask)==mask){i--;}
            else{
                bit=(bitmap[tid][i]+1)&~bitmap[tid][i];
                bitmap[tid][i]|=bit;
                if((bit&mask)!=0){
                    if(i+1==mark){total++;i--;}
                    else{
                        down[tid][i+1]=down[tid][i]|bit;
                        left[tid][i+1]=(left[tid][i]|bit)<<1;
                        right[tid][i+1]=(right[tid][i]|bit)>>1;
                        bitmap[tid][i+1]=(down[tid][i+1]|left[tid][i+1]|right[tid][i+1]);
                        i++;
                    }
                }else{i--;}
            }
        }
        sum[tid]=total;
    }else{sum[tid]=0;}
    /* block-wide tree reduction of sum[]; THREAD_NUM==96 so the first step
       guards tid+64 against overrun */
    __syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}
    __syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
    __syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
    __syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
    __syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
    __syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
    __syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
    __syncthreads();if(tid==0){results[bid]=sum[0];}
}

/* Host driver: enumerates all partial placements of the first n-mark rows
 * (left half of the first row only; results are doubled by symmetry), batches
 * them `steps` at a time, and lets the kernel finish each batch. For odd n the
 * middle first-row column is processed in a separate, un-doubled pass.
 * NOTE(review): CUDA calls here are unchecked, as in the original. */
long long solve_nqueen_cuda(int n,int steps) {
    unsigned int down[32];unsigned int left[32];unsigned int right[32];
    unsigned int m[32];unsigned int bit;
    if(n<=0||n>32){return 0;}
    unsigned int* totalDown=new unsigned int[steps];
    unsigned int* totalLeft=new unsigned int[steps];
    unsigned int* totalRight=new unsigned int[steps];
    unsigned int* results=new unsigned int[steps];
    unsigned int* downCuda;unsigned int* leftCuda;unsigned int* rightCuda;
    unsigned int* resultsCuda;
    cudaMalloc((void**) &downCuda,sizeof(int)*steps);
    cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
    cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
    cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
    const unsigned int mask=(1<<n)-1;
    const unsigned int mark=n>11?n-10:2;  /* keep device depth <= 10 (shared array size) */
    long long total=0;int totalCond=0;
    int i=0,j;down[0]=0;left[0]=0;right[0]=0;m[0]=0;bool computed=false;
    for(j=0;j<n/2;j++){
        bit=(1<<j);m[0]|=bit;
        down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
        m[1]=(down[1]|left[1]|right[1]);
        i=1;
        while(i>0){
            if((m[i]&mask)==mask){i--;}
            else{
                bit=(m[i]+1)&~m[i];m[i]|=bit;
                if((bit&mask)!=0){
                    down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
                    m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
                    i++;
                    if(i==mark){
                        /* reached the handoff depth: queue this partial state */
                        totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i];
                        totalCond++;
                        if(totalCond==steps){
                            /* batch full: harvest the previous launch, then launch this one */
                            if(computed){
                                cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
                                for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
                                computed=false;
                            }
                            cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
                            cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
                            cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
                            /** backTrack+bitmap*/
                            solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
                            computed=true;totalCond=0;
                        }
                        i--;
                    }
                }else{i --;}
            }
        }
    }
    /* flush: harvest any in-flight launch, then run the final partial batch */
    if(computed){
        cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
        for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
        computed=false;
    }
    cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
    cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
    cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
    /** backTrack+bitmap*/
    solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
    cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
    for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
    total*=2;  /* mirror symmetry of the left-half first-row columns */
    if(n%2==1){
        /* odd board: middle first-row column, counted once (not doubled) */
        computed=false;totalCond=0;bit=(1<<(n-1)/2);m[0]|=bit;
        down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
        m[1]=(down[1]|left[1]|right[1]);
        i=1;
        while(i>0){
            if((m[i]&mask)==mask){i--;}
            else{
                bit=(m[i]+1)&~m[i];m[i]|=bit;
                if((bit&mask)!=0){
                    down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
                    m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
                    i++;
                    if(i==mark){
                        totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i];
                        totalCond++;
                        if(totalCond==steps){
                            if(computed){
                                cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
                                for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
                                computed=false;
                            }
                            cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
                            cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
                            cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
                            /** backTrack+bitmap*/
                            solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
                            computed=true;totalCond=0;
                        }
                        i--;
                    }
                }else{i --;}
            }
        }
        if(computed){
            cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
            for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
            computed=false;
        }
        cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
        cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
        cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
        /** backTrack+bitmap*/
        solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
        cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
        for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
    }
    cudaFree(downCuda);cudaFree(leftCuda);cudaFree(rightCuda);cudaFree(resultsCuda);
    delete[] totalDown;delete[] totalLeft;delete[] totalRight;delete[] results;
    return total;
}

/** CUDA initialization: pick the first device with compute capability >= 1.x. */
bool InitCUDA(){
    int count;
    cudaGetDeviceCount(&count);
    if(count==0){fprintf(stderr,"There is no device.\n");return false;}
    int i;
    for(i=0;i<count;i++){
        cudaDeviceProp prop;
        if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
    }
    if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
    cudaSetDevice(i);
    return true;
}

/* CLI driver: -c = CPU only, -g = GPU only (default: both); runs the solvers
 * over a range of board sizes and prints wall-clock timings. */
int main(int argc,char** argv) {
    long long solution;
    bool cpu=true,gpu=true;
    int argstart=1,steps=24576;
    /** argument handling */
    if(argc>=2&&argv[1][0]=='-'){
        if(argv[1][1]=='c'||argv[1][1]=='C'){gpu=false;}
        else if(argv[1][1]=='g'||argv[1][1]=='G'){cpu=false;}
        argstart=2;
    }
    if(argc<argstart){
        printf("Usage: %s [-c|-g] n steps\n",argv[0]);
        printf(" -c: CPU only\n");
        printf(" -g: GPU only\n");
        printf("Default to 8 queen\n");
    }
    /** output and execution */
    /** CPU */
    if(cpu){
        int min=4;int targetN=15;
        struct timeval t0;struct timeval t1;int ss;int ms;int dd;
        printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
        for(int i=min;i<=targetN;i++){
            gettimeofday(&t0,NULL);  // start timing
            /** recursive */
            /* solution=solve_nqueen_recursive(i);*/
            /** non-recursive */
            solution=solve_nqueen_nonRecursive(i);
            gettimeofday(&t1,NULL);  // stop timing
            if (t1.tv_usec<t0.tv_usec) {
                dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
                ss=(t1.tv_sec-t0.tv_sec-1)%86400;
                ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
            } else {
                dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
                ss=(t1.tv_sec-t0.tv_sec)%86400;
                ms=(t1.tv_usec-t0.tv_usec+500)/10000;
            }
            int hh=ss/3600;
            int mm=(ss-hh*3600)/60;
            ss%=60;
            long lGUnique=0;  /* unique-solution counting not implemented; always 0 */
            printf("%2d:%18llu%18llu%12.2d:%02d:%02d:%02d.%02d\n", i,(unsigned long long)solution,(unsigned long long)lGUnique,dd,hh,mm,ss,ms);
        }
    }
    /** GPU */
    if(gpu){
        if(!InitCUDA()){return 0;}
        int min=4;int targetN=17;
        struct timeval t0;struct timeval t1;int ss;int ms;int dd;
        printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
        for(int i=min;i<=targetN;i++){
            gettimeofday(&t0,NULL);  // start timing
            solution=solve_nqueen_cuda(i,steps);
            gettimeofday(&t1,NULL);  // stop timing
            if (t1.tv_usec<t0.tv_usec) {
                dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
                ss=(t1.tv_sec-t0.tv_sec-1)%86400;
                ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
            } else {
                dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
                ss=(t1.tv_sec-t0.tv_sec)%86400;
                ms=(t1.tv_usec-t0.tv_usec+500)/10000;
            }
            int hh=ss/3600;
            int mm=(ss-hh*3600)/60;
            ss%=60;
            long lGUnique=0;
            printf("%2d:%18llu%18llu%12.2d:%02d:%02d:%02d.%02d\n", i,(unsigned long long)solution,(unsigned long long)lGUnique,dd,hh,mm,ss,ms);
        }
    }
    return 0;
}
4,575
#include <cstdio> #include <cmath> #include <vector> #include <chrono> #include <stdlib.h> using namespace std; #define M 32 __global__ void matrix(int N, float *A, float *B, float* C){ int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; __shared__ float s1[M][M+1]; __shared__ float s2[M][M+1]; float sum=0; for(int i =0;i<N/M;i++) { s1[ty][tx]=A[(by*M+ty)*N+(i*M+tx)]; s2[ty][tx]=B[(bx*M+tx)+(i*M+ty)*N]; __syncthreads(); for (int k=0;k<M;k++) sum+=s1[ty][k]*s2[k][tx]; __syncthreads(); } C[(by*M+ty)*N+bx*M+tx]=sum; } int main(int argc, char** argv) { const int N = 256; vector<float> A(N*N); vector<float> B(N*N); vector<float> C(N*N, 0); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { A[N*i+j] = drand48(); B[N*i+j] = drand48(); } } float *d_A, *d_B, *d_C; cudaMalloc(&d_A, sizeof(float) * N * N); cudaMalloc(&d_B, sizeof(float) * N * N); cudaMalloc(&d_C, sizeof(float) * N * N); cudaMemcpy(d_A, &A[0], sizeof(float) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(d_B, &B[0], sizeof(float) * N * N, cudaMemcpyHostToDevice); auto tic = chrono::steady_clock::now(); int GRID_SIZE=(N+M-1)/M; dim3 grid(GRID_SIZE,GRID_SIZE); dim3 block(M,M); matrix<<<grid,block>>>(N,d_A,d_B,d_C); cudaDeviceSynchronize(); auto toc = chrono::steady_clock::now(); double time = chrono::duration<double>(toc-tic).count(); cudaMemcpy(&C[0], d_C, sizeof(float) * N * N, cudaMemcpyDeviceToHost); for (int i=0; i<N; i++) for (int j=0; j<N; j++) for (int k=0; k<N; k++) C[N*i+j] -= A[N*i+k] * B[N*k+j]; double err = 0; for (int i=0; i<N; i++) for (int j=0; j<N; j++) err += fabs(C[N*i+j]); printf("N : %d\n",N); printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9); printf("error: %lf\n",err/N/N); }
4,576
#include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
4,577
#include "includes.h" __global__ void Compute(int *a, int k, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int id = i / k * 2 * k + k + i % k; if(id < n) { a[id] += a[id - id % k - 1]; } }
4,578
#include "includes.h" __global__ void reprojectPoint2(double *d_N, int nRxns, int istart, double *d_umat, double *points, int pointsPerFile, int pointCount,int index){ int newindex= blockIdx.x * blockDim.x + threadIdx.x; int stride= blockDim.x * gridDim.x; for(int i=newindex;i<nRxns;i+=stride){ points[pointCount+pointsPerFile*i]=0; for(int j=0;j<nRxns-istart;j++){ points[pointCount+pointsPerFile*i]+=d_N[j*nRxns+i]*d_umat[nRxns*index+j];//here N*tmp } } }
4,579
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/for_each.h> // CA Update process functor struct caUpdate{ private: const int* ruleArr; int states; public: caUpdate(){} caUpdate(int* x, int s):ruleArr(x),states(s){} template <class Tuple> __device__ void operator()(Tuple t) { int a = thrust::get<0>(t); // Left cell int b = thrust::get<1>(t); // This cell int c = thrust::get<2>(t); // Right cell thrust::get<3>(t) = *(ruleArr+b+states*a+states*states*c); } }; class ca1d{ typedef thrust::device_vector<int> intDvec; int length; // Array length of CA int states; // CA number of states intDvec bk; // Back (past) array intDvec rt; // Right neighbour map intDvec lt; // Left neighbour map caUpdate update; // Update functor public: intDvec ft; // Front (current) array ca1d(int l, int s, caUpdate ca):length(l),states(s) { bk.resize(length); ft.resize(length); lt.resize(length); rt.resize(length); update = ca; // Load maps to neighbouring cells (wrapping ends) thrust::counting_iterator<int> it(0); lt[0] = length-1; thrust::copy(it,it+length,lt.begin()+1); thrust::copy(it+1,it+length,rt.begin()); rt[length-1] = 0; } void loadInitial(thrust::host_vector<int> i) { thrust::copy(i.begin(),i.end(),bk.begin()); } void updateFront() { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( bk.begin(), thrust::make_permutation_iterator(bk.begin(),lt.begin()), thrust::make_permutation_iterator(bk.begin(),rt.begin()), ft.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( bk.end(), thrust::make_permutation_iterator(bk.begin(),lt.end()), thrust::make_permutation_iterator(bk.begin(),rt.end()), ft.end() ) ), update ); } void swapFB() { ft.swap(bk); } };
4,580
#include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
4,581
#include "includes.h" __device__ void assign_add(float *target, const float *source) { target[0] += source[0]; target[1] += source[1]; target[2] += source[2]; } __global__ void PoissonImageCloningIteration( const float *fixed, const float *mask, const float *source, float *target ,const int wt, const int ht) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; const int Nt = wt*(yt-1)+xt; const int Wt = wt*yt+xt-1; const int St = wt*(yt+1)+xt; const int Et = wt*yt+xt+1; float sum[3] = {}; if(yt < ht and xt < wt){ assign_add(sum, &fixed[curt*3]); if((yt-1) >= 0){ if(mask[Nt] > 127.0f){ assign_add(sum, &source[Nt*3]); } } if((xt-1) >= 0){ if(mask[Wt] > 127.0f){ assign_add(sum, &source[Wt*3]); } } if((yt+1) < ht){ if(mask[St] > 127.0f){ assign_add(sum, &source[St*3]); } } if((xt+1) < wt){ if(mask[Et] > 127.0f){ assign_add(sum, &source[Et*3]); } } target[curt*3+0] = sum[0]/4; target[curt*3+1] = sum[1]/4; target[curt*3+2] = sum[2]/4; } }
4,582
#include <stdio.h> int main(void) { cudaDeviceProp prop; int count, i; cudaGetDeviceCount(&count); for (i = 0; i < count; i++) { cudaGetDeviceProperties(&prop, i); printf("%s\n", prop.name); printf("%d\n", prop.canMapHostMemory); printf("%d\n", prop.deviceOverlap); printf("%d\n", prop.multiProcessorCount); printf("%d\n", prop.integrated); printf("%d\n", prop.clockRate); } }
4,583
//#include "crop_cuda.h" // //#include <stdio.h> //#include <cstdlib> //#include <math.h> //#include <iostream> // //#include "../common/macro.h" // //#define PIXEL_PER_THREAD 128 // //namespace va_cv { // //texture<unsigned char, 2> tex_src; //__constant__ int rect[5]; // // //__global__ void kernel_crop_chw(unsigned char *dst, size_t pitch_dst) { // // map from threadIdx/BlockIdx to pixel position(on dst) // // int tid = threadIdx.x + blockIdx.x * blockDim.x; // while (tid < pitch_dst * rect[3]) { // int dst_x = tid % pitch_dst; // int dst_y = tid / pitch_dst; // dst[tid] = tex2D(tex_src, dst_x + rect[0], dst_y + rect[1]); // // tid += blockDim.x * gridDim.x; // } //} // //void CropCuda::crop_cuda_chw_int8(const unsigned char *src, int src_width, int src_height, int src_channel, // unsigned char *dst, // int crop_left, int crop_top, int crop_width, int crop_height) { // // crop rect, use const value // int *rect_vec = new int[5]{crop_left, crop_top, crop_width, crop_height, src_width}; // cudaMemcpyToSymbol( rect, rect_vec, sizeof(int) * 5); // // int dst_size = crop_width * crop_height; // int src_size = src_width * src_height; // // dst使用cuda malloc // unsigned char *dev_src, *dev_dst; // size_t pitch_dst = 0; // // pitch的訪問效率更高,但拷貝效率不受影響。。。 // cudaMallocPitch((void**)&dev_dst, &pitch_dst, crop_width * sizeof(unsigned char), crop_height); // // src cudaMallocPitch // size_t pitch_src = 0; // cudaMallocPitch((void**)&dev_src, &pitch_src, src_width * sizeof(unsigned char), src_height); // cudaMemcpy2D(dev_src, pitch_src, src, src_width * sizeof(unsigned char), // src_width * sizeof(unsigned char), src_height, cudaMemcpyHostToDevice); // // // src使用紋理內存 // cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); // int err = cudaBindTexture2D( NULL, tex_src, dev_src, desc, src_width, src_height, // sizeof(unsigned char) * src_width ); // if (err != cudaSuccess) { // printf("bind failed!!! 
%d\n", err); // } // // // 設備函數 // dim3 blocks((dst_size + PIXEL_PER_THREAD - 1) / PIXEL_PER_THREAD); // dim3 threads(PIXEL_PER_THREAD); // kernel_crop_chw<<<blocks,threads>>>( dev_dst, pitch_dst ); // // // 讀取dst內存 // cudaMemcpy2D(dst, crop_width * sizeof(unsigned char), dev_dst, pitch_dst, // crop_width * sizeof(unsigned char), crop_height, cudaMemcpyDeviceToHost); // // 回收內存 // cudaFree(dev_dst); // cudaFree(dev_src); // cudaUnbindTexture( tex_src ); // // delete[] rect_vec; //} // //}
4,584
#include <stdio.h> template <typename T> void check(T result, char const *const func, const char *const file, int const line) { if (result) { fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line, static_cast<unsigned int>(result), cudaGetErrorString(result), func); cudaDeviceReset(); exit(EXIT_FAILURE); } } // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__) template<typename T, typename U> constexpr T ceildiv(T t, U u) { return (t + u - 1) / u; } constexpr int LOOPS_1 = 32; constexpr int FMAS = LOOPS_1; __global__ void peak_flops(float* in, float* out, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) { float x = in[i]; float y = 1.0f; for (int j = 0; j < LOOPS_1; j++) { y = fma(y, x, 1.0f); } out[i] = y; } } constexpr int LOOPS_2 = 256; constexpr int FMAS_2 = LOOPS_2 * 4; __global__ void peak_flops_2(float* in, float* out, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) { float x = in[i]; float y = 1.0f; float z = -1.0f; float w = 2.0f; float u = -2.0f; for (int j = 0; j < LOOPS_2; j++) { y = fma(y, x, 1.0f); z = fma(z, x, -1.0f); w = fma(w, x, 2.0f); u = fma(u, x, -2.0f); } out[i] = y + z + w + u; } } void init(float* in, int n) { for (int i = 0; i < n; i++) { in[i] = (float)(i) / n; } } int main() { constexpr int N = (1 << 28); constexpr int ITERS = 100; float *x, *y, *d_x, *d_y; x = (float*)malloc(N * sizeof(float)); y = (float*)malloc(N * sizeof(float)); checkCudaErrors(cudaMalloc(&d_x, N * sizeof(float))); checkCudaErrors(cudaMalloc(&d_y, N * sizeof(float))); init(x, N); checkCudaErrors(cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice)); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start)); int 
blockSize = 32; for (int i = 0; i < ITERS; i++) { peak_flops<<<ceildiv(N, blockSize), blockSize>>>(d_x, d_y, N); } checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); float millis = 0.0f; checkCudaErrors(cudaEventElapsedTime(&millis, start, stop)); millis /= ITERS; float bytes = (float)N * sizeof(float) * 2; float flops = (float)N * FMAS * 2; printf("%.3f ms %.1f gb/s %.1f gflops/s\n", millis, bytes / millis / 1e6, flops / millis / 1e6); cudaFree(d_x); cudaFree(d_y); free(x); free(y); }
4,585
#include "includes.h" __global__ void gpu_grayscale(int width, int height, float *image, float *image_out) { //////////////// // TO-DO #4.2 ///////////////////////////////////////////// // Implement the GPU version of the grayscale conversion // /////////////////////////////////////////////////////////// const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < width && y < height) { int offset_out = ((width * y) + x); int offset = offset_out*3; float *pixel = &image[offset]; image_out[offset_out] = pixel[0] * 0.0722f + // B pixel[1] * 0.7152f + // G pixel[2] * 0.2126f; // R } }
4,586
#include "nn_include.cuh" #include "neural_network.cuh" #define MATRIX_TEST 0 #define MATRIX_UTIL_TEST 0 int main(int argc,char **argv) { #if (MATRIX_TEST) matrix_test(); #elif (MATRIX_UTIL_TEST) matrix_util_test(); #else //unsigned int layer_sizes[] = {400, 25, 10}; unsigned int num_layers = 3; unsigned int num_labels = 10; double lambda = 0.8; /* if(argc < 2) { printf("need exactly 1 argument for vector length\n"); return 0; } unsigned int iteration_number = atoi(argv[1]); */ unsigned int iteration_number = 100; matrix_list_t* theta = matrix_list_constructor(2); //theta->matrix_list[0] = matrix_random(25, 401, .12); //theta->matrix_list[1] = matrix_random(10, 26, .12); theta->matrix_list[0] = load_from_file("theta1.csv", 25, 401); theta->matrix_list[1] = load_from_file("theta2.csv", 10, 26); assert(theta->num == 2); assert(theta->matrix_list[0]->rows == 25 && theta->matrix_list[0]->cols == 401); assert(theta->matrix_list[1]->rows == 10 && theta->matrix_list[1]->cols == 26); matrix_t* X = load_from_file("X.csv", 5000, 400); matrix_t* tmp = load_from_file("y.csv", 5000, 1); matrix_t* y = matrix_transpose(tmp); free_matrix(tmp); gradient_descent(&theta, num_layers, num_labels, X, y, lambda, iteration_number); free_matrix(X); free_matrix(y); free_matrix_list(theta); #endif //printf("%ld %ld\n", get_memory_used(), get_total_mallocs()); return 1; }
4,587
#include "iostream" #include <fstream> #include <string> #define N 1024 // size of matrix is N*N #define BLOCK_SIZE 16 __global__ void gpuMult ( float * a, float * b, int n, float * c ) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float result = 0.0f; int idxA = n * BLOCK_SIZE * by + n * ty; int idxB = BLOCK_SIZE * bx + tx; for ( int idx = 0; idx < n; idx++ ) result += a [idxA + idx] * b [idxB + idx*n]; int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; c [idxC + n * ty + tx] = result; } void cpuMult( float * a, float * b, int n, float * c ) { for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) { for (int colIdxC = 0; colIdxC < n; colIdxC++) { float resultC = 0.0f; for (int idx = 0; idx < n; idx++) { int idxA = (rowIdxC * n) + idx; int idxB = colIdxC + (idx * n); resultC += a[idxA] * b[idxB]; } int idxC = rowIdxC * n + colIdxC; c[idxC] = resultC; } } } int main() { float * a = new float [N*N]; float * b = new float [N*N]; float * c = new float [N*N]; for ( int i = 0; i < N; i++ ) { for ( int j = 0; j < N; j++ ) { int k = N * i + j; a [k] = k; b [k] = k; } } //CPU------------------------------------- clock_t start_s = clock(); cpuMult(a, b, N, c); clock_t stop_s = clock(); std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n"; //GPU------------------------------------- int size = N * N * sizeof(float); float * adev = NULL; float * bdev = NULL; float * cdev = NULL; cudaMalloc((void**)&adev, size); cudaMalloc((void**)&bdev, size); cudaMalloc((void**)&cdev, size); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 blocks(N / threads.x, N / threads.y); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float gpuTime = 0.0f; cudaEventRecord(start, 0); cudaMemcpy(adev, a, size, cudaMemcpyHostToDevice); cudaMemcpy(bdev, b, size, cudaMemcpyHostToDevice); gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev); cudaMemcpy(c, cdev, size, cudaMemcpyDeviceToHost); cudaEventRecord( stop, 0); 
cudaEventSynchronize(stop); cudaEventElapsedTime(&gpuTime, start, stop); std::cout << "Time GPU: " << gpuTime << " ms\n"; cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(adev); cudaFree(bdev); cudaFree(cdev); delete a; delete b; delete c; return 0; }
4,588
#include <stdio.h> #include <stdlib.h> #include <time.h> #define NB 1 #define NTPB 1024 #define SIZE 2048*512 void testCUDA(cudaError_t error, const char *file, int line) { if (error != cudaSuccess) { printf("There is an error in file %s at line %d : %d\n", file, line, error); exit(EXIT_FAILURE); } } #define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__)) __device__ int Ad[NB*NTPB+1]; __device__ int Bd[NB*NTPB+1]; __device__ void printRange(int idx,int* t, int d,int f){ if(idx == 1){ printf("\t["); for (int i = d; i < f; ++i) { printf("%d, ",t[i]); } printf("]\n"); } } /*------ @param : int idx : indice du thread qui entre dans la fonction C : tableau de résultat A : tableau d'entrée ab : indice de debut pour A ad : indice de fin pour A B : tableau d'entrée bb : indice de début pour B bd : indide de fin pour B */ __device__ void merge(int idx,int* C,int* A, int ab,int ad,int* B, int bb, int bd){ int start = ab+bb; int i = 0; while(1){ if(ab >= ad && bb >= bd){ return; }else if(ab == ad){ C[start+i] = B[bb]; bb++; }else if(bb == bd){ C[start+i] = A[ab]; ab++; }else if( A[ab] > B[bb]){ C[start+i] = B[bb]; bb += 1; }else{ C[start+i] = A[ab]; ab++; } i++; } } /*------ @param : C : fusion de A et B trié A : tableau d' entrée trié de lenR éléments B : tableau d' entrée trié de lenR éléments len* : longeur du talbeau * nbT : nombre de thread dans la fonction */ __device__ void mergePath(int* C,int* A,int lenA,int *B,int lenB,int nbT){ int idx = (threadIdx.x + blockIdx.x * blockDim.x)% nbT +1; if(idx <= nbT){ Ad[idx-1] = 0; Bd[idx-1] = 0; if(idx == 1){ Ad[nbT] = lenA; Bd[nbT] = lenB; } int index = (idx*(lenA+lenB))/(nbT); int atop = (index > lenA)? lenA-1: index; int btop = (index > lenA)? 
index - (lenA-1) : 0; int abot = btop; int offset; int ai; int bi; int flag = 0; while(!flag && idx != nbT){ offset = (int)floor(((float)(atop - abot))/(float)(2)); ai = atop - offset; bi = btop + offset; if( bi == 0 || ai >= lenA || A[ai] > B[bi-1]){ if(A[ai-1] <= B[bi]) { Ad[idx] = ai; Bd[idx] = bi; flag = 1; } else{ atop = ai - 1; btop = bi + 1; } } else{ abot = ai + 1; } } __syncthreads(); // int size = (lenA+lenB)/(nbT); for (int i = 1; i <= nbT; ++i) { merge(idx,C,A,Ad[i-1],Ad[i],B,Bd[i-1],Bd[i]); } } } __device__ void mergeTab(int* C,int* A,int a,int* B,int b) { int start = 0; int i = 0; int a_ = 0,b_ = 0; while(1){ if(a_ >= a && b_ >= b){ return; }else if(a_ == a){ C[start+i] = B[b_]; b_++; }else if(b_ == b){ C[start+i] = A[a_]; a_++; }else if( A[a_] > B[b_]){ C[start+i] = B[b_]; b_ += 1; }else{ C[start+i] = A[a_]; a_++; } i++; } } /*------ @param : R : tableau de sortie trié de lenR éléments S : tableau d' entrée trié de lenR éléments lenR : longeur des tableaux */ __global__ void sort(int *R,int* S,int lenR){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int size = lenR/(NB*NTPB); int tmp; int turn = 1; if(lenR < 2*NB*NTPB){ size = 2; } printf("%d : size\n",size ); //création de tableau trié de "size" éléments for (int j = 0; j < size; j++) { for (int k = j; k < size; k++) { if( R[j+idx*size] > R[k+idx*size]){ tmp = R[j+idx*size]; R[j+idx*size] = R[k+idx*size]; R[k+idx*size] = tmp; } } } //optimisation ? 
// for(int l = 2 ; l <= size; l *= 2){ // for(int m = l ; m <= size ; m *=2){ // printf("%d,%d -> mergePath(R+%d,S+%d ,%d, S+%d, %d,%d);\n",idx,m,idx*m,idx*m ,l/2,l/2+idx*m,l/2,1); // mergeTab(R+idx*m,S+idx*m,l/2,S+l/2+idx*m,l/2); // } // for(int i = 0; i < size ; i++){ // S[idx*size+i] = R[idx*size+i]; // } // // memcpy(S,R,menR); // } printRange(idx,R,0,lenR); for(int i = 0; i < size ; i++){ S[idx*size+i] = R[idx*size+i]; } // memcpy(S+idx*size,R+idx*size,size); //__syncthreads(); turn *=2; size *=2; idx = idx%((NB*NTPB)/turn); while(size <= lenR){ //printf("%d,%d -> mergePath(R+%d,S+%d ,%d, S+%d, %d,%d);\n",idx,turn,idx*size,idx*size ,size/2,size/2+idx*size,size/2,1); mergePath(R+idx*size, S+idx*size ,size/2, S+size/2+idx*size, size/2,turn); for(int i = 0; i < size ; i++){ S[idx*size+i] = R[idx*size+i]; } // memcpy(S+idx*size,R+idx*size,size); turn *=2; idx = idx%((NB*NTPB)/turn); size *=2 ; } } //affiche un tablrau de "len" élément(s) void affiche(int* A,int len){ for (int i = 0; i < len; ++i) { printf("%d, ",A[i] ); } puts("\b\b "); } int comp (const void * elem1, const void * elem2) { int f = *((int*)elem1); int s_ = *((int*)elem2); if (f > s_) return 1; if (f < s_) return -1; return 0; } int main(int argc,char** argv){ // Initialisation des tableau int *A = (int*)malloc(sizeof(int)*SIZE); int* ret = (int*)malloc(sizeof(int)*SIZE); // srand(time(NULL)); for(int i =0 ; i < SIZE ; i++){ A[i] = rand()%10000+1; ret[i] = 0; } //--------------------------------- //création des variable sur le GPU int *aGPU,*bGPU; testCUDA(cudaMalloc(&aGPU,SIZE*sizeof(int))); testCUDA(cudaMemcpy(aGPU,A,SIZE*sizeof(int),cudaMemcpyHostToDevice)); testCUDA(cudaMalloc(&bGPU,SIZE*sizeof(int))); testCUDA(cudaMemcpy(bGPU,A,SIZE*sizeof(int),cudaMemcpyHostToDevice)); //--------------------------------- affiche(A,SIZE); // affichage du tableau non trié printf("len tab : %d\n",SIZE ); // gestion du temps int count; cudaDeviceProp prop; testCUDA(cudaGetDeviceCount(&count)); 
testCUDA(cudaGetDeviceProperties(&prop, count-1)); float TimerAddOne; // GPU timer instructions cudaEvent_t start, stop; // GPU timer instructions testCUDA(cudaEventCreate(&start)); // GPU timer instructions testCUDA(cudaEventCreate(&stop)); // GPU timer instructions testCUDA(cudaEventRecord(start,0)); // GPU timer instructions //----------------- // lancement de l'algorithme de tri sort<<<NB,NTPB>>>(aGPU,bGPU,SIZE); // récupération du temps mis par le GPU testCUDA(cudaEventRecord(stop,0)); // GPU timer instructions testCUDA(cudaEventSynchronize(stop)); // GPU timer instructions testCUDA(cudaEventElapsedTime(&TimerAddOne, // GPU timer instructions start, stop)); // GPU timer instructions // récupération du talbeau sur le CPU testCUDA(cudaMemcpy(ret,aGPU,SIZE*sizeof(int),cudaMemcpyDeviceToHost)); printf("\n\n"); affiche(ret,SIZE); printf("GPU Timer for the addition on the GPU of vectors: %f ms\n", TimerAddOne); // libération de la memoire free(A);free(ret); testCUDA(cudaFree(aGPU)); testCUDA(cudaFree(bGPU)); }
4,589
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <curand_kernel.h> static const long BLOCKS = 256; static const long THREAD_X_BLOCK = 256; static const long ITER_X_THREAD = 10000; __global__ void piMC(long long* blockCounter, unsigned long long seed) { // blockCounter debe tener un contador por cada bloque // Debe haber un contador por cada hilo en el bloque (compartido en el bloque) __shared__ long long threadCounter[THREAD_X_BLOCK]; // ID de la thread int id = threadIdx.x + blockIdx.x * blockDim.x; // Inicializa el RNG curandState_t rng; curand_init(seed, id, 0, &rng); // Inicializa el contador threadCounter[threadIdx.x] = 0; // Calcula los puntos dentro del círculo for (int i = 0; i < ITER_X_THREAD; i++) { float x = curand_uniform(&rng); float y = curand_uniform(&rng); if (x * x + y * y <= 1.0) { threadCounter[threadIdx.x] += 1; } } // La primera thread en cada bloque suma los contadores individuales en el de bloque if (threadIdx.x == 0) { // Inicializa el contador de este bloque blockCounter[blockIdx.x] = 0; // Suma los contadores de thread en el de bloque for (int i = 0; i < THREAD_X_BLOCK; i++) { blockCounter[blockIdx.x] += threadCounter[i]; } } } int main(void) { // Crea el buffer para los contadores de bloque en el host long long* blockCounter = (long long*)malloc(sizeof(long long) * BLOCKS); // Crea el buffer para los contadores de bloque en la GPU long long* gpuBlockCounter; cudaMalloc(&gpuBlockCounter, sizeof(long long) * BLOCKS); // Ejecuta la kernel unsigned long long seed = (unsigned long long) time(NULL); piMC <<< BLOCKS, THREAD_X_BLOCK >>> (gpuBlockCounter, seed); // Recupera el resultado desde la GPU y lo pone en el buffer del host cudaMemcpy(blockCounter, gpuBlockCounter, sizeof(long long) * BLOCKS, cudaMemcpyDeviceToHost); // Suma los contadores y calcula PI unsigned long long total = 0; for (int i = 0; i < BLOCKS; i++) { total += blockCounter[i]; } unsigned long 
long iters = BLOCKS * THREAD_X_BLOCK * ITER_X_THREAD; printf("Aproximado con %lld iteraciones\n", iters); printf("%lld puntos dentro del círculo\n", total); printf("PI= %f\n", 4.0 * ( (double)total / (double)iters ) ); // Libera los recursos cudaFree(gpuBlockCounter); free(blockCounter); return 0; }
4,590
__device__ long long int mod(int base, int exponent, int den) { long long int ret; ret = 1; for (int i = 0; i < exponent; i++) { ret *= base; ret = ret % den; } return ret; } __device__ long long int mod_optimized(int base, int exponent, int den) { unsigned int a = (base % den) * (base % den); unsigned long long int ret = 1; float size = (float) exponent / 2; if (exponent == 0) { return base % den; } else { while (1) { if (size > 0.5) { ret = (ret * a) % den; size = size - 1.0; } else if (size == 0.5) { ret = (ret * (base % den)) % den; break; } else { break; } } return ret; } } __global__ void rsa(int * num, int *key, int *den, int * result) { int i = blockDim.x * blockIdx.x + threadIdx.x; int temp; temp = mod(num[i], *key, *den); //temp = mod_optimized(num[i], *key, *den); atomicExch(&result[i], temp); }
4,591
#include "scan.cuh" #include <cuda.h> #include <stdio.h> #include <random> int main(int argc, char *argv[]) { int n = atol(argv[1]); int threads_per_block = atol(argv[2]); // set up random number from -1 to 1 generator std::random_device entropy_source; std::mt19937_64 generator(entropy_source()); const float min = -1.0, max = 1.0; // The range for the random number std::uniform_real_distribution<float> dist(min, max); float *input, *output; cudaMallocManaged((void **)&input, sizeof(float) * n); cudaMallocManaged((void **)&output, sizeof(float) * n); for (int i = 0; i < n; i++) { input[i] = dist(generator); } /// time for the operations. // set up timer cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); // start timing cudaEventRecord(start); scan(input, output, n, threads_per_block); cudaEventRecord(stop); cudaEventSynchronize(stop); // Get the elapsed time in milliseconds float ms; cudaEventElapsedTime(&ms, start, stop); // test // for (int i = 0; i < n; i++) { // printf("%f, ", input[i]); // } // printf("\n"); // for (int i = 0; i < n; i++) { // printf("%f, ", output[i]); // } // printf("\n"); printf("%f\n%f\n", output[n - 1], ms); //free memory cudaFree(input); cudaFree(output); }
4,592
// -*- C++ -*- // -*- coding: utf-8 -*- // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // michael a.g. aïvázis // california institute of technology // (c) 1998-2010 all rights reserved // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // memxchng.cu #include <cuda.h> #include <assert.h> int main(int argc, char* argv[]) { const int N = 12; // allocate some buffers on the host float *send_host = (float *) malloc(N*sizeof(float)); float *recv_host = (float *) malloc(N*sizeof(float)); // allocate matching ones on the device float *send_device, *recv_device; cudaMalloc((void **) &recv_device, N*sizeof(float)); cudaMalloc((void **) &send_device, N*sizeof(float)); // and initialize the host data for (int i=0; i<N; i++) { send_host[i] = 2.0f + i*i; recv_host[i] = 0.0f; } // send the data from the host to the device cudaMemcpy(recv_device, send_host, N*sizeof(float), cudaMemcpyHostToDevice); // move the data in device memory cudaMemcpy(send_device, recv_device, N*sizeof(float), cudaMemcpyDeviceToDevice); // get it back on the host cudaMemcpy(recv_host, send_device, N*sizeof(float), cudaMemcpyDeviceToHost); // check the result for (int i=0; i<N; i++) { assert(send_host[i] == recv_host[i]); } // free the buffers; free(send_host); free(recv_host); cudaFree(send_device); cudaFree(recv_device); return 0; } // end of file
4,593
#include <iostream> #include <cassert> #define N 32768 __global__ void scaleVector(float scale, float * input, float * output) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = input[tid] * scale; } } int main() { float * h_a = new float[N]; float * h_b = new float[N]; float * d_a; float * d_b; const float scale = 2.; for (int i=0; i<N; i++) h_a[i] = (float)i/2.; std::cout << "Initializing data on GPU\n"; cudaMalloc( (void**)&d_a, N*sizeof(float) ); cudaMalloc( (void**)&d_b, N*sizeof(float) ); cudaMemcpy( d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice ); std::cout << "Launching kernels on GPU\n"; const int nblocks = 128; const int nthreads = 256; scaleVector<<< nblocks, nthreads >>>(scale, d_a, d_b); std::cout << "Downloading data\n"; cudaMemcpy( h_b, d_b, N*sizeof(float), cudaMemcpyDeviceToHost ); std::cout << "Verifying results\n"; for (int i=0; i<N; i++) { std::cout << h_b[i] << std::endl; assert((double)i == h_b[i]); } std::cout << "Done!\n"; cudaFree(d_a); cudaFree(d_b); delete [] h_a; delete [] h_b; }
4,594
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float* var_19,float var_20,float var_21,float var_22,float var_23,float var_24) { if (comp <= logf((var_1 - sinhf(+1.3758E22f)))) { comp = (var_4 - var_5); comp += (var_6 * (-1.1832E35f * var_7 * var_8)); for (int i=0; i < var_2; ++i) { comp = (var_9 - (var_10 + acosf(var_11 * -1.6475E-43f))); comp += asinf(+1.2860E-35f); comp = fmodf(atan2f((var_12 - (var_13 + -0.0f - (var_14 * var_15 / -1.7790E14f))), var_16 + +1.6107E36f / -0.0f / +1.8488E36f * +1.4295E36f), -1.8084E-37f); comp = -1.7217E35f / (+1.5718E-36f + (var_17 + (-1.0460E-41f - var_18))); } for (int i=0; i < var_3; ++i) { float tmp_1 = var_20 - -1.0851E34f; var_19[i] = (-1.6743E-44f * +0.0f / (-1.3048E34f * +1.4836E-37f)); comp += var_19[i] - tmp_1 / (var_21 + var_22); } if (comp == fabsf((+1.0207E-36f / -1.1486E-41f))) { comp = -1.3839E-41f + (+1.5019E36f + (var_23 + -0.0f)); float tmp_2 = (-0.0f / (+1.9028E35f * +1.3077E-35f)); comp += tmp_2 + powf(+1.9011E-44f, (var_24 / +1.1185E35f)); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); 
float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float* tmp_20 = initPointer( atof(argv[20]) ); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25); cudaDeviceSynchronize(); return 0; }
4,595
#include <iostream>
#include <cuda.h>

/**
 * Naive dense matrix multiply C = A * B for N x N row-major matrices.
 * One thread per output element; launched on a 2D grid of 2D blocks.
 */
__global__ void mmult(double* A, double* B, double* C, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if ((row < N) && (col < N)) {
        double dot = 0.0;
        for (int i = 0; i < N; ++i)
            dot += A[row * N + i] * B[col + i * N];
        C[row * N + col] = dot;
    }
}

// Abort the enclosing bool-returning function on any CUDA error.
// NOTE: the original macro returned -1, which converts to `true` in a
// bool function and so silently reported failure as success.
#define CUDA_CHECK(stmt) do {                                             \
        cudaError_t err = stmt;                                           \
        if (err != cudaSuccess) {                                         \
            std::cerr << "Failed to run statement: " << #stmt << ": "     \
                      << cudaGetErrorString(err) << std::endl;            \
            return false;                                                 \
        }                                                                 \
    } while (0)

/**
 * Multiply two N x N row-major double matrices on the GPU: C = A * B.
 * A, B, C are host pointers to N*N doubles each; C receives the result.
 * Returns true on success, false on any CUDA failure (device buffers
 * already allocated are leaked on the error path — acceptable for a
 * one-shot helper; use an RAII wrapper if called repeatedly).
 */
extern bool do_cuda_mmult(double* A, double* B, double* C, int N)
{
    double *deviceA = nullptr, *deviceB = nullptr, *deviceC = nullptr;
    size_t bytes = sizeof(double) * N * N;   // size_t avoids int overflow for large N

    CUDA_CHECK(cudaMalloc((void**)&deviceA, bytes));
    CUDA_CHECK(cudaMalloc((void**)&deviceB, bytes));
    CUDA_CHECK(cudaMalloc((void**)&deviceC, bytes));

    CUDA_CHECK(cudaMemcpy(deviceA, A, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(deviceB, B, bytes, cudaMemcpyHostToDevice));

    static unsigned int constexpr TILE_WIDTH = 8;
    // Parenthesized construction avoids the narrowing the original braced
    // initializer performed for the int -> unsigned grid dimensions.
    dim3 dimGrid((N - 1) / TILE_WIDTH + 1, (N - 1) / TILE_WIDTH + 1, 1u);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    mmult<<< dimGrid, dimBlock >>>(deviceA, deviceB, deviceC, N);
    CUDA_CHECK(cudaGetLastError());        // catch launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());   // cudaThreadSynchronize is deprecated

    CUDA_CHECK(cudaMemcpy(C, deviceC, bytes, cudaMemcpyDeviceToHost));

    // The original leaked all three device buffers on every call.
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    return true;
}
4,596
// CMPE297-6 HW2
// CUDA version Rabin-Karp
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <cuda_runtime.h>

// Capacities of the shared-memory staging buffers; callers must pass
// input_length <= MAX_INPUT_LEN and pattern_length <= MAX_PATTERN_LEN.
#define MAX_INPUT_LEN 20
#define MAX_PATTERN_LEN 10

/* Compare `pattern` against `input` starting at `index`.
 * Returns 0 on an exact match, 1 otherwise.
 * (Renamed from `memcpy` — the original name shadowed the C library
 * function while performing a comparison, not a copy.) */
__device__ int matchAt(const char* input, int index, const char* pattern, int pattern_length)
{
    for (int i = 0; i < pattern_length; i++)
        if (pattern[i] != input[index + i])
            return 1;
    return 0;
}

/* Rabin-Karp search, one thread per candidate start position.
 * Launch layout: a single block of (input_length - pattern_length + 1)
 * threads. Each thread hashes its window with the base-256 rolling hash
 * mod 997 and, on a hash hit, verifies character by character.
 * result[tid] = 1 on a match at position tid, else 0.
 * runtime[tid] receives that thread's elapsed clock64() cycles. */
__global__ void findIfExistsCu(char* input, int input_length, char* pattern, int pattern_length, int patHash, int* result, int *runtime)
{
    int start_time = clock64();
    int tid = threadIdx.x;

    __shared__ char inputStr[MAX_INPUT_LEN];
    __shared__ char patternStr[MAX_PATTERN_LEN];

    // Stage both strings into shared memory with strided loops so every
    // element is written exactly once regardless of thread count (the
    // original's tail-fill special cases are no longer needed).
    for (int p = tid; p < input_length; p += blockDim.x)
        inputStr[p] = input[p];
    for (int p = tid; p < pattern_length; p += blockDim.x)
        patternStr[p] = pattern[p];
    // Barrier before any thread reads elements written by other threads
    // (the original kernel raced here).
    __syncthreads();

    int inputHash = 0;
    for (int i = tid; i < tid + pattern_length; i++)
        inputHash = (inputHash * 256 + inputStr[i]) % 997;

    result[tid] = (inputHash == patHash &&
                   matchAt(inputStr, tid, patternStr, pattern_length) == 0) ? 1 : 0;

    int stop_time = clock64();
    runtime[tid] = (int)(stop_time - start_time);
}

int main()
{
    // host variables
    char input[] = "HEABAL";    /* Sample Input */
    char pattern[] = "AB";      /* Sample Pattern */
    int patHash = 0;            /* hash for the pattern */
    int* result;                /* Result array */
    int* runtime;               /* Execution cycles per thread */
    int pattern_length = 2;     /* Pattern Length */
    int input_length = 6;       /* Input Length */
    int match_times = input_length - pattern_length + 1; /* candidate positions */
    cudaError_t err = cudaSuccess;

    // device variables
    char* d_input;
    char* d_pattern;
    int* d_result;
    int* d_runtime;

    int runtime_size = match_times * sizeof(int);
    cudaMalloc((void**)&d_runtime, runtime_size);
    runtime = (int *) malloc(runtime_size);
    memset(runtime, 0, runtime_size);
    result = (int *) malloc(match_times * sizeof(int));

    /* Hash of the pattern: base-256 polynomial mod 997 */
    for (int i = 0; i < pattern_length; i++) {
        patHash = (patHash * 256 + pattern[i]) % 997;
    }
    printf("patHash %d \n", patHash);

    /* Allocate device memory and copy the inputs over */
    int size = input_length * sizeof(char);
    err = cudaMalloc((void**)&d_input, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device d_input(error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Copy input string from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_input, input, size, cudaMemcpyHostToDevice);

    size = pattern_length * sizeof(char);
    err = cudaMalloc((void**)&d_pattern, size);
    printf("Copy pattern string from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_pattern, pattern, size, cudaMemcpyHostToDevice);

    size = match_times * sizeof(int);
    err = cudaMalloc((void**)&d_result, size);

    /* Launch: one thread per candidate start position */
    int blocksPerGrid = 1;
    int threadsPerBlock = match_times;
    findIfExistsCu<<<blocksPerGrid, threadsPerBlock>>>(d_input, input_length, d_pattern, pattern_length, patHash, d_result, d_runtime);

    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();   // cudaThreadSynchronize is deprecated

    /* Copy the match flags back to the host */
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(result, d_result, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy result from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Copy the per-thread execution times back to the host */
    cudaMemcpy(runtime, d_runtime, runtime_size, cudaMemcpyDeviceToHost);

    /* Kernel time = slowest thread. The original loop stopped one short
     * of the last position; scan all match_times entries. */
    unsigned long long elapsed_time = 0;
    for (int i = 0; i < match_times; i++)
        if (elapsed_time < (unsigned long long)runtime[i])
            elapsed_time = runtime[i];

    // %llu matches unsigned long long (the original printed it with %d).
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);

    printf("Searching for a single pattern in a single string\nPrint at which position the pattern was found\n");
    printf("Input string: %s\n", input);
    printf("Pattern: %s\n", pattern);
    for (int i = 0; i < match_times; i++)
        printf("Pos: %d Result: %d\n", i, result[i]);

    // Free device memory
    cudaFree(d_input);
    cudaFree(d_pattern);
    cudaFree(d_result);
    cudaFree(d_runtime);

    // Free host memory
    free(result);
    free(runtime);

    return 0;
}
4,597
#include <cstdio>
#include <stdexcept>

// Smoke-test entry point: print a greeting and exit successfully.
// (The original called printf with only <stdexcept> included; <cstdio>
// is the header that actually declares printf.)
int main(int argc, char **argv)
{
    (void)argc;  // unused
    (void)argv;  // unused
    printf("haohaibo hhh\n");
    return 0;
}
4,598
#define PI_F 3.141592654f
#define TAU_F 6.283185307f

extern "C"
/**
 * For each Point stored in the supplied buffer apply brownian motion by
 * moving it `magnitude` units in a random direction.
 * Buffer layout (all kernels here): x coordinates in
 * pointBuffer[0..countPoints), y coordinates in
 * pointBuffer[countPoints..2*countPoints).
 * @param pointBuffer        point coordinates (layout above)
 * @param randomNumberBuffer one random value per point, scaled by TAU_F
 *                           to an angle (presumably uniform in [0,1) —
 *                           confirm against the generator)
 * @param countPoints        number of points
 * @param magnitude          step length
 */
__global__ void moveBrownian(float *pointBuffer, float *randomNumberBuffer, int countPoints, float magnitude)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= countPoints) return;

    float theta = randomNumberBuffer[index] * TAU_F;
    // cosf/sinf keep the math in single precision; the double cos/sin
    // overloads the original called force a costly promotion.
    pointBuffer[index]               += magnitude * cosf(theta);
    pointBuffer[index + countPoints] += magnitude * sinf(theta);
}

extern "C"
/**
 * For each Point apply an inverse-square force contributed by every other
 * point. O(countPoints) work per thread.
 * @param pointBuffer point coordinates (layout as in moveBrownian)
 * @param countPoints number of points
 * @param forceConst  force scale; positive values pull toward the other
 *                    point, negative values push away
 */
__global__ void applyForceField(float *pointBuffer, int countPoints, float forceConst)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= countPoints) return;

    float xAmt = 0.0f;
    float yAmt = 0.0f;
    for (int i = 0; i < countPoints; i++) {
        if (i == index) continue;
        float diffX = pointBuffer[i] - pointBuffer[index];
        float diffY = pointBuffer[i + countPoints] - pointBuffer[index + countPoints];
        // mag = forceConst / dist^2; use the squared distance directly
        // instead of sqrtf(powf(...)) followed by powf(dist, 2).
        float distSq = diffX * diffX + diffY * diffY;
        float mag = forceConst / distSq;
        float theta = atan2f(diffY, diffX);
        xAmt += mag * cosf(theta);
        yAmt += mag * sinf(theta);
    }
    pointBuffer[index] += xAmt;
    pointBuffer[index + countPoints] += yAmt;
}

extern "C"
/**
 * For each Point stored in the pointBuffer move it by the amount in the
 * moveByBuffer buffer. X and Y coordinates are moved independently.
 * @param pointBuffer  point coordinates (layout as in moveBrownian)
 * @param moveByBuffer per-point offsets, same x-then-y layout
 * @param countPoints  number of points
 */
__global__ void moveEach(float *pointBuffer, float *moveByBuffer, int countPoints)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= countPoints) return;

    int yIndex = index + countPoints;
    pointBuffer[index]  += moveByBuffer[index];
    pointBuffer[yIndex] += moveByBuffer[yIndex];
}

extern "C"
/**
 * For all Points stored in the pointBuffer move it by the supplied
 * distances.
 * @param pointBuffer point coordinates (layout as in moveBrownian)
 * @param xDistance   x translation applied to every point
 * @param yDistance   y translation applied to every point
 * @param countPoints number of points
 */
__global__ void moveAll(float *pointBuffer, float xDistance, float yDistance, int countPoints)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= countPoints) return;

    pointBuffer[index]               += xDistance;
    pointBuffer[index + countPoints] += yDistance;
}
4,599
#include <iostream>
#include <cstdlib>
#include "cu_nbmath.cuh"

using namespace std;

// Store A[i] + B into result[i] for each of the sizeA elements.
__device__ void vecAdd(double* result, double* A, double B, int sizeA){
    int idx = 0;
    while (idx < sizeA) {
        result[idx] = A[idx] + B;
        ++idx;
    }
}

// Store A[i] * B into result[i] for each of the sizeA elements.
__device__ void vecMult(double* result, double* A, double B, int sizeA){
    int idx = 0;
    while (idx < sizeA) {
        result[idx] = A[idx] * B;
        ++idx;
    }
}

// Store A[i] / B into result[i] for each of the sizeA elements.
__device__ void vecDiv(double* result, double* A, double B, int sizeA){
    int idx = 0;
    while (idx < sizeA) {
        result[idx] = A[idx] / B;
        ++idx;
    }
}

// Store A[i] + B[i] into result[i] for each of the size elements.
__device__ void elementWiseAdd(double* result, double* A, double* B, int size){
    int idx = 0;
    while (idx < size) {
        result[idx] = A[idx] + B[idx];
        ++idx;
    }
}
4,600
#include "includes.h"

/**
 * Apply window `win` to `inframe` with a circular rotation of `offset`
 * samples, writing into `inframe2`:
 *     inframe2[k] = win[k] * inframe[(k + offset) mod N]
 * One thread per output sample. The grid may be padded past N, so each
 * thread is bounds-guarded (the original wrote out of bounds for k >= N).
 * NOTE(review): assumes offset >= 0 — a negative offset would make the
 * C modulo result negative and index out of bounds; confirm callers.
 */
__global__ void winrotate(float* inframe2, float* inframe, float *win, int N, int offset){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k < N) {
        inframe2[k] = win[k] * inframe[(k + offset) % N];
    }
}