serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
1,501
#include "includes.h"

// Finds, for each batch item (blockIdx.z), the index of the most active
// neuron in exampleFiringRate and writes it to mostActiveId[blockIdx.z].
//
// Launch expectations (inferred from the code — confirm at call site):
//  - 1D thread block; gridDim.z == batch size
//  - dynamic shared memory: blockDim.x * sizeof(unsigned int)
//  - blockDim.x should be a power of two (the tree reduction halves it)
//
// Fix over the original: the per-thread initialization loop wrote the same
// shared slot once per stride iteration, and the argmax scan re-read the
// current best from global memory on every comparison; the scan now tracks
// the best value/index in registers and stores to shared memory once.
__global__ void cudaUpdateMostActive_kernel(unsigned int * exampleFiringRate, unsigned int * mostActiveId, unsigned int inputsDimX, unsigned int inputsDimY, unsigned int inputsDimZ)
{
    const unsigned int inputSize = inputsDimZ * inputsDimX * inputsDimY;
    const unsigned int batchInputOffset = blockIdx.z * inputSize;

    extern __shared__ unsigned int partialActiveIdx[];

    // Per-thread argmax over a strided slice of the input. Index 0 is also
    // the fallback for threads with no element (threadIdx.x >= inputSize),
    // which gives index 0 a slight advantage on ties (as in the original).
    unsigned int bestIdx = (threadIdx.x < inputSize) ? threadIdx.x : 0;
    unsigned int bestVal = exampleFiringRate[bestIdx + batchInputOffset];

    for (unsigned int i = threadIdx.x; i < inputSize; i += blockDim.x) {
        const unsigned int v = exampleFiringRate[i + batchInputOffset];
        if (v > bestVal) {
            bestVal = v;
            bestIdx = i;
        }
    }

    partialActiveIdx[threadIdx.x] = bestIdx;
    __syncthreads();

    // Tree reduction over the per-thread candidates; strict '<' keeps the
    // left (lower-index) candidate on ties, matching the scan above.
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if (threadIdx.x < offset) {
            if (exampleFiringRate[partialActiveIdx[threadIdx.x] + batchInputOffset]
                < exampleFiringRate[partialActiveIdx[threadIdx.x + offset] + batchInputOffset]) {
                partialActiveIdx[threadIdx.x] = partialActiveIdx[threadIdx.x + offset];
            }
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        mostActiveId[blockIdx.z] = partialActiveIdx[0];
    }
}
1,502
/******************************************************************************** * * Copyright (C) 2009-2011 Bauhaus University Weimar * ********************************************************************************* * * module : splat_volume_samples.cu * project : gpucast * description: * ********************************************************************************/
1,503
#include "includes.h"

// Writes a + b + i into ret[i] for every thread i of the (1D) block.
// Expects ret to hold at least blockDim.x ints.
__global__ void AplusB(int *ret, int a, int b)
{
    const int i = threadIdx.x;
    const int base = a + b;
    ret[i] = base + i;
}
1,504
#include<iostream>
#include<fstream>
#include<math.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<curand.h>

// Simple Ant Colony Optimization (SACO) TSP solver: reads city coordinates
// from a file, runs MAX_TIME construction/update rounds on the GPU, and
// prints the best tour found. Host globals mirror the d_* device buffers.
#define MAX_CITIES 318
#define MAX_ANTS 318
#define Q 100        // pheromone deposit constant
#define ALPHA 1.0    // pheromone influence exponent
#define BETA 5.0     // heuristic (1/distance) influence exponent
#define RHO 0.5      // evaporation rate

using namespace std;

int n=0;             // number of cities read from the input file
int NC = 0;          // completed round counter
int t = 0;
struct cities { int x,y; };   // city coordinates
int s;
struct ants{
    int curCity, nextCity;
    int visited[MAX_CITIES];  // 0/1 flag per city
    int tabu[MAX_CITIES];     // visiting order of the current tour
    float L;                  // accumulated tour length
};
cities city[MAX_CITIES];
float pheromone[MAX_CITIES][MAX_CITIES];
float dist[MAX_CITIES][MAX_CITIES];
ants ant[MAX_ANTS];
int best=999999;              // best (shortest) tour length so far, as int
int bestIndex;                // index of the ant that found it
float delta[MAX_CITIES][MAX_CITIES];    // per-round pheromone deposits
float fitness[MAX_CITIES][MAX_CITIES];
curandState state[MAX_ANTS];

// One thread per (row, col): zeroes deltas, sets uniform pheromone 1/n and
// the Euclidean distance matrix (diagonal stays 0).
__global__ void initialize(float *d_dist,float *d_pheromone,float *d_delta,cities *d_city,int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if((row<n)&&(col<n)){
        d_dist[col + row * n] = 0.0f;
        d_pheromone[col + row * n] = 1.0f / n;
        d_delta[col + row * n] = 0.0f;
        if(row!=col)
        {
            d_dist[col + row * n]=sqrt(powf(abs(d_city[row].x-d_city[col].x),2)+powf(abs(d_city[row].y-d_city[col].y),2));
        }
    }
}

// Seeds one curand generator per thread from the host-supplied seed t.
__global__ void setup_curand_states(curandState *state_d,int t){
    int id = threadIdx.x + blockIdx.x*blockDim.x;
    curand_init(t, id, 0, &state_d[id]);
}

// Draws a uniform float in (0,1] from generator `ind`, persisting its state.
__device__ float generate(curandState* globalState, int ind){
    //int ind = threadIdx.x;
    curandState localState = globalState[ind];
    float RANDOM = curand_uniform( &localState );
    globalState[ind] = localState;
    return RANDOM;
}

// Resets ant `id`: starts its tour at city `id` with nothing else visited.
__global__ void initTour(ants *d_ant,int n){
    //cout << "inside init tour" << endl;
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id<n){
        int j = id;
        d_ant[id].curCity = j;
        for(int i=0;i<n;i++)
        {
            d_ant[id].visited[i]=0;
        }
        d_ant[id].visited[j] = 1;
        d_ant[id].tabu[0] = j;
        d_ant[id].L = 0.0;
    }
}

// fitness(i,j) = pheromone^ALPHA * (1/dist)^BETA, the edge attractiveness.
// Diagonal entries divide by dist == 0, yielding inf; those edges are never
// selectable because a city is marked visited before it can be chosen.
__global__ void calcFitness(float *d_fitness, float *d_dist, float *pheromone, int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(row < n && col < n){
        int id = row * n + col;
        d_fitness[id] = powf( pheromone[id], ALPHA) * powf( (1.0/ d_dist[id]), BETA);
    }
}

// Roulette-wheel selection of ant k's next city: repeatedly proposes
// unvisited cities until one passes an acceptance test proportional to its
// fitness share. NOTE(review): all ants starting from city i share RNG
// stream `i` here (generate(state_d, i)), not per-ant streams — confirm.
__device__ int selectNextCity(int k,int n,float *d_fitness,ants *d_ant,curandState *state_d)
{
    //cout<<"next city"<<endl;
    int i = d_ant[k].curCity;
    int j;
    double prod=0.0;
    // Total fitness of all still-unvisited destinations (normalizer).
    for(j=0;j<n;j++)
    {
        if(d_ant[k].visited[j]==0)
        {
            prod+= d_fitness[i*n+j];
        }
    }
    // Rejection sampling; loops forever only if no unvisited city remains,
    // which the caller avoids by selecting exactly n-1 times.
    while(1)
    {
        j++;
        if(j >= n) j=0;
        if(d_ant[k].visited[j] == 0)
        {
            float p = d_fitness[i*n+j]/prod;
            float x = (float)generate(state_d,i);
            if(x < p)
            {
                break;
            }
        }
    }
    return j;
}

// One thread per ant: builds a complete n-city tour, accumulating length L.
__global__ void tourConstruction(ants *d_ant, float *d_dist, float *d_fitness,int n,curandState *state_d)
{
    //printf("tour contruction\n");
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < n){
        for(int s=1;s<n;s++)
        {
            int j = selectNextCity(id, n, d_fitness,d_ant,state_d);
            d_ant[id].nextCity = j;
            d_ant[id].visited[j]=1;
            d_ant[id].tabu[s] = j;
            d_ant[id].L+=d_dist[d_ant[id].curCity * n + j];
            d_ant[id].curCity = j;
        }
    }
}

// Closes each ant's tour (edge back to the start), updates the global best,
// and deposits Q/L pheromone on every edge of the tour.
// NOTE(review): atomicMin takes int* — ant[k].L (float) is implicitly
// truncated to int here; the subsequent *best read races with other threads,
// so bestIndex may not correspond to the true minimum. The delta "+="
// is also non-atomic across ants sharing an edge. TODO confirm intent.
__global__ void wrapUpTour(float *delta, ants *ant,float *dist, int *best, int *bestIndex){
    //printf("wrap tour\n");
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k < MAX_ANTS){
        ant[k].L += dist[ant[k].curCity * MAX_CITIES + ant[k].tabu[0]];
        ant[k].curCity = ant[k].tabu[0];
        int temp = *best;
        printf("before best %d\n", *best);
        atomicMin(best, ant[k].L);
        printf("after best %d\n", *best);
        if (*best!= temp){
            *bestIndex = k;
        }
        for(int i = 0; i < MAX_CITIES;i++){
            int first = ant[k].tabu[i];
            int second = ant[k].tabu[(i + 1) % MAX_CITIES];
            delta[first * MAX_CITIES + second] += Q/ant[k].L;
        }
    }
}

// One thread per row: evaporates pheromone by RHO on off-diagonal edges
// (re-seeding with 1/n if it ever drops below 0 — evaporation alone cannot
// do that, so this is a defensive floor), then adds and clears the deposits.
__global__ void updatePheromone(float *d_pheromone, float *d_delta, int n){
    //printf("inside update phero\n");
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < n){
        for(int s=0;s<n;s++){
            if(id!=s)
            {
                d_pheromone[id*n+s] *=( 1.0 - RHO);
                if(d_pheromone[id*n+s]<0.0)
                {
                    d_pheromone[id*n+s] = (1.0/n);
                }
            }
            d_pheromone[id*n+s] += d_delta[id*n+s];
            d_delta[id*n+s] = 0;
        }
    }
}

// Clears each ant's tabu list and visited flags for the next round.
__global__ void emptyTabu(ants *d_ant,float *d_delta,int n){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < n){
        //printf("Empty Tabu\n");
        for(int s=0;s<n;s++){
            d_ant[id].tabu[s] = 0;
            d_ant[id].visited[s] = 0;
        }
    }
}

// Driver: reads the instance, allocates device state, runs MAX_TIME rounds
// of construct/evaluate/update, then prints the best tour.
int main(int argc, char *argv[])
{
    if (argc > 1){
        cout << "Reading File "<< argv[1]<<endl;
    }
    else{
        cout << "Usage:progname inputFileName" <<endl;
        return 1;
    }
    ifstream in;
    in.open(argv[1]);
    in>>n;
    cout<<n<<endl;
    int num;
    // Input format per city: index, x, y.
    for(int i=0;i<n;i++)
    {
        in>>num;
        in>>city[i].x;
        in>>city[i].y;
        cout<<city[i].x<<" "<<city[i].y<<" "<<endl;
    }
    dim3 blockDim(32, 32, 1);
    dim3 gridDim((n - 1)/ 32 + 1, (n - 1)/ 32 + 1, 1 );
    float *d_dist,*d_pheromone,*d_delta,*d_fitness;
    ants *d_ant;
    cities *d_city;
    curandState *state_d;
    int *d_best, *d_bestIndex;
    cudaMalloc((void**)&d_pheromone, sizeof(float) * n * n);
    cudaMalloc((void**)&d_dist, sizeof(float) * n * n);
    cudaMalloc((void**)&d_delta, sizeof(float) * n * n);
    cudaMalloc((void**)&d_ant, sizeof(ants) * n);
    cudaMalloc((void**)&d_city, sizeof(cities) * n);
    cudaMalloc((void**)&d_fitness, sizeof(float) * n *n);
    // NOTE(review): sizeof(state) is the size of the host global array
    // (MAX_ANTS * sizeof(curandState)) — works, but brittle.
    cudaMalloc( (void**) &state_d, sizeof(state));
    cudaMalloc((void **)&d_best, sizeof(int));
    cudaMalloc((void **)&d_bestIndex, sizeof(int));
    cudaMemcpy(d_city,city,sizeof(cities) * n,cudaMemcpyHostToDevice);
    srand(time(0));
    cudaMemcpy(d_best, &best, sizeof(int), cudaMemcpyHostToDevice);
    int seed = rand();
    setup_curand_states <<< (n-1)/32+1,32 >>> (state_d,seed);
    initialize<<<gridDim, blockDim>>>(d_dist,d_pheromone,d_delta,d_city,n);
    cudaMemcpy(dist,d_dist,sizeof(float) * n * n,cudaMemcpyDeviceToHost);
    cudaMemcpy(pheromone,d_pheromone,sizeof(float) * n * n,cudaMemcpyDeviceToHost);
    cudaMemcpy(delta,d_delta,sizeof(float) * n * n,cudaMemcpyDeviceToHost);
    int MAX_TIME = 20;
    for(;;)
    {
        initTour<<<(n-1)/32+1,32>>>(d_ant,n);
        cudaThreadSynchronize();
        calcFitness<<< gridDim, blockDim>>>(d_fitness, d_dist, d_pheromone, n);
        cudaThreadSynchronize();
        tourConstruction<<<(n-1)/32+1,32>>>(d_ant,d_dist,d_fitness,n,state_d);
        cudaThreadSynchronize();
        // NOTE(review): ants are copied back BEFORE wrapUpTour runs, so the
        // host-side ant[].L excludes the closing edge (tabu[] is complete).
        cudaMemcpy(ant,d_ant,sizeof(ants) * n,cudaMemcpyDeviceToHost);
        wrapUpTour<<<(n - 1)/32 + 1, 32>>>(d_delta, d_ant, d_dist, d_best, d_bestIndex);
        updatePheromone<<< (n-1)/32+1,32>>>(d_pheromone,d_delta,n);
        cudaThreadSynchronize();
        t += MAX_ANTS;
        NC += 1;
        if(NC < MAX_TIME){
            emptyTabu<<<(n-1)/32+1,32>>>(d_ant,d_delta,n);
            cudaMemcpy(&best, d_best, sizeof(int), cudaMemcpyDeviceToHost);
            cout<<"Best Tour so far --> "<<best<<endl;
            cudaThreadSynchronize();
        }
        else{
            break;
        }
    }
    cout<<endl;
    cudaMemcpy(&best, d_best, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&bestIndex, d_bestIndex, sizeof(int), cudaMemcpyDeviceToHost);
    for(int i=0;i<n;i++)
    {
        cout<<ant[bestIndex].tabu[i]<<" ";
    }
    cout<<endl;
    cout<<"\nSACO: Best tour = "<<best<<endl<<endl<<endl;
    return 0;
}
1,505
#include "includes.h"

// Deliberately racy counter: every thread performs a non-atomic
// read-modify-write on *shared_var (once up front, then `iters` more times)
// and records the value it first observed in values_read[tid]. The lost
// updates are the point of this kernel — it demonstrates an unsafe
// increment, in contrast to an atomic version.
__global__ void unsafe(int *shared_var, int *values_read, int N, int iters)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;

    // Initial unprotected increment; remember what we saw.
    const int observed = *shared_var;
    *shared_var = observed + 1;
    values_read[tid] = observed;

    // Repeat the unprotected increment `iters` more times.
    for (int step = 0; step < iters; ++step)
    {
        const int v = *shared_var;
        *shared_var = v + 1;
    }
}
1,506
#include<bits/stdc++.h>
using namespace std;

// Explicit time-stepping solver for a 2D diffusion problem on an
// (n_grid+1) x (n_grid+1) square, with Dirichlet boundaries given by
// left/right/bottom/top and the known analytical solution for error checks.
const double pi = 3.14159265358979323846264;
const double L = 100.;     // side length of the square domain
const double Diff = 1.;    // diffusion coefficient
// NOTE(review): CUDA's per-block thread limit is 1024 — this 1048 looks like
// a typo; block sizes in (1024, 1048] would pass the assert but fail to
// launch. TODO confirm.
const int MAX_BLOCK_SIZE = 1048;

/*
 * Coordinate system:
 *
 *   -|---------------- y
 *    |
 *    |    x = i * d_x
 *    |    y = j * d_x
 *    |
 *    x
 */

// Dirichlet boundary values and the exact steady-state solution.
inline double left(double y) { return 0; }
inline double right(double y) { return 0; }
inline double bottom(double x) { return 0; }
inline double top(double x){ return sinh(pi)*sin(x*pi/L); }
inline double analytical(double x,double y){ return sinh(y*pi/L)*sin(x*pi/L); }

// One explicit Euler step on the interior (N-1)x(N-1) points, one thread per
// point. Uses an 8-neighbour (9-point) Laplacian: sum of the 8 neighbours
// minus 8x the centre, scaled by 1/(3*dx^2). Reads from `old`, accumulates
// into `cur` in place (cur starts as a copy of old — see oneStep).
__global__ void oneIteration(int N, double *cur, double *old, double delta_x,double delta_t){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int dX[8] = {0, 0, 1, -1, 1, -1, 1, -1};
    int dY[8] = {1, -1, 0, 0, 1, 1, -1, -1};
    assert(N > 1);
    if(idx < (N-1)*(N-1)){
        // Map the flat index to interior grid coordinates (1..N-1, 1..N-1).
        int i = idx/(N-1) + 1, j = idx%(N-1) + 1;
        double d_val = 0;
        for(int k=0;k<8;++k) d_val += old[(i+dX[k])*(N+1) + j+dY[k]];
        cur[i*(N+1) + j] += Diff*delta_t*(d_val - 8.*old[i*(N+1) + j])/3./pow(delta_x, 2.);
    }
    return;
}

class DiffEqnSolver{
    // val:  host grid as row pointers into one contiguous block (val[0]).
    // cur/old: DEVICE buffers of the same flattened size (despite plain
    // double* types) — only ever touched via cudaMemcpy/cudaFree/kernels.
    int n_grid, array_size, block_size, n_block;
    double d_x, **val, *cur, *old;
public:
    // N: number of grid intervals per side; b_size: CUDA block size.
    DiffEqnSolver(int N,int b_size):n_grid(N), block_size(b_size){
        assert(block_size > 0 && block_size <= MAX_BLOCK_SIZE);
        d_x = L/n_grid;
        n_block = ((n_grid-1)*(n_grid-1) + block_size - 1)/block_size;  // ceil-div
        array_size = (n_grid+1)*(n_grid+1);
        // Contiguous host grid with row-pointer table for val[i][j] access.
        val = new double* [n_grid + 1];
        val[0] = new double [array_size];
        for(int i=1;i<=n_grid;++i) val[i] = val[i-1] + n_grid + 1;
        cudaMalloc((void **)&cur, array_size*sizeof(double));
        cudaMalloc((void **)&old, array_size*sizeof(double));
        // Dirichlet boundary values (corners end up with the last writer).
        for(int i=0;i<=n_grid;++i){
            val[0][i] = left(i*d_x);
            val[n_grid][i] = right(i*d_x);
            val[i][0] = bottom(i*d_x);
            val[i][n_grid] = top(i*d_x);
        }
    }
    // Fills the interior with a constant initial guess.
    void init(double init_val){
        // Initialize the grid
        for(int i=1;i<n_grid;++i) for(int j=1;j<n_grid;++j) val[i][j] = init_val;
    }
    // L2 norm of the deviation from the analytical solution over all points.
    double getError(){
        // Using L2 norm
        double sum = 0.;
        for(int i=0;i<=n_grid;++i) for(int j=0;j<=n_grid;++j) sum += pow(val[i][j] - analytical(i*d_x, j*d_x),2.);
        return sqrt(sum);
    }
    // One time step: upload the grid to both device buffers, run the update
    // kernel (cur += stencil(old)), and download the result. The final
    // synchronous cudaMemcpy also waits for the kernel to finish.
    void oneStep( double d_t){
        cudaMemcpy(old, val[0], array_size*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(cur, val[0], array_size*sizeof(double), cudaMemcpyHostToDevice);
        oneIteration<<<n_block, block_size>>>(n_grid, cur, old, d_x, d_t);
        cudaMemcpy(val[0], cur, array_size*sizeof(double), cudaMemcpyDeviceToHost);
    }
    // Runs N_step time steps and returns the resulting L2 error.
    double runIterations(int N_step, double d_t){
        for(int t=0;t<N_step;++t) oneStep(d_t);
        return getError();
    }
    ~DiffEqnSolver(){
        delete [] val[0];
        delete [] val;
        cudaFree(cur);
        cudaFree(old);
    }
};

// Driver: optional argv[1] overrides the CUDA block size, then runs
// n_batch batches of n_step steps, reporting the error after each batch.
int main(int argc, char *argv[]){
    int block_size = 33;
    if(argc > 1) block_size = stoi(argv[1]);
    DiffEqnSolver solver(100, block_size);
    solver.init(1.);
    int n_batch = 20, n_step = 1000;
    double dt = 0.5;
    cout<<setprecision(3);
    cout<<"Start running iterations:"<<endl;
    clock_t start_time = clock(), end_time;
    for(int i=1;i<=n_batch;++i) cout<<"Iteration: "<<i<<"\t error:"<<solver.runIterations(n_step, dt)<<endl;
    end_time = clock();
    cout<<"End running iterations!"<<endl<<endl;
    cout<<"Time spent during iterations: "<<double(end_time-start_time)/CLOCKS_PER_SEC<<"s\n\n\n";
    cout<<"================================================================================"<<endl;
    return 0;
}
1,507
#include <stdio.h>

/*
 * Note that the `N` argument from the earlier version is gone.
 */
__global__ void loop()
{
    /*
     * This kernel performs only one "iteration" of the original for loop.
     * Which iteration this thread is executing can be seen from
     * `threadIdx.x`.
     */
    printf("This is iteration number %d\n", threadIdx.x);
}

int main()
{
    /*
     * The execution configuration sets the number of loop "iterations":
     * one block of 10 threads, one iteration per thread.
     */
    loop<<<1, 10>>>();
    cudaDeviceSynchronize();
}
1,508
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>

#define BLOCK_SIZE 16

// C = A * B for row-major matrices: A is m x n, B is n x k, C is m x k.
// One thread per output element; threads outside the matrix do nothing.
__global__ void gpu_matrix_multiply(float* a, float* b, float* c, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if (col < k && row < m)
    {
        float sum = 0;
        for (int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}

// Host wrapper: allocates device buffers, copies the inputs, launches the
// kernel on a 16x16 grid, and copies the m x k result back into c.
extern "C" void gpu_matrix_mult(float* a, float* b, float* c, int m, int n, int k)
{
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(float) * m * n);
    cudaMalloc((void **) &d_b, sizeof(float) * n * k);
    cudaMalloc((void **) &d_c, sizeof(float) * m * k);

    cudaMemcpy(d_a, a, sizeof(float) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * n * k, cudaMemcpyHostToDevice);
    // Fix: no host-to-device copy of c — the kernel overwrites every element
    // of d_c, so the original upload of c was pure wasted bandwidth.

    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;   // ceil-div
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    gpu_matrix_multiply<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);

    // Fix: the original stored this status in an unused variable and
    // discarded it; report failures (this synchronous copy also surfaces
    // asynchronous kernel errors).
    cudaError_t err = cudaMemcpy(c, d_c, sizeof(float) * m * k, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "gpu_matrix_mult: %s\n", cudaGetErrorString(err));
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
1,509
// C = A * B for square, row-major N x N matrices. One thread per output
// element on a 2D launch grid.
extern "C" __global__ void matrixMultiplicationKernel(double* A, double* B, double* C, long N) {
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;

    if (ROW < N && COL < N) {
        // each thread computes one element of the block sub-matrix
        double tmpSum = 0.0;
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // Fix: this store was OUTSIDE the bounds check, so threads in the
        // grid's ragged edge wrote past the end of C (out-of-bounds write).
        C[ROW * N + COL] = tmpSum;
    }
}
1,510
#include "includes.h"

// Downsamples `target` (wt x ht, interleaved RGB floats) by a factor of
// sRate into `subT` (ws x hs, accumulated with +=) by averaging each
// sRate x sRate window, and ORs the thresholded mask (> 127) over the same
// window into subM. One thread per destination sample cell.
__global__ void CalculateSampleT( const float *target, const float *mask, float *subT, int *subM, const int wt, const int ht, const int ws, const int hs, const int sRate )
{
    const int ys = blockIdx.y * blockDim.y + threadIdx.y;
    const int xs = blockIdx.x * blockDim.x + threadIdx.x;
    const int curst = ws * ys + xs;

    if (ys < hs && xs < ws){
        const int yt = ys * sRate;
        const int xt = xs * sRate;
        int num = 0;
        float sum[3] = {0};
        int _or = 0;

        // Accumulate the source pixels that fall inside this sample cell,
        // clipping the window at the image border.
        for (int i = 0; i < sRate; i++){
            for (int j = 0; j < sRate; j++){
                if (yt + i < ht && xt + j < wt){
                    int curt = wt * (yt + i) + (xt + j);
                    sum[0] += target[curt * 3 + 0];
                    sum[1] += target[curt * 3 + 1];
                    sum[2] += target[curt * 3 + 2];
                    _or |= (mask[curt] > 127.0f);
                    num++;
                }
            }
        }

        subM[curst] = _or;

        // Fix: guard against num == 0 (a sample cell lying entirely outside
        // the source image) — the original divided by zero here and
        // accumulated NaN/Inf into subT.
        if (num > 0) {
            subT[curst * 3 + 0] += sum[0] / num;
            subT[curst * 3 + 1] += sum[1] / num;
            subT[curst * 3 + 2] += sum[2] / num;
        }
    }
}
1,511
#include "includes.h"

// Inverts a row coloring: color c in [1, max_color] becomes
// max_color - c + 1, so 1 <-> max_color. Non-positive entries (uncolored
// rows) are written back unchanged. Grid-stride loop, so any launch
// configuration covers all num_rows rows.
__global__ void reverse_colors_kernel(int num_rows, int max_color, int *row_colors)
{
    const int stride = blockDim.x * gridDim.x;
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += stride)
    {
        const int c = row_colors[row];
        row_colors[row] = (c > 0) ? (max_color - c + 1) : c;
    }
}
1,512
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
#include<cstdlib> //for abs(x)
#include<stdio.h>
using namespace std;

// Grid-stride max reduction over n ints with a global spin-lock merge.
__global__ void findMax(int* A,int* current_max,int* mutex,unsigned int n);

int main()
{
    const int NUMBER_OF_ELEMENTS = 1024*1024*20;

    int* hostA = (int*)malloc(NUMBER_OF_ELEMENTS*sizeof(int));
    int* hostMax = (int*)malloc(sizeof(int));
    *hostMax = -1;
    srand(time(0));

    // Initialize the host vector with pseudo-random elements.
    // Fix: the original computed NUMBER_OF_ELEMENTS*rand() in int, which
    // overflows (UB); do the intermediate product in 64 bits.
    for(int i=0;i<NUMBER_OF_ELEMENTS;i++)
    {
        hostA[i] = (int)((long long)NUMBER_OF_ELEMENTS * rand() / RAND_MAX / 123);
    }

    int* deviceA,*deviceMax,*deviceMutex;
    cudaMalloc(&deviceA,NUMBER_OF_ELEMENTS*sizeof(int));
    cudaMalloc(&deviceMax,sizeof(int));
    cudaMalloc(&deviceMutex,sizeof(int));
    cudaMemset(deviceMax,-1,sizeof(int));   // byte pattern 0xFFFFFFFF == -1
    cudaMemset(deviceMutex,0,sizeof(int));  // unlocked
    cudaMemcpy(deviceA,hostA,NUMBER_OF_ELEMENTS*sizeof(int),cudaMemcpyHostToDevice);

    // Time the GPU reduction with CUDA events.
    float gpu_elapsed_time;
    cudaEvent_t gpu_start,gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start,0);

    findMax<<<256,256>>>(deviceA,deviceMax,deviceMutex,NUMBER_OF_ELEMENTS);
    cudaDeviceSynchronize();
    cudaMemcpy(hostMax,deviceMax,sizeof(int),cudaMemcpyDeviceToHost);

    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);

    cout<<"Answer by CUDA for MAX is = "<<*hostMax<<endl;
    std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;

    // CPU reference pass for validation and timing comparison.
    clock_t cpu_start = clock();
    int maxx = -1;
    for(int i=0;i<NUMBER_OF_ELEMENTS;i++)
    {
        if(hostA[i]>maxx) maxx = hostA[i];
    }
    clock_t cpu_stop = clock();
    clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
    cout<<"Expected max value is = "<<maxx<<endl;
    std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;

    cudaFree(deviceA);
    cudaFree(deviceMax);     // fix: was leaked
    cudaFree(deviceMutex);   // fix: was leaked
    free(hostA);             // fix: malloc'd memory was freed with delete[] (UB)
    free(hostMax);           // fix: was leaked
    return cudaDeviceSynchronize();
}

// Per-thread grid-stride scan -> shared-memory tree reduction -> one
// mutex-protected merge into *current_max per block.
// Launch expectations: blockDim.x == 256 (shared cache size) and a power of 2.
__global__ void findMax(int* A,int* current_max,int* mutex,unsigned int n)
{
    unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
    unsigned int stride = gridDim.x*blockDim.x;
    __shared__ int cache[256];

    // Per-thread maximum over a strided slice of A.
    // Fix: integer max instead of fmaxf — the original round-tripped ints
    // through float, which loses precision above 2^24.
    int temp = -1;
    for(unsigned int i = index; i < n; i += stride)
    {
        temp = max(temp, A[i]);
    }
    cache[threadIdx.x] = temp;
    __syncthreads();

    // Block-level tree reduction (blockDim.x must be a power of 2).
    for(unsigned int i = blockDim.x/2; i != 0; i /= 2)
    {
        if(threadIdx.x < i)
        {
            cache[threadIdx.x] = max(cache[threadIdx.x], cache[threadIdx.x+i]);
        }
        __syncthreads();
    }

    // Fix: in the original, the if-body was only the spin loop (missing
    // braces), so EVERY thread then updated *current_max and released the
    // mutex, racing. Only thread 0 of each block enters the critical section.
    if(threadIdx.x == 0)
    {
        while(atomicCAS(mutex,0,1) != 0);                 // acquire lock
        *current_max = max(*current_max, cache[0]);
        __threadfence();                                  // publish before unlock
        atomicExch(mutex,0);                              // release lock
    }
}
1,513
#include "includes.h"

// Sequential-read bandwidth/validation kernel: sweeps `buffer` `reps` times
// expecting every element to be zero; if any nonzero value is seen, the
// mismatch count is written to buffer[0]. Reads are hand-unrolled four wide
// so each thread keeps several independent loads in flight.
__global__ void gpu_seqrd_kernel(int *buffer, size_t reps, size_t elements)
{
    int mismatches = 0;

    for(size_t pass = 0; pass < reps; pass++)
    {
        size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
        const size_t stride = blockDim.x * gridDim.x;
        while(idx < elements)
        {
            // Four independent loads per trip; out-of-range lanes read 0.
            const int v1 = buffer[idx];
            idx += stride;
            const int v2 = (idx < elements) ? buffer[idx] : 0;
            idx += stride;
            const int v3 = (idx < elements) ? buffer[idx] : 0;
            idx += stride;
            const int v4 = (idx < elements) ? buffer[idx] : 0;
            idx += stride;

            // Check all four reads.
            mismatches += (v1 != 0) + (v2 != 0) + (v3 != 0) + (v4 != 0);
        }
    }

    if(mismatches > 0)
        buffer[0] = mismatches;
}
1,514
#include "includes.h"

// Sums the first l1 ints of g_idata in place, leaving the total in
// g_idata[0]. Threads past l1 contribute the additive identity 0.
// l2 is the reduction width and must be a power of 2 (per the original).
// Requires blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
__global__ static void reduce(int *g_idata, int l1, int l2)
{
    extern __shared__ unsigned int sdata[];
    const unsigned int tid = threadIdx.x;

    // Stage input into shared memory, zero-padding beyond l1.
    sdata[tid] = (tid < l1) ? g_idata[tid] : 0;
    __syncthreads();

    // Pairwise tree reduction.
    for (unsigned int half = l2 / 2; half > 0; half >>= 1)
    {
        if (tid < half)
        {
            sdata[tid] += sdata[tid + half];
        }
        __syncthreads();
    }

    if (tid == 0)
    {
        g_idata[0] = sdata[0];
    }
}
1,515
// #CSCS CUDA Training — Example 2.1: sum two 1D vectors using fewer GPU
// threads than there are output elements.
//
// Author: Ugo Varetto
//
// Each thread walks the vectors with a grid-wide stride, making the kernel
// independent of the domain layout: any elements beyond the launched thread
// count are picked up by later iterations of the per-thread loop, so a
// remainder in the blocks division is harmless.
//
// Typical flow: compute launch configuration -> allocate host and device
// storage -> copy inputs to the device -> launch -> copy the result back ->
// print -> free memory.
//
// Compilation:  nvcc -arch=sm_13 2_1_sum-vectors.cu -o sum-vectors-2
// Execution:    ./sum-vectors-2
//
// Notes: kernel invocations are always asynchronous; the synchronous
// cudaMemcpy(..., cudaMemcpyDeviceToHost) below guarantees the kernel has
// finished before the result is read. -arch=sm_13 targets GT200 or newer
// and is the lowest architecture with double-precision support.

//#include <cuda_runtime.h> // automatically added by nvcc

#include <vector>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>

typedef float real_t;

// out[i] = v1[i] + v2[i]. The grid may be smaller than the vector, so each
// thread strides by the total thread count until the domain is covered.
__global__ void sum_vectors( const real_t* v1, const real_t* v2, real_t* out, size_t num_elements ) {
    int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    while( xIndex < num_elements ) {
        out[ xIndex ] = v1[ xIndex ] + v2[ xIndex ];
        xIndex += gridDim.x * blockDim.x;
    }
}

//------------------------------------------------------------------------------
int main( int , char** ) {
    const int VECTOR_SIZE = 0x10000 + 1;               // 65537 elements
    const int MAX_NUMBER_OF_THREADS = VECTOR_SIZE / 5; // deliberately fewer threads than elements
    const int SIZE = sizeof( real_t ) * VECTOR_SIZE;   // total size in bytes
    const int THREADS_PER_BLOCK = 32;                  // gpu threads per block
    const int NUMBER_OF_BLOCKS = MAX_NUMBER_OF_THREADS / THREADS_PER_BLOCK;

    // Host storage: std::vector simplifies allocation and initialization.
    std::vector< real_t > v1 ( VECTOR_SIZE, 1.f );  // all ones
    std::vector< real_t > v2 ( VECTOR_SIZE, 2.f );  // all twos
    std::vector< real_t > vout( VECTOR_SIZE, 0.f ); // result, zeroed

    // Device storage.
    real_t* dev_in1 = 0;
    real_t* dev_in2 = 0;
    real_t* dev_out = 0;
    cudaMalloc( &dev_in1, SIZE );
    cudaMalloc( &dev_in2, SIZE );
    cudaMalloc( &dev_out, SIZE );

    // Copy inputs to the GPU.
    cudaMemcpy( dev_in1, &v1[ 0 ], SIZE, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_in2, &v2[ 0 ], SIZE, cudaMemcpyHostToDevice );

    // Launch with fewer threads than elements; the kernel's stride loop
    // covers the remainder.
    sum_vectors<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>( dev_in1, dev_in2, dev_out, VECTOR_SIZE );

    // Synchronous read-back: implicitly waits for kernel completion.
    cudaMemcpy( &vout[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost );

    // Print first and last element of the result vector.
    std::cout << "result: " << vout.front() << ".." << vout.back() << std::endl;

    cudaFree( dev_in1 );
    cudaFree( dev_in2 );
    cudaFree( dev_out );
    return 0;
}
1,516
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <unistd.h>
#include <vector>
#include <complex>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <math.h>
#include <map>
#include <stdexcept>
#include <cuda.h>
#include <cufft.h>
//#include <helper_functions.h>
//#include <helper_cuda.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Fix: __FILE__ expands to a string literal (const char*), so `file` must be
// const-qualified to compile cleanly as C++.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA_ERROR:\ncode:%s\nfile: %s\nline:%d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

///////////////////////////////////////////////////////////////////////////////////////////////
////
//// Constants & Typedefs
////
///////////////////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265359
#define BLOCK_SIZE 4096
#define SIGNAL_THRESHOLD 200
#define MAX_TRANSMISSIONS 200
//172MHz gives us CB
#define SAMPLE_RATE 172089331.259
#define BATCH_SIZE 1
#define HzInMHz 1000000

typedef char byte;
typedef float2 Complex;

///////////////////////////////////////////////////////////////////////////////////////////////
////
//// Device Variables
////
///////////////////////////////////////////////////////////////////////////////////////////////
__device__ int transmissionCount;   // number of transmissions recorded so far
__device__ int timeStep;            // sample counter used for start/end timestamps

///////////////////////////////////////////////////////////////////////////////////////////////
////
//// Kernels
////
///////////////////////////////////////////////////////////////////////////////////////////////

// Records a newly detected transmission on FFT bin `idx` in slot
// transmissionCount - 1 and marks the bin active.
// NOTE(review): the caller bumps transmissionCount with atomicAdd, but the
// slot index is re-read here non-atomically — concurrent detections can race
// onto the same slot. TODO: return the reserved slot from the atomicAdd.
void __device__ createTransmission( int idx , int* transmissionBins, cufftReal* scaledResultBuffer, cufftReal* transmissionFrequencies, cufftReal* transmissionStarts, cufftReal* transmissionStrengths, bool* activeTransmissions )
{
    transmissionBins[ transmissionCount - 1 ] = idx;
    //frequency in MHz
    transmissionFrequencies[ transmissionCount - 1 ] = idx * SAMPLE_RATE / BLOCK_SIZE / HzInMHz;
    transmissionStarts[ transmissionCount - 1 ] = timeStep / SAMPLE_RATE;
    transmissionStrengths[ transmissionCount - 1 ] = scaledResultBuffer[ idx ];
    activeTransmissions[ idx ] = true;
}

// Closes the most recently opened transmission on bin `idx`: searches
// newest-first, stamps its end time, and clears the bin's active flag.
void __device__ finishTransmission( int idx, int* transmissionBins, cufftReal* transmissionEnds, bool* activeTransmissions )
{
    for( int i = transmissionCount - 1 ; i >= 0 ; i-- )
    {
        if( transmissionBins[ i ] == idx )
        {
            transmissionEnds[ i ] = timeStep / SAMPLE_RATE;
            activeTransmissions[ idx ] = false;
            return;
        }
    }
}

// Converts each complex FFT output to magnitude in dB (one thread per bin).
void __global__ scaleResult( cufftReal* scaledResultBuffer , cufftComplex* resultBuffer )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx < BLOCK_SIZE )
    {
        // Fix: the original read "x*x * + y*y" — a stray '*' before the
        // unary '+', which evaluates to x*x*y*y instead of the intended
        // magnitude sqrt(x*x + y*y).
        scaledResultBuffer[ idx ] = sqrt( resultBuffer[ idx ].x * resultBuffer[ idx ].x + resultBuffer[ idx ].y * resultBuffer[ idx ].y );
        scaledResultBuffer[ idx ] = 20 * log10( scaledResultBuffer[ idx ] );
    }
}

// Resets the detector: zero transmission count, all bins inactive.
// (transmissionCount is redundantly written by every thread — harmless.)
void __global__ initTransmissionArray( bool* activeTransmissions )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    transmissionCount = 0;
    if( idx < BLOCK_SIZE ) activeTransmissions[ idx ] = false;
}

// One thread per FFT bin (bin 0 / DC is skipped): opens a transmission when
// the bin rises above SIGNAL_THRESHOLD and closes it when it falls below.
// Every thread also advances the global timeStep counter by 1.
void __global__ findTransmissions( cufftReal* scaledResultBuffer , int* deviceBins, cufftReal *deviceFrequencies, cufftReal *deviceStarts, cufftReal *deviceEnds, cufftReal *deviceStrengths, bool* activeTransmissions )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx < BLOCK_SIZE && idx != 0 )
    {
        if( scaledResultBuffer[ idx ] > SIGNAL_THRESHOLD && activeTransmissions[ idx ] == false )
        {
            atomicAdd( &transmissionCount , 1 );
            createTransmission( idx , deviceBins , scaledResultBuffer , deviceFrequencies , deviceStarts , deviceStrengths, activeTransmissions );
        }
        if( scaledResultBuffer[ idx ] < SIGNAL_THRESHOLD && activeTransmissions[ idx ] == true )
        {
            finishTransmission( idx , deviceBins , deviceEnds , activeTransmissions );
        }
    }
    //timeStep += BLOCK_SIZE;
    atomicAdd( &timeStep , 1 );
}

///////////////////////////////////////////////////////////////////////////////////////////////
////
//// Device Pointers
////
///////////////////////////////////////////////////////////////////////////////////////////////

// Device-side buffers (allocated in main).
cufftComplex *deviceResult = 0;
cufftReal *deviceSource = 0;
cufftReal *deviceScaledResult = 0;
int* deviceBins = 0;
cufftReal *deviceFrequencies = 0;
cufftReal *deviceStarts = 0;
cufftReal *deviceEnds = 0;
cufftReal *deviceStrengths = 0;
bool* deviceActiveTransmissions = 0;
int* deviceCount = 0;

///////////////////////////////////////////////////////////////////////////////////////////////
////
////      Host Variables
////
///////////////////////////////////////////////////////////////////////////////////////////////

int* hostBins;
cufftReal *hostFrequencies;
cufftReal *hostStarts;
cufftReal *hostEnds;
cufftReal *hostStrengths;

///////////////////////////////////////////////////////////////////////////////////////////////
////
////      Functions
////
///////////////////////////////////////////////////////////////////////////////////////////////

// Reads a raw 8-bit capture file, runs a sliding BLOCK_SIZE-point FFT over it
// on the device, detects above-threshold bins, and writes the transmission
// list to spikes.txt.
int main( int argc , char** argv )
{
    // FIX: the original dereferenced argv[1] without checking argc.
    if( argc < 2 )
    {
        std::cerr << "usage: " << argv[0] << " <capture-file>" << std::endl;
        return 1;
    }
    std::string filename = std::string( argv[1] );
    std::ifstream f;
    struct stat filestatus;
    stat( filename.c_str() , &filestatus );
    size_t filesize = filestatus.st_size;
    f.open( filename.c_str() , std::ios::in | std::ios::binary );
    if( !f.good() )
    {
        std::cerr << "Can't open file" << std::endl;
        exit( 1 );
    }
    // One cufftReal per input byte.
    cufftReal* original = new cufftReal[ filesize ];
    for( unsigned int i = 0 ; i < filesize ; i++ )
    {
        original[i] = (cufftReal) (byte) f.get();
    }
    f.close();

    int fft_size = BLOCK_SIZE;
    int max_transmissions = MAX_TRANSMISSIONS;

    // FIX: the original ignored this call's return code.
    gpuErrchk( cudaGetSymbolAddress( (void**) &deviceCount , transmissionCount ));

    gpuErrchk( cudaMalloc( &deviceSource , filesize * sizeof(cufftReal) ));
    gpuErrchk( cudaMalloc( &deviceResult , fft_size * sizeof(cufftComplex) ));
    gpuErrchk( cudaMalloc( &deviceScaledResult , fft_size * sizeof(cufftReal) ));
    gpuErrchk( cudaMalloc( &deviceBins , max_transmissions * sizeof(int) ));
    gpuErrchk( cudaMalloc( &deviceFrequencies , max_transmissions * sizeof(cufftReal) ));
    gpuErrchk( cudaMalloc( &deviceStarts , max_transmissions * sizeof(cufftReal) ));
    gpuErrchk( cudaMalloc( &deviceEnds , max_transmissions * sizeof(cufftReal) ));
    gpuErrchk( cudaMalloc( &deviceStrengths , max_transmissions * sizeof(cufftReal) ));
    gpuErrchk( cudaMalloc( &deviceActiveTransmissions , fft_size * sizeof(bool) ));

    // TODO: This giant memcpy will become a pipelined streaming thingy
    gpuErrchk( cudaMemcpy( deviceSource , original , filesize * sizeof( cufftReal ) ,
                           cudaMemcpyHostToDevice ));

    initTransmissionArray<<< 32 , 64 >>>( deviceActiveTransmissions );
    gpuErrchk( cudaPeekAtLastError() );   // catch launch-configuration errors

    //prepare the FFT
    cufftHandle p;
    cufftResult_t fft_result;
    fft_result = cufftPlan1d( &p , BLOCK_SIZE , CUFFT_R2C , BATCH_SIZE );
    if( fft_result != CUFFT_SUCCESS ) exit(1);

    // Slide a BLOCK_SIZE window over the capture, one frame per iteration.
    // FIX: pointer arithmetic on cufftReal* is already in elements, so the
    // original "deviceSource + j * sizeof(cufftReal)" stepped 4x too far and
    // eventually read past the end of the buffer (the old "filesize * 0.25"
    // loop bound compensated for that). With the offset fixed we can iterate
    // over the whole buffer directly.
    for( size_t j = 0 ; j + fft_size <= filesize ; j += fft_size )
    {
        fft_result = cufftExecR2C( p , deviceSource + j , deviceResult );
        if( fft_result != CUFFT_SUCCESS ) exit(2);

        // num blocks * num threads = fftsize / 2 ... nyquist limit
        scaleResult<<< 32 , 64 >>>( deviceScaledResult , deviceResult );
        gpuErrchk( cudaPeekAtLastError() );
        findTransmissions<<< 32 , 64 >>>( deviceScaledResult, deviceBins,
                                          deviceFrequencies, deviceStarts, deviceEnds,
                                          deviceStrengths, deviceActiveTransmissions );
        gpuErrchk( cudaPeekAtLastError() );
    }

    //Copy all that crap back
    int* hostCount = new int;
    hostBins = new int[ MAX_TRANSMISSIONS ];
    hostFrequencies = new cufftReal[ MAX_TRANSMISSIONS ];
    hostStarts = new cufftReal[ MAX_TRANSMISSIONS ];
    hostEnds = new cufftReal[ MAX_TRANSMISSIONS ];
    hostStrengths = new cufftReal[ MAX_TRANSMISSIONS ];
    std::cout << "LOLZ" << std::endl;
    gpuErrchk( cudaMemcpy( hostBins , deviceBins ,
                           MAX_TRANSMISSIONS * sizeof( int ) , cudaMemcpyDeviceToHost ));
    gpuErrchk( cudaMemcpy( hostFrequencies , deviceFrequencies ,
                           MAX_TRANSMISSIONS * sizeof( cufftReal ) , cudaMemcpyDeviceToHost ));
    gpuErrchk( cudaMemcpy( hostStarts , deviceStarts ,
                           MAX_TRANSMISSIONS * sizeof( cufftReal ) , cudaMemcpyDeviceToHost ));
    gpuErrchk( cudaMemcpy( hostEnds , deviceEnds ,
                           MAX_TRANSMISSIONS * sizeof( cufftReal ) , cudaMemcpyDeviceToHost ));
    gpuErrchk( cudaMemcpy( hostStrengths , deviceStrengths ,
                           MAX_TRANSMISSIONS * sizeof( cufftReal ) , cudaMemcpyDeviceToHost ));
    gpuErrchk( cudaMemcpy( hostCount , deviceCount ,
                           sizeof( int ) , cudaMemcpyDeviceToHost ) );

    std::cout << *hostCount << std::endl;

    // FIX: only the first MAX_TRANSMISSIONS slots hold valid metadata; the
    // original loop could read past the end when the count exceeded that.
    int count = *hostCount;
    if( count > MAX_TRANSMISSIONS )
        count = MAX_TRANSMISSIONS;

    std::ofstream fo;
    fo.open( "spikes.txt" );
    for( int i = 0 ; i < count ; i++ )
    {
        fo << "==== TRANSMISSION ====" << "\n";
        //In MHz
        fo << "Bin : " << hostBins[ i ] << " \n";
        fo << "Frequency : " << hostFrequencies[ i ] << " MHz\n";
        fo << "Signal strength : " << hostStrengths[ i ] << " dB\n";
        fo << "Time start : " << hostStarts[ i ] << " s\n";
        fo << "Time end : " << hostEnds[ i ] << " s\n";
    }

    // Cleanup: the original leaked the FFT plan and every allocation.
    cufftDestroy( p );
    cudaFree( deviceSource );
    cudaFree( deviceResult );
    cudaFree( deviceScaledResult );
    cudaFree( deviceBins );
    cudaFree( deviceFrequencies );
    cudaFree( deviceStarts );
    cudaFree( deviceEnds );
    cudaFree( deviceStrengths );
    cudaFree( deviceActiveTransmissions );
    delete[] original;
    delete hostCount;
    delete[] hostBins;
    delete[] hostFrequencies;
    delete[] hostStarts;
    delete[] hostEnds;
    delete[] hostStrengths;

    return 0;
}
1,517
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>

// Adds b[0] into a[0]; single-thread demonstration kernel.
__global__ void cudaADD(int* a, int* b)
{
    a[0] += b[0];
}

// Abort with a diagnostic when a CUDA runtime call fails.
// FIX: the original ignored every CUDA return code, so allocation or copy
// failures went unnoticed and the printed result was silently wrong.
static void check(cudaError_t err, const char* what)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main(){
    int a = 5, b = 6;
    int *c_a, *c_b;

    // Allocate memory for CUDA
    check(cudaMalloc(&c_b, sizeof(int)), "cudaMalloc(c_b)");
    check(cudaMalloc(&c_a, sizeof(int)), "cudaMalloc(c_a)");

    // Transfer data to GPU from CPU
    check(cudaMemcpy(c_a, &a, sizeof(int), cudaMemcpyHostToDevice), "copy a to device");
    check(cudaMemcpy(c_b, &b, sizeof(int), cudaMemcpyHostToDevice), "copy b to device");

    // Run on GPU; launch errors only surface via cudaGetLastError.
    cudaADD<<<1,1>>>(c_a, c_b);
    check(cudaGetLastError(), "kernel launch");

    // Transfer from GPU to CPU (blocking copy also synchronises the kernel).
    check(cudaMemcpy(&a, c_a, sizeof(int), cudaMemcpyDeviceToHost), "copy a to host");

    printf("%d\n", a);

    // Free allocated memory
    check(cudaFree(c_a), "cudaFree(c_a)");
    check(cudaFree(c_b), "cudaFree(c_b)");

    return 0;
}
1,518
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define BLOCK 30000
#define THREAD 1000
#define CHECK 1

void stopwatch(int);

// Plain global-memory increment: every thread races on *cnt, so the final
// value is undefined. Kept deliberately as the "wrong" baseline of the demo.
__global__ void count( int* cnt)
{
    (*cnt)++;
}

// Correct count using a global atomicAdd (the target is passed by pointer;
// only one thread updates it at a time).
__global__ void atomic_count( int* cnt)
{
    atomicAdd(cnt,1);
}

// Correct count using a per-block shared-memory counter, folded into the
// global counter with a single atomic per block.
__global__ void atomic_with_shared_count( int* cnt)
{
    __shared__ int s_cnt;

    // Only one thread needs to initialise the shared counter.
    if(threadIdx.x==0)
        s_cnt = 0;
    // Block-wide barrier: counter initialised before anyone increments.
    __syncthreads();

    atomicAdd(&s_cnt,1);

    // Block-wide barrier: all increments visible before the final fold.
    __syncthreads();

    // One thread adds the block's total to the global counter.
    if(threadIdx.x==0)
        atomicAdd(cnt,s_cnt);
}

int main()
{
    int * host_cnt;
    int * dev_cnt;

    dim3 Dg(BLOCK,1,1);
    dim3 Db(THREAD,1,1);

    printf("BLOCK : %d\nTHREAD : %d\n",BLOCK,THREAD);

    host_cnt= (int*)malloc(sizeof(int));
    cudaMalloc((void**)&dev_cnt,sizeof(int));

    // FIX: the original copied *host_cnt to the device uninitialised, so the
    // first benchmark started from a garbage value.
    (*host_cnt)=0;

    printf("Just cnt++ : ");
    stopwatch(0);
    cudaMemcpy(dev_cnt, host_cnt,sizeof(int), cudaMemcpyHostToDevice);
    count<<<Dg,Db >>>(dev_cnt);
    cudaMemcpy(host_cnt,dev_cnt,sizeof(int),cudaMemcpyDeviceToHost);
    stopwatch(1);
#if CHECK
    printf("cnt : %d\n",*host_cnt);
#endif

    (*host_cnt)=0;
    printf("AtomicAdd : ");
    stopwatch(0);
    cudaMemcpy(dev_cnt, host_cnt,sizeof(int), cudaMemcpyHostToDevice);
    atomic_count<<<Dg,Db >>>(dev_cnt);
    cudaMemcpy(host_cnt,dev_cnt,sizeof(int),cudaMemcpyDeviceToHost);
    stopwatch(1);
#if CHECK
    printf("cnt : %d\n",*host_cnt);
#endif

    (*host_cnt)=0;
    printf("AtomicAdd with Shared Memory : ");
    stopwatch(0);
    cudaMemcpy(dev_cnt, host_cnt,sizeof(int), cudaMemcpyHostToDevice);
    atomic_with_shared_count<<<Dg,Db >>>(dev_cnt);
    cudaMemcpy(host_cnt,dev_cnt,sizeof(int),cudaMemcpyDeviceToHost);
    stopwatch(1);
#if CHECK
    printf("cnt : %d\n",*host_cnt);
#endif

    cudaFree(dev_cnt);
    free(host_cnt);

    return 0;
}

// flag 0: start the timer; flag 1: stop and print the elapsed time.
void stopwatch(int flag)
{
    enum clock_unit{nano = 0, micro , milli, sec} unit;

    const long long NANOS = 1000000000LL;
    static struct timespec startTS,endTS;
    static long long diff = 0;

    /* select the print unit here: nano, micro, milli, sec */
    unit = micro;

    //start
    if(flag == 0)
    {
        diff = 0;
        if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS))
            printf("Failed to call clock_gettime\n");
    }
    //end
    else if(flag == 1)
    {
        if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS))
            printf("Failed to call clock_gettime\n");
        diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);

        switch(unit)
        {
            case nano :
                printf("% lld nano sec\n",diff);
                break;
            case micro :
                printf("%lld micro sec\n",diff/1000);
                break;
            case sec :
                printf("% lld sec\n",diff/1000000000);
                break;
            default :
                // FIX: milliseconds are ns/1,000,000; the original divided
                // by 100,000 (off by a factor of 10).
                printf("% lld milli sec\n",diff/1000000);
                break;
        }
    }
    else
    {
        printf("wrong flag | 0 : start, 1 : end\n");
    }
}
1,519
#include "includes.h"

/*
 * One-atomic-per-element histogram.
 *
 * Grid layout: blockIdx.y selects an independent (input-slice, bins-slice)
 * pair; the x dimension grid-strides over the nrows elements of that slice.
 * Out-of-range labels are clamped into [0, nbins) and the clamped value is
 * written back into `in` before being counted.
 */
__global__ void naiveHistKernel(int* bins, int nbins, int* in, int nrows) {
  const int stride = blockDim.x * gridDim.x;
  const auto rowBase = blockIdx.y * nrows;   // this slice's offset into `in`
  const auto binBase = blockIdx.y * nbins;   // matching offset into `bins`

  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < nrows) {
    int label = in[rowBase + i];
    // Clamp stray labels to the nearest valid bin.
    label = max(0, min(label, nbins - 1));
    // Persist the clamped label back into the input array.
    in[rowBase + i] = label;
    atomicAdd(bins + binBase + label, 1);
    i += stride;
  }
}
1,520
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>

/* change dimension size as needed */
const int dimension = 4096 ;

// Tile edge for the shared-memory kernel: each block is tilewidth x tilewidth
// threads and the grid is (dimension/tilewidth)^2 blocks. dimension must be
// divisible by tilewidth.
// FIX: the original launched 64x64 = 4096 threads per block (over the 1024
// hardware limit) on a 4x4 grid that covered only 8x8 of the 4096x4096
// output, indexed the 2x2 shared tiles far out of bounds, and accumulated
// into uninitialised device memory.
const int tilewidth = 32 ;

// Legacy constants retained for source compatibility; no longer used to
// derive the launch configuration.
const int blocksize = 64;
const int K = 4;

struct timeval tv;

// Tiled matrix multiply: Cd = Ad * Bd for square dimention x dimention
// matrices. Expects blockDim = (tilewidth, tilewidth) and
// gridDim = (dimention/tilewidth, dimention/tilewidth).
__global__ void gpuSmMM( float *Ad , float *Bd , float *Cd , int dimention )
{
    // Shared tiles of A and B for the current phase.
    __shared__ float Ads [tilewidth][tilewidth] ;
    __shared__ float Bds [tilewidth][tilewidth] ;

    // Output element this thread computes.
    unsigned int col = tilewidth*blockIdx.x + threadIdx.x ;
    unsigned int row = tilewidth*blockIdx.y + threadIdx.y ;

    float acc = 0.0f;   // accumulate in a register, store to Cd once at the end

    for (int m = 0 ; m < dimention/tilewidth ; m++ )   // m indexes the phase
    {
        // Cooperatively load one tile of A (row-slice) and one of B (column-slice).
        Ads[threadIdx.y][threadIdx.x] = Ad[row*dimention + (m*tilewidth + threadIdx.x)] ;
        Bds[threadIdx.y][threadIdx.x] = Bd[(m*tilewidth + threadIdx.y) * dimention + col] ;
        __syncthreads() ;   // tiles fully loaded before anyone reads them

        // FIX: index A's tile by (row-in-tile, k) and B's by (k, col-in-tile);
        // the original transposed both lookups relative to the loads.
        for ( int k1 = 0; k1 < tilewidth ; k1++ )
            acc += Ads[threadIdx.y][k1] * Bds[k1][threadIdx.x] ;
        __syncthreads() ;   // finished reading before the next phase overwrites
    }

    Cd[row*dimention + col] = acc ;
}

int main(int argc, char *argv[])
{
    cudaEvent_t start_i, stop_i,start_mc_h2d, stop_mc_h2d,start_mc_d2h, stop_mc_d2h,start_pl, stop_pl;
    float time_i,time_mc_h2d,time_mc_d2h,time_pl;

    cudaEventCreate(&start_i);
    cudaEventCreate(&stop_i);
    cudaEventCreate(&start_mc_h2d);
    cudaEventCreate(&stop_mc_h2d);
    cudaEventCreate(&start_mc_d2h);
    cudaEventCreate(&stop_mc_d2h);
    cudaEventCreate(&start_pl);
    cudaEventCreate(&stop_pl);

    int i, j;
    float *A, *B, *C;
    float *Ad, *Bd, *Cd;

    // Host-side initialisation (timed with the _i events).
    cudaEventRecord( start_i, 0 );
    A = (float*)malloc(dimension*dimension*sizeof(float));
    B = (float*)malloc(dimension*dimension*sizeof(float));
    C = (float*)malloc(dimension*dimension*sizeof(float));
    srand(292);

    for(i = 0; i < dimension; i++)
        for(j = 0; j < dimension; j++)
        {
            A[dimension*i+j] = (rand()/(RAND_MAX + 1.0));
            B[dimension*i+j] = (rand()/(RAND_MAX + 1.0));
            C[dimension*i+j] = 0.0;
        }
    cudaEventRecord( stop_i, 0 );
    cudaEventSynchronize( stop_i );
    cudaEventElapsedTime( &time_i, start_i, stop_i );

    // Device allocation + host-to-device copies.
    cudaEventRecord( start_mc_h2d, 0 );
    cudaMalloc( (void**)&Ad, dimension*dimension*sizeof(float) );
    cudaMemcpy( Ad, A, dimension*dimension*sizeof(float), cudaMemcpyHostToDevice );
    cudaMalloc( (void**)&Bd, dimension*dimension*sizeof(float) );
    cudaMemcpy( Bd, B, dimension*dimension*sizeof(float), cudaMemcpyHostToDevice );
    cudaMalloc( (void**)&Cd, dimension*dimension*sizeof(float) );
    cudaEventRecord( stop_mc_h2d, 0 );
    cudaEventSynchronize( stop_mc_h2d );
    cudaEventElapsedTime( &time_mc_h2d, start_mc_h2d, stop_mc_h2d );

    // Kernel launch (timed with the _pl events).
    cudaEventRecord( start_pl, 0 );
    // FIX: launch configuration derived from the tile size so it is legal
    // (<= 1024 threads/block) and covers the whole output matrix.
    dim3 threadBlock(tilewidth,tilewidth);
    dim3 grid(dimension/tilewidth,dimension/tilewidth);
    gpuSmMM<<<grid,threadBlock>>>( Ad,Bd,Cd,dimension);
    cudaEventRecord( stop_pl, 0 );
    cudaEventSynchronize( stop_pl );
    cudaEventElapsedTime( &time_pl, start_pl, stop_pl );

    // Device-to-host copy of the result.
    cudaEventRecord( start_mc_d2h, 0 );
    cudaMemcpy(C,Cd,dimension*dimension*sizeof(float),cudaMemcpyDeviceToHost);
    cudaEventRecord( stop_mc_d2h, 0 );
    cudaEventSynchronize( stop_mc_d2h );
    cudaEventElapsedTime( &time_mc_d2h, start_mc_d2h, stop_mc_d2h );

    printf("MC: %f ", ( time_mc_d2h + time_mc_h2d ) );
    printf("PLT: %f \n ", time_pl);

    cudaEventDestroy( start_i );
    cudaEventDestroy( stop_i );
    cudaEventDestroy( start_mc_d2h );
    cudaEventDestroy( stop_mc_d2h );
    cudaEventDestroy( start_mc_h2d );
    cudaEventDestroy( stop_mc_h2d );
    cudaEventDestroy( start_pl );
    cudaEventDestroy( stop_pl );

    free(A);
    free(B);
    free(C);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);

    return 0;
}
1,521
#include<stdio.h>
#include<assert.h>
#include<cuda.h>

#define N 1000000

#define HANDLE_ERROR( err )(handleCudaError( err, __FILE__, __LINE__ ) )

// Prints a diagnostic when a CUDA call failed.
// Returns -1 on failure, 0 on success.
int handleCudaError(cudaError_t cut,const char* file, int line)
{
    if(cut != cudaSuccess)
    {
        printf("%s : File: %s Line: %d \n",cudaGetErrorString(cut),file,line);
        return -1 ;
    }
    return 0;
}

// Element-wise c[i] = a[i] + b[i]. Not launched below; kept for reference.
__global__ void parallel_add(int n, int *a ,int *b , int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread
    if(i < n)
    {
        c[i] = a[i] + b[i]; // simple add
    }
}

// Dot product of a and b accumulated into *erg, one atomicAdd per element.
__global__ void parallel_scalar_product(int n, int *a,int *b, int *erg)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread
    if(i < n)
    {
        int product = a[i] * b[i];
        atomicAdd(erg,product);
    }
}

int main(int argc, char* argv[])
{
    // ============= INIT =====================
    int *a_host= NULL;
    int *b_host=NULL;
    int erg_host=0;

    int *a_device_ptr = NULL ;
    int *b_device_ptr = NULL ;
    int *erg_device_ptr = NULL ;

    a_host=(int*)malloc(sizeof(int)*N);
    b_host=(int*)malloc(sizeof(int)*N);

    // All-ones vectors, so the expected dot product is exactly N.
    for(unsigned int i = 0; i < N; ++i)
    {
        a_host[i] = 1 ;
        b_host[i] = 1;
    }

    //============TRANSFER======================
    HANDLE_ERROR(cudaMalloc(&a_device_ptr, sizeof(int)*N));   // malloc of a_device
    HANDLE_ERROR(cudaMalloc(&b_device_ptr, sizeof(int)*N));   // malloc of b_device
    HANDLE_ERROR(cudaMalloc(&erg_device_ptr, sizeof(int)));   // malloc of erg_device

    //Transfer a_host to a_device
    HANDLE_ERROR(cudaMemcpy(a_device_ptr, a_host, sizeof(int)*N, cudaMemcpyHostToDevice));
    //Transfer b_host to b_device
    HANDLE_ERROR(cudaMemcpy(b_device_ptr, b_host, sizeof(int)*N, cudaMemcpyHostToDevice));
    // Zero the accumulator on the device.
    HANDLE_ERROR(cudaMemcpy(erg_device_ptr, &erg_host, sizeof(int), cudaMemcpyHostToDevice));

    //=============Calculation ==================
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    parallel_scalar_product<<<ceil((float)N/(float)256),256>>>(N,a_device_ptr,b_device_ptr,erg_device_ptr);
    // FIX: the original never checked whether the launch itself failed.
    HANDLE_ERROR(cudaGetLastError());
    cudaEventRecord(stop);

    //===========CHECK============================
    HANDLE_ERROR(cudaMemcpy(&erg_host,erg_device_ptr, sizeof(int), cudaMemcpyDeviceToHost));

    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time %f milliseconds \n", milliseconds) ;

    if(erg_host == N)
    {
        printf("Correct Calculation \n");
    }
    else
    {
        printf(" Non Correct Calculation %d %d \n", erg_host , N);
    }

    //============CLEAN==============================
    // FIX: the original leaked both timing events.
    HANDLE_ERROR(cudaEventDestroy(start));
    HANDLE_ERROR(cudaEventDestroy(stop));
    HANDLE_ERROR(cudaFree(a_device_ptr));
    HANDLE_ERROR(cudaFree(b_device_ptr));
    HANDLE_ERROR(cudaFree(erg_device_ptr));
    free(a_host);
    free(b_host);
    a_host= NULL;
    b_host= NULL;

    return 0 ;
}
1,522
// Grid-stride loop helper: `index` visits every element of [0, length)
// exactly once across the whole grid, regardless of launch size.
#define FORRANGE(index, length) for (size_t index = threadIdx.x + blockIdx.x * blockDim.x; index < length; index += gridDim.x * blockDim.x)

// Single-precision AXPY: y[i] += alpha * x[i] for i in [0, len).
// Safe to launch with any grid/block configuration thanks to the
// grid-stride iteration (written out explicitly below).
extern "C" __global__ void axpy_f(const size_t len, const float alpha, const float *x, float *y)
{
    for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < len; i += gridDim.x * blockDim.x)
    {
        y[i] += alpha * x[i];
    }
}
1,523
// Mandelbrot.Framework.Gpu.JuliaGpu
extern "C" __global__ void JuliaKernel(
    int* levels, int levelsLen0,
    unsigned char* colors, int colorsLen0,
    unsigned char* palette, int paletteLen0,
    int w, int h,
    double sx, double sy, double sw, double sh,
    int maxLevels,
    double* parameters, int parametersLen0);

// Mandelbrot.Framework.Gpu.JuliaGpu
__device__ int JSetLevel(double zr, double zi, double cr, double ci, int max);

// Mandelbrot.Framework.Gpu.MandelbrotGpu
extern "C" __global__ void MandelbrotKernel(
    int* levels, int levelsLen0,
    unsigned char* colors, int colorsLen0,
    unsigned char* palette, int paletteLen0,
    int w, int h,
    double sx, double sy, double sw, double sh,
    int maxLevels,
    double* parameters, int parametersLen0);

// Mandelbrot.Framework.Gpu.MandelbrotGpu
__device__ int MSetLevel(double cr, double ci, int max);

// Mandelbrot.Framework.Gpu.JuliaGpu
// Renders one pixel of a Julia set. parameters[0]/parameters[1] hold the
// complex constant c. Writes the iteration count into `levels` and a
// 4-byte BGRA pixel into `colors`; `palette` holds 3 bytes per shade.
// NOTE(review): no bounds check on the pixel index — assumes the launch
// grid exactly covers the w x h image; confirm at the call site.
extern "C" __global__ void JuliaKernel(
    int* levels, int levelsLen0,
    unsigned char* colors, int colorsLen0,
    unsigned char* palette, int paletteLen0,
    int w, int h,
    double sx, double sy, double sw, double sh,
    int maxLevels,
    double* parameters, int parametersLen0)
{
    int px = blockDim.x * blockIdx.x + threadIdx.x;
    int py = blockDim.y * blockIdx.y + threadIdx.y;
    int pixel = px + py * w;

    // Map the pixel to its point in the complex window (sx, sy, sw, sh).
    double stepX = sw / (double)w;
    double stepY = sh / (double)h;
    double zr = sx + (double)px * stepX;
    double zi = sy + (double)py * stepY;

    int colorIdx = pixel * 4;
    int level = JSetLevel(zr, zi, parameters[(0)], parameters[(1)], maxLevels);
    levels[(pixel)] = level;

    // Scale the level to one of 256 palette shades (3 bytes each).
    int shade = (int)((double)level / (double)maxLevels * 256.0);
    if (shade > 255)
    {
        shade = 255;
    }
    shade *= 3;

    // JSetLevel returns at most maxLevels, so this branch always runs;
    // kept to mirror the original control flow.
    if (level <= maxLevels)
    {
        colors[(colorIdx)] = palette[(shade + 2)];      // blue
        colors[(colorIdx + 1)] = palette[(shade + 1)];  // green
        colors[(colorIdx + 2)] = palette[(shade)];      // red
        colors[(colorIdx + 3)] = 255;                   // alpha
    }
}

// Mandelbrot.Framework.Gpu.JuliaGpu
// Iterates z <- z^2 + c from z = (zr, zi); returns the number of iterations
// taken until |z|^2 > 4 or `max` is reached. Implemented as do/while, so the
// point is advanced at least once before the escape test.
__device__ int JSetLevel(double zr, double zi, double cr, double ci, int max)
{
    double re = zr;
    double im = zi;
    double re2 = re * re;   // cached re^2
    double im2 = im * im;   // cached im^2
    int iter = 0;
    do
    {
        im = 2.0 * (re * im) + ci;
        re = re2 - im2 + cr;
        re2 = re * re;
        im2 = im * im;
        iter++;
    }
    while (iter < max && re2 + im2 <= 4.0);
    return iter;
}

// Mandelbrot.Framework.Gpu.MandelbrotGpu
// Renders one pixel of the Mandelbrot set. Escaped points are coloured from
// the palette (cycling modulo paletteLen0); points that survive maxLevels
// iterations are painted opaque black.
extern "C" __global__ void MandelbrotKernel(
    int* levels, int levelsLen0,
    unsigned char* colors, int colorsLen0,
    unsigned char* palette, int paletteLen0,
    int w, int h,
    double sx, double sy, double sw, double sh,
    int maxLevels,
    double* parameters, int parametersLen0)
{
    int px = blockDim.x * blockIdx.x + threadIdx.x;
    int py = blockDim.y * blockIdx.y + threadIdx.y;
    int pixel = px + py * w;

    // Map the pixel to the point c in the complex window.
    double stepX = sw / (double)w;
    double stepY = sh / (double)h;
    double cr = sx + (double)px * stepX;
    double ci = sy + (double)py * stepY;

    int colorIdx = pixel * 4;
    int level = MSetLevel(cr, ci, maxLevels);
    levels[(pixel)] = level;

    if (level < maxLevels)
    {
        // Escaped: pick a palette entry, wrapping around the palette.
        int shade = level * 3 % paletteLen0;
        colors[(colorIdx)] = palette[(shade + 2)];      // blue
        colors[(colorIdx + 1)] = palette[(shade + 1)];  // green
        colors[(colorIdx + 2)] = palette[(shade)];      // red
        colors[(colorIdx + 3)] = 255;                   // alpha
    }
    else
    {
        // Did not escape: opaque black.
        colors[(colorIdx)] = 0;
        colors[(colorIdx + 1)] = 0;
        colors[(colorIdx + 2)] = 0;
        colors[(colorIdx + 3)] = 255;
    }
}

// Mandelbrot.Framework.Gpu.MandelbrotGpu
// Iterates z <- z^2 + c from z = 0; returns the iteration count when
// |z|^2 >= 4 or `max` is reached (escape test runs before each step).
__device__ int MSetLevel(double cr, double ci, int max)
{
    double re = 0.0;
    double im = 0.0;
    double re2 = 0.0;   // cached re^2
    double im2 = 0.0;   // cached im^2
    int iter = 0;
    while (iter < max && im2 + re2 < 4.0)
    {
        im = 2.0 * (re * im) + ci;
        re = re2 - im2 + cr;
        im2 = im * im;
        re2 = re * re;
        iter++;
    }
    return iter;
}
1,524
#include "includes.h"

/*
 * Block-level sum reduction: each block sums blockDim.x consecutive input
 * elements in shared memory and writes one partial sum per block to `out`.
 *
 * Requires blockDim.x * sizeof(float) bytes of dynamic shared memory and a
 * power-of-two block size (the tree reduction halves the stride each step).
 */
__global__ void reduce2(float *in, float *out, int n)
{
    extern __shared__ float sdata[];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

    // Each thread loads one element; threads past the end contribute 0.
    sdata[tid] = (i < n) ? in[i] : 0;

    __syncthreads();

    // Tree reduction in shared memory.
    // FIX (performance): mapping tid -> index = 2*s*tid performs exactly the
    // same pairwise additions as the original "tid % (2*s) == 0" test (so the
    // result is bit-identical), but keeps the active threads contiguous and
    // drops the slow, warp-divergent modulo that the original's own comment
    // flagged.
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        unsigned int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];   // sum lands in the lower slot
        }

        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum.
    if (tid == 0) out[blockIdx.x] = sdata[0];
}
1,525
#include "includes.h"

/*
 * Sets x[i] = 1.0f and y[i] = 2.0f for every i in [0, n).
 *
 * Work is handed out warp-by-warp in STRIDE_64K-byte stripes: each warp owns
 * whole stripes of the arrays and its 32 lanes fill consecutive floats within
 * the stripe, 32 at a time.
 * NOTE(review): STRIDE_64K is expected to be defined via includes.h — confirm.
 */
__global__ void init(int n, float *x, float *y)
{
    const int lane = threadIdx.x & 31;                                    // lane within the warp
    size_t warp = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;           // global warp id
    const size_t warpsPerGrid = (blockDim.x * gridDim.x) >> 5;            // warps per launch
    const size_t stripes = ((sizeof(float)*n) + STRIDE_64K-1) / STRIDE_64K; // stripes to cover n floats

    // Each warp grid-strides over the stripes it owns.
    for (; warp < stripes; warp += warpsPerGrid)
    {
        #pragma unroll
        for (int rep = 0; rep < STRIDE_64K/sizeof(float)/32; rep++)
        {
            size_t idx = warp * STRIDE_64K/sizeof(float) + rep * 32 + lane;
            if (idx < n)
            {
                x[idx] = 1.0f;
                y[idx] = 2.0f;
            }
        }
    }
}
1,526
// One explicit time step of a third-order Rusanov-type finite-difference
// update on a 1-D periodic grid of N points, one thread per grid point.
//
//   f_in   : field values at the current step (read at i-2 .. i+2)
//   f_tmp  : second field read at the +/-1 neighbours
//            NOTE(review): presumably a predictor/intermediate stage of the
//            multi-stage scheme — confirm against the host-side stepping loop.
//   f_next : output field for the next step (written at index tid only)
//   nu     : advection coefficient (presumably the Courant number nu =
//            c*dt/dx — TODO confirm)
//   omega  : coefficient of the fourth-difference stabilisation term
//   N      : number of grid points (assumed >= 2 for the wrap-around below)
__global__ void wave1Drusanov3(double * f_next,double * f_tmp, double * f_in, double nu, double omega, int N){
    int tid=threadIdx.x+blockIdx.x*blockDim.x;
    if(tid<N){
        // Periodic wrap-around for the two neighbours on each side.
        int x_2m=tid-2;
        if(x_2m<0)
            x_2m+=N;
        int x_m = tid-1;
        if(x_m<0)
            x_m+=N;
        int x_p = tid+1;
        if(x_p>(N-1))
            x_p-=N;
        int x_2p = tid+2;
        if(x_2p>(N-1))
            x_2p-=N;

        // Load the five-point stencil of the current field.
        double f_2m = f_in[x_2m];
        double f_m = f_in[x_m];
        double f = f_in[tid];
        double f_p = f_in[x_p];
        double f_2p = f_in[x_2p];

        // Update = current value
        //   - (nu/24)  * wide antisymmetric difference of f_in   (advection)
        //   - (3nu/8)  * centred difference of f_tmp             (corrector)
        //   - (omega/24) * fourth difference of f_in             (dissipation)
        f_next[tid]=f-(nu/24.)*(-2.*f_2p+7.*f_p - 7.*f_m+2.*f_2m)
            -(3.*nu/8.)*(f_tmp[x_p]-f_tmp[x_m])
            -(omega/24.)*(f_2p - 4.*f_p + 6.*f - 4.*f_m + f_2m);
    }
}
1,527
#include <stdio.h>

// Device-side greeting: every launched thread prints one line.
__global__ void hello_cuda()
{
    printf("Hello Cuda!\n");
}

int main()
{
    // 8 blocks of 4 threads => 32 greetings in total.
    dim3 block(4);
    dim3 grid(8);

    hello_cuda<<<grid, block>>>();

    // Wait for the kernel so the device printf buffer is flushed
    // before the process tears the context down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
1,528
#include "includes.h"

/*
 * Grey-level histogram of img_in (img_w x img_h, one byte per pixel) into
 * hist_out[nbr_bin]. Launch with a 2D grid whose total x extent equals
 * img_w (the linear pixel index below assumes gridDim.x*blockDim.x == img_w
 * — confirm at the launch site).
 *
 * FIX: the original bounds test compared row against the image WIDTH and
 * col against the HEIGHT (swapped), so non-square images either skipped
 * pixels or counted out-of-bounds memory.
 */
__global__ void histogram( int * hist_out, unsigned char * img_in, int img_w,int img_h, int nbr_bin){
    int tx=threadIdx.x;
    int ty=threadIdx.y;
    int bx=blockIdx.x;
    int by=blockIdx.y;

    unsigned int col= tx + blockDim.x * bx;   // x coordinate (width axis)
    unsigned int row= ty + blockDim.y * by;   // y coordinate (height axis)
    int grid_width = gridDim.x * blockDim.x;
    int id = row * grid_width + col;          // linear pixel index

    // Zero the bins using the first nbr_bin linear thread ids.
    // WARNING(review): __syncthreads() only orders threads WITHIN a block;
    // blocks that contain no bin-clearing threads may start accumulating
    // before the bins are zeroed. Prefer cudaMemset(hist_out, 0, ...) on the
    // host before launching this kernel.
    if(id<nbr_bin)
        hist_out[id] = 0;

    __syncthreads();

    // FIX: row indexes the height and col the width.
    if(row<img_h && col<img_w)
        atomicAdd( &(hist_out[img_in[id]]), 1);
}
1,529
// fermi
// Avoid mangling of function names
extern "C" {
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b);
}

// Blocked matrix multiply-accumulate: c += a * b, where a is n x p and
// b is p x m (n is not referenced in the body).
//
// Generated-looking index scheme; from the expressions below it assumes:
//  - blockDim = (32, 4), so 32*wtj + ttj enumerates 0..127 (one thread per
//    column of a 128-wide tile),
//  - p and m are multiples of 128 (the loop bounds divide by 128),
//  - each block produces a 16-row x 128-column tile of c: blockIdx.y picks
//    the 16-row band, blockIdx.x the 128-column band,
//  - sums[16] holds one running dot product per output row of the band.
// NOTE(review): "p / (p / 128)" is ~128 only when p is an exact multiple of
// 128 — confirm the intended tiling for other shapes.
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b)
{
    const int ttj = threadIdx.x;
    const int wtj = threadIdx.y;
    const int bj = blockIdx.x;
    const int bi = blockIdx.y;

    // Shared tile of `a`: 16 rows x 128 columns (flat, row ei at 128*ei).
    __shared__ float l_a[2048];
    float sums[16];
    for (int ei = 0; ei < 16; ei++) {
        sums[ei] = 0.0;
    }

    for (int l = 0; l < p / 128; l++) {
        // Each thread loads column (32*wtj + ttj) of all 16 rows of the tile.
        for (int ei = 0; ei < 16; ei++) {
            l_a[32 * wtj + ttj + 128 * ei] = a[32 * wtj + ttj + 128 * l + (ei + 16 * bi) * (128 * (p / 128))];
        }
        __syncthreads();   // tile fully loaded before it is read

        for (int k2 = 0; k2 < p / (p / 128); k2++) {
            // One element of `b` for this thread's output column, reused
            // across all 16 output rows.
            const float bkj = b[128 * bj + (32 * wtj + ttj) + (l * p / (p / 128) + k2) * m];
            for (int ei = 0; ei < 16; ei++) {
                sums[ei] += l_a[k2 + 128 * ei] * bkj;
            }
        }
        __syncthreads();   // done reading before the next tile overwrites
    }

    // Accumulate (note: +=) the band into c.
    for (int ei = 0; ei < 16; ei++) {
        c[32 * wtj + ttj + 128 * bj + (ei + 16 * bi) * (128 * (m / 128))] += sums[ei];
    }
}
1,530
#include "compare.cuh"

// Naive convolution forward pass: one thread per batch image.
// y[B,M,H_out,W_out] = conv(x[B,C,H,W], k[M,C,K,K]), valid padding, stride 1.
__global__ void forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
    /* The goal here is to be correct AND fast. We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own. */
    // An example use of these macros:
    // float a = y4d(0,0,0,0)
    // y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
    int b = blockDim.x * blockIdx.x + threadIdx.x;
    const int H_out = H - K + 1;
    const int W_out = W - K + 1;
    if (b < B) // for each image in the batch
    {
        for (int m = 0; m < M; m++) // for each output feature maps
            for (int h = 0; h < H_out; h++) // for each output element
                for (int w = 0; w < W_out; w++)
                {
                    y4d(b, m, h, w) = 0;
                    for (int c = 0; c < C; c++) // sum over all input feature maps
                        for (int p = 0; p < K; p++) // KxK filter
                            for (int q = 0; q < K; q++)
                                y4d(b, m, h, w) += x4d(b, c, h + p, w + q) * k4d(m, c, p, q);
                }
    }
#undef y4d
#undef x4d
#undef k4d
}

// im2col: expands one image X[C,H,W] into X_out[C*K*K, H_out*W_out] so the
// convolution becomes a plain matrix product.  One thread per (channel,
// output-pixel) pair.
__global__ void unrollKernel(int C, int H, int W, int K, float* X, float* X_out)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    int H_out = H - K + 1;
    int W_out = W - K + 1;
    int W_unroll = H_out*W_out;
    int c, s, h_out, w_out, h_unroll, w_unroll, w_base, k1, k2;
    if (tid < C*W_unroll)
    {
        c = tid / W_unroll;       // channel handled by this thread
        s = tid % W_unroll;       // linear output-pixel index
        h_out = s / W_out;
        w_out = s % W_out;
        h_unroll = h_out*W_out + w_out;  // column of the unrolled matrix
        w_base = c*K*K;                  // first row for this channel's patch
        for (k1 = 0; k1 < K; k1++)
        {
            for (k2 = 0; k2 < K; k2++)
            {
                w_unroll = w_base + k1*K + k2;
                X_out[w_unroll*W_unroll + h_unroll] = X[c*H*W + (h_out + k1)*W + w_out + k2];
            }
        }
    }
}

#define TILE_WIDTH 32
#define TILE_WIDTH_FLOAT 32.0

// Tiled shared-memory GEMM: C = A * B (row-major; A: numARows x numAColumns,
// B: numBRows x numBColumns).  NOTE(review): out-of-range loads leave stale
// values in the shared tiles; correctness relies on the inner
// `i*TILE_WIDTH + j < numAColumns` guard skipping those entries — confirm
// for shapes that are not multiples of 32.
__global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns)
{
    __shared__ float subTileA[TILE_WIDTH][TILE_WIDTH];
    __shared__ float subTileB[TILE_WIDTH][TILE_WIDTH];
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    float multVal = 0;
    //printf("%d %d\n", numAColumns, numAColumns/16);
    //if((row * numCColumns + col) < (numCRows*numCColumns)) {
    //printf("row %d col %d \n", row, col);
    //compute the number of tiles needed
    for (int i = 0; i < ceilf(numAColumns / TILE_WIDTH_FLOAT); i++)
    {
        //each thread loads its bit
        if (row * numAColumns + i * TILE_WIDTH + threadIdx.x < numARows*numAColumns)
        {
            subTileA[threadIdx.y][threadIdx.x] = A[row * numAColumns + i * TILE_WIDTH + threadIdx.x];
            //subTileB[threadIdx.y][threadIdx.x] = B[(i * TILE_WIDTH + threadIdx.y) * numBColumns + col];
            //printf("%d, %d, %d, %d, %f\n", row, i, threadIdx.y, threadIdx.x, A[row * numAColumns + i * TILE_WIDTH + threadIdx.x]);
        }
        if ((i * TILE_WIDTH + threadIdx.y) * numBColumns + col < numBRows*numBColumns)
        {
            subTileB[threadIdx.y][threadIdx.x] = B[(i * TILE_WIDTH + threadIdx.y) * numBColumns + col];
        }
        __syncthreads();
        if (row < numCRows && col < numCColumns)
        {
            for (int j = 0; j < TILE_WIDTH; j++)
            {
                if (i*TILE_WIDTH + j < numAColumns)
                {
                    //printf("%d, %d, %f, %f\n", threadIdx.y, threadIdx.x, subTileA[threadIdx.y][j], subTileB[j][threadIdx.x]);
                    multVal += subTileA[threadIdx.y][j] * subTileB[j][threadIdx.x];
                }
            }
            // Running value; the final tile iteration leaves the full dot product.
            C[row*numCColumns + col] = multVal;
        }
        __syncthreads();
    }
}

// Host driver: forward pass via im2col + GEMM, one batch image at a time.
// NOTE(review): unrollBlockDim requests H*W*C threads in a single block,
// which exceeds the 1024-thread limit for all but tiny inputs — confirm the
// intended input sizes before relying on this path.
void forwardWithMatmul(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
    float* dev_y;
    float* dev_x;
    float* dev_k;
    float* dev_X_out;
    int H_out = H - K + 1;
    int W_out = W - K + 1;
    cudaMalloc((void**)&dev_x, (size_t)(B*H*W*C * sizeof(float)));
    cudaMalloc((void**)&dev_X_out, (size_t)((H - K + 1)*(W - K + 1)*K*K*C * sizeof(float)));
    cudaMalloc((void**)&dev_y, (size_t)(B*M*H_out*W_out * sizeof(float)));
    cudaMalloc((void**)&dev_k, (size_t)(M*C*K*K * sizeof(float)));
    cudaMemcpy(dev_x, x, (size_t)(B*H*W*C * sizeof(float)), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_k, k, (size_t)(M*C*K*K * sizeof(float)), cudaMemcpyHostToDevice);
    //eventually make this do it in one shot
    dim3 unrollBlockDim(H*W*C, 1, 1);
    dim3 unrollGridDim(1, 1, 1);
    const int inner_dim = K*K*C;  // shared dimension of the GEMM
    dim3 mulBlockDim(TILE_WIDTH, TILE_WIDTH, 1);
    dim3 mulGridDim(ceilf((H_out*W_out)/TILE_WIDTH_FLOAT), ceilf(M/TILE_WIDTH_FLOAT), 1);
    //for everything in batch
    for (int b = 0; b < B; b++)
    {
        //unroll
        unrollKernel <<< unrollGridDim, unrollBlockDim >>> (C, H, W, K, dev_x+b*H*W*C, dev_X_out);
        //multiply first by second
        matrixMultiplyShared <<< mulGridDim, mulBlockDim >>> (dev_k, dev_X_out, dev_y+b*M*H_out*W_out, M, inner_dim, inner_dim, H_out*W_out, M, H_out*W_out);
    }
    cudaMemcpy(y, dev_y, (size_t)(B*M*H_out*W_out * sizeof(float)), cudaMemcpyDeviceToHost);
    cudaFree(dev_y);
    cudaFree(dev_x);
    cudaFree(dev_k);
    cudaFree(dev_X_out);
}

// Host driver: forward pass using the naive per-image kernel above.
void forwardWithLoop(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
    float* dev_y;
    float* dev_x;
    float* dev_k;
    int H_out = H - K + 1;
    int W_out = W - K + 1;
    cudaMalloc((void**)&dev_x, (size_t)(B*H*W*C * sizeof(float)));
    cudaMalloc((void**)&dev_y, (size_t)(B*M*H_out*W_out * sizeof(float)));
    cudaMalloc((void**)&dev_k, (size_t)(M*C*K*K * sizeof(float)));
    cudaMemcpy(dev_x, x, (size_t)(B*H*W*C * sizeof(float)), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_k, k, (size_t)(M*C*K*K * sizeof(float)), cudaMemcpyHostToDevice);
    // One thread per batch image.
    dim3 gridDim((B + 511) / 512);
    dim3 blockDim(512);
    forward_kernel << < gridDim, blockDim >> > (dev_y, dev_x, dev_k, B, M, C, H, W, K);
    cudaMemcpy(y, dev_y, (size_t)(B*M*H_out*W_out * sizeof(float)), cudaMemcpyDeviceToHost);
    cudaFree(dev_y);
    cudaFree(dev_x);
    cudaFree(dev_k);
}
1,531
/***************************************************************************//**
 * \file LHS1.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 * \brief kernels to generate the left hand side for the intermediate velocity solve
 */

#include "LHS1.h"

namespace kernels
{
// Assembles the rows of the implicit-velocity LHS matrix (COO triplets
// row/col/val) for the u-velocity nodes.  Three cases per node:
//   hybridTagsUV > 0 : blend of the 5-point diffusion stencil and an
//                      immersed-boundary interpolation, weighted by alpha;
//   ghostTagsUV  > 0 : identity row plus interpolation coefficients;
//   otherwise        : standard 5-point implicit diffusion stencil,
//                      normalised by the diagonal (temp).
// NOTE(review): numE is derived analytically from (I,J) and count[], so the
// sparsity layout must match the allocation done by the caller — confirm.
__global__ void LHS1_mid_iter_X(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny,
	int *hybridTagsUV, int *ghostTagsUV, double *ns_rhs, double *interp_rhs, int *count,
	int *index1, int *index2, int *index3, int *index4,
	double *xu, double *yu, double *alpha, double *uB, //xu, yu not used
	double *q1coef, double *q2coef, double *q3coef, double *q4coef,
	double *q1, double *q2, double *q3, double *q4
	)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
		return;
	int iu = threadIdx.x + blockDim.x * blockIdx.x,
		I = iu % (nx-1),
		J = iu / (nx-1);
	// Boundary rows are handled elsewhere; only interior nodes here.
	if (I == 0 || I == nx-2 || J == 0 || J == ny-1)
		return;
	//int numE = i*5;
	// top row - corner mid sides current row
	int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1;
	double temp = 1;
	if (hybridTagsUV[iu]>0)
	{
		int interp_index[4] = {index1[iu], index2[iu], index3[iu], index4[iu]};
		double q[4] = {q1[iu], q2[iu], q3[iu], q4[iu]};
		double CInterp[4];
		double Cns[5];
		// Diffusion coefficients (N, E, S, W) and diagonal.
		Cns[0] = -dt*nu/(dy[J+1]*(dy[J]+dy[J+1]));
		Cns[1] = -dt*nu/(dx[I] *(dx[I]+dx[I+1]));
		Cns[2] = -dt*nu/(dy[J] *(dy[J]+dy[J+1]));
		Cns[3] = -dt*nu/(dx[I] *(dx[I]+dx[I-1]));
		Cns[4] = 1-Cns[0] - Cns[1] - Cns[2] - Cns[3];
		CInterp[0] = q1coef[iu];
		CInterp[1] = q2coef[iu];
		CInterp[2] = q3coef[iu];
		CInterp[3] = q4coef[iu];
		// Blend: NS part weighted by (1-alpha), interpolation part by alpha.
		for (int l=0; l<4; l++)
		{
			Cns[l] = Cns[l]*(1-alpha[iu])/Cns[4];
			CInterp[l] = CInterp[l]*alpha[iu];
		}
		/* 0 1 2 NW N NE
		 * 3 4 5 W P E
		 * 6 7 8 SW S SE */
		int stencil_index[9] = {iu + (nx-1) - 1, iu + (nx-1), iu + (nx-1) + 1,
								iu - 1 , iu , iu + 1,
								iu - (nx-1) - 1, iu - (nx-1), iu - (nx-1) + 1};
		double stencil[9] = {0, Cns[0], 0, Cns[3], 1, Cns[1], 0, Cns[2], 0};
		//combine ns and interp stencils
		bool stencil_used[9] = {false, true, false, true, true, true, false, true, false};
		for (int n=0;n<4;n++)
		{
			for (int m=0;m<9;m++)
			{
				if (stencil_index[m] == interp_index[n] && m != 4)
				{
					stencil[m] -= CInterp[n]; //flag should this be minus?
				}
			}
		}
		//add ns to sparse matrix
		for (int m = 0; m<9; m++)
		{
			if (stencil_used[m])
			{
				row[numE] = iu;
				col[numE] = stencil_index[m];
				val[numE] = stencil[m];
				numE++;
			}
		}
		ns_rhs[iu] = (1-alpha[iu])/Cns[4];
		interp_rhs[iu] = 0;
		//calc new numE
		numE = ny*(nx-1)*5 - ny*2 - (nx-1)*2 + nx*(ny-1)*5 - nx*2 - (ny-1)*2 + count[iu]-1;
		//add interp corner to sparse matrix
		for (int n=0;n<4;n++)
		{
			for (int m=0;m<9;m++)
			{
				if (stencil_index[m] == interp_index[n] && !stencil_used[m])
				{
					row[numE] = iu;
					col[numE] = interp_index[n];
					val[numE] = -CInterp[n]; //this should be minus?
				}
				//else if(stencil_index[m] == interp_index[n] && stencil_used[m])
				else if(stencil_index[m] == interp_index[n] && interp_index[n] == iu)
					interp_rhs[iu] += CInterp[n]*q[n];
			}
		}
	}
	else if (ghostTagsUV[iu]>0)
	{
		int interp_index[4] = {index1[iu], index2[iu], index3[iu], index4[iu]};
		bool interp_in[4] = {false, false, false, false};
		int ns_index[5] = {iu + (nx-1), iu + 1, iu - (nx-1), iu -1, iu}; //n e s w p
		bool ns_overlap[5] = {false, false, false, false, true};
		double q[4] = {q1[iu], q2[iu], q3[iu], q4[iu]};
		double CInterp[4];
		CInterp[0] = q1coef[iu];
		CInterp[1] = q2coef[iu];
		CInterp[2] = q3coef[iu];
		CInterp[3] = q4coef[iu];
		//count the number of nodes the interp is using
		//find how which ns nodes are occupied
		int counter = 0;
		temp = 0;
		for (int l=0; l<4; l++)
		{
			if (ghostTagsUV[interp_index[l]]>0)
			{
				counter +=1;
				interp_in[l] = true;
			}
			for (int n=0; n<5; n++)
			{
				if (interp_index[l] == ns_index[n])
					ns_overlap[n] = true;
			}
		}
		//add center to matrix
		row[numE] = iu;
		col[numE] = iu;
		val[numE] = 1;
		numE++;
		//add real interp values to matrix
		for (int i=0; i<4; i++)
		{
			if (!interp_in[i] && interp_index[i] != iu)
			{
				row[numE] = iu;
				col[numE] = interp_index[i];
				val[numE] = CInterp[i];
				numE++;
			}
			else
			{
				temp -= CInterp[i] * q[i];
			}
		}
		//fill remainder of values
		// Pad the row with explicit zeros so every row has the expected
		// number of entries in the preallocated COO arrays.
		int counter2 = 0;
		for (int i=0; i<5; i++)
		{
			if (counter2>=counter)
				break;
			if (ns_overlap[i]==false)
			{
				row[numE] = iu;
				col[numE] = ns_index[i];
				val[numE] = 0;
				numE++;
				counter2++;
			}
		}
		ns_rhs[iu] = 0;
		interp_rhs[iu] = 2*uB[0] + temp;//flag this doesn't account for the interpolation part
	}
	else
	{
		// Plain interior node: implicit 5-point diffusion, row scaled by the
		// diagonal `temp` so the centre coefficient is exactly 1.
		temp = 1 + 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)) + 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
		//EAST
		row[numE] = iu;
		col[numE] = iu+1;
		val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5))/temp;
		numE++;
		//WEST
		row[numE] = iu;
		col[numE] = iu-1;
		val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5))/temp;
		numE++;
		//NORTH
		row[numE] = iu;
		col[numE] = iu+(nx-1);
		val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5))/temp;
		numE++;
		//SOUTH
		row[numE] = iu;
		col[numE] = iu-(nx-1);
		val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5))/temp;
		numE++;
		//CENTER
		row[numE] = iu;
		col[numE] = iu;
		val[numE] = 1;
		numE++;
		ns_rhs[iu] = 1/temp;
		interp_rhs[iu] = 0;
	}
}

// Same assembly as LHS1_mid_iter_X but for the v-velocity nodes, which are
// stored after the (nx-1)*ny u-nodes (hence iv = ip + (nx-1)*ny) and use an
// nx-wide row stride.
__global__ void LHS1_mid_iter_Y(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny,
	int *hybridTagsUV, int *ghostTagsUV, double *ns_rhs, double *interp_rhs, int *count,
	int *index1, int *index2, int *index3, int *index4,
	double *xv, double *yv, double *alpha, double *vB,
	double *q1coef, double *q2coef, double *q3coef, double *q4coef,
	double *q1, double *q2, double *q3, double *q4
	)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
		return;
	int ip = threadIdx.x + blockDim.x * blockIdx.x,
		I = ip % nx,
		J = ip / nx,
		iv = ip + (nx-1)*ny;
	if (I == 0 || I == nx-1 || J == 0 || J == ny-2)
		return;
	// Offset past all u-rows, then position within the v-rows.
	int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1;
	double temp = 1;
	if (hybridTagsUV[iv]>0)
	{
		int interp_index[4] = {index1[iv], index2[iv], index3[iv], index4[iv]};
		double q[4] = {q1[iv], q2[iv], q3[iv], q4[iv]};
		double CInterp[4];
		double Cns[5];
		Cns[0] = -dt*nu/(dy[J+1]*(dy[J]+dy[J+1]));
		Cns[1] = -dt*nu/(dx[I]*(dx[I]+dx[I+1]));
		Cns[2] = -dt*nu/(dy[J]*(dy[J]+dy[J+1]));
		Cns[3] = -dt*nu/(dx[I]*(dx[I]+dx[I-1]));
		Cns[4] = 1-Cns[0] - Cns[1] - Cns[2] - Cns[3];
		CInterp[0] = q1coef[iv];
		CInterp[1] = q2coef[iv];
		CInterp[2] = q3coef[iv];
		CInterp[3] = q4coef[iv];
		for (int l=0; l<4; l++)
		{
			Cns[l] = Cns[l]*(1-alpha[iv])/Cns[4];
			CInterp[l] = CInterp[l]*alpha[iv];
		}
		/* 0 1 2 NW N NE
		 * 3 4 5 W P E
		 * 6 7 8 SW S SE */
		int stencil_index[9] = {iv + nx - 1, iv + nx, iv + nx + 1,
								iv - 1 , iv , iv + 1,
								iv - nx - 1, iv - nx, iv - nx + 1};
		double stencil[9] = {0, Cns[0], 0, Cns[3], 1, Cns[1], 0, Cns[2], 0};
		//combine ns and interp stencils
		bool stencil_used[9] = {false, true, false, true, true, true, false, true, false};
		for (int n=0;n<4;n++)
		{
			for (int m=0;m<9;m++)
			{
				if (stencil_index[m] == interp_index[n] && m != 4)
				{
					stencil[m] -= CInterp[n]; //flag should this be minus?
				}
			}
		}
		//add ns to sparse matrix
		for (int m = 0; m<9; m++)
		{
			if (stencil_used[m])
			{
				row[numE] = iv;
				col[numE] = stencil_index[m];
				val[numE] = stencil[m];
				numE++;
			}
		}
		ns_rhs[iv] = (1-alpha[iv])/Cns[4];
		interp_rhs[iv] = 0;
		//calc new numE
		numE = ny*(nx-1)*5 - ny*2 - (nx-1)*2 + nx*(ny-1)*5 - nx*2 - (ny-1)*2 + count[iv]-1;
		//add interp corner to sparse matrix
		for (int n=0;n<4;n++)
		{
			for (int m=0;m<9;m++)
			{
				if (stencil_index[m] == interp_index[n] && !stencil_used[m])
				{
					row[numE] = iv;
					col[numE] = interp_index[n];
					val[numE] = -CInterp[n];
				}
				//else if(stencil_index[m] == interp_index[n] && stencil_used[m])
				else if(stencil_index[m] == interp_index[n] && interp_index[n] == iv)
					interp_rhs[iv] += CInterp[n]*q[n];
			}
		}
	}
	else if (ghostTagsUV[iv]>0)
	{
		int interp_index[4] = {index1[iv], index2[iv], index3[iv], index4[iv]};
		bool interp_in[4] = {false, false, false, false};
		int ns_index[5] = {iv+nx, iv+1, iv-nx, iv-1, iv}; //n e s w p
		bool ns_overlap[5] = {false, false, false, false, true};
		double q[4] = {q1[iv], q2[iv], q3[iv], q4[iv]};
		double CInterp[4];
		CInterp[0] = q1coef[iv];
		CInterp[1] = q2coef[iv];
		CInterp[2] = q3coef[iv];
		CInterp[3] = q4coef[iv];
		//count the number of nodes the interp is using
		//find how which ns nodes are occupied
		int counter = 0;
		temp = 0;
		for (int l=0; l<4; l++)
		{
			if (ghostTagsUV[interp_index[l]]>0)
			{
				counter +=1;
				interp_in[l] = true;
			}
			for (int n=0; n<5; n++)
			{
				if (interp_index[l] == ns_index[n])
					ns_overlap[n] = true;
			}
		}
		//add center to matrix
		row[numE] = iv;
		col[numE] = iv;
		val[numE] = 1;
		numE++;
		//add real interp values to matrix
		for (int i=0; i<4; i++)
		{
			if (!interp_in[i] && interp_index[i] != iv)
			{
				row[numE] = iv;
				col[numE] = interp_index[i];
				val[numE] = CInterp[i];
				numE++;
			}
			else
			{
				temp -= CInterp[i] * q[i];
			}
		}
		//fill remainder of values
		int counter2 = 0;
		for (int i=0; i<5; i++)
		{
			if (counter2>=counter)
				break;
			if (ns_overlap[i]==false)
			{
				row[numE] = iv;
				col[numE] = ns_index[i];
				val[numE] = 0;
				numE++;
				counter2++;
			}
		}
		ns_rhs[iv] = 0;
		interp_rhs[iv] = 2*vB[0] + temp;//flag this doesn't account for the interpolation part
	}
	else
	{
		temp = 1 + 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)) + 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)) + 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
		//EAST
		row[numE] = iv;
		col[numE] = iv+1;
		val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5))/temp;
		numE++;
		//WEST
		row[numE] = iv;
		col[numE] = iv-1;
		val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5))/temp;
		numE++;
		//NORTH
		row[numE] = iv;
		col[numE] = iv + nx;
		val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5))/temp;
		numE++;
		//SOUTH
		row[numE] = iv;
		col[numE] = iv-nx;
		val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5))/temp;
		numE++;
		//CENTER
		row[numE] = iv;
		col[numE] = iv;
		val[numE] = 1;
		numE++;
		ns_rhs[iv] = 1/temp;
		interp_rhs[iv] = 0;
	}
}
}//end kernel
1,532
/* Copyright 2012 by Erik Opavsky
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>

// Reads numSequences sequences from a comma-separated file: the first two
// columns of each record are skipped, the third field is read as a string
// into sequences[i] (each buffer must be large enough for one sequence).
// Returns 1 on success, 0 on open/parse failure.
int readSequences (char * fileName, char ** sequences, int numSequences)
{
  // NOTE(review): resetting the device inside a file-reading helper frees
  // every prior device allocation of the process; kept for compatibility,
  // but this probably belongs at program start — confirm with callers.
  cudaDeviceReset();

  FILE *dataFile;
  if ((dataFile = fopen (fileName, "r")) == NULL)
  {
    printf("The file %s could not be opened.\n", fileName);
    return 0;
  }

  // skip first row
  // while (getc (dataFile) != ',');

  for (int i = 0; i < numSequences; i++)
  {
    int ch;
    // skip first column — fixed: guard against EOF so a truncated or
    // malformed file no longer spins forever waiting for a comma.
    while ((ch = getc (dataFile)) != ',')
      if (ch == EOF) { fclose (dataFile); return 0; }
    // skip second column
    while ((ch = getc (dataFile)) != ',')
      if (ch == EOF) { fclose (dataFile); return 0; }
    // skip third column
    // while (getc (dataFile) != ',');
    // fixed: check the conversion actually happened before trusting the buffer
    if (fscanf (dataFile, "%s", sequences[i]) != 1)
    { fclose (dataFile); return 0; }
  }

  fclose (dataFile);
  return 1;
}

// Copies numSequences fixed-length sequences to one contiguous device
// buffer and returns the device pointer (caller owns it / must cudaFree).
// NOTE(review): one cudaMemcpy per sequence is slow; packing the host rows
// into one buffer and doing a single copy would be faster — behaviour kept.
char * copySequencesToDevice (char ** sequences, int numSequences, int sequenceLength)
{
  char * d_sequences;
  cudaMalloc (&d_sequences, sizeof (char) * sequenceLength * numSequences);
  for (int i = 0; i < numSequences; i++)
    cudaMemcpy (d_sequences + i * sequenceLength, *(sequences + i),
                sizeof (char) * sequenceLength, cudaMemcpyHostToDevice);
  return d_sequences;
}
1,533
#include<stdio.h>
#define N 32

// CPU reference: element-wise sum of two N x N matrices, Z = X + Y.
void add(int *X, int *Y, int *Z)
{
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
        {
            Z[i*N+j] = X[i*N+j] + Y[i*N+j];
        }
}

// GPU version: one thread per element.  Valid only when launched as a single
// block of N x N threads (indices come from threadIdx alone).
__global__ void add_kernel(int *X, int *Y, int *Z){
    int i = threadIdx.x;
    int j = threadIdx.y;
    Z[i*N+j] = X[i*N+j] + Y[i*N+j];
}

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    //Input matrix
    int X[N*N];
    int Y[N*N];
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
        {
            X[i*N+j] = 0;
            Y[i*N+j] = 1;
        }

    //Output matrix
    int Z[N*N];

    int *d_X, *d_Y, *d_Z;
    cudaMalloc((void**) &d_X, (N*N)*sizeof(int));
    cudaMalloc((void**) &d_Y, (N*N)*sizeof(int));
    cudaMalloc((void**) &d_Z, (N*N)*sizeof(int));
    cudaMemcpy(d_X, &X, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, &Y, (N*N)*sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimGrid(1,1,1);
    // Fixed: the block was (20,20,1), so only a 20x20 corner of the 32x32
    // matrix was computed and the rest of Z was printed uninitialized.
    // N*N = 1024 threads is exactly the per-block maximum.
    dim3 dimBlock(N,N,1);

    cudaEventRecord(start);
    add_kernel<<<dimGrid, dimBlock>>>(d_X, d_Y, d_Z);
    cudaEventRecord(stop);
    //add(X, Y, Z);
    cudaMemcpy(&Z, d_Z, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    cudaFree(d_X);
    cudaFree(d_Y);
    cudaFree(d_Z);
    cudaEventDestroy(start);  // fixed: events were never destroyed
    cudaEventDestroy(stop);
    printf("%f ms\n", milliseconds);

    for(int i = 0; i < N; i++)
    {
        for(int j = 0; j < N; j++)
        {
            printf("%d ", Z[i*N+j]);
        }
        printf("\n");
    }
    // Fixed: returned -1 on success, which reads as failure to the shell.
    return 0;
}
1,534
// Cudafy_Test.RuneCalc
extern "C" __global__ void calc_r(int n, int* build, int buildLen0, int buildLen1, int* stat, int statLen0, int* mult, int multLen0, int multLen1, int* flat, int flatLen0, int flatLen1, int* res, int resLen0, int resLen1);

// Cudafy_Test.RuneCalc
// Grid-stride loop over n "builds".  For each build i and each of 8 stats j,
// res[i][j] is computed as stat[j] * mult[num][j] + flat[num][j] where num is
// an entry of build[i][..].
// NOTE(review): the inner k-loop (k = 0..5) overwrites res[i][j] on every
// iteration, so only k = 5 (build[i][5]) contributes to the result — the
// other five slots are dead work.  Possibly `+=` was intended; confirm
// against the original Cudafy source before changing.
extern "C" __global__ void calc_r(int n, int* build, int buildLen0, int buildLen1, int* stat, int statLen0, int* mult, int multLen0, int multLen1, int* flat, int flatLen0, int flatLen1, int* res, int resLen0, int resLen1)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
	{
		for (int j = 0; j < 8; j++)
		{
			for (int k = 0; k < 6; k++)
			{
				int num = build[(i) * buildLen1 + ( k)];
				res[(i) * resLen1 + ( j)] = stat[(j)] * mult[(num) * multLen1 + ( j)] + flat[(num) * flatLen1 + ( j)];
			}
		}
	}
}
1,535
#include "includes.h"

// Element-wise vector sum: array_c[i] = array_a[i] + array_b[i] for
// i in [0, size).  Grid-stride loop, so any launch configuration covers the
// whole range regardless of grid size.
__global__ void add(float *array_a, float *array_b, float *array_c, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    while (idx < size) {
        array_c[idx] = array_a[idx] + array_b[idx];
        idx += stride;
    }
}
1,536
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 Conway's Game of Life running on nVidia GPUs.
 Each GPU thread updates the state of a single cell in the playing field,
 then the current generation is displayed in Linux's framebuffer (this means
 the program must be run outside X Window System).
 Since this program is just a demonstration, the data model is NOT decoupled
 from its representation.
*/

/** Cell drawing function
 Updates the state of a single cell and draws it on screen.
 Each GPU thread updates a single cell (the mapping is linear, so even if the
 field's size exceeds the number of computation units of the GPU, the pixels
 assigned to the same thread are far away from each other), based on the
 number of neighbors.
 Since the number of neighbors is only read (and this function is called
 AFTER the neighbors array is written) and each thread writes only its cell,
 there are no race conditions.
 @param field Array of cell states. 1: live cell, 0: dead cell.
 @param neighbors Array of neighbors' count (e.g. neighbors[42] = 4 means cell 42 has 4 neighbors).
 @param screen Framebuffer (the function assumes the buffer uses 32 bit colors).
 @param w Width of the framebuffer.
 @param h Height of the framebuffer.
*/
__global__ void updateCell(char *field, char *neighbors, char *screen, int w, int h)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x; /* Pick the cell based on thread and block IDs. */
    if (i < w * h)
    {
        if ((field[i] != 0 && (neighbors[i] == 3 || neighbors[i] == 2)) || /* Survive rule: S23 */
            (field[i] == 0 && neighbors[i] == 3))                          /* Birth rule: B3 */
        {
            /* Live cell */
            field[i] = 1;
            screen[4 * i] = 0xff;     /* blue */
            screen[4 * i + 1] = 0xff; /* green */
            screen[4 * i + 2] = 0xff; /* red */
            screen[4 * i + 3] = 0xff; /* alpha */
        }
        else
        {
            /* Dead cell */
            field[i] = 0;
            screen[4 * i] = 0x00;
            screen[4 * i + 1] = 0x00;
            screen[4 * i + 2] = 0x00;
            screen[4 * i + 3] = 0x00;
        }
    }
}

/** Neighbors counting function
 Checks the eight neighbors' state and counts the live ones.
 Each GPU thread writes the neighbors count for a single cell while the
 field is not modified, so there are no race conditions.
 The field wraps around toroidally at all four edges.
 @param field Array of cell states. 1: live cell, 0: dead cell.
 @param neighbors Array of neighbors' count (e.g. neighbors[42] = 4 means cell 42 has 4 neighbors).
 @param w Width of the framebuffer.
 @param h Height of the framebuffer.
*/
__global__ void countNeighbors(char *field, char *neighbors, int w, int h)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    /* Fixed: the launch rounds the block count up, so trailing threads have
       i >= w*h; without this guard they read and wrote out of bounds of
       field[] and neighbors[] (updateCell already had the check). */
    if (i >= w * h)
        return;
    int x = i % w;
    int y = i / w * w;   /* Row base offset: rounding to the closest multiple of w. */
    int xp1 = (i + 1) % w;
    int xm1 = (i + w - 1) % w;
    int yp1 = ((i + w) % (h * w)) / w * w;
    int ym1 = ((i - w + h * w) % (h * w)) / w * w;
    neighbors[i] = 0;
    if (field[xm1 + ym1] != 0) neighbors[i]++;
    if (field[xm1 + y] != 0) neighbors[i]++;
    if (field[xm1 + yp1] != 0) neighbors[i]++;
    if (field[x + ym1] != 0) neighbors[i]++;
    if (field[x + yp1] != 0) neighbors[i]++;
    if (field[xp1 + ym1] != 0) neighbors[i]++;
    if (field[xp1 + y] != 0) neighbors[i]++;
    if (field[xp1 + yp1] != 0) neighbors[i]++;
}

/** Game of Life wrapper function.
 Initializes the playing field with a random initial state and iterates over
 Life's generations.  This function takes care of synchronizing GPU threads
 so that no race conditions are triggered.
 NOTE(review): `char field[w * h]` is a variable-length array on the stack;
 for large framebuffers this can overflow the stack — consider malloc.
 @param screen Framebuffer.
 @param w Width of the framebuffer.
 @param h Height of the framebuffer.
*/
void life(char *screen, int w, int h)
{
    char field[w * h];

    /* Generate a random initial playing field. */
    srand(time(NULL));
    for (int i = 0; i < w * h; i++)
        field[i] = (rand() % 100 < 75)? 1: 0;

    /* Find the maximum number of threads for the current GPU. */
    struct cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, 0);
    char *d_field, *d_screen, *d_neighbors;
    int maxthds = properties.maxThreadsPerBlock;

    /* Copy memory to the GPU's internal RAM.  Since this process is slow,
       it's done only once and the state of the playing field is effectively
       kept in GPU's memory, updating the main RAM only to display the result
       on screen. */
    cudaMalloc((void **) &d_field, sizeof(char) * w * h);
    cudaMalloc((void **) &d_neighbors, sizeof(char) * w * h);
    cudaMalloc((void **) &d_screen, sizeof(char) * w * h * 4);
    cudaMemcpy(d_field, field, sizeof(char) * w * h, cudaMemcpyHostToDevice);
    cudaMemcpy(d_screen, screen, sizeof(char) * w * h * 4, cudaMemcpyHostToDevice);

    while (1) /* Maybe a termination condition would have been more elegant... */
    {
        /* Count the neighbors, synchronize the threads (in order to avoid
           race conditions), then update the state of each cell. */
        countNeighbors<<<(w * h + maxthds - 1) / maxthds, maxthds>>>(d_field, d_neighbors, w, h);
        cudaDeviceSynchronize();
        updateCell<<<(w * h + maxthds - 1) / maxthds, maxthds>>>(d_field, d_neighbors, d_screen, w, h);
        /* Synchronize the threads again and finally copy the framebuffer from
           GPU's internal memory to main RAM (and therefore display the result). */
        cudaDeviceSynchronize();
        cudaMemcpy(screen, d_screen, sizeof(char) * w * h * 4, cudaMemcpyDeviceToHost);
    }

    /* Free the GPU's internal memory.  Since the loop condition is never
       false, this cleanup is never performed. */
    cudaFree(d_field);
    cudaFree(d_neighbors);
    cudaFree(d_screen);
    /* printf("%s\n", cudaGetErrorString(cudaGetLastError())); Debug message. */
}

/** Program entrypoint.
 Initializes the main framebuffer (/dev/fb0), as suggested on:
 https://stackoverflow.com/a/1830865
 then passes it to the life() function.
 @return 0 if no error occurred, 1 otherwise.
*/
int main()
{
    struct fb_var_screeninfo screen_info;
    struct fb_fix_screeninfo fixed_info;
    char *buffer = NULL;
    size_t buflen;
    int fd = -1;
    int r = 1;

    fd = open("/dev/fb0", O_RDWR);
    if (fd >= 0)
    {
        if (!ioctl(fd, FBIOGET_VSCREENINFO, &screen_info) &&
            !ioctl(fd, FBIOGET_FSCREENINFO, &fixed_info))
        {
            buflen = screen_info.yres_virtual * fixed_info.line_length;
            buffer = (char *) mmap(NULL, buflen, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
            if (buffer != MAP_FAILED)
            {
                life(buffer, screen_info.xres_virtual, screen_info.yres_virtual); /* Start the game. */
                r = 0;
            }
            else
            {
                perror("mmap");
            }
        }
        else
        {
            perror("ioctl");
        }
    }
    else
    {
        perror("open");
    }

    if (buffer && buffer != MAP_FAILED)
        munmap(buffer, buflen);
    if (fd >= 0)
        close(fd);

    return r;
}
1,537
// Host defines
#define NUM_THREADS 1024
#define NUM_GRID 1
#define MAX_SIM_NUM 50000
#define THRESHOLD 2

// Includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <limits.h>

using namespace std;

// GPU Kernels declarations (definition lives in another translation unit)
__global__ void CudaTest_kernel(int max_sim_num, int threshold, unsigned int* di_index, unsigned int* di_query, unsigned int* result, char* di_static_table, unsigned int* result_num, unsigned int frame_num_index, unsigned int frame_num_query);

// Builds a similarity table between query frame hashcodes and a library of
// frame hashcodes: for each query frame, library frames whose Hamming
// distance is within `threshold` (up to MAX_SIM_NUM matches) are written to
// resultPath, one line per query.  The 256-entry static table below is the
// per-byte popcount lookup used by the kernel to compute Hamming distances.
// NOTE(review): FILE* fp is reopened for the query file without closing the
// hashcode file first (handle leak), and the commented-out frees at the end
// mean host/device memory is never released — both kept as-is; confirm
// whether that is intentional for this one-shot tool.
void bulidSimTable(string hashcodePath, string queryPath, string resultPath, unsigned int frame_num_index, unsigned int frame_num_query, int threshold){
	clock_t cstart=clock();
	//initalize data size
	// unsigned int frame_num_index = 924287;//number of frames in lib
	// unsigned int frame_num_query =1068;//number of frames in query
	unsigned int static_table_num = 256;
	unsigned int mem_size_lib = frame_num_index * sizeof(unsigned int);//the memory size hvectors_in_lib needed
	unsigned int mem_size_query = frame_num_query * sizeof(unsigned int);//the memory size hvectors_query needed
	unsigned int result_size = frame_num_query * MAX_SIM_NUM * sizeof(unsigned int);//the memory size which will store the result
	unsigned int static_table_size = static_table_num * sizeof(char);

	// Host variables
	unsigned int* hi_index;//input data in cpu which contains all feature vectors of frames in lib
	unsigned int* hi_query;//input data in cpu which contains all feature vectors of query frames
	unsigned int* ho_result;//the result in host
	unsigned int* ho_result_cnt;
	// 8-bit popcount lookup table: hi_static_table[b] = number of set bits in byte b.
	char hi_static_table[256]= {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
	};

	// Device variables
	unsigned int* di_index;//input data in gpu which contains all feature vectors of frames
	unsigned int* di_query;//input data in cpu which contains all feature vectors of query frames
	unsigned int* do_result;//the result in device
	unsigned int* do_result_cnt;
	char* di_static_table;

	//allocate memory in host and device
	hi_index = (unsigned int*) malloc(mem_size_lib);//allocate the memory to hvectors_in_lib
	hi_query = (unsigned int*) malloc(mem_size_query);//allocate the memory to hvectors_query
	ho_result_cnt = (unsigned int*) malloc(mem_size_query);
	cudaMalloc((void**) &di_index, mem_size_lib);//allocate dvectors_in_lib to the device memory
	cudaMalloc((void**) &di_query, mem_size_query);//allocate dvectors_query to the device memory
	cudaMalloc((void**) &di_static_table, static_table_size);//allocate device_result to the device memory
	cudaMalloc((void**) &do_result_cnt, mem_size_query);
	// Mapped (zero-copy) host memory for the result: do_result is the device
	// alias of ho_result, so the kernel writes straight into host memory.
	cudaSetDevice(0);
	cudaSetDeviceFlags(cudaDeviceMapHost);
	cudaHostAlloc((void**)&(ho_result),result_size,cudaHostAllocMapped);
	cudaHostGetDevicePointer((void**)&(do_result),ho_result,0);

	//initalize host data
	unsigned int n;
	int cnt = 0;
	FILE *fp,*fp2;
	fp = fopen(hashcodePath.c_str(),"r");
	//cout<<hashcodePath.c_str()<<endl;
	while(!feof(fp) && cnt < frame_num_index){
		fscanf(fp, "%u", &n);
		hi_index[cnt] = n;
		cnt++;
	}
	//cout<<"count ="<<cnt<<endl;
	cnt = 0;
	// NOTE(review): previous fp is not fclose()d before being reassigned.
	fp = fopen(queryPath.c_str(),"r");
	//cout<<queryPath.c_str()<<endl;
	while(!feof(fp) && cnt < frame_num_query){
		fscanf(fp,"%u", &n);
		hi_query[cnt] = n;
		cnt++;
	}
	//cout<<"count = "<<cnt<<endl;
	memset(ho_result_cnt, 0, frame_num_query*4);

	//initalize device data
	cudaMemcpy(di_index, hi_index, mem_size_lib, cudaMemcpyHostToDevice);//copy hvectors_in_lib in memory to dvectors_in_lib in gpu
	cudaMemcpy(di_query, hi_query, mem_size_query, cudaMemcpyHostToDevice);//copy hvectors_query in memory to dvectors_query in gpu
	cudaMemcpy(di_static_table,hi_static_table,static_table_size,cudaMemcpyHostToDevice);
	cudaMemcpy(do_result_cnt, ho_result_cnt, mem_size_query, cudaMemcpyHostToDevice);

	//excute kernel in GPU
	//int threshold = THRESHOLD;
	int max_sim_num = MAX_SIM_NUM;
	dim3 grid((int)ceil((double)frame_num_index/1024), (int)ceil((double)frame_num_query/512), 1);
	CudaTest_kernel<<<grid, NUM_THREADS>>>(max_sim_num, threshold, di_index, di_query, do_result, di_static_table, do_result_cnt, frame_num_index, frame_num_query);
	// NOTE(review): cudaThreadSynchronize is deprecated; cudaDeviceSynchronize
	// is the modern equivalent — behaviour kept.
	cudaThreadSynchronize();

	//copy the result back to host memory
	cudaMemcpy(ho_result_cnt, do_result_cnt, mem_size_query, cudaMemcpyDeviceToHost);
	cudaMemcpy(ho_result, do_result, result_size, cudaMemcpyDeviceToHost);

	//deal with the result
	// printf("query:%u\n",hi_query[0]);
	// for(int i = 0; i < frame_num_query ; i ++)
	// {
	// //printf("result 0:%u\n",ho_result_cnt[i]);
	// for(int j = 0; j < ho_result_cnt[i]; j++)
	// cout<<ho_result[i*MAX_SIM_NUM+j]<<" ";
	// cout<<endl;
	// }
	fp2 = fopen(resultPath.c_str(),"w+");
	int max_length = 0;
	for(int i = 0 ; i < frame_num_query ; i++)
	{
		if(max_length < ho_result_cnt[i])
			max_length = ho_result_cnt[i];
		for(int j = 0; j < ho_result_cnt[i]; j++)
			fprintf(fp2,"%u ",ho_result[i*MAX_SIM_NUM+j]);
		fprintf(fp2,"\n");
	}
	cout<<"max matched count is: "<<max_length<<endl;
	cout << "size of long: " << sizeof(2l) << endl;
	clock_t cend=clock();
	printf("time:%f\n",(float)(cend-cstart)/CLOCKS_PER_SEC);

	//// free up the host memory
	// free(ho_result_cnt);
	// free(hi_index);
	// free(hi_query);
	// free(ho_result);
	//
	//// free up the device memory
	// cudaFree(do_result_cnt);
	// cudaFree(di_index);//
	// cudaFree(di_query);
	// cudaFree(do_result);
}

// Placeholder: intended to sort the match table; currently a no-op.
void sortTable(unsigned int* ho_result_cnt, unsigned int* ho_result, unsigned int frame_num_query){
}

//////////////////////
// Program main
//////////////////////
int main(int argc, char** argv)
{
	if(argc == 7)
	{
		string hashcodePath = argv[1];
		string queryPath = argv[2];
		string resultPath = argv[3];
		int frame_num_index = atoi(argv[4]);
		int frame_num_query = atoi(argv[5]);
		int threshold = atoi(argv[6]);
		// unsigned int frame_num_index = 1000001;
		// unsigned int frame_num_query =493;
		// int threshold = 2;
		// string hashcodePath = "/root/hashcode.txt";
		// string queryPath = "/root/query.txt";
		// string resultPath = "/root/result.txt";
		//cout<<hashcodePath<<endl<<queryPath<<endl<<frame_num_index<<endl<<frame_num_query<<endl<<threshold<<endl;
		bulidSimTable(hashcodePath, queryPath, resultPath, frame_num_index, frame_num_query, threshold);
	}
	return 0;
}
1,538
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// DEVICE CODE
// Each of the 64 launched threads prints one greeting line.
__global__ void hello_cuda()
{
    printf("Hello CUDA world!\n");
}

// HOST CODE
int main()
{
    // 16x4 logical domain split into 8x2 thread blocks -> a 2x2 grid.
    const int nx = 16;
    const int ny = 4;
    dim3 block(8, 2, 1);
    dim3 grid(nx / block.x, ny / block.y, 1);

    hello_cuda<<<grid, block>>>();

    // Wait for all device printf output, then tear the context down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
1,539
// Some useful utilities

// system includes
#include <stdio.h>
#include <assert.h>
#include <cuda.h>

// External function definitions

// Abort the process with a diagnostic if the last CUDA call failed.
// msg: caller-supplied context string included in the error output.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}

// When there is more than one device, select the device with the most
// multiprocessors, make it current, and report its characteristics.
// On return, *major / *minor hold the selected device's compute capability
// (left untouched if no CUDA device is present).
void selectAndReport(int *major, int *minor)
{
    int best_gpu = 0;

    // Use the device with the most cores
    int number_of_devices;
    cudaGetDeviceCount(&number_of_devices);
    checkCUDAError("Get Count");
    printf("\n%d Devices\n",number_of_devices);
    if (number_of_devices > 1)
    {
        int max_cores = 0;
        int device_number;
        for (device_number = 0; device_number < number_of_devices; device_number++)
        {
            cudaDeviceProp device_properties;
            // NOTE(review): return value not checked here (checked calls are
            // used elsewhere in this function via checkCUDAError).
            cudaGetDeviceProperties(&device_properties, device_number);
            printf("Device # %d has %d cores\n",device_number, device_properties.multiProcessorCount);
            double gb = 1024*1024*1024;   // bytes per GiB (fits in int before widening)
            printf("Device # %d has %f GB global memory\n",device_number, ((double)device_properties.totalGlobalMem)/gb);
            // "cores" here is the multiprocessor (SM) count, not CUDA cores.
            if (max_cores < device_properties.multiProcessorCount)
            {
                max_cores = device_properties.multiProcessorCount;
                best_gpu = device_number;
            }
        }
        printf("\n *** Best GPU is: %d\n",best_gpu);
        cudaSetDevice(best_gpu);
        checkCUDAError("Can't set device\n");
    }
    printf("\n");

    // Re-query the now-current device and report its properties.
    int devID;
    cudaGetDevice(&devID);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, devID);
    printf("Device %d has %d cores\n", best_gpu, deviceProp.multiProcessorCount);
    double gb = 1024*1024*1024;
    printf("Device # %d has %f GB global memory\n",best_gpu, ((double)deviceProp.totalGlobalMem)/gb);
    // 9999.9999 is the sentinel capability reported when no CUDA device exists.
    if (deviceProp.major == 9999 && deviceProp.minor == 9999)
    {
        printf("There is no device supporting CUDA.\n");
        // NOTE(review): cudaThreadExit() is deprecated; cudaDeviceReset() is
        // the modern equivalent.
        cudaThreadExit();
    }
    else
    {
        printf("Device is a %s, capability: %d.%d\n", deviceProp.name, deviceProp.major, deviceProp.minor);
        *major = deviceProp.major;
        *minor = deviceProp.minor;
    }

    int driverVersion, runtimeVersion;
    // assert() is compiled out under NDEBUG, which would drop the calls'
    // error checks (the calls themselves stay inside the assert expression).
    assert(cudaSuccess == cudaDriverGetVersion(&driverVersion));
    assert(cudaSuccess == cudaRuntimeGetVersion(&runtimeVersion));
    printf("CUDA Driver version: %d, runtime version: %d\n\n", driverVersion, runtimeVersion);
}
1,540
/*
Command to compile on Windows:
nvcc .\lab5_2_3.cu -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64"

Output should be:
a: [22, 13, 16, 5]
b: [5, 22, 17, 37]
c: 853
*/
#include <stdio.h>

// Dot product of a and b (each blockDim.x ints) into *c.
// Launch with ONE block of N threads and N*sizeof(int) dynamic shared memory.
// Thread i stages a[i]*b[i] in shared memory; after the barrier, thread 0
// serially accumulates the partial products into *c.
__global__ void dot_prod(int *c, int *a, int *b)
{
    extern __shared__ int tmp[];
    tmp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
    __syncthreads();
    if (threadIdx.x == 0) {
        *c = 0;
        for (int i = 0; i < blockDim.x; i++) {
            *c += tmp[i];
        }
    }
}

// Format the first n ints of array as "[x, y, ...]" into str (capacity
// str_size, always NUL-terminated by snprintf).
// FIX: the original unconditionally read array[n - 1]; with n < 1 that is an
// out-of-bounds read. An empty/negative count now yields "[]".
void array2str(char *str, int *array, int n, int str_size)
{
    int written = 0;
    written += snprintf(str + written, str_size - written, "[");
    if (n < 1) {
        snprintf(str + written, str_size - written, "]");
        return;
    }
    for (int idx = 0; idx < n - 1; idx++) {
        written += snprintf(str + written, str_size - written, "%i, ", *(array + idx));
    }
    written += snprintf(str + written, str_size - written, "%i]", *(array + n - 1));
    return;
}

int main(void)
{
    /* Intiialize inputs (CPU) */
    const int N = 4;
    int a[N] = {22, 13, 16, 5};
    int b[N] = {5, 22, 17, 37};
    int c = 0;
    int *c_p = &c;

    char str_a[80];
    char str_b[80];
    array2str(str_a, a, N, 80);
    array2str(str_b, b, N, 80);
    printf("a: %s\n", str_a);
    printf("b: %s\n", str_b);

    /* Allocate memory for calculation on GPU */
    int *gpu_a;
    int *gpu_b;
    int *gpu_c;
    cudaMalloc((void**) &gpu_a, sizeof(int) * N);
    cudaMalloc((void**) &gpu_b, sizeof(int) * N);
    cudaMalloc((void**) &gpu_c, sizeof(int));

    /* Copy inputs to GPU */
    cudaMemcpy(gpu_a, a, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, sizeof(int) * N, cudaMemcpyHostToDevice);

    /* Do the thing: 1 block, N threads, N ints of dynamic shared memory.
       The blocking cudaMemcpy below also synchronizes with the kernel. */
    dot_prod<<<1, N, sizeof(int) * N>>>(gpu_c, gpu_a, gpu_b);
    cudaMemcpy(c_p, gpu_c, sizeof(int), cudaMemcpyDeviceToHost);

    /* Remember to clean up after ourselves */
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);

    /* Print result */
    printf("c: %i\n", *c_p);
    return 0;
}
1,541
#include "simuParams.cuh"

__host__ SimuParams::SimuParams(){}

// Populate all simulation parameters, derive the step count, and push a copy
// of this object to device memory (devPointer).
// FIX: the original computed `time_end = timestep*steps` BEFORE `steps` (and
// before `res_x`, which `steps` depends on) was assigned, so `time_end` was
// derived from an indeterminate member value. `steps` is now computed first.
__host__ SimuParams::SimuParams(int _number_of_particles, int _particles_per_stream,
        real _TR, real _TE, real _timestep, int _n_mags_track, Vector3 m_initial,
        Vector3 _B0, int _res_x, int _res_y, real _FOVx, real _FOVy ):seed_offset(0){

    number_of_particles = _number_of_particles;
    particles_per_stream = _particles_per_stream;
    // Stream-level concurrency only when particles are split across streams.
    if (number_of_particles != particles_per_stream)
        particle_concurrency = true;
    else
        particle_concurrency = false;
    num_streams = number_of_particles / particles_per_stream;

    TR = _TR;
    TE = _TE;
    measurements = 1;
    timestep = _timestep;
    n_mags_track = _n_mags_track;
    seed = time(NULL);

    mx_initial = m_initial.x;
    my_initial = m_initial.y;
    mz_initial = m_initial.z;

    B0 = _B0;
    blocks = particles_per_stream / SIM_THREADS;
    res_x = _res_x;
    res_y = _res_y;
    FOVx = _FOVx;
    FOVy = _FOVy;
    //END PARAMETERS.

    // One TR per readout line, res_x lines; must precede time_end.
    steps = (TR / timestep) * res_x;
    time_end = timestep*steps;
    printf("Number of steps in simulation: %d\n", steps);

    copyToDevice();
}

// Returns a per-stream seed: base seed advanced by one stream's worth of
// particles on every call (seed_offset is post-incremented).
__host__ int SimuParams::getSeed(){
    return seed+(particles_per_stream*seed_offset++);
}

// Allocate a device-side copy of this parameter object and upload it.
// NOTE(review): return values of cudaMalloc/cudaMemcpy are not checked.
__host__ void SimuParams::copyToDevice(){
    cudaMalloc(&devPointer, sizeof(SimuParams));
    cudaMemcpy(devPointer, this, sizeof(SimuParams), cudaMemcpyHostToDevice);
}
1,542
/**
 * TSM2 and ISM2 Testbed and Evaluation Platform
 * by Cody Rivera, 2019-2020
 *
 * Usage - ./multiply [-d] [-i] matrixA matrixB matrixC
 * where -d signifies double precision, and -i signifies
 * ISM2
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>

#include "cublas_v2.h"
#include "cuda_runtime.h"

#include "cuda_error.cuh"
#include "kernels.cuh"
#include "multiply.cuh"
#include "launch_cublas.cuh"

// NOTE(review): main() below uses strcmp() but <cstring> is not included;
// this relies on a transitive include.

// Testing Parameters -- Adjust as needed
#define EPS 1e-3
#define N_WARMUP 10
#define N_ROUNDS 100

/**
 * Testbed helper functions.
 */

// Relative floating-point comparison: true when |A-B| <= maxRelDiff * max(|A|,|B|).
// Based on
// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
bool approxEqual(double A, double B, double maxRelDiff = EPS) {
    // Calculate the difference.
    double diff = fabs(A - B);
    A = fabs(A);
    B = fabs(B);
    // Find the largest
    double largest = (B > A) ? B : A;
    if (diff <= largest * maxRelDiff) return true;
    return false;
}

// Element-wise approximate comparison of two m x n column-major matrices.
// Returns true if all elements match; otherwise iFail/jFail hold the first
// mismatching coordinate (iFail/jFail are untouched on success).
template <typename FloatType>
bool matrixCompare(const FloatType* A, const FloatType* B, unsigned int m,
                   unsigned int n, unsigned int& iFail, unsigned int& jFail) {
    FloatType aVal, bVal;
    bool b = true;
    // Cache-friendly comparison pattern: column-major traversal.
    for (unsigned int j = 0; j < n && b; j++) {
        for (unsigned int i = 0; i < m && b; i++) {
            aVal = A[i + (j * m)];
            bVal = B[i + (j * m)];
            if (!approxEqual(aVal, bVal, EPS)) {
                iFail = i;
                jFail = j;
                b = false;
            }
        }
    }
    return b;
}

// Print a success line with kernel-only and transfer-inclusive GFLOPs rates.
template <typename FloatType>
void reportTestSuccess(const char* testName, double GFLOPs, double totalGFLOPs) {
    printf("%s succeeded: %g GFLOPs, %g GFLOPs acc. for transfers\n", testName,
           GFLOPs, totalGFLOPs);
}

// Print the first mismatching element of the candidate matrix against the
// reference. leadDim is the matrices' leading dimension (rows, column-major).
template <typename FloatType>
void reportTestFailure(const char* testName, const FloatType* orig,
                       const FloatType* cand, unsigned int leadDim,
                       unsigned int iFail, unsigned int jFail) {
    double oVal = (double)orig[iFail + (jFail * leadDim)];
    double cVal = (double)cand[iFail + (jFail * leadDim)];
    fprintf(stderr, "%s failed: Original[%u, %u] = %.6f != Candidate[%u, %u] = %.6f\n",
            testName, iFail, jFail, oVal, iFail, jFail, cVal);
}

// GFLOPs rate for an m*n*k multiply given elapsed time in milliseconds.
// NOTE(review): counts m*n*k operations, i.e. multiply-adds, not 2*m*n*k flops.
template <typename FloatType>
double getGFLOPs(double time, unsigned int m, unsigned int n, unsigned int k) {
    double instCount = ((double)m * (double)n * (double)k) / 1e9;
    double timeSeconds = time / 1000;
    return instCount / timeSeconds;
}

/**
 * Kernel launch wrapper. Runs both CUBLAS and TSM2/ISM2, for evaluation
 * purposes.
 *
 * A (m x k) and B (k x n) are multiplied into C (m x n), all column-major
 * host buffers. CUBLAS is timed first and its result kept in C as the
 * reference; the TSM2/ISM2 result is compared against it. Each phase is timed
 * twice: kernel-only (start/end) and including transfers (startTotal/endTotal),
 * both averaged over N_ROUNDS after N_WARMUP untimed runs.
 */
template <typename FloatType>
bool runKernels(const FloatType* A, const FloatType* B, FloatType* C,
                const unsigned int m, const unsigned int n, const unsigned int k,
                const bool runIsm2) {
    // Candidate for C -- Used by GPU kernels
    FloatType* candC;
    // Device memory
    FloatType *devA, *devB, *devC;
    // Events used for timing
    cudaEvent_t start, end, startTotal, endTotal;
    float time, timeTotal;

    printf("Multiplying matrix A[%u, %u] by matrix B[%u, %u]\n\n", m, k, k, n);

    // Change test name depending on runIsm2
    const char* testName = "TSM2 Kernel Test";
    if (runIsm2) {
        testName = "ISM2 Kernel Test";
    }

    // Allocates new memory
    candC = (FloatType*)malloc(m * n * sizeof(FloatType));
    if (candC == NULL) {
        fprintf(stderr, "Not enough memory\n");
        return false;
    }

    cudaErrchk(cudaMalloc((FloatType**)&devA, m * k * sizeof(FloatType)));
    cudaErrchk(cudaMalloc((FloatType**)&devB, k * n * sizeof(FloatType)));
    cudaErrchk(cudaMalloc((FloatType**)&devC, m * n * sizeof(FloatType)));

    // Inits CUDA events
    cudaErrchk(cudaEventCreate(&start));
    cudaErrchk(cudaEventCreate(&end));
    cudaErrchk(cudaEventCreate(&startTotal));
    cudaErrchk(cudaEventCreate(&endTotal));

    // Runs CUBLAS call
    cublasHandle_t handle;
    cublasErrchk(cublasCreate(&handle));
    FloatType one = 1;
    FloatType zero = 0;

    cudaErrchk(cudaEventRecord(startTotal));
    // Cuda Memory Copy
    cudaErrchk(cudaMemcpy(devA, A, m * k * sizeof(FloatType), cudaMemcpyHostToDevice));
    cudaErrchk(cudaMemcpy(devB, B, k * n * sizeof(FloatType), cudaMemcpyHostToDevice));

    for (int i = 0; i < N_WARMUP; ++i) {
        cublasErrchk(launchCublas<FloatType>(handle, one, zero, devA, devB, devC, m, n, k));
    }
    cudaErrchk(cudaEventRecord(start));
    for (int i = 0; i < N_ROUNDS; ++i) {
        cublasErrchk(launchCublas<FloatType>(handle, one, zero, devA, devB, devC, m, n, k));
    }
    cudaErrchk(cudaEventRecord(end));

    // Copies result back (blocking; C now holds the CUBLAS reference)
    cudaErrchk(cudaMemcpy(C, devC, m * n * sizeof(FloatType), cudaMemcpyDeviceToHost));
    cudaErrchk(cudaEventRecord(endTotal));
    cudaErrchk(cudaDeviceSynchronize());

    cudaErrchk(cudaEventElapsedTime(&time, start, end));
    cudaErrchk(cudaEventElapsedTime(&timeTotal, startTotal, endTotal));
    // Average the timed loops over their round count.
    time /= N_ROUNDS;
    timeTotal /= N_ROUNDS;

    reportTestSuccess<FloatType>("CUBLAS Test", getGFLOPs<FloatType>(time, m, n, k),
                                 getGFLOPs<FloatType>(timeTotal, m, n, k));

    cublasErrchk(cublasDestroy(handle));

    // Runs kernel
    // Failure flag
    bool status;
    // Failure indices
    unsigned int iFail, jFail;

    // Clear result matrix
    cudaErrchk(cudaMemset(devC, 0, m * n * sizeof(FloatType)));

    cudaErrchk(cudaEventRecord(startTotal));
    // Cuda Memory Copy
    cudaErrchk(cudaMemcpy(devA, A, m * k * sizeof(FloatType), cudaMemcpyHostToDevice));
    cudaErrchk(cudaMemcpy(devB, B, k * n * sizeof(FloatType), cudaMemcpyHostToDevice));

    for (int i = 0; i < N_WARMUP; ++i) {
        // devC is re-zeroed each round; the cudaMemset cost is inside the
        // timed region for the measured loop below.
        cudaErrchk(cudaMemset(devC, 0, m * n * sizeof(FloatType)));
        if (runIsm2) {
            launchKernelIsm2(devA, devB, devC, m, n, k);
        } else {
            launchKernelTsm2(devA, devB, devC, m, n, k);
        }
    }
    cudaErrchk(cudaEventRecord(start));
    for (int i = 0; i < N_ROUNDS; ++i) {
        cudaErrchk(cudaMemset(devC, 0, m * n * sizeof(FloatType)));
        if (runIsm2) {
            launchKernelIsm2(devA, devB, devC, m, n, k);
        } else {
            launchKernelTsm2(devA, devB, devC, m, n, k);
        }
    }
    cudaErrchk(cudaGetLastError());
    cudaErrchk(cudaEventRecord(end));

    // Copies result back
    cudaErrchk(cudaMemcpy(candC, devC, m * n * sizeof(FloatType), cudaMemcpyDeviceToHost));
    cudaErrchk(cudaEventRecord(endTotal));
    cudaErrchk(cudaDeviceSynchronize());

    cudaErrchk(cudaEventElapsedTime(&time, start, end));
    cudaErrchk(cudaEventElapsedTime(&timeTotal, startTotal, endTotal));
    time /= N_ROUNDS;
    timeTotal /= N_ROUNDS;

    // Validate the TSM2/ISM2 result against the CUBLAS reference.
    status = matrixCompare<FloatType>(C, candC, m, n, iFail, jFail);
    if (status) {
        reportTestSuccess<FloatType>(testName, getGFLOPs<FloatType>(time, m, n, k),
                                     getGFLOPs<FloatType>(timeTotal, m, n, k));
    } else {
        reportTestFailure<FloatType>(testName, C, candC, m, iFail, jFail);
    }

    cudaErrchk(cudaEventDestroy(start));
    cudaErrchk(cudaEventDestroy(end));
    cudaErrchk(cudaEventDestroy(startTotal));
    cudaErrchk(cudaEventDestroy(endTotal));

    free(candC);
    cudaErrchk(cudaFree(devA));
    cudaErrchk(cudaFree(devB));
    cudaErrchk(cudaFree(devC));
    return true;
}

/**
 * Runs testbed on specified input files. Handles file IO.
 *
 * File format (binary): two unsigned ints (rows, cols) followed by the
 * column-major element data. The output file gets C's dimensions then its
 * data. Returns true on success; note the mismatch path succeeds even though
 * a comparison failure was reported (only I/O or allocation failures return
 * false).
 */
template <typename FloatType>
bool runMatmul(std::istream& fileA, std::istream& fileB, std::ostream& outFile,
               bool runIsm2) {
    FloatType *A, *B, *C;
    int m, n, k, kCand;

    // Reads Matrix Sizes (stored as unsigned int, read into int variables)
    fileA.read((char*)&m, sizeof(unsigned int));
    fileA.read((char*)&k, sizeof(unsigned int));
    fileB.read((char*)&kCand, sizeof(unsigned int));
    fileB.read((char*)&n, sizeof(unsigned int));

    if (k != kCand) {
        fprintf(stderr,
                "Matrix multiplication is undefined where A's"
                "column count is not equal\n to B's row count\n\n"
                "Matrix A (%u x %u) and Matrix B (%u x %u)\n",
                m, k, kCand, n);
        return false;
    }

    // Mallocs Matrices on CPU
    A = (FloatType*)malloc((size_t)m * k * sizeof(FloatType));
    B = (FloatType*)malloc((size_t)k * n * sizeof(FloatType));
    C = (FloatType*)malloc((size_t)m * n * sizeof(FloatType));
    if (A == NULL || B == NULL || C == NULL) {
        // NOTE(review): whichever of A/B/C did get allocated is leaked here.
        fprintf(stderr, "Not enough memory\n");
        return false;
    }

    // Loads Data to Matrix A and B
    fileA.read((char*)A, (size_t)m * k * sizeof(FloatType));
    fileB.read((char*)B, (size_t)k * n * sizeof(FloatType));

    // Calls CUDA
    bool status = runKernels<FloatType>(A, B, C, m, n, k, runIsm2);
    if (!status) {
        free(A);
        free(B);
        free(C);
        return false;
    }

    // Writes output matrix
    outFile.write((const char*)&m, sizeof(unsigned int));
    outFile.write((const char*)&n, sizeof(unsigned int));
    outFile.write((const char*)C, (size_t)m * n * sizeof(FloatType));

    free(A);
    free(B);
    free(C);
    return true;
}

/**
 * Entry point: parses [-d] [-i] plus exactly three file arguments, opens the
 * binary streams, and dispatches to runMatmul<double> or runMatmul<float>.
 */
int main(int argc, char** argv) {
    int fileArg[3];
    int nFiles = 0;
    bool isDouble = false;
    bool runIsm2 = false;
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-d") == 0) {
            isDouble = true;
        } else if (strcmp(argv[i], "-i") == 0) {
            runIsm2 = true;
        } else {
            // Positional argument: remember index of the first three only.
            if (nFiles < 3) {
                fileArg[nFiles] = i;
            }
            nFiles++;
        }
    }
    if (nFiles != 3) {
        fprintf(stderr, "Usage: %s [-d] [-i] matrixA matrixB matrixC\n", argv[0]);
        return 1;
    }
    std::ifstream fileA(argv[fileArg[0]], std::ios::binary),
        fileB(argv[fileArg[1]], std::ios::binary);
    std::ofstream outFile(argv[fileArg[2]], std::ios::binary);
    if (!fileA) {
        fprintf(stderr, "Cannot open %s for reading\n", argv[fileArg[0]]);
        return 1;
    }
    if (!fileB) {
        fprintf(stderr, "Cannot open %s for reading\n", argv[fileArg[1]]);
        return 1;
    }
    if (!outFile) {
        fprintf(stderr, "Cannot open %s for writing\n", argv[fileArg[2]]);
        return 1;
    }
    // Runs matmul
    bool status = false;
    if (isDouble) {
        status = runMatmul<double>(fileA, fileB, outFile, runIsm2);
    } else {
        status = runMatmul<float>(fileA, fileB, outFile, runIsm2);
    }
    fileA.close();
    fileB.close();
    outFile.close();
    if (status) {
        return 0;
    } else {
        return 1;
    }
}
1,543
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>

// Squares each input element: d_out[i] = d_in[i]^2.
// Assumes the launch covers exactly the input length (no bounds guard).
__global__ void squareKernel(float* d_in, float *d_out) {
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const float v = d_in[gid];
    d_out[gid] = v * v;
}

int main(int argc, char** argv) {
    const unsigned int N = 32;
    const unsigned int mem_size = N * sizeof(float);

    // Host buffers: input filled with 0..N-1, output initially untouched.
    float* h_in  = (float*) malloc(mem_size);
    float* h_out = (float*) malloc(mem_size);
    for (unsigned int i = 0; i < N; ++i) h_in[i] = (float)i;

    // Device buffers.
    float *d_in, *d_out;
    cudaMalloc((void**)&d_in,  mem_size);
    cudaMalloc((void**)&d_out, mem_size);

    // Upload, run one block of N threads, download.
    cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
    squareKernel<<< 1, N>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, mem_size, cudaMemcpyDeviceToHost);

    // Report one squared value per line.
    for (unsigned int i = 0; i < N; ++i) printf("%.6f\n", h_out[i]);

    // Release everything.
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
}
1,544
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>

// Zero-initializes all per-pixel working buffers for a w x h image.
// Expected launch: a 2D grid/block covering at least w x h threads; excess
// threads return without writing.
__global__ void Pass0_clean(int32_t *ptBufferDxOut, int32_t *ptBufferDyOut,
                            u_int32_t *ptSobelOut, u_int32_t *ptLabelOut,
                            u_int32_t *ptArea, u_int32_t *ptOut,
                            u_int32_t *u32_BufferCornerList_Device,
                            int w, int h)
{
    int x = blockIdx.x*blockDim.x;
    int y = blockIdx.y*blockDim.y;
    int xglobal = x+threadIdx.x;
    int yglobal = y+threadIdx.y;

    // FIX: guard was `> w || > h`, letting xglobal == w (index aliases the
    // first element of the next row) and yglobal == h (fully out of bounds)
    // through. Valid coordinates are 0..w-1 / 0..h-1, so reject with >=.
    if(xglobal >= w || yglobal >= h)
    {
        return;
    }

    ptBufferDxOut[xglobal+yglobal*w] = 0;
    ptBufferDyOut[xglobal+yglobal*w] = 0;
    ptSobelOut[xglobal+yglobal*w] = 0;
    ptLabelOut[xglobal+yglobal*w] = 0;
    ptArea[xglobal+yglobal*w] = 0;
    ptOut[xglobal+yglobal*w] = 0;
    u32_BufferCornerList_Device[xglobal+yglobal*w] = 0;
}
1,545
#include "add.cuh"

// Kernel running on the GPU: writes a + b through c.
__global__ void add(int a, int b, int *c)
{
    *c = a + b;
}

// Host wrapper: computes a + b on the device with a single-thread launch.
int add(int a, int b)
{
    int result;
    int *d_result;

    cudaMalloc((void**)&d_result, sizeof(int));
    add<<<1, 1>>>(a, b, d_result);
    cudaMemcpy(&result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_result);

    return result;
}

// Member variant: adds this object's a and b fields on the device.
int TEST::ADD()
{
    int result;
    int *d_result;

    cudaMalloc((void**)&d_result, sizeof(int));
    add<<<1, 1>>>(a, b, d_result);
    cudaMemcpy(&result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_result);

    return result;
}
1,546
#include <stdlib.h>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>

// Fill a thrust host_vector with rand() values, mirror it into a std::vector,
// and print the first element.
int main(int argc, char **argv)
{
    const int count = 100;
    thrust::host_vector<int> generated(count);
    std::vector<int> mirror(count);

    thrust::generate(generated.begin(), generated.end(), rand);
    thrust::copy(generated.begin(), generated.end(), mirror.begin());

    printf("%d\n", mirror[0]);
    return 0;
}
1,547
#include<iostream>
#include<cuda.h>
#include<cstdlib>
#include<ctime>
#define LIM 100
using namespace std;

// Element-wise vector addition with a bounds guard for over-provisioned launches.
__global__ void cudaAdd(int *d_a, int *d_b, int *d_c)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx < LIM)
    {
        d_c[idx] = d_a[idx] + d_b[idx];
    }
}

int main()
{
    int a[LIM], b[LIM], c[LIM];
    int *d_a, *d_b, *d_c;

    // Random operands in [0, 999].
    srand(time(NULL));
    for (int i = 0; i < LIM; i++)
    {
        a[i] = rand() % 1000;
        b[i] = rand() % 1000;
    }

    const size_t bytes = sizeof(int) * LIM;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);

    // Over-provisioned grid; the kernel's guard discards the surplus threads.
    cudaAdd<<<(LIM/100)+1, 100>>>(d_a, d_b, d_c);

    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < LIM; i++)
    {
        cout<<a[i]<<" + "<<b[i]<<" = "<<c[i]<<endl;
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
1,548
#include "complex.h"

// Complex numbers are stored as double2: .x = real part, .y = imaginary part.

// Component-wise sum.
__device__ double2 complexPlus(double2 lhs, double2 rhs)
{
    return { lhs.x + rhs.x, lhs.y + rhs.y };
}

// Component-wise difference.
__device__ double2 complexMinus(double2 lhs, double2 rhs)
{
    return { lhs.x - rhs.x, lhs.y - rhs.y };
}

// Complex product: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
__device__ double2 complexMultiply(double2 lhs, double2 rhs)
{
    const double re = lhs.x * rhs.x - lhs.y * rhs.y;
    const double im = lhs.x * rhs.y + lhs.y * rhs.x;
    return { re, im };
}

// Complex square: (a+bi)^2 = (a^2 - b^2) + 2abi.
__device__ double2 complexSquare(double2 z)
{
    return { z.x * z.x - z.y * z.y, 2 * z.x * z.y };
}

// Squared magnitude |z|^2 (avoids the sqrt of a true modulus).
__device__ double complexLength2(double2 z)
{
    return z.x * z.x + z.y * z.y;
}
1,549
// starter code taken from https://www.topcoder.com/community/competitive-programming/tutorials/assignment-problem-and-hungarian-algorithm/
//
// edited by Joseph Greshik and Allee Zarrini
//
// CUDA-assisted Hungarian algorithm: the slack updates and label updates are
// offloaded to kernels; the BFS/augmenting-path search stays on the host.

#include<iostream>
#include<stdio.h>
#include<string>
#include<fstream>
#include<cstring>

// NOTE(review): main() uses clock()/clock_t but neither <ctime> nor <time.h>
// is included; this relies on a transitive include.

#define INF 100000000    //just infinity
#define THREADS_PER_BLOCK 32
// Clamp a block count to at least 1. NOTE(review): unparenthesized macro —
// safe at the single call site but fragile in general.
#define BLOCK_SIZE(a) (a < 1) ? 1 : a

int *cost;               //cost matrix (n x n, row-major), host copy
int *dcost;              //device copy of cost
int n, max_match;        //n workers and n jobs; current matching size
int bytes, blocks;       //bytes = sizeof(int)*n; blocks = kernel grid size
                         // NOTE(review): blocks is derived from BYTES/32, not
                         // n/32, so ~4x more threads than needed are launched;
                         // harmless because every kernel bounds-checks.
int *lx, *ly;            //labels of X and Y parts (host)
int *dlx, *dly;          //labels (device)
int *xy;                 //xy[x] - vertex that is matched with x
int *yx;                 //yx[y] - vertex that is matched with y
bool *S, *T;             //sets S and T in algorithm
bool *dS, *dT;           //device copies of S and T
int *slack;              //as in the algorithm description
int *dslack;
int *slackx;             //slackx[y] such a vertex, that
int *dslackx;            // l(slackx[y]) + l(y) - w(slackx[y],y) = slack[y]
int *prev;               //array for memorizing alternating paths
bool verbose=false;
bool maximum=false;      //true = maximize assignment, false = minimize
int maxi = 0;            //largest cost seen; used to invert costs for min mode

// Kernel: initialize labels — ly[x]=0, lx[x]=max over row x of cost.
// One thread per X vertex.
__global__ void init_labels(int n, int* lx, int* ly, int *cost)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= n) return;
    lx[x] = 0;
    ly[x] = 0;
    for (int y = 0; y < n; y++)
        lx[x] = max(lx[x], cost[x*n+y]);
}

// Serial reference kept for comparison:
//void init_labels_s()
//{
//    memset(lx, 0, sizeof(int)*n);
//    memset(ly, 0, sizeof(int)*n);
//    for (int x = 0; x < n; x++)
//        for (int y = 0; y < n; y++)
//            lx[x] = std::max(lx[x], cost[x][y]);
//}

// Kernel: slack refresh after adding vertex x to S; one thread per Y vertex.
__global__ void add_to_tree(int x, int prevx, int n, int* lx, int* ly,
                            int* cost, int* slack, int* slackx)
//x - current vertex,prevx - vertex from X before x in the alternating path,
//so we add edges (prevx, xy[x]), (xy[x], x)
{
    int y = blockIdx.x * blockDim.x + threadIdx.x;
    if (y >= n) return;
    //update slacks, because we add new vertex to S
    if (lx[x] + ly[y] - cost[x*n+y] < slack[y])
    {
        slack[y] = lx[x] + ly[y] - cost[x*n+y];
        slackx[y] = x;
    }
}

// Serial reference kept for comparison (the S/prev updates now happen on the
// host in augment() before the kernel launch):
//void add_to_tree(int x, int prevx)
//{
//    S[x] = true;                      //add x to S
//    prev[x] = prevx;                  //we need this when augmenting
//    for (int y = 0; y < n; y++)       //update slacks
//        if (lx[x] + ly[y] - cost[x*n+y] < slack[y])
//        {
//            slack[y] = lx[x] + ly[y] - cost[x*n+y];
//            slackx[y] = x;
//        }
//}

// Kernel: compute delta = min slack over vertices not in T, then adjust labels.
// NOTE(review): two correctness hazards here —
//  (1) every thread writes `delta = INF` with no __syncthreads() before the
//      atomicMin, so a late initializer can overwrite another warp's minimum;
//  (2) `delta` is __shared__, i.e. per-BLOCK: with more than one block each
//      block uses its own local minimum, not the global one the serial
//      algorithm requires. Confirm intended grid size before relying on this.
__global__ void update_labels(int n, bool* T, bool* S, int* lx, int* ly, int* slack)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= n) return;
    __shared__ int delta;
    delta = 100000000;
    //calculate delta using slack
    if (!T[x])
        atomicMin(&delta, slack[x]);
    //update X labels
    __syncthreads();
    if (S[x]) lx[x] -= delta;
    if (T[x]) ly[x] += delta;
    else slack[x] -= delta;
}

// Serial reference kept for comparison:
//void update_labels()
//{
//    int x, delta = INF;               //init delta as infinity
//    for (x = 0; x < n; x++)           //calculate delta using slack
//        if (!T[x]) delta = std::min(delta, slack[x]);
//    for (x = 0; x < n; x++)           //update X labels
//    {
//        if (S[x]) lx[x] -= delta;
//        if (T[x]) ly[x] += delta;
//        else slack[x] -= delta;
//    }
//}

// Grow the alternating tree from an exposed X vertex until an augmenting path
// is found, then flip the path and recurse for the next exposed vertex.
// NOTE(review): `q` (new int[n]) is never delete[]'d — leaks once per call;
// recursion depth can reach n.
void augment()                          //Main function of the algorithm
{
    if (max_match == n) return;         //check wether matching is already perfect
    int x, y, root;                     //just counters and root vertex
    int *q, wr = 0, rd = 0;             //q - queue for bfs, wr,rd - write and read
    q = new int[n];                     //pos in queue
    memset(S, false, sizeof(bool)*n);   //init set S
    memset(T, false, sizeof(bool)*n);   //init set T
    memset(prev, -1, sizeof(int)*n);    //init set prev - for the alternating tree

    for (x = 0; x < n; x++)             //finding root of the tree
        if (xy[x] == -1)
        {
            q[wr++] = root = x;
            prev[x] = -2;
            S[x] = true;
            break;
        }

    for (y = 0; y < n; y++)             //initializing slack array
    {
        slack[y] = lx[root] + ly[y] - cost[root*n+y];
        slackx[y] = root;
    }
    // Push the freshly initialized slack state to the device.
    cudaMemcpy(dslack, slack, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dslackx, slackx, bytes, cudaMemcpyHostToDevice);

    //second part of augment() function
    while (true)                        //main cycle
    {
        while (rd < wr)                 //building tree with bfs cycle
        {
            x = q[rd++];                //current vertex from X part
            for (y = 0; y < n; y++)     //iterate through all edges in equality graph
                if (cost[x*n+y] == lx[x] + ly[y] && !T[y])
                {
                    if (yx[y] == -1) break;  //an exposed vertex in Y found, so
                                             //augmenting path exists!
                    T[y] = true;             //else just add y to T,
                    q[wr++] = yx[y];         //add vertex yx[y], which is matched
                                             //with y, to the queue
                    S[yx[y]] = true;         //add x to S
                    prev[yx[y]] = x;         //we need this when augmenting
                    //add_to_tree(yx[y], x);
                    add_to_tree<<<blocks, THREADS_PER_BLOCK>>>(yx[y], x, n, dlx, dly, dcost, dslack, dslackx);
                    //add edges (x,y) and (y,yx[y]) to the tree
                }
            if (y < n) break;           //augmenting path found!
        }
        if (y < n) break;               //augmenting path found!

        // Sync host tree state to the device, improve the labeling there,
        // then pull the updated labels/slacks back.
        cudaMemcpy(dT, T, sizeof(bool)*n, cudaMemcpyHostToDevice);
        cudaMemcpy(dS, S, sizeof(bool)*n, cudaMemcpyHostToDevice);
        cudaMemcpy(dslack, slack, bytes, cudaMemcpyHostToDevice);
        //update_labels();
        update_labels<<<blocks, THREADS_PER_BLOCK>>>(n, dT, dS, dlx, dly, dslack);
        //augmenting path not found, so improve labeling
        cudaMemcpy(lx, dlx, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(ly, dly, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(slack, dslack, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(slackx, dslackx, bytes, cudaMemcpyDeviceToHost);

        wr = rd = 0;
        for (y = 0; y < n; y++)
            //in this cycle we add edges that were added to the equality graph as a
            //result of improving the labeling, we add edge (slackx[y], y) to the tree if
            //and only if !T[y] && slack[y] == 0, also with this edge we add another one
            //(y, yx[y]) or augment the matching, if y was exposed
            if (!T[y] && slack[y] == 0)
            {
                if (yx[y] == -1)        //exposed vertex in Y found - augmenting path exists!
                {
                    x = slackx[y];
                    break;
                }
                else
                {
                    T[y] = true;        //else just add y to T,
                    if (!S[yx[y]])
                    {
                        q[wr++] = yx[y];          //add vertex yx[y], which is matched with
                                                  //y, to the queue
                        S[yx[y]] = true;          //add x to S
                        prev[yx[y]] = slackx[y];  //we need this when augmenting
                        //add_to_tree(yx[y],slackx[y]);
                        add_to_tree<<<blocks, THREADS_PER_BLOCK>>>(yx[y], slackx[y], n, dlx, dly, dcost, dslack, dslackx);
                        //add edges (x,y) and (y,yx[y]) to the tree
                        cudaMemcpy(slack, dslack, bytes, cudaMemcpyDeviceToHost);
                        cudaMemcpy(slackx, dslackx, bytes, cudaMemcpyDeviceToHost);
                    }
                }
            }
        if (y < n) break;               //augmenting path found!
    }

    if (y < n)                          //we found augmenting path!
    {
        max_match++;                    //increment matching
        //in this cycle we inverse edges along augmenting path
        for (int cx = x, cy = y, ty; cx != -2; cx = prev[cx], cy = ty)
        {
            ty = xy[cx];
            yx[cy] = cx;
            xy[cx] = cy;
        }
        augment();                      //recall function, go to step 1 of the algorithm
    }
}//end of augment() function

// Run the full algorithm; returns the optimal assignment value (re-inverting
// costs via `maxi` when minimizing).
int hungarian()
{
    int ret = 0;
    max_match = 0;
    memset(xy, -1, bytes);
    memset(yx, -1, bytes);
    init_labels<<<blocks, THREADS_PER_BLOCK>>>(n, dlx, dly, dcost);
    // Blocking copies below also synchronize with the kernel.
    cudaMemcpy(lx, dlx, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(ly, dly, bytes, cudaMemcpyDeviceToHost);
    augment();                          //steps 1-3
    for (int x = 0; x < n; x++)
    {//forming answer there
        if (maximum) ret += cost[x*n+xy[x]];
        else ret += maxi-cost[x*n+xy[x]];
    }
    return ret;
}

// Pretty-print per-worker costs and the final worker => job assignment.
void output_assignment()
{
    std::cout<<std::endl;
    for (int x = 0; x < n; x++){        //forming answer there
        if (maximum) std::cout<<cost[x*n+xy[x]]<<"\t";
        else std::cout<<maxi-cost[x*n+xy[x]]<<"\t";
    }
    std::cout<<std::endl<<std::endl;
    std::cout<<"Optimal assignment: "<<std::endl;
    for (int x = 0; x < n; x++){
        std::cout<<x+1<<" => "<<xy[x]+1;
        if(x!=n-1) std::cout<<", ";
    }
    std::cout<<std::endl;
}

// Read "n" then n*n costs from `filename`, allocate every host/device buffer,
// and (in min mode) invert costs as maxi - cost so the max-oriented core
// solves the minimization.
void read_in_cost_matrix(char* filename)
{
    std::ifstream fin (filename);
    fin>>n;
    bytes = sizeof(int)*n;
    blocks = BLOCK_SIZE(bytes / THREADS_PER_BLOCK);
    cost = new int[n*n];
    cudaMalloc(&dcost, bytes*n);
    lx = new int[n];
    ly = new int[n];
    cudaMalloc(&dlx, bytes);
    cudaMalloc(&dly, bytes);
    xy = new int[n];
    yx = new int[n];
    S = new bool[n];
    T = new bool[n];
    cudaMalloc(&dS, sizeof(bool)*n);
    cudaMalloc(&dT, sizeof(bool)*n);
    slack = new int[n];
    cudaMalloc(&dslack, bytes);
    slackx = new int[n];
    cudaMalloc(&dslackx, bytes);
    prev = new int[n];
    if (verbose) std::cout<<n<<std::endl;
    for(int i=0;i<n;++i){
        for(int j=0;j<n;++j){
            fin>>cost[i*n+j];
            if (verbose&&maximum) std::cout<<cost[i*n+j]<<"\t";
        }
        if (verbose&&maximum) std::cout<<std::endl;
    }
    //* NEW MIN FUNCTION: invert costs so a max assignment solves the min problem
    if(!maximum){
        for(int i=0;i<n;++i){
            for(int j=0;j<n;++j){
                if (cost[i*n+j]>maxi) maxi = cost[i*n+j];
            }
        }
        for(int i=0;i<n;++i){
            for(int j=0;j<n;++j){
                if(verbose) std::cout<<cost[i*n+j]<<"\t";
                cost[i*n+j] = maxi - cost[i*n+j];
            }
            if(verbose) std::cout<<std::endl;
        }
    }
    fin.close();
    cudaMemcpy(dcost, cost, bytes*n, cudaMemcpyHostToDevice);
    // std::cout<<"Finished reading input" << std::endl;
}

// Usage: ./hungarian_parallel ./matrix/<matrix-file-name> <max/min> <0/1>
// Prints: n, optimal value, and algorithm wall time (tab-separated).
int main(int argc, char*argv[])
{
    if (argc != 4)
    {
        std::cerr << "Arguments must be presented as follows." << std::endl;
        std::cerr << "./hungarian_parallel ./matrix/<matrix-file-name> <max/min> <0/1>" << std::endl;
        exit(1);
    }
    // static const int arr[] = {7,4,2,8,2,3,4,7,1};
    verbose=atoi(argv[3]);
    if (std::string(argv[2])=="max") maximum=true;
    clock_t start, end;
    // start = clock();
    read_in_cost_matrix(argv[1]);
    // end = clock();
    // double time_io = double(end - start) / double(CLOCKS_PER_SEC);
    // std::cout << "File IO: " << time_io << "s" << std::endl;
    start = clock();
    int x=hungarian();
    end = clock();
    double time_algo = double(end - start) / double(CLOCKS_PER_SEC);
    if (verbose) output_assignment();
    std::cout<<n<<"\t\t"<<x<<"\t\t"<<time_algo<<std::endl;
    // std::cout<<"Algorithm execution: " << time_algo<<"s"<<std::endl;
}
1,550
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <time.h>

#define GRID_SIZE 8
#define BLOCK_SIZE 32
// FIX: arguments parenthesized so expression arguments cannot misparse.
#define min(a, b) ((a) < (b) ? (a) : (b))

// One bottom-up merge pass: merges adjacent sorted runs of width/2 from d1
// into sorted runs of width in d2. Each thread handles every n_threads-th
// tile via a grid-stride-style loop, so any grid size is correct.
__global__ void mergeSort(int *d1, int *d2, int width, int n){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int n_threads = gridDim.x * blockDim.x;
    int start = tid * width;
    int middle = min(start + (width >> 1), n);
    int end = min(start + width, n);

    // each thread may sort more than one tile (or zero tiles if start >= n)
    while(start < n){
        int a = start;    // cursor in the left run [start, middle)
        int b = middle;   // cursor in the right run [middle, end)
        // merge the two runs into d2[start, end)
        for (int k = start; k < end; k++) {
            if (a < middle && (b >= end || d1[a] < d1[b])) {
                d2[k] = d1[a];
                a += 1;
            } else {
                d2[k] = d1[b];
                b += 1;
            }
        }
        start += n_threads * width;
        middle = min(start + (width >> 1), n);
        end = min(start + width, n);
    }
}

int main(){
    int n;
    n = 100000;
    int size = n * sizeof(int);

    // Host input: strictly decreasing values (worst case for merge sort order).
    int *h_in = (int *)malloc(size);
    int *h_out = (int *)malloc(size);
    int *d_in, *d_out;
    for (int i=0; i<n; i++){
        h_in[i] = 100000 - i;
    }

    cudaMalloc((void **)&d_in, size);
    cudaMalloc((void **)&d_out, size);
    cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);

    // Ping-pong buffers: each pass reads d1, writes d2, then the roles swap.
    int *d1 = d_in;
    int *d2 = d_out;

    clock_t start_time = clock();
    for (int width = 2; width < (n << 1); width <<= 1){
        mergeSort <<<GRID_SIZE, BLOCK_SIZE>>>(d1, d2, width, n);
        // FIX: cudaThreadSynchronize() is deprecated; use the device-level call.
        cudaDeviceSynchronize();
        d1 = d1 == d_in ? d_out : d_in;
        d2 = d2 == d_in ? d_out : d_in;
    }
    clock_t end_time = clock();

    // After the final swap, d1 points at the buffer holding the sorted data.
    cudaMemcpy(h_out, d1, size, cudaMemcpyDeviceToHost);

    for(int i=0; i<n; i++)
        printf("%d\n", h_out[i]);
    printf("\n");

    double total_time = ((double) (end_time - start_time)) / CLOCKS_PER_SEC;
    printf("total time: %f\n", total_time);

    // FIX: the original leaked both host buffers and both device buffers.
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
1,551
#include <iostream>
#include <stdio.h>
#include <vector>

#define MAX_THREADS 256
#define SIZE 524288

/* CUDA-event timing helpers: __START__ / __STOP__(v) bracket a region and
 * push the elapsed milliseconds into vector v; __NEXT__ chains regions; the
 * *_BIS_ variants allow one nested timed region. */
#define __START__ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0);
#define __STOP__(_V) cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); _V.push_back(time); cudaEventDestroy(start); cudaEventDestroy(stop);
#define __NEXT__(_V) __STOP__(_V) __START__
#define __START_BIS__ cudaEventCreate(&startBis); cudaEventCreate(&stopBis); cudaEventRecord(startBis, 0);
#define __STOP_BIS__(_V) cudaEventRecord(stopBis, 0); cudaEventSynchronize(stopBis); cudaEventElapsedTime(&time, startBis, stopBis); _V.push_back(time); cudaEventDestroy(startBis); cudaEventDestroy(stopBis);
#define __NEXT_BIS__(_V) __STOP_BIS__(_V) __START_BIS__

/* Squares each element of d_vector in place; one thread per element, with a
 * bounds guard for the partially filled last block. */
__global__ void square_kernel(float *d_vector)
{
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i >= SIZE)
		return;
	d_vector[i] = d_vector[i]*d_vector[i];
}

/* Prints the mean of v, converted from milliseconds to microseconds. */
void showMean(std::vector<float> v)
{
	float sum(0);
	for (unsigned int i(0) ; i != v.size() ; i++)
		sum += v[i];
	std::cout << 1000.*sum/v.size() << " microseconds" << std::endl;
}

int main(int argc, char **argv)
{
	std::cout << "SIZE (vs. Native): " << SIZE << std::endl;
	cudaEvent_t start, stop, startBis, stopBis;
	std::vector<float> cRun, ckRun, hRun;
	float time(0);
	cudaFree(0); // Force runtime API context establishment

	// Heap-allocate: a 2 MB float[SIZE] array is fragile on the stack.
	std::vector<float> h_vector(SIZE); // For input and output
	for (unsigned int i(0) ; i != SIZE ; i++)
		h_vector[i] = i;

	// Renamed the outer counter: it was shadowed by the inner element loops.
	for (unsigned int run(0) ; run != 1000 ; run++) {
		float *d_vector;
		__START__
		cudaMalloc(&d_vector, SIZE*sizeof(float));
		cudaMemcpy(d_vector, h_vector.data(), SIZE*sizeof(float), cudaMemcpyHostToDevice);
		__START_BIS__
		square_kernel<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector);
		// cudaThreadSynchronize() is deprecated; block until the device is done.
		cudaDeviceSynchronize();
		__STOP_BIS__(ckRun)
		cudaMemcpy(h_vector.data(), d_vector, SIZE*sizeof(float), cudaMemcpyDeviceToHost);
		cudaFree(d_vector);
		__NEXT__(cRun)
		for (unsigned int i(0) ; i != SIZE ; i++)
			h_vector[i] = h_vector[i]*h_vector[i];
		__STOP__(hRun)
	}

	showMean(cRun);
	showMean(ckRun);
	showMean(hRun);
}
1,552
/* * Author: Tejeswini * Purpose: 2D convolution of image on CPU and GPU */ #include <iostream> #include <stdio.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define _USE_MATH_DEFINES #include <math.h> #include <time.h> #include <string> #include <cstring> #define _CRT_SECURE_NO_DEPRECATE using namespace std; #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define RGB_COMPONENT_COLOR 255 static int width= 0; static int height = 0; /*------------------------------------------------- PPMPixel ----------------- | Structure- to store image data *-------------------------------------------------------------------*/ typedef struct { unsigned char red,green,blue; } PPMPixel; /*------------------------------------------------- PPMImage ----------------- | Structure- to store image dimension and data *-------------------------------------------------------------------*/ typedef struct { int x, y; PPMPixel *data; } PPMImage; /*------------------------------------------------- HandleError ----- | Function HandleError | | Purpose: This functions checks the status of CUDA APIs | | Parameters: | cudaError_t Error Status | const char *file A file pointer, | int line where the errror occured | | Returns: Void *-------------------------------------------------------------------*/ static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } /*------------------------------------------------- gpuConvolution_x ----- | gpuConvolution_x | | Purpose: This function is called by CPU and executed on GPU | This function is to compute the Convolution of image pixels along x axis on GPU | | Parameters: | float *kernel Gausian kernel used to blur the image | int *kernelSizeX Gausian kernel size | int *width image width | int *height image hight | unsigned char* redPixels pointerto buffer storing 
redPixels | unsigned char* greenPixels pointer to buffer storing green pixels | unsigned char* bluePixelOp pointer to buffer storing convoluted bluepixels | unsigned char* greenPixelOp pointer to buffer storing convoluted green pixels | unsigned char* bluePixelOp pointer to buffer storing convoluted bluepixels | | | Returns: Void *-------------------------------------------------------------------*/ __global__ void gpuConvolution_x(float *kernel, int *kernelSizeX, int *width, int *height, unsigned char* redPixels, unsigned char* greenPixels, unsigned char* bluePixels, unsigned char* redPixelsOp, unsigned char* greenPixelsOp, unsigned char* bluePixelsOp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i > (*width) || j > (*height)) return; // for(int j = 0; j < (*height); j++) // { // for(int i = 0; i < (*width); i++) // { if(i == 0 || i == 1 || i == 0 || i == 1) { } else { /*convolute red, green and blue pixels seperately with gausian kernel along x direction*/ for(int r = 0; r < 5; r++) { redPixelsOp[(j*(*width)) + i] += (unsigned char)redPixels[((*width)*j)+(i+r-1)]*kernel[r]; greenPixelsOp[(j*(*width)) + i] += (unsigned char)greenPixels[((*width)*j)+(i+r-1)]*kernel[r]; bluePixelsOp[(j*(*width))+i] += (unsigned char)bluePixels[((*width)*j)+(i+r-1)]*kernel[r]; } } /*wait for all threads to complete*/ __syncthreads(); // } // } } /*------------------------------------------------- gpuConvolution_y ----- | gpuConvolution_y | | Purpose: This function is called by CPU and executed on GPU | This function is to compute the Convolution of image pixels along y axis on GPU | | Parameters: | float *kernel Gausian kernel used to blur the image | int *kernelSizeX Gausian kernel size | int *width image width | int *height image hight | unsigned char* redPixelsOp pointerto buffer storing redPixels along x axis | unsigned char* greenPixelsOp pointer to buffer storing green pixels along x axis | unsigned char* bluePixelOpy 
pointer to buffer storing convoluted bluepixels along y axis | unsigned char* greenPixelOpy pointer to buffer storing convoluted green pixels along y axis | unsigned char* bluePixelOpy pointer to buffer storing convoluted bluepixels along y axis | | | Returns: Void *-------------------------------------------------------------------*/ __global__ void gpuConvolution_y(float *kernel, int *kernelSizeX, int *width, int *height, unsigned char *redPixelsOp, unsigned char *greenPixelsOp, unsigned char *bluePixelsOp, unsigned char *redPixelsOpy, unsigned char *greenPixelsOpy, unsigned char *bluePixelsOpy, unsigned char *out) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i > (*width) || j > (*height)) return; // for(int j = 0; j < (*height); j++) // { // for(int i = 0; i < (*width); i++) // { if(i == 0 || i == 1 || i == 0 || i == 1) { } else { /*convolute red, green and blue pixels seperately with gausian kernel along Y direction*/ for(int r = 0; r < 5; r++) { redPixelsOpy[(j*(*width)) + i] += (unsigned char)redPixelsOp[i+(j+r-1)*(*width)]*kernel[r]; greenPixelsOpy[(j*(*width)) + i] += (unsigned char)greenPixelsOp[i+(j+r-1)*(*width)]*kernel[r]; bluePixelsOpy[(j*(*width))+i] += (unsigned char)bluePixelsOp[i+(j+r-1)*(*width)]*kernel[r]; } } /*wait for all threads to complete*/ __syncthreads(); //} //} } /*------------------------------------------------- gpuFunction ----- | gpuFunction | | Purpose: This function is called by CPU and executed on CPU | This function allocates memory on GPU device and launches GPU kernels | | Parameters: | unsigned char *data input image data | int width width of image file | int height hight of image file | float *kernel Gusian kernel used to blur image on GPU | int kernelSize Size of Gausian kernel | unsigned char* out buffer to store convoluted image data | | Returns: unsigned char* returns pointer to convoluted image *-------------------------------------------------------------------*/ 
unsigned char* gpuFunction(unsigned char *data, int width, int height, float *kernel, int kernelSize, unsigned char* out){ /*gausian kernel*/ float *d_kernelForGpu; /*pointers to store image data before convolution*/ unsigned char *d_redpixel; unsigned char *d_greenpixel; unsigned char *d_bluepixel; /*pointers to store image data after x convolution*/ unsigned char *d_redpixelOp; unsigned char *d_greenpixelOp; unsigned char *d_bluepixelOp; /*pointers to store image data after y convolution*/ unsigned char *d_redpixelOpy; unsigned char *d_greenpixelOpy; unsigned char *d_bluepixelOpy; /*pointer to input image data*/ unsigned char *d_data; /*pointer to convoluted image*/ unsigned char *d_out; /*gausian filter size*/ int *d_kernelSizeX; /*image width and hight*/ int *d_width; int *d_height; dim3 blocks( ((width / 32)+1), ( (height/32)+1)); dim3 threads(32, 32); HANDLE_ERROR(cudaMalloc((void**)&d_kernelForGpu, kernelSize*sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_kernelForGpu, kernel, kernelSize*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMalloc(&d_data, width*height*3*sizeof(unsigned char))); HANDLE_ERROR(cudaMemcpy(d_data, data, 3*width*height*sizeof(unsigned char), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMalloc(&d_out, width*height*3*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc((void**)&d_kernelSizeX, sizeof(int))); HANDLE_ERROR(cudaMemcpy(d_kernelSizeX, &kernelSize, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMalloc((void**)&d_width, sizeof(int))); HANDLE_ERROR(cudaMemcpy(d_width, &width, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMalloc((void**)&d_height, sizeof(int))); HANDLE_ERROR(cudaMemcpy(d_height, &height, sizeof(int), cudaMemcpyHostToDevice)); unsigned char *redPixels = (unsigned char*)malloc((width*height*sizeof(unsigned char))); unsigned char *greenPixels = (unsigned char*)malloc((width*height*sizeof(unsigned char))); unsigned char *bluePixels = (unsigned char*)malloc((width*height*sizeof(unsigned char))); 
for(int i = 0; i < (width*height);i++ ){ redPixels[i] = data[i*3+0]; greenPixels[i] = data[i*3+1]; bluePixels[i] = data[i*3+2]; } unsigned char *redPixelOp = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *greenPixelOp = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *bluePixelOp = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); for(int i = 0; i < (width*height); i++){ redPixels[i] = data[i*3+0]; greenPixels[i] = data[i*3+1]; bluePixels[i] = data[i*3+2]; } HANDLE_ERROR(cudaMalloc(&d_redpixel, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc(&d_redpixelOp, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc(&d_redpixelOpy, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMemcpy(d_redpixel, &redPixels, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMalloc(&d_greenpixel, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc(&d_greenpixelOp, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc(&d_greenpixelOpy, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMemcpy(d_greenpixel, &greenPixels, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMalloc(&d_bluepixel, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc(&d_bluepixelOp, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMalloc(&d_bluepixelOpy, width*height*sizeof(unsigned char))); HANDLE_ERROR(cudaMemcpy(d_bluepixel, &bluePixels, sizeof(int), cudaMemcpyHostToDevice)); unsigned char *redPixelOpy = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *greenPixelOpy = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *bluePixelOpy = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); clock_t start = clock(); /*Launch GPU kernels here*/ gpuConvolution_x<<<blocks, threads>>>(d_kernelForGpu, d_kernelSizeX, d_width, d_height, d_redpixel, d_greenpixel, d_bluepixel, d_redpixelOp, d_greenpixelOp, 
d_bluepixelOp); gpuConvolution_y<<<blocks, threads>>>(d_kernelForGpu, d_kernelSizeX, d_width, d_height, d_redpixelOp, d_greenpixelOp, d_bluepixelOp, d_redpixelOpy, d_greenpixelOpy, d_bluepixelOpy, d_out); clock_t end = clock(); /*profile convolution time here*/ double elapsed_time = (end - start)/(double)CLOCKS_PER_SEC; printf("Convolution time on GPU is %lf fractional seconds\n", elapsed_time); HANDLE_ERROR(cudaMemcpy(redPixels, d_redpixelOp, width*height*sizeof(unsigned char), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(greenPixels, d_greenpixelOp, width*height*sizeof(unsigned char), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(bluePixels, d_bluepixelOp, width*height*sizeof(unsigned char), cudaMemcpyDeviceToHost)); for(int k = 0; k < (width*height); k++) { out[k*3+0]= redPixelOpy[k]; out[k*3+1]= greenPixelOpy[k]; out[k*3+2]= bluePixelOpy[k]; } /*File to store GPU convoluted image*/ FILE *fp; fp = fopen("gpu_out.ppm", "wb"); if(!fp) { exit(1); } fprintf(fp, "P6\n"); fprintf(fp, "%d %d\n", width, height); fprintf(fp, "%d\n", RGB_COMPONENT_COLOR); fwrite(out, 3*width, height, fp); fclose(fp); cudaFree(kernel); cudaFree(d_kernelSizeX); cudaFree(d_data); cudaFree(d_out); cudaFree(d_redpixelOpy); cudaFree(d_greenpixelOpy); cudaFree(d_bluepixelOpy); cudaFree(d_redpixelOp); cudaFree(d_greenpixelOp); cudaFree(d_bluepixelOp); cudaFree(d_redpixel); cudaFree(d_greenpixel); cudaFree(d_bluepixel); return out; } /*------------------------------------------------- readPPM ----- | readPPM | | Purpose: This function is called by CPU and executed on CPU | This function reads the input image passed through | command line argument and stores the data in a buffer | | Parameters: constant char* input image file passed through command line argument | | Returns: unsigned char* returns pointer to input image *-------------------------------------------------------------------*/ static unsigned char *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; 
unsigned char *data; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } width = img->x; height = img->y; if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; data = (unsigned char*)malloc(img->x * img->y * 3*sizeof(unsigned char)); if (!data) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(data, 3 * img->x, img->y, fp)); fclose(fp); return data; } /*------------------------------------------------- convolvePPM ----- | readPPM | | Purpose: This function is called by CPU and executed on CPU | This function does the convolution of input image | command line argument and stores the data in a buffer | | Parameters: unsigned char *img: input image data | unsigned char *out: convoluted image data | float kernel[]: gausian kernel for convolution | int kernelSizeX: size of gausian kernel | int kernelSizeY: size of gausian kernel | | Returns: unsigned char* returns pointer to convoluted image *-------------------------------------------------------------------*/ unsigned char *convolvePPM(unsigned char *img, unsigned char *out, float kernel[], int kernelSizeX, int kernelSizeY) { FILE *fp; unsigned char *redPixels = (unsigned 
char*)malloc((width*height*sizeof(unsigned char))); unsigned char *greenPixels = (unsigned char*)malloc((width*height*sizeof(unsigned char))); unsigned char *bluePixels = (unsigned char*)malloc((width*height*sizeof(unsigned char))); unsigned char *redPixelOp = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *greenPixelOp = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *bluePixelOp = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *redPixelOpy = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *greenPixelOpy = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); unsigned char *bluePixelOpy = (unsigned char*)malloc((width*height*3*sizeof(unsigned char))); for(int i = 0; i < (width*height);i++ ){ redPixels[i] = img[i*3+0]; greenPixels[i] = img[i*3+1]; bluePixels[i] = img[i*3+2]; } fp = fopen("CPU_out.ppm", "wb"); if (!fp) { exit(1); } fprintf(fp, "P6\n"); fprintf(fp, "%d %d\n",width, height); fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); out[0]=0; clock_t start = clock(); for(int q = 0; q < height; q++) { for(int p = 0; p < width; p++) { if(p == 0 || p == 1 || q == 0 || q == 1) { } else { for(int r = 0; r < 5; r++) { redPixelOp[(q*width) + p] += (unsigned char)redPixels[(width*q)+(p+r-1)]*kernel[r]; greenPixelOp[(q*width) + p] += (unsigned char)greenPixels[(width*q)+(p+r-1)]*kernel[r]; bluePixelOp[(q*width)+p] += (unsigned char)bluePixels[(width*q)+(p+r-1)]*kernel[r]; } for(int r = 0; r < 5; r++) { redPixelOpy[(q*width) + p] += (unsigned char)redPixelOp[p+(q+r-1)*width]*kernel[r]; greenPixelOpy[(q*width) + p] += (unsigned char)greenPixelOp[p+(q+r-1)*width]*kernel[r]; bluePixelOpy[(q*width)+p] += (unsigned char)bluePixelOp[p+(q+r-1)*width]*kernel[r]; } } } } clock_t end = clock(); double elapsed_time = (end - start)/(double)CLOCKS_PER_SEC; printf("Convolution time for CPU is %lf fractional seconds\n", elapsed_time); for(int k = 0; k < 
(width*height); k++) { out[k*3+0]= redPixelOpy[k]; out[k*3+1]= greenPixelOpy[k]; out[k*3+2]= bluePixelOpy[k]; } fwrite(out, 3 * width, height, fp); fclose(fp); return out; } /*------------------------------------------------- main ----- | main | | Purpose: This function is called by CPU and executed on CPU | This function erceives command line arguments, image file | name and the standard deviation | Prepares gausian kernel and calls the convolution functions on CPU and GPU | | Parameters: int argc: number of commandline arguments | argv[] : image file name and standard deviation | Returns: int 0 on success *-------------------------------------------------------------------*/ int main(int argc, char* argv[]) { string filename = argv[1]; unsigned char *image; unsigned char *outimageongpu; int sigma = atoi(argv[2]); image = readPPM(filename.c_str()); unsigned char *out = (unsigned char*)malloc(width * height * 3 * sizeof(unsigned char)); float *gKernel = (float*)malloc(5 * sizeof(float)); double s = 2.0 * sigma * sigma; double sum = 0.0; int p = 0; for(int x = -2; x <=2; x++) { gKernel[p] = (exp(-(x*x)/s))/(sqrt(M_PI * s)); sum += gKernel[p]; p++; } for(int i = 0; i < 5; i++) { gKernel[i] /= sum; } unsigned char *outimage = convolvePPM(image, out, gKernel, 5, 5); outimageongpu = (unsigned char*)malloc(width*height*3*sizeof(unsigned char)); unsigned char *gpu_outimage = gpuFunction(image, width, height, gKernel, 5, outimageongpu); }
1,553
#include "includes.h"

/*
 * Bias-gradient reduction: for every (channel, col) pair, averages _delta
 * over the `rows` dimension and writes the mean into bgrad.
 *
 * Launch layout (from the indexing below):
 *   gridDim.x  = channel index, gridDim.y = column index,
 *   blockDim.x = rows (one thread per row).
 * Dynamic shared memory must be rows * sizeof(float) for _sum[].
 * NOTE(review): the `channels` parameter is never read in this body.
 */
__global__ void g_One_Bgrad(float* _delta, float* bgrad, int rows, int cols, int channels)
{
	extern __shared__ float _sum[];
	int channel = blockIdx.x;
	int col = blockIdx.y;
	int row = threadIdx.x;
	/* Each thread loads its row's delta for this (channel, col) cell. */
	float delta = _delta[channel * rows * cols + row * cols + col];
	_sum[row] = delta;
	__syncthreads();
	/* Tree reduction over _sum that also handles non-power-of-two lengths:
	 * with skip = ceil(len/2), only the first floor(len/2) threads add their
	 * partner; when len is odd the middle element is simply carried forward
	 * into the next round.  The __syncthreads() inside the loop is safe
	 * because `len` is derived from the uniform parameter `rows`, so every
	 * thread in the block executes the same number of iterations. */
	int len = rows;
	while(len != 1)
	{
		__syncthreads();
		int skip = (len + 1) >> 1;
		if(threadIdx.x < (len >> 1))
		{
			_sum[threadIdx.x] += _sum[threadIdx.x + skip];
		}
		len = (len + 1) >> 1;
	}
	__syncthreads();
	if(threadIdx.x == 0)
	{
		/* _sum[0] now holds the sum over all rows; divide for the mean. */
		bgrad[channel * cols + col] = _sum[0] / rows;
	}
}
1,554
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>

// Toy kernel used to study register pressure / occupancy: each thread keeps
// eight live integer locals and stores their sum (always 36) at its global
// index.  No bounds guard -- the launch must exactly cover the result buffer,
// as the commented-out driver below arranges.
__global__ void occupancy_test(int * results)
{
	const int tid = threadIdx.x + blockIdx.x * blockDim.x;

	int r1 = 1, r2 = 2, r3 = 3, r4 = 4;
	int r5 = 5, r6 = 6, r7 = 7, r8 = 8;

	const int lowHalf  = r1 + r2 + r3 + r4;
	const int highHalf = r5 + r6 + r7 + r8;
	results[tid] = lowHalf + highHalf;
}

//int main()
//{
//	int size = 1 << 16;
//	int byte_size = sizeof(int)*size;
//
//	int * d_results;
//	cudaMalloc((void**)&d_results, byte_size);
//	cudaMemset(d_results, 0, byte_size);
//
//	dim3 blocks(128);
//	dim3 grid((size+blocks.x-1)/blocks.x);
//	occupancy_test << <grid,blocks >> > (d_results);
//	cudaDeviceSynchronize();
//	return 0;
//}
1,555
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>

/****************************************************************************
  CUDA brute-force password "cracker" demo.

  Four hard-coded passwords of the form <letter><letter><4 digits> are
  searched.  The kernel is launched as <<<26, 26>>> so each thread owns one
  two-uppercase-letter prefix and enumerates the 10^4 digit suffixes.

  Compile with:  nvcc -o password_santos password_crack_santos.cu
  Run with:      ./password_santos > resultpwd_cuda_santos.txt

  Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/

/* Returns 1 iff the two NUL-terminated strings are equal. */
__device__ static int str_equal(const char *a, const char *b)
{
	while (*a == *b) {
		if (*a == '\0')
			return 1;
		a++;
		b++;
	}
	return 0;
}

/* Compares `attempt` against the four hard-coded passwords, printing every
 * one that matches.  Returns 1 on any match.
 *
 * Fix: the original only returned 1 when the FOURTH password matched, so a
 * hit on the first three was reported to the caller as a failure.  The four
 * copy-pasted scan loops are also collapsed into one. */
__device__ int is_a_match(char *attempt)
{
	const char *passwords[4] = { "SA1234", "AB5678", "CD9102", "EF5648" };
	int found = 0;
	for (int p = 0; p < 4; p++) {
		if (str_equal(attempt, passwords[p])) {
			printf("Password: %s\n", passwords[p]);
			found = 1;
		}
	}
	return found;
}

/* Each of the 26x26 threads fixes a two-letter prefix (block index -> first
 * letter, thread index -> second letter) and brute-forces suffix 0000-9999. */
__global__ void kernel()
{
	char password[7];
	password[6] = '\0';
	password[0] = (char)(blockIdx.x + 'A');
	password[1] = (char)(threadIdx.x + 'A');

	for (char a = '0'; a <= '9'; a++)
		for (char b = '0'; b <= '9'; b++)
			for (char c = '0'; c <= '9'; c++)
				for (char d = '0'; d <= '9'; d++) {
					password[2] = a;
					password[3] = b;
					password[4] = c;
					password[5] = d;
					is_a_match(password); /* prints on success */
				}
}

/* Stores finish - start (in nanoseconds) into *difference.
 * Returns 0 on success, non-zero when the interval is not positive. */
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference)
{
	long long int ds = finish->tv_sec - start->tv_sec;
	long long int dn = finish->tv_nsec - start->tv_nsec;
	if (dn < 0) {
		ds--;
		dn += 1000000000;
	}
	*difference = ds * 1000000000 + dn;
	return !(*difference > 0);
}

int main()
{
	struct timespec start, finish;
	long long int time_elapsed;

	clock_gettime(CLOCK_MONOTONIC, &start);
	kernel<<<26, 26>>>();
	cudaDeviceSynchronize();
	clock_gettime(CLOCK_MONOTONIC, &finish);

	time_difference(&start, &finish, &time_elapsed);
	printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
	       (time_elapsed/1.0e9));
	return 0;
}
1,556
#include "includes.h"

// One relaxation step (fixed pivot k) of Floyd-Warshall on a flattened
// nverts x nverts distance matrix.  Thread t owns cell (t / nverts,
// t % nverts) and applies M[i][j] = min(M[i][j], M[i][k] + M[k][j]).
// Launch 1-D with at least nverts*nverts threads; extras exit at the guard.
__global__ void floyd1DKernel(int * M, const int nverts, const int k){
	const int cell = blockIdx.x * blockDim.x + threadIdx.x;
	const int row  = cell / nverts;
	const int col  = cell % nverts;

	if (row >= nverts || col >= nverts)
		return;
	// Diagonal cells and cells on the pivot row/column never improve here.
	if (row == col || row == k || col == k)
		return;

	const int throughPivot = M[(row * nverts) + k] + M[(k * nverts) + col];
	M[cell] = min(throughPivot, M[cell]);
}
1,557
#include "includes.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif

// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, size).
// Launch with any 1-D configuration covering at least `size` threads;
// out-of-range threads return at the guard.
__global__ void vec_add(float *A, float *B, float* C, int size)
{
	const int i = threadIdx.x + blockDim.x * blockIdx.x;
	if (i >= size)
		return;
	C[i] = A[i] + B[i];
}
1,558
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>

//************ global variables ***************
// Image is stored row-major: [i][j] = i*dimy + j
int msk=3, dimx=1040, dimy=1388, tam_imag=1388*1040;

//************** CUDA kernel *********************
/*
 * Per-pixel variance over a 3x3 neighbourhood (zero-padded at the borders).
 *
 * Fix: the original left five of the nine M_d[] neighbour slots commented
 * out but still summed all nine, reading uninitialized locals (undefined
 * behaviour).  All nine neighbours are now loaded, with a consistent
 * zero-pad at the image edges.
 *
 * Launch: 1-D flattened over tam_imag pixels via the 2-D thread id scheme
 * used by the host (id = idx + idy * blockDim.x * gridDim.x); threads with
 * id >= tam_imag exit.
 */
__global__ void Varianza (int *G_d, float *var_d){
	const int dimx=1040, dimy=1388, tam_imag=1388*1040, msk=3;
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	int idy = threadIdx.y + blockIdx.y*blockDim.y;
	int id  = idx + idy*blockDim.x*gridDim.x;

	if (id >= tam_imag)
		return;

	// Recover 2-D pixel coordinates from the flat index.
	const int row = id / dimy;
	const int col = id % dimy;

	// Gather the 3x3 window, substituting 0 outside the image.
	int win[9];
	int w = 0;
	for (int dr = -1; dr <= 1; dr++) {
		for (int dc = -1; dc <= 1; dc++) {
			int r = row + dr;
			int c = col + dc;
			win[w++] = (r < 0 || r >= dimx || c < 0 || c >= dimy)
			           ? 0 : G_d[r*dimy + c];
		}
	}

	// variance = mean((x - mean(x))^2) over the msk*msk window
	float X = 0.f;
	for (int i = 0; i < msk*msk; i++)
		X += win[i];
	float Xprom = X / (msk*msk);

	float Y = 0.f;
	for (int i = 0; i < msk*msk; i++)
		Y += (Xprom - win[i]) * (Xprom - win[i]);

	var_d[id] = Y / (msk*msk);
}

//***************** main **********************
int main(int argc, char* argv[]){
	int i, j, init, fin, d;
	init = atoi(argv[1]);
	fin  = atoi(argv[2]);
	FILE *matrizR, *matrizG, *matrizB;
	float t;
	clock_t tinicio, t_GPU;
	tinicio = clock();

	int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h;
	float *max_h, *var_h;
	int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d;
	float *max_d, *var_d;

	//************ host/device allocations ***************
	R_h  = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&R_d,  tam_imag*sizeof(int));
	G_h  = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&G_d,  tam_imag*sizeof(int));
	B_h  = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&B_d,  tam_imag*sizeof(int));
	Rf_h = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&Rf_d, tam_imag*sizeof(int));
	Gf_h = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&Gf_d, tam_imag*sizeof(int));
	Bf_h = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&Bf_d, tam_imag*sizeof(int));
	topof_h = (int *)malloc(sizeof(int)*tam_imag);   cudaMalloc((void**)&topof_d, tam_imag*sizeof(int));
	max_h   = (float *)malloc(sizeof(float)*tam_imag); cudaMalloc((void**)&max_d, tam_imag*sizeof(float));
	var_h   = (float *)malloc(sizeof(float)*tam_imag); cudaMalloc((void**)&var_d, tam_imag*sizeof(float));

	//*************** EDF computation loop ****************
	for (d = init; d <= fin; d++){
		printf("d=%d \n", d);

		//***************** read the RGB matrices on the host ****************
		// Fix: the path buffers were 1-byte arrays (char x[]="") smashed by
		// sprintf; use real buffers and bounded formatting.
		char rutaR[64], rutaG[64], rutaB[64];
		snprintf(rutaR, sizeof rutaR, "%s%d%s", "RGB/", d, "/R");
		snprintf(rutaG, sizeof rutaG, "%s%d%s", "RGB/", d, "/G");
		snprintf(rutaB, sizeof rutaB, "%s%d%s", "RGB/", d, "/B");
		matrizR = fopen(rutaR, "r+");
		matrizG = fopen(rutaG, "r+");
		matrizB = fopen(rutaB, "r+");
		if (!matrizR || !matrizG || !matrizB) {
			fprintf(stderr, "Cannot open RGB files for d=%d\n", d);
			exit(1);
		}
		for (i = 0; i < dimx; i++){
			for (j = 0; j < dimy; j++){
				fscanf(matrizR, "%d", &R_h[i*dimy+j]);
				fscanf(matrizG, "%d", &G_h[i*dimy+j]);
				fscanf(matrizB, "%d", &B_h[i*dimy+j]);
			}
		}
		fclose(matrizR);
		fclose(matrizG);
		fclose(matrizB);

		//***************** variance kernel *******************
		// Fix: the original copied G_h to G_d but launched the kernel on
		// B_d, which was never written -- the GPU read uninitialized memory.
		cudaMemcpy(G_d, G_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice);
		dim3 Grid(347,20);
		dim3 Block(13,16);
		Varianza<<<Grid,Block>>>(G_d, var_d);
		printf("Despues de kernel \n");
		cudaMemcpy(var_h, var_d, sizeof(float)*tam_imag, cudaMemcpyDeviceToHost);
		printf("Despues de resultado a host \n");
	} // end of EDF loop
	printf("***Sale del for \n");

	//***************** final variance output file
	FILE *archVar;
	archVar = fopen("Resultados/VarUltima", "w+");
	for (i = 0; i < dimx; i++){
		for (j = 0; j < dimy; j++){
			fprintf(archVar, "%f ", var_h[i*dimy+j]);
		}
		fprintf(archVar, "\n");
	}
	fclose(archVar);

	free(var_h); free(max_h); free(topof_h);
	free(R_h); free(G_h); free(B_h);
	free(Rf_h); free(Gf_h); free(Bf_h);
	cudaFree(var_d); cudaFree(max_d); cudaFree(topof_d);
	cudaFree(R_d); cudaFree(G_d); cudaFree(B_d);
	cudaFree(Rf_d); cudaFree(Gf_d); cudaFree(Bf_d);

	t_GPU = clock();
	t = ((float)t_GPU-(float)tinicio)/CLOCKS_PER_SEC;
	printf("\ntiempo de procesamiento de varianzas: %6.3fs\n", t);
	return 0;
} // end of main()
1,559
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define DataSize 1024

/*
 * Horizontally mirrors each row of the image.
 * Launch layout: one block per row, one thread per pixel
 * (blockDim.x == row width for images narrower than 1024 px).
 */
__global__ void Mirror(unsigned int *Da, unsigned int *Dc, int high, int width)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int bn = blockDim.x;
    int id = bx * bn + tx;
    // Read the pixel mirrored around the centre of this row.
    Dc[id] = Da[bx * bn + bn - 1 - tx];
}

/*
 * Applies a 3x3 convolution filter to an 8-bit grayscale image.
 * Launch layout: one block per row, one thread per pixel.
 * Filter holds 9 weights (row-major 3x3); filtersum is their total,
 * used to normalize the result.
 * NOTE: the static shared buffer assumes blockDim.x <= 512.
 */
__global__ void Filter_func(unsigned int *Da, unsigned int *Dc, int high, int width,
                            unsigned int *Filter, int filtersum)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int bn = blockDim.x;
    // BUGFIX: the last row is block gridDim.x-1; the original compared
    // against blockDim.x-1, which only worked when the image was square
    // with width == blockDim.x.
    int last = gridDim.x - 1;
    int id = bx * bn + tx;

    // Three rows staged in shared memory:
    // [0,bn) = row above, [bn,2bn) = current row, [2bn,3bn) = row below.
    __shared__ unsigned int share[512 * 3];

    share[tx + bn] = Da[id];                      // current row, always present
    if (bx != 0)
        share[tx] = Da[id - bn];                  // row above (skip at top edge)
    if (bx != last)
        share[tx + 2 * bn] = Da[id + bn];         // row below (skip at bottom edge)
    __syncthreads();                              // all rows staged before reading

    // Accumulate the weighted 3x3 neighborhood, skipping taps that fall
    // outside the image (top/bottom row, left/right column).
    int sum = 0;
    if (bx != 0 && tx != 0)          sum += Filter[0] * share[tx - 1];
    if (bx != 0)                     sum += Filter[1] * share[tx];
    if (bx != 0 && tx != bn - 1)     sum += Filter[2] * share[tx + 1];
    if (tx != 0)                     sum += Filter[3] * share[tx + bn - 1];
    sum += Filter[4] * share[tx + bn];            // centre tap, always valid
    if (tx != bn - 1)                sum += Filter[5] * share[tx + bn + 1];
    if (bx != last && tx != 0)       sum += Filter[6] * share[tx + 2 * bn - 1];
    if (bx != last)                  sum += Filter[7] * share[tx + 2 * bn];
    if (bx != last && tx != bn - 1)  sum += Filter[8] * share[tx + 2 * bn + 1];

    Dc[id] = sum / filtersum;
}

/*
 * Reads lena.bmp, applies either a 3x3 filter or a horizontal mirror on the
 * GPU (user's choice), and writes the result to lena3.bmp.
 */
int main()
{
    FILE *fp = NULL;
    unsigned int high, width, offset, nthread, nblock;
    unsigned char *head;
    unsigned char *img;
    unsigned int *da;
    unsigned int filter[9] = {1,2,1,2,4,2,1,2,1};   // default Gaussian-like kernel
    char c, sel;

    printf("What do you want to do?\n(a)Filter the picture\n(b)Mirror the picture\n");
    scanf("%c", &sel);
    getchar();
    if (sel == 'A' || sel == 'a') {
        printf("Do you want to input filter values by yourself? (Y/N)\n");
        scanf("%c", &c);
        getchar();
        if (c == 'Y' || c == 'y') {
            printf("Please input 9 filter values:\n");
            for (int i = 0; i < 9; ++i)
                scanf("%u", &filter[i]);   // BUGFIX: %u matches unsigned int (was %d)
            getchar();
        }
    }

    high = 0;
    width = 0;
    offset = 0;
    fp = fopen("lena.bmp", "rb");              // read binary
    if (fp == NULL) {
        printf("Cannot open lena.bmp\n");
        return 1;
    }
    fseek(fp, 10, SEEK_SET);                   // header bytes 10-13: pixel-data offset
    fread(&offset, sizeof(unsigned int), 1, fp);
    fseek(fp, 18, SEEK_SET);                   // header bytes 18-21: image width
    fread(&width, sizeof(unsigned int), 1, fp);
    fseek(fp, 22, SEEK_SET);                   // header bytes 22-25: image height
    fread(&high, sizeof(unsigned int), 1, fp);

    img = (unsigned char*)malloc(sizeof(unsigned char) * (width * high));
    da  = (unsigned int*)malloc(sizeof(unsigned int) * (width * high));
    fseek(fp, offset, SEEK_SET);               // jump to the pixel data
    fread(img, sizeof(char), (width * high), fp);
    head = (unsigned char*)malloc(sizeof(unsigned char) * (offset));
    fseek(fp, 0, SEEK_SET);                    // re-read the raw header for the output file
    fread(head, sizeof(unsigned char), offset, fp);
    fclose(fp);                                // BUGFIX: close input before reopening for output

    if (width > 1024) {                        // a block holds at most 1024 threads
        nthread = 1024;
        nblock = high * width / 1024;
    } else {
        nthread = width;
        nblock = high;
    }

    // Widen pixels to unsigned int for the device buffers.
    for (unsigned int i = 0; i < high * width; ++i)
        da[i] = img[i];
    int filtersum = 0;
    for (int i = 0; i < 9; ++i)
        filtersum += filter[i];

    unsigned int *Da;
    cudaMalloc((void**)&Da, (sizeof(unsigned int) * (width * high)));
    unsigned int *Dc;
    cudaMalloc((void**)&Dc, (sizeof(unsigned int) * (width * high)));
    unsigned int *Filter;
    cudaMalloc((void**)&Filter, (sizeof(unsigned int) * 9));
    cudaMemcpy(Da, da, (sizeof(unsigned int) * (width * high)), cudaMemcpyHostToDevice);

    if (sel == 'A' || sel == 'a') {
        // BUGFIX: the filter weights must be on the device BEFORE the kernel
        // runs; the original copied them after the launch, so the kernel read
        // uninitialized device memory.
        cudaMemcpy(Filter, filter, (sizeof(unsigned int) * 9), cudaMemcpyHostToDevice);
        Filter_func <<< nblock, nthread >>> (Da, Dc, high, width, Filter, filtersum);
    } else if (sel == 'B' || sel == 'b') {
        Mirror <<< nblock, nthread >>> (Da, Dc, high, width);
    }
    cudaThreadSynchronize();
    cudaMemcpy(da, Dc, (sizeof(unsigned int) * (width * high)), cudaMemcpyDeviceToHost);
    for (unsigned int i = 0; i < high * width; ++i)
        img[i] = (unsigned char)da[i];

    fp = fopen("lena3.bmp", "wb+");
    fwrite(head, sizeof(unsigned char), offset, fp);
    // BUGFIX: pixels are single bytes; the original wrote sizeof(unsigned int)
    // per pixel, reading 4x past the buffer and corrupting the output file.
    fwrite(img, sizeof(unsigned char), (width * high), fp);
    fclose(fp);

    // BUGFIX: release host and device memory (original leaked everything).
    free(img);
    free(da);
    free(head);
    cudaFree(Da);
    cudaFree(Dc);
    cudaFree(Filter);
    return 0;
}
1,560
#include <cub/cub.cuh>
using namespace cub;

// Counts the non-zeros of a CPG-row group of dense matrix A (row-major,
// nRows x nCols) and writes the per-group total to pNnzPerGroup[blockIdx.x].
// Launch layout: one block per group of CPG consecutive rows; BLOCK_SIZE
// threads stride across the columns.
template<int BLOCK_SIZE, int CPG>
__global__ void cal_group_coo_format_nnz_kernel_cm(float *A, int nRows, int nCols, int *pNnzPerGroup)
{
    int startIdx = blockIdx.x * CPG;   // first row of this block's group
    int nnz = 0;                       // per-thread non-zero count
    // NOTE(review): nColPerThread/colOffset are computed but never used.
    int nColPerThread = (nCols + BLOCK_SIZE - 1) / BLOCK_SIZE;
    int colOffset = threadIdx.x * nColPerThread;
    // Each thread scans its strided set of columns over all CPG rows.
    for (int i = threadIdx.x; i < nCols; i += BLOCK_SIZE) {
        for (int j = 0; j < CPG; j++) {
            int row = j + startIdx;
            if (row >= nRows)
                break;                 // last group may be shorter than CPG
            float v = A[row * nCols + i];
            if (v != 0.0) {
                nnz++;
            }
        }
    }
    // Block-wide sum of the per-thread counts via CUB.
    typedef BlockReduce<int, BLOCK_SIZE> BlockReduceT;
    __shared__ typename BlockReduceT::TempStorage temp_storage;
    int aggregate = BlockReduceT(temp_storage).Sum(nnz);
    if (threadIdx.x == 0) {
        pNnzPerGroup[blockIdx.x] = aggregate;
    }
}

// Converts one CPG-row group of dense A into COO triples (pVals/pRows/pCols),
// writing each group's entries starting at pGroupIndex[blockIdx.x] (the
// exclusive prefix sum of the per-group nnz counts).  Within the group,
// entries come out ordered column-major: for each column stripe, a block-wide
// exclusive scan assigns each thread its output slot.
template<int BLOCK_SIZE, int CPG>
__global__ void convert_to_group_coo_format_kernel_cm(float *A, int nRows, int nCols, float *pVals, int *pRows, int *pCols, int *pGroupIndex, int *pNnzPerGroup)
{
    int startIdx = blockIdx.x * CPG;                 // first row of this group
    int currGroupOffset = pGroupIndex[blockIdx.x];   // base COO index of this group
    int cooIndex = currGroupOffset;
    float *currVals = pVals + cooIndex;              // running output cursors
    int *currCols = pCols + cooIndex;
    int *currRows = pRows + cooIndex;
    // Per-thread stash of the CPG values read in this column stripe
    // (each thread only reads back its own slots, so no extra sync needed).
    __shared__ float sA[BLOCK_SIZE * CPG];
    typedef BlockScan<int, BLOCK_SIZE> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;
    __shared__ int sNNz;                             // non-zeros emitted this stripe
    sNNz = 0;
    __syncthreads();
    // Round nCols up so every thread executes the same number of stripe
    // iterations (required: the block scans below are collective).
    int end = (nCols + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
    for (int i = threadIdx.x; i < end; i += BLOCK_SIZE) {
        int nnz = 0;    // this thread's non-zeros in column i of the group
        int nnz_i = 0;  // inclusive-scan value (stripe total at last thread)
        for (int j = 0; j < CPG; j++) {
            int row = j + startIdx;
            if (row < nRows && i < nCols) {
                float v = A[row * nCols + i];
                sA[j * BLOCK_SIZE + threadIdx.x] = v;
                if (v != 0.0)
                    nnz++;
            }
        }
        // Inclusive scan gives the stripe total (read from the last thread);
        // exclusive scan gives each thread's write offset within the stripe.
        BlockScanT(temp_storage).InclusiveSum(nnz, nnz_i);
        __syncthreads();   // temp_storage is reused by the second scan
        BlockScanT(temp_storage).ExclusiveSum(nnz, nnz);
        float *vals = currVals + nnz;
        int *cols = currCols + nnz;
        int *rows = currRows + nnz;
        // Emit this thread's non-zeros from the stash.
        for (int j = 0; j < CPG; j++) {
            int row = j + startIdx;
            if (row >= nRows || i >= nCols)
                break;
            float v = sA[j * BLOCK_SIZE + threadIdx.x];
            if (v != 0.0) {
                *(vals++) = v;
                *(rows++) = row;
                *(cols++) = i;
            }
        }
        // Last thread's inclusive-scan value is the stripe's total nnz;
        // advance the shared cursors by it for the next stripe.
        if (threadIdx.x == BLOCK_SIZE - 1) {
            sNNz = nnz_i;
        }
        __syncthreads();
        currVals += sNNz;
        currCols += sNNz;
        currRows += sNNz;
    }
}

// Sparse (group-COO) x dense matrix multiply: C = A * B, where A is
// hA x wA in the group-COO layout produced above, B is hB x wB dense
// (row-major), and C is hA x wB dense.  Each block computes a
// CPG-rows x BLOCK_SIZE-columns tile of C: blockIdx.x selects the row
// group, blockIdx.y * BLOCK_SIZE + threadIdx.x selects the C column.
// Each thread keeps CPG running accumulators (c0..c3) in registers.
// NOTE(review): the c0..c3 register layout hard-codes CPG == 4, and
// `currRow & (CPG-1)` assumes CPG is a power of two.
template<int BLOCK_SIZE, int CPG>
__global__ void sparse_dense_groupcoo_mat_mul_kernel(float *vals_A, int *cols_A, int *rows_A, int *groupIndex_A, int *nnzPerGroup_A, int wA, int hA, float *B, int wB, int hB, float *C)
{
    int Cj = blockIdx.y * BLOCK_SIZE + threadIdx.x;  // output column handled by this thread
    int Ci0 = blockIdx.x * CPG;                      // first output row of this group
    float c0 = 0.0;
    float c1 = 0.0;
    float c2 = 0.0;
    float c3 = 0.0;
    int groupIdxOfCurrentBlock = groupIndex_A[blockIdx.x];
    int nnz = nnzPerGroup_A[blockIdx.x];             // non-zeros in this group
    float *currValsA = vals_A + groupIdxOfCurrentBlock;
    int *currColsA = cols_A + groupIdxOfCurrentBlock;
    int *currRowsA = rows_A + groupIdxOfCurrentBlock;
    // One BLOCK_SIZE-wide window of the group's COO entries at a time.
    __shared__ float sValsA[BLOCK_SIZE];
    __shared__ int sRowsA[BLOCK_SIZE];
    __shared__ int sColsA[BLOCK_SIZE];
    __shared__ int sNNz[1];                          // valid entries in the current window
    int nIter = (BLOCK_SIZE + nnz - 1) / BLOCK_SIZE; // windows needed to cover nnz
    int extra = nnz & (BLOCK_SIZE - 1);              // entries in the final partial window
    for (int i = 0; i < nIter; i++) {
        sColsA[threadIdx.x] = -1;
        sValsA[threadIdx.x] = 0.0;
        sNNz[0] = BLOCK_SIZE;   // default: full window (all threads write the same value)
        __syncthreads();
        int valIdxStart = i * BLOCK_SIZE;
        int valIdx = valIdxStart + threadIdx.x;
        if (valIdx < nnz) {
            sValsA[threadIdx.x] = currValsA[valIdx];
            sRowsA[threadIdx.x] = currRowsA[valIdx];
            sColsA[threadIdx.x] = currColsA[valIdx];
        } else {
            // Some thread ran past the end: shrink the window to the remainder.
            sNNz[0] = extra;
        }
        __syncthreads();
        if (Cj < wB) {
            int k = 1;   // NOTE(review): unused
            int rNNz = sNNz[0];
            int precol = -1;
            float b;
            // Walk the window; entries are column-ordered, so the B element
            // is only reloaded when the column changes.
            for (int j = 0; j < rNNz;) {
                int col = sColsA[j];
                if (col != precol) {
                    b = B[col * wB + Cj];
                    precol = col;
                }
                float a = sValsA[j];
                int currRow = sRowsA[j];
                int index = currRow & (CPG - 1);   // row's position within the group
                if (index == 0) c0 = fmaf(a, b, c0);
                else if (index == 1) c1 = fmaf(a, b, c1);
                else if (index == 2) c2 = fmaf(a, b, c2);
                else if (index == 3) c3 = fmaf(a, b, c3);
                j++;
            }
        }
        __syncthreads();   // window fully consumed before the next refill
    }
    // Write back the CPG accumulators, guarding the ragged last group.
    if (Cj < wB) {
        if (Ci0 < hA) C[Cj + Ci0 * wB] = c0;
        if (Ci0 + 1 < hA) C[Cj + (Ci0 + 1) * wB] = c1;
        if (Ci0 + 2 < hA) C[Cj + (Ci0 + 2) * wB] = c2;
        if (Ci0 + 3 < hA) C[Cj + (Ci0 + 3) * wB] = c3;
    }
}

// Serial exclusive prefix sum of src[0..n-1] into dst, run by a single
// thread.  Used to turn per-group nnz counts into group start offsets.
// (n is small — one entry per row group — so serial is acceptable here.)
__global__ void prefix_sum_kernel2(int *src, int *dst, int n)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        dst[0] = 0;
        for (int i = 1; i < n; i++) {
            dst[i] = dst[i - 1] + src[i - 1];
        }
    }
}
1,561
//first cuda program
//Hitender Prakash
#include <stdio.h>
#include <stdlib.h>

/*
 * Kernel: element-wise square, d_out[i] = d_in[i]^2.
 * Launched as <<<1, siz>>>: a single block, one thread per element,
 * so siz must not exceed the per-block thread limit (1024).
 */
__global__ void square(double *d_out, double *d_in){
   int pos = threadIdx.x;
   d_out[pos] = d_in[pos] * d_in[pos];
}

/*
 * Fills an array with 1..siz, squares it on the GPU and prints the result.
 * Usage: square <size of array>
 */
int main(int argc, char **argv){
   if(argc != 2){                                  // simplified from argc<2 || argc>2
      printf("\nUsage: square <size of array>\n"); // BUGFIX: typo "sqaure"
      return 1;
   }
   int siz = atoi(argv[1]);
   // BUGFIX: the single-block launch below silently fails for siz > 1024
   // (and siz <= 0 is meaningless); reject such inputs up front.
   if(siz <= 0 || siz > 1024){
      printf("\nArray size must be between 1 and 1024\n");
      return 1;
   }

   double *d_in, *d_out, *h_in, *h_out;
   h_in  = (double *)malloc(siz * sizeof(double));
   h_out = (double *)malloc(siz * sizeof(double));
   for(int i = 0; i < siz; i++){
      h_in[i]  = i + 1.0;
      h_out[i] = 0.0;
   }

   // Allocate space on the GPU, checking each call instead of only
   // printing the first error code at the end as the original did.
   cudaError_t err = cudaMalloc((void**)&d_in, (size_t)siz * sizeof(double));
   if(err == cudaSuccess)
      err = cudaMalloc((void**)&d_out, (size_t)siz * sizeof(double));
   if(err != cudaSuccess){
      printf("\ncudaMalloc failed: %s\n", cudaGetErrorString(err));
      free(h_in);
      free(h_out);
      return 1;
   }

   // Copy input to the device, square it, copy the result back.
   cudaMemcpy(d_in, h_in, siz * sizeof(double), cudaMemcpyHostToDevice);
   square<<<1, siz>>>(d_out, d_in);
   cudaMemcpy(h_out, d_out, siz * sizeof(double), cudaMemcpyDeviceToHost);

   printf("\nBelow is the processed square values: ");
   for(int i = 0; i < siz; i++){
      printf("\n%lf ----> %lf", h_in[i], h_out[i]);
   }
   printf("\n");

   // BUGFIX: release device and host memory (original leaked everything).
   cudaFree(d_in);
   cudaFree(d_out);
   free(h_in);
   free(h_out);
   return 0;
}
1,562
#include <stdio.h>
#include <queue>
#include <set>
#include <list>
#include <iterator>
#include <algorithm>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Maximum number of vertices supported by the fixed-size managed arrays.
#define ARRAY_SIZE 30

// Wrap a CUDA call and abort with file/line context on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Structure to represent a vertex and its distance.
// operator< is inverted (greater-than) so std::priority_queue behaves
// as a min-heap on dist.
struct distNode {
    int node;
    int dist;
    bool operator<(const distNode& rhs) const {
        return dist > rhs.dist || (dist == rhs.dist && node > rhs.node);;
    }
};

// Structure to represent an edge (also min-heap ordered by weight).
struct edge {
    int from;
    int to;
    int weight;
    bool operator<(const edge& rhs) const {
        return weight > rhs.weight || (weight == rhs.weight && to > rhs.to);
    }
};

// Structure to represent an edge source & destination.
struct fromTo {
    int from;
    int to;
    bool operator<(const fromTo& rhs) const {
        return to < rhs.to || (to == rhs.to && from < rhs.from);
    }
};

// Initialize global variables.  The __managed__ arrays are shared between
// host code (primMST) and the kernel (parallel_processEdge).
__device__ __managed__ int parent[ARRAY_SIZE];  // Array to store parent nodes
__device__ __managed__ int dist[ARRAY_SIZE];    // Array to store node distances
__device__ __managed__ bool fixed[ARRAY_SIZE];  // Array to store flags for node traversal
std::vector<bool> nonEmptyIndices; // Array to store non empty indices of vertices
                                   // NOTE(review): <vector> is not included directly;
                                   // relies on a transitive include.
std::priority_queue<distNode> H;   // binary heap of (j,dist) initially empty
__device__ __managed__ int Q[ARRAY_SIZE], R[ARRAY_SIZE]; // set of vertices initially empty
__device__ __managed__ fromTo T[ARRAY_SIZE*ARRAY_SIZE];  // { set of edges } initially {}
__device__ __managed__ fromTo mwe[ARRAY_SIZE*ARRAY_SIZE]; // set of edges; minimum weight edges for all vertices
__device__ __managed__ int z_device, Q_index=0, R_index=0, mwe_index=0, T_index=0; // Indices to synchronize between host & device
__device__ __managed__ int edge_cnt=0; // keeps track of #edges

// Arrays to hold all edges of a graph (flattened adjacency list).
int allvertex_in[ARRAY_SIZE*ARRAY_SIZE], alledge_in[ARRAY_SIZE*ARRAY_SIZE], allweight_in[ARRAY_SIZE*ARRAY_SIZE];

// class to represent a graph object
class Graph
{
public:
    // construct a vector of vectors of edges to represent an adjacency list
    std::vector<std::vector<edge>> adjList;

    // Graph Constructor: builds an undirected adjacency list from the edge
    // list; also flags which vertex labels actually appear.
    Graph(std::vector<edge> const &edges, int N)
    {
        // resize the vector to hold upto vertex of maximum label value (elements of type vector<edge>)
        // or assign labels to each vertex starting from 0
        adjList.resize(N);
        nonEmptyIndices.resize(N);
        // add edges to the undirected graph
        for (auto &e: edges)
        {
            int from = e.from;
            int to = e.to;
            int weight = e.weight;
            // insert at the end
            adjList[from].push_back(edge{from, to, weight});
            adjList[to].push_back(edge{to, from, weight});
            // flag the non empty indices in adjList
            nonEmptyIndices[from] = true;
            nonEmptyIndices[to] = true;
        }
    }
};

// print adjacency list representation of graph
void printGraph(Graph const &graph)
{
    printf("Input Graph\n");
    for (int i = 0; i < graph.adjList.size(); i++)
    {
        // print all neighboring vertices of given vertex
        for (edge v : graph.adjList[i]){
            printf("( %d, %d, %d )", v.from, v.to, v.weight);
        }
        //printf("\n");
    }
}

// Delete element from array: shifts everything after arr_index left by one.
// NOTE(review): `size` is passed by value, so the caller's notion of size
// is updated separately (R_index--/Q_index-- at the call sites).
//template<typename T>
void deleteElement(int arr[], int arr_index, int size)
{
    if (arr_index < size)
    {
        // decrease the size of array and move all elements ahead
        size = size - 1;
        for (int j=arr_index; j<size; j++)
            arr[j] = arr[j+1];
    }
}

// Check if an element exists in an array (scans the full fixed capacity,
// not just the live prefix).
//template<typename T>
__device__ bool ifExist(int arr[], int val){
    for (int i=0; i<ARRAY_SIZE; i++)
    {
        if (arr[i] == val)
            return true;
    }
    return false;
}

// Check whether the (from,to) pair is one of the recorded minimum-weight edges.
__device__ bool ifExistMWE(fromTo arr[], fromTo ft){
    for (int i=0; i<edge_cnt; i++)
    {
        if (arr[i].from == ft.from && arr[i].to == ft.to)
            return true;
    }
    return false;
}

// Function to load edges into kernel pointer arrays (flattens the adjacency
// list into the three parallel host arrays and sets edge_cnt).
void load_kernelArrays(Graph const &graph)
{
    // generate the input array on the host
    // atmost a node can connect to all other nodes
    for (int i = 0; i < graph.adjList.size(); i++)
    {
        for(edge adj : graph.adjList[i])
        {
            allvertex_in[edge_cnt] = adj.from;
            alledge_in[edge_cnt] = adj.to;
            allweight_in[edge_cnt] = adj.weight;
            edge_cnt++;
        }
    }
}

// Identifies all minimum weight edges for all vertices and records them
// in the managed mwe array (one entry per vertex with neighbors).
void initMWE(Graph const &graph)
{
    for (int i = 0; i < graph.adjList.size(); i++)
    {
        int prevWeight=INT_MAX;
        int min_to, minFrom;
        // Iterate through all the vertices of graph
        for (auto it=graph.adjList[i].begin(); it!=graph.adjList[i].end(); it++)
        {
            edge adj = *it;
            // Get the Minimum weight edge for vertex adj.from
            if (adj.weight < prevWeight)
            {
                min_to = adj.to;
                minFrom = adj.from;
                prevWeight = adj.weight;
            }
        }
        // NOTE(review): for a vertex with no neighbors min_to/minFrom are
        // read uninitialized here.
        mwe[mwe_index] = fromTo{minFrom, min_to};
        mwe_index++;
    }
}

// Kernel to process edges in Parallel: one thread per flattened edge; threads
// whose edge originates at z_device classify its destination k as (a) fixed —
// skip, (b) a minimum-weight edge — fix it and append to T and R, or
// (c) a distance improvement — relax dist/parent and append to Q.
__global__ void parallel_processEdge(int *allvertex_devicein, int *alledge_devicein, int *allweight_devicein, int z_device)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    // int tid = threadIdx.x;
    //printf("block:%d, myId: %d\n", blockIdx.x, myId);
    // process edges in R
    if (myId < edge_cnt)
    {
        //printf("myId:%d, allvertex_devicein[myId] :%d\n", myId, allvertex_devicein[myId]);
        if (allvertex_devicein[myId] == z_device)
        {
            //printf("Z found, allvertex_devicein[myId] :%d\n", allvertex_devicein[myId]);
            ////printf("block:%d, myId: %d\n", blockIdx.x, myId);
            printf("Thread %d looking for the Edge to be processed\n", threadIdx.x);
            int k_device = alledge_devicein[myId];
            //printf("k_device: %d\n", k_device);
            int w_device = allweight_devicein[myId];
            //printf("w_device: %d\n", w_device);
            // NOTE(review): this printf has four format specifiers but only
            // three arguments — the myID value printed is garbage.
            printf("Edge {%d, %d, %d} found at myID:%d\n", z_device, k_device, w_device);
            if (!fixed[k_device])
            {
                if (ifExistMWE(mwe, fromTo{z_device, k_device}))
                {
                    //printf("In MWE and not fixed k, z:%d, k:%d\n", z_device, k_device);
                    fixed[k_device] = true;
                    int t = atomicAdd(&T_index, 1);
                    T[t] = fromTo{k_device, z_device}; // z is the parent of k
                    int r = atomicAdd(&R_index, 1);
                    R[r] = k_device;
                    printf("Destination node is not fixed & also a minimum edge for Z:%d\n", z_device);
                    printf("Adding k:%d to Tree & R for processing\n", k_device);
                    //printf("R_index in kernel:%d\n", R_index);
                }
                else if (dist[k_device] > w_device)
                {
                    //printf("not minimum edge and not fixed k, z:%d, k:%d\n", z_device, k_device);
                    //printf("\n");
                    printf("Destination node is not fixed & NOT a minimum weight edge\n");
                    printf("Adding k:%d to Q for inserting into Heap\n", k_device);
                    // NOTE(review): dist/parent updates here are not atomic;
                    // concurrent threads relaxing the same k can interleave.
                    dist[k_device] = w_device;
                    parent[k_device] = z_device;
                    if (!ifExist(Q, k_device))
                    {
                        int q = atomicAdd(&Q_index, 1);
                        Q[q] = k_device;
                        //if (Q.find(k_device) == Q.end()) {
                        // Q.insert(k_device);
                    }
                }
            }
            // NOTE(review): __syncthreads() inside divergent control flow —
            // not all threads of a block reach this barrier; this is
            // undefined behavior per the CUDA programming model.
            __syncthreads(); // make sure all updates are finished
        }
    }
}

// Kernel Setup: copies the flattened edge arrays to the device, launches
// parallel_processEdge for vertex z_device, synchronizes, and frees the
// device buffers.  NOTE(review): allocating and freeing on every call is
// expensive; the buffers could be allocated once and reused.
void kernel_setup(Graph const &graph, int z_device){
    int threads = 512;
    int blocks = ceil(float(edge_cnt) / float(threads));
    const int ARRAY_BYTES = ARRAY_SIZE * ARRAY_SIZE * sizeof(int);
    //printf("array bytes:%f\n", ARRAY_BYTES);
    // declare GPU memory pointers
    int * allvertex_devicein, * alledge_devicein, * allweight_devicein;
    // allocate GPU memory
    cudaMalloc((void **) &allvertex_devicein, ARRAY_BYTES);
    cudaMalloc((void **) &alledge_devicein, ARRAY_BYTES);
    cudaMalloc((void **) &allweight_devicein, ARRAY_BYTES);
    // transfer the input array to the GPU
    cudaMemcpy(allvertex_devicein, allvertex_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    gpuErrchk( cudaMemcpy(alledge_devicein, alledge_in, ARRAY_BYTES, cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(allweight_devicein, allweight_in, ARRAY_BYTES, cudaMemcpyHostToDevice) );
    //printf("Running global reduce\n");
    parallel_processEdge<<<blocks, threads>>> (allvertex_devicein, alledge_devicein, allweight_devicein, z_device);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    // free GPU memory allocation
    cudaFree(allvertex_devicein);
    cudaFree(alledge_devicein);
    cudaFree(allweight_devicein);
};

// Function to print the constructed MST
void printMST(std::set<fromTo> T)
{
    std::set<fromTo>::iterator it; //set iterator
    for (it=T.begin(); it!=T.end(); it++)
    {
        fromTo e = *it;
        printf("%d - %d\n", e.from, e.to);
    }
}

// The main function that constructs Minimum Spanning Tree (MST)
// using Prim's Parallel algorithm given in chapter 7.
// Host side: pops the closest unfixed vertex from heap H, then drains R by
// launching the edge-processing kernel per vertex, then moves relaxed
// vertices from Q back into H.  Results accumulate in the managed T array.
fromTo* primMST(Graph const &graph, int N, int source)
{
    std::set<int>::iterator it; //set iterator
    // Initialize and assign dist value of
    // all vertices to 0 and source to infinite
    for(int i = 0; i < N; i ++)
    {
        parent[i] = -1;
        dist[i] = INT_MAX;
        fixed[i] = false;
    }
    // Make distance value of source vertex as 0 so it is extracted first
    dist[source] = 0;
    // NOTE(review): dist[0] is pushed here rather than dist[source] — these
    // differ (INT_MAX vs 0) whenever source != 0.
    H.push(distNode{source, dist[0]});
    initMWE(graph); // initialize minimum weight edges of given graph;
    // Loop for |V| - 1 iterations
    //while (!H.empty()) {
    for (int i = 0; i < graph.adjList.size(); i++)
    {
        // Extract the vertex with minimum dist value
        // NOTE(review): H.top() is called without checking H.empty(); if the
        // heap drains early this is undefined behavior.
        distNode d = H.top();
        H.pop();
        int j = d.node; // pop the minimum distance vertex
        printf("Popped minimum distance node:%d\n", j);
        if (!fixed[j])
        {
            printf("Popped node is not fixed adding it to R\n");
            R[R_index] = j;
            R_index++;
            fixed[j] = true;
            if (parent[j] != -1)
            {
                T[T_index] = fromTo{j, parent[j]};
                T_index++;
            }
            //printf("R_index: %d\n", R_index);
            while (R_index != 0){
                // call processEdge for all neighbors of vertex in R
                //printf("R_index: %d\n", R_index);
                z_device = R[0];
                //printf("Z before kernel:%d\n", z_device);
                deleteElement(R, 0, ARRAY_SIZE);
                R_index--;
                // call kernel setup
                printf("Calling kernel for processing edges of elements in R in parallel\n");
                kernel_setup(graph, z_device);
            }
            while (Q_index != 0)
            {
                //for (int i = 0; i < Q_index; i++) {
                printf("Adding all elements from Q to Heap H\n");
                int z = Q[0];
                //printf("z in Q:%d\n", z);
                deleteElement(Q, 0, ARRAY_SIZE);
                Q_index--;
                if (!fixed[z])
                {
                    H.push(distNode{z, dist[z]});
                }
                //}
            }
        }
    }
    if (T_index == graph.adjList.size() -1)
    {
        return T;
    }
    else
        // NOTE(review): this allocation is never freed by any caller (leak),
        // and an "empty tree" is indistinguishable from a real one.
        return new fromTo[ARRAY_SIZE]; // return empty tree
}

// Driver program to call Prim: builds the hard-coded test graph, runs
// primMST from the first non-empty vertex, and prints timing plus the tree.
int main()
{
    printf("2010_Alaskan.txt\n");
    // vector of graph edges
    std::vector<edge> edges;
    edges.push_back(edge{0, 1, 866});
    edges.push_back(edge{0, 2, 187});
    edges.push_back(edge{0, 3, 399});
    edges.push_back(edge{1, 5, 605});
    edges.push_back(edge{1, 10, 1720});
    edges.push_back(edge{1, 11, 888});
    edges.push_back(edge{1, 12, 409});
    edges.push_back(edge{2, 1, 739});
    edges.push_back(edge{2, 3, 213});
    edges.push_back(edge{2, 4, 541});
    edges.push_back(edge{2, 5, 759});
    edges.push_back(edge{2, 6, 1416});
    edges.push_back(edge{2, 7, 1391});
    edges.push_back(edge{2, 8, 2474});
    edges.push_back(edge{2, 9, 2586});
    edges.push_back(edge{2, 10, 2421});
    edges.push_back(edge{2, 11, 1625});
    edges.push_back(edge{2, 12, 765});
    edges.push_back(edge{3, 4, 330});
    edges.push_back(edge{3, 5, 547});
    edges.push_back(edge{3, 12, 561});
    edges.push_back(edge{4, 5, 226});
    edges.push_back(edge{4, 6, 912});
    edges.push_back(edge{5, 6, 689});
    edges.push_back(edge{5, 7, 731});
    edges.push_back(edge{5, 11, 1199});
    edges.push_back(edge{5, 12, 213});
    edges.push_back(edge{6, 7, 224});
    edges.push_back(edge{6, 8, 1378});
    edges.push_back(edge{7, 8, 1234});
    edges.push_back(edge{7, 11, 641});
    edges.push_back(edge{7, 12, 631});
    edges.push_back(edge{8, 9, 337});
    edges.push_back(edge{8, 11, 861});
    edges.push_back(edge{9, 10, 678});
    edges.push_back(edge{9, 11, 967});
    edges.push_back(edge{10, 11, 1024});
    edges.push_back(edge{11, 12, 1013});
    // edges.push_back(edge{4,5,4});
    // edges.push_back(edge{4,11,8});
    // edges.push_back(edge{5,6,8});
    // edges.push_back(edge{5,11,11});
    // edges.push_back(edge{6,7,7});
    // edges.push_back(edge{6,12,2});
    // edges.push_back(edge{6,9,4});
    // edges.push_back(edge{7,8,9});
    // edges.push_back(edge{7,9,14});
    // edges.push_back(edge{8,9,10});
    // edges.push_back(edge{9,10,2});
    // edges.push_back(edge{10,11,1});
    // edges.push_back(edge{10,12,6});
    // edges.push_back(edge{11,12,7});
    // construct graph
    Graph graph(edges, ARRAY_SIZE);
    load_kernelArrays(graph);
    // print adjacency list representation of graph
    printGraph(graph);
    // Source vertex as first non empty vertex in adjacency List
    // Or modify this to take from input file
    int source;
    for(int i = 0; i<nonEmptyIndices.size(); i++)
    {
        if (nonEmptyIndices[i])
        {
            source = i;
            break;
        }
    }
    //printf("source:%d\n", source);
    //printf("Before Prim\n");
    // Time the MST construction with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //printf("Running global reduce\n");
    cudaEventRecord(start, 0);
    primMST(graph, ARRAY_SIZE, source);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Parallel Elpased Time in ms:%f\n", elapsedTime);
    //printf("After Prim\n");
    //printf("T size:%d\n", T_index);
    //printf("MST in iterator\n");
    printf("\n====================================\n");
    printf("Minimum Spanning Tree using Prim \n");
    for (int i =0; i<T_index; i++)
    {
        fromTo e = T[i];
        printf("%d - %d\n", e.from, e.to);
    }
    return 0;
}
//Reference: https://www.geeksforgeeks.org/prims-mst-for-adjacency-list-representation-greedy-algo-6/
// https://www.techiedelight.com/graph-implementation-using-stl/
1,563
#include<iostream>
#include<cstdio>
#include<cmath>
#include<vector>
using namespace std;

/*
 * Kernel: block-wise partial sum.
 * Block i serially adds elements [256*i, min(256*(i+1), n)) of a and
 * writes the partial total to b[i].  Launched with one thread per block,
 * so each block's loop runs sequentially on that single thread.
 */
__global__ void sum(int *a, int *b, int n)
{
    int base = 256 * blockIdx.x;
    int s = 0;
    for (int i = base; i < min(base + 256, n); i++)
    {
        s = s + a[i];
    }
    b[blockIdx.x] = s;
}

/*
 * Reads n, fills an array with 1..n, reduces it on the GPU in passes of
 * 256, and prints the sum and the mean.
 */
int main()
{
    cout << "Enter the no of elements" << endl;
    int n;
    cin >> n;
    if (n <= 0)   // guard against empty or invalid input
    {
        cout << "Need at least one element" << endl;
        return 1;
    }

    // Host data 1..n (std::vector replaces the non-standard VLA `int a[n]`).
    vector<int> a(n);
    for (int i = 0; i < n; i++)
    {
        a[i] = i + 1;
    }

    int *ad, *bd;
    int size = n * sizeof(int);
    cudaMalloc(&ad, size);
    cudaMemcpy(ad, a.data(), size, cudaMemcpyHostToDevice);
    int grids = (int)ceil(n * 1.0f / 256.0f);
    cudaMalloc(&bd, grids * sizeof(int));

    // Repeated reduction: each pass shrinks the element count by 256x and
    // copies the partial sums back into ad for the next pass.  The grid
    // stays at `grids` blocks; surplus blocks just write zeros past the
    // range that is copied back, which is harmless.
    int p = n;   // remember the original count for the mean
    while (n > 1)
    {
        sum<<<grids, 1>>>(ad, bd, n);
        n = (int)ceil(n * 1.0f / 256.0f);
        cudaMemcpy(ad, bd, n * sizeof(int), cudaMemcpyDeviceToDevice);
    }

    // BUGFIX: copy sizeof(int) bytes into a single int instead of the
    // original hard-coded 4-byte copy into `int add[2]`.
    int total = 0;
    n = p;
    cudaMemcpy(&total, ad, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "The sum is " << total << endl;
    float mean = total / (n * 1.0f);
    cout << "The mean is " << mean << endl;

    // BUGFIX: the original leaked both device buffers.
    cudaFree(ad);
    cudaFree(bd);
    return 0;
}
1,564
#include <stdio.h>

// Exercise skeleton — the student fills in the GPU side:
// 1. Add kernel function here
// 2. Get the number of threads in a block, block and thread indices and print them:
//    printf("Hello from GPU thread %d = %d * %d + %d\n", threadIndex, blockIndex, blockSize, threadInBlock);

int main()
{
    // Launch configuration for the exercise: a 1D grid of blocks.
    const int threadsPerBlock = 32;
    const int blockCount = 3;

    // 1. Call the kernel in the 1D grid of ``blockCount`` blocks of ``threadsPerBlock`` threads.
    // 2. CUDA kernel calls are asynchronous, which means that one have to sync the device to make sure
    //    that GPU kernel completes before program exits.

    (void)threadsPerBlock;  // silence unused-variable warnings until the launch is added
    (void)blockCount;

    return 0;
}
1,565
#include "includes.h"

// Adds Aux[blockIdx.x - 1] to each element of block blockIdx.x's section
// of S.  Presumably the fix-up pass of a block-wise prefix scan, where Aux
// holds the accumulated totals of preceding blocks — confirm against the
// producing kernel.  B (elements per block) is a macro from includes.h.
__global__ void k4(int *Aux, int *S)
{
    // The first block has nothing before it, so it needs no correction.
    if (blockIdx.x == 0)
        return;

    const int idx = blockIdx.x * B + threadIdx.x;
    S[idx] += Aux[blockIdx.x - 1];
}
1,566
/**
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software.  Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#include "cuda_runtime_api.h"
#include "cuda.h"
#include <string>
using namespace std;

unsigned int safeDiv(int a, int b)
{
    /*
     * Function: Divsion that ceils the quotienten to get an int
     * --------------------
     *
     * a: The numerator
     * b: The denominator
     *
     * returns: Ceiled quotienten
     */
    return ceil(a / (b*1.0));
}

bool getThreads(int width, int n_samples, int& threadX, int& threadZ)
{
    /*
     * Function: Assign threads and blocks
     * --------------------
     *
     * width: The width of the array
     * n_samples: Number of experiments
     * threadX: Number of threads assigned to x-axis
     * threadZ: Number of threads assigned to z-axis
     * returns: True, if there exists an thread assignment
     *
     * Starts at 512 x-threads and repeatedly trades x-threads for z-threads
     * (halving/doubling) until both grid dimensions fit under the 65535
     * block limit.  NOTE(review): the width-overflow branch calls exit()
     * rather than returning false like the other failure paths.
     */
    int maxBlocks = 65535;
    threadX = 512;
    threadZ = 1;
    bool search = true;
    bool X = false;
    bool Z = false;
    while (search)
    {
        if (safeDiv(width, threadX) < maxBlocks)
        {
            X = true;
        }
        else
        {
            printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments");
            exit (EXIT_FAILURE);
        }
        if (safeDiv(n_samples, threadZ) < maxBlocks)
        {
            Z = true;
        }
        else
        {
            // Too many z-blocks: shift parallelism from x to z and retry.
            threadX = threadX / 2;
            threadZ = threadZ * 2;
            X = false;
        }
        if (X && Z)
        {
            search = false;
        }
        if (threadX ==0)
        {
            printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments");
            return false;
        }
        if (threadZ ==0)
        {
            printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments");
            return false;
        }
    }
    return true;
}

bool check_memory(size_t mem){
    /*
     * Function: Check if there is enough memory on GPU
     * --------------------
     *
     * mem: Memory required to allocate data
     *
     * returns: True, if there exists enough memory
     *
     * Checks every visible GPU; fails if any device has less free memory
     * than requested.
     */
    int num_gpus;
    size_t free, total;
    cudaGetDeviceCount( &num_gpus );
    for ( int gpu_id = 0; gpu_id < num_gpus; gpu_id++ )
    {
        cudaSetDevice( gpu_id );
        int id;
        cudaGetDevice( &id );
        cudaMemGetInfo( &free, &total );
        if (free <= mem)
        {
            cout << "Error: Your need " << ceil((mem)/1000000 * 0.953674) << " Mib, but there are only " << ceil(free /1000000*0.953674) << " Mib avaialble. Consider running your data in batches."<< endl;
            return false;
        }
    }
    return true;
}

size_t cuda_mem_avilable(){
    /*
     * Function: Get available memory on GPU
     * --------------------
     *
     * returns: Available memory on GPU(bits)
     *
     * NOTE(review): returns inside the first loop iteration, so only the
     * first GPU is ever queried; if no GPU is present the function falls
     * off the end without a return value (undefined behavior).
     */
    int num_gpus;
    size_t free, total;
    cudaGetDeviceCount( &num_gpus );
    for ( int gpu_id = 0; gpu_id < num_gpus; gpu_id++ )
    {
        cudaSetDevice( gpu_id );
        int id;
        cudaGetDevice( &id );
        cudaMemGetInfo( &free, &total );
        return free;
    }
}

size_t mem_required(int m, int n, int S,int n_samples)
{
    /*
     * Function: Get required memory for allocation of data
     * --------------------
     *
     * m: Number of samples in A
     * n: Number of samples in B
     * S: Maxsum in experiments
     * n_samples: Number of experiments
     *
     * returns: Memory of data(bits)
     */
    int height = m + 1;
    int width = S + 1;
    int z_height = m+n;
    // z array + the two ping-pong count tables N and N_old.
    size_t memory = z_height * n_samples * sizeof(int) + 2 * width * height * n_samples * sizeof(double);
    return memory;
}

__global__ void compute_perm(double *d_N,double *d_N_old,int *d_z, int height, int width, int n_samples, int sample_len, int i)
{
    /*
     * Function: Get required memory for allocation of data
     * --------------------
     *
     * d_N: Array to add counts of permutations
     * d_N_old: Array of old counts of permutations
     * d_z: Combination of elements from A and B
     * height: Height of the arrays d_N and d_N_old
     * width: Width of the arrays d_N and d_N_old
     * n_samples: Number of experiments, or depth of d_N and d_N_old
     * sample_len: Length of d_z
     * i: Iteration i
     * returns: Updated counts in d_N for iteration i
     *
     * One thread per (sum s, subset size j, experiment d) cell.  Implements
     * one step of the dynamic-programming recurrence over element i of each
     * experiment's z array, reading d_N_old and writing d_N.
     */
    int s = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int d = blockIdx.z * blockDim.z + threadIdx.z;
    if(j < height && s < width && d < n_samples)
    {
        if (i<j)
        {
            // Fewer elements seen than the subset size: count is zero.
            d_N[(j + d * height)*width + s] = 0;
        }
        else if (j == 0 && d_z[sample_len * d + i-1] == s)
        {
            // Singleton subsets: element i alone reaches sum s.
            d_N[(j + d * height) * width + s] = d_N_old[(j + d * height) * width + s] + 1;
        }
        else if (j > 0 && d_z[sample_len * d + i-1] <= s)
        {
            // Include element i (shift by its value, one fewer row) or skip it.
            d_N[(j + d * height) * width + s] = d_N_old[((j-1) + d * height) * width + (s - d_z[sample_len * d + i-1])] + d_N_old[(j + d*height) * width + s];
        }
        else
        {
            // Element i too large to include: carry the old count forward.
            d_N[(j + d * height)*width + s] = d_N_old[(j + d*height) * width + s];
        }
    }
}

double * greenCuda(int *Z_data_, int * all_S_, int m, int n, int S, int n_samples)
{
    /*
     * Function: Compute permutation distribution
     * --------------------
     *
     * Z_data_: Combinvation of A and B for all experiments
     * all_S_: All sums for each experiment
     * m: Number of samples in A
     * n: Number of samples in B
     * S: Maxsum in experiments
     * n_samples: Number of experiments
     *
     * returns: Permutation distribution for all experiments
     *
     * Runs compute_perm m+n times, ping-ponging between N and N_old, then
     * copies row m-1 of the final table into the host result dx.
     * NOTE(review): N/N_old are not explicitly zero-initialized after
     * cudaMallocManaged before the first kernel pass — verify the
     * allocator's contents are really zero.  Also, the managed buffers and
     * dx leak if getThreads() fails after allocation.
     */
    cudaError_t err = cudaSuccess;
    int i;
    int s;
    int height = m + 1;
    int width = S + 1;
    int z_height = m+n;
    int *z;
    double *N, *N_old;
    size_t memory = z_height * n_samples * sizeof(int) + 2 * width * height * n_samples * sizeof(double);
    if (!check_memory(memory)){ return NULL; };
    cudaMallocManaged(&z, sizeof(int) * z_height * n_samples);
    cudaMallocManaged(&N, sizeof(double) * width * height * n_samples);
    cudaMallocManaged(&N_old, sizeof(double) * width * height * n_samples);
    double *dx = (double *)malloc(sizeof(double) * width * n_samples);
    // Copy the experiment data into managed memory.
    for (i = 0; i < z_height*n_samples; ++i)
    {
        z[i] = Z_data_[i];
    }
    int threadX, threadZ;
    if (!getThreads(width, n_samples, threadX, threadZ)){ return NULL; }
    dim3 threads(threadX,1,threadZ);
    auto safediv = [](auto a, auto b) {return static_cast<unsigned int>(ceil(a / (b*1.0))); };
    dim3 blocks(safediv(width, threads.x), safediv( height, threads.y),safediv( n_samples, threads.z));
    // Ping-pong: odd iterations write N from N_old, even iterations the reverse.
    for (i = 1; i < (m + n) + 1; i++)
    {
        if (i % 2 == 1)
        {
            compute_perm<<<blocks,threads>>>(N, N_old, z, height, width, n_samples, z_height, i);
        }
        else
        {
            compute_perm<<<blocks,threads>>>(N_old, N, z, height, width,n_samples, z_height, i);
        }
    }
    cudaDeviceSynchronize();
    double msum;   // NOTE(review): unused
    // i == m+n+1 here; its parity tells which buffer holds the final table
    // (the one that was the *input* of the would-be next pass).
    if (i % 2 == 1)
    {
        for (i=0; i< n_samples; i++)
        {
            for (s = 0; s < S+1; s++)
            {
                dx[(S + 1) * i + s] = N_old[((m-1) + i * height)*width + s];
            }
        }
    }
    else
    {
        for (i=0; i< n_samples; i++)
        {
            for (s = 0; s < S+1; s++)
            {
                dx[(S + 1) * i + s] = N[((m-1) + i * height)*width + s];
            }
        }
    }
    cudaFree(N);
    cudaFree(N_old);
    cudaFree(z);
    return dx;
}
1,567
__device__ double basis_eval(double x, double y, int i) { switch (i) { case 0: return 1.414213562373095E+00; case 1: return -1.999999999999999E+00+ 5.999999999999999E+00*x; case 2: return -3.464101615137754E+00+ 3.464101615137750E+00*x+ 6.928203230275512E+00*y; case 3: return 2.449489742783153E+00+-1.959591794226528E+01*x+ 1.648597081617952E-14*y+ 2.449489742783160E+01*x*x; case 4: return 4.242640687119131E+00+-2.545584412271482E+01*x+-8.485281374238392E+00*y+ 2.121320343559552E+01*x*x+ 4.242640687119219E+01*x*y; case 5: return 5.477225575051629E+00+-1.095445115010309E+01*x+-3.286335345030997E+01*y+ 5.477225575051381E+00*x*x+ 3.286335345031001E+01*x*y+ 3.286335345030994E+01*y*y; case 6: return -2.828427124742674E+00+ 4.242640687116461E+01*x+-7.573552134493922E-12*y+-1.272792206135164E+02*x*x+ 1.480773761026334E-11*x*y+ 5.598437621001612E-12*y*y+ 9.899494936607567E+01*x*x*x; case 7: return -4.898979485552100E+00+ 6.368673331219978E+01*x+ 9.797958971110390E+00*y+-1.616663230232790E+02*x*x+-1.175755076534249E+02*x*y+ 6.193426205505117E-12*y*y+ 1.028785691966154E+02*x*x*x+ 2.057571383935418E+02*x*x*y; case 8: return -6.324555320324137E+00+ 5.692099788284931E+01*x+ 3.794733192202038E+01*y+-9.486832980456133E+01*x*x+-3.035786553760383E+02*x*y+-3.794733192204275E+01*y*y+ 4.427188724202135E+01*x*x*x+ 2.656313234538372E+02*x*x*y+ 2.656313234542428E+02*x*y*y; case 9: return -7.483314773585763E+00+ 2.244994432091432E+01*x+ 8.979977728286150E+01*y+-2.244994432106859E+01*x*x+-1.795995545666577E+02*x*y+-2.244994432069088E+02*y*y+ 7.483314773734342E+00*x*x*x+ 8.979977728384534E+01*x*x*y+ 2.244994432077823E+02*x*y*y+ 1.496662954711719E+02*y*y*y; case 10: return 3.162277659932381E+00+-7.589466384207020E+01*x+ 1.426176469642703E-09*y+ 3.984469851776336E+02*x*x+-7.910579797240936E-09*x*y+-2.221386508489322E-09*y*y+-7.083501958763851E+02*x*x*x+ 7.621485651163695E-09*x*x*y+ 6.067021191238202E-09*x*y*y+ 1.028693334372986E-09*y*y*y+ 3.984469851817817E+02*x*x*x*x; case 11: return 
5.477225573293454E+00+-1.204989626165627E+02*x+-1.095445114633290E+01*y+ 5.751086852221304E+02*x*x+ 2.300434740977508E+02*x*y+-2.068407262794661E-09*y*y+-9.201738963566557E+02*x*x*x+-9.201738964277876E+02*x*x*y+ 8.157350468216520E-09*x*y*y+ 6.689323593113725E-10*y*y*y+ 4.600869481761621E+02*x*x*x*x+ 9.201738964414616E+02*x*x*x*y; case 12: return 7.071067805455000E+00+-1.272792204805459E+02*x+-4.242640685227210E+01*y+ 4.879036784195053E+02*x*x+ 7.212489164466416E+02*x*y+ 4.242640686115994E+01*y*y+-6.222539665248095E+02*x*x*x+-2.206173156074114E+03*x*x*y+-6.788225097240384E+02*x*y*y+-2.618257708956216E-09*y*y*y+ 2.545584407762581E+02*x*x*x*x+ 1.527350646359356E+03*x*x*x*y+ 1.527350646878523E+03*x*x*y*y; case 13: return 8.366600260526104E+00+-1.003992030768338E+02*x+-1.003992031718044E+02*y+ 2.509980074706584E+02*x*x+ 1.104391234720425E+03*x*y+ 2.509980079602665E+02*y*y+-2.342648066787563E+02*x*x*x+-1.907584859424864E+03*x*x*y+-2.509980079423497E+03*x*y*y+-1.673320053154932E+02*y*y*y+ 7.529940202066899E+01*x*x*x*x+ 9.035928277891566E+02*x*x*x*y+ 2.258982071161214E+03*x*x*y*y+ 1.505988047779402E+03*x*y*y*y; case 14: return 9.486832983521653E+00+-3.794733194390820E+01*x+-1.897366596555042E+02*y+ 5.692099790539108E+01*x*x+ 5.692099791671884E+02*x*y+ 8.538149683896320E+02*y*y+-3.794733189560607E+01*x*x*x+-5.692099793702737E+02*x*x*y+-1.707629937257380E+03*x*y*y+-1.328156617433492E+03*y*y*y+ 9.486832949529969E+00*x*x*x*x+ 1.897366598407721E+02*x*x*x*y+ 8.538149689079755E+02*x*x*y*y+ 1.328156617726260E+03*x*y*y*y+ 6.640783086958844E+02*y*y*y*y; case 15: return -3.464101558949990E+00+ 1.212435559200072E+02*x+-6.554927287342971E-07*y+-9.699484511343436E+02*x*x+ 6.168075543844670E-06*x*y+ 1.855928121585948E-06*y*y+ 2.909845358057180E+03*x*x*x+-1.315541065068287E-05*x*x*y+-1.130274504940533E-05*x*y*y+-2.010826993623497E-06*y*y*y+-3.637306700193426E+03*x*x*x*x+ 7.812696673194998E-06*x*x*x*y+ 1.182910267641302E-05*x*x*y*y+ 5.966502721149417E-06*x*y*y*y+ 
7.490370807424855E-07*y*y*y*y+ 1.600414948629347E+03*x*x*x*x*x; case 16: return -5.999999786381989E+00+ 1.979999956525627E+02*x+ 1.199999852846904E+01*y+-1.487999975474801E+03*x*x+-3.839999824158518E+02*x*y+ 3.590733134991923E-06*y*y+ 4.175999942031873E+03*x*x*x+ 2.591999942138422E+03*x*x*y+-2.304845967213866E-05*x*y*y+-3.765206360347302E-06*y*y*y+-4.859999939177293E+03*x*x*x*x+-5.759999920481709E+03*x*x*x*y+ 2.533800791146071E-05*x*x*y*y+ 1.163201295009333E-05*x*y*y*y+ 1.375929645239261E-06*y*y*y*y+ 1.979999976733127E+03*x*x*x*x*x+ 3.959999958598952E+03*x*x*x*x*y; case 17: return -7.745965695420901E+00+ 2.246330064931837E+02*x+ 4.647579541948504E+01*y+-1.471733486303566E+03*x*x+-1.301322301662001E+03*x*y+-4.647579318622461E+01*y*y+ 3.578636133115430E+03*x*x*x+ 7.529079078085794E+03*x*x*y+ 1.254846513301883E+03*x*y*y+-4.908706745821598E-06*y*y*y+-3.601873989014308E+03*x*x*x*x+-1.394273903583612E+04*x*x*x*y+-6.274232689483983E+03*x*x*y*y+ 1.557818297216641E-05*x*y*y*y+ 1.774768472967930E-06*y*y*y*y+ 1.278084301040559E+03*x*x*x*x*x+ 7.668506441990577E+03*x*x*x*x*y+ 7.668506664656632E+03*x*x*x*y*y; case 18: return -9.165149435198423E+00+ 2.107984283759371E+02*x+ 1.099818044339292E+02*y+-1.081487516924421E+03*x*x+-2.419599681468854E+03*x*y+-2.749545200140819E+02*y*y+ 2.071323351561491E+03*x*x*x+ 1.055825295075658E+04*x*x*y+ 5.774044953825824E+03*x*y*y+ 1.833030149275268E+02*y*y*y+-1.695552095854663E+03*x*x*x*x+-1.429763371616711E+04*x*x*x*y+-2.062158902373028E+04*x*x*y*y+-3.666060369213731E+03*x*y*y*y+ 1.477433029163776E-06*y*y*y*y+ 5.040829816128482E+02*x*x*x*x*x+ 6.048998614115979E+03*x*x*x*x*y+ 1.512249834952116E+04*x*x*x*y*y+ 1.008166605651047E+04*x*x*y*y*y; case 19: return -1.039230291051524E+01+ 1.558845242254402E+02*x+ 2.078460790604852E+02*y+-5.196149513118795E+02*x*x+-2.909844994480356E+03*x*y+-9.353073863098218E+02*y*y+ 7.274606525680852E+02*x*x*x+ 7.482457909279853E+03*x*x*y+ 1.215899583458535E+04*x*y*y+ 
1.454922624147049E+03*y*y*y+-4.676530172095034E+02*x*x*x*x+-7.066764912559228E+03*x*x*x*y+-2.151206857134464E+04*x*x*y*y+-1.745907139924879E+04*x*y*y*y+-7.274613189125139E+02*y*y*y*y+ 1.143150941224163E+02*x*x*x*x*x+ 2.286305899408025E+03*x*x*x*x*y+ 1.028837996460899E+04*x*x*x*y*y+ 1.600414831467970E+04*x*x*y*y*y+ 8.002074513731438E+03*x*y*y*y*y; case 20: return -1.148912453686957E+01+ 5.744560862969394E+01*x+ 3.446737503461372E+02*y+-1.148911495763160E+02*x*x+-1.378694882483679E+03*x*y+-2.412716282265750E+03*y*y+ 1.148910142070014E+02*x*x*x+ 2.068041931439891E+03*x*x*y+ 7.238148529865374E+03*x*y*y+ 6.433910121171304E+03*y*y*y+-5.744538632836686E+01*x*x*x*x+-1.378694134049671E+03*x*x*x*y+-7.238147869806153E+03*x*x*y*y+-1.286781990429334E+04*x*y*y*y+-7.238148907286980E+03*y*y*y*y+ 1.148903744345461E+01*x*x*x*x*x+ 3.446733285357364E+02*x*x*x*x*y+ 2.412715565158995E+03*x*x*x*y*y+ 6.433909612999752E+03*x*x*y*y*y+ 7.238148781371233E+03*x*y*y*y*y+ 2.895259567891218E+03*y*y*y*y*y; } } __device__ double basis_eval_x(double x, double y, int i) { switch (i) { case 0: return 0; case 1: return 5.999999999999999E+00; case 2: return 3.464101615137750E+00; case 3: return -1.959591794226528E+01+ 2*2.449489742783160E+01*x; case 4: return -2.545584412271482E+01+ 2*2.121320343559552E+01*x+ 4.242640687119219E+01*y; case 5: return -1.095445115010309E+01+ 2*5.477225575051381E+00*x+ 3.286335345031001E+01*y; case 6: return 4.242640687116461E+01+-2*1.272792206135164E+02*x+ 1.480773761026334E-11*y+ 3*9.899494936607567E+01*x*x; case 7: return 6.368673331219978E+01+-2*1.616663230232790E+02*x+-1.175755076534249E+02*y+ 3*1.028785691966154E+02*x*x+ 2*2.057571383935418E+02*x*y; case 8: return 5.692099788284931E+01+-2*9.486832980456133E+01*x+-3.035786553760383E+02*y+ 3*4.427188724202135E+01*x*x+ 2*2.656313234538372E+02*x*y+ 2.656313234542428E+02*y*y; case 9: return 2.244994432091432E+01+-2*2.244994432106859E+01*x+-1.795995545666577E+02*y+ 3*7.483314773734342E+00*x*x+ 2*8.979977728384534E+01*x*y+ 
2.244994432077823E+02*y*y; case 10: return -7.589466384207020E+01+ 2*3.984469851776336E+02*x+-7.910579797240936E-09*y+-3*7.083501958763851E+02*x*x+ 2*7.621485651163695E-09*x*y+ 6.067021191238202E-09*y*y+ 4*3.984469851817817E+02*x*x*x; case 11: return -1.204989626165627E+02+ 2*5.751086852221304E+02*x+ 2.300434740977508E+02*y+-3*9.201738963566557E+02*x*x+-2*9.201738964277876E+02*x*y+ 8.157350468216520E-09*y*y+ 4*4.600869481761621E+02*x*x*x+ 3*9.201738964414616E+02*x*x*y; case 12: return -1.272792204805459E+02+ 2*4.879036784195053E+02*x+ 7.212489164466416E+02*y+-3*6.222539665248095E+02*x*x+-2*2.206173156074114E+03*x*y+-6.788225097240384E+02*y*y+ 4*2.545584407762581E+02*x*x*x+ 3*1.527350646359356E+03*x*x*y+ 2*1.527350646878523E+03*x*y*y; case 13: return -1.003992030768338E+02+ 2*2.509980074706584E+02*x+ 1.104391234720425E+03*y+-3*2.342648066787563E+02*x*x+-2*1.907584859424864E+03*x*y+-2.509980079423497E+03*y*y+ 4*7.529940202066899E+01*x*x*x+ 3*9.035928277891566E+02*x*x*y+ 2*2.258982071161214E+03*x*y*y+ 1.505988047779402E+03*y*y*y; case 14: return -3.794733194390820E+01+ 2*5.692099790539108E+01*x+ 5.692099791671884E+02*y+-3*3.794733189560607E+01*x*x+-2*5.692099793702737E+02*x*y+-1.707629937257380E+03*y*y+ 4*9.486832949529969E+00*x*x*x+ 3*1.897366598407721E+02*x*x*y+ 2*8.538149689079755E+02*x*y*y+ 1.328156617726260E+03*y*y*y; case 15: return 1.212435559200072E+02+-2*9.699484511343436E+02*x+ 6.168075543844670E-06*y+ 3*2.909845358057180E+03*x*x+-2*1.315541065068287E-05*x*y+-1.130274504940533E-05*y*y+-4*3.637306700193426E+03*x*x*x+ 3*7.812696673194998E-06*x*x*y+ 2*1.182910267641302E-05*x*y*y+ 5.966502721149417E-06*y*y*y+ 5*1.600414948629347E+03*x*x*x*x; case 16: return 1.979999956525627E+02+-2*1.487999975474801E+03*x+-3.839999824158518E+02*y+ 3*4.175999942031873E+03*x*x+ 2*2.591999942138422E+03*x*y+-2.304845967213866E-05*y*y+-4*4.859999939177293E+03*x*x*x+-3*5.759999920481709E+03*x*x*y+ 2*2.533800791146071E-05*x*y*y+ 1.163201295009333E-05*y*y*y+ 
5*1.979999976733127E+03*x*x*x*x+ 4*3.959999958598952E+03*x*x*x*y; case 17: return 2.246330064931837E+02+-2*1.471733486303566E+03*x+-1.301322301662001E+03*y+ 3*3.578636133115430E+03*x*x+ 2*7.529079078085794E+03*x*y+ 1.254846513301883E+03*y*y+-4*3.601873989014308E+03*x*x*x+-3*1.394273903583612E+04*x*x*y+-2*6.274232689483983E+03*x*y*y+ 1.557818297216641E-05*y*y*y+ 5*1.278084301040559E+03*x*x*x*x+ 4*7.668506441990577E+03*x*x*x*y+ 3*7.668506664656632E+03*x*x*y*y; case 18: return 2.107984283759371E+02+-2*1.081487516924421E+03*x+-2.419599681468854E+03*y+ 3*2.071323351561491E+03*x*x+ 2*1.055825295075658E+04*x*y+ 5.774044953825824E+03*y*y+-4*1.695552095854663E+03*x*x*x+-3*1.429763371616711E+04*x*x*y+-2*2.062158902373028E+04*x*y*y+-3.666060369213731E+03*y*y*y+ 5*5.040829816128482E+02*x*x*x*x+ 4*6.048998614115979E+03*x*x*x*y+ 3*1.512249834952116E+04*x*x*y*y+ 2*1.008166605651047E+04*x*y*y*y; case 19: return 1.558845242254402E+02+-2*5.196149513118795E+02*x+-2.909844994480356E+03*y+ 3*7.274606525680852E+02*x*x+ 2*7.482457909279853E+03*x*y+ 1.215899583458535E+04*y*y+-4*4.676530172095034E+02*x*x*x+-3*7.066764912559228E+03*x*x*y+-2*2.151206857134464E+04*x*y*y+-1.745907139924879E+04*y*y*y+ 5*1.143150941224163E+02*x*x*x*x+ 4*2.286305899408025E+03*x*x*x*y+ 3*1.028837996460899E+04*x*x*y*y+ 2*1.600414831467970E+04*x*y*y*y+ 8.002074513731438E+03*y*y*y*y; case 20: return 5.744560862969394E+01+-2*1.148911495763160E+02*x+-1.378694882483679E+03*y+ 3*1.148910142070014E+02*x*x+ 2*2.068041931439891E+03*x*y+ 7.238148529865374E+03*y*y+-4*5.744538632836686E+01*x*x*x+-3*1.378694134049671E+03*x*x*y+-2*7.238147869806153E+03*x*y*y+-1.286781990429334E+04*y*y*y+ 5*1.148903744345461E+01*x*x*x*x+ 4*3.446733285357364E+02*x*x*x*y+ 3*2.412715565158995E+03*x*x*y*y+ 2*6.433909612999752E+03*x*y*y*y+ 7.238148781371233E+03*y*y*y*y; } } __device__ double basis_eval_y(double x, double y, int i) { switch (i) { case 0: return 0; case 1: return 0; case 2: return 6.928203230275512E+00; case 3: return 
1.648597081617952E-14; case 4: return -8.485281374238392E+00+ 4.242640687119219E+01*x; case 5: return -3.286335345030997E+01+ 3.286335345031001E+01*x+ 2*3.286335345030994E+01*y; case 6: return -7.573552134493922E-12+ 1.480773761026334E-11*x+ 2*5.598437621001612E-12*y; case 7: return 9.797958971110390E+00+-1.175755076534249E+02*x+ 2*6.193426205505117E-12*y+ 2.057571383935418E+02*x*x; case 8: return 3.794733192202038E+01+-3.035786553760383E+02*x+-2*3.794733192204275E+01*y+ 2.656313234538372E+02*x*x+ 2*2.656313234542428E+02*x*y; case 9: return 8.979977728286150E+01+-1.795995545666577E+02*x+-2*2.244994432069088E+02*y+ 8.979977728384534E+01*x*x+ 2*2.244994432077823E+02*x*y+ 3*1.496662954711719E+02*y*y; case 10: return 1.426176469642703E-09+-7.910579797240936E-09*x+-2*2.221386508489322E-09*y+ 7.621485651163695E-09*x*x+ 2*6.067021191238202E-09*x*y+ 3*1.028693334372986E-09*y*y; case 11: return -1.095445114633290E+01+ 2.300434740977508E+02*x+-2*2.068407262794661E-09*y+-9.201738964277876E+02*x*x+ 2*8.157350468216520E-09*x*y+ 3*6.689323593113725E-10*y*y+ 9.201738964414616E+02*x*x*x; case 12: return -4.242640685227210E+01+ 7.212489164466416E+02*x+ 2*4.242640686115994E+01*y+-2.206173156074114E+03*x*x+-2*6.788225097240384E+02*x*y+-3*2.618257708956216E-09*y*y+ 1.527350646359356E+03*x*x*x+ 2*1.527350646878523E+03*x*x*y; case 13: return -1.003992031718044E+02+ 1.104391234720425E+03*x+ 2*2.509980079602665E+02*y+-1.907584859424864E+03*x*x+-2*2.509980079423497E+03*x*y+-3*1.673320053154932E+02*y*y+ 9.035928277891566E+02*x*x*x+ 2*2.258982071161214E+03*x*x*y+ 3*1.505988047779402E+03*x*y*y; case 14: return -1.897366596555042E+02+ 5.692099791671884E+02*x+ 2*8.538149683896320E+02*y+-5.692099793702737E+02*x*x+-2*1.707629937257380E+03*x*y+-3*1.328156617433492E+03*y*y+ 1.897366598407721E+02*x*x*x+ 2*8.538149689079755E+02*x*x*y+ 3*1.328156617726260E+03*x*y*y+ 4*6.640783086958844E+02*y*y*y; case 15: return -6.554927287342971E-07+ 6.168075543844670E-06*x+ 
2*1.855928121585948E-06*y+-1.315541065068287E-05*x*x+-2*1.130274504940533E-05*x*y+-3*2.010826993623497E-06*y*y+ 7.812696673194998E-06*x*x*x+ 2*1.182910267641302E-05*x*x*y+ 3*5.966502721149417E-06*x*y*y+ 4*7.490370807424855E-07*y*y*y; case 16: return 1.199999852846904E+01+-3.839999824158518E+02*x+ 2*3.590733134991923E-06*y+ 2.591999942138422E+03*x*x+-2*2.304845967213866E-05*x*y+-3*3.765206360347302E-06*y*y+-5.759999920481709E+03*x*x*x+ 2*2.533800791146071E-05*x*x*y+ 3*1.163201295009333E-05*x*y*y+ 4*1.375929645239261E-06*y*y*y+ 3.959999958598952E+03*x*x*x*x; case 17: return 4.647579541948504E+01+-1.301322301662001E+03*x+-2*4.647579318622461E+01*y+ 7.529079078085794E+03*x*x+ 2*1.254846513301883E+03*x*y+-3*4.908706745821598E-06*y*y+-1.394273903583612E+04*x*x*x+-2*6.274232689483983E+03*x*x*y+ 3*1.557818297216641E-05*x*y*y+ 4*1.774768472967930E-06*y*y*y+ 7.668506441990577E+03*x*x*x*x+ 2*7.668506664656632E+03*x*x*x*y; case 18: return 1.099818044339292E+02+-2.419599681468854E+03*x+-2*2.749545200140819E+02*y+ 1.055825295075658E+04*x*x+ 2*5.774044953825824E+03*x*y+ 3*1.833030149275268E+02*y*y+-1.429763371616711E+04*x*x*x+-2*2.062158902373028E+04*x*x*y+-3*3.666060369213731E+03*x*y*y+ 4*1.477433029163776E-06*y*y*y+ 6.048998614115979E+03*x*x*x*x+ 2*1.512249834952116E+04*x*x*x*y+ 3*1.008166605651047E+04*x*x*y*y; case 19: return 2.078460790604852E+02+-2.909844994480356E+03*x+-2*9.353073863098218E+02*y+ 7.482457909279853E+03*x*x+ 2*1.215899583458535E+04*x*y+ 3*1.454922624147049E+03*y*y+-7.066764912559228E+03*x*x*x+-2*2.151206857134464E+04*x*x*y+-3*1.745907139924879E+04*x*y*y+-4*7.274613189125139E+02*y*y*y+ 2.286305899408025E+03*x*x*x*x+ 2*1.028837996460899E+04*x*x*x*y+ 3*1.600414831467970E+04*x*x*y*y+ 4*8.002074513731438E+03*x*y*y*y; case 20: return 3.446737503461372E+02+-1.378694882483679E+03*x+-2*2.412716282265750E+03*y+ 2.068041931439891E+03*x*x+ 2*7.238148529865374E+03*x*y+ 
3*6.433910121171304E+03*y*y+-1.378694134049671E+03*x*x*x+-2*7.238147869806153E+03*x*x*y+-3*1.286781990429334E+04*x*y*y+-4*7.238148907286980E+03*y*y*y+ 3.446733285357364E+02*x*x*x*x+ 2*2.412715565158995E+03*x*x*x*y+ 3*6.433909612999752E+03*x*x*y*y+ 4*7.238148781371233E+03*x*y*y*y+ 5*2.895259567891218E+03*y*y*y*y; } }
1,568
#include "includes.h" __global__ void OPT_3_SIZES_SUM(int* lcmsizes, int n) { for(int i = 0; i < n; i++) lcmsizes[i+1] += lcmsizes[i]; }
1,569
#include "includes.h" __device__ double get_collective_dist(int *dist, int rows, int cols, int col) { double sum = 0; for (int i = 0; i < rows; i++) { if (dist[i * cols + col] == 0) { return 0; } sum += (1 / (double)dist[i * cols + col]); } return sum; } __global__ void collective_dist_kernel(int *dist, int rows, int cols, double *col_dist) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < cols) { col_dist[tid] = get_collective_dist(dist, rows, cols, tid); tid += blockDim.x * gridDim.x; } }
1,570
#define N 15 #define B 2 #define T 32 __global__ void dl(int* in) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < N) { if(tid % 2 == 0) in[tid]++; __syncthreads(); // ouch int sum = in[tid]; if(tid > 0) sum += in[tid-1]; if(tid < N - 1) sum += in[tid+1]; in[tid] = sum / 3; } } // dl<<<B,T>>>(din);
1,571
//////////////////////////////////////////////////////////// //Ho Thien Luan -> History Tracking! // 1. multi_pat_asm_naive_cpu.cu // 2. // // // //////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <time.h> #include <cuda_runtime.h> //#include "cuPrintf.cu" #define FILENAME_MAXLEN 256 #define THREAD_BLOCK_EXP (7) #define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP) __global__ void ASM_kernel(char *g_input_string, int input_size, int *g_pattern_table, int pattern_length, int no_of_patterns, int *g_matched_result) { int tid = threadIdx.x ; int gbid = blockIdx.y * gridDim.x + blockIdx.x ; int start = gbid*THREAD_BLOCK_SIZE + tid; int max_pattern_length = pattern_length +1; int result; if (start < (input_size-pattern_length+1)) { for (int j = 0; j < no_of_patterns; j++) { result = 0; for (int i = 0; i<pattern_length ; i++) { if ((g_input_string[ start + i ] != '\n') & (g_input_string[ start + i ] != g_pattern_table[j*max_pattern_length+i])) { result ++; //g_matched_result[start*no_of_patterns + j] = g_matched_result[start*no_of_patterns + j] + 1; } } g_matched_result[start*no_of_patterns + j] = result; } } //cuPrintf("threadIdx.x = %d \t bit_vector = %d \n", start, bit_vector); } //////////////////////////////// void ASM_process_top (char *g_input_string, size_t input_size, int *g_pattern_table, int pattern_length, int no_of_patterns, int *g_matched_result) { // num_blocks = # of thread blocks to cover input stream int num_blocks = (input_size-pattern_length+1)/THREAD_BLOCK_SIZE + 1 ; dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ; dim3 dimGrid ; int p = num_blocks >> 15 ; dimGrid.x = num_blocks ; if ( p ){ dimGrid.x = 1<<15 ; dimGrid.y = p+1 ; } //cudaPrintfInit();////for cuPrintf ASM_kernel <<< dimGrid, dimBlock >>>((char*)g_input_string, input_size, (int*) g_pattern_table, pattern_length, no_of_patterns, g_matched_result); //cudaPrintfDisplay();////for cuPrintf //cudaPrintfEnd(); 
////for cuPrintf } int main(int argc, char **argv) { char inputFile[FILENAME_MAXLEN]; char patternFile[FILENAME_MAXLEN]; strcpy( inputFile, argv[2]) ; strcpy( patternFile, argv[1]) ; //int k_par = 4; int k_par; k_par = strtol(argv[3], NULL, 10); //////////////////////////////////////////////////////////////////////////////////// //Process input patterns int pattern_size; char *h_pattern = NULL ; int len; size_t sizeOfTableEntry ; int sizeOfTableInBytes ; // numOfTableEntry * sizeOfTableEntry size_t sizeOfPatternInBytes ; // no_of_patterns * sizeOfTableEntry int max_pattern_length = 0; int no_of_patterns = 0; FILE* fpattern = fopen( patternFile, "rb"); assert ( NULL != fpattern ) ; // obtain pattern file fseek (fpattern , 0 , SEEK_END); pattern_size = ftell (fpattern); rewind (fpattern); // allocate a buffer to contains all patterns h_pattern = (char *) malloc (sizeof(char)*pattern_size); assert( NULL != h_pattern ); // copy the file into the buffer pattern_size = fread (h_pattern, 1, pattern_size, fpattern); fclose(fpattern); //printf ("pattern size = %d\n",pattern_size); //printf ("pattern = %s\n",h_pattern); //Processing to get max_pattern_length & no_of_patterns len = 0; for( int i = 0 ; i < pattern_size ; i++){ if ( '\n' == h_pattern[i] ){ if ( (i > 0) && ('\n' != h_pattern[i-1]) ){ // non-empty line no_of_patterns = no_of_patterns + 1; if (max_pattern_length < len+1) {max_pattern_length = len+1;} } len = 0 ; }else{ len++ ; } } // Create pattern_table, pattern_length_table sizeOfTableEntry = sizeof(int) ; sizeOfPatternInBytes = no_of_patterns * sizeOfTableEntry; // 1-D to store size of each patterns sizeOfTableInBytes = no_of_patterns * max_pattern_length; //2-D to store patterns int* pattern_table = (int*) malloc( sizeof(int)*sizeOfTableInBytes ) ; int* pattern_length_table = (int*) malloc( sizeOfPatternInBytes ) ; //Processing to fill pattern_table & pattern_length_table len = 0; int no_patterns = 0; for( int i = 0 ; i < pattern_size ; i++){ if ( '\n' == 
h_pattern[i] ){ if ( (i > 0) && ('\n' != h_pattern[i-1]) ){ // non-empty line pattern_length_table[no_patterns] = len; no_patterns = no_patterns + 1; } len = 0 ; }else{ pattern_table[no_patterns*max_pattern_length + len] = h_pattern[i]; len++ ; } } //Print to pattern_table/pattern_length_table to check /* for (int i = 0; i < no_of_patterns; i++) { printf("\npattern no %d has length = %d-> ",i, pattern_length_table[i]); for (int j = 0; j < pattern_length_table[i]; j++) { printf("%4d",pattern_table[i*max_pattern_length+j]); } } */ /////////////////////////////////////////////////////////////// //Prepare input string int input_size; char *h_input_string = NULL ; int *h_matched_result = NULL ; //open to read file FILE* fpin = fopen( inputFile, "rb"); assert ( NULL != fpin ) ; // sets the file position of the stream to the given offset. fseek (fpin , 0 , SEEK_END); input_size = ftell (fpin); rewind (fpin); // allocate memory to contain the whole file h_input_string = (char *) malloc (sizeof(char)*input_size); assert( NULL != h_input_string ); int size_matched_result = input_size * no_of_patterns; h_matched_result = (int *) malloc (sizeof(int)*size_matched_result); // each input has no_of_patterns results assert( NULL != h_matched_result ); memset( h_matched_result, 0, size_matched_result ) ; // copy the file into the buffer input_size = fread (h_input_string, 1, input_size, fpin); fclose(fpin); ////////////////// //printf("\ninput size -> %4d -> \n",input_size); // printf("%s\n",h_input_string); /* //AmSM with Naive Method in CPU struct timespec t_start, t_end; double elapsedTime; clock_gettime (CLOCK_REALTIME, &t_start); //printf ("starttime s = %li, ns = %li\n",t_start.tv_sec, t_start.tv_nsec); for(int i = 0; i < input_size-max_pattern_length+1; i++) { for (int j = 0; j < no_of_patterns; j++) { h_matched_result[i*no_of_patterns+j] = 0; for (int k = 0; k < pattern_length_table[j]; k++) { if ((h_input_string[i+k] != '\n') & (h_input_string[i+k] != 
pattern_table[j*max_pattern_length+k])) { h_matched_result[i*no_of_patterns+j] = h_matched_result[i*no_of_patterns+j] + 1; } } } } clock_gettime(CLOCK_REALTIME, &t_end); //printf ("endtime s = %li, ns = %li\n",t_end.tv_sec, t_end.tv_nsec); elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000); */ //AmSM with Naive Method in GPU char *g_input_string; //char *g_pattern; int *g_matched_result; int *g_pattern_table; cudaMalloc (&g_input_string, sizeof(char)*input_size); cudaMalloc (&g_matched_result, sizeof(int)*size_matched_result); cudaMalloc (&g_pattern_table, sizeof(int)*sizeOfTableInBytes); cudaMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, cudaMemcpyHostToDevice ); cudaMemcpy (g_pattern_table, pattern_table, sizeof(int)*sizeOfTableInBytes, cudaMemcpyHostToDevice); // record time setting cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // step 3: run ASM on GPU ASM_process_top ( g_input_string, input_size, g_pattern_table, (max_pattern_length-1), no_of_patterns, g_matched_result) ; // record time setting cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaMemcpy (h_matched_result, g_matched_result, sizeof(int)*size_matched_result, cudaMemcpyDeviceToHost ); // Print Result int total_result = 0; for(int i = 0; i < input_size-max_pattern_length+1; i++) { for (int j = 0; j < no_of_patterns; j++) { //printf("Input location %d with pattern %d has Hamming distance = %d\n",i, j, h_matched_result[RESULT_TABLE_MAP(i,j)]); if(h_matched_result[i*no_of_patterns+j] <= k_par) {total_result++;} } } printf("\n\n\n"); printf("###########################################################\n"); printf("#--Multi Fix-Length Patterns Approximate String Matching--#\n"); printf("#---------------------------------------------------------#\n"); printf("#----------------Naive Approach in GPU--------------------#\n"); 
printf("###########################################################\n"); printf("#--No of Patterns |\t\t %10d \t #\n",no_of_patterns); printf("#---------------------------------------------------------#\n"); printf("#--Pattern Length |\t\t %10d \t #\n",max_pattern_length-1); printf("#---------------------------------------------------------#\n"); printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size ); printf("#---------------------------------------------------------#\n"); printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result); printf("#---------------------------------------------------------#\n"); printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time); printf("#---------------------------------------------------------#\n"); printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) ); printf("###########################################################\n"); free(h_pattern); free(h_input_string); free(h_matched_result); free(pattern_table); free(pattern_length_table); return 0; }
1,572
#include "includes.h" __global__ void MatrixMulVarKernel(float* M, float* N, float* P, int widthAHeightB, int heightA, int widthB) { int Row = blockIdx.y*blockDim.y+threadIdx.y;// Calculate the row index of the P element and M int Col = blockIdx.x*blockDim.x+threadIdx.x;// Calculate the column index of P and N if ((Row < heightA) && (Col < widthB)) { float Pvalue = 0; for (int k = 0; k < widthAHeightB; ++k) { Pvalue += M[Row*widthAHeightB+k]*N[k*widthB+Col];// each thread computes one element of the block sub-matrix } P[Row*widthB+Col] = Pvalue; } }
1,573
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ return EXIT_FAILURE;}} while(0) __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence * number, no offset */ curand_init(345893, id, 0, &state[id]); } __global__ void generate_uniform_kernel(curandState *state, int n_points_per_thread, int *result) { int id = threadIdx.x + blockIdx.x * blockDim.x; int count = 0; float x; float y; float z; float r2; /* Copy state to local memory for efficiency */ curandState localState = state[id]; /* Generate pseudo-random uniforms */ for(int i = 0; i < n_points_per_thread; i++) { x = curand_uniform(&localState) * 4 - 2; y = curand_uniform(&localState) * 4 - 2; r2 = pow(x, 2) + pow(y, 2); z = curand_uniform(&localState); if(z < exp(-1 * r2)) { count++; } // if (z > 0.5) // { // count++; // } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void shmem_reduce( int *d_out, int *d_in) { extern __shared__ int sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; sdata[tid]=d_in[myId]; int s = blockDim.x / 2; while(s>0) { if (tid<s) { sdata[tid]+=sdata[tid+s]; } __syncthreads(); s=( int)s/2; } if (tid == 0) { d_out[blockIdx.x] =sdata[0]; } } int main() { int n_threads = 1024; int n_points_per_thread = 1000000; curandState *devStates; // int total; int *devResults; int *devIntermediate; int *devReduced; int *hostResults; int *hostReduced; hostResults = ( int *)calloc(n_threads, sizeof( int)); hostReduced = ( int *)calloc(n_threads, sizeof( int)); CUDA_CALL(cudaMalloc((void **)&devResults, n_threads * sizeof( int))); CUDA_CALL(cudaMalloc((void **)&devReduced, n_threads * sizeof( int))); CUDA_CALL(cudaMalloc((void **)&devIntermediate, n_threads * sizeof( int))); 
CUDA_CALL(cudaMalloc((void **)&devStates, n_threads * sizeof(curandState))); CUDA_CALL(cudaMemset(devResults, 0, n_threads * sizeof( int))); CUDA_CALL(cudaMemset(devReduced, 0, n_threads * sizeof( int))); setup_kernel<<<1, n_threads>>>(devStates); generate_uniform_kernel<<<1, n_threads>>>(devStates, n_points_per_thread, devResults); shmem_reduce<<<n_threads / 32, 32, n_threads * sizeof(int)>>>(devIntermediate,devResults); shmem_reduce<<<32, 32, 32 * sizeof(int)>>>(devReduced,devIntermediate); CUDA_CALL(cudaMemcpy(hostResults, devResults, n_threads*sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(hostReduced, devReduced, n_threads*sizeof(int), cudaMemcpyDeviceToHost)); // for (int i=0; i<n_threads; i++) // { // printf("%d ", hostResults[i]); // } // printf("\n"); /* for (int i=0; i<n_threads; i++) { printf("%d ", hostReduced[i]); } printf("\n");*/ printf("Total area: %1.7f \n", (float) hostReduced[0] / (float) n_points_per_thread / (float) n_threads * 16); CUDA_CALL(cudaFree(devResults)); CUDA_CALL(cudaFree(devReduced)); CUDA_CALL(cudaFree(devStates)); CUDA_CALL(cudaFree(devIntermediate)); free(hostReduced); free(hostResults); return EXIT_SUCCESS; }
1,574
//seqCuda.cu #include<iostream> using namespace std; #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> int main() { const int N=10000; thrust::device_vector<int> a(N); thrust::sequence(a.begin(), a.end(), 0); long sumA= thrust::reduce(a.begin(), a.end(),0); long sumCheck=0; for (int i=0; i<N; i++) sumCheck += i; cout << "Host: " << sumCheck << endl; cout << "GPU: " << sumA << endl; if (sumA == sumCheck) cout << "Test Succeeded!" << endl; else { cerr << "Test FAILED!"; return(1); } return(0); }
1,575
// C = alpha * A * B + beta * C __global__ void MatMulKernelAB(const int M,const int K,float *A, const int K1, const int N,float *B, const int M1, const int N1,float *C, const float alpha,const float beta) { // Each thread computes at most (UNROLL_X * UNROLL_Y) elements of C // by accumulating results into Cvalue int row = threadIdx.y*2; int col = threadIdx.x*128; #pragma unroll for(int j = 0; j < 2; ++j){ #pragma SIMD (i) #pragma unroll for(int i = 0; i < 128; ++i){ float Cvalue = C[(row+j)*N+(col+i)]; for(int e = 0; e < K; ++e){ #pragma reduction (Cvalue,e,+,1024) Cvalue += A[(row+j)*K + e] * B[e*N + (col+i)]; } //C[row][col] = alpha * Cvalue + beta * C[row][col]; C[(row+j)*N+(col+i)] = Cvalue ; } } } int main(int argc, char const *argv[]) { int M = 32; int N = 512; int K = 9216; float *A; float *B; float *C; dim3 blockDim(4,16,1); dim3 gridDim(1,1,1); MatMulKernelAB<<<gridDim,blockDim>>>(M,K,A,K,N,B,M,N,C,1.0,0.0); return 0; }
1,576
/** * Demonstrates the use of a synchronisation construct * What to ponder: * - Significance of counter values printed out with/without syncthreads * - Why does the values vary when syncthreads is used in a kernel launch * containing multiple blocks? */ #include <stdio.h> __device__ __managed__ int volatile is_done; __device__ __managed__ int counter; void check_cuda_errors() { cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) { printf("Last CUDA error %s\n", cudaGetErrorString(rc)); } } __global__ void no_sync() { // Block till our first warp passes atomicAdd while (threadIdx.x / 32 != 0 && is_done == 0) { } atomicAdd(&counter, 1); if (threadIdx.x == 0) { is_done = 1; } if (threadIdx.x == 0) { printf("Counter value (no sync): %d\n", counter); } } __global__ void with_sync() { // Block till our first warp passes atomicAdd while (threadIdx.x / 32 != 0 && is_done == 0) { } atomicAdd(&counter, 1); if (threadIdx.x == 0) { is_done = 1; } __syncthreads(); if (threadIdx.x == 0) { printf("Counter value (sync, one block): %d\n", counter); } } __global__ void with_sync_multiple() { // Block till our first warp passes atomicAdd while (blockIdx.x / 32 != 0 && threadIdx.x / 32 != 0 && is_done == 0) { } atomicAdd(&counter, 1); if (blockIdx.x == 0 && threadIdx.x == 0) { is_done = 1; } __syncthreads(); if (blockIdx.x == 0 && threadIdx.x == 0) { printf("Counter value (sync, multiple blocks): %d\n", counter); } } int main(int argc, char **argv) { is_done = 0; counter = 0; no_sync<<<1, 1024>>>(); cudaDeviceSynchronize(); check_cuda_errors(); is_done = 0; counter = 0; with_sync<<<1, 1024>>>(); cudaDeviceSynchronize(); check_cuda_errors(); is_done = 0; counter = 0; with_sync_multiple<<<1024, 512>>>(); cudaDeviceSynchronize(); check_cuda_errors(); return 0; }
1,577
#include "includes.h" __global__ void SynchStreams() { }
1,578
#include<stdio.h>
#define H 1024
#define W 1024

// NOTE: despite its name, this kernel computes the matrix PRODUCT
// c = a * b for H x W row-major int matrices (the inner loop is a dot
// product). The name is kept unchanged so existing callers keep working.
// One thread per output element; guarded so over-provisioned grids are safe.
__global__ void Matrix_add(int *a, int *b, int *c)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    if (x >= W || y >= H) return;                   // guard the grid tail

    int sum = 0;
    for (int k = 0; k < W; k++) {
        sum += a[y * W + k] * b[k * W + x];
    }
    c[y * W + x] = sum;
}

int main(void)
{
    int *h_a, *h_b, *h_c;
    int *d_a, *d_b, *d_c;
    int SIZE = H * W * sizeof(int);

    // Host matrices: a and b all ones, c zeroed; each product entry is W.
    h_a = (int*)malloc(SIZE);
    h_b = (int*)malloc(SIZE);
    h_c = (int*)malloc(SIZE);
    for (int i = 0; i < W; i++) {
        for (int j = 0; j < H; j++) {
            h_c[i * W + j] = 0;
            h_b[i * W + j] = 1;
            h_a[i * W + j] = 1;
        }
    }

    cudaMalloc(&d_a, SIZE);
    cudaMalloc(&d_b, SIZE);
    cudaMalloc(&d_c, SIZE);
    cudaMemcpy(d_a, h_a, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, SIZE, cudaMemcpyHostToDevice);

    // 32x32 threads * 32x32 blocks = 1024x1024 = one thread per element.
    dim3 threads(32, 32);
    dim3 blocks(32, 32);
    Matrix_add<<<blocks, threads>>>(d_a, d_b, d_c);

    // Launch errors only surface through cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    cudaMemcpy(h_c, d_c, SIZE, cudaMemcpyDeviceToHost);
    for (int i = 0; i < W; i++) {
        for (int j = 0; j < W; j++) {
            printf("%d ", h_c[i * W + j]);
        }
        printf("\n");
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
1,579
#include <iostream>
#include <bits/stdc++.h>
#include <fstream>
#include <sstream>
#include <string>
#include "math.h"
#include "limits.h"
#define MIN -99
#define M 104
#define N 1500
#define trainFileName "train_full.csv"
#define testFileName "test_full.csv"
#define features 55
#define K 10
#define trainData(row,col) trainData[col+row*M]
#define testData(row,col) testData[col+row*M]
#define THRESHOLD 0
using namespace std;

// Global training/test tables (rows of M doubles each).
vector <vector <double> > train_file;
vector <vector <double> > test_file;
vector <vector <double> > trainFile_Full;
vector <vector <double> > testFile_Full;
int *device_trainFileData, *device_cardinality;
float *infoGainsInitializer;
__device__ float device_infoGainOfData;
// NOTE(review): threads(N) = 1500 exceeds the CUDA limit of 1024 threads per
// block, so kernels launched with this configuration fail to launch on real
// hardware; a restructure (multiple blocks per attribute) would be needed.
dim3 blocks(M);
dim3 threads(N);

// Decision-tree node; children[] is indexed 0..number_of_children-1 and each
// child's branch_value is the attribute value leading to it.
struct Node{
    int number_of_children;
    int branch_value;
    int split_attribute;
    int leaf_value;
    struct Node *children[10];
};
typedef struct Node node;

// Allocates a fresh, empty tree node (never freed — the tree is leaked).
node* create(){
    node* n = new node;
    n->number_of_children = 0;
    n->branch_value = -1;
    n->split_attribute = -1;
    n->leaf_value = -1;
    return n;
}

// Replaces train_file with 50 randomly sampled rows (rest zero-filled).
void chooseRandomFeatures(){
    vector<vector<double> > trainFileRandom( N , vector<double> (M, 0));
    for(int i=0; i<50; i++){
        int guess = rand() % 103;
        trainFileRandom[i]=train_file[guess];
    }
    train_file=trainFileRandom;
}

// Cosine similarity of two vectors of the given length.
double cosine_distance(double *A, double *B, int Vector_Length)
{
    double dot = 0.0, denominator_a = 0.0, denominator_b = 0.0 ;
    for( int i = 0u; i < Vector_Length; ++i) {
        dot += A[i] * B[i] ;
        denominator_a += A[i] * A[i] ;
        denominator_b += B[i] * B[i] ;
    }
    return dot / (sqrt( denominator_a) * sqrt( denominator_b)) ;
}

// NOTE(review): this routine is effectively dead code — trainFile1 is never
// initialised, t == 0 so the clustering loop never runs, and nothing is
// returned or stored. Also, cosine_distance is called with length M (104)
// on mean_arr rows of length features (55). Kept as-is, flagged only.
void k_means(){
    double trainFile1[N][M];
    int minima[features]={INT_MAX};   // NOTE(review): only element 0 is INT_MAX; rest are 0
    int maxima[features]={INT_MIN};
    int cluster[N];
    int t=0;
    for(int i=0; i<N; i++){
        for(int j=0; j<features; j++){
            if( trainFile1[i][j]<minima[j]){
                minima[j]= trainFile1[i][j];
            }
            if( trainFile1[i][j]>maxima[j]){
                maxima[j]= trainFile1[i][j];
            }
        }
    }
    double mean_arr[K][features];
    for(int i=0; i<K; i++){
        for(int j=0; j<features; j++){
            int num = (rand() % (maxima[j] - minima[j] + 1)) + minima[j];
            mean_arr[i][j]=num;
        }
    }
    for (int i = 0; i < t; i++) {
        for (int j = 0; j < N; j++) {
            double* dists = new double[K];
            for (int p = 0; p < K; p++) {
                dists[p] = cosine_distance( trainFile1[j], mean_arr[p], M);
            }
            cluster[j] = std::min_element(dists, dists + K) - dists;
            delete[] dists;
        }
        double sum[K][M]={0};
        int count[K]={0};
        for (int f = 0; f < N; f++) {
            for (int p = 0; p < M; p++) {
                sum[cluster[f]][p]+= trainFile1[f][p];
            }
            count[cluster[f]]++;
        }
        for (int f = 0; f < K; f++) {
            for (int p = 0; p < M; p++) {
                mean_arr[f][p]=sum[f][p]/count[f];
            }
        }
    }
}

// Reads the train or test CSV into the corresponding global table.
void read_files(string file_name){
    if(file_name.compare("training")==0){
        string line;
        ifstream ifs(trainFileName);
        while(getline(ifs,line)){
            vector <double> entry;
            stringstream lineStream(line);
            string value;
            while(getline(lineStream,value,',')){
                entry.push_back(stof(value));
            }
            train_file.push_back(entry);
        }
        ifs.close();
    }
    else if(file_name.compare("testing")==0){
        string line1;
        ifstream ifs1(testFileName);
        while(getline(ifs1,line1)){
            vector <double> entry;
            stringstream lineStream1(line1);
            string value;
            while(getline(lineStream1,value,',')){
                entry.push_back(stof(value));
            }
            test_file.push_back(entry);
        }
        ifs1.close();
    }
}

// One block per attribute; thread t handles sample data[t]. Builds the
// (attribute value x class) contingency table in shared memory, then thread 0
// computes the conditional entropy term of the information gain.
// Assumes attribute/class values are in 1..9 (shared arrays sized 10) and
// dataSize >= 10 so the shared-memory initialisation covers all slots.
__global__ void getInformationGains(int *attr,int *data,int dataSize,float *infoGains,int *trainData,int *cardinality)
{
    if(attr[blockIdx.x]==0 && blockIdx.x!=0 && blockIdx.x!=M-1){
        int threadid,blockid,j;
        threadid=threadIdx.x;
        blockid=blockIdx.x;
        __shared__ int value_attribute[10];
        __shared__ int value_class_attribute[10][10];
        if(threadid<10){
            value_attribute[threadid]=0;
            for(j=0;j<10;j++){
                value_class_attribute[threadid][j]=0;
            }
        }
        __syncthreads();
        int classVal = trainData(data[threadid],M-1);
        int attribute_value = trainData(data[threadid],blockid);
        atomicAdd(&value_attribute[attribute_value],1);
        atomicAdd(&value_class_attribute[attribute_value][classVal],1);
        __syncthreads();
        if(threadid==0){
            int i,j;
            float information_gain,intermediateGain;
            information_gain=0;
            for(i=1;i<=cardinality[blockid];i++){
                intermediateGain=0;
                if(value_attribute[i]==0){
                    continue;
                }
                for(j=1;j<=cardinality[M-1];j++){
                    if(value_class_attribute[i][j]==0){
                        continue;
                    }
                    intermediateGain+=(float(value_class_attribute[i][j])/(float)value_attribute[i])*(log((float)value_class_attribute[i][j]/(float)value_attribute[i])/log((float)2));
                }
                intermediateGain*=(float(value_attribute[i])/(float)dataSize);
                information_gain-=intermediateGain;
            }
            infoGains[blockid]=information_gain;
        }
    }
}

// Entropy of the class label over the given sample subset; the result is
// published through the __device__ global device_infoGainOfData.
__global__ void getInfoGainOfData(int *data,int dataSize,int *trainData,int *cardinality)
{
    __shared__ int value_class_count[10];
    int classVal,i,threadid;
    float information_gain;
    threadid=threadIdx.x;
    if(threadid<10){
        value_class_count[threadid]=0;
    }
    __syncthreads();
    classVal=trainData(data[threadIdx.x],M-1);
    atomicAdd(&value_class_count[classVal],1);
    __syncthreads();
    if(threadid==0){
        information_gain=0;
        for(i=1;i<=cardinality[M-1];i++){
            if(value_class_count[i]==0){
                continue;
            }
            information_gain+=((float)value_class_count[i]/(float)dataSize)*(log((float)value_class_count[i]/(float)dataSize)/log((float)2));
        }
        device_infoGainOfData=-1*information_gain;
    }
}

// Returns the most frequent class label among the referenced samples.
int majority_vote(int *data,int dataSize)
{
    int i,outputClass,ans,maxVal;
    map <int, int> dataCount;
    map <int, int>::iterator iterator;
    for(i=0;i<dataSize;i++){
        outputClass = train_file[data[i]][M-1];
        if(dataCount.find(outputClass)==dataCount.end()){
            dataCount.insert(make_pair(outputClass,1));
        }
        else{
            dataCount[outputClass]++;
        }
    }
    maxVal = MIN;
    for(iterator=dataCount.begin();iterator!=dataCount.end();iterator++){
        if(iterator->second > maxVal){
            ans = iterator->first;
            // FIX: maxVal was never updated, so every class overwrote `ans`
            // and the function returned the last class, not the majority.
            maxVal = iterator->second;
        }
    }
    return ans;
}

// Recursively grows the tree rooted at `root` over the samples in host_data.
// host_attributes[i] == 1 marks attribute i as already used on this path.
void make_decision(int *host_attributes, int *host_data, node *root, int host_datasize)
{
    int flag, host_selectedAttribute, i;
    k_means();
    if(host_datasize<=THRESHOLD){
        return;
    }
    float maxGain;
    // If every sample has the same class, make this node a leaf.
    flag=1;
    for(i=1;i<host_datasize;i++){
        if(train_file[host_data[i]][M-1]!=train_file[host_data[i-1]][M-1]){
            flag=0;
            break;
        }
    }
    if(flag==1){
        root->leaf_value=train_file[host_data[0]][M-1];
        return;
    }
    int *device_attr, *device_data;
    float *device_infoGains;
    float host_informationGains[M];
    float host_infoGainOfData;
    cudaMalloc((void**)&device_attr,M*sizeof(int));
    cudaMalloc((void**)&device_data,host_datasize*sizeof(int));
    cudaMalloc(&device_infoGains,M*sizeof(float));
    cudaMemcpy((void*)device_attr,(void*)host_attributes,M*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy((void*)device_data,(void*)host_data,host_datasize*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(device_infoGains, infoGainsInitializer, M*sizeof(float),cudaMemcpyHostToDevice);
    // NOTE(review): host_datasize can exceed 1024 (initially N = 1500), in
    // which case these single-block launches fail silently.
    getInformationGains<<<blocks,host_datasize>>>(device_attr,device_data,host_datasize,device_infoGains,device_trainFileData,device_cardinality);
    cudaMemcpy((void*)host_informationGains,(void*)device_infoGains,M*sizeof(float),cudaMemcpyDeviceToHost);
    cudaFree(device_attr);
    cudaFree(device_infoGains);
    getInfoGainOfData<<<1,host_datasize>>>(device_data,host_datasize,device_trainFileData,device_cardinality);
    cudaMemcpyFromSymbol(&host_infoGainOfData,device_infoGainOfData,sizeof(float),0,cudaMemcpyDeviceToHost);
    cudaFree(device_data);
    // Pick the unused attribute with the largest information gain.
    maxGain=MIN;
    host_selectedAttribute=-1;
    for(i=1;i<M-1;i++){
        if(host_attributes[i]==0){
            host_informationGains[i]=host_infoGainOfData-host_informationGains[i];
            if(host_informationGains[i]>maxGain){
                maxGain=host_informationGains[i];
                host_selectedAttribute=i;
            }
        }
    }
    root->split_attribute = host_selectedAttribute;
    // FIX: the -1 check must come before indexing host_attributes with the
    // selected attribute (previously host_attributes[-1] was written first).
    if(host_selectedAttribute==-1){
        root->leaf_value = majority_vote(host_data, host_datasize);
        return;
    }
    host_attributes[host_selectedAttribute]=1;
    // Partition the samples by their value of the chosen attribute and recurse.
    map<int, vector <int> > dividedData;
    map<int, vector <int> >::iterator iterator;
    int attribute_value;
    for(i=0;i<host_datasize;i++){
        attribute_value = train_file[host_data[i]][host_selectedAttribute];
        if(dividedData.find(attribute_value) == dividedData.end()){
            vector <int> x;
            x.push_back(host_data[i]);
            dividedData.insert(make_pair(attribute_value,x));
        }
        else{
            dividedData[attribute_value].push_back(host_data[i]);
        }
    }
    for(i=0,iterator=dividedData.begin(); iterator!=dividedData.end(); iterator++,i++){
        root->number_of_children++;
        node* childNode;
        childNode = create();
        root->children[i] = childNode;
        childNode->branch_value = iterator->first;
        int new_attr[M];
        for(int z=0;z<M;z++){
            new_attr[z]=host_attributes[z];
        }
        int* host_childData = &(iterator->second[0]);
        make_decision(new_attr, host_childData, childNode, iterator->second.size());
    }
}

// One block per attribute; marks which values (0..9) occur, then reduces the
// marks to a count of distinct values (the attribute's cardinality).
__global__ void getCardinality(int *trainData, int *cardinality)
{
    __shared__ int x[10];
    int blockid, threadid,i;
    blockid = blockIdx.x;
    threadid = threadIdx.x;
    if(threadid<10){
        x[threadid]=0;
    }
    __syncthreads();
    if(blockIdx.x!=0){
        x[trainData(threadid, blockid)] = 1;
        __syncthreads();
        for(i=1;i<10;i*=2){
            int index = 2*i*threadid;
            if(index+i<10){
                x[index]+=x[index+i];
            }
            __syncthreads();
        }
        if(threadid==0){
            cardinality[blockid]=x[0];
        }
    }
    __syncthreads();
}

// Builds train_file from the full table, using column `index` as the label.
// Pass by const reference (fix: was an expensive by-value copy).
void fillTrainFile(const vector <vector <double> > &trainFile_Full, int index){
    for(int j=0; j<trainFile_Full.size(); j++){
        vector<double> temp;
        for(int i=0; i<(M-1); i++){
            temp.push_back(trainFile_Full[j][i]);
        }
        temp.push_back(trainFile_Full[j][index]);
        train_file.push_back(temp);
    }
}

// Same as fillTrainFile but for the test table.
void fillTestFile(const vector <vector <double> > &testFile_Full, int index){
    for(int j=0; j<testFile_Full.size(); j++){
        vector<double> temp;
        for(int i=0; i<(M-1); i++){
            temp.push_back(testFile_Full[j][i]);
        }
        temp.push_back(testFile_Full[j][index]);
        test_file.push_back(temp);
    }
}

// Classifies every test row by walking the tree and prints the accuracy for
// this label column.
void test(node* root, int index)
{
    int i,pos,neg,noResult,attr,attribute_value,j,flag;
    node* temp;
    pos=0;
    neg=0;
    noResult=0;
    fillTestFile(testFile_Full, index);
    for(i=0;i<test_file.size();i++){
        temp=root;
        flag=0;
        while(temp->leaf_value==-1 && temp->split_attribute!=-1){
            attr = temp->split_attribute;
            attribute_value=test_file[i][attr];
            for(j=0;j<temp->number_of_children;j++){
                if(temp->children[j]->branch_value == attribute_value){
                    break;
                }
            }
            if(j==temp->number_of_children){
                flag=1;   // no branch for this value: stop at this node
                break;
            }
            else{
                temp=temp->children[j];
            }
        }
        if(temp->leaf_value == test_file[i][M-1]){
            pos++;
        }
        else{
            neg++;
        }
        if(temp->leaf_value == -1 || flag==1){
            noResult++;
        }
    }
    cout << "Class" << (index - 102) << " : ";
    cout << "Accuracy: " << max(pos, neg)/(pos+neg+0.0)*1.0;
    return;
}

// Loads the full train/test CSVs into trainFile_Full / testFile_Full.
void extractFull(string str)
{
    if(str.compare("training")==0){
        ifstream ifs(trainFileName);
        string line;
        while(getline(ifs,line)){
            stringstream lineStream(line);
            string cell;
            vector <double> values;
            while(getline(lineStream,cell,',')){
                values.push_back(stof(cell));
            }
            trainFile_Full.push_back(values);
        }
        ifs.close();
    }
    else if(str.compare("testing")==0){
        ifstream ifs1(testFileName);
        string line1;
        while(getline(ifs1,line1)){
            stringstream lineStream1(line1);
            string cell1;
            vector <double> values1;
            while(getline(lineStream1,cell1,',')){
                values1.push_back(stof(cell1));
            }
            testFile_Full.push_back(values1);
        }
        ifs1.close();
    }
}

// Trains and evaluates one tree per label column (103..116).
int main()
{
    int i;
    node* root;
    extractFull("training");
    extractFull("testing");
    for(int index=103; index<117; index++){
        train_file.clear();
        test_file.clear();
        fillTrainFile(trainFile_Full, index);
        // Flattened row-major copy of the training table for the device.
        int host_trainFileData[N*M+5]={0};
        for(i=0;i<N*M;i++){
            host_trainFileData[i] = train_file[i/M][i%M];
        }
        int host_data[N], host_attributes[M];
        for(i=0;i<N;i++){
            host_data[i]=i;
        }
        for(i=0;i<M;i++){
            host_attributes[i]=0;
        }
        cudaMalloc((void**)&device_trainFileData, N*M*sizeof(int));
        cudaMemcpy((void*)device_trainFileData,(void*)host_trainFileData, M*N*sizeof(int), cudaMemcpyHostToDevice);
        cudaMalloc((void**)&device_cardinality,M*sizeof(int));
        cudaMemset(device_cardinality, 0, M*sizeof(int));
        getCardinality<<<blocks,threads>>>(device_trainFileData, device_cardinality);
        root = create();
        infoGainsInitializer = (float*)malloc( M * sizeof(float));
        for(i=0; i<M; i++){
            infoGainsInitializer[i]=MIN;
        }
        make_decision(host_attributes, host_data, root, N);
        // FIX: was leaked on every iteration of the outer loop.
        free(infoGainsInitializer);
        cudaFree(device_trainFileData);
        cudaFree(device_cardinality);
        test(root, index);
        cout << endl;
    }
    return 0;
}
1,580
#include<math.h>
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<cuda.h>
#include<time.h>

//CUDA error wrapping
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/*
Copyright 2018 Maxwel Gama Monteiro Junior

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Maxwel Gama Monteiro Junior
Contact: maxweljr@gmail.com
Description: Obtains the surface of a given crystal using its coordination
number as order parameter.

CUDA Crystal Surface Cutter vs2.0
Version History: 1.0 - Does stuff, half on the host and half on the GPU
                 2.0 - Does stuff WITH MORE GLITTER! Carries on counting at
                       GPU only - at first, it did so partially on the host
*/

// Double-precision approximated reciprocal square root (drsqrt).
// Bit-level argument reduction + one Newton/Markstein refinement step around
// the hardware rsqrt; special cases (0, negative, inf, NaN, denormals) are
// handled via the hi/lo-word branches below.
__device__ __forceinline__ double drsqrt (double a)
{
    double y, h, l, e;
    unsigned int ilo, ihi, g, f;
    int d;
    ihi = __double2hiint(a);
    ilo = __double2loint(a);
    if (((unsigned int)ihi) - 0x00100000U < 0x7fe00000U){
        // Normal, positive argument: scale exponent into [1, 4), refine,
        // then undo half the exponent adjustment on the result.
        f = ihi | 0x3fe00000;
        g = f & 0x3fffffff;
        d = g - ihi;
        a = __hiloint2double(g, ilo);
        y = rsqrt (a);
        h = __dmul_rn (y, y);
        l = __fma_rn (y, y, -h);
        e = __fma_rn (l, -a, __fma_rn (h, -a, 1.0));
        // Round as shown in Peter Markstein, "IA-64 and Elementary Functions"
        y = __fma_rn (__fma_rn (0.375, e, 0.5), e * y, y);
        d = d >> 1;
        a = __hiloint2double(__double2hiint(y) + d, __double2loint(y));
    } else if (a == 0.0) {
        // +/-0 -> +/-inf
        a = __hiloint2double ((ihi & 0x80000000) | 0x7ff00000, 0x00000000);
    } else if (a < 0.0) {
        // negative -> NaN
        a = __hiloint2double (0xfff80000, 0x00000000);
    } else if (isinf (a)) {
        // +inf -> +0
        a = __hiloint2double (ihi & 0x80000000, 0x00000000);
    } else if (isnan (a)) {
        a = a + a;
    } else {
        // Denormal: rescale, refine, then re-bias the exponent.
        a = a * __hiloint2double (0x7fd00000, 0);
        y = rsqrt (a);
        h = __dmul_rn (y, y);
        l = __fma_rn (y, y, -h);
        e = __fma_rn (l, -a, __fma_rn (h, -a, 1.0));
        // Round as shown in Peter Markstein, "IA-64 and Elementary Functions"
        y = __fma_rn (__fma_rn (0.375, e, 0.5), e * y, y);
        a = __hiloint2double(__double2hiint(y) + 0x1ff00000,__double2loint(y));
    }
    return a;
}

// For every atom, counts how many atoms lie within distance prmt_ (its
// coordination number, written to count[]), then reduces a per-block tally of
// "surface" atoms (coordination < 13, counting the atom itself) into
// surfies_[blockIdx.x]. Grid-stride over atoms; requires dynamic shared
// memory of blockDim.x uints and a power-of-two blockDim.x for the reduction.
__global__ void parameter_counter(double *x_, double *y_, double *z_, int *count, uint atoms, double prmt_, uint *surfies_)
{
    int n = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ uint cache[];
    uint temp = 0;
    while (n < atoms)
    {
        double xx_;
        double yy_;
        double zz_;
        double rij_;
        double x = x_[n];
        double y = y_[n];
        double z = z_[n];
        int kappa = 0; //save to thread register instead of reordering *count
        for (int neighbor = 0; neighbor < atoms; neighbor++)
        {
            // xx_ is the raw difference (squared in the rsqrt argument);
            // yy_/zz_ are pre-squared. rij_ = 1/distance.
            xx_ = x - x_[neighbor];
            yy_ = (y - y_[neighbor])*(y - y_[neighbor]);
            zz_ = (z - z_[neighbor])*(z - z_[neighbor]);
            rij_ = drsqrt(xx_*xx_ + yy_ + zz_);
            if (1.0/rij_ < prmt_)
            {
                kappa++; //Count the atom itself to avoid branching
            }
            //if (1.0/rij_ < prmt_ && isinf(rij_) == 0)kappa++;//Check that one is not his own neighbor
        }
        count[n] = kappa;
        if(kappa < 13){
            temp++;
        }
        //Surface atoms will have less than the usual 12 neighbors of FCC (13 in this case because it is self-interacting)
        n +=gridDim.x * blockDim.x;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();
    //Perform sum reduction on temp values to obtain number of surface atoms
    int u = blockDim.x/2;
    while(u != 0)
    {
        if (threadIdx.x < u)
        {
            cache[threadIdx.x] += cache[threadIdx.x + u];
        }
        __syncthreads();
        u /= 2;
    }
    if (threadIdx.x == 0) surfies_[blockIdx.x] = cache[0];
}

// Reads atoms from coord_z.xyz, computes coordination numbers on the GPU,
// writes surface atoms to surface.xyz and a coordination histogram to
// num_edge.dat.
int main(void)
{
    int deviceCount;
    cudaGetDeviceCount (&deviceCount);
    if (deviceCount <1)
    {
        printf("CUDA supporting video card not detected. Go eat a sandwich or something.");
        return 0;
    }
    double *x, *y, *z, *dev_x, *dev_y, *dev_z;
    uint *surfies, *dev_surfies;
    int *count, *dev_count;
    int *lbl;
    double prmt;
    int natom;
    int i, j, p;
    int n_surface = 0;
    //Adjust the size of histogram accordingly to fit your maximum number of nearest neighbors
    int grp[14];
    //Change as suitable, this seems to work reasonably in general
    size_t block = 512;
    size_t thread = 512;
    size_t block_size = sizeof(uint)*block;
    FILE *finp, *fout, *fsupply;
    finp=fopen("coord_z.xyz","r");
    fsupply=fopen("num_edge.dat","w");
    fout=fopen("surface.xyz","w");
    // Input format: atom count, cutoff parameter, then "label x y z" rows.
    fscanf(finp,"%d\n",&natom);
    fscanf(finp,"%lf\n",&prmt);
    // Pinned host memory for faster async copies.
    cudaMallocHost((void**)&lbl, sizeof(int) * natom);
    cudaMallocHost((void**)&x,sizeof(double) * natom);
    cudaMallocHost((void**)&y,sizeof(double) * natom);
    cudaMallocHost((void**)&z,sizeof(double) * natom);
    cudaMallocHost((void**)&count,sizeof(int) * natom);
    cudaMallocHost((void**)&surfies, block_size);
    gpuErrchk( cudaMalloc((void**)&dev_surfies, block_size) );
    gpuErrchk( cudaMalloc((void**)&dev_x, sizeof(double) * natom) );
    gpuErrchk( cudaMalloc((void**)&dev_y, sizeof(double) * natom) );
    gpuErrchk( cudaMalloc((void**)&dev_z, sizeof(double) * natom) );
    gpuErrchk( cudaMalloc((void**)&dev_count, sizeof(int) * natom));
    j = 0;
    while (fscanf(finp,"%d %lf %lf %lf\n",&lbl[j], &x[j], &y[j], &z[j]) == 4)
    {
        j=j+1;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    cudaMemcpyAsync(dev_x, x, sizeof(double)*natom, cudaMemcpyHostToDevice);
    cudaMemcpyAsync(dev_y, y, sizeof(double)*natom, cudaMemcpyHostToDevice);
    cudaMemcpyAsync(dev_z, z, sizeof(double)*natom, cudaMemcpyHostToDevice);
    for (p = 0; p < 14; p++)
    {
        grp[p] = 0;
    }
    prmt *= 0.8;
    //Lattice parameter/sqrt(2) ~ prmt * 0.7 is the nearest neighbor FCC distance, using 0.8 to make sure every neighbor is contained
    //even if fast-math flags are used
    // NOTE(review): the launch passes <<<thread, block>>>, i.e. 512 blocks of
    // 512 threads with 512 uints of dynamic shared memory — consistent, since
    // block == thread == 512 here, but fragile if the two ever diverge.
    parameter_counter<<<thread, block, block_size>>>(dev_x,dev_y,dev_z,dev_count,natom, prmt, dev_surfies);
    cudaMemcpyAsync(count, dev_count, sizeof(int)*natom, cudaMemcpyDeviceToHost);
    cudaMemcpy(surfies, dev_surfies, block_size, cudaMemcpyDeviceToHost); //This is needed immediately by host
    for(p = 0; p < block; p++){
        n_surface+=surfies[p];
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    clock_t launch, finish;
    double cpu_time;
    launch = clock();
    fprintf(fout,"%d\n",n_surface);
    fprintf(fout,"%16.16lf\n",prmt/0.8);
    for (i = 0; i < natom; i++) //Writing and wrap-up done on host
    {
        j = count[i];
        grp[j]++;
        if(count[i] < 13){
            fprintf(fout,"%d \t %16.15lf \t %16.15lf \t %16.15lf \n",lbl[i], x[i], y[i], z[i]);
        }
    }
    j = 0;
    for (i = 0; i < 14; i++)
    {
        fprintf(fsupply, "%d\n", grp[i] );
        j+=grp[i];
    }
    fprintf(fsupply,"%d\n",j);
    cudaFreeHost(x);
    cudaFreeHost(y);
    cudaFreeHost(z);
    cudaFreeHost(count);
    cudaFreeHost(surfies);
    cudaFreeHost(lbl);
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_z);
    cudaFree(dev_count);
    cudaFree(dev_surfies);
    finish = clock();
    cpu_time = ((double)(finish - launch)) / CLOCKS_PER_SEC;
    fclose(finp);
    fclose(fout);
    fclose(fsupply);
    printf("\n\nCPU process finished at %16.8lf seconds\n\n", cpu_time);
    printf("\n\nGPU process finished at %.8f seconds\n\n", elapsedTime/1000);
    printf("======================================================================~\n");
    return 0;
}
1,581
#include "includes.h"

// Kogge-Stone inclusive scan (prefix sum) of one block-sized section of X
// into Y. Each block independently scans its own window of blockDim.x
// elements; requires blockDim.x == SECTION_SIZE.
__global__ void Kogge_Stone_scan_kernel(float *X, float *Y, int InputSize)
{
    __shared__ float XY[SECTION_SIZE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Load this block's section. Pad the tail with the additive identity so
    // out-of-range lanes never feed uninitialized shared memory into the scan.
    XY[threadIdx.x] = (i < InputSize) ? X[i] : 0.0f;

    // Iterative Kogge-Stone scan over XY. The split read/write phases with
    // barriers avoid the write-after-read hazard on the shared array.
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        float in;
        __syncthreads();
        if (threadIdx.x >= stride) {
            in = XY[threadIdx.x - stride];
        }
        __syncthreads();
        if (threadIdx.x >= stride) {
            XY[threadIdx.x] += in;
        }
    }
    __syncthreads();

    // Guard the store: the last block may extend past InputSize. The
    // original wrote Y[i] unconditionally — an out-of-bounds write whenever
    // InputSize is not a multiple of blockDim.x.
    if (i < InputSize) {
        Y[i] = XY[threadIdx.x];
    }
}
1,582
#include "includes.h"

// Logistic sigmoid, 1 / (1 + e^-x), using the fast __expf intrinsic.
__device__ inline float sigmoid(float x)
{
    return 1.0f / (1.0f + __expf(-x));
}

// Applies sigmoid() elementwise: target[i] = sigmoid(mat[i]) for i < len.
// Grid-stride loop, so any 1-D launch configuration covers the whole array
// (mat and target may be the same buffer).
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len)
{
    const unsigned int stride = blockDim.x * gridDim.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    while (pos < len) {
        target[pos] = sigmoid(mat[pos]);
        pos += stride;
    }
}
1,583
#include "includes.h"

// Tiles the row-major source image (srcWidth x srcHeight) across the target
// (tgtWidth x tgtHeight): each target pixel takes the source pixel at the
// same coordinates modulo the source dimensions. Grid-stride loop, so any
// 1-D launch configuration is valid.
__global__ void kTile(const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight)
{
    const uint total = tgtWidth * tgtHeight;
    const uint stride = blockDim.x * gridDim.x;

    for (uint pos = blockIdx.x * blockDim.x + threadIdx.x; pos < total; pos += stride) {
        const uint row = pos / tgtWidth;
        const uint col = pos % tgtWidth;
        // Wrap target coordinates into the source image.
        const uint srcRow = row % srcHeight;
        const uint srcCol = col % srcWidth;
        tgt[pos] = src[srcRow * srcWidth + srcCol];
    }
}
1,584
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<cassert>
#include<iostream>
#include<stdio.h>
using namespace std;

// Element-wise vector addition: c[i] = a[i] + b[i] for every i < n.
__global__ void vectorAdd(int *a, int* b, int* c, int n)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= n) return;   // guard the grid tail
    c[idx] = a[idx] + b[idx];
}

// Host-side check: every output element equals the sum of its inputs.
void verify_result(int* a, int* b, int* c, int n)
{
    for (int k = 0; k < n; ++k) {
        assert(c[k] == a[k] + b[k]);
    }
}

int main(int argc, char const *argv[])
{
    srand(0);   // deterministic random inputs

    int n;
    cout << "Enter value for n" << endl;
    cin >> n;

    const size_t bytes = n * sizeof(int);

    // Host buffers; a and b get random values in [0, 100).
    int *a = (int*)malloc(bytes);
    int *b = (int*)malloc(bytes);
    int *c = (int*)malloc(bytes);
    for (int k = 0; k < n; ++k) {
        a[k] = rand() % 100;
        b[k] = rand() % 100;
    }

    // Device buffers and input transfer.
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);

    // 1024 threads per block, ceil(n / 1024) blocks.
    const int block = 1 << 10;
    const int grid = (n + block - 1) / block;
    vectorAdd<<<grid, block>>>(d_a, d_b, d_c, n);

    // Blocking copy also synchronises with the kernel.
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);

    cout << "Vector Sum: ";
    for (int k = 0; k < n; ++k) {
        cout << c[k] << " ";
    }
    cout << endl;

    verify_result(a, b, c, n);

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
1,585
#include <stdint.h>
#define WARP_SIZE 32

// -------------------------------------------------------------------
// helper functions
// -------------------------------------------------------------------

// Get largest memory address that is aligned to a warp worth of floats
// and smaller than x.
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
    return (uintptr_t)(x) & (~((uintptr_t)(WARP_SIZE*sizeof(float)) - 1)) ;
}

// Call this kernel like compute_moments, but it does not need a scratch space

// DOCS
/* beta = x_hat .* x_hat_der_sig ./ (sig_sq + epsilon) ;*/
/* dx_hat = alpha - beta ;*/
/* der = g .* dx_hat + x_hat .* dg + db ;*/
/* // dx_hat = alpha - x_hat .* x_hat_der_sig ./ (sig_sq + epsilon) ; */
/* // der = g .* dx_hat + x_hat .* dg + db ; */

// Fused batch-norm backward step: overwrites xHat in place with
//   g * (alpha - coeff * xHat) + dg * xHat + db,   coeff = xHatDerSig/(sig^2+eps)
// One block per plane (grid-stride over planes); per-plane accesses start at
// a warp-aligned address before the plane so that loads are coalesced, with
// an in-bounds test before each element is touched.
// NOTE(review): `channel` is fixed at blockIdx.x % numChannels while `plane`
// advances by gridDim.x — this is only consistent if gridDim.x is a multiple
// of numChannels (or numPlanes <= gridDim.x); confirm against the launcher.
__global__ void mega_kernel(float* xHat, float const * alpha, float const * moments, float const * xHatDerSig, float const * gains, float const * dGains, float const * dBias, float const epsilon, int planeArea, int numPlanes, int numChannels)
{
    int tid = threadIdx.x ;
    int plane = blockIdx.x ;
    int blockSize = blockDim.x ;
    int planeStride = gridDim.x ;
    int channel = blockIdx.x % numChannels ;
    // Per-channel scalars, loaded once. moments[channel+numChannels] is
    // presumably the per-channel sigma (second half of the moments buffer) —
    // TODO confirm against compute_moments.
    float sigma2 = moments[channel+numChannels] * moments[channel+numChannels] ;
    float x_hat_der_sig = xHatDerSig[channel] ;
    float g = gains[channel] ;
    float dg = dGains[channel] ;
    float db = dBias[channel] ;
    float coefficient = x_hat_der_sig / (sigma2 + epsilon) ;
    float dxHat ;
    while (plane < numPlanes) {
        float const * planeBegin = xHat + plane * planeArea ;
        float const * planeEnd = planeBegin + planeArea ;
        // Start from the warp-aligned address at/before the plane start so
        // every warp load is aligned; lanes before planeBegin are skipped.
        float const * xHatBlock = (float const*) getBlockBeginning(planeBegin) + tid ;
        float const * aPlaneBegin = alpha + plane * planeArea ;
        float const * aBlock = (float const*) getBlockBeginning(aPlaneBegin) + tid ;
        // Writable alias of xHatBlock (same offset into xHat).
        float * oblock = xHat + (xHatBlock - xHat) ;
        while (xHatBlock < planeEnd) {
            if (xHatBlock >= planeBegin) {
                dxHat = *aBlock - coefficient * (*xHatBlock) ;
                *oblock = g * dxHat + dg * (*xHatBlock) + db ;
            }
            xHatBlock += blockSize ;
            aBlock += blockSize ;
            oblock += blockSize ;
        }
        plane += planeStride ;
    }
}
1,586
#include <cuda.h>
#include<stdio.h>

// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i] for i < vec_size.
__global__ void dd(int *d_a, int *d_b, int *d_c, int vec_size){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < vec_size)
        d_c[tid] = d_a[tid] + d_b[tid];
}

int main(int argc, char ** argv){
    // Hard-coded device index (presumably a specific multi-GPU machine).
    cudaSetDevice(3);
    int i, vec_size;
    int *h_a, *h_b, *h_c;
    int *d_a, *d_b, *d_c;
    vec_size = 1000000;
    h_a = (int *)malloc(sizeof(int)*vec_size);
    h_b = (int *)malloc(sizeof(int)*vec_size);
    h_c = (int *)malloc(sizeof(int)*vec_size);
    cudaMalloc((void **) &d_a, sizeof(int)*vec_size);
    cudaMalloc((void **) &d_b, sizeof(int)*vec_size);
    cudaMalloc((void **) &d_c, sizeof(int)*vec_size);
    for(i = 0; i < vec_size; i++){
        h_a[i] = i;
        h_b[i] = 10;
    }
    cudaMemcpy(d_a, h_a, sizeof(int)*vec_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*vec_size, cudaMemcpyHostToDevice);

    // Fix: compute the grid from vec_size instead of hard-coding 1000 blocks,
    // so the launch stays correct if vec_size changes.
    int threads = 1000;
    int blocks = (vec_size + threads - 1) / threads;
    dd<<<blocks, threads>>>(d_a, d_b, d_c, vec_size);

    // Launch-configuration errors only surface via cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    cudaMemcpy(h_c, d_c, sizeof(int)*vec_size, cudaMemcpyDeviceToHost);
    for(i = 0; i < vec_size; i++)
        printf("\n C %d == %d", i, h_c[i]);

    // Fix: release host and device memory (original leaked everything).
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
1,587
// NOTE: the meanings of x/y here are switched.
// Code assumes dimensions are x, y, channels, samples.
// Max-pooling "switches": for each pooled output location, record the
// one-based linear index of the input element that holds the window maximum.
// One thread per pooled output element; pooledVolume = pooledWidth *
// pooledHeight * (channels * samples), so pz below is the combined
// channel/sample slice index.
__global__ void pool_switches (unsigned int* idx, const float* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int width, const int height, const int poolWidth, const int poolHeight, const int strideX, const int strideY, const int padLeft, const int padTop)
{
    int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (pooledIndex < pooledVolume) {
        // Decompose the flat pooled index into (px, py, pz).
        int px = pooledIndex ;
        int py = px / pooledWidth ;
        int pz = py / pooledHeight ;
        px %= pooledWidth ;
        py %= pooledHeight ;
        data += pz * (width*height) ; // offset by channel/sample
        // Pooling-window bounds in input coordinates, clipped to the image
        // (the window may overhang due to padding).
        int x1 = px * strideX - padLeft ;
        int y1 = py * strideY - padTop ;
        int x2 = min(x1 + poolWidth, width) ;
        int y2 = min(y1 + poolHeight, height) ;
        x1 = max(x1, 0) ;
        y1 = max(y1, 0) ;
        // Argmax over the window; ties keep the first element in row-major
        // order. Assumes the clipped window is non-empty — TODO confirm the
        // caller guarantees this for all pad/stride combinations.
        unsigned int bestIdx = y1 * width + x1 ;
        float value, bestValue = data[bestIdx] ;
        for (int y = y1 ; y < y2 ; ++y) {
            for (int x = x1 ; x < x2 ; ++x) {
                value = data[y * width + x] ;
                if (value > bestValue) {
                    bestValue = value ;
                    bestIdx = y * width + x ;
                }
            }
        }
        // return best index. must add the channel/sample offset, plus 1 for one-based indexes
        idx[pooledIndex] = bestIdx + pz * (width*height) + 1 ;
    }
}
1,588
// Implement BFS on CUDA.
// The graph is not weighted but it is directed. The BFS algorithms is same just have to change the graph to unwieghted.
// Error handler was copied from Dr. Rama's colab file shared to us on google classroom
#include<stdio.h>
#include<stdlib.h>
#include<time.h>

#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )

// Vertex count; __managed__ so both host and kernel read the same variable.
__managed__ int n;

// Aborts with a readable message on any CUDA error.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

// Expands one BFS frontier level.
// Va = CSR row offsets (n+1 entries), Ea = edge targets, Fa = frontier flags,
// Xa = visited flags, Ca = cost (level) per vertex.
// NOTE(review): launched as <<<1, n>>>, so this only works for n <= 1024.
__global__ void BFS_Kernel(int *Va, int *Ea, int *Fa, int *Xa, int *Ca)
{
    int tid = threadIdx.x;
    if (tid < n) {
        if(Fa[tid]) {
            Fa[tid] = 0;
            Xa[tid] = 1;
            // Scan this vertex's outgoing edges.
            for (int i = Va[tid]; i < Va[tid + 1]; i++) {
                int j = Ea[i];
                if(!Xa[j]) {
                    // Benign race: several frontier vertices may write the
                    // same neighbour, but all write the same level value.
                    Ca[j] = Ca[tid] + 1;
                    Fa[j] = 1;
                }
            }
        }
    }
}

// Builds a random directed graph in CSR form, runs level-synchronous BFS on
// the GPU from a random source, and prints the cost to reach each vertex.
int main()
{
    srand(time(0));
    n = 50;
    int src = rand() % n;
    int limit = 5; // Limit on maximum number of edges from a vertex
    printf("Number of Vertices = %d\nStarting Vertex = %d\n", n, src);
    int *Va;
    int *c_Va;
    int *Ea;
    int *c_Ea;
    int end = 0;
    // CSR row offsets: vertex i owns edges Va[i]..Va[i+1]-1.
    Va = (int *)malloc((n + 1) * sizeof(int));
    HANDLE_ERROR(cudaMalloc((void **)&c_Va, (n + 1) * sizeof(int)));
    for (int i =0; i < n; i++) {
        Va[i] = end;
        end = end + (rand() % limit);
    }
    Va[n] = end;
    HANDLE_ERROR(cudaMemcpy(c_Va, Va, (n + 1) * sizeof(int), cudaMemcpyHostToDevice));
    // Random edge targets.
    Ea = (int *)malloc(end * sizeof(int));
    HANDLE_ERROR(cudaMalloc((void **)&c_Ea, end * sizeof(int)));
    for (int i = 0; i < end; i++) {
        Ea[i] = (rand()) % n;
    }
    HANDLE_ERROR(cudaMemcpy(c_Ea, Ea, end * sizeof(int), cudaMemcpyHostToDevice));
    /* Uncomment this to see the graph
    for (int i = 0; i < n; i++) printf("%d ", Va[i]);
    puts(" ");
    for (int i = 0; i < end; i++) printf("%d ", Ea[i]);
    puts(" ");
    /**/
    // Scratch host buffer, reused for Fa/Xa/Ca staging.
    // NOTE(review): sizeof(n) == sizeof(int) here, but sizeof(int) would
    // state the intent.
    int *T;
    T = (int *)malloc(n * sizeof(n));
    // Frontier flags: only the source starts in the frontier.
    int *c_Fa;
    HANDLE_ERROR(cudaMalloc((void **)&c_Fa, n * sizeof(int)));
    memset(T, 0, n * sizeof(int));
    T[src] = 1;
    HANDLE_ERROR(cudaMemcpy(c_Fa, T, n * sizeof(int), cudaMemcpyHostToDevice));
    // Visited flags, all zero initially.
    int *c_Xa;
    HANDLE_ERROR(cudaMalloc((void **)&c_Xa, n * sizeof(int)));
    memset(T, 0, n * sizeof(int));
    HANDLE_ERROR(cudaMemcpy(c_Xa, T, n * sizeof(int), cudaMemcpyHostToDevice));
    // Costs: -1 = unreachable (memset of 0xFF bytes yields -1 for int).
    int *c_Ca;
    HANDLE_ERROR(cudaMalloc((void **)&c_Ca, n * sizeof(int)));
    memset(T, -1, n * sizeof(int));
    T[src] = 0;
    HANDLE_ERROR(cudaMemcpy(c_Ca, T, n * sizeof(int), cudaMemcpyHostToDevice));
    // One kernel launch per BFS level; stop when the frontier is empty.
    int flag = 1;
    do {
        flag = 0;
        BFS_Kernel <<<1, n>>> (c_Va, c_Ea, c_Fa, c_Xa, c_Ca);
        cudaDeviceSynchronize();
        HANDLE_ERROR(cudaMemcpy(T, c_Fa, n * sizeof(int), cudaMemcpyDeviceToHost));
        for (int i = 0; i < n; i++) {
            if (T[i]) {
                flag = 1;
                break;
            }
        }
    } while(flag);
    HANDLE_ERROR(cudaMemcpy(T, c_Ca, n * sizeof(int), cudaMemcpyDeviceToHost));
    for (int i = 0; i < n; i++)
        printf("cost to reach %dth node = %d\n", i, T[i]);
    printf("\nNote: -1 means you can not reach the node from the current starting node\n");
    free(Va);
    free(Ea);
    free(T);
    cudaFree(c_Va);
    cudaFree(c_Ea);
    cudaFree(c_Fa);
    cudaFree(c_Xa);
    cudaFree(c_Ca);
    return 0;
}
1,589
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>

// Tausworthe RNG constants (one set per lane-group of 4).
__constant__ unsigned int shift1[4] = {6, 2, 13, 3};
__constant__ unsigned int shift2[4] = {13, 27, 21, 12};
__constant__ unsigned int shift3[4] = {18, 2, 7, 13};
__constant__ unsigned int offset[4] = {4294967294, 4294967288, 4294967280, 4294967168};

// RNG state shared by all threads of a block (32 slots, one per lane).
// NOTE(review): never seeded/initialised, and updated by racing threads —
// the randInt() output here is only busy-work to keep thread 0 occupied,
// not a usable random stream.
__shared__ unsigned int randStates[32];

// One Tausworthe step; updates z in place and returns the new state.
__device__ unsigned int TausStep(unsigned int &z, int S1, int S2, int S3, unsigned int M)
{
    unsigned int b = (((z << S1) ^ z) >> S2);
    return z = (((z &M) << S3) ^ b);
}

// Advances this lane's shared RNG slot and mixes four neighbouring slots.
__device__ unsigned int randInt()
{
    TausStep(randStates[threadIdx.x&31], shift1[threadIdx.x&3], shift2[threadIdx.x&3],shift3[threadIdx.x&3],offset[threadIdx.x&3]);
    return (randStates[(threadIdx.x)&31]^randStates[(threadIdx.x+1)&31]^randStates[(threadIdx.x+2)&31]^randStates[(threadIdx.x+3)&31]);
}

// Demonstrates that __syncthreads() orders shared-memory writes: thread 0
// spins on RNG busy-work before setting shared_int = 1, and the barrier
// guarantees every thread prints 1 afterwards.
__global__ void sync_test(void)
{
    __shared__ int shared_int;
    int count = 0;
    long long timeout = 0;
    if (threadIdx.x == 0) {
        shared_int = 0;
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        // occupy thread0
        // `count` cycles 0..50 forever, so only the timeout guard below
        // actually terminates this loop.
        while (count < 100) {
            for (int i=0; i<200; i++){
                randInt();
            }
            if (++timeout > 1000000) {
                break;
            }
            count++;
            if (count > 50) {
                count = 0;
            }
        }
        shared_int = 1;
    }
    __syncthreads();
    printf("%d\n", shared_int);
}

int main(void)
{
    sync_test<<<1, 4>>>();
    cudaDeviceSynchronize();
    return 0;
}
/* prints:
1
1
1
1
*/
1,590
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>

// Print an m x n matrix of floats, one row per line, %.1f per element.
void print_matrix(float *A,int m,int n)
{
    for(int i =0;i<m;i++)
    {
        for(int j=0;j<n;j++)
            printf("%.1f ",A[i*n+j]);
        printf("\n");
    }
}

// Phase 1: swap adjacent column pairs (col 2k <-> col 2k+1) of the M x N
// input into output. Phase 2: reflect (transpose) output in place.
// Launched as a single block with one thread per column (N threads).
// NOTE(review): the in-place transpose only makes sense when M == N --
// confirm callers always pass a square matrix.
__global__ void swapReflect(float *input, float *output, int M, int N)
{
    int j = threadIdx.x;
    for(int i=0; i<M; i++)
    {
        if(j%2 == 0)
        {
            // FIX: guard j+1 < N so an odd column count no longer reads and
            // writes one element past the end of each row.
            if(j+1 < N)
            {
                output[i*N+j] = input[i*N+j+1];
                output[i*N+j+1] = input[i*N+j];
            }
            else
            {
                // unpaired final column: copy through unchanged
                output[i*N+j] = input[i*N+j];
            }
        }
    }
    __syncthreads();   // phase 1 fully written before any thread transposes
    for(int i = 0; i<j; i++)
    {
        // FIX: was `int val`, which truncated the float values being swapped
        float val = output[j*N + i];
        output[j*N + i] = output[i*N + j];
        output[i*N + j] = val;
    }
}

int main(void)
{
    cudaError_t err = cudaSuccess;
    int t;   // number of test cases
    scanf("%d", &t);
    while(t--)
    {
        int m, n;
        scanf("%d %d", &m, &n);
        size_t size = m*n * sizeof(float);

        // Allocate host input/output
        float *h_input = (float*)malloc(size);
        float *h_output = (float*)malloc(size);

        // FIX: verify BOTH allocations (h_output was previously unchecked)
        if (h_input == NULL || h_output == NULL)
        {
            fprintf(stderr, "Failed to allocate host vectors!\n");
            exit(EXIT_FAILURE);
        }

        // Initialize the host input
        for (int i = 0; i < n*m; ++i)
        {
            scanf("%f",&h_input[i]);
        }

        float *d_input = NULL, *d_output = NULL;
        cudaMalloc((void**)&d_input, size);
        cudaMalloc((void**)&d_output, size);

        // Copy the input to the device. (The old copy of the uninitialized
        // h_output buffer was dropped: the kernel writes every element.)
        cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice);

        dim3 grid(1, 1, 1);
        dim3 block(n, 1, 1);   // one thread per column
        swapReflect<<<grid, block>>>(d_input, d_output, m, n);
        err = cudaGetLastError();
        if(err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        // Copy output of device to host (synchronous, so the kernel is done)
        cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
        print_matrix(h_output, m, n);

        // FIX: free per-test-case buffers (previously leaked every iteration)
        cudaFree(d_input);
        cudaFree(d_output);
        free(h_input);
        free(h_output);
    }
    return 0;
}
1,591
// Compile & run with
// `nvcc histogram.cu`
// `./a.out`

#include <cstdio>
#include <curand_kernel.h>

// uniform integer distribution in CUDA:
// https://stackoverflow.com/questions/43622482

// Fewer than 32 bins will take the same amount of time
// because of the warp size. However, it is not possible
// to confirm it in this task, even with fixed seed,
// since the thread execution order can't be fixed
const uint mult = 16;       // number of sampling passes
const uint n_bins = 32;     // histogram bins
const uint n_blocks = 256;
const uint n_threads = 256;

// Seed one curand generator state per thread (fixed seed, distinct sequences).
__global__ void setupExperiment(curandState * state)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(1234, id, 0, &state[id]);
}

// if curand_uniform is called several times, then generator
// state can be read once and used in local memory, then
// stored back into global memory (see CURAND documentation)
//
// Each block accumulates a private shared-memory histogram of one random
// sample per thread, then adds it into its own slice of `buffer`.
__global__ void smemAtomics(curandState * state, uint * buffer)
{
    __shared__ uint sdata[n_bins];
    int i = blockIdx.x;
    int j = threadIdx.x;
    if (j < n_bins) sdata[j] = 0;
    // FIX: without this barrier, threads with j >= n_bins could increment
    // bins before they are zeroed (shared memory starts uninitialized).
    __syncthreads();

    int z = i * blockDim.x + j;
    uint sample;
    // curand_uniform returns a value in (0, 1], so n_bins * u can be exactly
    // n_bins; clamp to keep the shared-memory index in range.
    sample = n_bins * curand_uniform(&state[z]);
    if (sample >= n_bins) sample = n_bins - 1;
    atomicAdd(&(sdata[sample]), 1);
    __syncthreads();
    // Each block owns its own n_bins-wide slice of buffer: no races here.
    if (j < n_bins) buffer[i * n_bins + j] += sdata[j];
}

// not so many summands to apply sum reduction
// Fold all per-block partial histograms into the first n_bins entries.
__global__ void histsPileUp(uint * buffer)
{
    uint j = threadIdx.x;
    for (uint i=1; i<n_blocks; ++i)
        buffer[j] += buffer[j + i * n_bins];
}

////////////////////////// MAIN /////////////////////////////////////
int main()
{
    curandState * devStates;
    cudaMalloc(&devStates, n_threads * n_blocks * sizeof(curandState));

    uint * hist;
    cudaMallocManaged(&hist, n_blocks * n_bins * sizeof(uint));
    cudaMemset(hist, 0, n_blocks * n_bins * sizeof(uint));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    //-------------------------------
    setupExperiment<<<n_blocks, n_threads>>>(devStates);
    for (uint k=0; k<mult; ++k)
        smemAtomics<<<n_blocks, n_threads>>>(devStates, hist);
    histsPileUp<<<1, n_bins>>>(hist);
    //-------------------------------
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // also guarantees all kernels finished

    float elapsed_time(0);
    cudaEventElapsedTime(&elapsed_time, start, stop);

    printf(" .:General Info:.\n");
    printf("data size: %i\nnumber of bins: %i\n\n", mult * n_blocks * n_threads, n_bins);
    printf("Elapsed time: %f ms\n", elapsed_time);
    printf("Results are written to 'output.bin'\n");

    // FIX: check fopen before writing
    FILE * cfout = fopen("output.bin", "wb");
    if (cfout != NULL)
    {
        fwrite(hist, sizeof(uint), n_bins, cfout);
        fclose(cfout);
    }

    // FIX: release resources that were previously leaked
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(devStates);
    cudaFree(hist);
}
1,592
#include <stdio.h>

// Query device 0 and print a few of its basic properties.
int main(void)
{
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);

    printf("Name: %s\n", props.name);
    printf("Total mem: %luMB\n", props.totalGlobalMem/1024/1024);
    printf("Max threads per block: %d\n", props.maxThreadsPerBlock);
    printf("Single to double perf: %d\n", props.singleToDoublePrecisionPerfRatio);
    return 0;
}
1,593
/* symbol.cu */
/****************************************************************************/
/*                                                                          */
/*  (C) 2010 Texas Advanced Computing Center.                               */
/*                                                                          */
/*  For information, contact Frank Willmore: willmore@tacc.utexas.edu       */
/*                                                                          */
/*  Shareable in accordance with TACC and University of Texas policies.     */
/*                                                                          */
/****************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

// 16 bytes of device-resident storage, addressed by symbol name from the
// host via cudaMemcpyToSymbol / cudaMemcpyFromSymbol.
__device__ char d_data_array[16];

// One thread per byte: each thread increments its own element.
__global__ void calculate()
{
    int idx;
    idx = threadIdx.x;
    d_data_array[idx]++;
}

int main(int argc, char* argv[])
{
    int i;
    FILE *fptr;
    char h_data_array[16];
    size_t size = sizeof(h_data_array);
    // FIX: %zu is the correct conversion for size_t (was %d)
    printf("size = %zu\n", size);

    // generate an array with random data, then copy it to the device
    fptr = fopen("/dev/urandom", "r");
    assert(fptr != NULL);
    // FIX: fread's result was previously ignored
    size_t nread = fread(h_data_array, size, 1, fptr);
    assert(nread == 1);
    fclose(fptr);

    for (i=0; i< 16; i++) printf("[%2d] = \t%d\n", i, h_data_array[i]);

    cudaError_t r = cudaMemcpyToSymbol(d_data_array, h_data_array, size, 0, cudaMemcpyHostToDevice);
    assert(r == cudaSuccess);

    dim3 dimGrid(1);
    dim3 dimBlock(16);
    calculate<<< dimGrid, dimBlock >>>();
    // FIX: the old code re-asserted the stale `r` from before the launch,
    // so kernel errors were never detected. Check the launch, then block
    // until the device has completed and check execution.
    r = cudaGetLastError();
    assert(r == cudaSuccess);
    r = cudaDeviceSynchronize();
    assert(r == cudaSuccess);

    r = cudaMemcpyFromSymbol(h_data_array, d_data_array, size, 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);

    // every byte should now be the original value + 1 (mod 256)
    for (i=0; i< 16; i++) printf("[%2d] = \t%d\n", i, h_data_array[i]);
    return 0;
}
1,594
#include <stdio.h>
#include <fstream>
//#include <iostream>
#include <string.h>
//#include <vector>
#include <stdlib.h>
//#include <unistd.h>
//#include <time.h>
#include <cuda.h>
//#include <mpi.h>

#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word

//define for sha256
// 64-bit message length kept as two 32-bit words: add c to a, carrying into b.
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
// Standard SHA-256 logical functions (FIPS 180-4 naming).
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
// NOTE(review): GPUassert is not defined in this file; GPUerrchk is unused
// here but would fail to compile if invoked -- confirm it exists elsewhere.
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }

// Streaming SHA-256 context: 64-byte block buffer, bytes currently buffered,
// total message bit length (two 32-bit halves), and the 8-word hash state.
typedef struct {
    uchar data[64];
    uint datalen;
    uint bitlen[2];
    uint state[8];
} SHA256_CTX;

// SHA-256 round constants (first 32 bits of the fractional parts of the
// cube roots of the first 64 primes).
__constant__ uint k[64] = {
    0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
    0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
    0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
    0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
    0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
    0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
    0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
    0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};

// NOTE(review): `answer` is never referenced in this file -- presumably a
// host-set target digest; confirm whether it is still needed.
__constant__ uchar answer[32];

//==============cuda kernel=====================================
// Compress one 64-byte block `data` into ctx->state (SHA-256 compression).
__device__ void sha256_transform(SHA256_CTX *ctx, uchar *data){
    uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];

    // Message schedule: first 16 words big-endian from the block,
    // remaining 48 expanded.
    for (i=0,j=0; i < 16; ++i, j += 4)
        m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
    for ( ; i < 64; ++i)
        m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];

    a = ctx->state[0];
    b = ctx->state[1];
    c = ctx->state[2];
    d = ctx->state[3];
    e = ctx->state[4];
    f = ctx->state[5];
    g = ctx->state[6];
    h = ctx->state[7];

    // 64 rounds of the compression function.
    for (i = 0; i < 64; ++i) {
        t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
        t2 = EP0(a) + MAJ(a,b,c);
        h = g;
        g = f;
        f = e;
        e = d + t1;
        d = c;
        c = b;
        b = a;
        a = t1 + t2;
    }

    ctx->state[0] += a;
    ctx->state[1] += b;
    ctx->state[2] += c;
    ctx->state[3] += d;
    ctx->state[4] += e;
    ctx->state[5] += f;
    ctx->state[6] += g;
    ctx->state[7] += h;
}

// Full SHA-256 of `len` bytes of `data` into the 32-byte `hash`
// (init + update + final in one call, all on the device).
__device__ void sha256(SHA256_CTX *ctx, uchar *data, uchar *hash, int len){
    //init sha256 data structure
    ctx->datalen = 0;
    ctx->bitlen[0] = 0;
    ctx->bitlen[1] = 0;
    ctx->state[0] = 0x6a09e667;
    ctx->state[1] = 0xbb67ae85;
    ctx->state[2] = 0x3c6ef372;
    ctx->state[3] = 0xa54ff53a;
    ctx->state[4] = 0x510e527f;
    ctx->state[5] = 0x9b05688c;
    ctx->state[6] = 0x1f83d9ab;
    ctx->state[7] = 0x5be0cd19;

    //update: buffer bytes, compressing each time a 64-byte block fills
    uint i;
    //uint len = 5; //need to fix!!
    for (i=0; i < len; ++i) {
        ctx->data[ctx->datalen] = data[i];
        ctx->datalen++;
        if (ctx->datalen == 64) {
            sha256_transform(ctx,ctx->data);
            DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],512);
            ctx->datalen = 0;
        }
    }

    //final: pad with 0x80 then zeros so the length fits in the last 8 bytes
    i = ctx->datalen;
    if (ctx->datalen < 56) {
        ctx->data[i++] = 0x80;
        while (i < 56)
            ctx->data[i++] = 0x00;
    }
    else {
        // not enough room for the length: pad, compress, start a fresh block
        ctx->data[i++] = 0x80;
        while (i < 64)
            ctx->data[i++] = 0x00;
        sha256_transform(ctx,ctx->data);
        memset(ctx->data,0,56);
    }

    //par here
    // Append the total bit length, big-endian, in bytes 56..63.
    DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],ctx->datalen * 8);
    ctx->data[63] = ctx->bitlen[0];
    ctx->data[62] = ctx->bitlen[0] >> 8;
    ctx->data[61] = ctx->bitlen[0] >> 16;
    ctx->data[60] = ctx->bitlen[0] >> 24;
    ctx->data[59] = ctx->bitlen[1];
    ctx->data[58] = ctx->bitlen[1] >> 8;
    ctx->data[57] = ctx->bitlen[1] >> 16;
    ctx->data[56] = ctx->bitlen[1] >> 24;
    sha256_transform(ctx,ctx->data);

    //we can paralized at here
    // Serialize the 8 state words into the 32-byte digest, big-endian.
    for (i=0; i < 4; ++i) {
        hash[i] = (ctx->state[0] >> (24-i*8)) & 0x000000ff;
        hash[i+4] = (ctx->state[1] >> (24-i*8)) & 0x000000ff;
        hash[i+8] = (ctx->state[2] >> (24-i*8)) & 0x000000ff;
        hash[i+12] = (ctx->state[3] >> (24-i*8)) & 0x000000ff;
        hash[i+16] = (ctx->state[4] >> (24-i*8)) & 0x000000ff;
        hash[i+20] = (ctx->state[5] >> (24-i*8)) & 0x000000ff;
        hash[i+24] = (ctx->state[6] >> (24-i*8)) & 0x000000ff;
        hash[i+28] = (ctx->state[7] >> (24-i*8)) & 0x000000ff;
    }
}

// Iterated hash: sha256(data) then re-hash the 32-byte digest (round-1) more
// times, i.e. `round` applications in total.
__device__ void sha256_hash(SHA256_CTX *ctx, uchar *data, uchar *hash, int len, int round){
    sha256(ctx, data, hash, len);
    while(round > 1){
        sha256(ctx,hash,hash,32);
        round --;
    }
}

// Byte-wise comparison of two buffers; true iff the first `len` bytes match.
__device__ bool my_strcmp(uchar *str_a, uchar *str_b, uint len){
    for(int i=0; i<len; i++){
        if(str_a[i] != str_b[i])
            return false;
    }
    return true;
}

// One thread per candidate password. pwarray is all candidates concatenated;
// pwcount[idx] is the length of candidate idx. Each thread hashes its
// candidate 10000 times and, on a match against any of the 5 target digests,
// copies the plaintext into `result`.
// NOTE(review): the device-side malloc is never freed, the thread count must
// match the number of candidates (no bounds check on idx), and the winning
// memcpy uses pwcount[0] rather than pwcount[idx] -- verify these are
// intentional for the fixed launch configuration used below.
__global__ void sha256_wrap(uchar *pwarray, uchar *target, int* pwcount, uchar *result){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    uchar* data = (uchar*)malloc(pwcount[idx]*sizeof(uchar));
    SHA256_CTX ctx;// = new SHA256_CTX;
    uchar hash[32];
    int round = 10000, count = 0;

    // Offset of this thread's candidate = sum of all preceding lengths.
    for(int i=0; i<idx; i++){
        count += pwcount[i];
    }
    memcpy(data,&pwarray[count],pwcount[idx]*sizeof(uchar));
    sha256_hash(&ctx,data,hash,pwcount[idx],round);

    // Compare against each of the five 32-byte target digests.
    for (int i=0; i<5;i++){
        if(my_strcmp(hash,&target[32*i],32)){
            memcpy(result,data,pwcount[0]*sizeof(uchar));
        }
    }
}

//====================================================================
// Print a 32-byte digest as 64 hex characters.
void print_hash(unsigned char hash[]){
    int idx;
    for (idx=0; idx < 32; idx++)
        printf("%02x",hash[idx]);
    printf("\n");
}

// Host driver: uploads candidates, targets and lengths, launches the
// cracking kernel, and prints the recovered password if any.
// NOTE(review): the `result` allocation leaks, dev_pwarray/dev_pwcount are
// never freed (only partly mitigated by cudaDeviceReset), and reading the
// password with strlen assumes the stored plaintext is NUL-terminated --
// confirm against the candidate file format.
extern "C"
void hash_pairing(uchar *pwarray, uchar *target, int *pwcount, int num){
    uchar *dev_pwarray, *dev_target, *dev_result;
    uchar *result = new uchar[32];
    int *dev_pwcount;

    cudaMalloc((void**)&dev_pwarray,strlen((const char*)pwarray));
    cudaMemcpy((void*)dev_pwarray, pwarray, strlen((const char*)pwarray),cudaMemcpyHostToDevice);

    cudaMalloc((void**)&dev_target,160*sizeof(uchar));
    cudaMemcpy((void*)dev_target, target, 160*sizeof(uchar),cudaMemcpyHostToDevice);

    cudaMalloc((void**)&dev_result, 32*sizeof(uchar));
    // cudaMemcpy((void*)dev_result, result, 32*sizeof(uchar), cudaMemcpyHostToDevice);

    cudaMalloc((void**)&dev_pwcount,num*sizeof(int));
    cudaMemcpy((void*)dev_pwcount,pwcount,num*sizeof(int),cudaMemcpyHostToDevice);

    // Fixed launch shape: 55 blocks x 1024 threads.
    dim3 DimBlock(1024,1);
    dim3 DimGrid(55,1);

    sha256_wrap <<< DimGrid, DimBlock >>> (dev_pwarray, dev_target, dev_pwcount, dev_result);
    cudaDeviceSynchronize();

    cudaMemcpy((void*)result, dev_result, 32*sizeof(uchar), cudaMemcpyDeviceToHost);
    if(strlen((const char*)result)!=0)
        printf("password: %s ", result);
    memset(result,0,strlen((const char*) result));

    cudaDeviceReset();
    //cudaFree(dev_pwarray);
    cudaFree(dev_target);
    cudaFree(dev_result);
}
1,595
#include <stdio.h>

const int N = 2048;

// Element-wise vector addition c = a + b over N elements using a
// grid-stride loop, so any grid/block shape covers the whole array.
__global__ void add_complex(int *a , int *b , int *c)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride)
    {
        c[idx] = a[idx] + b[idx];
    }
}

int main (void)
{
    int a[N], b[N], c[N];

    // a[i] = -i and b[i] = i*i, so the sum should be i*i - i.
    for (int i = 0; i < N; ++i)
    {
        a[i] = -i;
        b[i] = i*i;
    }

    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, N*sizeof(int));
    cudaMalloc((void **)&dev_b, N*sizeof(int));
    cudaMalloc((void **)&dev_c, N*sizeof(int));

    cudaMemcpy (dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);

    // Time the kernel with a pair of CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    printf("Results for <<<%d, %d>>>:\n", (N+127)/128, 128);

    cudaEventRecord(start, 0);
    add_complex<<<(N+127)/128, 128>>>(dev_a, dev_b, dev_c);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float worktime;
    cudaEventElapsedTime(&worktime, start, stop);

    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);

    // Spot-check the first and last elements, then report timing.
    printf("%d + %d = %d\n", a[0], b[0], c[0]);
    printf("%d + %d = %d\n", a[N - 1], b[N - 1], c[N - 1]);
    printf("Time = %f ms \n", worktime);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    system("pause");
    return 0 ;
}
1,596
/*
 * CUDA version by chengbin hu U#60715820
 * Date 06/20/2015
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
//#include <cuda.h>
#include <cuda_runtime.h>

#define BOX_SIZE 23000 /* size of the data box on one dimension */

/* descriptors for single atom in the tree */
typedef struct atomdesc {
    double x_pos;
    double y_pos;
    double z_pos;
} atom;

typedef struct hist_entry{
    //float min;
    //float max;
    unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;

bucket * histogram;    /* list of all buckets in the histogram */
long long PDH_acnt;    /* total number of data points */
int num_buckets;       /* total number of buckets in the histogram */
double PDH_res;        /* value of w */
atom * atom_list;      /* list of all data points */

/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;

/* distance of two points in the atom_list */
double p2p_distance(int ind1, int ind2) {
    double x1 = atom_list[ind1].x_pos;
    double x2 = atom_list[ind2].x_pos;
    double y1 = atom_list[ind1].y_pos;
    double y2 = atom_list[ind2].y_pos;
    double z1 = atom_list[ind1].z_pos;
    double z2 = atom_list[ind2].z_pos;

    return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}

/* brute-force SDH solution in a single CPU thread:
   every unordered pair (i, j), i < j, contributes one count to the
   bucket floor(distance / w). */
int PDH_baseline() {
    int i, j, h_pos;
    double dist;

    for(i = 0; i < PDH_acnt; i++) {
        for(j = i+1; j < PDH_acnt; j++) {
            dist = p2p_distance(i,j);
            h_pos = (int) (dist / PDH_res);
            histogram[h_pos].d_cnt++;
        }
    }
    return 0;
}

/* device function to calculate Euclidean distance from explicit coordinates */
__device__ double d_p2p_distance(double x1, double x2, double y1, double y2, double z1, double z2) {
    return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}

/* chengbin: kernel function to calculate the histogram.
 *
 * Tiled all-pairs histogram: each block loads its "anchor" tile of points
 * into shared memory (ix1/iy1/iz1), counts intra-tile pairs, then walks
 * roughly half the other tiles (loaded into ix2/iy2/iz2) so every
 * unordered pair of blocks is processed exactly once. Per-block counts
 * accumulate into a dynamically-sized shared histogram p_hist (the third
 * launch parameter must be sizeof(bucket)*n_buckets), which is flushed to
 * the global histogram with atomics at the end.
 *
 * PRECONDITION: blockDim.x <= 256 (the static tile arrays are 256 wide);
 * main() enforces this.
 */
__global__ void D_PDH_baseline(double * x,double *y, double * z, bucket * hist, long long PDH_acount, double PDH_w, int n_buckets){
    /* shared tiling input; tiles are fixed at 256, the maximum block size
       this kernel supports */
    __shared__ double ix1[256];
    __shared__ double iy1[256];
    __shared__ double iz1[256];
    __shared__ double ix2[256];
    __shared__ double iy2[256];
    __shared__ double iz2[256];
    /*shared ouput*/
    extern __shared__ unsigned long long p_hist[];

    int i, j, d_pos,ti,k;
    //input[threadIdx.x]=atomlist[];
    int gd =gridDim.x;
    int bd = blockDim.x;
    int bdx = blockIdx.x;
    ti = threadIdx.x;
    i = bdx * bd + ti;   /* this thread's global point index */

    /* initialize the output histogram (strided so any bd covers n_buckets) */
    for(j=ti;j<n_buckets;j+=bd)p_hist[j]=0;

    //copy the anchor tile data to ix1,iy1,iz1 according to i
    if(i<PDH_acount){
        ix1[ti] = x[i];
        iy1[ti] = y[i];
        iz1[ti] = z[i];}
    //ix2[ti] = x[i];
    //iy2[ti] = y[i];
    //iz2[ti] = z[i];
    __syncthreads();   /* tile and p_hist zeroing complete for the whole block */

    //calcute the points within one block.
    double dist;
    int lastblock = gd-1;
    int lastblocklength = PDH_acount - bd*(gd-1);  /* points in the ragged last tile */
    if(bdx<lastblock)
    {
        /* full tile: pair each point with every later point in the tile */
        for(j=ti+1; j<bd;j++)
        {
            dist = d_p2p_distance(ix1[ti],ix1[j],iy1[ti],iy1[j],iz1[ti],iz1[j]);
            d_pos = (int) (dist / PDH_w);
            atomicAdd(&(p_hist[d_pos]),1);
        }
        __syncthreads();
    }
    else
    {
        /* last (possibly partial) tile: same, limited to valid points */
        if(i<PDH_acount)
        {
            for(j=ti+1; j<lastblocklength;j++)
            {
                dist = d_p2p_distance(ix1[ti],ix1[j],iy1[ti],iy1[j],iz1[ti],iz1[j]);
                d_pos = (int) (dist / PDH_w);
                atomicAdd(&(p_hist[d_pos]),1);
            }
        }
        __syncthreads();
    }
    __syncthreads();

    //calcute the points between blocks.
    /* Each block pairs its anchor tile with the next ceil(gd/2)-1 tiles
       (cyclically), so every unordered block pair is handled once. The
       branch on j is uniform within a block, so the __syncthreads() calls
       inside it are safe. */
    int cycle = ceil(gd/2.0);//becareful the last block
    for(k=1;k<cycle;k++)//caculate points between blocks
    {
        j = (bdx+k)%gd;
        if(j<lastblock) // j is not the last block
        {
            ix2[ti] = x[j* bd + ti];
            iy2[ti] = y[j* bd + ti];
            iz2[ti] = z[j* bd + ti];
            __syncthreads();
            if(i<PDH_acount)
            {
                for(int m = 0; m<bd; m++)
                {
                    dist = d_p2p_distance(ix1[ti],ix2[m],iy1[ti],iy2[m],iz1[ti],iz2[m]);
                    d_pos = (int) (dist / PDH_w);
                    atomicAdd(&(p_hist[d_pos]),1);
                }
            }
            __syncthreads();
        }
        else //J is the last block
        {
            if(ti<lastblocklength)
            {
                ix2[ti] = x[j* bd + ti];
                iy2[ti] = y[j* bd + ti];
                iz2[ti] = z[j* bd + ti];
            }
            __syncthreads();
            if(i<PDH_acount)
            {
                for(int m = 0; m<lastblocklength; m++)
                {
                    dist = d_p2p_distance(ix1[ti],ix2[m],iy1[ti],iy2[m],iz1[ti],iz2[m]);
                    d_pos = (int) (dist / PDH_w);
                    atomicAdd(&(p_hist[d_pos]),1);
                }
            }
            __syncthreads();
        }
    }//last half cycle for gridDim.x%2==0
    /* With an even block count, the "middle" pairing would otherwise be
       done twice; only the lower half of the blocks performs it. The outer
       branch on bdx is uniform per block, so the barriers remain safe. */
    if(gd%2==0)
    {
        if(bdx<gd/2)
        {
            j = (bdx+cycle)%gd;
            if(j<lastblock) // j is not the last block
            {
                ix2[ti] = x[j* bd + ti];
                iy2[ti] = y[j* bd + ti];
                iz2[ti] = z[j* bd + ti];
                __syncthreads();
                if(i<PDH_acount)
                {
                    for(int m = 0; m<bd; m++)
                    {
                        dist = d_p2p_distance(ix1[ti],ix2[m],iy1[ti],iy2[m],iz1[ti],iz2[m]);
                        d_pos = (int) (dist / PDH_w);
                        atomicAdd(&(p_hist[d_pos]),1);
                    }
                }
                __syncthreads();
            }
            else //J is the last block
            {
                if(ti<lastblocklength)
                {
                    ix2[ti] = x[j* bd + ti];
                    iy2[ti] = y[j* bd + ti];
                    iz2[ti] = z[j* bd + ti];
                }
                __syncthreads();
                if(i<PDH_acount)
                {
                    for(int m = 0; m<lastblocklength; m++)
                    {
                        dist = d_p2p_distance(ix1[ti],ix2[m],iy1[ti],iy2[m],iz1[ti],iz2[m]);
                        d_pos = (int) (dist / PDH_w);
                        atomicAdd(&(p_hist[d_pos]),1);
                    }
                }
                __syncthreads();
            }
        }
    }
    __syncthreads();

    /* flush the block-private histogram into the global one */
    for(j=ti;j<n_buckets;j+=bd)atomicAdd(&(hist[j].d_cnt),p_hist[j]);
}

/* zero the device histogram, one thread per bucket */
__global__ void D_initialize(bucket * h, int n_buckets){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<n_buckets)
        h[i].d_cnt=0;
}

/* set a checkpoint and show the (natural) running time in seconds */
double report_running_time() {
    long sec_diff, usec_diff;
    gettimeofday(&endTime, &Idunno);
    sec_diff = endTime.tv_sec - startTime.tv_sec;
    usec_diff= endTime.tv_usec-startTime.tv_usec;
    if(usec_diff < 0) {
        sec_diff --;
        usec_diff += 1000000;
    }
    printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
    return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}

/*
    print the counts in all buckets of the histogram
    chengbin: changed to print specific histogram from argument.
    When ident == 1, also prints the per-bucket difference between the
    CPU result (global `histogram`) and the given GPU result.
*/
void output_histogram(bucket * histogram1, int ident){
    int i;
    long long total_cnt = 0;
    for(i=0; i< num_buckets; i++) {
        if(i%5 == 0) /* we print 5 buckets in a row */
            printf("\n%02d: ", i);
        printf("%15lld ", histogram1[i].d_cnt);
        total_cnt += histogram1[i].d_cnt;
        /* we also want to make sure the total distance count is correct */
        if(i == num_buckets - 1)
            printf("\n T:%lld \n", total_cnt);
        else printf("| ");
    }
    if( ident == 1){
        total_cnt = 0;
        printf("\n difference between CPU and GPU \n");
        long long d;
        for(i=0; i< num_buckets; i++) {
            if(i%5 == 0) /* we print 5 buckets in a row */
                printf("\n%02d: ", i);
            d = histogram[i].d_cnt- histogram1[i].d_cnt;
            printf("%15lld ", d);
            total_cnt += d;
            /* we also want to make sure the total distance count is correct */
            if(i == num_buckets - 1)
                printf("\n T:%lld \n", total_cnt);
            else printf("| ");
        }
    }
}

/* usage: <prog> {#of_samples} {bucket_width} {block_size} */
int main(int argc, char **argv)
{
    int block_size;
    if(argc != 4)
    {
        printf("ERROR please input 3 arguments: %s {#of_samples} {bucket_width} {block_size} \n",argv[0]);
        exit(1);
    }
    int i;

    PDH_acnt = atoi(argv[1]);
    PDH_res = atof(argv[2]);
    block_size = atoi(argv[3]);
    /* the kernel's static shared tiles are 256 wide */
    if(block_size > 256)
    {
        printf("TOO BIG BLOCK SIZE ERROR. Due to size limitation of shared memory please use a blocksize <=256\n");
        exit(1);
    }
    //printf("args are %d and %f\n", PDH_acnt, PDH_res);

    /* max possible distance is the box diagonal, BOX_SIZE * sqrt(3) */
    num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
    histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
    atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);

    srand(1);
    /* generate data following a uniform distribution */
    for(i = 0; i < PDH_acnt; i++) {
        atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
        atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
        atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
    }

    /* SoA copies of the coordinates for coalesced device access */
    double * h_x, * h_y, * h_z;//seperate host input array
    double * d_x,* d_y,* d_z;//seperate device input array
    h_x = (double *)malloc(sizeof(double)*PDH_acnt);
    h_y = (double *)malloc(sizeof(double)*PDH_acnt);
    h_z = (double *)malloc(sizeof(double)*PDH_acnt);
    cudaMalloc((void**)&d_x, sizeof(double)*PDH_acnt);
    cudaMalloc((void**)&d_y, sizeof(double)*PDH_acnt);
    cudaMalloc((void**)&d_z, sizeof(double)*PDH_acnt);

    /*move input array to seperate array*/
    for(i = 0; i < PDH_acnt; i++) {
        h_x[i] = atom_list[i].x_pos;
        h_y[i] = atom_list[i].y_pos;
        h_z[i] = atom_list[i].z_pos;
    }

    /* start counting time */
    gettimeofday(&startTime, &Idunno);

    /* call CPU single thread version to compute the histogram */
    PDH_baseline();

    /* check the total running time */
    report_running_time();

    /* print out the histogram */
    output_histogram(histogram, 0);

    /*Chengbin: locate GPU memory for input data array*/
    //atom * d_atom_list;
    //cudaMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);

    /*Chengbin: locate GPU memory for output data array*/
    bucket * d_histogram;
    cudaMalloc((void**)&d_histogram, sizeof(bucket)*num_buckets);
    D_initialize<<<(int)ceil(num_buckets/256.0),256>>>(d_histogram,num_buckets);
    //const int inivalue = 0;
    //cudaMemset(d_histogram,inivalue,sizeof(bucket)*num_buckets);

    /*chengbin: locate GPU results histogrm*/
    bucket * cuda_histogram;
    cuda_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);

    /*chengbin: defince grid and block parameter*/
    dim3 dimGrid((int)ceil(PDH_acnt/(float)block_size),1,1);
    dim3 dimBlock(block_size,1,1);

    //kernel function to take input to generate histogram
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord( start, 0 );

    //cudaMemcpy(d_atom_list,atom_list,sizeof(atom)*PDH_acnt, cudaMemcpyHostToDevice);
    cudaMemcpy(d_x,h_x,sizeof(double)*PDH_acnt, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,h_y,sizeof(double)*PDH_acnt, cudaMemcpyHostToDevice);
    cudaMemcpy(d_z,h_z,sizeof(double)*PDH_acnt, cudaMemcpyHostToDevice);

    /* third launch parameter: dynamic shared memory for the per-block
       histogram p_hist */
    D_PDH_baseline<<<dimGrid,dimBlock,sizeof(bucket)*num_buckets>>>(d_x, d_y, d_z, d_histogram, PDH_acnt, PDH_res, num_buckets);

    /*copy device result back to cuda result*/
    cudaMemcpy(cuda_histogram,d_histogram,sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);

    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop );
    printf( "******** Total Running Time of Kernel = %f sec ******* \n", elapsedTime/1000 );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );

    output_histogram(cuda_histogram, 1);

    cudaFree(d_histogram);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    free(histogram);
    free(atom_list);
    free(h_x);
    free(h_y);
    free(h_z);
    free(cuda_histogram);
    return 0;
}
1,597
#include "includes.h"

// 1024-bin histogram using per-block shared-memory privatization: each
// block counts into a private shared copy, then merges it into the global
// histogram with one atomicAdd per bin.
//
// PRECONDITIONS (implied by the fixed offsets below):
//   - must be launched with exactly 256 threads per block, so the four
//     strided writes at +0/+256/+512/+768 cover all 1024 bins;
//   - every value in `buffer` must be < 1024 (it is used directly as a
//     bin index) -- TODO confirm callers guarantee this;
//   - `histo` must have at least 1024 entries.
__global__ void computeHistogram(unsigned int *buffer, int size, unsigned int *histo )
{
    // Block-private histogram (shared memory is uninitialized, so zero it).
    __shared__ unsigned int temp[1024];
    temp[threadIdx.x + 0] = 0;
    temp[threadIdx.x + 256] = 0;
    temp[threadIdx.x + 512] = 0;
    temp[threadIdx.x + 768] = 0;
    __syncthreads();   // all bins zeroed before any thread counts

    // Grid-stride loop over the input values.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = blockDim.x * gridDim.x;
    while (i < size)
    {
        atomicAdd( &temp[buffer[i]], 1);
        i += offset;
    }
    __syncthreads();   // all counts recorded before flushing to global memory

    // Merge this block's private bins into the global histogram.
    atomicAdd( &(histo[threadIdx.x + 0]), temp[threadIdx.x + 0] );
    atomicAdd( &(histo[threadIdx.x + 256]), temp[threadIdx.x + 256] );
    atomicAdd( &(histo[threadIdx.x + 512]), temp[threadIdx.x + 512] );
    atomicAdd( &(histo[threadIdx.x + 768]), temp[threadIdx.x + 768] );
}
1,598
#include <iostream>
#include <vector>

#define BLOCK_SIZE 256
#define KERNEL_SIZE 9
#define HALF_KERNEL_SIZE 4

// Horizontal 1-D box filter (9-tap mean) over one image row per blockIdx.y,
// staging the row segment plus a HALF_KERNEL_SIZE halo on each side in
// shared memory. Border pixels (within HALF_KERNEL_SIZE of either edge)
// are left unwritten in `result`, as in the original implementation.
__global__ void convolution_1d_x_shared_memory( float *const input_image, const int width, const int height, float* result)
{
    // Segment of the row plus left/right halos.
    __shared__ float row_data[BLOCK_SIZE + 2 * HALF_KERNEL_SIZE];

    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    const int row_offset = width * blockIdx.y;
    float *const row_input = &input_image[row_offset];

    // --- Load phase -------------------------------------------------------
    // FIX: previously the whole body (load + barrier + compute) was guarded
    // by the interior-pixel test, so (a) threads outside the interior never
    // reached __syncthreads() (a barrier in divergent control flow is
    // undefined behavior), and (b) in-image pixels inside the right-edge
    // dead zone never stored their value, leaving neighbors reading
    // uninitialized shared memory. Every in-image thread now loads its
    // pixel, and the barrier is reached by all threads unconditionally.
    if (index < width) {
        row_data[threadIdx.x + HALF_KERNEL_SIZE] = row_input[index];
    }
    // Left halo: first thread of the block fetches the preceding pixels,
    // zero-padding outside the image.
    if (threadIdx.x == 0) {
        for (int kernel_index = 0; kernel_index < HALF_KERNEL_SIZE; kernel_index++) {
            const int src = index - HALF_KERNEL_SIZE + kernel_index;
            row_data[kernel_index] = (src >= 0 && src < width) ? row_input[src] : 0.0f;
        }
    }
    // Right halo: last thread of the block fetches the following pixels.
    else if (threadIdx.x == blockDim.x - 1) {
        for (int kernel_index = 1; kernel_index <= HALF_KERNEL_SIZE; kernel_index++) {
            const int src = index + kernel_index;
            row_data[threadIdx.x + HALF_KERNEL_SIZE + kernel_index] = (src < width) ? row_input[src] : 0.0f;
        }
    }
    __syncthreads();   // reached by ALL threads of the block

    // --- Compute phase ----------------------------------------------------
    // FIX: the interior test was `index > HALF_KERNEL_SIZE`, which skipped
    // the first computable pixel at index == HALF_KERNEL_SIZE.
    if (index >= HALF_KERNEL_SIZE && index < (width - HALF_KERNEL_SIZE)) {
        const float normalizing_factor = 1.0f / static_cast<float>(KERNEL_SIZE);
        float *row_result = &result[row_offset];
        float sum = 0.0f;
        for (int kernel_offset = -HALF_KERNEL_SIZE; kernel_offset <= HALF_KERNEL_SIZE; kernel_offset++) {
            sum += row_data[threadIdx.x + HALF_KERNEL_SIZE + kernel_offset];
        }
        // normalize
        row_result[index] = sum * normalizing_factor;
    }
}

// Host wrapper: one block-row of BLOCK_SIZE threads per image row.
void blur_separable_gpu_shared_memory( float *const input_image, const int width, const int height, float* result)
{
    dim3 block_size(BLOCK_SIZE, 1);
    dim3 grid_size((width + block_size.x - 1) / block_size.x, height);

    std::cout << "Launching kernel " << grid_size.x << " x " << grid_size.y << std::endl;
    convolution_1d_x_shared_memory<<<grid_size, block_size>>>( input_image, width, height, result);
}
1,599
#include <iostream>

#define n 64
#define blockSize 16
#define size_partial_sum blockSize * 2

// Interleaved-addressing reduction (textbook "version 1"): each block sums
// 2*blockDim.x consecutive elements; partials land at even strides and the
// block total ends up in partialSum[0], printed by thread 0.
__global__ void sum_reducer1(int *d_data)
{
    __shared__ int partialSum[size_partial_sum];

    // Each thread loads two elements of this block's 2*blockDim.x chunk.
    partialSum[threadIdx.x] = d_data[threadIdx.x + blockIdx.x * blockDim.x * 2];
    partialSum[threadIdx.x + blockDim.x] = d_data[blockDim.x + threadIdx.x + blockIdx.x * blockDim.x * 2];

    int t = threadIdx.x;
    for (unsigned int stride = 1; stride < blockDim.x * 2; stride *= 2)
    {
        __syncthreads();   // previous level complete before the next combines
        if (t % stride == 0)
            partialSum[t*2] += partialSum[t*2 + stride];
    }

    if (threadIdx.x == 0)
        printf("blockIdx.x = %d, partialSum = %i\n", blockIdx.x, partialSum[0]);
}

// Sequential-addressing reduction ("version 2"): contiguous active threads,
// no shared-memory bank divergence in the combine step.
__global__ void sum_reducer2(int *d_data)
{
    __shared__ int partialSum[size_partial_sum];

    partialSum[threadIdx.x] = d_data[threadIdx.x + blockIdx.x * blockDim.x * 2];
    partialSum[threadIdx.x + blockDim.x] = d_data[blockDim.x + threadIdx.x + blockIdx.x * blockDim.x * 2];

    int t = threadIdx.x;
    for (int stride = blockDim.x; stride >= 1; stride = stride >> 1)
    {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t+stride];
    }

    if (threadIdx.x == 0)
        printf("blockIdx.x = %d, partialSum = %i\n", blockIdx.x, partialSum[0]);
}

int main()
{
    // Input 0..n-1, so each 32-element block prints its own partial sum.
    int h_data[n];
    for (int i = 0; i < n; ++i)
    {
        h_data[i] = i;
    }

    int *d_data;
    cudaMalloc((void **)&d_data, n * sizeof(int));
    cudaMemcpy(d_data, h_data, n * sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimGrid(ceil(n/(blockSize * 2.0)), 1, 1);
    dim3 dimBlock(blockSize, 1, 1);

    //sum_reducer1<<<dimGrid, dimBlock>>>(d_data);
    sum_reducer2<<<dimGrid, dimBlock>>>(d_data);

    // FIX: without this, main could return before the kernel finished and
    // the device-side printf buffer was flushed, producing no output.
    cudaDeviceSynchronize();

    cudaFree(d_data);
    return 0;
}
1,600
/*
   Sums the elements of two vectors.

   Demonstrates the use of a stream together with cudaMallocHost (pinned,
   non-pageable host memory) and asynchronous copies via cudaMemcpyAsync.
   Also uses cudaStreamSynchronize to wait for the whole stream to finish.

   The algorithm splits "tam" elements into "streams_nr" chunks and derives
   "threadsPerGrid" and "blocksPerGrid". The device vectors hold
   threadsPerGrid elements (one chunk).

   If the number of chunks divides the data with a remainder, the last
   grid must adjust for the remainder.
   Worked for tests with 3 chunks and tam 16 or 17 and TPB 2 or 3.

   If the number of threads per block divides with a remainder, the excess
   threads are filtered inside the kernel. Worked in some basic tests.

   To compile: nvcc 03-soma-vet-stream.cu -o 03-soma-vet-stream
   To run: ./03-soma-vet-stream

   NOTE: the vector size and contents are hard-coded in the source.
*/

#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Adds tam elements of vetorA and vetorB into vetorC; iter identifies the
// chunk for the trace printf. Excess threads of the last block are filtered
// by the idx < tam guard.
__global__ void soma(int *vetorA, int *vetorB,int *vetorC, int tam, int iter)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < tam)
    {
        vetorC[idx]=vetorA[idx]+vetorB[idx];
        printf("Iter=%d, Blc=%d, Thread=%d, Tam=%d, VetC[%d]=%d \n", iter, blockIdx.x, threadIdx.x, tam, idx, vetorC[idx]);
    }
}

int main(int argc,char **argv)
{
    int i,*vetorA,*vetorB,*vetorC,threadsPerBlock;
    int *vetorA_d,*vetorB_d,*vetorC_d;

    // Declare and create the CUDA stream
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Vector size (original comment: multiple of 256; small here for tracing)
    int tam = 16; // 16; // 2048;

    // Threads per block
    threadsPerBlock = 2; //2; //256;

    // How many chunks to split the work into.
    // NOTE(review): only a single cudaStream_t is actually created;
    // streams_nr just determines the chunk size.
    int streams_nr = 2;

    // Threads per grid (one grid launch per chunk)
    int threadsPerGrid=tam/streams_nr;
    // e.g. threadsPerGrid = 8 (16/2) or 1024 (2048/2)

    // Blocks per grid, rounded up
    int blocksPerGrid=(threadsPerGrid+threadsPerBlock-1)/threadsPerBlock;
    // e.g. (8+1)/2 -> 4, or (1024+255)/256 -> 4

    // Allocate pinned (non-pageable) host memory for the vectors
    cudaMallocHost((void**)&vetorA,tam*(sizeof(int)));
    cudaMallocHost((void**)&vetorB,tam*(sizeof(int)));
    cudaMallocHost((void**)&vetorC,tam*(sizeof(int)));

    // Allocate the device vectors (one chunk's worth each)
    cudaMalloc((void**)&vetorA_d, threadsPerGrid*(sizeof(int)));
    cudaMalloc((void**)&vetorB_d, threadsPerGrid*(sizeof(int)));
    cudaMalloc((void**)&vetorC_d, threadsPerGrid*(sizeof(int)));

    // Fill the host vectors
    for(i=0;i<tam;i++)
    {
        vetorA[i]=i;
        vetorB[i]=0;
    }

    printf("Host: tam=%d, streams_nr=%d, TPG=%d, BPG=%d, TPB=%d \n", tam, streams_nr, threadsPerGrid, blocksPerGrid, threadsPerBlock);

    // Each iteration advances the window by threadsPerGrid items,
    // which is equivalent to stepping through the streams_nr chunks.
    for(i=0; i<tam; i+=threadsPerGrid) //i+=8 //i+=1024
    {
        // If the chunk count does not divide tam evenly, the last chunk
        // processes only the remaining elements, not a full threadsPerGrid.
        if((tam-i)< threadsPerGrid)
            threadsPerGrid = tam - i;

        // Asynchronously copy one chunk of vector A to the device
        cudaMemcpyAsync(vetorA_d,vetorA+i, threadsPerGrid*(sizeof(int)),cudaMemcpyHostToDevice,stream);
        // Asynchronously copy one chunk of vector B to the device
        cudaMemcpyAsync(vetorB_d,vetorB+i, threadsPerGrid*(sizeof(int)),cudaMemcpyHostToDevice,stream);

        // Launch the soma kernel on the stream
        soma <<<blocksPerGrid,threadsPerBlock,0,stream>>> (vetorA_d,vetorB_d,vetorC_d,threadsPerGrid,i);
        // <<<4,2>>> for some of the examples above.

        // Asynchronously copy the chunk's result back to the host
        cudaMemcpyAsync(vetorC+i,vetorC_d,threadsPerGrid*(sizeof(int)),cudaMemcpyDeviceToHost,stream);
    }

    // Wait for all work queued on the stream to finish
    cudaStreamSynchronize(stream);

    // Print the result on the host
    for(i=0;i<tam;i++)
    {
        printf("%d ",vetorC[i]);
    }

    // Free the pinned host buffers
    cudaFreeHost(vetorA);
    cudaFreeHost(vetorB);
    cudaFreeHost(vetorC);

    // Free the device buffers
    cudaFree(vetorA_d);
    cudaFree(vetorB_d);
    cudaFree(vetorC_d);

    // Destroy the stream
    cudaStreamDestroy(stream);
}