serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
11,301
//////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round Final: conjugate gradient solver
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
using namespace std;

//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
	std::string team="new 5+6";
	std::string author_1="Yijia_Wu";
	std::string author_2="Ziyue_Liu";
	std::string author_3="Xiangxin_Kong";
};

//////////////////////////////////////////////////////////////////////////
////This project implements the conjugate gradient solver to solve sparse linear systems
////For the mathematics, please read https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf
////The algorithm we are implementing is in Page 50, Algorithm B.2, the standard conjugate gradient (without a preconditioner)
//////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////
////These are the global variables that define the domain of the problem to solver (for both CPU and GPU)
const int grid_size=256;	////grid size, we will change this value to up to 256 to test your code, notice that we do not have padding elements
const int s=grid_size*grid_size;	////array size (number of unknowns, one per grid cell)
#define I(i,j) ((i)*grid_size+(j))	////2D coordinate -> flat array index (row-major)
#define B(i,j) (i)<0||(i)>=grid_size||(j)<0||(j)>=grid_size	////true when (i,j) lies outside the grid (boundary check)
const bool verbose=false;	////set false to turn off print for x and residual
const int max_iter_num=1000;	////max cg iteration number
const double tolerance=1e-3;	////absolute tolerance on the squared residual norm r^T r
//////////////////////////////////////////////////////////////////////////
////TODO 1: Warm up practice 1 -- (sparse)matrix-(dense)vector multiplication and vector-vector dot product
////calculate mv=M*v, here M is a square n x n matrix stored in CRS (val/col/ptr) format
void MV(/*CRS sparse matrix*/const double* val,const int* col,const int* ptr,/*number of column*/const int n,/*input vector*/const double* v,/*result*/double* mv)
{
	////accumulate each row in a local before storing, so mv can alias nothing and needs no pre-zeroing pass
	for(int i=0;i<n;i++){
		double sum=0.0;
		for(int k=ptr[i];k<ptr[i+1];k++)
			sum+=val[k]*v[col[k]];
		mv[i]=sum;
	}
}

////return the dot product between a and b, both of length n
double Dot(const double* a,const double* b,const int n)
{
	double dp=0.0;
	for(int i=0;i<n;i++)
		dp+=a[i]*b[i];
	return dp;
}

//////////////////////////////////////////////////////////////////////////
////TODO 2: CPU-based conjugate gradient solver (painless CG notes, Algorithm B.2, no preconditioner)
////Solves Ax=b; x holds the initial guess on entry and the solution on exit.
////r,q,d are caller-provided scratch vectors of length n.
void Conjugate_Gradient_Solver(const double* val,const int* col,const int* ptr,const int n,	////A is an n x n sparse matrix stored in CRS format
	double* r,double* q,double* d,	////intermediate variables
	double* x,const double* b,	////x and b
	const int max_iter,const double tol)	////solver parameters
{
	int iter=0;
	double delta_old=0.0;
	double delta_new=0.0;
	double alpha=0.0;
	double beta=0.0;
	double* Ax=new double[n];

	////r=b-Ax
	MV(val,col,ptr,n,x,Ax);
	for(int i=0;i<n;i++) r[i]=b[i]-Ax[i];
	////d=r
	for(int i=0;i<n;i++) d[i]=r[i];
	////delta_new=r^T r
	delta_new=Dot(r,r,n);

	////Here we use the absolute tolerance instead of a relative one, which is slightly different from the notes
	////(fix: removed the per-iteration debug prints that flooded stdout in the original)
	while(iter<max_iter&&delta_new>tol){
		////q=Ad
		MV(val,col,ptr,n,d,q);
		////alpha=delta_new/(d^T q)
		alpha=delta_new/Dot(d,q,n);
		////x=x+alpha*d
		for(int i=0;i<n;i++) x[i]+=alpha*d[i];
		////every 50 iterations recompute the exact residual to fight floating-point drift
		if(iter%50==0&&iter>1){
			////r=b-Ax
			MV(val,col,ptr,n,x,Ax);
			for(int i=0;i<n;i++) r[i]=b[i]-Ax[i];
		}
		else{
			////r=r-alpha*q
			for(int i=0;i<n;i++) r[i]-=alpha*q[i];
		}
		////delta_old=delta_new; delta_new=r^T r; beta=delta_new/delta_old
		delta_old=delta_new;
		delta_new=Dot(r,r,n);
		beta=delta_new/delta_old;
		////d=r+beta*d
		for(int i=0;i<n;i++) d[i]=r[i]+beta*d[i];
		iter++;
	}
	delete[] Ax;	////fix: the original leaked this buffer

	if(iter<max_iter) cout<<"CPU conjugate gradient solver converges after "<<iter<<" iterations with residual "<<(delta_new)<<endl;
	else cout<<"CPU conjugate gradient solver does not converge after "<<max_iter<<" iterations with residual "<<(delta_new)<<endl;
}

//////////////////////////////////////////////////////////////////////////
////TODO 3: GPU-based conjugate gradient solver

////CRS SpMV, one thread per row. Launch with exactly s threads
////(grid_size blocks x grid_size threads) -- there is no bounds guard.
__global__ void MV_GPU(const double* val,const int* col,const int* ptr,const double* v,double* mv)
{
	int tid = blockDim.x*blockIdx.x+threadIdx.x;
	double element = 0.0;
	for (int k = ptr[tid]; k < ptr[tid+1]; k++)
		element += val[k]*v[col[k]];
	mv[tid] = element;
}

////Single-block dot product of two length-s vectors; launch as <<<1,grid_size>>>.
////Each thread accumulates a strided partial sum in a register (coalesced loads),
////then a shared-memory tree reduction (grid_size must be a power of two) leaves the
////total in dp_dev[0].
__global__ void Dot_GPU(double* a,double* b, double* dp)
{
	int tid = threadIdx.x;
	__shared__ double dp_dev[grid_size];
	////fix: accumulate privately in a register; the original zeroed shared memory and
	////issued a useless __syncthreads() on every accumulation step
	double partial = 0.0;
	for(int k=0;k<grid_size;k++)
		partial += a[k*grid_size+tid] * b[k*grid_size+tid];
	dp_dev[tid] = partial;
	__syncthreads();
	for(unsigned int stride=grid_size/2;stride>0;stride/=2)
	{
		if(tid<stride)
			dp_dev[tid] += dp_dev[tid+stride];
		__syncthreads();
	}
	////fix: only one thread writes the result (the original had all 256 threads store it)
	if(tid==0) *dp = dp_dev[0];
}

////result = a + factor*b, elementwise; launch with exactly s threads (no bounds guard).
__global__ void Add_GPU(double* a, double* b, double* result, double factor)
{
	int tid = blockDim.x*blockIdx.x+threadIdx.x;
	result[tid] = a[tid]+factor*b[tid];
}

//////////////////////////////////////////////////////////////////////////
ofstream out;	////result file shared by the CPU and GPU tests

//////////////////////////////////////////////////////////////////////////
////Test functions
////Setup the Poisson test problem -laplace(p)=b, with p=x^2+y^2 and b=-4.
////Boundary conditions are folded into b via the one-ring ghost cells.
////Assembles the s x s five-point-stencil matrix in CRS format.
void Initialize_2D_Poisson_Problem(vector<double>& val,vector<int>& col,vector<int>& ptr,vector<double>& b)
{
	val.clear();
	col.clear();
	ptr.resize(s+1,0);
	b.resize(s,-4.);
	for(int i=0;i<grid_size;i++){
		for(int j=0;j<grid_size;j++){
			int r=I(i,j);
			int nnz_for_row_r=0;
			////set (i,j-1)
			if(!(B(i,j-1))){
				int c=I(i,j-1);
				val.push_back(-1.);
				col.push_back(c);
				nnz_for_row_r++;
			}
			else{	////off-grid neighbor: move its known value to the right-hand side
				double boundary_val=(double)(i*i+(j-1)*(j-1));
				b[r]+=boundary_val;
			}
			////set (i-1,j)
			if(!(B(i-1,j))){
				int c=I(i-1,j);
				val.push_back(-1.);
				col.push_back(c);
				nnz_for_row_r++;
			}
			else{
				double boundary_val=(double)((i-1)*(i-1)+j*j);
				b[r]+=boundary_val;
			}
			////set (i+1,j)
			if(!(B(i+1,j))){
				int c=I(i+1,j);
				val.push_back(-1.);
				col.push_back(c);
				nnz_for_row_r++;
			}
			else{
				double boundary_val=(double)((i+1)*(i+1)+j*j);
				b[r]+=boundary_val;
			}
			////set (i,j+1)
			if(!(B(i,j+1))){
				int c=I(i,j+1);
				val.push_back(-1.);
				col.push_back(c);
				nnz_for_row_r++;
			}
			else{
				double boundary_val=(double)(i*i+(j+1)*(j+1));
				b[r]+=boundary_val;
			}
			////set (i,j) -- diagonal entry
			{
				val.push_back(4.);
				col.push_back(r);
				nnz_for_row_r++;
			}
			ptr[r+1]=ptr[r]+nnz_for_row_r;
		}
	}
}

//////////////////////////////////////////////////////////////////////////
////CPU test function: run the CPU solver, verify its residual, and record timings.
void Test_CPU_Solvers()
{
	vector<double> val;
	vector<int> col;
	vector<int> ptr;
	vector<double> b;
	Initialize_2D_Poisson_Problem(val,col,ptr,b);

	vector<double> x(s,0.);
	vector<double> r(s,0.);
	vector<double> q(s,0.);
	vector<double> d(s,0.);

	auto start=chrono::system_clock::now();

	Conjugate_Gradient_Solver(&val[0],&col[0],&ptr[0],s,
		&r[0],&q[0],&d[0],
		&x[0],&b[0],
		max_iter_num,tolerance);

	auto end=chrono::system_clock::now();
	chrono::duration<double> t=end-start;
	double cpu_time=t.count()*1000.;

	if(verbose){
		cout<<"\n\nx for CG on CPU:\n";
		for(int i=0;i<s;i++){
			cout<<x[i]<<", ";
		}
	}
	cout<<"\n\n";

	////calculate residual r = b - Ax independently of the solver's bookkeeping
	MV(&val[0],&col[0],&ptr[0],s,&x[0],&r[0]);
	for(int i=0;i<s;i++)r[i]=b[i]-r[i];
	double residual=Dot(&r[0],&r[0],s);
	cout<<"\nCPU time: "<<cpu_time<<" ms"<<endl;
	cout<<"Residual for your CPU solver: "<<residual<<endl;
	out<<"R0: "<<residual<<endl;
	out<<"T0: "<<cpu_time<<endl;
}

//////////////////////////////////////////////////////////////////////////
////GPU test function: same CG iteration as the CPU path, with vector updates and
////dot products executed on the device; only the scalars (alpha/beta/delta) round-trip.
void Test_GPU_Solver()
{
	vector<double> val;
	vector<int> col;
	vector<int> ptr;
	vector<double> b;
	Initialize_2D_Poisson_Problem(val,col,ptr,b);

	vector<double> x(s,0.);
	vector<double> r(s,0.);
	vector<double> q(s,0.);
	vector<double> d(s,0.);

	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);

	//////////////////////////////////////////////////////////////////////////
	////TODO 4: copy data host->device, run CG on the GPU, copy x back.
	//////////////////////////////////////////////////////////////////////////
	double* val_dev = 0;
	int* col_dev = 0;
	int* ptr_dev = 0;
	double* b_dev = 0;
	double* x_dev = 0;
	double* r_dev = 0;
	double* q_dev = 0;
	double* d_dev = 0;
	double* Ax_dev = 0;
	double* delta_new_dev=0;
	double* dq_dev=0;

	int iter=0;
	double delta_old=0.0;
	double delta_new=0.0;
	double alpha=0.0;
	double beta=0.0;
	double dq=0.0;
	const int nnz=ptr.back();	////number of nonzeros in the CRS matrix

	cudaMalloc((void**)&val_dev,nnz*sizeof(double));
	cudaMalloc((void**)&col_dev,nnz*sizeof(int));
	cudaMalloc((void**)&ptr_dev,(s+1)*sizeof(int));
	cudaMalloc((void**)&b_dev,s*sizeof(double));
	cudaMalloc((void**)&x_dev,s*sizeof(double));
	cudaMalloc((void**)&r_dev,s*sizeof(double));
	cudaMalloc((void**)&q_dev,s*sizeof(double));
	cudaMalloc((void**)&d_dev,s*sizeof(double));
	cudaMalloc((void**)&Ax_dev,s*sizeof(double));
	cudaMalloc((void**)&delta_new_dev,sizeof(double));
	cudaMalloc((void**)&dq_dev,sizeof(double));

	cudaMemcpy(val_dev,&val[0],nnz*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(col_dev,&col[0],nnz*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(ptr_dev,&ptr[0],(s+1)*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(b_dev,&b[0],s*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(x_dev,&x[0],s*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(r_dev,&r[0],s*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(q_dev,&q[0],s*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(d_dev,&d[0],s*sizeof(double),cudaMemcpyHostToDevice);

	////r=b-Ax0; d=r (via d=r+0*d); delta_new=r^T r
	MV_GPU<<<grid_size,grid_size>>>(val_dev,col_dev,ptr_dev,x_dev,Ax_dev);
	Add_GPU<<<grid_size,grid_size>>>(b_dev,Ax_dev,r_dev,-1.0);
	Add_GPU<<<grid_size,grid_size>>>(r_dev,d_dev,d_dev,0.0);
	Dot_GPU<<<1,grid_size>>>(r_dev,r_dev,delta_new_dev);
	cudaMemcpy(&delta_new,delta_new_dev,sizeof(double),cudaMemcpyDeviceToHost);

	while(iter<max_iter_num && delta_new>tolerance){
		////q=Ad; alpha=delta_new/(d^T q)
		MV_GPU<<<grid_size,grid_size>>>(val_dev,col_dev,ptr_dev,d_dev,q_dev);
		Dot_GPU<<<1,grid_size>>>(d_dev,q_dev,dq_dev);
		cudaMemcpy(&dq,dq_dev,sizeof(double),cudaMemcpyDeviceToHost);
		alpha=delta_new/dq;
		////x=x+alpha*d
		////(fix: the original copied the full x vector back to the host every iteration here)
		Add_GPU<<<grid_size,grid_size>>>(x_dev,d_dev,x_dev,alpha);
		////every 50 iterations recompute the exact residual; otherwise r=r-alpha*q
		if(iter%50==0&&iter>1){
			MV_GPU<<<grid_size,grid_size>>>(val_dev,col_dev,ptr_dev,x_dev,Ax_dev);
			Add_GPU<<<grid_size,grid_size>>>(b_dev,Ax_dev,r_dev,-1.0);
		}
		else{
			Add_GPU<<<grid_size,grid_size>>>(r_dev,q_dev,r_dev,-alpha);
		}
		delta_old=delta_new;
		Dot_GPU<<<1,grid_size>>>(r_dev,r_dev,delta_new_dev);
		cudaMemcpy(&delta_new,delta_new_dev,sizeof(double),cudaMemcpyDeviceToHost);
		beta=delta_new/delta_old;
		////d=r+beta*d
		Add_GPU<<<grid_size,grid_size>>>(r_dev,d_dev,d_dev,beta);
		iter++;
	}
	////fix: the original copied only sizeof(double) bytes (one element) of the solution
	cudaMemcpy(&x[0],x_dev,s*sizeof(double),cudaMemcpyDeviceToHost);

	if(iter<max_iter_num) cout<<"GPU conjugate gradient solver converges after "<<iter<<" iterations with residual "<<(delta_new)<<endl;
	else cout<<"GPU conjugate gradient solver does not converge after "<<max_iter_num<<" iterations with residual "<<(delta_new)<<endl;

	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	////fix: release device memory (the original leaked every allocation)
	cudaFree(val_dev);
	cudaFree(col_dev);
	cudaFree(ptr_dev);
	cudaFree(b_dev);
	cudaFree(x_dev);
	cudaFree(r_dev);
	cudaFree(q_dev);
	cudaFree(d_dev);
	cudaFree(Ax_dev);
	cudaFree(delta_new_dev);
	cudaFree(dq_dev);
	//////////////////////////////////////////////////////////////////////////

	if(verbose){
		cout<<"\n\nx for CG on GPU:\n";
		for(int i=0;i<s;i++){
			cout<<x[i]<<", ";
		}
	}
	cout<<"\n\n";

	////calculate residual r = b - Ax on the host as an independent check
	MV(&val[0],&col[0],&ptr[0],s,&x[0],&r[0]);
	for(int i=0;i<s;i++)r[i]=b[i]-r[i];
	double residual=Dot(&r[0],&r[0],s);
	cout<<"\nGPU time: "<<gpu_time<<" ms"<<endl;
	cout<<"Residual for your GPU solver: "<<residual<<endl;
	out<<"R1: "<<residual<<endl;
	out<<"T1: "<<gpu_time<<endl;
}

int main()
{
	if(name::team=="Team_X"){
		printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
		return 0;
	}
	std::string file_name=name::team+"_competition_final_conjugate_gradient.dat";
	out.open(file_name.c_str());
	if(out.fail()){
		printf("\ncannot open file %s to record results\n",file_name.c_str());
		return 0;
	}

	Test_CPU_Solvers();
	Test_GPU_Solver();

	return 0;
}
11,302
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <time.h>
#include <assert.h>

// Maximum vertex count supported by the fixed-size per-thread arrays in the kernel.
#define MAX_NODES 3000

// Returns the index of the unprocessed vertex (sptSet[v]==0) with the smallest
// tentative distance, or -1 if every vertex has been processed.
__device__ int minDistance(int dist[], int sptSet[], int V)
{
    int min = INT_MAX;
    int min_index = -1;  // fix: was uninitialized in the original

    for (int v = 0; v < V; v++)
        if (sptSet[v] == 0 && dist[v] <= min)
            min = dist[v], min_index = v;

    return min_index;
}

// One thread per source vertex: each thread runs a full sequential Dijkstra from
// its own source over the dense V x V adjacency matrix `graph` (0 == no edge),
// writing its distance row into ansArray[nodes*V .. nodes*V+V-1].
// Requires V <= MAX_NODES (checked on the host before launch).
__global__ void dijkstra(int *graph, int V, int *ansArray)
{
    int nodes = blockDim.x * blockIdx.x + threadIdx.x;
    if (nodes < V)
    {
        int dist[MAX_NODES];    // dist[i]: shortest distance from source `nodes` to i
        int sptSet[MAX_NODES];  // sptSet[i]: 1 once vertex i's distance is finalized

        for (int i = 0; i < V; i++)
            dist[i] = INT_MAX, sptSet[i] = 0;

        // Distance of the source vertex from itself is always 0.
        dist[nodes] = 0;

        for (int count = 0; count < V - 1; count++)
        {
            // Pick the closest not-yet-finalized vertex and finalize it.
            int u = minDistance(dist, sptSet, V);
            if (u < 0) break;  // all reachable vertices processed
            sptSet[u] = 1;

            // Relax edges out of u.
            for (int v = 0; v < V; v++)
                if (!sptSet[v] && graph[u*V+v] && dist[u] != INT_MAX
                        && dist[u] + graph[u*V+v] < dist[v])
                    dist[v] = dist[u] + graph[u*V+v];
        }

        for (int i = 0; i < V; i++)
            ansArray[nodes*V+i] = dist[i];
    }
}

// Builds a random symmetric n x n weight matrix with zero diagonal.
// Fixed seed keeps the graph reproducible across runs.
__host__ int* initGraf(int n)
{
    srand(13517143);
    int random;
    int *graf = (int *)malloc(n*n*sizeof(int));
    for (int i = 0; i < n; i++) {
        for (int j = i; j < n; j++) {
            random = rand() % 100;
            if (i == j) {
                graf[i*n + j] = 0;
            } else {
                graf[i*n + j] = random;
                graf[j*n + i] = random;
            }
        }
    }
    return graf;
}

// Usage: <prog> <threads_per_block> <node_count>
int main(int argc, char *argv[])
{
    // fix: the original indexed argv[1]/argv[2] without checking argc
    if (argc < 3) {
        fprintf(stderr, "usage: %s <threads_per_block> <node_count>\n", argv[0]);
        return 1;
    }
    int thread_count = strtol(argv[1], NULL, 10);
    int node_count = strtol(argv[2], NULL, 10);
    if (thread_count <= 0 || node_count <= 0 || node_count > MAX_NODES) {
        // fix: guard the fixed-size per-thread arrays in the kernel
        fprintf(stderr, "node_count must be in [1,%d] and thread count positive\n", MAX_NODES);
        return 1;
    }

    int *graf, *answerMatrix, *deviceGraf, *deviceResult;
    graf = initGraf(node_count);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    answerMatrix = (int *)malloc(node_count*node_count*sizeof(int));
    cudaMalloc(&deviceGraf, node_count*node_count*sizeof(int));
    cudaMalloc(&deviceResult, node_count*node_count*sizeof(int));
    cudaMemcpy(deviceGraf, graf, node_count*node_count*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    dijkstra<<<(node_count/thread_count)+1, thread_count>>>(deviceGraf, node_count, deviceResult);
    cudaEventRecord(stop);

    cudaMemcpy(answerMatrix, deviceResult, node_count*node_count*sizeof(int), cudaMemcpyDeviceToHost);

    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // fix: 1 ms = 1000 microseconds (the original multiplied by 100)
    printf("Time elapsed = %f microseconds\n", milliseconds*1000);

    printf("Answer Graf\n");
    for (int i = 0; i < node_count; i++) {
        for (int j = 0; j < node_count; j++)
            printf("%d\t", answerMatrix[i*node_count + j]);
        printf("\n");
    }

    // fix: release all resources (the original leaked host memory and events)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(deviceResult);
    cudaFree(deviceGraf);
    free(answerMatrix);
    free(graf);
}
11,303
#include "includes.h"

// Tiled matrix multiplication: d_P (m x y) = d_M (m x n) * d_N (n x y).
// Launch with TILE_WIDTH x TILE_WIDTH thread blocks covering d_P.
// Out-of-range tile elements are loaded as zero so arbitrary m, n, y are supported.
__global__ void kernelMultMatTiled(float *d_M, float *d_N, float *d_P, int m, int n, int y)
{
    // Shared-memory tiles of size TILE_WIDTH x TILE_WIDTH.
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;
    float Pvalue = 0.0f;

    // fix: iterate over ceil(n / TILE_WIDTH) tiles -- the original used n / TILE_WIDTH,
    // which silently dropped the final partial tile whenever n is not a multiple of
    // TILE_WIDTH (the in-loop bounds checks show partial tiles were intended).
    int numTiles = (n + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int i = 0; i < numTiles; i++) {
        // Load one element of d_M into the tile, zero-padding outside the matrix.
        if ((i*TILE_WIDTH + tx) < n && row < m) {
            Mds[ty][tx] = d_M[row*n + (i*TILE_WIDTH + tx)];
        } else {
            Mds[ty][tx] = 0.0f;
        }
        // Load one element of d_N into the tile, zero-padding outside the matrix.
        if ((i*TILE_WIDTH + ty) < n && col < y) {
            Nds[ty][tx] = d_N[(i*TILE_WIDTH + ty)*y + col];
        } else {
            Nds[ty][tx] = 0.0f;
        }
        __syncthreads();  // tiles fully loaded before anyone reads them

        // Accumulate the partial dot product contributed by this tile pair.
        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();  // all reads done before the tiles are overwritten
    }

    // Write the result only for threads that map to a real element of d_P.
    if (row < m && col < y)
        d_P[(row*y) + col] = Pvalue;
}
11,304
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>

#include "cs_cuda.h"
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_motion_detect.h"

// #define CUDA_DBG
// #define CUDA_DBG1

// Each record starts with three index slots (t/v/h) followed by the data payload.
#define NUM_OF_HVT_INDEX 3

// Gather kernel: for every record in tdp (whose first three ints are the t/v/h
// shift of that record), copy the shifted cube out of the source volume fdp into
// the record's payload. Grid-stride style loop driven by CUDA_MAX_THREADS.
// cx/cxy_size describe the source volume (row length / plane size);
// mx/mxy_size describe the cube being extracted.
__global__ void d_do_motion_detection ( int *fdp, int *tdp,
	int tbl_size,
	int record_size,	// do include the 3 indexes
	int cx, int cxy_size,
	int mx, int mxy_size )
{
	int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int ot_idx, i, j, from, no_idx_size, *dp, h, v, t, tt, hh, vv ;

	ot_idx = t_idx ;
	while ( t_idx < tbl_size )
	{
		no_idx_size = record_size - NUM_OF_HVT_INDEX ;
		i = t_idx / no_idx_size ;	// which record this element belongs to
		dp = tdp + ( i * record_size ) ;
		t = *dp++ ;	// temporal shift of this record
		v = *dp++ ;	// vertical shift
		h = *dp++ ;	// horizontal shift; dp now points at the payload
		t_idx %= no_idx_size ;	// offset of this element inside the payload
		tt = t_idx / mxy_size ;	// which block
		j = t_idx % mxy_size ;
		hh = j % mx ;	// which h
		vv = j / mx ;	// which v
		from = ( t + tt ) * cxy_size + ( v + vv ) * cx + h + hh ;
		dp[ t_idx ] = fdp[ from ] ;
		// dp[ t_idx ] = from ;
		ot_idx += CUDA_MAX_THREADS ;
		t_idx = ot_idx ;
	}
}

// do: 1 block at a time ...
// block : the result of do_motion_idx <- edge-detection <- L-selection
// cube : the cube that is going to be moved by all h/v/t units
// Host wrapper: validates sizes, then launches d_do_motion_detection.
// Returns 1 on success, 0 on a size mismatch.
// NOTE(review): cudaThreadSynchronize() is deprecated in modern CUDA
// (cudaDeviceSynchronize() is the replacement) -- left unchanged here.
int h_do_motion_detection ( int *fromp, int *top,
	int tbl_size,	// overall input size ... excludes the 3 indexes
	int record_size,	// includes the 3 indexes
	int blk_x, int blk_xy,
	int cube_x, int cube_xy )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// = ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;

#ifdef CUDA_DBG1
	fprintf(stderr, "%s: f %p t %p tblsize %d rec %d blk %d %d cube %d %d\n",
		__func__, fromp, top, tbl_size, record_size, blk_x, blk_xy,
		cube_x, cube_xy ) ;
#endif

	if (( tbl_size % cube_xy ) || (( record_size - NUM_OF_HVT_INDEX ) % cube_xy ))
	{
		fprintf(stderr, "%s: error size %d cube %d rec %d\n",
			__func__, tbl_size, cube_xy, record_size ) ;
		return ( 0 ) ;
	}

	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;

	d_do_motion_detection <<< nBlocks, nThreadsPerBlock >>> (
		fromp, top, tbl_size, record_size, blk_x, blk_xy,
		cube_x, cube_xy ) ;

	cudaThreadSynchronize() ;

#ifdef CUDA_OBS
	dbg_p_d_data_i("motion_detect", top, tbl_size ) ;
#endif
	return ( 1 ) ;
}

// 3 indexes + real data length == record_length
// Fills the first three slots of each of tbl_size records with that record's
// (t, v, h) shift, derived from the record's linear index.
__global__ void d_do_motion_idx ( int *dp, int tbl_size,
	int record_length, int h_loop, int t_loop, int hv_size )
{
	int *odp, t_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int j ;

	odp = dp ;
	while ( t_idx < tbl_size )
	{
		dp = odp ;
		dp += t_idx * record_length ;

		*dp++ = t_idx / hv_size ;	// temporal
		j = t_idx % hv_size ;
		*dp++ = j / h_loop ;	// vertical
		*dp = j % h_loop ;	// horizontal

		t_idx += CUDA_MAX_THREADS ;
	}
}

// Host wrapper: stamps t/v/h indexes into loopcnt = v*h*t records and reports
// the record index of the unshifted ("origin") position via *orig_idx.
// Returns 1 on success, 0 if the buffer is too small.
int h_do_motion_idx ( int *dp, int total_size,
	int record_length, int h_loop, int v_loop, int t_loop, int *orig_idx )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks, loopcnt ;

	record_length += NUM_OF_HVT_INDEX ;	// 3 indexes .. t/v/h in the beginning ...
	loopcnt = v_loop * h_loop * t_loop ;

	if (( record_length * loopcnt ) > total_size )
	{
		fprintf( stderr, "%s: size needed %d got %d\n", __func__,
			record_length * loopcnt, total_size ) ;
		return ( 0 ) ;
	}

	// nBlocks= ( loopcnt + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( loopcnt, nThreadsPerBlock, &nBlocks ) ;

	d_do_motion_idx <<< nBlocks, nThreadsPerBlock >>> ( dp,
		loopcnt, record_length, h_loop, t_loop, h_loop * v_loop ) ;

	cudaThreadSynchronize() ;

	// center of the (v, h) search window at t == 0 is the unshifted record
	*orig_idx = ( v_loop / 2 ) * h_loop + ( h_loop / 2 ) ;

	return ( 1 ) ;
}

// step one is to get y0-yk
// For every payload element of every record except the origin record, subtract
// the corresponding element of the origin record (op) and take the absolute value.
// NOTE(review): `op += i` mutates op without resetting it each grid-stride pass
// (unlike dp, which is restored from odp) -- threads that loop more than once
// appear to read from a drifting origin pointer; verify against callers.
__global__ void d_do_l1_norm_step1 ( int *dp, int tbl_size, int record_length,
	int *op, int orig )
{
	int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int *odp, i, j ;

	odp = dp ;
	while ( t_idx < tbl_size )
	{
		dp = odp ;
		j = t_idx / record_length ;	// record index
		if ( j != orig )
		{
			i = t_idx % record_length ;	// element offset inside the payload
			op += i ;
			dp = dp + j * ( record_length + NUM_OF_HVT_INDEX ) +
				NUM_OF_HVT_INDEX + i ;
			*dp -= *op ;
			if ( *dp < 0 )
				*dp = -*dp ;	// save a step ... no need to abs()
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// total and record_size does not have the NUM_OF_HVT_INDEX elements
// Host wrapper for step 1: computes |y0 - yk| elementwise against record `orig`.
void h_do_l1_norm_step1( int *dp, int total, int record_size, int orig )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// = ( total + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	int *op ;

	// op points at the payload of the origin record (skip its 3 index slots)
	op = dp + orig * ( record_size + NUM_OF_HVT_INDEX ) + NUM_OF_HVT_INDEX ;

	h_block_adj ( total, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step1 <<< nBlocks, nThreadsPerBlock >>> ( dp, total,
		record_size, op, orig ) ;

	cudaThreadSynchronize() ;
}

// step two is to get L1-norm(sum)
// all row, should be after the abs() is done
// tbl_size is the number of elements for this addition operation
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with valid data, see caller
// One halving pass of a per-record pairwise sum: element j of each record gets
// element (start + j) added into it, for j in [0, cnt).
__global__ void d_do_l1_norm_step2 ( int *dp, int tbl_size, int record_length,
	int start, int cnt )
{
	int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int *odp, *tp, j ;

	odp = dp ;
	while ( t_idx < tbl_size )
	{
		dp = odp ;
		j = t_idx / cnt ;	// record index
		tp = dp + j * record_length ;	// start of this record's payload
		dp = tp + start ;	// upper half being folded down
		j = t_idx % cnt ;	// element offset within the fold
		tp[ j ] += dp [ j ] ;
		t_idx += CUDA_MAX_THREADS ;
	}
}

// step 1.1 should be to do the abs()
// step 2 is to do the sum
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// Host driver: repeatedly halves `start` (seeded from max_log2 so the first fold
// handles the non-power-of-two remainder) until each record's payload has been
// summed into its element 0. Returns 1 on success, 0 on a size mismatch.
int h_do_l1_norm_step2( int *dp, int total, int record_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks, i, start, row, cnt ;

	start = max_log2( record_size ) ;
	if ( start != record_size )
		start = max_log2(( start / 2 ) - 1 ) ;
	else
		start >>= 1 ;

	cnt = record_size - start ;	// first fold only covers the remainder
	row = total / record_size ;

	if ( total % record_size )
	{
		fprintf( stderr, "h_do_l1_norm_step2: error size %d %d \n",
			total, record_size ) ;
		return ( 0 ) ;
	}

	while ( cnt > 0 )
	{
		i = row * cnt ;	// elements touched in this pass, across all records

		printf("row %d cnt %d i %d start %d\n", row, cnt, i, start ) ;

		// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
		h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;

		d_do_l1_norm_step2 <<< nBlocks, nThreadsPerBlock >>> (
			dp + NUM_OF_HVT_INDEX, i, record_size + NUM_OF_HVT_INDEX,
			start, cnt ) ;

		cudaThreadSynchronize() ;

		start >>= 1 ;
		cnt = start ;
	}
	return ( 1 ) ;
}

// Fixed-point scale used to express the normalized match score.
#define MAX_L1_NORM 1000

// step 3 is to get 1-|y0-yk|/|y0|
// row_size is the number of rows ...
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with valid data, see caller
// Rescales each record's summed L1 value (element 0 of its payload) into
// MAX_L1_NORM * (1 - |y0-yk|/|y0|); the origin record (dp == op) is skipped.
__global__ void d_do_l1_norm_step3 ( int *dp, int row_size, int record_length,
	int *op )
{
	int *odp, t_idx = blockIdx.x * blockDim.x + threadIdx.x;

	odp = dp ;
	while ( t_idx < row_size )
	{
		dp = odp ;
		dp += t_idx * record_length ;

		// skip the orig
		if ( dp != op )
			*dp = MAX_L1_NORM - ( MAX_L1_NORM * ( *dp )) / (*op) ;

		t_idx += CUDA_MAX_THREADS ;
	}
}

// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// Host wrapper for step 3. Returns 1 on success, 0 on a size mismatch.
int h_do_l1_norm_step3( int *dp, int total, int record_size, int orig )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int i, nBlocks ;

	if ( total % record_size )
	{
		fprintf( stderr, "h_do_l1_norm_step3: error size %d %d \n",
			total, record_size ) ;
		return ( 0 ) ;
	}

	i = total / record_size ;	// number of records

	// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step3 <<< nBlocks, nThreadsPerBlock >>> (
		dp + NUM_OF_HVT_INDEX, i, record_size + NUM_OF_HVT_INDEX,
		dp + orig * ( record_size + NUM_OF_HVT_INDEX ) + NUM_OF_HVT_INDEX ) ;

	cudaThreadSynchronize() ;

	return ( 1 ) ;
}

// to find the min
// row_size is the number of rows for this addition operation
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with idx 0 of NUM_OF_HVT_INDEX, see caller
// One halving pass of a pairwise min across records: record t_idx keeps the
// smaller of itself and record (t_idx + start), copying value and t/v/h indexes
// together (tp/dp enter pointing at the value slot and walk backwards).
// Negative values are skipped (used as a "disabled" marker).
__global__ void d_do_l1_norm_step4 ( int *dp, int cnt, int record_length,
	int start )
{
	int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int *odp, *tp ;

	odp = dp ;
	while ( t_idx < cnt )
	{
		dp = odp ;
		tp = dp + t_idx * record_length ;
		dp = tp + start * record_length ;

		if ( *dp >= 0 )
		{
			if ( *dp < *tp )
			{
				*tp-- = *dp-- ;	// value
				*tp-- = *dp-- ;	// h
				*tp-- = *dp-- ;	// v
				*tp-- = *dp ;	// t
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// step 4 is find the min
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// Host driver: poisons the origin record with MAX_L1_NORM+1 so it never wins,
// then runs log2(row) pairwise-min passes; the winning record's t/v/h/value
// end up in record 0 and are copied out into v[0..3].
// Returns 1 on success, 0 on size/copy errors.
// NOTE(review): cudaMemcpy's cudaError_t is stored in an int and printed as %d.
int h_do_l1_norm_step4( int *dp, int total, int record_size, int orig, int *v )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks, i, start, row, cnt ;
	int a[4] ;

	if ( total % record_size )
	{
		fprintf( stderr, "%s: error size %d %d \n", __func__,
			total, record_size ) ;
		return ( 0 ) ;
	}

	row = total / record_size ;

	if ( orig >= row )
	{
		fprintf( stderr, "%s: error orig %d row %d \n", __func__,
			orig, row ) ;
		return ( 0 ) ;
	}

	// poison the origin record so it cannot be selected as the minimum
	start = MAX_L1_NORM + 1 ;
	if (( i = cudaMemcpy( dp + orig * ( record_size + NUM_OF_HVT_INDEX ) +
		NUM_OF_HVT_INDEX, &start, sizeof( int ),
		cudaMemcpyHostToDevice)) != cudaSuccess )
	{
		printf("%s: download orig : %d\n", __func__, i ) ;
		return ( 0 ) ;
	}

	start = max_log2( row ) ;
	if ( start != row )
		start = max_log2(( start / 2 ) - 1 ) ;
	else
		start >>= 1 ;

	cnt = row - start ;	// first fold only covers the non-power-of-two remainder

	while ( cnt > 0 )
	{
		printf("row %d cnt %d start %d\n", row, cnt, start ) ;

		// nBlocks= ( cnt + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
		h_block_adj ( cnt, nThreadsPerBlock, &nBlocks ) ;

		d_do_l1_norm_step4 <<< nBlocks, nThreadsPerBlock >>> (
			dp + NUM_OF_HVT_INDEX, cnt, record_size + NUM_OF_HVT_INDEX,
			start ) ;

		cudaThreadSynchronize() ;

		start >>= 1 ;
		cnt = start ;
	}

	// record 0 now holds the winner: t, v, h, value
	if (( i = cudaMemcpy( a, dp, 4 * sizeof( int ),
		cudaMemcpyDeviceToHost )) != cudaSuccess )
	{
		printf("%s: upload orig : %d\n", __func__, i ) ;
		return ( 0 ) ;
	}

	*v++ = a[0] ;
	*v++ = a[1] ;
	*v++ = a[2] ;
	*v++ = a[3] ;

	return ( 1 ) ;
}
11,305
#include "includes.h"

// Decrements every element of pInts by one, one thread per element.
// The launch grid may overshoot numInts, hence the bounds guard.
__global__ void decrement_dynamic_kernel(int* pInts, size_t numInts)
{
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    // fix: the original guard was `idx > numInts`, which let thread
    // idx == numInts write one element past the end of the array.
    if (idx >= numInts)
        return;
    pInts[idx] -= 1;
}
11,306
#include "../inc/RayTracer_DynamicContainer.cuh"


// Define
// ...


// Typedef
// ...


// Static Function Prototype
// ...


// Static Data
// ...


// Operation Handling
// container

// Return the unique index assigned to this container at creation time.
__host__ int Dynamic_ContainerBase::getIndex() {
    return index;
}


// container list
// TODO: need uniqueness check

// Build the name -> type-list-position hash map for all registered types.
__host__ int Dynamic_ContainerListBase::_Type_load_() {
    // insert current type into hash map
    for (int i = 0; i < type_list.size(); i++) {
        const Dynamic_ContainerType *type = type_list[i];
        name_map.insert(std::pair<std::string, int>(type->name, i));
    }
    return 0;
}

// Drop the name -> position map (inverse of _Type_load_).
__host__ int Dynamic_ContainerListBase::_Type_dump_() {
    name_map.clear();
    return 0;
}

// Look up a type's list position by name; -1 when the name is unknown.
__host__ int Dynamic_ContainerListBase::_Type_indexOf_(std::string name) {
    std::unordered_map<std::string, int>::iterator iterator;

    iterator = name_map.find(name);
    if (iterator == name_map.end()) return -1;
    return iterator->second;
}

// Instantiate an object of the given type and wrap it in a new container.
// Returns nullptr on an out-of-range type or failed object construction.
__host__ Dynamic_ContainerBase* Dynamic_ContainerListBase::_create_(int type) {
    if (type < 0 || type >= type_list.size()) return nullptr;

    void *o = type_list[type]->ops_init();
    if (o == nullptr) return nullptr;

    Dynamic_ContainerBase *container = new Dynamic_ContainerBase(object_index);
    object_index++;

    container->object = o;
    container->type = type;
    container_list.push_back(container);

    return container;
}

// Remove and delete the container with the given index.
// Returns 0 on success, -1 when the index does not exist.
__host__ int Dynamic_ContainerListBase::_destroy_(int index) {
    if (container_list.size() == 0) return -1;

    // check if target container exist or not
    int index_target = _indexOf_(index);
    if (index_target == -1) return -1;

    Dynamic_ContainerBase *target = container_list[index_target];

    // BUG FIX: the previous loop ran only to size()-2 (leaving the last slot
    // stale) and its second assignment (list[i+1] = list[i]) just rewrote the
    // value that was copied, duplicating elements instead of shifting them.
    // Shift every successor one slot left, preserving index order for the
    // binary search in _indexOf_.
    for (int i = index_target; i < (int)container_list.size() - 1; i++)
        container_list[i] = container_list[i + 1];

    // call pop_back to remove container
    container_list.pop_back();

    // delete item
    delete target;

    return 0;
}

// Fetch the container with the given index; nullptr when absent.
__host__ Dynamic_ContainerBase* Dynamic_ContainerListBase::_get_(int index) {
    int index_target = _indexOf_(index);
    if (index_target == -1) return nullptr;

    return container_list[index_target];
}

// Forward a config request (type/data/size) to the container's type ops.
// Returns 0 on success, -1 when the container or its type is invalid.
__host__ int Dynamic_ContainerListBase::_config_(int index, int type, uint8_t *data, uint32_t size) {
    Dynamic_ContainerBase *container = _get_(index);
    if (container == nullptr) return -1;

    int container_type = container->type;
    if (container_type < 0 || container_type >= type_list.size()) return -1;

    type_list[container_type]->ops_config(container->object, type, data, size);
    return 0;
}

// Let the container interact with up to 8 other containers' objects.
// Returns 0 on success, -1 on an invalid container/type or oversized list.
__host__ int Dynamic_ContainerListBase::_interact_(int index, int type, Dynamic_ContainerBase* *list, uint32_t size) {
    Dynamic_ContainerBase *container = _get_(index);
    if (container == nullptr) return -1;

    int container_type = container->type;
    if (container_type < 0 || container_type >= type_list.size()) return -1;

    // TODO: currently number is fixed
    // Defensive bound: temp_list holds at most 8 entries; previously a larger
    // size silently overran the stack array.
    if (size > 8) return -1;

    void* temp_list[8] = {0};
    for (int i = 0; i < size; i++) temp_list[i] = list[i]->object;

    type_list[container_type]->ops_interact(container->object, type, temp_list, size);
    return 0;
}

// Number of live containers.
__host__ int Dynamic_ContainerListBase::_size_() {
    return (int)(container_list.size());
}

// Map a container index to its position in container_list; -1 when absent.
__host__ int Dynamic_ContainerListBase::_indexOf_(int index) {
    // binary search
    // list is ordered by index number of container
    // binary search is able to use
    int index_min = 0;
    int index_max = container_list.size() - 1;
    int index_middle;
    int index_cur;

    // should use <= instead of ==
    while (index_min <= index_max) {
        index_middle = (index_min + index_max) / 2;
        index_cur = container_list[index_middle]->getIndex();

        // if size of list is very large
        // chance of index_cur == index is low
        // so better let this condition (index_cur == index) to pass two branch (index_cur < index) and (index_cur > index)
        // chance of index_cur < index and index_cur > index will be very the same
        if (index < index_cur) {
            index_max = index_middle - 1;
            continue;
        } else if (index > index_cur) {
            index_min = index_middle + 1;
            continue;
        }
        return index_middle;
    }

    return -1;

    // backup
    // linear search
    // int index_target = 0;
    //
    // for (auto *container : container_list) {
    //     if (container->getIndex() != index) {
    //         index_target++;
    //         continue;
    //     }
    //
    //     return index_target;
    // }
    // return -1;
}


// Static Function Implementation
// ...
11,307
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector_types.h>

typedef unsigned char uint8;
typedef unsigned int uint32;
typedef int int32;

// Each color component occupies 10 bits of the packed 10:10:10 pixel word.
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10

// Alpha value merged into every output pixel; set from the host with cudaMemcpyToSymbol.
__constant__ uint32 constAlpha;

#define MUL(x,y) (x*y)

// 3x3 YUV->RGB matrix (row-major), including any hue rotation; host-initialized.
__constant__ float HueColorSpaceAry[9];

// Convert one 10-bit Y/Cb/Cr triple to float RGB using the constant 3x3 matrix.
// Chroma is re-centered around 512 (the 10-bit midpoint).
__device__ void YUV2RGB(uint32 *yuv, float *red, float *green, float *blue)
{
    float luma, chromaCb, chromaCr;

    // Prepare for hue adjustment
    luma = (float)yuv[0];
    // NOTE(review): the int32 cast is redundant here — subtracting the float
    // literal 512.0f promotes the operand to float either way.
    chromaCb = (float)((int32)yuv[1] - 512.0f);
    chromaCr = (float)((int32)yuv[2] - 512.0f);

    // Convert YUV To RGB with hue adjustment
    *red = MUL(luma, HueColorSpaceAry[0]) + MUL(chromaCb, HueColorSpaceAry[1]) + MUL(chromaCr, HueColorSpaceAry[2]);
    *green= MUL(luma, HueColorSpaceAry[3]) + MUL(chromaCb, HueColorSpaceAry[4]) + MUL(chromaCr, HueColorSpaceAry[5]);
    *blue = MUL(luma, HueColorSpaceAry[6]) + MUL(chromaCb, HueColorSpaceAry[7]) + MUL(chromaCr, HueColorSpaceAry[8]);
}

// Clamp float RGB to the 10-bit range, drop the low 2 bits to get 8-bit
// components, and pack as B | G<<8 | R<<16 | alpha.
__device__ uint32 ARGB_10bit(float red, float green, float blue, uint32 alpha)
{
    uint32 ARGBpixel = 0;

    // Clamp final 10 bit results
    red = min(max(red, 0.0f), 1023.f);
    green = min(max(green, 0.0f), 1023.f);
    blue = min(max(blue, 0.0f), 1023.f);

    // Convert to 8 bit unsigned integers per color component
    ARGBpixel = (((uint32)blue >> 2) |
        (((uint32)green >> 2) << 8) |
        (((uint32)red >> 2) << 16) |
        (uint32)alpha);

    return ARGBpixel;
}

// CUDA kernel for outputing the final ARGB output from NV12;
// Layout: luma plane (height rows) followed by interleaved CbCr plane
// (height/2 rows), both nSourcePitch bytes wide. Each thread converts two
// horizontally adjacent pixels, which share one CbCr pair.
extern "C" __global__ void NV12ToARGB(uint32 *srcImage, size_t nSourcePitch,
    uint32 *dstImage, size_t nDestPitch,
    uint32 width, uint32 height)
{
    int32 x, y;
    uint32 yuv101010Pel[2];
    // NOTE(review): this 64-byte round-up is a dead store — processingPitch is
    // unconditionally overwritten with nSourcePitch a few lines below.
    uint32 processingPitch = ((width) + 63) & ~63;
    uint32 dstImagePitch = nDestPitch >> 2;   // dest pitch in 32-bit pixels, not bytes
    uint8 *srcImageU8 = (uint8 *)srcImage;

    processingPitch = nSourcePitch;

    // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= width)
        return;

    if (y >= height)
        return;

    // Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
    // if we move to texture we could read 4 luminance values
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;

    uint32 chromaOffset = processingPitch * height;
    int32 y_chroma = y >> 1;

    if (y & 1)  // odd scanline ?
    {
        uint32 chromaCb;
        uint32 chromaCr;

        chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
        chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];

        if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
        {
            chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
            chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
        }

        // Pack Cb into bits [12..21] and Cr into bits [22..31] of each pixel word.
        yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));

        yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    else
    {
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));

        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }

    // this steps performs the color conversion
    uint32 yuv[6];
    float red[2], green[2], blue[2];

    // Unpack both pixels' 10-bit Y/Cb/Cr fields.
    yuv[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
    yuv[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
    yuv[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
    yuv[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
    yuv[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
    yuv[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

    // YUV to RGB Transformation conversion
    YUV2RGB(&yuv[0], &red[0], &green[0], &blue[0]);
    YUV2RGB(&yuv[3], &red[1], &green[1], &blue[1]);

    // Clamp the results to RGBA
    dstImage[y * dstImagePitch + x ] = ARGB_10bit(red[0], green[0], blue[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1 ] = ARGB_10bit(red[1], green[1], blue[1], constAlpha);
}
11,308
#include "CVector.cuh"

namespace NBody
{

//ds ctor/dtor
//ds default constructor - element-wise initialization of the 3-vector
CVector::CVector( const double& p_dElement0, const double& p_dElement1, const double& p_dElement2 ): m_dElement0( p_dElement0 ),
                                                                                                    m_dElement1( p_dElement1 ),
                                                                                                    m_dElement2( p_dElement2 )
{
    //ds nothing to do
}

//ds default constructor - we need to have this since operator overloading for pointers is not allowed.. zero-initializes
CVector::CVector( ): m_dElement0( 0.0 ),
                     m_dElement1( 0.0 ),
                     m_dElement2( 0.0 )
{
    //ds nothing to do
}

//ds default destructor
CVector::~CVector( )
{
    //ds nothing to do
}

//ds operators
//ds r/w indexing - ( ) is used instead of [ ] to mark the difference that this is not a "real" array - CAREFUL this allows manipulation of the data
//ds throws std::exception for any index greater than 2
double& CVector::operator( )( const unsigned int& p_uIndex )
{
    //ds map the index operator to the element
    if ( 0 == p_uIndex ){ return m_dElement0; }
    else if( 1 == p_uIndex ){ return m_dElement1; }
    else if( 2 == p_uIndex ){ return m_dElement2; }

    //ds if an index greater 2 is required throw an exception
    else
    {
        //ds TODO implement exceptions
        throw std::exception( );
    }
}

//ds readonly indexing - ( ) is used instead of [ ] to mark the difference that this is not a "real" array
//ds returns by value; throws std::exception for any index greater than 2
double CVector::operator[ ]( const unsigned int& p_uIndex ) const
{
    //ds map the index operator to the element
    if ( 0 == p_uIndex ){ return m_dElement0; }
    else if( 1 == p_uIndex ){ return m_dElement1; }
    else if( 2 == p_uIndex ){ return m_dElement2; }

    //ds if an index greater 2 is required throw an exception
    else
    {
        //ds TODO implement exceptions
        throw std::exception( );
    }
}

//ds setting
//ds NOTE(review): returns void, so assignments cannot be chained (a = b = c).
void CVector::operator=( const CVector& p_cRightHandSide )
{
    //ds get all the elements
    m_dElement0 = p_cRightHandSide.m_dElement0;
    m_dElement1 = p_cRightHandSide.m_dElement1;
    m_dElement2 = p_cRightHandSide.m_dElement2;
}

//ds adding and assign
CVector& CVector::operator+=( const CVector& p_cRightHandSide )
{
    //ds add all the elements
    m_dElement0 += p_cRightHandSide.m_dElement0;
    m_dElement1 += p_cRightHandSide.m_dElement1;
    m_dElement2 += p_cRightHandSide.m_dElement2;

    return *this;
}

//ds dividing and assign
CVector& CVector::operator/=( const double& p_dRightHandSide )
{
    //ds add all the elements
    m_dElement0 /= p_dRightHandSide;
    m_dElement1 /= p_dRightHandSide;
    m_dElement2 /= p_dRightHandSide;

    return *this;
}

//ds multiplication and assign
CVector& CVector::operator*=( const double& p_dRightHandSide )
{
    //ds add all the elements
    m_dElement0 *= p_dRightHandSide;
    m_dElement1 *= p_dRightHandSide;
    m_dElement2 *= p_dRightHandSide;

    return *this;
}

//ds dividing
//ds NOTE(review): this MUTATES *this and returns it — it behaves like /=,
//ds not like a conventional non-member division that yields a new vector.
//ds Fixing it would require a by-value return in the header; callers may
//ds depend on the in-place behavior, so it is only flagged here — confirm.
CVector& CVector::operator/( const double& p_dRightHandSide )
{
    //ds add all the elements
    m_dElement0 /= p_dRightHandSide;
    m_dElement1 /= p_dRightHandSide;
    m_dElement2 /= p_dRightHandSide;

    return *this;
}

//ds simple addition
const CVector operator+( const CVector& p_cLeftHandSide, const CVector& p_cRightHandSide )
{
    return CVector( p_cLeftHandSide.m_dElement0 + p_cRightHandSide.m_dElement0,
                    p_cLeftHandSide.m_dElement1 + p_cRightHandSide.m_dElement1,
                    p_cLeftHandSide.m_dElement2 + p_cRightHandSide.m_dElement2 );
}

//ds simple subtraction (needed for distance calculations)
const CVector operator-( const CVector& p_cLeftHandSide, const CVector& p_cRightHandSide )
{
    return CVector( p_cLeftHandSide.m_dElement0 - p_cRightHandSide.m_dElement0,
                    p_cLeftHandSide.m_dElement1 - p_cRightHandSide.m_dElement1,
                    p_cLeftHandSide.m_dElement2 - p_cRightHandSide.m_dElement2 );
}

//ds simple multiplication - scalar * vector
const CVector operator*( const double& p_dLeftHandSide, const CVector& p_cRightHandSide )
{
    //ds add all the elements
    return CVector( p_dLeftHandSide*p_cRightHandSide.m_dElement0,
                    p_dLeftHandSide*p_cRightHandSide.m_dElement1,
                    p_dLeftHandSide*p_cRightHandSide.m_dElement2 );
}

//ds printing - one "index: value" line per component, no trailing newline
std::ostream& operator<<( std::ostream& p_cLeftHandSide, const CVector& p_cRightHandSide )
{
    //ds build the string
    p_cLeftHandSide << "0: " << p_cRightHandSide.m_dElement0 << "\n"
                    << "1: " << p_cRightHandSide.m_dElement1 << "\n"
                    << "2: " << p_cRightHandSide.m_dElement2;

    return p_cLeftHandSide;
}

//ds static functions
//ds Euclidean norm of a CVector
double CVector::absoluteValue( const CVector& p_cVector )
{
    return sqrt( pow( p_cVector.m_dElement0, 2 ) + pow( p_cVector.m_dElement1, 2 ) + pow( p_cVector.m_dElement2, 2 ) );
}

//ds Euclidean norm of a raw double[3]
double CVector::absoluteValue( const double p_vecVector[3] )
{
    return sqrt( pow( p_vecVector[0], 2 ) + pow( p_vecVector[1], 2 ) + pow( p_vecVector[2], 2 ) );
}

//ds right-handed cross product of two CVectors
const CVector CVector::crossProduct( const CVector& p_cVector1, const CVector& p_cVector2 )
{
    return CVector( p_cVector1.m_dElement1*p_cVector2.m_dElement2-p_cVector1.m_dElement2*p_cVector2.m_dElement1,
                    p_cVector1.m_dElement2*p_cVector2.m_dElement0-p_cVector1.m_dElement0*p_cVector2.m_dElement2,
                    p_cVector1.m_dElement0*p_cVector2.m_dElement1-p_cVector1.m_dElement1*p_cVector2.m_dElement0 );
}

//ds right-handed cross product of two raw double[3] arrays
const CVector CVector::crossProduct( const double p_vecVector1[3], const double p_vecVector2[3] )
{
    return CVector( p_vecVector1[1]*p_vecVector2[2]-p_vecVector1[2]*p_vecVector2[1],
                    p_vecVector1[2]*p_vecVector2[0]-p_vecVector1[0]*p_vecVector2[2],
                    p_vecVector1[0]*p_vecVector2[1]-p_vecVector1[1]*p_vecVector2[0] );
}

} //ds namespace NBody
11,309
#include "includes.h"

// Long-division kernel over digit arrays (work in progress).
// first/second hold sign-prefixed digit strings; thread (0,0) writes the result
// sign, the remaining threads run one adapted subtraction step into aux.
// NOTE(review): the bulk of the division algorithm below is still commented
// out; only the sign handling and a single kernel_sub-style step are live.
__global__ void kernel_div(char* newB, char* first, char* second, int size_first, int size_second, int * size_newB, char* aux)
{
    int i = threadIdx.x;
    int j = threadIdx.y;

    // Thread (0,0) only determines the sign of the quotient.
    if(j==0 && i==0){
        if(first[j]=='-' || second[i]=='-')
            newB[0]='-';
        else
            newB[0]='+';
        return;
    }

#if __CUDA_ARCH__>=200
    printf("#i, j = %d, %d\n", i, j);
#endif

    // adapted from kernel_sub
    // Align second under first (diff columns to the right) and subtract digit j.
    int diff = size_first - size_second;
    int tmp = 0;
    if (j - 1 - diff >= 0 && (second[j - 1 - diff] != '+' && second[j - 1 - diff] != '-')) {
        tmp = first[j - 1] - second[j-1-diff];
    }
    else if (first[j - 1] != '+' && first[j - 1] != '-') {
        tmp = first[j - 1];
    }
    // Borrow from the next-higher digit on underflow.
    if (tmp < 0) { // warning 10 - tmp ?
        aux[i * size_first + j - 1]--;
        tmp += 10;
    }
    if (i != 0)
        aux[i * size_first + j] += tmp;
    // end of kernel_sub

#if __CUDA_ARCH__>=200
    printf("#aux = %d\n", aux[i * size_first + j]);
#endif

    /*
    char* temp = NULL;
    //init(size_second + 1, temp);
    int t = 0; // temp's index
    int n = 0; // newB's index
    for (int i = size_first - 1; i >= 0; i -= t) {
        t = 0;
        for (int j = i - size_second; j <= i; j++) {
            if (j >= 0) {
                temp[t] = first[j];
                t++;
            }
        }
        // verify that we are not attempting to divide something too small
        if (isFirstBiggerThanSecond(second, temp, size_second)) {
            t = 0;
            for (int j = i - size_second - 1; j <= i; j++) {
                if (j < 0) {
                    // nothing left to divide, exit function
                    return;
                }
                else {
                    temp[t] = first[j];
                    t++;
                }
            }
        }
        // now that we have our thing, let's get to the division itself
        char res = 0;
        char* sub_res = NULL;
        int size_res = 0;
        //init(size_second, sub_res);
        do {
            //kernel_sub(sub_res, temp, second, size_second, size_second, &size_res);
            res++;
        } while (0); //sub_res > 0
        // current division done, save result & move on to the next
        newB[n] = res;
        n++;
    }
    // all divisions done, we need to realign our result;
    int diff = size_second - n;
    for (int i = size_second - 1; i > n; i++) {
        newB[i] = newB[i - diff];
    }*/
}
11,310
#include <cuda.h>
#include <iostream>
#include <vector>

using namespace std;

// Add A and B vector on the GPU. Results stored into C.
// The i < n guard makes any grid size safe for a partial final block.
__global__ void addKernel(int n, float* A, float* B, float* C)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < n)
        C[i] = A[i] + B[i];
}

// Add A and B vector. Results stored into C.
// Returns 0 on success, 1 if the kernel failed to launch.
int add(int n, float* h_A, float* h_B, float* h_C)
{
    int size = n*sizeof(float);

    // Allocate memory on device and copy data
    float* d_A;
    cudaMalloc((void**)&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

    float* d_B;
    cudaMalloc((void**)&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    float* d_C;
    cudaMalloc((void**)&d_C, size);

    // launch Kernel — integer ceil-division replaces the float ceil() used
    // before, avoiding rounding surprises for large n.
    int blocks = (n + 255) / 256;
    cout << "Running 256 threads on " << blocks << " blocks -> " << 256*blocks << endl;
    addKernel<<<blocks,256>>>(n, d_A, d_B, d_C);

    // Kernel launches return no status; pick up configuration errors here.
    int rc = 0;
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        cout << "Kernel launch failed: " << cudaGetErrorString(err) << endl;
        rc = 1;
    }

    // Transfer results back to host (blocking copy also synchronizes)
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    return rc;
}

// C = A + B on a GPU, where A is a vector of 1.0f and B a vector of 2.0f
// The main function takes one argument, the size of the vectors
int main(int argc, char* argv[])
{
    // BUG FIX: argv[1] was read unconditionally; running with no argument
    // dereferenced a null pointer. Validate the argument first.
    if(argc < 2)
    {
        cout << "Usage: " << argv[0] << " <vector size>" << endl;
        return 1;
    }
    int n = atoi(argv[1]);
    if(n <= 0)
    {
        cout << "Vector size must be a positive integer" << endl;
        return 1;
    }

    vector<float> h_A(n, 1.0f);
    vector<float> h_B(n, 2.0f);
    vector<float> h_C(n);

    if(add(n, h_A.data(), h_B.data(), h_C.data()) != 0)
        return 1;

    for(auto& c : h_C)
    {
        if(fabs(c-3.0f) > 0.00001f)
        {
            cout << "Error!" << endl;
            return 1;
        }
    }
    cout << "The program completed successfully" << endl;
    return 0;
}
11,311
/**
 * Matrix multiplication Exercise : P = M . N.
 *
 * This program basically follows the tutorial in class.
 *
 * Given the 1024*1024 matrix test case, this program got the best performance
 * boost using TILE_WIDTH = 16. This was also suggested in the slide set.
 *
 * This exercise was executed on a MacBook Pro, with GeForce GT 650M
 * Using the CPU matrixMultiplication code, it takes about 18 seconds
 * Using this TILED approach, it only take about 0.13 ~0.15 seconds
 *
 * See also:
 * Zhou Bin@ Nvidia & USTC, 2014, October, "CUDA Programming (2)" Lecture Slides
 *
 *
 */
#include "stdio.h"
#include "stdlib.h"
#include "cuda.h"
#include "cuda_runtime.h"

#define W 1024          // matrix edge length; matrices are W x W
#define TILE_WIDTH 16   // shared-memory tile edge; W must be a multiple of this
#define DEBUG 1         // set 0 to suppress matrix printing

// Print the top-left and lower-right MAX_SIZE_PRINTED x MAX_SIZE_PRINTED
// corners of a W x W row-major matrix (the whole matrix when W is small).
void printMatrix(float *Matrix)
{
    const int MAX_SIZE_PRINTED = 4;
    printf("This is a %d by %d matrix.\n", W,W);
    if (W > MAX_SIZE_PRINTED) {
        printf("Actual displayed size is cut in 2 parts shown as");
        printf(" %d by %d matrix.\n", MAX_SIZE_PRINTED, MAX_SIZE_PRINTED);
        printf(" The Top_LEFT CORNER OF the %d * %d matrix:\n", W, W);
    }

    // Top-left corner: the j-loop still scans full rows, printing only the
    // first MAX_SIZE_PRINTED columns.
    for(int i=0;i<W;i++)
    {
        for(int j=0;j<W;j++)
            if(i < MAX_SIZE_PRINTED && j < MAX_SIZE_PRINTED){
                if (DEBUG) printf("%5.2f ",*(Matrix+i*W+j));
            }
        if(i < MAX_SIZE_PRINTED && DEBUG) printf("\n");
    }

    // Lower-right corner.
    if (W > MAX_SIZE_PRINTED){
        printf(" The LOWER_RIGHT CORNER OF the %d * %d matrix\n", W, W);
        for(int i=W-MAX_SIZE_PRINTED;i<W;i++)
        {
            for(int j=W-MAX_SIZE_PRINTED;j<W;j++)
                if (DEBUG) printf("%5.2f ",*(Matrix+i*W+j));
            if(DEBUG) printf("\n");
        }
    }
}

/*
 * This code is mostly copied from the slide set with some comments written by Ben Koo.
 *
 * In this test case, W = 1024, TILE_WIDTH = 16, making the dimGrid = 64 * 64
 * Within each block, there are 16 * 16 threads.
 *
 *
 */
// Tiled matrix multiply: each TILE_WIDTH x TILE_WIDTH thread block computes one
// output tile of Pd, staging tiles of Md and Nd through shared memory.
// Precondition: Width is an exact multiple of TILE_WIDTH (no edge guards).
__global__ void matrixMulKernel_usingTile(float* Md, float* Nd, float* Pd, int Width)
{
    //This declares the device (shared) memory as 16 * 16 float matrices
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    // When W = 1024, the block IDs (x * y) should be (64 * 64)
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // When W = 1024, the thread IDs (x * y) should be (16 * 16)
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;

    float PValue = 0;

    // When W = 1024, m should go from 0 to 63
    for (int m =0; m < Width/TILE_WIDTH; ++m){
        // The following memory access takes place in shared memory
        Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
        Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH+ty)*Width];
        //Make sure that all data are written in sync.
        __syncthreads();

        //Perform TILE level matrix multiplication and addition in synchrony.
        for (int k = 0; k< TILE_WIDTH; ++k)
            PValue += Mds[ty][k] * Nds[k][tx];
        // Barrier before the next iteration overwrites the tiles.
        __syncthreads();
    }

    //Take individually calculated PValue and place it to the Pd (device memory array).
    Pd[Row * Width + Col] = PValue;
}

// Host driver: fills M and N, times the tiled kernel with CUDA events, and
// prints corners of the product. NOTE(review): no CUDA error checking anywhere;
// a failed launch would silently print garbage timing and an unmodified P.
int main()
{
    int sNo = 0;
    cudaSetDevice(sNo%8);

    int size = W*W*sizeof(float);
    float *M,*N,*P;
    float *d_M,*d_N,*d_P;
    M = (float *) malloc(size);
    N = (float *) malloc(size);
    P = (float *) malloc(size);
    cudaMalloc((void **)&d_M,size);
    cudaMalloc((void **)&d_N,size);
    cudaMalloc((void **)&d_P,size);

    //Populate initial values to the M, N and P matrices
    for(int i=0;i<W*W;i++)
    {
        *(M+i) = i;
        *(N+i) = i+1;
        *(P+i) = 0;
    }

    cudaMemcpy(d_M, M,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, N,size,cudaMemcpyHostToDevice);

    //Starting from here, set up CUDA timing mechanism
    float time_elapsed = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    dim3 dimGrid(W /TILE_WIDTH, W / TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    matrixMulKernel_usingTile<<< dimGrid, dimBlock >>>(d_M,d_N,d_P,W);

    cudaEventRecord(stop,0);
    // NOTE(review): synchronizing on `start` is redundant — it was recorded
    // before the kernel; waiting on `stop` alone is sufficient.
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);
    //The following function returns time_elapsed using milli-seconds as time units
    cudaEventElapsedTime(&time_elapsed, start, stop);
    //Finished timing for CUDA execution

    //To display time_elapsed in seconds, divide the millisecond value by 1000.
    printf("\n\nGPU Elapsed Time:%f\n", time_elapsed/1000);

    cudaMemcpy(P,d_P,size,cudaMemcpyDeviceToHost);
    printMatrix(P);

    free(M);free(N);free(P);
    cudaFree(d_M);cudaFree(d_N);cudaFree(d_P);
    return 0;
}
11,312
//matrix multiply using shared memory for optimization
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <random>
#include <iostream>

#define BSZ 128            // blocks per grid dimension
#define TSZ 16             // tile edge / threads per block dimension
#define SZ (BSZ * TSZ)     // matrix edge length (2048)
#define TT double          // element type used by main()

using namespace std;

// Process-wide RNG, seeded once from wall-clock time (not reproducible runs).
default_random_engine& get_default_random_engine(){
    static default_random_engine eng(time(0));
    return eng;
}

// Fill m[0..sz) with uniform values in [-100, 100).
template <typename T>
void random_matrix(T* m, size_t sz){
    uniform_real_distribution<T> dist(-100.F, 100.F);
    default_random_engine& eng = get_default_random_engine();
    for (size_t i = 0; i < sz; ++i)
        m[i] = dist(eng);
}

// Abort (by default) with file/line context on any CUDA API failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Non-owning matrix view passed by value into kernels; stride is the row
// pitch in elements (equals cols for a full matrix, parent pitch for tiles).
template <typename T>
struct CudaMtx {
    T* data;
    size_t rows;
    size_t cols;
    size_t stride;
};

// Owning host- or device-side matrix buffer (RAII).
template <typename T>
struct Mtx {
public:
    T* data;
    size_t rows;
    size_t cols;
    bool is_cuda;  // true: data lives on the device; false: host heap

    Mtx(bool is_cuda, size_t rows, size_t cols):
        data(nullptr), rows(rows), cols(cols), is_cuda(is_cuda) {
        if (is_cuda) {
            gpuErrchk(cudaMalloc(&data, sizeof(T) * rows * cols));
        } else
            data = new T[rows * cols];
    }
    ~Mtx(){
        if (is_cuda) {
            gpuErrchk(cudaFree(data));
        } else
            delete[] data;
    }
    // Build a kernel-ready non-owning view; only valid for device matrices.
    CudaMtx<T> cuda_mtx(){
        assert(is_cuda);
        CudaMtx<T> ret;
        ret.data = data;
        ret.rows = rows;
        ret.cols = ret.stride = cols;
        return ret;
    }
};

template <typename T>
__device__ T get_elem(CudaMtx<T>& a, size_t i, size_t j){
    return a.data[i * a.stride + j];
}

template <typename T>
__device__ void set_elem(CudaMtx<T>& a, size_t i, size_t j, T val){
    a.data[i * a.stride + j] = val;
}

// View of the TSZ x TSZ tile at tile-coordinates (row_stride, col_stride),
// sharing the parent's storage via its stride.
template <typename T>
__device__ CudaMtx<T> sub_matrix_stride(CudaMtx<T>& m, size_t row_stride, size_t col_stride){
    CudaMtx<T> ret;
    ret.data = &m.data[m.cols * TSZ * row_stride + TSZ * col_stride];
    ret.rows = ret.cols = TSZ;
    ret.stride = m.stride;
    return ret;
}

// c = a * b, tiled through shared memory. Launch with a BSZ x BSZ grid of
// TSZ x TSZ blocks; each block computes one TSZ x TSZ tile of c.
// NOTE(review): row is threadIdx.x, so adjacent threads read consecutive ROWS
// of global memory — uncoalesced; swapping row/col roles would coalesce loads.
template <typename T>
__global__ void matrix_multiply_cuda_v2(CudaMtx<T> c, CudaMtx<T> a, CudaMtx<T> b){
    size_t bx = blockIdx.x, by = blockIdx.y;
    CudaMtx<T> csub = sub_matrix_stride(c, bx, by);
    T cval = 0.;
    size_t row = threadIdx.x, col = threadIdx.y;
    // Walk the BSZ tile-pairs along the shared dimension.
    for (size_t i = 0; i < BSZ; ++i){
        CudaMtx<T> asub = sub_matrix_stride(a, bx, i);
        CudaMtx<T> bsub = sub_matrix_stride(b, i, by);
        __shared__ T amem[TSZ][TSZ];
        __shared__ T bmem[TSZ][TSZ];
        // Each thread stages one element of each tile, then the whole block
        // synchronizes before any thread consumes the tiles.
        amem[row][col] = get_elem(asub, row, col);
        bmem[row][col] = get_elem(bsub, row, col);
        __syncthreads();
        for (size_t j = 0; j < TSZ; ++j)
            cval += amem[row][j] * bmem[j][col];
        // Barrier before the next iteration overwrites the tiles.
        __syncthreads();
    }
    set_elem(csub, row, col, cval);
}

// Naive O(n^3) CPU reference; returns clock() at completion so the caller can
// time the call without a second statement.
template <typename T>
clock_t matrix_multiply_v1(Mtx<T>& c, Mtx<T>& a, Mtx<T>& b){
    for (size_t i = 0; i < c.rows; ++i)
        for (size_t j = 0; j < c.cols; ++j){
            c.data[i * c.cols + j] = 0.;
            for (size_t k = 0; k < a.cols; ++k)
                c.data[i * c.cols + j] += a.data[i * a.cols + k] * b.data[k * b.cols + j];
        }
    return clock();
}

// Multiply two random SZ x SZ matrices on GPU and CPU, time both, and compare
// element-wise with a 0.5% relative tolerance.
int main(){
    Mtx<TT> c(false, SZ, SZ), a(false, SZ, SZ), b(false, SZ, SZ), d(false, SZ, SZ);
    Mtx<TT> dc(true, SZ, SZ), da(true, SZ, SZ), db(true, SZ, SZ);
    random_matrix(a.data, SZ * SZ);
    random_matrix(b.data, SZ * SZ);

    clock_t timing_start = clock();
    gpuErrchk(cudaMemcpy(da.data, a.data, sizeof(TT) * SZ * SZ, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(db.data, b.data, sizeof(TT) * SZ * SZ, cudaMemcpyHostToDevice));
    dim3 dblock(BSZ, BSZ);
    dim3 dthread(TSZ, TSZ);
    matrix_multiply_cuda_v2<<<dblock, dthread>>>(dc.cuda_mtx(), da.cuda_mtx(), db.cuda_mtx());
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(c.data, dc.data, sizeof(TT) * SZ * SZ, cudaMemcpyDeviceToHost));
    cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;

    timing_start = clock();
    clock_t timing_end = matrix_multiply_v1(d, a, b);
    cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;

    // NOTE(review): pure relative error — a reference value d.data[i] near
    // zero can flag a spurious "difference"; an absolute-tolerance floor
    // would make this check robust.
    size_t mismatch = 0;
    for (size_t i = 0; i < SZ * SZ; ++i)
        if (fabs(c.data[i] - d.data[i]) / d.data[i] > 5e-3F){
            cout << "difference: " << (fabs(c.data[i] - d.data[i]) / d.data[i]) << endl;
            mismatch++;
            break;
        }
    if (mismatch == 0)
        cout << "All values match" << endl;
    else
        cout << mismatch << " differences" << endl;
}
11,313
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>

// Print every n in [0, range) whose digit-sum times digit-product equals n.
// Grid-stride loop: correctness no longer depends on the launch configuration
// covering the whole range with one thread per number.
__global__ void kernel(uint32_t range)
{
    uint32_t stride = blockDim.x * gridDim.x;
    for(uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; tid < range; tid += stride){
        uint32_t n = tid;
        uint32_t sum = 0;
        uint32_t prod = 1;
        while(n != 0){
            uint32_t digit = n % 10;
            n /= 10;
            sum += digit;
            prod *= digit;
        }
        if(sum*prod == tid)
            printf("%u\n", tid);
    }
}

// Check [0, range) for sum-product numbers.
// BUG FIX: the old launch used sqrt(range) blocks x min(sqrt(range), 1024)
// threads and had no loop in the kernel, so once range exceeded 1024*1024
// (e.g. checkrange(16777216)) most of the range was silently never checked —
// the file's own trailing comment documented the symptom. The kernel is now
// grid-stride, so a capped grid still covers every number.
void checkrange(uint32_t range){
    printf("Checking %u for sum-product numbers\n", range);

    uint32_t nthreads = 1024;
    uint32_t nblocks = (range + nthreads - 1) / nthreads;  // ceil-div coverage
    // Conservative grid cap; the grid-stride loop picks up the remainder.
    if(nblocks == 0) nblocks = 1;
    if(nblocks > 65535) nblocks = 65535;

    kernel<<<nblocks, nthreads>>>(range);
    cudaDeviceSynchronize();
}

int main()
{
    // main iteration
    checkrange(1024);
    checkrange(2048);
    checkrange(262144);
    checkrange(524288);
    checkrange(1048576);  // sqrt 1024
    checkrange(2097152);
    checkrange(16777216);
    return 0;
}
11,314
#include <stdio.h>
#include <stdlib.h>

// Empty kernel: launched once during initialization to force CUDA context creation.
__global__ void emptyKernel()
{
}

// Abort (by default) with file/line context on a failing CUDA call.
#define cudaCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        printf("CUDA error: %s - %s(%d)\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Probe harness for the pinned-host-memory (cudaHostAlloc) allocator.
// NOTE(review): the break_XX labels look like debugger breakpoint anchors, and
// several control variables (pool_size, granularity, flag, finished) are never
// updated by the code itself — this program appears intended to be driven
// interactively (variables poked from a debugger at each label). Run as-is,
// granularity stays 0, so `pool_size / granularity` below divides by zero, and
// the while loop after break_02 writes h_data_array[1] into a 1-slot array.
// Confirm the intended usage before "fixing" either.
int main(int argc, char* argv[])
{
    // Initialization — argv[1] selects the CUDA device.
    // NOTE(review): argc is not checked; running with no argument crashes in atoi.
    /*----------------------------------------------------------------------------------------*/
    int device = atoi(argv[1]);
    cudaCheck(cudaSetDevice(device));
    cudaSetDeviceFlags(cudaDeviceMapHost);

    int runtime_version;
    int driver_version;
    cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, device);
    cudaRuntimeGetVersion(&runtime_version);
    cudaDriverGetVersion(&driver_version);

    // Warm-up launch to create the context before any timing/probing.
    emptyKernel<<<1,1>>>();
    cudaDeviceSynchronize();

    // Pool size
    /*----------------------------------------------------------------------------------------*/
    int pool_size = 1;
    char *h_data;
    break_01: __attribute__((unused));
    cudaHostAlloc((void**)&h_data, 1, cudaHostAllocMapped);
    cudaFreeHost(h_data);

    // Maximum allocations and granularity
    /*----------------------------------------------------------------------------------------*/
    char **h_data_array = (char**) malloc(pool_size * sizeof(char*));
    cudaHostAlloc((void**)&h_data_array[0], 1, cudaHostAllocMapped);
    break_02: __attribute__((unused));
    int granularity = 0, iteration = 0, flag = 0;
    // NOTE(review): with pool_size == 1 this writes h_data_array[1], one past
    // the end of the 1-element array allocated above.
    while(!flag && iteration < pool_size)
    {
        iteration++;
        cudaHostAlloc((void**)&h_data_array[iteration], 1, cudaHostAllocMapped);
    }
    for(int i = 0; i <= iteration; i++)
    {
        cudaFreeHost(h_data_array[i]);
    }
    free(h_data_array);

    // Size classes
    /*----------------------------------------------------------------------------------------*/
    char *h_data_inf, *h_data_sup;
    int inf_size = granularity, sup_size = granularity, finished = 1, class_finished = 0;
    break_03: __attribute__((unused));
    cudaHostAlloc((void**)&h_data_inf, inf_size, cudaHostAllocMapped);
    // Loop body only runs when `finished` is cleared externally.
    while(!finished)
    {
        sup_size = sup_size + granularity;
        cudaHostAlloc((void**)&h_data_sup, sup_size, cudaHostAllocMapped);
        cudaFreeHost(h_data_sup);
        if(class_finished)
        {
            class_finished = 0;
            cudaFreeHost(h_data_inf);
            inf_size = sup_size;
            cudaHostAlloc((void**)&h_data_inf, inf_size, cudaHostAllocMapped);
        }
    }
    cudaFreeHost(h_data_inf);

    // Larger allocations
    /*----------------------------------------------------------------------------------------*/
    break_04: __attribute__((unused));
    cudaHostAlloc((void**)&h_data, pool_size + 1, cudaHostAllocMapped);
    cudaFreeHost(h_data);

    // Allocator policy — interleaved alloc/free pattern to expose reuse order.
    /*----------------------------------------------------------------------------------------*/
    char *chunk_1, *chunk_2, *chunk_3, *chunk_4, *chunk_5, *chunk_6, *chunk_7, *chunk_8, *chunk_9, *chunk_10;
    cudaHostAlloc((void**)&chunk_1, granularity * 2, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_2, granularity, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_3, granularity * 2, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_4, granularity, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_5, granularity, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_6, granularity, cudaHostAllocMapped);
    cudaFreeHost(chunk_1);
    cudaFreeHost(chunk_3);
    cudaFreeHost(chunk_5);
    cudaHostAlloc((void**)&chunk_7, granularity, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_8, granularity, cudaHostAllocMapped);
    break_05: __attribute__((unused));
    cudaFreeHost(chunk_2);
    cudaFreeHost(chunk_4);
    cudaFreeHost(chunk_6);
    cudaFreeHost(chunk_7);
    cudaFreeHost(chunk_8);

    // Coalescing support — free two adjacent chunks, then request their combined size.
    /*----------------------------------------------------------------------------------------*/
    cudaHostAlloc((void**)&chunk_1, granularity, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_2, granularity, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_3, granularity, cudaHostAllocMapped);
    cudaFreeHost(chunk_1);
    cudaFreeHost(chunk_2);
    cudaHostAlloc((void**)&chunk_4, granularity * 2, cudaHostAllocMapped);
    break_06: __attribute__((unused));
    cudaFreeHost(chunk_3);
    cudaFreeHost(chunk_4);

    // Splitting support — free a double-size chunk, then request a single.
    /*----------------------------------------------------------------------------------------*/
    cudaHostAlloc((void**)&chunk_1, granularity * 2, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_2, granularity, cudaHostAllocMapped);
    cudaFreeHost(chunk_1);
    cudaHostAlloc((void**)&chunk_3, granularity, cudaHostAllocMapped);
    break_07: __attribute__((unused));
    cudaFreeHost(chunk_2);
    cudaFreeHost(chunk_3);

    // Expansion policy
    /*----------------------------------------------------------------------------------------*/
    // NOTE(review): divides by zero when granularity is still 0 (see header note).
    int max_allocations = pool_size / granularity;
    h_data_array = (char**) malloc(max_allocations * sizeof(char*));
    cudaHostAlloc((void**)&h_data_array[0], granularity, cudaHostAllocMapped);
    break_08: __attribute__((unused));
    int index;
    for(index = 1; index < max_allocations; index++)
    {
        cudaHostAlloc((void**)&h_data_array[index], granularity, cudaHostAllocMapped);
    }
    for(index = 0; index < max_allocations; index++)
    {
        cudaFreeHost(h_data_array[index]);
    }
    free(h_data_array);

    // Pool usage — over-commit the pool in quarters, free a few, allocate again.
    /*----------------------------------------------------------------------------------------*/
    int quarter = pool_size / 4;
    cudaHostAlloc((void**)&chunk_1, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_2, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_3, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_4, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_5, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_6, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_7, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_8, quarter, cudaHostAllocMapped);
    cudaHostAlloc((void**)&chunk_9, quarter, cudaHostAllocMapped);
    cudaFreeHost(chunk_1);
    cudaFreeHost(chunk_2);
    cudaFreeHost(chunk_5);
    cudaHostAlloc((void**)&chunk_10, quarter, cudaHostAllocMapped);
    break_09: __attribute__((unused));
    cudaFreeHost(chunk_10);

    // Shrinking support — staged frees with a flag marking each stage.
    /*----------------------------------------------------------------------------------------*/
    flag = 0;
    break_10: __attribute__((unused));
    cudaFreeHost(chunk_6);
    cudaFreeHost(chunk_7);
    cudaFreeHost(chunk_8);
    flag = 1;
    cudaFreeHost(chunk_9);
    flag = 2;
    cudaFreeHost(chunk_3);
    cudaFreeHost(chunk_4);

    // Finalization
    /*----------------------------------------------------------------------------------------*/
    cudaDeviceReset();

    return 0;
}
11,315
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

/* BMP helpers provided by a separate C translation unit. */
extern "C" void write_bmp(unsigned char* data, int width, int height);
extern "C" unsigned char* read_bmp(char* filename);

/* Print the number of CUDA devices and basic properties of device 0
 * (also serves as a quick sanity check that the GPU is usable). */
void print_properties()
{
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("Device count: %d\n", deviceCount);

    cudaDeviceProp p;
    cudaSetDevice(0);
    cudaGetDeviceProperties(&p, 0);
    printf("Compute capability: %d.%d\n", p.major, p.minor);
    printf("Name: %s\n", p.name);
    printf("\n\n");
}

/* 3x3 box blur over a 512x512 8-bit grayscale image.
 * Expects a 2D launch covering exactly 512x512 threads, one per pixel.
 * Border pixels (outermost ring) are copied through unchanged. */
__global__ void device_blur(unsigned char *input_img, unsigned char *output_img)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int index = x + y * 512;

    if (x > 0 && x < 511 && y > 0 && y < 511) {
        /* BUG FIX: the neighbour offset was "index + k + l", which walks a
         * 1D window inside a single row instead of the 3x3 2D neighbourhood;
         * the vertical offset must be scaled by the image width (512).
         * Also accumulate in a float and divide once at the end, instead of
         * truncating to unsigned char on every partial add. */
        float sum = 0.0f;
        for (int dy = -1; dy < 2; dy++) {
            for (int dx = -1; dx < 2; dx++) {
                sum += input_img[index + dy * 512 + dx];
            }
        }
        output_img[index] = (unsigned char)(sum / 9.0f);
    } else {
        output_img[index] = input_img[index];
    }
}

int main(int argc, char **argv)
{
    // Prints some device properties, also to make sure the GPU works etc.
    print_properties();

    unsigned char *A = read_bmp("peppers.bmp");
    unsigned char *B = (unsigned char *) malloc(sizeof(unsigned char) * 512 * 512);

    dim3 numBlocks, threadsPerBlock;
    numBlocks.x = 64;
    numBlocks.y = 64;       // 4096 blocks
    threadsPerBlock.x = 8;
    threadsPerBlock.y = 8;  // 64 threads per block -> 512x512 threads total

    // 1. Allocate buffers for the input image and the output image
    unsigned char *input_img;
    cudaMalloc((void**) &input_img, sizeof(unsigned char) * 512 * 512);
    unsigned char *output_img;
    cudaMalloc((void**) &output_img, sizeof(unsigned char) * 512 * 512);

    // 2. Transfer the input image from the host to the device
    cudaMemcpy(input_img, A, sizeof(unsigned char) * 512 * 512, cudaMemcpyHostToDevice);

    // 3. Launch the kernel which does the bluring
    device_blur<<<numBlocks, threadsPerBlock>>>(input_img, output_img);
    /* Kernel launches do not return errors directly; check explicitly. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "device_blur launch failed: %s\n", cudaGetErrorString(err));

    // 4. Transfer the result back to the host (synchronous copy, so it also
    //    waits for the kernel to finish).
    cudaMemcpy(B, output_img, sizeof(unsigned char) * 512 * 512, cudaMemcpyDeviceToHost);

    write_bmp(B, 512, 512);

    cudaFree(input_img);
    cudaFree(output_img);
    free(A);
    free(B);
    return 0;
}
11,316
/*
Francisco Rodriguez Jimenez
cazz@correo.ugr.es

nvcc - The NVIDIA CUDA Compiler
cuobjdump - The NVIDIA CUDA Object Utility
nvdisasm - The NVIDIA CUDA disassembler
nvprune - The NVIDIA CUDA Prune Tool
nsight - NVIDIA NSight, Eclipse Edition
nvvp - The NVIDIA CUDA Visual Profiler
nvprof - The NVIDIA CUDA Command-Line Profiler
cuda-memcheck - The NVIDIA CUDA Check Tool
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "cuda_runtime.h"

using namespace std;

/* Synchronize and abort with a message if the last CUDA operation failed. */
__host__ void check_CUDA_Error(const char *mensaje){
    cudaError_t error;
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
        exit(EXIT_FAILURE);
    }
}

/* Block-level sum reduction in shared memory.
 * Each block folds up to 2*blockDim.x input elements into a single partial
 * sum and writes it to Out[blockIdx.x]. Requires blockDim.x*sizeof(int)
 * bytes of dynamic shared memory (the smen argument mirrors that size but is
 * otherwise unused, kept for interface compatibility). */
__global__ void reduceSum(int *d_V, int *Out, int N, int smen){
    extern __shared__ int sdata[];
    int tid = threadIdx.x;
    int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;

    /* BUG FIX: the original guard (i < N/2) still read d_V[i + blockDim.x],
     * which is out of bounds under the host's launch configuration (one
     * thread per element). Guard each of the two loads independently. */
    int a = (i < N) ? d_V[i] : 0;
    int b = (i + blockDim.x < N) ? d_V[i + blockDim.x] : 0;
    sdata[tid] = a + b;
    __syncthreads();

    /* BUG FIX: the original power-of-two halving loop silently dropped the
     * middle element whenever the live width was odd; round the upper half
     * up so every element is folded in for any block size. */
    for (int width = blockDim.x; width > 1; ) {
        int half = (width + 1) / 2;
        if (tid < width / 2) {
            sdata[tid] += sdata[tid + half];
        }
        __syncthreads();
        width = half;
    }

    if (tid == 0) {
        Out[blockIdx.x] = sdata[0];
    }
}

int main(int argc, char** argv){
    if(argc != 2){
        cout << "Error de sintaxis: ejer8 <TAM>" << endl;
        return(EXIT_FAILURE);
    }
    const int TAM = atoi(argv[1]);
    if(TAM <= 0){
        cout << "Error de sintaxis: ejer8 <TAM>" << endl;
        return(EXIT_FAILURE);
    }

    //Punteros memoria host
    int *vector_entrada, *host_o;
    //Punteros memoria device
    int *device_i, *device_o;

    //Reserva de memoria host
    vector_entrada = new int[TAM];
    //Reserva de memoria device
    cudaMalloc((void **) &device_i, TAM * sizeof(int));
    check_CUDA_Error("Error en la reserva del device");

    //Inicialización vector
    for(int i = 0 ; i < TAM; ++i){
        vector_entrada[i] = 1;
    }
    cout << "VECTOR ENTRADA: " << endl;
    for(int i = 0 ; i < TAM; ++i){
        cout << vector_entrada[i] << " ";
    }

    //Copia de host a device
    cudaMemcpy(device_i, vector_entrada, sizeof(int)*TAM, cudaMemcpyHostToDevice);
    check_CUDA_Error("Error en la copia del host al device");

    /* BUG FIX: each block reduces 2*blockDim.x elements, so launch
     * ceil(TAM / (2*threads)) blocks; clamp the block size to the hardware
     * limit of 1024 threads (the original launched TAM threads per block,
     * which fails for TAM > 1024). */
    int threads = (TAM < 1024) ? TAM : 1024;
    dim3 threadsPerBlock(threads);
    dim3 numBlocks((TAM + 2 * threads - 1) / (2 * threads));
    int smemSize = threadsPerBlock.x * sizeof(int);

    cudaMalloc((void **) &device_o, numBlocks.x * sizeof(int));
    check_CUDA_Error("Error en la reserva del device");
    host_o = new int[numBlocks.x];

    reduceSum<<<numBlocks, threadsPerBlock, smemSize>>>(device_i, device_o, TAM, threadsPerBlock.x);
    cudaDeviceSynchronize();

    //Copio el resultado de device a host
    cudaMemcpy(host_o, device_o, sizeof(int)*numBlocks.x, cudaMemcpyDeviceToHost);

    //Suma final de los parciales por bloque en el host
    int suma = 0;
    cout << "\nVECTOR RESULTADO: " << endl;
    for(int i = 0 ; i < numBlocks.x; ++i){
        cout << host_o[i] << " ";
        suma += host_o[i];
    }
    cout << "\n.....................\nRESULTADO FINAL: " << suma << endl;

    delete [] vector_entrada;
    delete [] host_o;
    cudaFree(device_i);
    cudaFree(device_o);
    return EXIT_SUCCESS;
}
11,317
/* 125-point 3D stencil (machine-generated, fully unrolled code).
 * Each thread computes FOUR output planes -- out[k][j..j+3][i] -- which
 * matches the 4x-unrolled y dimension of the launch in host_code below.
 * NOTE(review): the repeated "+=" of the same input element (e.g. _t_9_
 * accumulating in[k-2][j][i-2] twice) appears to encode stencil coefficient
 * multiplicity emitted by the code generator -- confirm against the
 * generator before editing any individual term. The input/output arrays are
 * cast to fixed 516x516 planes, so the buffers are presumably allocated for
 * N == 516 -- TODO confirm at the call site. */
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)

/* Abort the process if the most recent CUDA API call or launch failed. */
void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } }

/* Stencil kernel: interior-only (indices 2..N-3 in every dimension); the
 * _t_NN_ temporaries are partial sums shared between the four j-planes,
 * each scaled by one of the stencil coefficients (0.217, 1.132, ...). */
__global__ void j3d125pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
//Determing the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 2; int i = max(i0,2) + (int)(threadIdx.x); int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 2; int j = max(j0,2) + 4*(int)(threadIdx.y); int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 2; int k = max(k0,2) + (int)(threadIdx.z); double (*in)[516][516] = (double (*)[516][516])t_in; double (*out)[516][516] = (double (*)[516][516])t_out; if (i>=2 && i<=N-3 && j>=2 && j<=N-3 && k>=2 && k<=N-3) { double _t_3_ = in[k][j-2][i-2]; _t_3_ += in[k][j-2][i+2]; _t_3_ += in[k-1][j-1][i]; double _t_11_ = in[k-1][j-1][i]; _t_3_ += in[k-1][j][i-1]; double _t_7_ = in[k-1][j][i-1]; double _t_19_ = in[k-1][j][i-1]; _t_3_ += in[k-1][j][i+1]; _t_7_ += in[k-1][j][i+1]; _t_19_ += in[k-1][j][i+1]; _t_3_ += in[k-1][j+1][i]; double _t_12_ = in[k-1][j+1][i]; double _t_17_ = in[k-1][j+1][i]; double _t_25_ = in[k-1][j+1][i]; _t_3_ += in[k][j+2][i-2]; _t_11_ += in[k][j+2][i-2]; double _t_16_ = in[k][j+2][i-2]; _t_25_ += in[k][j+2][i-2]; _t_3_ += in[k][j+2][i+2]; _t_11_ += in[k][j+2][i+2]; _t_16_ += in[k][j+2][i+2]; _t_25_ += in[k][j+2][i+2]; _t_3_ += in[k+1][j-1][i]; _t_11_ += in[k+1][j-1][i]; _t_3_ += in[k+1][j][i-1]; _t_7_ += in[k+1][j][i-1]; _t_19_ += in[k+1][j][i-1]; _t_3_ += in[k+1][j][i+1]; _t_7_ += in[k+1][j][i+1]; _t_19_ += in[k+1][j][i+1]; _t_3_ += in[k+1][j+1][i]; _t_12_ += in[k+1][j+1][i]; _t_17_ += in[k+1][j+1][i]; _t_25_ += in[k+1][j+1][i]; _t_3_ += in[k-2][j][i-2]; double _t_9_ = in[k-2][j][i-2]; _t_9_ += in[k-2][j][i-2]; double _t_14_ =
in[k-2][j][i-2]; double _t_15_ = in[k-2][j][i-2]; _t_3_ += in[k-2][j-2][i]; _t_3_ += in[k-2][j+2][i]; _t_11_ += in[k-2][j+2][i]; _t_11_ += in[k-2][j+2][i]; _t_16_ += in[k-2][j+2][i]; _t_16_ += in[k-2][j+2][i]; _t_25_ += in[k-2][j+2][i]; _t_25_ += in[k-2][j+2][i]; _t_3_ += in[k-2][j][i-2]; _t_3_ += in[k-2][j][i+2]; _t_9_ += in[k-2][j][i+2]; _t_9_ += in[k-2][j][i+2]; _t_14_ += in[k-2][j][i+2]; _t_15_ += in[k-2][j][i+2]; _t_3_ += in[k-2][j+2][i]; double outkc0jc0ic0 = 0.217 * _t_3_; double _t_2_ = in[k-1][j-2][i-2]; _t_2_ += in[k-1][j-2][i+2]; _t_2_ += in[k][j-2][i]; _t_2_ += in[k+1][j-2][i-2]; _t_2_ += in[k+1][j-2][i+2]; _t_2_ += in[k-1][j+2][i-2]; _t_12_ += in[k-1][j+2][i-2]; double _t_18_ = in[k-1][j+2][i-2]; double _t_26_ = in[k-1][j+2][i-2]; _t_2_ += in[k-1][j+2][i+2]; _t_12_ += in[k-1][j+2][i+2]; _t_18_ += in[k-1][j+2][i+2]; _t_26_ += in[k-1][j+2][i+2]; _t_2_ += in[k][j][i-2]; _t_11_ += in[k][j][i-2]; _t_17_ += in[k][j][i-2]; _t_2_ += in[k][j][i+2]; _t_11_ += in[k][j][i+2]; _t_17_ += in[k][j][i+2]; _t_2_ += in[k][j+2][i]; _t_7_ += in[k][j+2][i]; _t_18_ += in[k][j+2][i]; double _t_21_ = in[k][j+2][i]; _t_2_ += in[k+1][j+2][i-2]; _t_12_ += in[k+1][j+2][i-2]; _t_18_ += in[k+1][j+2][i-2]; _t_26_ += in[k+1][j+2][i-2]; _t_2_ += in[k+1][j+2][i+2]; _t_12_ += in[k+1][j+2][i+2]; _t_18_ += in[k+1][j+2][i+2]; _t_26_ += in[k+1][j+2][i+2]; _t_2_ += in[k-2][j][i]; _t_11_ += in[k-2][j][i]; _t_11_ += in[k-2][j][i]; _t_17_ += in[k-2][j][i]; _t_17_ += in[k-2][j][i]; _t_2_ += in[k-2][j-1][i+2]; _t_7_ += in[k-2][j-1][i+2]; double _t_8_ = in[k-2][j-1][i+2]; _t_2_ += in[k-2][j+2][i+1]; _t_12_ += in[k-2][j+2][i+1]; double _t_13_ = in[k-2][j+2][i+1]; _t_18_ += in[k-2][j+2][i+1]; _t_18_ += in[k-2][j+2][i+1]; _t_26_ += in[k-2][j+2][i+1]; double _t_27_ = in[k-2][j+2][i+1]; _t_2_ += in[k-2][j+2][i-1]; _t_12_ += in[k-2][j+2][i-1]; _t_13_ += in[k-2][j+2][i-1]; _t_18_ += in[k-2][j+2][i-1]; _t_18_ += in[k-2][j+2][i-1]; _t_26_ += in[k-2][j+2][i-1]; _t_27_ += in[k-2][j+2][i-1]; _t_2_ +=
in[k-2][j-1][i+2]; _t_2_ += in[k-2][j+1][i+2]; double _t_10_ = in[k-2][j+1][i+2]; _t_10_ += in[k-2][j+1][i+2]; _t_16_ += in[k-2][j+1][i+2]; _t_16_ += in[k-2][j+1][i+2]; _t_21_ += in[k-2][j+1][i+2]; double _t_22_ = in[k-2][j+1][i+2]; _t_2_ += in[k-2][j+1][i+2]; _t_2_ += in[k-2][j][i]; _t_2_ += in[k-2][j+1][i-2]; _t_10_ += in[k-2][j+1][i-2]; _t_10_ += in[k-2][j+1][i-2]; _t_16_ += in[k-2][j+1][i-2]; _t_16_ += in[k-2][j+1][i-2]; _t_21_ += in[k-2][j+1][i-2]; _t_22_ += in[k-2][j+1][i-2]; _t_2_ += in[k-2][j+2][i-1]; _t_2_ += in[k-2][j-1][i-2]; _t_7_ += in[k-2][j-1][i-2]; _t_8_ += in[k-2][j-1][i-2]; _t_2_ += in[k-2][j+1][i-2]; _t_2_ += in[k-2][j-1][i-2]; outkc0jc0ic0 += 1.132 * _t_2_; double _t_4_ = in[k-1][j-2][i]; _t_4_ += in[k][j-2][i-1]; _t_4_ += in[k][j-2][i+1]; _t_4_ += in[k+1][j-2][i]; _t_4_ += in[k-1][j][i-2]; _t_12_ += in[k-1][j][i-2]; _t_16_ += in[k-1][j][i-2]; _t_4_ += in[k-1][j][i+2]; _t_12_ += in[k-1][j][i+2]; _t_16_ += in[k-1][j][i+2]; _t_4_ += in[k-1][j+2][i]; _t_10_ += in[k-1][j+2][i]; _t_19_ += in[k-1][j+2][i]; double _t_24_ = in[k-1][j+2][i]; _t_4_ += in[k][j-1][i-2]; _t_10_ += in[k][j-1][i-2]; _t_4_ += in[k][j-1][i+2]; _t_10_ += in[k][j-1][i+2]; _t_4_ += in[k][j][i]; _t_7_ += in[k][j][i]; _t_16_ += in[k][j][i]; _t_4_ += in[k][j+1][i-2]; _t_9_ += in[k][j+1][i-2]; _t_18_ += in[k][j+1][i-2]; _t_24_ += in[k][j+1][i-2]; _t_4_ += in[k][j+1][i+2]; _t_9_ += in[k][j+1][i+2]; _t_18_ += in[k][j+1][i+2]; _t_24_ += in[k][j+1][i+2]; _t_4_ += in[k][j+2][i-1]; _t_12_ += in[k][j+2][i-1]; _t_14_ += in[k][j+2][i-1]; _t_26_ += in[k][j+2][i-1]; _t_4_ += in[k][j+2][i+1]; _t_12_ += in[k][j+2][i+1]; _t_14_ += in[k][j+2][i+1]; _t_26_ += in[k][j+2][i+1]; _t_4_ += in[k+1][j][i-2]; _t_12_ += in[k+1][j][i-2]; _t_16_ += in[k+1][j][i-2]; _t_4_ += in[k+1][j][i+2]; _t_12_ += in[k+1][j][i+2]; _t_16_ += in[k+1][j][i+2]; _t_4_ += in[k+1][j+2][i]; _t_10_ += in[k+1][j+2][i]; _t_19_ += in[k+1][j+2][i]; _t_24_ += in[k+1][j+2][i]; _t_4_ += in[k-2][j-1][i]; _t_10_ += in[k-2][j-1][i]; _t_10_ +=
in[k-2][j-1][i]; _t_4_ += in[k-2][j][i-1]; _t_12_ += in[k-2][j][i-1]; _t_13_ += in[k-2][j][i-1]; _t_16_ += in[k-2][j][i-1]; _t_16_ += in[k-2][j][i-1]; _t_4_ += in[k-2][j][i+1]; _t_12_ += in[k-2][j][i+1]; _t_13_ += in[k-2][j][i+1]; _t_16_ += in[k-2][j][i+1]; _t_16_ += in[k-2][j][i+1]; _t_4_ += in[k-2][j+1][i]; _t_9_ += in[k-2][j+1][i]; _t_9_ += in[k-2][j+1][i]; _t_18_ += in[k-2][j+1][i]; _t_18_ += in[k-2][j+1][i]; _t_24_ += in[k-2][j+1][i]; _t_24_ += in[k-2][j+1][i]; _t_4_ += in[k-2][j-1][i]; _t_4_ += in[k-2][j][i-1]; _t_4_ += in[k-2][j][i+1]; _t_4_ += in[k-2][j+1][i]; outkc0jc0ic0 += 2.13 * _t_4_; double _t_0_ = in[k-2][j-2][i-2]; _t_0_ += in[k-2][j-2][i+2]; double _t_1_ = in[k-2][j-2][i-2]; _t_1_ += in[k-2][j-2][i+2]; _t_0_ += in[k-2][j+2][i-2]; _t_9_ += in[k-2][j+2][i-2]; _t_9_ += in[k-2][j+2][i-2]; _t_17_ += in[k-2][j+2][i-2]; _t_17_ += in[k-2][j+2][i-2]; double _t_23_ = in[k-2][j+2][i-2]; _t_23_ += in[k-2][j+2][i-2]; _t_0_ += in[k-2][j+2][i+2]; _t_9_ += in[k-2][j+2][i+2]; _t_9_ += in[k-2][j+2][i+2]; _t_17_ += in[k-2][j+2][i+2]; _t_17_ += in[k-2][j+2][i+2]; _t_23_ += in[k-2][j+2][i+2]; _t_23_ += in[k-2][j+2][i+2]; _t_0_ += in[k-1][j-1][i-1]; _t_12_ += in[k-1][j-1][i-1]; _t_0_ += in[k-1][j-1][i+1]; _t_12_ += in[k-1][j-1][i+1]; _t_0_ += in[k-1][j+1][i-1]; _t_10_ += in[k-1][j+1][i-1]; _t_14_ += in[k-1][j+1][i-1]; _t_26_ += in[k-1][j+1][i-1]; _t_0_ += in[k-1][j+1][i+1]; _t_10_ += in[k-1][j+1][i+1]; _t_14_ += in[k-1][j+1][i+1]; _t_26_ += in[k-1][j+1][i+1]; _t_0_ += in[k][j-1][i]; _t_9_ += in[k][j-1][i]; _t_0_ += in[k][j][i-1]; _t_12_ += in[k][j][i-1]; _t_18_ += in[k][j][i-1]; _t_0_ += in[k][j][i+1]; _t_12_ += in[k][j][i+1]; _t_18_ += in[k][j][i+1]; _t_0_ += in[k][j+1][i]; _t_11_ += in[k][j+1][i]; _t_14_ += in[k][j+1][i]; _t_23_ += in[k][j+1][i]; _t_0_ += in[k+1][j-1][i-1]; _t_12_ += in[k+1][j-1][i-1]; _t_0_ += in[k+1][j-1][i+1]; _t_12_ += in[k+1][j-1][i+1]; _t_0_ += in[k+1][j+1][i-1]; _t_10_ += in[k+1][j+1][i-1]; _t_14_ += in[k+1][j+1][i-1]; _t_26_ +=
in[k+1][j+1][i-1]; _t_0_ += in[k+1][j+1][i+1]; _t_10_ += in[k+1][j+1][i+1]; _t_14_ += in[k+1][j+1][i+1]; _t_26_ += in[k+1][j+1][i+1]; outkc0jc0ic0 += 0.75 * _t_0_; _t_1_ += in[k-2][j+2][i-2]; _t_1_ += in[k-2][j+2][i+2]; outkc0jc0ic0 += 0.76 * _t_1_; double _t_5_ = in[k-1][j-2][i-1]; _t_5_ += in[k-1][j-2][i+1]; _t_5_ += in[k+1][j-2][i-1]; _t_5_ += in[k+1][j-2][i+1]; _t_5_ += in[k-2][j-1][i-1]; _t_9_ += in[k-2][j-1][i-1]; _t_9_ += in[k-2][j-1][i-1]; _t_5_ += in[k-2][j-1][i+1]; _t_9_ += in[k-2][j-1][i+1]; _t_9_ += in[k-2][j-1][i+1]; _t_5_ += in[k-2][j+1][i-1]; _t_11_ += in[k-2][j+1][i-1]; _t_11_ += in[k-2][j+1][i-1]; _t_19_ += in[k-2][j+1][i-1]; double _t_20_ = in[k-2][j+1][i-1]; _t_23_ += in[k-2][j+1][i-1]; _t_23_ += in[k-2][j+1][i-1]; _t_5_ += in[k-2][j+1][i+1]; _t_11_ += in[k-2][j+1][i+1]; _t_11_ += in[k-2][j+1][i+1]; _t_19_ += in[k-2][j+1][i+1]; _t_20_ += in[k-2][j+1][i+1]; _t_23_ += in[k-2][j+1][i+1]; _t_23_ += in[k-2][j+1][i+1]; _t_5_ += in[k-1][j-1][i-2]; _t_9_ += in[k-1][j-1][i-2]; _t_5_ += in[k-1][j-1][i+2]; _t_9_ += in[k-1][j-1][i+2]; _t_5_ += in[k-1][j][i]; _t_10_ += in[k-1][j][i]; _t_18_ += in[k-1][j][i]; _t_5_ += in[k-1][j+1][i-2]; _t_11_ += in[k-1][j+1][i-2]; _t_19_ += in[k-1][j+1][i-2]; _t_23_ += in[k-1][j+1][i-2]; _t_5_ += in[k-1][j+1][i+2]; _t_11_ += in[k-1][j+1][i+2]; _t_19_ += in[k-1][j+1][i+2]; _t_23_ += in[k-1][j+1][i+2]; _t_5_ += in[k-1][j+2][i-1]; _t_7_ += in[k-1][j+2][i-1]; _t_17_ += in[k-1][j+2][i-1]; _t_21_ += in[k-1][j+2][i-1]; _t_5_ += in[k-1][j+2][i+1]; _t_7_ += in[k-1][j+2][i+1]; _t_17_ += in[k-1][j+2][i+1]; _t_21_ += in[k-1][j+2][i+1]; _t_5_ += in[k][j-1][i-1]; _t_11_ += in[k][j-1][i-1]; _t_5_ += in[k][j-1][i+1]; _t_11_ += in[k][j-1][i+1]; _t_5_ += in[k][j+1][i-1]; _t_7_ += in[k][j+1][i-1]; _t_19_ += in[k][j+1][i-1]; _t_25_ += in[k][j+1][i-1]; _t_5_ += in[k][j+1][i+1]; _t_7_ += in[k][j+1][i+1]; _t_19_ += in[k][j+1][i+1]; _t_25_ += in[k][j+1][i+1]; _t_5_ += in[k+1][j-1][i-2]; _t_9_ += in[k+1][j-1][i-2]; _t_5_ += in[k+1][j-1][i+2]; _t_9_
+= in[k+1][j-1][i+2]; _t_5_ += in[k+1][j][i]; _t_10_ += in[k+1][j][i]; _t_18_ += in[k+1][j][i]; _t_5_ += in[k+1][j+1][i-2]; _t_11_ += in[k+1][j+1][i-2]; _t_19_ += in[k+1][j+1][i-2]; _t_23_ += in[k+1][j+1][i-2]; _t_5_ += in[k+1][j+1][i+2]; _t_11_ += in[k+1][j+1][i+2]; _t_19_ += in[k+1][j+1][i+2]; _t_23_ += in[k+1][j+1][i+2]; _t_5_ += in[k+1][j+2][i-1]; _t_7_ += in[k+1][j+2][i-1]; _t_17_ += in[k+1][j+2][i-1]; _t_21_ += in[k+1][j+2][i-1]; _t_5_ += in[k+1][j+2][i+1]; _t_7_ += in[k+1][j+2][i+1]; _t_17_ += in[k+1][j+2][i+1]; _t_21_ += in[k+1][j+2][i+1]; outkc0jc0ic0 += 0.331 * _t_5_; double _t_6_ = in[k-2][j-1][i-1]; _t_6_ += in[k-2][j-1][i+1]; _t_6_ += in[k-2][j+1][i-1]; _t_6_ += in[k-2][j+1][i+1]; outkc0jc0ic0 += 0.332 * _t_6_; _t_7_ += in[k-2][j+3][i-2]; _t_16_ += in[k-2][j+3][i-2]; _t_16_ += in[k-2][j+3][i-2]; _t_24_ += in[k-2][j+3][i-2]; _t_24_ += in[k-2][j+3][i-2]; _t_7_ += in[k-2][j+3][i+2]; _t_16_ += in[k-2][j+3][i+2]; _t_16_ += in[k-2][j+3][i+2]; _t_24_ += in[k-2][j+3][i+2]; _t_24_ += in[k-2][j+3][i+2]; double outkc0jp1ic0 = 0.75 * _t_7_; _t_8_ += in[k-2][j+3][i-2]; _t_8_ += in[k-2][j+3][i+2]; outkc0jp1ic0 += 0.76 * _t_8_; _t_9_ += in[k-2][j+3][i-1]; _t_19_ += in[k-2][j+3][i-1]; _t_25_ += in[k-2][j+3][i-1]; _t_25_ += in[k-2][j+3][i-1]; _t_9_ += in[k-2][j+3][i+1]; _t_19_ += in[k-2][j+3][i+1]; _t_25_ += in[k-2][j+3][i+1]; _t_25_ += in[k-2][j+3][i+1]; _t_9_ += in[k-2][j+3][i-1]; _t_9_ += in[k-2][j+3][i+1]; _t_9_ += in[k-1][j+3][i-2]; _t_19_ += in[k-1][j+3][i-2]; _t_25_ += in[k-1][j+3][i-2]; _t_9_ += in[k-1][j+3][i+2]; _t_19_ += in[k-1][j+3][i+2]; _t_25_ += in[k-1][j+3][i+2]; _t_9_ += in[k][j+3][i]; _t_14_ += in[k][j+3][i]; _t_25_ += in[k][j+3][i]; _t_9_ += in[k+1][j+3][i-2]; _t_19_ += in[k+1][j+3][i-2]; _t_25_ += in[k+1][j+3][i-2]; _t_9_ += in[k+1][j+3][i+2]; _t_19_ += in[k+1][j+3][i+2]; _t_25_ += in[k+1][j+3][i+2]; outkc0jp1ic0 += 1.132 * _t_9_; _t_20_ += in[k-2][j+3][i-1]; _t_20_ += in[k-2][j+3][i+1]; double outkc0jp2ic0 = 0.332 * _t_20_; _t_10_ +=
in[k-2][j+3][i]; _t_18_ += in[k-2][j+3][i]; _t_18_ += in[k-2][j+3][i]; _t_23_ += in[k-2][j+3][i]; _t_23_ += in[k-2][j+3][i]; _t_10_ += in[k-2][j+3][i]; _t_10_ += in[k][j+3][i-2]; _t_18_ += in[k][j+3][i-2]; _t_23_ += in[k][j+3][i-2]; _t_10_ += in[k][j+3][i+2]; _t_18_ += in[k][j+3][i+2]; _t_23_ += in[k][j+3][i+2]; outkc0jp1ic0 += 0.217 * _t_10_; _t_11_ += in[k-1][j+3][i]; _t_17_ += in[k-1][j+3][i]; _t_26_ += in[k-1][j+3][i]; _t_11_ += in[k][j+3][i-1]; _t_19_ += in[k][j+3][i-1]; _t_21_ += in[k][j+3][i-1]; _t_11_ += in[k][j+3][i+1]; _t_19_ += in[k][j+3][i+1]; _t_21_ += in[k][j+3][i+1]; _t_11_ += in[k+1][j+3][i]; _t_17_ += in[k+1][j+3][i]; _t_26_ += in[k+1][j+3][i]; outkc0jp1ic0 += 2.13 * _t_11_; _t_12_ += in[k-1][j+3][i-1]; _t_14_ += in[k-1][j+3][i-1]; _t_24_ += in[k-1][j+3][i-1]; _t_12_ += in[k-1][j+3][i+1]; _t_14_ += in[k-1][j+3][i+1]; _t_24_ += in[k-1][j+3][i+1]; _t_12_ += in[k+1][j+3][i-1]; _t_14_ += in[k+1][j+3][i-1]; _t_24_ += in[k+1][j+3][i-1]; _t_12_ += in[k+1][j+3][i+1]; _t_14_ += in[k+1][j+3][i+1]; _t_24_ += in[k+1][j+3][i+1]; outkc0jp1ic0 += 0.331 * _t_12_; outkc0jp1ic0 += 0.332 * _t_13_; _t_14_ += in[k-2][j+4][i-2]; _t_23_ += in[k-2][j+4][i-2]; _t_23_ += in[k-2][j+4][i-2]; _t_14_ += in[k-2][j+4][i+2]; _t_23_ += in[k-2][j+4][i+2]; _t_23_ += in[k-2][j+4][i+2]; outkc0jp2ic0 += 0.75 * _t_14_; _t_15_ += in[k-2][j+4][i-2]; _t_15_ += in[k-2][j+4][i+2]; outkc0jp2ic0 += 0.76 * _t_15_; _t_16_ += in[k-2][j+4][i-1]; _t_16_ += in[k-2][j+4][i+1]; _t_16_ += in[k-2][j+4][i-1]; _t_16_ += in[k-2][j+4][i+1]; _t_16_ += in[k-1][j+4][i-2]; _t_16_ += in[k-1][j+4][i+2]; _t_16_ += in[k][j+4][i]; _t_21_ += in[k][j+4][i]; _t_16_ += in[k+1][j+4][i-2]; _t_16_ += in[k+1][j+4][i+2]; outkc0jp2ic0 += 1.132 * _t_16_; _t_27_ += in[k-2][j+4][i-1]; _t_27_ += in[k-2][j+4][i+1]; double outkc0jp3ic0 = 0.332 * _t_27_; _t_17_ += in[k-2][j+4][i]; _t_17_ += in[k-2][j+4][i]; _t_17_ += in[k][j+4][i-2]; _t_17_ += in[k][j+4][i+2]; outkc0jp2ic0 += 0.217 * _t_17_; _t_25_ += in[k-2][j+4][i]; _t_25_ +=
in[k-2][j+4][i]; _t_25_ += in[k][j+4][i-2]; _t_25_ += in[k][j+4][i+2]; _t_25_ += in[k-1][j+5][i]; _t_25_ += in[k][j+5][i-1]; _t_25_ += in[k][j+5][i+1]; _t_25_ += in[k+1][j+5][i]; outkc0jp3ic0 += 2.13 * _t_25_; _t_18_ += in[k-1][j+4][i]; _t_24_ += in[k-1][j+4][i]; _t_18_ += in[k][j+4][i-1]; _t_18_ += in[k][j+4][i+1]; _t_18_ += in[k+1][j+4][i]; _t_24_ += in[k+1][j+4][i]; outkc0jp2ic0 += 2.13 * _t_18_; _t_19_ += in[k-1][j+4][i-1]; _t_21_ += in[k-1][j+4][i-1]; _t_19_ += in[k-1][j+4][i+1]; _t_21_ += in[k-1][j+4][i+1]; _t_19_ += in[k+1][j+4][i-1]; _t_21_ += in[k+1][j+4][i-1]; _t_19_ += in[k+1][j+4][i+1]; _t_21_ += in[k+1][j+4][i+1]; outkc0jp2ic0 += 0.331 * _t_19_; _t_26_ += in[k-1][j+4][i-2]; _t_26_ += in[k-1][j+4][i+2]; _t_26_ += in[k][j+4][i-1]; _t_26_ += in[k][j+4][i+1]; _t_26_ += in[k+1][j+4][i-2]; _t_26_ += in[k+1][j+4][i+2]; _t_26_ += in[k-1][j+5][i-1]; _t_26_ += in[k-1][j+5][i+1]; _t_26_ += in[k+1][j+5][i-1]; _t_26_ += in[k+1][j+5][i+1]; _t_26_ += in[k-2][j+4][i-1]; _t_26_ += in[k-2][j+4][i+1]; outkc0jp3ic0 += 0.331 * _t_26_; _t_21_ += in[k-2][j+5][i-2]; _t_21_ += in[k-2][j+5][i+2]; _t_22_ += in[k-2][j+5][i-2]; _t_22_ += in[k-2][j+5][i+2]; outkc0jp3ic0 += 0.75 * _t_21_; outkc0jp3ic0 += 0.76 * _t_22_; _t_23_ += in[k-2][j+5][i+1]; outkc0jp3ic0 += 1.132 * _t_23_; _t_24_ += in[k-2][j+5][i]; outkc0jp3ic0 += 0.217 * _t_24_; out[k][j][i] = outkc0jc0ic0; out[k][j+1][i] = outkc0jp1ic0; out[k][j+2][i] = outkc0jp2ic0; out[k][j+3][i] = outkc0jp3ic0; } }

/* Host wrapper: allocates device buffers, copies the input volume, launches
 * the kernel over the interior (N-4 points per dimension, with y unrolled
 * 4x in the grid), then copies the result back. The final cudaMemcpy is
 * synchronous, so no explicit cudaDeviceSynchronize is needed.
 * NOTE(review): kernel launch errors are never checked here (check_error is
 * only called after the allocations) -- consider adding a check. */
extern "C" void host_code (double *h_in, double *h_out, int N) { double *in; cudaMalloc (&in, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for in\n"); cudaMemcpy (in, h_in, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *out; cudaMalloc (&out, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for out\n"); dim3 blockconfig (16, 4, 4); dim3 gridconfig (ceil(N-4, blockconfig.x), ceil(N-4, 4*blockconfig.y), ceil(N-4, blockconfig.z)); j3d125pt<<<gridconfig, blockconfig>>> (in,
out, N); cudaMemcpy (h_out, out, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaFree (in); cudaFree (out); }
11,318
/* #include "SDTorus.cuh" #include "cuda_runtime.h" SDTorus::SDTorus(float outer, float radius, glm::vec3 position) : dimensions(glm::vec2(outer, radius)), position(position) { } inline DistancePrimitive* SDTorus::copyToDevice() { SDTorus* deviceTorus; cudaMalloc((void **)&deviceTorus, sizeof(SDTorus)); cudaMemcpy(deviceTorus, this, sizeof(SDTorus), cudaMemcpyHostToDevice); return deviceTorus; } inline float SDTorus::distanceFromPoint(glm::vec3 point) { point -= position; glm::vec2 q = glm::vec2(GLMUtil::length(glm::vec2(point.x, point.y)) - dimensions.x, point.z); return GLMUtil::length(q) - dimensions.y; } inline AABB SDTorus::calculateBoundingVolume() { return AABB(glm::vec2(0, 0), glm::vec2(0, 0)); } */
11,319
#include "includes.h"

//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;

/* Negate every entry of gra in place. Uses a grid-stride loop so any
 * launch configuration covers all `size` elements. */
__global__ void minus_gra(double* gra, int size)
{
    const int stride = blockDim.x * gridDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < size) {
        gra[idx] = 0.0 - gra[idx];
        idx += stride;
    }
}
11,320
//xfail:NOT_ALL_VERIFIED
//--blockDim=128 --gridDim=128 --warp-sync=32 --no-inline
//kernel.cu: error: possible read-write race on A

// NOTE: the inter-block race below is INTENTIONAL -- each thread reads the
// element owned by the matching thread of the next block while that thread
// may be writing its own element. This is an expected-failure test case for
// a GPU verifier (see the xfail header above), so the race must not be
// "fixed". (With gridDim=128 the last block also reads one block past the
// end of A -- presumably part of the test setup; confirm against the
// verifier's test harness.)
__global__ void foo(int* A) { A[ blockIdx.x*blockDim.x + threadIdx.x ] += (A[ (blockIdx.x + 1)*blockDim.x + threadIdx.x ]); }
11,321
/* Host-side code to perform counting sort
 * Author: Naga Kandasamy
 * Date modified: May 27, 2020
 *
 * Compile as follows: make clean && make
 */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include "counting_sort_kernel.cu"

/* Shared by main() and compute_on_device() for coarse wall-clock timing. */
struct timeval start, stop;

/* Do not change the range value */
#define MIN_VALUE 0
#define MAX_VALUE 255

/* Uncomment to spit out debug info */
// #define DEBUG

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}

extern "C" int counting_sort_gold(int *, int *, int, int);
int rand_int(int, int);
void print_array(int *, int);
void print_min_and_max_in_array(int *, int);
void compute_on_device(int *, int *, int, int);
int check_if_sorted(int *, int);
int compare_results(int *, int *, int);

int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("Usage: %s num-elements\n", argv[0]);
        exit(EXIT_FAILURE);
    }

    int num_elements = atoi(argv[1]);
    int range = MAX_VALUE - MIN_VALUE;
    int *input_array, *sorted_array_reference, *sorted_array_d;

    /* Populate input array with random integers between [0, RANGE] */
    printf("Generating input array with %d elements in the range 0 to %d\n", num_elements, range);
    input_array = (int *)malloc(num_elements * sizeof(int));
    if (input_array == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    srand(time(NULL));
    int i;
    for (i = 0; i < num_elements; i++)
        input_array[i] = rand_int(MIN_VALUE, MAX_VALUE);

#ifdef DEBUG
    print_array(input_array, num_elements);
    print_min_and_max_in_array(input_array, num_elements);
#endif

    /* Sort elements in input array using reference implementation.
     * The result is placed in sorted_array_reference. */
    printf("\nSorting array on CPU\n");
    int status;
    sorted_array_reference = (int *)malloc(num_elements * sizeof(int));
    if (sorted_array_reference == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    /* BUG FIX: memset takes a BYTE count; the original zeroed only
     * num_elements bytes, leaving most of the int buffer uninitialized. */
    memset(sorted_array_reference, 0, num_elements * sizeof(int));

    gettimeofday(&start, NULL);
    status = counting_sort_gold(input_array, sorted_array_reference, num_elements, range);
    gettimeofday(&stop, NULL);
    fprintf(stderr, "Gold Execution time = %fs\n", (float) (stop.tv_sec - start.tv_sec\
            + (stop.tv_usec - start.tv_usec)/(float)1000000));
    if (status == -1) {
        exit(EXIT_FAILURE);
    }

    status = check_if_sorted(sorted_array_reference, num_elements);
    if (status == -1) {
        printf("Error sorting the input array using the reference code\n");
        exit(EXIT_FAILURE);
    }
    printf("Counting sort was successful on the CPU\n");

#ifdef DEBUG
    print_array(sorted_array_reference, num_elements);
#endif

    /* Sort the array on the GPU; result placed in sorted_array_d. */
    printf("\nSorting array on GPU\n");
    sorted_array_d = (int *)malloc(num_elements * sizeof(int));
    if (sorted_array_d == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    /* BUG FIX: byte count, as above. */
    memset(sorted_array_d, 0, num_elements * sizeof(int));

    /* compute_on_device() records start/stop around the kernel launches. */
    compute_on_device(input_array, sorted_array_d, num_elements, range);
    fprintf(stderr, "GPU Execution time = %fs\n", (float) (stop.tv_sec - start.tv_sec\
            + (stop.tv_usec - start.tv_usec)/(float)1000000));

#ifdef DEBUG
    print_array(sorted_array_d, num_elements);
#endif

    /* Check the two results for correctness */
    printf("\nComparing CPU and GPU results\n");
    status = compare_results(sorted_array_reference, sorted_array_d, num_elements);
    if (status == 0)
        printf("Test passed\n");
    else
        printf("Test failed\n");

    free(input_array);
    free(sorted_array_reference);
    free(sorted_array_d);
    exit(EXIT_SUCCESS);
}

/* GPU implementation of counting sort: build a histogram of the input,
 * then scan + scatter it in a single block (kernels live in
 * counting_sort_kernel.cu). All CUDA calls are checked via gpuErrchk. */
void compute_on_device(int *input_array, int *sorted_array, int num_elements, int range)
{
    /* Device copy of the unsorted input */
    int *d_input_array;
    int size = num_elements * sizeof(int);
    gpuErrchk(cudaMalloc((void **)&d_input_array, size));
    gpuErrchk(cudaMemcpy(d_input_array, input_array, size, cudaMemcpyHostToDevice));

    /* Device buffer for the sorted output */
    int *d_sorted_array;
    gpuErrchk(cudaMalloc((void **)&d_sorted_array, num_elements * sizeof(int)));

    /* Histogram and scan buffers: one bin per value in [0, range] */
    int *d_histogram;
    size = (range + 1) * sizeof(int);
    gpuErrchk(cudaMalloc((void **)&d_histogram, size));
    gpuErrchk(cudaMemset(d_histogram, 0, size));

    int *d_scan;
    gpuErrchk(cudaMalloc((void **)&d_scan, size));
    gpuErrchk(cudaMemset(d_scan, 0, size));

    gettimeofday(&start, NULL);

    dim3 threads(range + 1);   /* one thread per histogram bin */
    dim3 grid(40, 1);
    histogram_kernel_fast<<<grid, threads, size>>>(d_input_array, d_histogram, num_elements, (range + 1));
    gpuErrchk(cudaGetLastError());
    cudaDeviceSynchronize();

    grid.x = 1;                /* the scan/scatter kernel runs in a single block */
    counting_sort_kernel<<<grid, threads, 2 * size>>>(d_input_array, d_sorted_array, d_histogram, d_scan, num_elements, range);
    gpuErrchk(cudaGetLastError());
    /* BUG FIX: kernel launches are asynchronous; wait for completion before
     * taking the stop timestamp, otherwise only launch overhead is timed. */
    gpuErrchk(cudaDeviceSynchronize());

    gettimeofday(&stop, NULL);

    gpuErrchk(cudaMemcpy(sorted_array, d_sorted_array, num_elements * sizeof(int), cudaMemcpyDeviceToHost));

    cudaFree(d_input_array);
    cudaFree(d_sorted_array);
    cudaFree(d_histogram);
    cudaFree(d_scan);
    return;
}

/* Return 0 if the array is in non-decreasing order, -1 otherwise. */
int check_if_sorted(int *array, int num_elements)
{
    int status = 0;
    int i;
    for (i = 1; i < num_elements; i++) {
        if (array[i - 1] > array[i]) {
            status = -1;
            break;
        }
    }
    return status;
}

/* Return 0 if the two arrays are element-wise identical, -1 otherwise. */
int compare_results(int *array_1, int *array_2, int num_elements)
{
    int status = 0;
    int i;
    for (i = 0; i < num_elements; i++) {
        if (array_1[i] != array_2[i]) {
            status = -1;
            break;
        }
    }
    return status;
}

/* Return random integer between [min, max] */
int rand_int(int min, int max)
{
    float r = rand()/(float)RAND_MAX;
    return (int)floorf(min + (max - min) * r);
}

/* Print given array */
void print_array(int *this_array, int num_elements)
{
    printf("Array: ");
    int i;
    for (i = 0; i < num_elements; i++)
        printf("%d ", this_array[i]);
    printf("\n");
    return;
}

/* Print min and max values in given array */
void print_min_and_max_in_array(int *this_array, int num_elements)
{
    int i;
    int current_min = INT_MAX;
    for (i = 0; i < num_elements; i++)
        if (this_array[i] < current_min)
            current_min = this_array[i];

    int current_max = INT_MIN;
    for (i = 0; i < num_elements; i++)
        if (this_array[i] > current_max)
            current_max = this_array[i];

    printf("Minimum value in the array = %d\n", current_min);
    printf("Maximum value in the array = %d\n", current_max);
    return;
}
11,322
#include "point.cuh"
#include <cmath>   // sqrt

Point::Point() {}

/* Construct a point at (x_, y_); border_x / border_y flag whether the point
 * lies on a domain border in the corresponding direction. */
Point::Point(double x_, double y_, bool border_x, bool border_y):
    x(x_), y(y_), borderX(border_x), borderY(border_y) {}

double Point::getX() const { return x; }

double Point::getY() const { return y; }

bool Point::isBorderX() const { return borderX; }

bool Point::isBorderY() const { return borderY; }

/* Euclidean distance between this point and `other`. */
double Point::distance(Point &other)
{
    double deltaX = x - other.x;
    double deltaY = y - other.y;
    /* FIX: use sqrt() instead of pow(..., 0.5) -- clearer and avoids the
     * general-purpose pow path for a plain square root. */
    return sqrt(deltaX * deltaX + deltaY * deltaY);
}

/* Translate the point by (deltaX, deltaY). */
void Point::move(double deltaX, double deltaY)
{
    x += deltaX;
    y += deltaY;
}

/* Exact floating-point equality on both coordinates; device-side only. */
__device__ bool Point::operator==(const Point& other) const
{
    return x == other.x && y == other.y;
}

__device__ bool Point::operator!=(const Point &other) const
{
    return !(*this == other);
}

/* Stream a point as "(x, y)". */
ostream& operator<<(ostream& os, const Point &p)
{
    os << "(" << p.x << ", " << p.y << ")";
    return os;
}
11,323
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// 0/1 knapsack solved by breadth-first branch-and-bound.
// Items are sorted by value density (price/weight, best first) so that the
// greedy scan in Bound* yields a valid fractional upper bound U and an
// integral lower bound L for each search node.
struct TItem {
    int price;
    int weight;
    // Orders items by decreasing price/weight density.
    bool operator<(const TItem& other) const {
        return (double)price / weight > (double)other.price / other.weight;
    }
};

const int THREADS_PER_BLOCK = 192;

// Branching step for node e at level k (CPU version).
// Each node is described by: w[e]/p[e] = weight/price accumulated by the greedy
// bound, s[e] = first item index not (fully) taken by the greedy scan.
// If item k was part of the greedy solution (k < s[e]) the child node excludes
// it, so its weight/price are removed. Otherwise the child equals the parent;
// s[e] is bumped and U_old[e] is zeroed — presumably so the redundant node is
// pruned by the U <= record partition below (TODO confirm intent).
void BranchCPU(ssize_t e, int* w, int* p, int* s, int* U_old, int k, int* weight, int* price) {
    int s_e = s[e];
    if (k < s_e) {
        w[e] -= weight[k];
        p[e] -= price[k];
    } else {
        ++s[e];
        U_old[e] = 0;
    }
}

// GPU twin of BranchCPU: one thread per node, guarded against the grid tail.
__global__ void BranchGPU(int* w, int* p, int* s, int* U_old, int k, int* weight, int* price, ssize_t q) {
    ssize_t e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= q) {
        return;
    }
    int s_e = s[e];
    if (k < s_e) {
        w[e] -= weight[k];
        p[e] -= price[k];
    } else {
        ++s[e];
        U_old[e] = 0;
    }
}

// Bounding step for node e (CPU version).
// First loop: greedily add whole items from s[e] while they fit (index n is a
// zero-weight/zero-price sentinel, so the loop runs to i <= n safely).
// U[e] = greedy value plus the fractional remainder of the first item that did
// not fit (classic fractional-relaxation upper bound).
// Second loop: continue adding only items that fit entirely -> integral
// feasible solution, stored as lower bound L[e].
void BoundCPU(ssize_t e, int* w, int* p, int* s, int* L, int* U, int k, int n, int W, int* weight, int* price) {
    int i = s[e], w_e = w[e], p_e = p[e], weight_i = 0, price_i = 0;
    for (; i <= n; ++i) {
        weight_i = weight[i];
        price_i = price[i];
        if (w_e + weight_i <= W) {
            w_e += weight_i;
            p_e += price_i;
        } else {
            break;
        }
    }
    // Fractional top-up; weight_i == 0 means the sentinel was reached (all
    // remaining items fit), so there is nothing fractional to add.
    U[e] = p_e + (weight_i ? (W - w_e) * price_i / weight_i : 0);
    w[e] = w_e;
    p[e] = p_e;
    s[e] = i;
    for (; i < n; ++i) {
        weight_i = weight[i];
        price_i = price[i];
        if (w_e + weight_i <= W) {
            w_e += weight_i;
            p_e += price_i;
        }
    }
    L[e] = p_e;
}

// GPU twin of BoundCPU: one thread per node, guarded against the grid tail.
__global__ void BoundGPU(int* w, int* p, int* s, int* L, int* U, int k, int n, int W, int* weight, int* price, ssize_t q) {
    ssize_t e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= q) {
        return;
    }
    int i = s[e], w_e = w[e], p_e = p[e], weight_i = 0, price_i = 0;
    for (; i <= n; ++i) {
        weight_i = weight[i];
        price_i = price[i];
        if (w_e + weight_i <= W) {
            w_e += weight_i;
            p_e += price_i;
        } else {
            break;
        }
    }
    U[e] = p_e + (weight_i ? (W - w_e) * price_i / weight_i : 0);
    w[e] = w_e;
    p[e] = p_e;
    s[e] = i;
    for (; i < n; ++i) {
        weight_i = weight[i];
        price_i = price[i];
        if (w_e + weight_i <= W) {
            w_e += weight_i;
            p_e += price_i;
        }
    }
    L[e] = p_e;
}

// Driver: reads "n W" then n "price weight" pairs from input_file, runs n
// levels of branch-and-bound (doubling the node list each level, pruning
// nodes whose upper bound cannot beat the best known value `record`), and
// writes the optimal value to output_file. The GPU path is only used once the
// frontier exceeds 5,000,000 nodes; below that the CPU path is cheaper.
int main(int argc, char* argv[]) {
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " input_file output_file" << std::endl;
        return 0;
    }
    std::ifstream fin(argv[1]);
    std::ofstream fout(argv[2]);
    int n, W;
    fin >> n >> W;
    std::vector<TItem> items(n);
    for (int i = 0; i < n; ++i) {
        fin >> items[i].price >> items[i].weight;
    }
    // Density order is required by the bound computation above.
    std::sort(items.begin(), items.end());
    int* weight = (int*)malloc((n + 1) * sizeof(*weight));
    int* price = (int*)malloc((n + 1) * sizeof(*price));
    for (int i = 0; i < n; ++i) {
        weight[i] = items[i].weight;
        price[i] = items[i].price;
    }
    // Sentinel item: lets the greedy loops run to i <= n without a bounds check.
    weight[n] = price[n] = 0;
    std::chrono::high_resolution_clock::time_point total_start = std::chrono::high_resolution_clock::now();
    int *cuda_weight = nullptr, *cuda_price = nullptr;  // lazily uploaded item tables
    ssize_t q = 1;  // current frontier size (number of live nodes)
    int* w = (int*)malloc(q * sizeof(*w));
    int* p = (int*)malloc(q * sizeof(*p));
    int* s = (int*)malloc(q * sizeof(*s));
    int* L = (int*)malloc(q * sizeof(*L));
    int* U = (int*)malloc(q * sizeof(*U));
    // Root node: nothing taken yet.
    w[0] = p[0] = s[0]= 0;
    BoundCPU(0, w, p, s, L, U, 0, n, W, weight, price);
    int record = L[0];  // best feasible value found so far
    free(L);  // L is re-allocated per level; only `record` is carried forward
    for (int k = 0; k < n; ++k) {
        std::cout << "Step " << k + 1 << ", q = " << q << std::endl;
        if (q > 5000000) {
            // ---- GPU path ----
            if (cuda_weight == nullptr) {
                // One-time upload of the (sorted) item tables.
                cudaMalloc(&cuda_weight, (n + 1) * sizeof(*cuda_weight));
                cudaMalloc(&cuda_price, (n + 1) * sizeof(*cuda_price));
                cudaMemcpy(cuda_weight, weight, (n + 1) * sizeof(*cuda_weight), cudaMemcpyHostToDevice);
                cudaMemcpy(cuda_price, price, (n + 1) * sizeof(*cuda_price), cudaMemcpyHostToDevice);
            }
            int *w_new, *p_new, *s_new, *L_new, *U_new, *U_old;
            cudaMalloc(&w_new, q * sizeof(*w_new));
            cudaMalloc(&p_new, q * sizeof(*p_new));
            cudaMalloc(&s_new, q * sizeof(*s_new));
            cudaMalloc(&U_old, q * sizeof(*U_old));
            // Children start as copies of their parents.
            cudaMemcpy(w_new, w, q * sizeof(*w), cudaMemcpyHostToDevice);
            cudaMemcpy(p_new, p, q * sizeof(*p), cudaMemcpyHostToDevice);
            cudaMemcpy(s_new, s, q * sizeof(*s), cudaMemcpyHostToDevice);
            cudaMemcpy(U_old, U, q * sizeof(*U), cudaMemcpyHostToDevice);
            const ssize_t q_block = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
            BranchGPU<<<q_block, THREADS_PER_BLOCK>>>(w_new, p_new, s_new, U_old, k, cuda_weight, cuda_price, q);
            cudaDeviceSynchronize();
            // Branch may have zeroed some parents' U; bring those back.
            cudaMemcpy(U, U_old, q * sizeof(*U), cudaMemcpyDeviceToHost);
            cudaFree(U_old);
            cudaMalloc(&L_new, q * sizeof(*L_new));
            cudaMalloc(&U_new, q * sizeof(*U_new));
            BoundGPU<<<q_block, THREADS_PER_BLOCK>>>(w_new, p_new, s_new, L_new, U_new, k, n, W, cuda_weight, cuda_price, q);
            cudaDeviceSynchronize();
            // Fold the children's lower bounds into the incumbent.
            int *L_new_CPU = (int*)malloc(q * sizeof(*L_new_CPU));
            cudaMemcpy(L_new_CPU, L_new, q * sizeof(*L_new), cudaMemcpyDeviceToHost);
            cudaFree(L_new);
            for (ssize_t e = 0; e < q; ++e) {
                record = std::max(record, L_new_CPU[e]);
            }
            free(L_new_CPU);
            // Append the q children behind the q parents (frontier doubles).
            w = (int*)realloc(w, 2 * q * sizeof(*w));
            p = (int*)realloc(p, 2 * q * sizeof(*p));
            s = (int*)realloc(s, 2 * q * sizeof(*s));
            U = (int*)realloc(U, 2 * q * sizeof(*U));
            cudaMemcpy(w + q, w_new, q * sizeof(*w), cudaMemcpyDeviceToHost);
            cudaMemcpy(p + q, p_new, q * sizeof(*p), cudaMemcpyDeviceToHost);
            cudaMemcpy(s + q, s_new, q * sizeof(*s), cudaMemcpyDeviceToHost);
            cudaMemcpy(U + q, U_new, q * sizeof(*U), cudaMemcpyDeviceToHost);
            cudaFree(w_new);
            cudaFree(p_new);
            cudaFree(s_new);
            cudaFree(U_new);
        } else {
            // ---- CPU path: same algorithm, children stored in-place at [q, 2q). ----
            w = (int*)realloc(w, 2 * q * sizeof(*w));
            p = (int*)realloc(p, 2 * q * sizeof(*p));
            s = (int*)realloc(s, 2 * q * sizeof(*s));
            memcpy(w + q, w, q * sizeof(*w));
            memcpy(p + q, p, q * sizeof(*p));
            memcpy(s + q, s, q * sizeof(*s));
            for (ssize_t e = 0; e < q; ++e) {
                // Note: U (parents' bounds) is passed as U_old, matching the GPU path.
                BranchCPU(e, w + q, p + q, s + q, U, k, weight, price);
            }
            U = (int*)realloc(U, 2 * q * sizeof(*U));
            int* L_new = (int*)malloc(q * sizeof(*L_new));
            for (ssize_t e = 0; e < q; ++e) {
                BoundCPU(e, w + q, p + q, s + q, L_new, U + q, k, n, W, weight, price);
                record = std::max(record, L_new[e]);
            }
            free(L_new);
        }
        // Two-pointer compaction over all 2q nodes: keep only nodes whose
        // upper bound still beats the incumbent (U > record); survivors are
        // packed to the front and q becomes the new frontier size.
        for (ssize_t i = 0, j = 2 * q - 1; ;) {
            while (i < 2 * q && U[i] > record) {
                ++i;
            }
            while (j >= 0 && U[j] <= record) {
                --j;
            }
            if (i >= j) {
                q = j + 1;
                break;
            }
            w[i] = w[j];
            p[i] = p[j];
            s[i] = s[j];
            std::swap(U[i], U[j]);
        }
        if (q == 0) {
            break;  // every node pruned — record is optimal
        }
    }
    free(w);
    free(p);
    free(s);
    free(U);
    free(weight);
    free(price);
    if (cuda_weight != nullptr) {
        cudaFree(cuda_weight);
        cudaFree(cuda_price);
    }
    std::chrono::high_resolution_clock::time_point total_end = std::chrono::high_resolution_clock::now();
    double total_time = std::chrono::duration_cast<std::chrono::duration<double>>(total_end - total_start).count();
    std::cout << "Total time: " << total_time << std::endl;
    fout << record << std::endl;
    return 0;
}
11,324
// One anti-diagonal step of dynamic time warping between point sets X and Y
// (each point has `dim` floats), restricted to the rectangle `box`
// [box[0]..box[1]] x [box[2]..box[3]].
// d0/d1/d2 hold DTW costs of the two previous diagonals and the current one;
// csm0/csm1/csm2 are the matching cross-similarity (Euclidean distance)
// diagonals. `i` is the diagonal index, `reverse` flips traversal direction,
// and with debug == 1 the full U/L/UL/S matrices (size M x N) are recorded.
// Launch: 1D grid with at least diagLen threads; one thread per diagonal cell.
__global__ void DTW_Diag_Step(float* d0, float* d1, float* d2, float* csm0, float* csm1, float* csm2, float* X, float* Y, int dim, int diagLen, int* box, int reverse, int i, int debug, float* U, float* L, float* UL, float* S) {
    //Other local variables
    int i1, i2, j1, j2; // Endpoints of the diagonal
    int thisi, thisj;   // Current indices on the diagonal
    // Optimal score and particular score for up/right/left
    float score, left, up, diag;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int xi, yj;
    //Process each diagonal
    score = -1;  // -1 marks "no value" for cells off the diagonal
    if (idx < diagLen) {
        // Figure out indices in X and Y on diagonal
        int M = box[1] - box[0] + 1;  // rows in the box
        int N = box[3] - box[2] + 1;  // columns in the box
        // (i1, j1): top endpoint of diagonal i; clamp once i passes the last row.
        i1 = i;
        j1 = 0;
        if (i >= M) {
            i1 = M-1;
            j1 = i - (M-1);
        }
        // (i2, j2): bottom endpoint; clamp once i passes the last column.
        j2 = i;
        i2 = 0;
        if (j2 >= N) {
            j2 = N-1;
            i2 = i - (N-1);
        }
        // Walk down the diagonal: thread idx handles cell (i1-idx, j1+idx).
        thisi = i1 - idx;
        thisj = j1 + idx;
        if (thisi >= i2 && thisj <= j2) {
            xi = thisi;
            yj = thisj;
            if (reverse == 1) {
                // Mirror indices when sweeping from the opposite corner.
                xi = M-1-xi;
                yj = N-1-yj;
            }
            xi += box[0];
            yj += box[2];
            // Step 1: Update csm2 — Euclidean distance between X[xi] and Y[yj].
            // NOTE(review): sqrt() here is the double overload on a float
            // operand; sqrtf() would avoid the promotion (behavior kept as-is).
            csm2[idx] = 0.0;
            for (int d = 0; d < dim; d++) {
                float diff = X[xi*dim+d] - Y[yj*dim+d];
                csm2[idx] += diff*diff;
            }
            csm2[idx] = sqrt(csm2[idx]);
            // Step 2: Figure out the optimal cost
            if (thisi == 0 && thisj == 0) {
                // Origin cell: DTW cost starts at 0.
                score = 0;
                if (debug == -1) {
                    S[0] = 0;
                    U[0] = -1;
                    L[0] = -1;
                    UL[0] = -1;
                }
            } else {
                // Predecessor costs; -1 means "predecessor does not exist".
                left = -1;
                up = -1;
                diag = -1;
                // The mapping from (this diagonal's idx) to (previous diagonals'
                // idx) shifts depending on whether the diagonal start has hit
                // the right/bottom edges of the box — hence the three cases.
                if (j1 == 0) {
                    if (idx > 0) { left = d1[idx-1] + csm1[idx-1]; }
                    if (idx > 0 && thisi > 0) { diag = d0[idx-1] + csm0[idx-1]; }
                    if (thisi > 0) { up = d1[idx] + csm1[idx]; }
                } else if (i1 == M-1 && j1 == 1) {
                    left = d1[idx] + csm1[idx];
                    if (thisi > 0) {
                        diag = d0[idx] + csm0[idx];
                        up = d1[idx+1] + csm1[idx+1];
                    }
                } else if (i1 == M-1 && j1 > 1) {
                    left = d1[idx] + csm1[idx];
                    if (thisi > 0) {
                        diag = d0[idx+1] + csm0[idx+1];
                        up = d1[idx+1] + csm1[idx+1];
                    }
                }
                // Minimum over the existing predecessors (ignoring -1 markers).
                if (left > -1) { score = left; }
                if (up > -1 && (up < score || score == -1)) { score = up; }
                if (diag > -1 && (diag < score || score == -1)) { score = diag; }
                if (debug == 1) {
                    // Record the full decision matrices for offline inspection.
                    U[thisi*N + thisj] = up;
                    L[thisi*N + thisj] = left;
                    UL[thisi*N + thisj] = diag;
                    S[thisi*N + thisj] = score;
                }
            }
        }
        d2[idx] = score;
    }
}
11,325
#include "global_defines.cuh" __global__ void redistribute_kernel(const int lx, const int ly, const int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small, FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ /************************************************************************ * * * density redistribution in first lattice column * * * * * * Last change: 04/05/2003 * * * ************************************************************************/ /* c c.......directed flow is induced by density redistribution in the first c lattice column. This is not too clever, since the resulting c reynolds number can not be controlled and reaching steady state c takes quite some time, but it is simple and it works ... c use this to start with no initial field */ /* creates u_n, u_squ and assigns the values in node[] */ int tid=blockIdx.x*blockDim.x+threadIdx.x; //int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int R_big; int baffle_position=59; FLOATING mass_flow; //.....local variables int yc, zc, yr, zr; FLOATING rho/*local density*/, u_avg,A_out,A_inn,A_anu,pi,u_xa,u_xs; FLOATING u_x,u_y, u_z, u_n[19] , u_squ; yc= (ly+1)/2 -1;//CHANGED! 
ORIGINALLY IT WAS yc= (ly +1)/2; AND zc= (ly +1)/2; zc= (ly+1)/2 -1; R_big=35; mass_flow=0.05; pi = acos(0.0); u_avg =reynolds*nu/(2*R_big); u_xa= (R_big*R_big) / (R_big*R_big-r_small*r_small)*u_avg/(1+mass_flow); A_out=pi*R_big*R_big; A_inn=pi*r_small*r_small; A_anu=A_out-A_inn; u_xs=A_anu*u_xa*mass_flow/A_inn; //.....compute weighting factors (depending on lattice geometry) for // increasing/decreasing inlet densities //8etei se olo to domain thn idia taxuthta, thn opoia 8a allaksei meta //gia ton eswteriko swlhna //todo: vale sto katw for, to x na paizei metaksu timwn pou prosdiorizontai apo to rank! //px. gia x=0...1/rank... 1/rank+margin... 2/rank.... etc! // int end_of_memory=lz*ly*(baffle_position+1); zr=z-zc; yr=y-yc; if(x< (baffle_position+1) and yr*yr+zr*zr < r_small*r_small and tid<lx*ly*lz){ // id = z*+y+x // for( z = 0; z< lz ; ++z){ // for( y = 0; y< ly ; ++y){ // for( x = 0; x< baffle_position+1 ; ++x){ // rho=0.0; rho=Q0[index(z,y,x)]+Q1[index(z,y,x)]+Q2[index(z,y,x)]+Q3[index(z,y,x)]+ Q4[index(z,y,x)]+Q5[index(z,y,x)]+Q6[index(z,y,x)]+Q7[index(z,y,x)]+ Q8[index(z,y,x)]+Q9[index(z,y,x)]+Q10[index(z,y,x)]+Q11[index(z,y,x)]+ Q12[index(z,y,x)]+Q13[index(z,y,x)]+Q14[index(z,y,x)]+Q15[index(z,y,x)]+ Q16[index(z,y,x)]+Q17[index(z,y,x)]+Q18[index(z,y,x)]; u_x = u_xs; u_y = 0.0; u_z = 0.0; u_n[0]= 0.0; //SHOULD NEVER USED! 
u_n[1] = u_x; //u_xa u_n[2] = u_y; u_n[3] = - u_x; u_n[4] = - u_y; u_n[5] = u_z; u_n[6] = - u_z; u_n[7] = u_x + u_y; u_n[8] = - u_x + u_y; u_n[9] = - u_x - u_y; u_n[10] = u_x - u_y; u_n[11] = u_x - u_z; u_n[12] = - u_x - u_z; u_n[13] = - u_x + u_z; u_n[14] = u_x + u_z; u_n[15] = u_z + u_y; u_n[16] = - u_z + u_y; u_n[17] = - u_z - u_y; u_n[18] = u_z - u_y; u_squ = u_x*u_x + u_y*u_y + u_z*u_z; Q0[index(z,y,x)]=(FLOATING) (t_0 * rho *(1.0 - u_squ / (2.0 * c_squ))); //...........axis speeds (factor: t_1) Q1[index(z,y,x)]=(FLOATING) (t_1 * rho * (1.0+ ( u_n[1]/c_squ ) + 0.5* ( (u_n[1]*u_n[1])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; Q2[index(z,y,x)]=(FLOATING) (t_1 * rho * (1.0+ ( u_n[2]/c_squ ) + 0.5* ( (u_n[2]*u_n[2])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; Q3[index(z,y,x)]=(FLOATING) (t_1 * rho * (1.0+ ( u_n[3]/c_squ ) + 0.5* ( (u_n[3]*u_n[3])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; Q4[index(z,y,x)]=(FLOATING) (t_1 * rho * (1.0+ ( u_n[4]/c_squ ) + 0.5* ( (u_n[4]*u_n[4])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; Q5[index(z,y,x)]=(FLOATING) (t_1 * rho * (1.0+ ( u_n[5]/c_squ ) + 0.5* ( (u_n[5]*u_n[5])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; Q6[index(z,y,x)]=(FLOATING) (t_1 * rho * (1.0+ ( u_n[6]/c_squ ) + 0.5* ( (u_n[6]*u_n[6])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; //...........diagonal speeds (factor: t_2) Q7[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[7]/c_squ ) + 0.5* ( (u_n[7]*u_n[7])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) ));; Q8[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[8]/c_squ ) + 0.5* ( (u_n[8]*u_n[8])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q9[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[9]/c_squ ) + 0.5* ( (u_n[9]*u_n[9])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q10[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[10]/c_squ ) + 0.5* ( (u_n[10]*u_n[10])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q11[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[11]/c_squ ) + 0.5* ( (u_n[11]*u_n[11])/(c_squ*c_squ) ) 
- 0.5 * ( u_squ/c_squ ) )); Q12[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[12]/c_squ ) + 0.5* ( (u_n[12]*u_n[12])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q13[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[13]/c_squ ) + 0.5* ( (u_n[13]*u_n[13])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q14[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[14]/c_squ ) + 0.5* ( (u_n[14]*u_n[14])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q15[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[15]/c_squ ) + 0.5* ( (u_n[15]*u_n[15])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q16[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[16]/c_squ ) + 0.5* ( (u_n[16]*u_n[16])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q17[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[17]/c_squ ) + 0.5* ( (u_n[17]*u_n[17])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); Q18[index(z,y,x)]=(FLOATING) (t_2 * rho * (1.0+ ( u_n[18]/c_squ ) + 0.5* ( (u_n[18]*u_n[18])/(c_squ*c_squ) ) - 0.5 * ( u_squ/c_squ ) )); // } // } // } } } void LBM::cuda_redistribute(){ if(data_location==CPU) copy_data_from_host_to_device(); // int lattice_nodes=lz*ly*lx; // // int n_of_threads=128; // int n_of_blocks=ceil((lattice_nodes*1.0)/n_of_threads); // dim3 threads_type2(n_of_threads,1,1); // dim3 grid_type2(n_of_blocks,1,1); //#ifdef REPORT // cout << "redistribute kernel with:" << lattice_nodes << " lattice nodes" << endl; // cout << "\tthreads:" << n_of_threads << endl; // cout << "\tblocks:" << n_of_blocks << endl; //#endif dim3 threads_type2(threads_for_streaming_collision_and_relaxation,1,1); dim3 grid_type2(blocks_for_streaming_collision_and_relaxation,1,1); redistribute_kernel<<<grid_type2, threads_type2>>>(lx, ly, lz, reynolds, nu, r_small, t_0, t_1, t_2, c_squ, D3_d.Q0, D3_d.Q1, D3_d.Q2, D3_d.Q3, D3_d.Q4, D3_d.Q5, D3_d.Q6, D3_d.Q7, D3_d.Q8, D3_d.Q9, D3_d.Q10, D3_d.Q11, D3_d.Q12, D3_d.Q13, D3_d.Q14, D3_d.Q15, D3_d.Q16, D3_d.Q17, D3_d.Q18); cudaDeviceSynchronize(); }
11,326
// To compile: nvcc hw04.cu -o hw04
#include <sys/time.h>
#include <stdio.h>

#define N 1000000

// Per-block partial dot product with a shared-memory tree reduction.
// Launch: 1D grid, blockDim.x == 1024 (power of two required by the halving
// loop); c[blockIdx.x] receives the block's partial sum.
//
// Fixes vs. the original:
//  * guard is now id < N — `id > N` both read a[N]/b[N] out of bounds and
//    let id == N through;
//  * no early return: every thread must reach the __syncthreads() below,
//    otherwise the last (partial) block hit a divergent barrier (UB);
//  * only thread 0 writes c[blockIdx.x] (previously every thread wrote it).
__global__ void dotProduct(float *a, float *b, float *c){
    __shared__ float cache[1024];
    unsigned long id = (blockIdx.x*blockDim.x)+threadIdx.x;

    cache[threadIdx.x] = (id < N) ? a[id]*b[id] : 0.0f;
    __syncthreads();

    // Tree reduction over the block's cache (blockDim.x must be a power of 2).
    for(int i = blockDim.x/2; i != 0; i /= 2){
        if(threadIdx.x < i){
            cache[threadIdx.x] += cache[threadIdx.x+i];
        }
        __syncthreads();
    }

    if(threadIdx.x == 0){
        c[blockIdx.x] = cache[0];
    }
}

// Abort with a message if the most recent CUDA call failed.
void CUDAErrorCheck(const char *message){
    cudaError_t error;
    error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("\n CUDA ERROR in: %s -> %s\n", message, cudaGetErrorString(error));
        exit(0);
    }
}

int main(){
    float *A_CPU, *B_CPU, *C_CPU; // Pointers for memory on the Host
    long n = N;

    float *A_GPU, *B_GPU, *C_GPU;
    dim3 gridDim, blockDim;

    // Host buffers; C holds one partial sum per block.
    A_CPU = (float*)malloc(n*sizeof(float));
    B_CPU = (float*)malloc(n*sizeof(float));
    C_CPU = (float*)malloc((1+(n-1)/1024)*sizeof(float));

    for(int i = 0; i < n; i++)
    {
        A_CPU[i] = 2.5;
        B_CPU[i] = 1.0;
    }

    gridDim.x = 1+(n-1)/1024;           // ceil(n / 1024) blocks
    gridDim.y = 1;
    gridDim.z = 1;
    blockDim.x = (n < 1024) ? n:1024;
    blockDim.y = 1;
    blockDim.z = 1;

    cudaMalloc(&A_GPU, n*sizeof(float));
    CUDAErrorCheck("a cuda malloc...");
    cudaMalloc(&B_GPU, n*sizeof(float));
    CUDAErrorCheck("b cuda malloc...");
    cudaMalloc(&C_GPU, gridDim.x*sizeof(float));
    CUDAErrorCheck("c cuda malloc...");

    // Synchronous copies: the host buffers are pageable, so cudaMemcpyAsync
    // offered no overlap, and freeing A_CPU/B_CPU right after an async enqueue
    // was only safe by accident. (The original also uploaded the uninitialized
    // C_CPU buffer; the kernel overwrites every element of C_GPU, so that copy
    // was dropped.)
    cudaMemcpy(A_GPU, A_CPU, n*sizeof(float), cudaMemcpyHostToDevice);
    CUDAErrorCheck("a cuda memcpy from host...");
    cudaMemcpy(B_GPU, B_CPU, n*sizeof(float), cudaMemcpyHostToDevice);
    CUDAErrorCheck("b cuda memcpy from host...");
    free(A_CPU);
    free(B_CPU);

    dotProduct<<<gridDim, blockDim>>>(A_GPU, B_GPU, C_GPU);
    CUDAErrorCheck("kernel...");

    // Blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(C_CPU, C_GPU, gridDim.x*sizeof(float), cudaMemcpyDeviceToHost);
    CUDAErrorCheck("c cuda memcpy from device..");

    cudaFree(A_GPU);
    CUDAErrorCheck("a cuda free...");
    cudaFree(B_GPU);
    CUDAErrorCheck("b cuda free...");
    cudaFree(C_GPU);
    CUDAErrorCheck("c cuda free...");

    // Final reduction of the per-block partials on the host.
    float ans = 0;
    for(int i=0; i<gridDim.x; i++){
        ans += C_CPU[i];
    }
    free(C_CPU);

    printf("%f\n", ans);
    return(0);
}
11,327
/* This is a automatically generated test. Do not modify */
/* NOTE(review): fuzz/compiler-test kernel; the arithmetic (signed zeros,
 * denormal literals, self-overwriting `comp`) is intentionally strange and
 * must be preserved exactly. Comments only were added. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Single-thread kernel (launched <<<1,1>>>) that churns through generated
 * float expressions and prints the final value of `comp`. */
__global__
void compute(float comp, int var_1,float* var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
  for (int i=0; i < var_1; ++i) {
    var_2[i] = +1.6143E-41f;
    comp = var_2[i] / cosf(asinf(var_4 - (var_5 * sinhf((var_6 - var_7 / var_8)))));
    float tmp_1 = -0.0f;
    comp += tmp_1 / (-0.0f * (var_9 * +1.2366E25f + cosf(var_10 - -1.4355E-7f / var_11)));
    if (comp == var_12 + -1.2856E7f) {
      comp = +1.2538E22f + var_13 / var_14 * -1.9884E36f + +0.0f / +1.6793E35f;
    }
    if (comp > fabsf(-1.2656E17f)) {
      comp += var_15 / fabsf((-1.5653E-35f - var_16 - ldexpf((var_17 * var_18 / var_19 + (var_20 * -1.6549E-42f)), 2)));
    }
    /* Inner loop deliberately shadows the outer `i` (generated code). */
    for (int i=0; i < var_3; ++i) {
      var_21[i] = -1.2917E35f;
      float tmp_2 = -0.0f;
      comp = tmp_2 + var_21[i] * (+1.3277E-36f * (+0.0f / (var_22 * sqrtf(asinf((+0.0f / var_23 * (var_24 * var_25)))))));
      comp = (+1.9593E-41f * var_26);
    }
  }
  printf("%.17g\n", comp);
}

/* Host helper: 10-element float array filled with v (leaked by design —
 * process exits immediately after the kernel). */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

/* Reads the 27 kernel arguments from argv[1..27] and launches the kernel.
 * NOTE(review): tmp_3/tmp_22 are host pointers passed to a __global__ kernel
 * as written by the generator — kept as-is per the do-not-modify header. */
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float* tmp_3 = initPointer( atof(argv[3]) );
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float* tmp_22 = initPointer( atof(argv[22]) );
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
  cudaDeviceSynchronize();

  return 0;
}
11,328
/*
 * CUDA device query: enumerate all CUDA devices and print their properties.
 *
 * NOTE(review): this file previously carried NVIDIA's bitreverse sample and a
 * second copy of this very program, both entirely commented out (dead code);
 * the dead copies were removed. Runtime behavior of the active program is
 * unchanged apart from the printf format fixes noted below.
 */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

/* Print the salient properties of one CUDA device.
 * Fix: totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem and
 * textureAlignment are size_t fields of cudaDeviceProp; printing them with %u
 * is undefined/garbled on 64-bit platforms, so %zu is used instead. */
void printDevProp(cudaDeviceProp devProp)
{
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Name: %s\n", devProp.name);
    printf("Total global memory: %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate: %d\n", devProp.clockRate);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Texture alignment: %zu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    return;
}

/* Enumerate all CUDA devices and dump each one's properties, then wait for a
 * keypress so the console window stays open. */
int main()
{
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("\nHello From Main code:......\n");
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    for (int i = 0; i < devCount; ++i)
    {
        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printDevProp(devProp);
    }

    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);

    return 0;
}
11,329
#include "includes.h" __global__ void gradient_and_subtract_kernel(float * in, float * grad_x, float * grad_y, float * grad_z) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; unsigned int k = blockIdx.z * blockDim.z + threadIdx.z; if (i >= c_Size.x || j >= c_Size.y || k >= c_Size.z) return; long int id = (k * c_Size.y + j) * c_Size.x + i; long int id_x = (k * c_Size.y + j) * c_Size.x + i + 1; long int id_y = (k * c_Size.y + j + 1) * c_Size.x + i; long int id_z = ((k + 1) * c_Size.y + j) * c_Size.x + i; if (i != (c_Size.x - 1)) grad_x[id] -= ((in[id_x] - in[id]) / c_Spacing.x); if (j != (c_Size.y - 1)) grad_y[id] -= ((in[id_y] - in[id]) / c_Spacing.y); if (k != (c_Size.z - 1)) grad_z[id] -= ((in[id_z] - in[id]) / c_Spacing.z); }
11,330
#include"cuda_runtime.h" #include"device_launch_parameters.h" #include<stdio.h> __global__ void Add(int *a, int *b, int *c, int n) { int id,i; id = threadIdx.x; c[id] = a[id]+b[id]; } int main() { int a[100],b[100],c[100],n,i; int size, *d_a, *d_b, *d_c; printf("\nEnter N\n"); scanf("%d", &n); printf("\nEnter MATRIX A\n"); for(i=0;i<n*n;i++) { scanf("%d", &a[i]); } printf("\nEnter MATRIX B\n"); for(i=0;i<n*n;i++) { scanf("%d", &b[i]); } size = sizeof(int); cudaMalloc((void **)&d_a,n*n*size); cudaMalloc((void **)&d_b,n*n*size); cudaMalloc((void **)&d_c,n*n*size); cudaMemcpy(d_a,a,n*n*size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,n*n*size,cudaMemcpyHostToDevice); Add<<<1,n*n>>>(d_a,d_b,d_c,n); cudaMemcpy(c,d_c,size*n*n,cudaMemcpyDeviceToHost); printf("Addition of rows\n"); for(i=0;i<n*n;i++) { printf("%d\t", c[i]); } cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
11,331
#include <iostream>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/device_ptr.h>
#include <thrust/remove.h>
#include <thrust/tuple.h>
#include <thrust/sequence.h>

// One (x, y, z, keep-flag) record viewed through a zip iterator.
typedef thrust::tuple<int, int, int, bool> XYZFlag;

// Predicate for thrust::remove_if: remove records whose keep-flag is false.
// (operator() made const — required for const-correct functor use.)
struct should_remove
{
    __host__ __device__
    bool operator() (const XYZFlag& tup) const
    {
        const bool flag = thrust::get<3>(tup);
        return !flag;
    }
};

// Demo: stream-compact four parallel device arrays with a single
// thrust::remove_if over a zip iterator.
int main()
{
    const int N = 4000000;

    // Raw device allocations, wrapped in device_ptr below for Thrust.
    int* x_raw_ptr;
    cudaMalloc(&x_raw_ptr, N * sizeof(int));
    int* y_raw_ptr;
    cudaMalloc(&y_raw_ptr, N * sizeof(int));
    int* z_raw_ptr;
    cudaMalloc(&z_raw_ptr, N * sizeof(int));
    bool* should_keep_raw;
    cudaMalloc(&should_keep_raw, N * sizeof(bool));

    // bind device_ptr
    thrust::device_ptr<int> x_dev_ptr(x_raw_ptr);
    thrust::device_ptr<int> y_dev_ptr(y_raw_ptr);
    thrust::device_ptr<int> z_dev_ptr(z_raw_ptr);
    thrust::device_ptr<bool> should_keep_dev_ptr(should_keep_raw);

    // Test data: x = 0..N-1, y = N..2N-1, z = 2N..3N-1, all flags false.
    thrust::sequence(x_dev_ptr, x_dev_ptr + N);
    thrust::sequence(y_dev_ptr, y_dev_ptr + N, N);
    thrust::sequence(z_dev_ptr, z_dev_ptr + N, N*2);
    thrust::fill(should_keep_dev_ptr, should_keep_dev_ptr + N, false);

    // Keep odd indices among the first 10240 records. NOTE: each []-write on a
    // device_ptr from the host is an individual H2D transfer — fine for a
    // demo, far too slow for real code.
    for (int i = 0; i < 10240; i++) {
        should_keep_dev_ptr[i] = (bool)(i % 2);
    }

    // Compact all four arrays in lockstep.
    auto first = thrust::make_zip_iterator(thrust::make_tuple(x_dev_ptr, y_dev_ptr, z_dev_ptr, should_keep_dev_ptr));
    auto last = thrust::make_zip_iterator(thrust::make_tuple(x_dev_ptr+N, y_dev_ptr+N, z_dev_ptr+N, should_keep_dev_ptr+N));
    auto newEnd = thrust::remove_if(first, last, should_remove());
    (void)newEnd;   // logical size after compaction is newEnd - first;
                    // elements past newEnd are unspecified

    // print result
    for (int i = 0; i < 2; i++) {
        std::cout << "x: " << x_dev_ptr[i];
        std::cout << ", y: " << y_dev_ptr[i];
        std::cout << ", z: " << z_dev_ptr[i];
        std::cout << ", flag: " << should_keep_dev_ptr[i] << std::endl;
    }

    // Fix: the original leaked all four device allocations.
    cudaFree(x_raw_ptr);
    cudaFree(y_raw_ptr);
    cudaFree(z_raw_ptr);
    cudaFree(should_keep_raw);
    return 0;
}
11,332
#include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>

#define BLOCK_WIDTH 32

// Tiled 2D convolution kernel.
// Each BLOCK_WIDTH x BLOCK_WIDTH thread block loads one input tile (with a
// maskWidth/2 halo on all sides) into shared memory and produces an
// N_TILE_WIDTH x N_TILE_WIDTH output tile, where
// N_TILE_WIDTH == BLOCK_WIDTH - (maskWidth - 1).
// Out-of-bounds halo cells are zero-padded. d_mask is read through the
// read-only cache via const __restrict__.
__global__ void tiledConvolution_2D_Kernel(float* d_m, const float* __restrict__ d_mask, float* d_n, size_t a, size_t b, size_t maskWidth, int N_TILE_WIDTH)
{
    float result = 0;

    // indexing variables: (n_row, n_col) is this thread's output position,
    // (m_row, m_col) the input position it loads (shifted up-left by the halo).
    int n_row = blockIdx.y * N_TILE_WIDTH + threadIdx.y;
    int n_col = blockIdx.x * N_TILE_WIDTH + threadIdx.x;
    int m_row = n_row - maskWidth / 2;
    int m_col = n_col - maskWidth / 2;

    __shared__ float tile_m[BLOCK_WIDTH][BLOCK_WIDTH];

    // Load the input tile; zero-pad outside the image.
    if(m_row >= 0 && m_row < a && m_col >= 0 && m_col < b)
    {
        tile_m[threadIdx.y][threadIdx.x] = d_m[m_row * b + m_col];
    }
    else
    {
        tile_m[threadIdx.y][threadIdx.x] = 0;
    }

    // Whole block reaches the barrier (no early returns above).
    __syncthreads();

    // Only the first N_TILE_WIDTH x N_TILE_WIDTH threads produce outputs;
    // the remaining threads only helped load the halo.
    if(threadIdx.y < N_TILE_WIDTH && threadIdx.x < N_TILE_WIDTH && n_row < a && n_col < b)
    {
        for(int i = 0; i < maskWidth; ++i)
        {
            for(int j = 0; j < maskWidth; ++j)
            {
                result += d_mask[i * maskWidth + j] * tile_m[threadIdx.y + i][threadIdx.x + j];
            }
        }
        // write result
        d_n[n_row * b + n_col] = result;
    }
}

// Abort with file/line context if the most recent CUDA call failed.
void errorCheck(unsigned int line)
{
    cudaError_t cudaError = cudaGetLastError();
    if(cudaError != cudaSuccess)
    {
        printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(cudaError));
        exit(EXIT_FAILURE);
    }
}

// Host wrapper: allocate device buffers, upload m and mask, launch the tiled
// kernel over an (a x b) image, and download the result into n.
void convolution_2D(float* m, float* mask, float* n, size_t a, size_t b, size_t maskWidth, int N_TILE_WIDTH)
{
    // One block per output tile of N_TILE_WIDTH x N_TILE_WIDTH elements.
    dim3 numOfBlocks(ceil(b / (float) N_TILE_WIDTH), ceil(a / (float) N_TILE_WIDTH), 1);
    dim3 numOfThreads(BLOCK_WIDTH, BLOCK_WIDTH, 1);

    size_t bytes_m = a * b * sizeof(float);
    size_t bytes_mask = maskWidth * maskWidth * sizeof(float);

    float* d_m;
    float* d_mask;
    float* d_n;

    cudaMalloc((void**) &d_m, bytes_m);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_mask, bytes_mask);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_n, bytes_m);
    errorCheck(__LINE__);

    cudaMemcpy(d_m, m, bytes_m, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);
    cudaMemcpy(d_mask, mask, bytes_mask, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);

    tiledConvolution_2D_Kernel<<<numOfBlocks, numOfThreads>>>(d_m, d_mask, d_n, a, b, maskWidth, N_TILE_WIDTH);
    errorCheck(__LINE__);

    // Blocking copy synchronizes with the kernel before we read d_n.
    cudaMemcpy(n, d_n, bytes_m, cudaMemcpyDeviceToHost);
    errorCheck(__LINE__);

    cudaFree(d_m);
    errorCheck(__LINE__);
    cudaFree(d_mask);
    errorCheck(__LINE__);
    cudaFree(d_n);
    errorCheck(__LINE__);
}

// Random-sized (~3840..4096 per side) test: random image, random 11x11 mask,
// timed convolution.
int main()
{
    struct timespec start, end;

    srand(time(NULL));

    size_t a = rand() % 257 + 3840;
    size_t b = rand() % 257 + 3840;
    size_t maskWidth = 11;
    int N_TILE_WIDTH = BLOCK_WIDTH - (maskWidth - 1);

    float* m = (float*) malloc(a * b * sizeof(float));
    float* mask = (float*) malloc(maskWidth * maskWidth * sizeof(float));
    float* n = (float*) malloc(a * b * sizeof(float));

    for(int i = 0; i < a * b; ++i)
    {
        m[i] = rand() % 129 - 64;
    }
    for(int j = 0; j < maskWidth * maskWidth; ++j)
    {
        mask[j] = rand() % 1001 / 1000.0;
    }

    clock_gettime(CLOCK_REALTIME, &start);

    // do convolution
    convolution_2D(m, mask, n, a, b, maskWidth, N_TILE_WIDTH);

    clock_gettime(CLOCK_REALTIME, &end);

    time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;

    // Fix: execTime is time_t (a long on glibc); printing it with %d was
    // undefined behavior — cast to long and print with %ld.
    printf("Execution time: %ld microseconds.", (long) execTime);

    return 0;
}
11,333
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <math.h>
#include <stdlib.h>
#include <cmath>
#include <stdio.h>

using namespace std;

// One pairwise-min step of an iterative reduction: block x compares the
// element at [2*i*x] with the one i slots later and keeps the smaller
// at [2*i*x]. The host doubles *i between launches.
__global__ void min2(int *arr, int *i)
{
    int a = arr[2 * *i * blockIdx.x];
    int b = arr[2 * *i * blockIdx.x + *i];
    if (a < b)
    {
        arr[2 * *i * blockIdx.x] = a;
    }
    else
    {
        arr[2 * *i * blockIdx.x] = b;
    }
}

// Grid-stride loop: b[i] = a[i] % 10 for i in [0, n).
// NOTE(review): C++ `%` yields negative remainders for negative inputs.
__global__ void remainder_array(int n, int *a, int *b)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < n; i += stride)
    {
        b[i] = a[i] % 10;
    }
}

// Write `size` ints to q1.txt as a comma-separated list.
void stream_arr_to_file(int *b, int size)
{
    ofstream myfile("q1.txt");
    if (myfile.is_open())
    {
        for (int count = 0; count < size; count++)
        {
            myfile << b[count] << ",";
        }
        myfile.close();
    }
    else
        cout << "Unable to open file" << endl;
}

// Read comma-separated ints from inp.txt, APPENDING to *arr and
// ACCUMULATING into *len. Returns 1 on success, 0 if the file is
// missing. Callers that re-read must reset arr/len first.
int populate_array(vector<int>* arr, int* len)
{
    ifstream infile("inp.txt");
    if (!infile.is_open())
    {
        cout << "File failed to open" << endl;
        return 0;
    }
    string line;
    while (getline(infile, line))
    {
        istringstream ss(line);
        while (ss)
        {
            string s;
            if (!getline(ss, s, ',')) break;
            (*len)++;
            arr->push_back(atoi(s.c_str()));
        }
    }
    return 1;
}

// Part (a): iterative device-side min reduction, then a final host-side
// compare of the two surviving candidates.
void a(vector<int> arr, int len)
{
    int full_size = len * sizeof(int);

    int *d_arr;
    cudaMalloc((void **)&d_arr, full_size);
    int N = len / 2;
    int i = 1;
    int *d_i;
    cudaMalloc((void **)&d_i, sizeof(int));

    while (2 * i < len)
    {
        // Push the current array and stride, run one reduction level,
        // then pull the partially reduced array back.
        cudaMemcpy(d_arr, arr.data(), full_size, cudaMemcpyHostToDevice);
        cudaMemcpy(d_i, &i, sizeof(int), cudaMemcpyHostToDevice);
        min2<<<N, 1>>>(d_arr, d_i);
        i *= 2;
        N = (int)((len + 1) / (2 * i));
        cudaMemcpy(arr.data(), d_arr, full_size, cudaMemcpyDeviceToHost);
    }

    // Sequential compare of the last two candidates.
    int min;
    int a = arr[0];
    int b = arr[i];
    if (a < b) { min = a; } else { min = b; }
    cout << "Minimum: " << min << endl;

    cudaFree(d_arr);
    cudaFree(d_i);
}

// Part (b): element-wise remainder mod 10, result streamed to q1.txt.
void b(vector<int> a, int len)
{
    int full_size = len * sizeof(int);

    int *a_arr;
    cudaMalloc((void **) &a_arr, full_size);
    int *b_arr;
    cudaMalloc((void **) &b_arr, full_size);

    cudaMemcpy(a_arr, a.data(), full_size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_arr, a.data(), full_size, cudaMemcpyHostToDevice);

    remainder_array<<<1, 256>>>(len, a_arr, b_arr);

    cudaMemcpy(a.data(), b_arr, full_size, cudaMemcpyDeviceToHost);
    stream_arr_to_file(a.data(), len);

    cudaFree(a_arr);
    cudaFree(b_arr);
}

int main()
{
    vector<int> arr;
    int len = 0;
    if (!populate_array(&arr, &len)) { return 0; }
    a(arr, len);

    // BUG FIX: populate_array appends and accumulates, so the original
    // second call handed b() a duplicated array of twice the length
    // (every value written to q1.txt twice). Re-read from scratch.
    arr.clear();
    len = 0;
    if (!populate_array(&arr, &len)) { return 0; }
    b(arr, len);

    return 0;
}
11,334
// Squares each element of `result` in place.
// Expects a 1-D grid with one block per element (uses blockIdx.x only).
extern "C" __global__ void DummyFunction(int *result)
{
    const int idx = blockIdx.x;
    const int v = result[idx];
    result[idx] = v * v;
}
11,335
#include <cuda_runtime.h> #include <iostream> #include <stdio.h> #include <sys/time.h> static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void conv(void* __restrict__ A, void* __restrict__ W, void* __restrict__ B) { float B_local[64]; __shared__ float Apad_shared[512]; __shared__ float W_shared[512]; float Apad_shared_local[8]; float W_shared_local[8]; for (int ff_c_init = 0; ff_c_init < 8; ++ff_c_init) { for (int nn_c_init = 0; nn_c_init < 8; ++nn_c_init) { B_local[(((ff_c_init * 8) + nn_c_init))] = 0.000000e+00f; } } for (int rc_outer = 0; rc_outer < 32; ++rc_outer) { for (int ry = 0; ry < 3; ++ry) { for (int rx = 0; rx < 3; ++rx) { __syncthreads(); for (int ax3_inner_outer = 0; ax3_inner_outer < 2; ++ax3_inner_outer) { ((float4*)(Apad_shared + ((((((int)threadIdx.x) * 64) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer * 4)))))[0] = (((((1 <= ((((int)blockIdx.z) / 14) + ry)) && (((((int)blockIdx.z) / 14) + ry) < 15)) && (1 <= (rx + (((int)blockIdx.z) % 14)))) && ((rx + (((int)blockIdx.z) % 14)) < 15)) ? 
((float4*)((float*)A + ((((((((((ry * 458752) + (((int)blockIdx.z) * 32768)) + (rx * 32768)) + (rc_outer * 1024)) + (((int)threadIdx.x) * 128)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer * 4)) - 491520))))[0] : make_float4(0.000000e+00f, 0.000000e+00f, 0.000000e+00f, 0.000000e+00f)); } for (int ax3_inner_outer1 = 0; ax3_inner_outer1 < 2; ++ax3_inner_outer1) { ((float4*)(W_shared + ((((((int)threadIdx.x) * 64) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer1 * 4)))))[0] = ((float4*)((float*)W + ((((((((ry * 393216) + (rx * 131072)) + (rc_outer * 4096)) + (((int)threadIdx.x) * 512)) + (((int)blockIdx.y) * 64)) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer1 * 4)))))[0]; } __syncthreads(); for (int rc_inner = 0; rc_inner < 8; ++rc_inner) { for (int ax3 = 0; ax3 < 8; ++ax3) { Apad_shared_local[(ax3)] = Apad_shared[((((rc_inner * 64) + (((int)threadIdx.x) * 8)) + ax3))]; } for (int ax31 = 0; ax31 < 8; ++ax31) { W_shared_local[(ax31)] = W_shared[((((rc_inner * 64) + (((int)threadIdx.x) * 8)) + ax31))]; } for (int ff_c = 0; ff_c < 8; ++ff_c) { for (int nn_c = 0; nn_c < 8; ++nn_c) { B_local[(((ff_c * 8) + nn_c))] = (B_local[(((ff_c * 8) + nn_c))] + ((Apad_shared_local[(nn_c)] * W_shared_local[(ff_c)]))); } } } } } } for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 8; ++ff_inner_inner_inner) { for (int nn_inner_inner_inner = 0; nn_inner_inner_inner < 8; ++nn_inner_inner_inner) { ((float*)B)[((((((((((int)blockIdx.z) * 65536) + (((int)blockIdx.y) * 8192)) + (((int)threadIdx.x) * 1024)) + (ff_inner_inner_inner * 128)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 8)) + nn_inner_inner_inner))] = B_local[(((ff_inner_inner_inner * 8) + nn_inner_inner_inner))]; } } } __global__ void conv_bn_relu(void* __restrict__ A, void* __restrict__ W, void* __restrict__ mean, void* __restrict__ scale, void* __restrict__ var, void* __restrict__ beta, void* __restrict__ B) { float B_local[64]; float mean_local[8]; float scale_local[8]; float 
var_local[8]; float beta_local[8]; float epsilon = 1e-5; __shared__ float Apad_shared[512]; __shared__ float W_shared[512]; float Apad_shared_local[8]; float W_shared_local[8]; for (int ff_c = 0; ff_c < 8; ++ff_c) { mean_local[ff_c] = ((float*)mean)[((((((int)blockIdx.y) * 64) + (((int)threadIdx.x) * 8)) + ff_c))]; scale_local[ff_c] = ((float*)scale)[((((((int)blockIdx.y) * 64) + (((int)threadIdx.x) * 8)) + ff_c))]; var_local[ff_c] = ((float*)var)[((((((int)blockIdx.y) * 64) + (((int)threadIdx.x) * 8)) + ff_c))]; beta_local[ff_c] = ((float*)beta)[((((((int)blockIdx.y) * 64) + (((int)threadIdx.x) * 8)) + ff_c))]; } for (int ff_c_init = 0; ff_c_init < 8; ++ff_c_init) { for (int nn_c_init = 0; nn_c_init < 8; ++nn_c_init) { B_local[(((ff_c_init * 8) + nn_c_init))] = 0.000000e+00f; } } for (int rc_outer = 0; rc_outer < 32; ++rc_outer) { for (int ry = 0; ry < 3; ++ry) { for (int rx = 0; rx < 3; ++rx) { __syncthreads(); for (int ax3_inner_outer = 0; ax3_inner_outer < 2; ++ax3_inner_outer) { ((float4*)(Apad_shared + ((((((int)threadIdx.x) * 64) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer * 4)))))[0] = (((((1 <= ((((int)blockIdx.z) / 14) + ry)) && (((((int)blockIdx.z) / 14) + ry) < 15)) && (1 <= (rx + (((int)blockIdx.z) % 14)))) && ((rx + (((int)blockIdx.z) % 14)) < 15)) ? 
((float4*)((float*)A + ((((((((((ry * 458752) + (((int)blockIdx.z) * 32768)) + (rx * 32768)) + (rc_outer * 1024)) + (((int)threadIdx.x) * 128)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer * 4)) - 491520))))[0] : make_float4(0.000000e+00f, 0.000000e+00f, 0.000000e+00f, 0.000000e+00f)); } for (int ax3_inner_outer1 = 0; ax3_inner_outer1 < 2; ++ax3_inner_outer1) { ((float4*)(W_shared + ((((((int)threadIdx.x) * 64) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer1 * 4)))))[0] = ((float4*)((float*)W + ((((((((ry * 393216) + (rx * 131072)) + (rc_outer * 4096)) + (((int)threadIdx.x) * 512)) + (((int)blockIdx.y) * 64)) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer1 * 4)))))[0]; } __syncthreads(); for (int rc_inner = 0; rc_inner < 8; ++rc_inner) { for (int ax3 = 0; ax3 < 8; ++ax3) { Apad_shared_local[(ax3)] = Apad_shared[((((rc_inner * 64) + (((int)threadIdx.x) * 8)) + ax3))]; } for (int ax31 = 0; ax31 < 8; ++ax31) { W_shared_local[(ax31)] = W_shared[((((rc_inner * 64) + (((int)threadIdx.x) * 8)) + ax31))]; } for (int ff_c = 0; ff_c < 8; ++ff_c) { for (int nn_c = 0; nn_c < 8; ++nn_c) { B_local[(((ff_c * 8) + nn_c))] = (B_local[(((ff_c * 8) + nn_c))] + ((Apad_shared_local[(nn_c)] * W_shared_local[(ff_c)]))); } } } } } } for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 8; ++ff_inner_inner_inner) { for (int nn_inner_inner_inner = 0; nn_inner_inner_inner < 8; ++nn_inner_inner_inner) { ((float*)B)[((((((((((int)blockIdx.z) * 65536) + (((int)blockIdx.y) * 8192)) + (((int)threadIdx.x) * 1024)) + (ff_inner_inner_inner * 128)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 8)) + nn_inner_inner_inner))] = max(0.0, scale_local[ff_inner_inner_inner] * ( B_local[(((ff_inner_inner_inner * 8) + nn_inner_inner_inner))] - mean_local[ff_inner_inner_inner] ) / sqrt( var_local[ff_inner_inner_inner] + epsilon ) + beta_local[ff_inner_inner_inner]); } } } //extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ W, 
void* __restrict__ beta, void* __restrict__ B) { float B_local[64]; float beta_local[8]; __shared__ float Apad_shared[512]; __shared__ float W_shared[512]; float Apad_shared_local[8]; float W_shared_local[8]; for (int ff_c = 0; ff_c < 8; ++ff_c) { beta_local[ff_c] = ((float*)beta)[((((((int)blockIdx.y) * 64) + (((int)threadIdx.x) * 8)) + ff_c))]; } for (int ff_c_init = 0; ff_c_init < 8; ++ff_c_init) { for (int nn_c_init = 0; nn_c_init < 8; ++nn_c_init) { B_local[(((ff_c_init * 8) + nn_c_init))] = 0.000000e+00f; } } for (int rc_outer = 0; rc_outer < 32; ++rc_outer) { for (int ry = 0; ry < 3; ++ry) { for (int rx = 0; rx < 3; ++rx) { __syncthreads(); for (int ax3_inner_outer = 0; ax3_inner_outer < 2; ++ax3_inner_outer) { ((float4*)(Apad_shared + ((((((int)threadIdx.x) * 64) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer * 4)))))[0] = (((((1 <= ((((int)blockIdx.z) / 14) + ry)) && (((((int)blockIdx.z) / 14) + ry) < 15)) && (1 <= (rx + (((int)blockIdx.z) % 14)))) && ((rx + (((int)blockIdx.z) % 14)) < 15)) ? 
((float4*)((float*)A + ((((((((((ry * 458752) + (((int)blockIdx.z) * 32768)) + (rx * 32768)) + (rc_outer * 1024)) + (((int)threadIdx.x) * 128)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer * 4)) - 491520))))[0] : make_float4(0.000000e+00f, 0.000000e+00f, 0.000000e+00f, 0.000000e+00f)); } for (int ax3_inner_outer1 = 0; ax3_inner_outer1 < 2; ++ax3_inner_outer1) { ((float4*)(W_shared + ((((((int)threadIdx.x) * 64) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer1 * 4)))))[0] = ((float4*)((float*)W + ((((((((ry * 393216) + (rx * 131072)) + (rc_outer * 4096)) + (((int)threadIdx.x) * 512)) + (((int)blockIdx.y) * 64)) + (((int)threadIdx.x) * 8)) + (ax3_inner_outer1 * 4)))))[0]; } __syncthreads(); for (int rc_inner = 0; rc_inner < 8; ++rc_inner) { for (int ax3 = 0; ax3 < 8; ++ax3) { Apad_shared_local[(ax3)] = Apad_shared[((((rc_inner * 64) + (((int)threadIdx.x) * 8)) + ax3))]; } for (int ax31 = 0; ax31 < 8; ++ax31) { W_shared_local[(ax31)] = W_shared[((((rc_inner * 64) + (((int)threadIdx.x) * 8)) + ax31))]; } for (int ff_c = 0; ff_c < 8; ++ff_c) { for (int nn_c = 0; nn_c < 8; ++nn_c) { B_local[(((ff_c * 8) + nn_c))] = (B_local[(((ff_c * 8) + nn_c))] + ((Apad_shared_local[(nn_c)] * W_shared_local[(ff_c)]))); } } } } } } for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 8; ++ff_inner_inner_inner) { for (int nn_inner_inner_inner = 0; nn_inner_inner_inner < 8; ++nn_inner_inner_inner) { ((float*)B)[((((((((((int)blockIdx.z) * 65536) + (((int)blockIdx.y) * 8192)) + (((int)threadIdx.x) * 1024)) + (ff_inner_inner_inner * 128)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 8)) + nn_inner_inner_inner))] = B_local[(((ff_inner_inner_inner * 8) + nn_inner_inner_inner))] + beta_local[ff_inner_inner_inner]; } } } double get_durtime(struct timeval t1, struct timeval t2) { return (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; } int main() { const int batch = 128, in_channel = 256, out_channel = 512, in_height = 14, in_width = 14; 
const int kernel_width = 3, kernel_height = 3, pad_height = 1, pad_width = 1; const int stride_height = 1, stride_width = 1; const int out_width = (in_width - kernel_width + 2*pad_width) / stride_width + 1; const int out_height = (in_height - kernel_height + 2*pad_height) / stride_height + 1; float *A, *W, *B, *mean, *scale, *var, *beta; int in_size = batch*in_channel*in_height*in_width; int kernel_size = kernel_width*kernel_height*out_channel*in_channel; int out_size = batch*out_channel*out_width*out_height; struct timeval start, end; A = (float*)malloc(in_size*sizeof(float)); W = (float*)malloc(kernel_size*sizeof(float)); B = (float*)malloc(out_size*sizeof(float)); mean = (float*)malloc(out_channel*sizeof(float)); scale = (float*)malloc(out_channel*sizeof(float)); var = (float*)malloc(out_channel*sizeof(float)); beta = (float*)malloc(out_channel*sizeof(float)); float *Ad, *Wd, *Bd, *meand, *scaled, *vard, *betad; HANDLE_ERROR(cudaMalloc((void**)&Ad, in_size*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&Wd, kernel_size*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&Bd, out_size*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&meand, out_channel*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&scaled, out_channel*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&vard, out_channel*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&betad, out_channel*sizeof(float))); for (int i = 0; i < in_size; ++i) A[i] = i; for (int i = 0; i < kernel_size; ++i) W[i] = i; for (int i = 0; i < out_channel; ++i) { mean[i] = i; scale[i] = i; var[i] = i; beta[i] = i; } HANDLE_ERROR(cudaMemcpy(Ad, A, in_size*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(Wd, W, kernel_size*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(meand, mean, out_channel*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(scaled, scale, out_channel*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(vard, var, out_channel*sizeof(float), 
cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(betad, beta, out_channel*sizeof(float), cudaMemcpyHostToDevice)); dim3 grid(2, 8, 196), block(8, 1, 1); HANDLE_ERROR(cudaDeviceSynchronize()); //HANDLE_ERROR(default_function_kernel0<<<grid, block>>>(Ad, Wd, betad, Bd)); gettimeofday(&start, 0); conv_bn_relu<<<grid, block>>>(Ad, Wd, meand, scaled, vard, betad, Bd); HANDLE_ERROR(cudaDeviceSynchronize()); gettimeofday(&end, 0); std::cout << "time: " << get_durtime(start, end) << "ms" << std::endl; HANDLE_ERROR(cudaMemcpy(B, Bd, out_size*sizeof(float), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaFree(Ad)); HANDLE_ERROR(cudaFree(Bd)); HANDLE_ERROR(cudaFree(Wd)); HANDLE_ERROR(cudaFree(meand)); HANDLE_ERROR(cudaFree(scaled)); HANDLE_ERROR(cudaFree(vard)); HANDLE_ERROR(cudaFree(betad)); return 0; }
11,336
#include <stdio.h>
#include <math.h>

// Probes device memory capacity: starting at 2^4 x 2^4, allocates three
// pitched square float matrices per size, doubling the side length each
// iteration until an allocation fails.
// NOTE(review): allocations from earlier iterations are deliberately
// never freed, so this measures CUMULATIVE capacity, not the largest
// single allocation.
int main(int argc, char** argv)
{
    float *d_a, *d_b, *d_c;
    size_t pitch;
    int row = 0;
    int i = 4;

    while (1)
    {
        row = (int)pow(2, i);

        // BUG FIX: cudaMallocPitch reports failure through its
        // cudaError_t return value; the pointer is NOT guaranteed to be
        // set to NULL on failure, so the original `if (!d_a)` tests an
        // indeterminate pointer and can miss the failure entirely.
        if (cudaMallocPitch(&d_a, &pitch, row * sizeof(float), row) != cudaSuccess)
        {
            printf("memory failed for 2^%d\n", i);
            return 1;
        }
        if (cudaMallocPitch(&d_b, &pitch, row * sizeof(float), row) != cudaSuccess)
        {
            printf("memory failed for 2^%d\n", i);
            cudaFree(d_a);
            return 1;
        }
        if (cudaMallocPitch(&d_c, &pitch, row * sizeof(float), row) != cudaSuccess)
        {
            printf("memory failed for 2^%d\n", i);
            cudaFree(d_a);
            cudaFree(d_b);
            return 1;
        }

        printf("memory alloted for 2^%d x 2^%d\n", i, i);
        ++i;
    }
}
11,337
/*
 * Alex Laubscher
 * Gillespie Algorithm
 * Uses a GPU generator for the numbers
 */
#include <curand.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>   /* calloc/free — was pulled in only transitively */
#include <math.h>     /* BUG FIX: log() was used with no math header   */

int main()
{
    /* BUG FIX: counter MUST start at 0. It is read below in
     * `counter % (count / 2)` before it is ever incremented; the
     * original left it uninitialized (undefined behavior — the first
     * batch of random numbers might never be generated). */
    int counter = 0;
    int death;
    int total;
    double tau;
    double sample;
    int check;

    /* GPU generator state: batches of `count` uniforms, consumed two
     * per reaction (one for the waiting time, one for the choice). */
    int count = 2500000;
    curandGenerator_t gen;
    float *devURN;
    float *hostURN;

    /* Allocate n floats on host and device. */
    hostURN = (float *)calloc(count, sizeof(float));
    cudaMalloc((void **) &devURN, count * sizeof(float));

    /* Create and seed the generator (fixed seed for reproducibility). */
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);

    int pop = 0;              /* initial population                       */
    double time = 0;          /* simulated time                           */
    double maxTime = 100000;  /* stop after 100,000 simulated seconds     */
    int birth = 1000;         /* constant birth propensity                */

    clock_t time_elapsed = clock();

    while (time < maxTime)
    {
        death = pop;           /* death propensity tracks the population */
        total = birth + death; /* sum over the propensities              */

        /* Refill the random batch every count/2 reactions. */
        check = counter % (count / 2);
        if (check == 0)
        {
            curandGenerateUniform(gen, devURN, count);
            /* Copy the fresh numbers back to the HOST. */
            cudaMemcpy(hostURN, devURN, count * sizeof(float), cudaMemcpyDeviceToHost);
        }

        /* Exponential waiting time: log of a uniform in (0,1] is
         * negative, so tau < 0 and `time -= tau` advances the clock. */
        tau = (1.0 / total) * log(hostURN[check * 2]);

        /* Second random choice picks which reaction fired. */
        sample = total * (hostURN[check * 2 + 1]);

        if (sample < birth) { pop = pop + 1; }
        else                { pop = pop - 1; }

        time = time - tau;
        counter++;
    }

    time_elapsed = clock() - time_elapsed;
    double timer = ((double) time_elapsed) / CLOCKS_PER_SEC;
    double rate = counter / timer;

    /* BUG FIX: pop is an int — the original printed it with %f, which
     * is undefined behavior and prints garbage. */
    printf("Population: %d\n", pop);
    printf("Counter: %d\n", counter);
    printf("Timer: %f\n", timer);
    printf("Rate: %f\n", rate);

    curandDestroyGenerator(gen);
    cudaFree(devURN);
    free(hostURN);
    return 0;
}
11,338
#include "includes.h"

// Gather step for the weight gradient of a "vertical" naive convolution:
// for each flat output index i in [0, n), copies x[(i/kL)*kL*iC + i]
// into y[i]. Grid-stride loop, so any 1-D launch configuration is valid.
//
// NOTE(review): the index formula implies x must hold at least
// ((n-1)/kL)*kL*iC + n elements — presumably kL is a kernel-length
// grouping and iC the input-channel count, so each kL-sized group of
// outputs skips (iC-1) groups of inputs. TODO: confirm against caller.
__global__ void conv_vertical_naive_gradWeight(const int n, float *y, const float *x, const int kL, const int iC)
{
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) {
        // Integer division i/kL selects the group; multiplying back by
        // kL*iC strides over iC groups of the input per output group.
        y[i] = x[(i/kL)*kL*iC + i];
    }
}
11,339
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

// Define your kernels in this file you may use more than one kernel if you
// need to

// Privatized histogram: each block tallies its share of the input into a
// shared-memory copy (sized numBins, passed as dynamic shared memory),
// then merges that copy into the global bins with atomics. Assumes every
// data[] value is a valid bin index in [0, numBins).
__global__ void gpuHistogram(unsigned int* data, unsigned int* bins, unsigned int numElements, unsigned int numBins)
{
    extern __shared__ unsigned int privateHistogram[];

    // Clear the private copy; there are fewer threads than bins, so
    // each thread zeroes every blockDim.x-th bin.
    for (int bin = threadIdx.x; bin < numBins; bin += blockDim.x)
    {
        privateHistogram[bin] = 0;
    }
    __syncthreads();

    // Grid-stride walk over the input, counting into shared memory.
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < numElements; idx += stride)
    {
        atomicAdd(&privateHistogram[data[idx]], 1);
    }
    __syncthreads();

    // Fold this block's private counts into the global histogram.
    for (int bin = threadIdx.x; bin < numBins; bin += blockDim.x)
    {
        atomicAdd(&bins[bin], privateHistogram[bin]);
    }
}

/******************************************************************************
 Setup and invoke your kernel(s) in this function. You may also allocate more
 GPU memory if you need to
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins)
{
    const dim3 gridDim(30, 1, 1);   // 30 SMPs per machine
    const dim3 blockDim(32, 1, 1);  // 32 threads executing at once.
    const size_t sharedBytes = num_bins * sizeof(unsigned int);
    gpuHistogram<<<gridDim, blockDim, sharedBytes>>>(input, bins, num_elements, num_bins);
}
11,340
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>   /* malloc/free/exit — was relying on transitive includes */

/* Thread block size = number of threads of a block */
/* Notice: in this example, the input data size = BLOCK_SIZE */
/* (different with the CUDA Reduction assigment) */
#define BLOCK_SIZE 16

/* Work-efficient (Blelloch-style) INCLUSIVE scan over a single block.
 * Launch as <<<1, BLOCK_SIZE>>>. Assumes size <= BLOCK_SIZE; lanes past
 * `size` are padded with 0 so they never read/write out of bounds. */
__global__ void work_efficient_scan(const float* input, float* output, int size)
{
    /* Shared scratch buffer for the in-place scan. */
    __shared__ float XY[BLOCK_SIZE];

    unsigned int t = threadIdx.x;

    /* Load global -> shared. BUG FIX: the original loaded input[t]
     * unconditionally, reading out of bounds whenever size < BLOCK_SIZE.
     * Padding with 0 is harmless for a prefix sum. */
    XY[t] = (t < size) ? input[t] : 0.0f;

    /* Reduction (up-sweep): partial sums climb a binary tree; the block
     * total ends up in the last element. */
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        __syncthreads();   /* barrier outside any divergent branch */
        int index = (t + 1) * stride * 2 - 1;
        if (index < blockDim.x)
            XY[index] += XY[index - stride];
    }

    /* Post-scan (down-sweep): distribute the partial sums to complete
     * the inclusive scan. size/4 matches BLOCK_SIZE/4 under the
     * size == BLOCK_SIZE assumption documented above. */
    for (int stride = size / 4; stride > 0; stride /= 2)
    {
        __syncthreads();
        int index = (t + 1) * stride * 2 - 1;
        if (index + stride < blockDim.x)
            XY[index + stride] += XY[index];
    }

    __syncthreads();

    /* Write the final result back to global memory (guarded to match
     * the padded load). */
    if (t < size)
        output[t] = XY[t];
}

void checkCUDAError(const char *msg);

int main(int argc, char* argv[])
{
    int i;
    float* h_input, *h_output;
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /*******************/
    /** READING INPUT **/
    /*******************/
    int size = 0;
    scanf("%d", &size);

    /* Robustness: the kernel scans a single BLOCK_SIZE-thread block. */
    if (size <= 0 || size > BLOCK_SIZE)
    {
        fprintf(stderr, "size must be in 1..%d\n", BLOCK_SIZE);
        return EXIT_FAILURE;
    }

    h_input = (float*) malloc(sizeof(float)*size);
    h_output = (float*) malloc(sizeof(float)*size);

    /* BUG FIX: the original loop ran to size*size (copy-paste from a
     * matrix example) and overflowed the size-float heap buffer. This
     * is a 1-D scan: read exactly `size` values. */
    for (i = 0; i < size; ++i) { scanf("%f", &h_input[i]); }

    /* Allocate device memory. */
    float* d_input, *d_output;
    cudaMalloc(&d_input, sizeof(float)*size);
    cudaMalloc(&d_output, sizeof(float)*size);

    cudaEventRecord(start, 0);

    /* Copy input to device, run the kernel, copy the result back. */
    cudaMemcpy(d_input, h_input, size*sizeof(float), cudaMemcpyHostToDevice);
    work_efficient_scan<<<1, BLOCK_SIZE>>>(d_input, d_output, size);
    checkCUDAError("Kernel Invoking");
    cudaMemcpy(h_output, d_output, sizeof(float)*size, cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    /* BUG FIX: cudaEventElapsedTime reports MILLISECONDS; the original
     * label said "(s)". */
    fprintf(stderr, "Elapsed time = %f (ms)\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    /* Print the final scan result. */
    printf("The final inclusive scan result:\n");
    for (int i = 0; i < size; ++i) printf("%4.1f ", h_output[i]);

    cudaFree(d_input);
    cudaFree(d_output);
    free(h_input);
    free(h_output);
    return 0;
}

/* Report and abort on any pending CUDA error. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
11,341
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>   /* atoi/rand/malloc — was relying on transitive includes */

// CUDA example: finds mean number of mutual outlinks, among all pairs
// of Web sites in our set; in checking all (i,j) pairs, thread k will
// handle all i such that i mod totth = k, where totth is the number of
// threads

// procpairs() processes all pairs for a given thread: for each of this
// thread's rows i, counts positions k where both row i and a later row
// j have a 1, and folds the per-thread sum into *tot with one atomic.
__global__ void procpairs(int *m, int *tot, int n)
{
    int totth = gridDim.x * blockDim.x,  // total number of threads
        // my thread number among the totality of all threads/blocks
        me = blockIdx.x * blockDim.x + threadIdx.x;
    int i, j, k, sum = 0;

    for (i = me; i < n; i += totth) {     // do various rows i
        for (j = i + 1; j < n; j++) {     // do all rows j > i
            for (k = 0; k < n; k++)
                sum += m[n*i + k] * m[n*j + k];
        }
    }
    // One atomic per thread keeps contention low.
    atomicAdd(tot, sum);
}

int main(int argc, char **argv)
{
    // BUG FIX / robustness: the original dereferenced argv[1] and
    // argv[2] unconditionally and segfaulted when run without both
    // arguments.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <num_vertices> <num_blocks>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]),     // number of vertices
        nblk = atoi(argv[2]);  // number of blocks

    int *hm,    // host matrix
        *dm,    // device matrix
        htot,   // host grand total
        *dtot;  // device grand total
    int msize = n * n * sizeof(int);  // size of matrix in bytes

    // allocate and fill the host adjacency matrix with random 0/1,
    // zero diagonal (no self-links)
    hm = (int *) malloc(msize);
    int i, j;
    for (i = 0; i < n; i++) {
        hm[n*i + i] = 0;
        for (j = 0; j < n; j++) {
            if (j != i) hm[i*n + j] = rand() % 2;
        }
    }

    // device matrix + copy
    cudaMalloc((void **)&dm, msize);
    cudaMemcpy(dm, hm, msize, cudaMemcpyHostToDevice);

    // device total, initialized to 0
    htot = 0;
    cudaMalloc((void **)&dtot, sizeof(int));
    cudaMemcpy(dtot, &htot, sizeof(int), cudaMemcpyHostToDevice);

    // launch configuration: nblk blocks of 192 threads
    dim3 dimGrid(nblk, 1);
    dim3 dimBlock(192, 1, 1);

    procpairs<<<dimGrid, dimBlock>>>(dm, dtot, n);
    cudaDeviceSynchronize();   // wait for kernel to finish

    cudaMemcpy(&htot, dtot, sizeof(int), cudaMemcpyDeviceToHost);

    // for small n, dump the matrix as a sanity check
    if (n <= 15) {
        for (i = 0; i < n; i++) {
            for (j = 0; j < n; j++) printf("%d ", hm[n*i + j]);
            printf("\n");
        }
    }
    printf("mean = %f\n", htot / float((n*(n-1))/2));

    // clean up
    free(hm);
    cudaFree(dm);
    cudaFree(dtot);
    return 0;
}
11,342
#include "includes.h"

// Discrete Laplacian with boundary-aware neighbour counting on an
// nx x ny grid (row-major, row stride nx):
//   b[idx] = sum(in-bounds neighbours of a[idx]) - count * a[idx]
// Expects a 2-D launch covering the grid; out-of-range threads exit.
__global__ void lap(float *a, float *b, int nx, int ny)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= nx || y >= ny) return;

    const int idx = x + y * nx;
    float nbrCount = 0;  // how many neighbours are in bounds
    float nbrSum = 0;    // sum of the in-bounds neighbour values

    if (x > 0)      { nbrSum += a[idx - 1];  nbrCount++; }
    if (y > 0)      { nbrSum += a[idx - nx]; nbrCount++; }
    if (x < nx - 1) { nbrSum += a[idx + 1];  nbrCount++; }
    if (y < ny - 1) { nbrSum += a[idx + nx]; nbrCount++; }

    b[idx] = -nbrCount * a[idx] + nbrSum;
}
11,343
/** * Simple Linear Regression implementation in CUDA * * @author: Yvo Elling * @date: 08-01-21 */ #include <iostream> #include <cuda.h> #include <cstdint> #include <array> #include <initializer_list> #include <time.h> #include <chrono> #include <cmath> #include <limits> #include "linear_regression.cuh" #define INPUT_SIZE 46 #define ERROR_DIMENSIONS 3 void linear_regression_cpu(std::array<float, INPUT_SIZE> x, std::array<float, INPUT_SIZE> y, float bias, float intercept) { float j_error = std::numeric_limits<float>::max(); float learning_rate = 0.01; while(j_error > 0.13) { //array for storing intermediate error levels float errors[3] = {0, 0, 0}; for (int i = 0; i < INPUT_SIZE; ++i) { // Predict output based on current bias and intercept float y_pred = bias + intercept * x[i]; // Calculate J for this specific index and store in errors index 0 errors[0] += 0.5f * pow((y[i] - y_pred), 2); // Calculate bias error for this index and store in errors index errors[1] += -(y[i] - y_pred); // Calculate intercept error for this index errors[2] += -(y[i] - y_pred)*x[i]; } // Update bias and intercept based on errors float bias_new = bias - learning_rate * errors[1]; float intercept_new = intercept - learning_rate * errors[2]; // Update bias = bias_new; intercept = intercept_new; j_error = errors[0]; } std::cout << "CPU Results: Bias = " << bias << " and Intercept: " << intercept << std::endl; } int main(int argc, char **argv) { // Total error float j_error = std::numeric_limits<float>::max(); // Determine size of the x and y arrays size_t input_size = INPUT_SIZE * sizeof(float); size_t error_size = ERROR_DIMENSIONS * sizeof(float); // Define the pointers to the x and y arrays with their respective size reserved float* h_x = (float*)malloc(input_size); float* h_y = (float*)malloc(input_size); float* h_bias = (float*)malloc(sizeof(float)); float* h_intercept = (float*)malloc(sizeof(float)); float* h_results = (float*)malloc(error_size); // Initial values that are used for the 
linear regression std::array<float, INPUT_SIZE> x = {0.00f, 0.22f, 0.24f, 0.33f, 0.37f, 0.44f, 0.44f, 0.57f, 0.93f, 1.00f}; std::array<float, INPUT_SIZE> y = {0.00f, 0.22f, 0.58f, 0.20f, 0.55f, 0.39f, 0.54f, 0.53f, 1.00f, 0.61f}; // Compute random starting bias and intercept srand(time(NULL)); float bias = ((float) rand() / (RAND_MAX)); float intercept = ((float) rand() / (RAND_MAX)); float init_bias = bias; float init_intercept = intercept; // Store the address of the x and y arrays into the pointers h_x and h_y (host_x and host_y) h_x = &x[0]; h_y = &y[0]; h_bias = &bias; h_intercept = &intercept; // Allocate memory on GPU for the device_x (d_x) and device_y (d_y) of earlier calculated size float* d_x; float* d_y; float* d_bias; float* d_intercept; float* d_results; cudaMalloc(&d_x, input_size); cudaMalloc(&d_y, input_size); cudaMalloc(&d_results, error_size); // Copy the values stored in pointer h_x and h_y into d_x and d_y // Transfer data from CPU memory to GPU memory. cudaMemcpy(d_x, h_x, input_size, cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, input_size, cudaMemcpyHostToDevice); // Define stepsize for updating intercept and bias. float learning_rate = 0.01; //Start timing the procedure auto begin_gpu = std::chrono::high_resolution_clock::now(); while( j_error > 0.13) { // ALlocate memory for the pointers to the bias and intercept cudaMalloc(&d_bias, sizeof(float)); cudaMalloc(&d_intercept, sizeof(float)); // Copy the local value of the bias and intercept to the device memory. 
cudaMemcpy(d_bias, h_bias, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_intercept, h_intercept, sizeof(float), cudaMemcpyHostToDevice); // Launch kernel on GPU with pointers to data in GPU memory simple_linear_regression<<<1,INPUT_SIZE>>>(d_x, d_y, d_bias, d_intercept, d_results, INPUT_SIZE); // Wait for all threads to return cudaDeviceSynchronize(); // Retrieve the GPU out value and store in host memory cudaMemcpy(h_results, d_results, error_size, cudaMemcpyDeviceToHost); // Check if a CUDA error has occurred. cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << "Error: " << cudaGetErrorString(err) << std::endl; break; } // Free memory, on the next iteration we will allocate this memory again. cudaFree(d_bias); cudaFree(d_intercept); // Update bias and intercept based on errors float bias_new = bias - learning_rate * h_results[1]; float intercept_new = intercept - learning_rate * h_results[2]; // Update bias = bias_new; intercept = intercept_new; j_error = h_results[0]; } //End timing and compute total execution time auto end_gpu = std::chrono::high_resolution_clock::now(); auto elapsed_gpu = std::chrono::duration_cast<std::chrono::nanoseconds>(end_gpu - begin_gpu); // Print out latest values for total error, and bias and intercept respective errors std::cout << "GPU Results: Bias = " << bias << " and Intercept: " << intercept << std::endl; std::cout << "GPU-implementation execution time(ns): " << elapsed_gpu.count() << std::endl; //Start measuring execution time of C tasks auto begin = std::chrono::high_resolution_clock::now(); linear_regression_cpu(x, y, init_bias, init_intercept); auto end = std::chrono::high_resolution_clock::now(); auto elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin); std::cout << "C-implementation execution time(ns): " << elapsed.count() << std::endl; // Free memory on GPU cudaFree(d_x); cudaFree(d_y); return 0; }
11,344
#include "includes.h" __global__ void THCudaTensor_kernel_indexSelect_contiguous( float *tensor, float *src, long stride, float *index, long idxSize) { // In the typical case, each block of 128 threads handles a 4x128 // section of the output with each warp handling a single 1x128 row. // The outer loops handle inputs larger than 4*65535 or strides larger // than 128*65535. const int VT = 4; const int WARP_SIZE = 32; const int MAX_DIM_SIZE = 65535; for (int idx = blockIdx.x * blockDim.y + threadIdx.y; idx < idxSize; idx += blockDim.y * MAX_DIM_SIZE) { for (int startIdx = threadIdx.x + blockIdx.y * VT*WARP_SIZE; startIdx < stride; startIdx += VT*WARP_SIZE*MAX_DIM_SIZE) { const int srcIdx = ((int) index[idx] - 1) * stride; const int targetIdx = idx * stride; #pragma unroll for (int i = 0; i < VT; i++) { const int featureIdx = startIdx + i * WARP_SIZE; if (featureIdx < stride) { tensor[targetIdx + featureIdx] = src[srcIdx + featureIdx]; } } } } }
11,345
#include <cuda.h> #include <stdio.h> #include <math.h> #define BLOCK_WIDTH 16 #define TILE_WIDTH BLOCK_WIDTH extern "C" void gpu_mat_mul(float* h_M, float* h_N, float* h_P, int Mwidth,int Nwidth,int Swidth); __global__ void gpu_mat_mul_kernel(float* M, float* N, float* P, int Mwidth, int Nwidth, int Swidth){ __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // printf("blockx is %d,blocky is%d\n",bx,by); // Identify the row and column of the P element to work on // Each thread works on an element of P int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; // printf("Row %d,Col is%d\n",Row,Col); float sum = 0; int phase_num = (int)ceil((double) Nwidth/TILE_WIDTH); // Each thread loads 'Row'th row of M and 'Col'th column of N for (int ph = 0; ph < phase_num-1; ph++) { // Collaboratively load data into shared memory Mds[ty][tx] = M[Row * Nwidth + ph * TILE_WIDTH + tx]; Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * Swidth + Col]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) { sum += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } int ph = phase_num-1; int res = Nwidth - ph*TILE_WIDTH; if (tx < res){ Mds[ty][tx] = M[Row * Nwidth + ph * TILE_WIDTH + tx]; } if (ty < res){ Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * Swidth + Col]; } __syncthreads();//Barrier for (int k = 0; k < res ; k++) { sum += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); if (Row<Mwidth&&Col<Swidth){ P[Row * Swidth + Col] = sum; } } void gpu_mat_mul(float* h_M, float* h_N, float* h_P, int Mwidth,int Nwidth,int Swidth) { float *d_M, *d_N, *d_P; size_t size_of_float = sizeof(float); size_t size_M = Mwidth * Nwidth * size_of_float; size_t size_N = Nwidth * Swidth * size_of_float; size_t size_P = Mwidth * Swidth * size_of_float; cudaMalloc((void**)&d_M, size_M); cudaMalloc((void**)&d_N, size_N); cudaMalloc((void**)&d_P, size_P); cudaMemcpy(d_M, h_M, size_M, 
cudaMemcpyHostToDevice); cudaMemcpy(d_N, h_N, size_N, cudaMemcpyHostToDevice); cudaEvent_t start, stop; float elapsed_time = 0.0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dim3 grid_dim((int)ceil((double)Swidth/BLOCK_WIDTH),(int)ceil((double) Mwidth/BLOCK_WIDTH), 1); dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1); gpu_mat_mul_kernel<<<grid_dim, block_dim>>>(d_M, d_N, d_P, Mwidth,Nwidth,Swidth); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaMemcpy(h_P, d_P, size_P, cudaMemcpyDeviceToHost); // Free device memory for M, N, P cudaFree(d_M); cudaFree(d_N); cudaFree(d_P); cudaEventElapsedTime(&elapsed_time, start, stop); printf(" grid dim: %d, %d, %d.\n", grid_dim.x, grid_dim.y, grid_dim.z); printf(" block dim: %d, %d, %d.\n", block_dim.x, block_dim.y, block_dim.z); printf(" kernel time: %.5f sec\n", elapsed_time / 1000); cudaEventDestroy(start); cudaEventDestroy(stop); }
11,346
#include <stdio.h> #include <stdlib.h> __global__ void add(int *a, int *b, int *c) { // Position 1: To write Code here later int Ix, Iy, index; int n = 16; Ix = blockIdx.x * blockDim.x + threadIdx.x; Iy = blockIdx.y * blockDim.y + threadIdx.y; index = Ix * blockDim.x * gridDim.y + Iy * blockDim.y * gridDim.y ; int stride = 1 ; for (int i = index; i < n; i+=stride) c[i] = a[i] + b[i]; } int main() { int *a, *b, *c, *da, *db, *dc, N=16, i, j; a = (int*)malloc(sizeof(int)*N); // allocate host mem b = (int*)malloc(sizeof(int)*N); // and assign random c = (int*)malloc(sizeof(int)*N); // memory // Write code to initialize both a and b to 1’s. for (i = 0; i < N; i++) { a[i] = b[i] = 1; } cudaMalloc((void **)&da, sizeof(int)*N); cudaMalloc((void **)&db, sizeof(int)*N); cudaMalloc((void **)&dc, sizeof(int)*N); cudaMemcpy(da, a, sizeof(int)*N, cudaMemcpyHostToDevice); cudaMemcpy(db, b, sizeof(int)*N, cudaMemcpyHostToDevice); dim3 dimGrid(N/8, N/8, 1); dim3 dimBlock(N/8, N/8, 1); add<<<dimGrid,dimBlock>>>(da, db, dc); cudaMemcpy(c, dc, sizeof(int)*N, cudaMemcpyDeviceToHost); for (j = 0; j < N/4; j++) { for (i = 0; i < N/4; i++) { printf("a[%d] + b[%d] = %d\n", j*N/4+i, j*N/4+i, c[j*N/4+i]); } printf("\n"); } printf("\n"); }
11,347
#include <stdlib.h> #include <stdio.h> #include <time.h> #define THREADS 16384 #define BLOCKS 65536 #define NUM_VALS THREADS*BLOCKS void rand_nums(int *values, unsigned long length) { int i; for (i = 0; i < length; ++i) { values[i] = rand() % INT_MAX + 1;; } } __global__ void bitonicMinorSort(int *innerValues, int j, int k) { unsigned int i, ixj; i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i ^ j; if ((ixj) > i) { if ((i & k) == 0) { if (innerValues[i] > innerValues[ixj]) { int temp = innerValues[i]; innerValues[i] = innerValues[ixj]; innerValues[ixj] = temp; } } if ((i & k) != 0) { if (innerValues[i] < innerValues[ixj]) { int temp = innerValues[i]; innerValues[i] = innerValues[ixj]; innerValues[ixj] = temp; } } } } void bitonicSort(int *values, unsigned long n) { int *innerValues; size_t size = n * sizeof(int); cudaMalloc((void **) &innerValues, size); cudaMemcpy(innerValues, values, size, cudaMemcpyHostToDevice); int blockSize; if(n < THREADS) { blockSize = 1; } else { blockSize = ceil(n/THREADS); } dim3 blocks(blockSize, 1); dim3 threads(THREADS, 1); int j, k; for (k = 2; k <= n; k <<= 1) { for (j = k >> 1; j > 0; j = j >> 1) { bitonicMinorSort<<<blocks, threads>>> (innerValues, j, k); } } cudaMemcpy(values, innerValues, size, cudaMemcpyDeviceToHost); cudaFree(innerValues); } int main(int argc, char *argv[]) { int k = 10; if(argc==2) k = atoi(argv[1]); int *values = (int *) malloc(NUM_VALS * sizeof(int)); int *origValues = (int *) malloc(NUM_VALS * sizeof(int)); unsigned long n; double time_spent; clock_t begin, end; n = pow(2,k); printf("\nk = %d, n = %ld\n", k, n); rand_nums(values, n); for (unsigned long i = 0; i < n; i++) origValues[i] = values[i]; time_spent = 0.0; begin = clock(); bitonicSort(values, n); end = clock(); time_spent += (double)(end-begin) / CLOCKS_PER_SEC; printf("\tElapsed time: %f seconds\n", time_spent); free(values); free(origValues); }
11,348
#include "includes.h" __global__ void vignette(const unsigned char * src, unsigned char * dst, float inner, float outer, const size_t width, const size_t height) { // the xIndex and yIndex will be used cordinates pixels of the image // NOTE // NOTE This assumes that we are treating this as a two dimensional data structure and the blocks will be used in the same way // NOTE size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; // Checking to see if the indexs are within the bounds of the image if (xIndex < width && yIndex < height) { // offset represents postion of the current pixel in the one dimensional array size_t offset = yIndex * width + xIndex; // Shift the pixel oriented coordinates into image resolution independent coordinates // where 0, 0 is the center of the image. float x = xIndex / float(height) - float(width) / float(height) / 2.0f; float y = yIndex / float(height) - 0.5f; //Calculates current pixels distance from the center where the cordinates are 0, 0 float d = sqrtf(x * x + y * y); if (d < inner) { // if d is less than inner boundary, we don't change that specific image pixel *(dst + offset) = *(src + offset); } else if (d > outer) { // if d is greater than outer boundary, we set it to 0 so it becomes black *(dst + offset) = 0; } else { // If in between the inner and outer boundaries, it will be a shade of gray. // NOTE // NOTE This assumes... by the time we get here, we have checked that outer does not equal inner // NOTE This also assumes ... by the time we get here, we have made inner less than outer // NOTE float v = 1 - (d - inner) / (outer - inner); *(dst + offset) = (unsigned char)(*(src + offset) * v); } } }
11,349
// get device (GPU) information and specifications #include <iostream> int main(void) { cudaDeviceProp prop; int count; cudaGetDeviceCount( &count ); for(int i=0; i<count; i++) { cudaGetDeviceProperties( &prop, i); // print some useful info about the device here std::cout << "Name = " << prop.name << std::endl; std::cout << "Compute capability : " << prop.major << " " << prop.minor << std::endl; std::cout << "Clock rate = " << prop.clockRate << std::endl; std::cout << "Total global memory = " << prop.totalGlobalMem << std::endl; std::cout << "Total shared memory per block = " << prop.sharedMemPerBlock << std::endl; std::cout << "Total constant memory = " << prop.totalConstMem << std::endl; std::cout << "Max memory pitch = " << prop.memPitch << std::endl; std::cout << "Max threads per block = " << prop.maxThreadsPerBlock << std::endl; std::cout << "Warp size = " << prop.warpSize << std::endl; std::cout << "Max threads along X = " << prop.maxThreadsDim[0] << std::endl; std::cout << " Y = " << prop.maxThreadsDim[1] << std::endl; std::cout << " Z = " << prop.maxThreadsDim[2] << std::endl; std::cout << "Max grid size aong X = " << prop.maxGridSize[0] << std::endl; std::cout << " Y = " << prop.maxGridSize[1] << std::endl; std::cout << " Z = " << prop.maxGridSize[2] << std::endl; std::cout << "Multi Processor count = " << prop.multiProcessorCount << std::endl; std::cout << std::endl; } return 0; }
11,350
#include <iostream> // Thrust headers #include <thrust/device_vector.h> #include <thrust/host_vector.h> using std::cout; using std::endl; int main(int argc, char **argv) { unsigned int n = atoi(argv[1]); // Timing CUDA events cudaEvent_t start, stop; float milliseconds = 0; cudaEventCreate(&start); cudaEventCreate(&stop); // 1. Allocating a host vector of size n thrust::host_vector<float> H(n); // Filling with random values between low and high float low = -100, high = 100; // Setting Random values to the host array for (unsigned int i = 0; i < n; ++i) { // This generates a random floats in range low to high H[i] = (float)(low + (((float)rand()) / (float)RAND_MAX) * (high - low)); } // 2. Copy from host to device thrust::device_vector<float> D = H; // Allocating an output device vector of size n thrust::device_vector<float> Dout(n); // 3. Call the thrust:exclusive_scan function cudaEventRecord(start); thrust::exclusive_scan(D.begin(), D.end(), Dout.begin()); cudaEventRecord(stop); cudaEventSynchronize(stop); // 4. Prints the last element cout << Dout[n - 1] << endl; // 5. Prints the time taken to run the scan in milliseconds cudaEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << endl; return 0; }
11,351
#include "includes.h" __global__ void gradientColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD ) { __shared__ float s_Data[COLUMNS_GRAD_BLOCKDIM_Z][COLUMNS_GRAD_BLOCKDIM_X][(COLUMNS_GRAD_RESULT_STEPS + 2 * COLUMNS_GRAD_HALO_STEPS) * COLUMNS_GRAD_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_GRAD_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_GRAD_RESULT_STEPS - COLUMNS_GRAD_HALO_STEPS) * COLUMNS_GRAD_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * COLUMNS_GRAD_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ * imageH + baseY) * imageW + baseX; d_Dst += (baseZ * imageH + baseY) * imageW + baseX; //Main data #pragma unroll for (int i = COLUMNS_GRAD_HALO_STEPS; i < COLUMNS_GRAD_HALO_STEPS + COLUMNS_GRAD_RESULT_STEPS; i++) { s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_GRAD_BLOCKDIM_Y] = d_Src[i * COLUMNS_GRAD_BLOCKDIM_Y * imageW]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_GRAD_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_GRAD_BLOCKDIM_Y] = (baseY + i * COLUMNS_GRAD_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_GRAD_BLOCKDIM_Y * imageW] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_GRAD_HALO_STEPS + COLUMNS_GRAD_RESULT_STEPS; i < COLUMNS_GRAD_HALO_STEPS + COLUMNS_GRAD_RESULT_STEPS + COLUMNS_GRAD_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_GRAD_BLOCKDIM_Y]= (baseY + i * COLUMNS_GRAD_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_GRAD_BLOCKDIM_Y * imageW] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_GRAD_HALO_STEPS; i < COLUMNS_GRAD_HALO_STEPS + COLUMNS_GRAD_RESULT_STEPS; i++) { float sum = 0; sum += s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_GRAD_BLOCKDIM_Y + 1]; sum -= s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_GRAD_BLOCKDIM_Y - 1]; sum *= 0.5f; d_Dst[i * COLUMNS_GRAD_BLOCKDIM_Y * imageW] = sum; } }
11,352
//#include "utils.cuh" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <sys/time.h> #include <string.h> #include <math.h> #include <limits.h> #define BLOCKSIZE 1 struct MatrixStorage { int *row_ptr; int row_size; int *col_ind; int col_size; double *values; int val_size; }; __host__ int* allocateArray(int size); __host__ double* allocateDoubleArray(int size); __host__ void initializeDoubleArray(double* arr, int size, double initalizer); __host__ void initializeArray(int* arr, int size, int initalizer); //void allocateMatrixStorage(struct MatrixStorage* matrixOut, int rows,int values); __host__ void allocateMatrixStorage(int** row_ptr, int **col_ind,double **values, int row_size, int col_size, int val_size); __host__ void initializeMatrixStorage(int **row_ptr, int **col_ind,double **values, int row_size, int col_size, int val_size); __host__ void readMatrix(int **row_ptr, int **col_ind,double **values, int &row_size, int &col_size, int &val_size, char *filename); __host__ void accumulateCounts(int* arr,int size,int val); __host__ void printArray(int* arr, int size); __host__ void printDoubleArray(double* arr, int size); __host__ void matrix_vector_multip(int *row_ptr, int *col_ind, double *values, int rows,int columns, double **x ); __host__ int* allocateArray(int size){ int *arr; arr = (int *)malloc(sizeof(int) * size); return arr; } __host__ double* allocateDoubleArray(int size){ double *arr; arr = (double *)malloc(sizeof(double) * size); return arr; } __host__ void initializeArray(int* arr, int size, int initalizer){ int i; for (i = 0; i < size; i++){ arr[i] = initalizer; } } __host__ void initializeDoubleArray(double* arr, int size, double initalizer){ int i; for (i = 0; i < size; i++){ arr[i] = initalizer; } } __host__ void allocateMatrixStorage(int** row_ptr, int **col_ind,double **values, int row_size,int col_size,int val_size){ *row_ptr = allocateArray(row_size); *col_ind = allocateArray(val_size); *values = 
allocateDoubleArray(val_size); } __host__ void initializeMatrixStorage(int** row_ptr, int **col_ind,double **values, int row_size,int col_size,int val_size){ initializeArray(*row_ptr, row_size, 0); initializeArray(*col_ind, val_size, val_size); initializeDoubleArray(*values, val_size, (double) val_size); } __host__ void readMatrix(int **row_ptr, int **col_ind,double **values, int &row_size, int &col_size, int &val_size, char *filename){ FILE * fileToRead; int i; int row_index,col_index; double val; fileToRead = fopen(filename,"r"); //opens the file if (fileToRead == NULL){ printf("file cant be found\n"); return ; }else{ fscanf(fileToRead,"%d %d %d", &(row_size), &(col_size), &(val_size)); allocateMatrixStorage( row_ptr, col_ind, values, row_size,col_size,val_size); initializeMatrixStorage(row_ptr, col_ind, values,row_size,col_size, val_size); for (i = 0; i < val_size; i++) { fscanf(fileToRead,"%d %d %lg", &row_index, &col_index, &val); //if (row_index == 5) //printf("%d,%d\n", row_ptr[row_index-1],row_index); (*row_ptr)[row_index-1] = (*row_ptr)[row_index-1] + 1; (*values)[i] = val; (*col_ind)[i] = col_index -1; } //printf("\n"); } fclose(fileToRead); } __host__ void accumulateCounts(int* arr,int size,int val){ int i; if (arr[size-1] == 0){ arr[size-1] = val; } else{ arr[size-1] = val - arr[size-1]; } for (i = size-2; i >=0; i--){ if (arr[i] == 0) arr[i] = arr[i+1]; else { arr[i] = arr[i+1] - arr[i]; } } } __host__ void printArray(int* arr, int size){ int i; for (i = 0; i < size; i++){ printf("%d\n", arr[i]); } printf("\n"); } __host__ void printDoubleArray(double* arr, int size){ int i; for (i = 0; i < size; i++){ printf("%lf\n", arr[i]); } printf("\n"); } __host__ void matrix_vector_multip(int *row_ptr, int *col_ind, double *values, int rows,int columns, double **x ){ int i,j; double val = 0.0; double *newArr = allocateDoubleArray(rows); initializeDoubleArray(newArr,rows,0.0); double *tmpX = *x; for (i = 0; i < rows-1; i++){ if (row_ptr[i] != row_ptr[i+1]){ for 
(j = row_ptr[i]; j < row_ptr[i+1];j++){ val = val + tmpX[col_ind[j]]*values[j]; } newArr[i] = val; } else { newArr[i] = 0.0; } val = 0.0; } //printArray(col_ind,columns); i = rows -1; if (row_ptr[i] < rows){ for (j = row_ptr[i]; j < columns;j++){ val = val + tmpX[col_ind[j]]*values[j]; } newArr[i] = val; }else { newArr[i] = 0.0; } val = 0.0; *x = newArr; newArr = NULL; free(tmpX); } __global__ void initializeDoubleArrayDev(double* arr, int size, double initalizer){ int i; for (i = 0; i < size; i++){ arr[i] = initalizer; } } __global__ void matrix_vector_multip_dev(int *row_ptr, int *col_ind, double *values, int *rowsArr,int *columnsArr, double *x, double *newArr,int *single_block_size ){ int block_size = single_block_size[0]; int x_tid = blockIdx.x*blockDim.x + threadIdx.x; int y_tid = blockIdx.y*blockDim.y + threadIdx.y; int thread_index = x_tid + gridDim.x*block_size*y_tid; int j; int rows = rowsArr[0]; int columns = columnsArr[0]; double val = 0.0; int ofset = rows * thread_index; if (thread_index < rows){ newArr[thread_index] = 0.0; } if (thread_index < rows -1 ){ if (row_ptr[thread_index] != row_ptr[thread_index+1]){ for (j = row_ptr[thread_index]; j < row_ptr[thread_index+1];j++){ val = val + x[col_ind[j]] *values[j]; } newArr[thread_index] += val; }else { newArr[thread_index] += 0.0; } } else { if (thread_index == rows -1 && row_ptr[thread_index] < rows){ for (j = row_ptr[thread_index]; j < columns;j++){ val = val + x[col_ind[j]] *values[ofset + j]; } newArr[thread_index] += val; }else { newArr[thread_index] += 0.0; } } __syncthreads(); } int main(int argc, char** argv){ int noThreads,number_of_iter,print_what; char *filename; if (argc < 5){ printf("Please enter the all the following arguments:\n"); printf("1. The number of threads used to compute Matrix-vector product\n"); printf("2. The number of repetitions \n"); printf("3. An argument to print on stdout \n"); printf("4. 
Test-file name\n"); exit(1); }else { noThreads = atoi(argv[1]); number_of_iter = atoi(argv[2]); print_what = atoi(argv[3]); filename = argv[4]; } int *d_rowptr, *d_col_ind; double *d_values; int *d_rows, *d_columns; int *row_ptr, *col_ind, *single_block_size; double *values; int row_size, col_size, val_size; double * man_x, *man_x_result; double * d_man_x, *d_man_x_result; readMatrix(&row_ptr,&col_ind,&values,row_size,col_size,val_size ,filename); accumulateCounts(row_ptr,row_size,val_size); cudaMalloc((void**)&d_rowptr, sizeof(int) * row_size); cudaMalloc((void**)&d_col_ind, sizeof(int) * val_size); cudaMalloc((void**)&d_values, sizeof(double) * val_size); cudaMalloc((void**)&d_rows, sizeof(int)); cudaMalloc((void**)&d_columns, sizeof(int)); cudaMalloc((void**)&single_block_size, sizeof(int)); cudaMalloc((void**)&man_x, sizeof(double) * row_size); cudaMalloc((void**)&man_x_result, sizeof(double) * row_size); cudaMalloc((void**)&d_man_x, sizeof(double) * row_size); cudaMalloc((void**)&d_man_x_result, sizeof(double) * row_size); man_x = allocateDoubleArray(row_size); man_x_result = allocateDoubleArray(row_size); initializeDoubleArray(man_x,row_size,1.0); cudaMemcpy(d_rowptr, row_ptr, sizeof(int) * row_size, cudaMemcpyHostToDevice); cudaMemcpy(d_col_ind, col_ind, sizeof(int) * val_size, cudaMemcpyHostToDevice); cudaMemcpy(d_values, values, sizeof(double) * val_size, cudaMemcpyHostToDevice); cudaMemcpy(d_rows, &row_size, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_columns, &col_size, sizeof(int), cudaMemcpyHostToDevice); int block_size = BLOCKSIZE; int a_block_can_hold =block_size*block_size; int blocks_in_a_grid; if (row_size % a_block_can_hold != 0){ blocks_in_a_grid=row_size/a_block_can_hold+1; }else { blocks_in_a_grid=(row_size/a_block_can_hold+1)-1; } int grids = noThreads /(blocks_in_a_grid*a_block_can_hold); if (noThreads%(blocks_in_a_grid*a_block_can_hold)>0){ grids++; } dim3 dimBlock(block_size,block_size); dim3 dimGrid(grids,blocks_in_a_grid); double 
*pt; cudaMemcpy(single_block_size , &block_size, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_man_x, man_x, sizeof(double) * row_size, cudaMemcpyHostToDevice); struct timeval t1, t2; gettimeofday(&t1, 0); for(int iter=0; iter<number_of_iter; iter++) { matrix_vector_multip_dev<<<dimGrid,dimBlock>>>(d_rowptr, d_col_ind,d_values, d_rows,d_columns, d_man_x,d_man_x_result,single_block_size ); cudaDeviceSynchronize(); pt = d_man_x; d_man_x = d_man_x_result; d_man_x_result = pt; } gettimeofday(&t2, 0); double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; //printf("Time to generate: %3.11f ms \n", time); cudaMemcpy(man_x, d_man_x, sizeof(double) * row_size, cudaMemcpyDeviceToHost); if (print_what == 1){ printDoubleArray(values,val_size); printArray(col_ind,val_size); printArray(row_ptr,row_size); } if (print_what == 2){ printDoubleArray(man_x,row_size); } return 0; }
11,353
#include "includes.h" __global__ void profilePhaseSetup_kernel() {}
11,354
// // Created by igor on 11.05.2021. // #include "ConvMask.cuh" __device__ __host__ double* ConvMask::operator[](std::size_t at){ return data[at]; } __device__ __host__ ConvMask::ConvMask(std::initializer_list<double> list) noexcept : data() { int i = 0; double sum = 0; for(double d: list){ data[0][i] = d; sum += d; ++i; } for (int j = 0; j < 9; ++j){ data[0][j] /= sum; } }
11,355
#include <stdio.h> #include <cuda.h> #define N 20 struct node { struct node *next; int data; }; struct node *createNode(int ii) { struct node *nn = (struct node *)malloc(sizeof(struct node)); nn->data = ii; nn->next = NULL; return nn; } struct node *createList() { struct node *head = NULL; for (int ii = 20; ii > 0; --ii) { struct node *nn = createNode(ii); nn->next = head; head = nn; } return head; } __device__ __host__ void printList(struct node *head) { if (head) { printf("%d ", head->data); printList(head->next); } else { printf("\n"); } } __global__ void printListGPU(struct node *head) { printList(head); } struct node *copyNode(struct node *nn) { struct node *nngpu; cudaMalloc(&nngpu, sizeof(struct node)); cudaMemcpy(nngpu, nn, sizeof(struct node), cudaMemcpyHostToDevice); return nngpu; } struct node *copyList(struct node *head) { if (!head) return NULL; struct node nn; nn.next = copyList(head->next); nn.data = head->data; return copyNode(&nn); } int main() { struct node *head = createList(); struct node *gpuhead = copyList(head); printList(head); printListGPU<<<1, 1>>>(gpuhead); cudaDeviceSynchronize(); return 0; }
11,356
/*----------- * * distanceGlobal.cu * * This is the source file of a kernel to calculate total distances * * of all points only using global memory. * * streamsOptBenchmark/distanceGlobal.cu * * By Hao Li * *------------ */ #include <stdlib.h> #include <stdio.h> #include <math.h> // __global__ void gpu_global_distance(float *d_res, float *d_x, float *d_y, int samples) __global__ void gpu_global_distance(float *d_res, float *d_x, int samples) { for(int l = 0; l < 1000; l++) { int idx1 = blockDim.x * blockIdx.x + threadIdx.x; int idx2; float distance = 0.0; for (idx2 = 0; idx2 < samples; idx2++) // distance += sqrt((d_x[idx1]-d_x[idx2])*(d_x[idx1]-d_x[idx2]) + (d_y[idx1]-d_y[idx2])*(d_y[idx1]-d_y[idx2])); distance += sqrt((d_x[idx1] - d_x[idx2]) * (d_x[idx1] - d_x[idx2]) + (d_x[samples + idx1] - d_x[samples + idx2]) * (d_x[samples + idx1] - d_x[samples + idx2])); d_res[idx1] = distance / samples; } } /* Do not modify this function * compute sum of the average distances */ // float compute_sum(const float *array, const int n) // { // int i; // float sum = 0.0; // for(i=0; i<n; i++) // sum += array[i]; // return sum; // } // #define thread_per_block 128 // #define SAMPLES 100 /* Do not modify this function * Initializes the input arrays */ void init_data(float **x, float **y, float **r, int n) { if( n < 1){ fprintf(stderr, "#of_samples should be +ve\n"); exit(0); } /* Allocate memory for the arrays */ int i; *x = (float*) malloc( sizeof(float)*n ); *y = (float*) malloc( sizeof(float)*n ); *r = (float*) malloc( sizeof(float)*n ); if( *x==NULL || *y==NULL || *r==NULL ){ fprintf(stderr, "Memory allocation failed\n"); exit(0); } /* Generate random points between 0 and 1 */ for(i=0; i<n; i++){ (*x)[i] = (float) rand() / RAND_MAX; (*y)[i] = (float) rand() / RAND_MAX; } } // int main(int argc, char **argv) // { // float *host_x, *host_y; // float *host_result; // float host_sum = 0.0; // int samples = SAMPLES; // init_data(&host_x, &host_y, &host_result, samples); // float 
*d_x, *d_y, *d_res, *d_res2; // int num_of_block = ceil(samples/thread_per_block); // int size = sizeof(float) * samples; // cudaMalloc((void**)&d_x, size); // cudaMalloc((void**)&d_y, size); // cudaMalloc((void**)&d_res, size); // cudaMalloc((void**)&d_res2, size); // cudaMemcpy(d_x, host_x, size, cudaMemcpyHostToDevice); // cudaMemcpy(d_y, host_y, size, cudaMemcpyHostToDevice); // dim3 GridDim(num_of_block, 1, 1), BlockDim(thread_per_block, 1, 1); // gpu_global_distance<<<GridDim, BlockDim>>>(d_res, d_x, d_y, samples); // cudaMemcpy(host_result, d_res, size, cudaMemcpyDeviceToHost); // host_sum = compute_sum(host_result, samples); // printf("GPU Global Memory -- Result = %f\n", host_sum); // // gpu_shared_memory<<<GridDim, BlockDim>>>(d_res2, d_x, d_y, samples); // // cudaMemcpy(host_result, d_res2, size, cudaMemcpyDeviceToHost); // // host_sum = compute_sum(host_result,samples); // // printf("GPU Shared Memory -- Result = %f", host_sum); // cudaFree(d_x); // cudaFree(d_y); // cudaFree(d_res); // //cudaFree(d_res2); // free( host_x ); // free( host_y ); // free( host_result ); // return 0; // }
11,357
#include "includes.h" __global__ void kernelAddMullSqr(const int N, double* S, double* A, double m) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { S[i] += m * A[i] * A[i]; } }
11,358
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <chrono> #include <string> #include <stdio.h> #include <string.h> using namespace std; void find_c(char*, char*, int, int); void set_mat(int*, int, int); void print_matrix(int*, int, int); __global__ void find_lineal(char*, char*, int, int, int*); int main() { //Works as Chain_1 for Y and Chain_2 for X string chain_1; string chain_2; cout << "Chain_1: " << endl; cin >> chain_1; cout << "Chain_2: " << endl; cin >> chain_2; //-->Add input analyzer //cout << chain_1.size() << " ; " << chain_2.size() << endl; char* c_1 = new char[(int)chain_1.size()]; char* c_2 = new char[(int)chain_2.size()]; strcpy(c_1, chain_1.c_str()); strcpy(c_2, chain_2.c_str()); find_c(c_1, c_2, (int)chain_1.size(), (int)chain_2.size()); //Size proved More or Less return 0; } void set_mat(int* m, int x, int y) { for (int i = 0; i < y; i++) { for (int j = 0; j < x; j++) { m[i * y + j] = 0; } } int temp = 0; for (int i = 0; i < y; i++) { m[i * y] = temp; temp -= 2; } temp = 0; for (int j = 0; j < x; j++) { m[j] = temp; temp -= 2; } //print_matrix(m, x, y); } void print_matrix(int* m, int x, int y) { for (int i = 0; i < y; i++) { for (int j = 0; j < x; j++) { cout << m[i * y + j] << " "; } cout << endl; } } void find_c(char* c_1, char* c_2, int s_1, int s_2) { int c_1_size = s_1 * sizeof(char); int c_2_size = s_2 * sizeof(char); int mat_size = s_1 * s_2 * sizeof(int); // Takes multiplication between int* mat = new int[mat_size]; // Through all function set_mat(mat, s_1, s_2); //Matriz Configured print_matrix(mat, s_1, s_2); /* Rewiewed cout << "Mat_size: " << mat_size << endl; cout << "c_1_size: " << c_1_size << endl; cout << "c_2_size: " << c_2_size << endl; cout << c_1 << "_" << endl; cout << c_2 << "_" << endl; */ char* d_c_1; char* d_c_2; int* d_mat; cudaMalloc((void**) &d_mat, mat_size); cudaMalloc((void**) &d_c_1, c_1_size); cudaMalloc((void**) &d_c_2, c_2_size); cudaMemcpy(d_c_1, c_1, c_1_size, 
cudaMemcpyHostToDevice); //Sending chain characters from PC to Videocard cudaMemcpy(d_c_2, c_2, c_2_size, cudaMemcpyHostToDevice); // cudaMemcpy(d_mat, mat, mat_size, cudaMemcpyHostToDevice); float block = 16; dim3 grid_size(ceil(c_1_size / block), ceil(c_2_size/block), 1); dim3 block_size(block, block, 1); //Core here find_lineal <<< grid_size, block_size >>> (d_c_1, d_c_2, c_1_size, c_2_size, d_mat); cudaMemcpy(mat, d_mat, mat_size, cudaMemcpyDeviceToHost); cudaFree(d_c_1); cudaFree(d_c_2); cudaFree(d_mat); print_matrix(mat, s_1, s_2); } //Remember first c_1 is for Y axis and c_2 for X axis __global__ void find_lineal(char* c_1, char* c_2, int s_1, int s_2, int* mat) { int gpu_columna = (blockDim.x * blockIdx.x) + threadIdx.x; // Left to right int gpu_fila = (blockDim.y * blockIdx.y) + threadIdx.y; // Up to down //Inefficient implemntation using 1 thread (1,1) if ((gpu_fila == 1) && (gpu_columna == 1)) { printf("Im looking at you \n"); printf(" %d - %d \n", s_1, s_2); for (int i = 1; i < s_1; i++) { for (int j = 1; j < s_2; j++) { int pos = (i * s_2 + j); int pos_1 = (i * s_2 + (j - 1) ); int pos_2 = ( (i - 1) * s_2 + (j - 1)); int pos_3 = ( (i - 1) * s_2 + j); printf("( %d - %d - %d - %d -- )\n", pos, pos_1, pos_2, pos_3); printf("( %d - %d - %d - %d )\n", mat[pos], mat[pos_1], mat[pos_2], mat[pos_3]); int extra_value = 0; if (c_1[i-1] != c_2[j-1]) { extra_value = -1; } else { extra_value = 1; } int value = mat[pos_1]; if (value < (mat[pos_2] + extra_value)) { value = (mat[pos_2] + extra_value); } else if (value < mat[pos_3]) { value = mat[pos_3]; } mat[pos] = value; } printf("\n"); } printf("\n"); } }
11,359
#include "includes.h" __global__ void nmfcpy(double *mat, double *matcp, int m, int n) //kernel copy must be block synchronized!!! { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if (row < m && col < n) mat[row*n+col] = matcp[row*n+col]; }
11,360
//*****************************************************************************
// HPC project: merging and sorting arrays on the GPU
// Authors: ROBIN Clement and SAULNIER Solene
// Class: MAIN5
// Date: december 2020
// Question 5, with streams and shared memory
//*****************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // FIX: main() calls strcmp(); this include was missing

#define N 536870912 // max array size (= d in the project)
#define threadsPerBlock 1024
#define numBlocks 65535

//*****************************************************************************
// CPU functions: merge and verification
//*****************************************************************************

// Returns -1 when tab[0..size-1] is sorted ascending, otherwise the first
// index i where tab[i] > tab[i+1].
int verif_trie(int *tab,int size)
{
    for (int i=0; i<size-1; i=i+1)
        if (tab[i]>tab[i+1])
            return i;
    return -1;
}

//*****************************************************************************
// GPU functions (array merge), "small" variant: one slice per block
//*****************************************************************************

// Merge-path merge of sorted runs A (size_A elements) and B (size_B
// elements) into slice blockIdx.x of M. One thread produces one output
// element by binary search along the merge path (K/P bound the search
// segment on the diagonal, Q is the probe point).
__device__ void mergeSmallBatch_k(int *A, int *B, int *M, int size_A, int size_B, int size_M, int slice_size)
{
    int i = threadIdx.x;
    if (i < size_A+size_B) {
        /* Merge */
        int K[2],P[2],Q[2];
        int offset;
        if (i>size_A) {
            K[0]=i-size_A; K[1]=size_A;
            P[0]=size_A;   P[1]=i-size_A;
        }
        else {
            K[0]=0; K[1]=i;
            P[0]=i; P[1]=0;
        }
        while (1) {
            offset=abs(K[1]-P[1])/2;
            Q[1]=K[1]-offset;
            Q[0]=K[0]+offset;
            if (Q[1] >= 0 && Q[0] <= size_B && (Q[1]== size_A || Q[0]==0 || A[Q[1]]>B[Q[0]-1])) {
                if (Q[0]==size_B || Q[1]==0 || A[Q[1]-1]<=B[Q[0]]) {
                    if (Q[1]<size_A && (Q[0]==size_B || A[Q[1]]<=B[Q[0]]))
                        M[blockIdx.x * slice_size + i]=A[Q[1]];
                    else
                        M[blockIdx.x * slice_size + i]=B[Q[0]];
                    break;
                }
                else {
                    K[0]=Q[0]+1; K[1]=Q[1]-1;
                }
            }
            else {
                P[0]=Q[0]-1; P[1]=Q[1]+1;
            }
        }
    }
}

// One block merges one slice of M (two sorted halves of size_A and size_B
// elements), staging both halves in shared memory first.
__global__ void small_sortManager(int *M, int size_A, int size_B, int size_M, int number_of_slices)
{
    int slice_size = size_A + size_B;
    /* Load A and B into shared memory (one shared buffer, split in two) */
    __shared__ int shared_AB[1024]; // A and B together never exceed 1024
    int* s_A = (int*) &shared_AB[0];
    int* s_B = (int*) &s_A[size_A];
    __syncthreads();
    if (threadIdx.x < size_A)
        s_A[threadIdx.x] = M[blockIdx.x *slice_size+ threadIdx.x];
    if (threadIdx.x >= size_A && threadIdx.x < size_B + size_A )
        s_B[threadIdx.x-size_A] = M[blockIdx.x *slice_size+ threadIdx.x];
    __syncthreads();
    mergeSmallBatch_k(s_A, s_B, M, size_A, size_B, size_M,slice_size);
}

// Same as small_sortManager, except the last block (blockIdx.x ==
// number_of_slices) merges a leftover slice of different run sizes
// (size_A_extra / size_B_extra).
__global__ void small_sortManager_extraSlice(int *M ,int size_A,int size_B,int size_A_extra,int size_B_extra,int size_M,int number_of_slices)
{
    int slice_size = size_A + size_B;
    /* Load A and B into shared memory */
    __shared__ int shared_AB[1024]; // A and B together never exceed 1024
    int* s_A = (int*) &shared_AB[0];
    int* s_B;
    if (blockIdx.x == number_of_slices)
        s_B = (int*) &s_A[size_A_extra];
    else
        s_B = (int*) &s_A[size_A];
    __syncthreads();
    if (threadIdx.x < size_A)
        s_A[threadIdx.x] = M[blockIdx.x *slice_size+ threadIdx.x];
    if (threadIdx.x >= size_A && threadIdx.x < size_B + size_A)
        s_B[threadIdx.x-size_A] = M[blockIdx.x *slice_size+ threadIdx.x];
    if (blockIdx.x == number_of_slices && threadIdx.x < size_A_extra)
        s_A[threadIdx.x] = M[blockIdx.x *slice_size+ threadIdx.x];
    if (blockIdx.x == number_of_slices && threadIdx.x >= size_A_extra && threadIdx.x < size_B_extra + size_A_extra)
        s_B[threadIdx.x-size_A_extra] = M[blockIdx.x *slice_size+ threadIdx.x];
    __syncthreads();
    if (blockIdx.x == number_of_slices) {
        // pass the larger run first (mirrors the swap done in
        // sortManager_CPU for the big variant)
        if (size_A_extra < size_B_extra)
            mergeSmallBatch_k(s_B, s_A, M, size_B_extra, size_A_extra, size_M, slice_size);
        else
            mergeSmallBatch_k(s_A, s_B, M, size_A_extra, size_B_extra, size_M, slice_size);
    }
    else {
        mergeSmallBatch_k(s_A, s_B, M, size_A, size_B, size_M,slice_size);
    }
}

//*****************************************************************************
// GPU functions (array merge), "big" variant: grid-stride over one merge
//*****************************************************************************

// For every output index i, records which run the element comes from
// (Path[i] = 1 for A, 0 for B) and its index within that run
// (Path[i+size_M]). Path therefore needs 2*size_M ints.
__device__ void pathBig_k(int *A, int *B, int *Path, int size_A, int size_B, int size_M)
{
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i<size_M; i = i+blockDim.x*gridDim.x) {
        int K[2],P[2],Q[2];
        int offset;
        if (i>size_A) {
            K[0]=i-size_A; K[1]=size_A;
            P[0]=size_A;   P[1]=i-size_A;
        }
        else {
            K[0]=0; K[1]=i;
            P[0]=i; P[1]=0;
        }
        while (1) {
            offset=abs(K[1]-P[1])/2;
            Q[1]=K[1]-offset;
            Q[0]=K[0]+offset;
            if (Q[1] >= 0 && Q[0] <= size_B && (Q[1]== size_A || Q[0]==0 || A[Q[1]]>B[Q[0]-1])) {
                if (Q[0]==size_B || Q[1]==0 || A[Q[1]-1]<=B[Q[0]]) {
                    if (Q[1]<size_A && (Q[0]==size_B || A[Q[1]]<=B[Q[0]])) {
                        Path[i]=1;
                        Path[i+size_M]=Q[1];
                    }
                    else {
                        Path[i]=0;
                        Path[i+size_M]=Q[0];
                    }
                    break;
                }
                else {
                    K[0]=Q[0]+1; K[1]=Q[1]-1;
                }
            }
            else {
                P[0]=Q[0]-1; P[1]=Q[1]+1;
            }
        }
    }
}

// Gathers the merged output according to the path computed by pathBig_k.
// Each thread re-reads only the Path entries it wrote itself (same
// grid-stride), so no inter-block synchronization is needed.
__device__ void mergeBig_k(int *A, int *B, int *M,int *Path, int size_A, int size_B, int size_M)
{
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i<size_M; i = i+blockDim.x*gridDim.x) {
        if (Path[i]==1)
            M[i]=A[Path[i+size_M]];
        else if (Path[i]==0)
            M[i]=B[Path[i+size_M]];
        else
            printf("ERROR thread num %d block %d",i,blockIdx.x);
    }
}

// Device entry point: merges sorted runs A and B into M. Path is scratch
// space of 2*size_M ints.
__global__ void sortManager_GPU(int *A, int *B, int *M,int *Path, int size_A, int size_B, int size_M)
{
    pathBig_k(A, B, Path, size_A, size_B, size_M);
    mergeBig_k(A, B, M, Path, size_A, size_B, size_M);
}

//****************************************************************************************************
// CPU functions
//****************************************************************************************************

// Merges slice i of h_M (two sorted runs of h_size_A and h_size_B elements)
// on the GPU through stream[i], then writes the merged slice back into h_M.
void sortManager_CPU(int *h_M,int h_size_A,int h_size_B,int h_slice_size,int i,cudaStream_t stream[])
{
    /* CPU-side buffers */
    int h_size_M_tmp= h_size_A+h_size_B;
    int *h_A;
    int *h_B;
    int *h_M_tmp;
    h_A=(int *)malloc(h_size_A*sizeof(int));
    h_B=(int *)malloc(h_size_B*sizeof(int));
    h_M_tmp=(int *)malloc(h_size_M_tmp*sizeof(int));
    /* Fill A and B from the two halves of the slice */
    for (int j=0; j<h_size_A; j++) h_A[j] = h_M[i*h_slice_size+j];
    for (int j=0; j<h_size_B; j++) h_B[j] = h_M[i*h_slice_size+j+h_size_A];
    /* GPU-side buffers */
    int *d_A;
    int *d_B;
    int *d_M_tmp;
    int *d_Path_tmp;
    cudaMalloc(&d_A,h_size_A*sizeof(int));
    cudaMalloc(&d_B,h_size_B*sizeof(int));
    cudaMalloc(&d_M_tmp,h_size_M_tmp*sizeof(int));
    // FIX: pathBig_k writes both Path[i] and Path[i+size_M], so the scratch
    // buffer needs 2*size_M ints; the original allocated only size_M and
    // overflowed the allocation.
    cudaMalloc(&d_Path_tmp,2*h_size_M_tmp*sizeof(int));
    /* Transfers */
    cudaMemcpyAsync(d_A, h_A, h_size_A*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
    cudaMemcpyAsync(d_B, h_B, h_size_B*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
    /* Sort (pass the larger run first) */
    if (h_size_A<h_size_B)
        sortManager_GPU<<<numBlocks,threadsPerBlock,0, stream[i]>>>(d_B, d_A, d_M_tmp, d_Path_tmp, h_size_B, h_size_A, h_size_M_tmp);
    else
        sortManager_GPU<<<numBlocks,threadsPerBlock,0, stream[i]>>>(d_A, d_B, d_M_tmp, d_Path_tmp, h_size_A, h_size_B, h_size_M_tmp);
    /* Copy the result back from the GPU */
    cudaMemcpyAsync(h_M_tmp, d_M_tmp, h_size_M_tmp*sizeof(int), cudaMemcpyDeviceToHost, stream[i]);
    // FIX: make the ordering explicit before reading h_M_tmp on the host
    // (D2H copies into pageable memory are synchronous per the CUDA docs,
    // but an explicit sync is unambiguous and future-proof).
    cudaStreamSynchronize(stream[i]);
    /* Copy h_M_tmp back into h_M */
    for (int j=0; j<h_size_M_tmp; j++) h_M[i*h_slice_size+j]=h_M_tmp[j];
    /* Cleanup */
    free(h_A);
    free(h_B);
    free(h_M_tmp);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_M_tmp);
    cudaFree(d_Path_tmp);
}

//*****************************************************************************
// MAIN
//*****************************************************************************
int main(int argc, char const *argv[])
{
    //srand (time (NULL));
    srand (42); // fixed seed for reproducible runs
    /* CPU variables */
    /* Array size */
    int h_taille_M=N;
    /* Option handling: --s <size> overrides the array size */
    for (int i=0; i<argc-1; i=i+1) {
        if (strcmp(argv[i],"--s")==0 && atoi(argv[i+1])<N )
            h_taille_M=atoi(argv[i+1]);
    }
    /* Arrays and memory allocation */
    int *h_M;
    h_M=(int *)malloc(h_taille_M*sizeof(int));
    /* GPU variables */
    int *d_M;
    cudaMalloc(&d_M,h_taille_M*sizeof(int));
    /* Initialize the array with random values */
    for (int i=0; i<h_taille_M;i++)
        h_M[i]=rand()%10000;
    /* Array merge */
    /* general bookkeeping */
    int h_slice_size=1;
    int h_number_of_slices=1024/2;
    int h_slice_reste_precedent=0;
    int h_slice_reste=0;
    /* Case: sizes below 1024 */
    int h_is_irregular_batch=0;
    int h_irregular_batch_size=0;
    int h_irregular_slice_size=1;
    int h_irregular_number_of_slices=h_irregular_batch_size/2;
    int h_irregular_slice_reste_precedent=0;
    int h_irregular_slice_reste=0;
    /* Split M into batches of 1024 */
    int h_number_of_batches=h_taille_M/1024;
    if (h_taille_M%1024!=0) {
        h_irregular_batch_size=h_taille_M%1024;
        h_is_irregular_batch=1;
        h_irregular_number_of_slices=h_irregular_batch_size/2;
    }
    /* Allocate and initialize the batches */
    /* CPU batches */
    int **h_batch_M;
    int *h_irregular_batch_M;
    h_batch_M = (int **) malloc( h_number_of_batches* sizeof(int *) );
    for (int b=0; b<h_number_of_batches; b++) {
        h_batch_M[b]=(int *) malloc (1024 * sizeof(int ));
        for (int ind=0; ind<1024; ind++)
            h_batch_M[b][ind]=h_M[b*1024+ind];
    }
    h_irregular_batch_M = (int *) malloc( h_irregular_batch_size* sizeof(int ) );
    for (int ind=0; ind<h_irregular_batch_size; ind++)
        h_irregular_batch_M[ind]=h_M[h_number_of_batches*1024+ind];
    /* GPU batches */
    int *d_batch_M;
    int *d_irregular_batch_M;
    cudaMalloc(&d_batch_M,1024*sizeof(int));
    cudaMalloc(&d_irregular_batch_M,h_irregular_batch_size*sizeof(int));
    /* Declare and create the streams */
    cudaStream_t stream[h_number_of_batches+h_is_irregular_batch];
    for (int ind_stream=0; ind_stream<h_number_of_batches; ind_stream++)
        cudaStreamCreate(&stream[ind_stream]);
    if (h_is_irregular_batch==1)
        cudaStreamCreate(&stream[h_number_of_batches]);
    /* Timer */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /* Slices below 1024 */
    cudaEventRecord(start);
    while (h_number_of_slices > 0) {
        /* Update sizes and indices */
        h_slice_size=2*h_slice_size;
        h_slice_reste_precedent=h_slice_reste;
        h_slice_reste=1024%h_slice_size;
        h_number_of_slices=1024/h_slice_size;
        for (int b=0; b<h_number_of_batches;b++) {
            // NOTE(review): all batches share the single device buffer
            // d_batch_M while being issued on different streams, so copies
            // and kernels of different batches may overlap on the same
            // memory; confirm whether one device buffer per stream was
            // intended.
            cudaMemcpyAsync(d_batch_M, h_batch_M[b], 1024*sizeof(int), cudaMemcpyHostToDevice, stream[b]);
            if (h_slice_reste_precedent!=0 && h_slice_reste!=0) {
                int h_taille_A_extra=h_slice_reste-h_slice_reste_precedent;
                int h_taille_B_extra=h_slice_reste_precedent;
                small_sortManager_extraSlice<<<h_number_of_slices+1,1024,0,stream[b]>>>(d_batch_M,h_slice_size/2,h_slice_size/2,h_taille_A_extra,h_taille_B_extra,1024,h_number_of_slices);
            }
            else
                small_sortManager<<<h_number_of_slices,1024,0,stream[b]>>>(d_batch_M, h_slice_size/2, h_slice_size/2,h_slice_size,h_number_of_slices);
            cudaMemcpyAsync(h_batch_M[b], d_batch_M, 1024*sizeof(int), cudaMemcpyDeviceToHost, stream[b]);
        }
        if (h_is_irregular_batch==1 && h_irregular_number_of_slices>0) {
            h_irregular_slice_size=2*h_irregular_slice_size;
            h_irregular_slice_reste_precedent=h_irregular_slice_reste;
            h_irregular_slice_reste=h_irregular_batch_size%h_irregular_slice_size;
            h_irregular_number_of_slices=h_irregular_batch_size/h_irregular_slice_size;
            cudaMemcpyAsync(d_irregular_batch_M, h_irregular_batch_M, h_irregular_batch_size*sizeof(int), cudaMemcpyHostToDevice, stream[h_number_of_batches]);
            if (h_irregular_slice_reste_precedent!=0 && h_irregular_slice_reste!=0) {
                int h_taille_A_extra=h_irregular_slice_reste-h_irregular_slice_reste_precedent;
                int h_taille_B_extra=h_irregular_slice_reste_precedent;
                small_sortManager_extraSlice<<<h_irregular_number_of_slices+1,h_irregular_batch_size,0,stream[h_number_of_batches]>>>(d_irregular_batch_M,h_irregular_slice_size/2,h_irregular_slice_size/2,h_taille_A_extra,h_taille_B_extra,h_irregular_batch_size,h_irregular_number_of_slices);
            }
            else
                small_sortManager<<<h_irregular_number_of_slices,h_irregular_batch_size,0,stream[h_number_of_batches]>>>(d_irregular_batch_M, h_irregular_slice_size/2, h_irregular_slice_size/2,h_irregular_slice_size,h_irregular_number_of_slices);
            cudaMemcpyAsync(h_irregular_batch_M, d_irregular_batch_M, h_irregular_batch_size*sizeof(int), cudaMemcpyDeviceToHost, stream[h_number_of_batches]);
        }
    }
    cudaDeviceSynchronize();
    /* Rewrite M from the sorted batches */
    for (int b=0; b<h_number_of_batches; b++)
        for (int ind=0; ind<1024; ind++)
            h_M[b*1024+ind]=h_batch_M[b][ind];
    if (h_is_irregular_batch==1)
        for (int ind=0; ind<h_irregular_batch_size; ind++)
            h_M[h_number_of_batches*1024+ind]=h_irregular_batch_M[ind];
    /* Slices above 1024 */
    /* Update sizes and indices */
    h_slice_size=1024;
    /* Destroy the streams that are no longer needed */
    for (int i=(h_taille_M/h_slice_size); i<h_number_of_batches+h_is_irregular_batch; i++)
        cudaStreamDestroy(stream[i]);
    /* Update sizes and indices, continued */
    h_number_of_slices=h_taille_M/h_slice_size;
    h_slice_reste=h_irregular_batch_size;
    int compteur=0;
    while (h_number_of_slices>0) {
        compteur=compteur+1;
        /* Update sizes and indices */
        h_slice_size=2*h_slice_size;
        /* Destroy the streams that are no longer needed */
        if (compteur>1)
            for (int i=(h_taille_M/h_slice_size)+1; i<h_number_of_slices+1; i++)
                cudaStreamDestroy(stream[i]);
        /* Update sizes and indices, continued */
        h_slice_reste_precedent=h_slice_reste;
        h_slice_reste=h_taille_M%h_slice_size;
        h_number_of_slices=h_taille_M/h_slice_size;
        for (int i=0; i<h_number_of_slices; i++) {
            sortManager_CPU(h_M,h_slice_size/2,h_slice_size/2,h_slice_size,i, stream);
        }
        if (h_slice_reste_precedent!=0 && h_slice_reste!=0) {
            int h_taille_A=h_slice_reste-h_slice_reste_precedent;
            int h_taille_B=h_slice_reste_precedent;
            sortManager_CPU(h_M,h_taille_A,h_taille_B,h_slice_size,h_number_of_slices,stream);
        }
        cudaDeviceSynchronize();
    }
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    /* Print the timing */
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    fprintf(stderr,"mergeBatches Taille_M: %d, nbthreads: %d, numblocks: %d, Temps: %.5f, verif: %d\n", h_taille_M, threadsPerBlock, numBlocks, ms,verif_trie(h_M,h_taille_M));
    /* Destroy the remaining streams */
    for (int i=0; i<h_number_of_slices; i++)
        cudaStreamDestroy(stream[i]);
    /* Verification */
    if (verif_trie(h_M,h_taille_M)==-1)
        printf("ok tableau trie");
    else
        printf("KO recommencer %d ",verif_trie(h_M,h_taille_M) );
    /* Cleanup */
    cudaFree(d_M);
    cudaFree(d_batch_M);
    cudaFree(d_irregular_batch_M);
    for (int b=0;b<h_number_of_batches;b++)
        free(h_batch_M[b]);
    free(h_M);
    free(h_batch_M);
    free(h_irregular_batch_M);
    return 0;
}
11,361
#include "includes.h" __global__ void multiplication(int * A,int * B,int * C,int N,int M,int K){ int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if(row<N && col<K){//Si no me fui del arreglo int sum=0; for(int i=0;i<M;i++){ sum+=A[row*N+i]*B[i*M+col]; } C[row*N+col]=sum; } }
11,362
#include <cuda_runtime.h> __global__ void computeForcesKernel(int N, const double3 *p, double3 *f) { extern __shared__ double3 pCache[]; int idx = blockIdx.x * blockDim.x + threadIdx.x; double3 myP = idx < N ? p[idx] : double3{0.0, 0.0, 0.0}; double3 ftot{0.0, 0.0, 0.0}; for (int offset = 0; offset < N; offset += blockDim.x) { // Copy to shared memory. Be careful not to exceed the total number of bodies. int blockSize = min((int)blockDim.x, N - offset); if (threadIdx.x < blockSize) pCache[threadIdx.x] = p[offset + threadIdx.x]; // Wait till all threads are done preparing pCache. Even though warp // are synchronized (at least on the architecture that Piz Daint has), // different warps are not. __syncthreads(); // Compute. again, be careful not to exceed to total number of bodies N. // (i goes from 0 to blockSize-1, not to blockDim.x-1). for (int i = 0; i < blockSize; ++i) { double dx = pCache[i].x - myP.x; double dy = pCache[i].y - myP.y; double dz = pCache[i].z - myP.z; double inv_r = rsqrt(1e-150 + dx * dx + dy * dy + dz * dz); double inv_rrr = inv_r * inv_r * inv_r; ftot.x += dx * inv_rrr; ftot.y += dy * inv_rrr; ftot.z += dz * inv_rrr; } // Synchronize again, otherwise one warp may start overwriting pCache // in the next step too early. __syncthreads(); } f[idx] = ftot; } void computeForces(int N, const double3 *p, double3 *f) { constexpr int numThreads = 1024; int numBlocks = (N + numThreads - 1) / numThreads; size_t sharedMemorySize = numThreads * sizeof(double3); computeForcesKernel<<<numBlocks, numThreads, sharedMemorySize>>>(N, p, f); }
11,363
__global__ void saxpy(int n, float a, float *x, float *y) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < n) { y[idx] = a*x[idx] + y[idx]; } }
11,364
#include "includes.h" __global__ void cuda_copyRegion(unsigned char *dst, unsigned char *src,int stepDst, int stepSrc, int dst_width, int dst_height, int src_width, int src_height, int dst_xoffset, int dst_yoffset, int dst_widthToCrop, int dst_heightToCrop, int src_xoffset, int src_yoffset, int src_widthToCrop, int src_heightToCrop, int numChannel) { // printf("stepSrc - Dst = %d - %d\n", stepSrc, stepDst); int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int dst_col = col + dst_xoffset; int dst_row = row + dst_yoffset; int src_col = col + src_xoffset; int src_row = row + src_yoffset; if(row < dst_heightToCrop && col < dst_widthToCrop && dst_col < dst_width&& dst_row < dst_height) { if(numChannel==1) { dst[dst_row * (stepDst) + dst_col] = src[src_row * (stepSrc) + src_col]; } if(numChannel==3) { int dst_step = dst_row * (stepDst) + dst_col; int src_step = src_row * (stepSrc) + src_col; dst[3 * dst_step] = src[3 * src_step]; dst[3 * dst_step + 1] = src[3 * src_step + 1]; dst[3 * dst_step + 2] = src[3 * src_step + 2]; } } }
11,365
#include <fstream>
#include <iterator>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <string>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
#include <time.h>

#define blockSize 64
#define Ke 8.99e9
#define Qe -1.602e-19
#define epsilon 1e-8
#define mass 9.11e-31

//macro for error checking
#define cudaCheckError(){ \
 cudaError_t err = cudaGetLastError(); \
 if(err != cudaSuccess){ \
 std::cout << "Error in " << __FILE__ << " at line " << __LINE__ << " : " << cudaGetErrorString(err) << std::endl; \
 exit(EXIT_FAILURE); \
 } \
}

// Accumulates into `ai` the Coulomb acceleration contribution of body bj on
// body bi. Pairs farther apart than sqrt(10) set the sentinel (-1,-1,-1),
// which the host integration loop checks for.
__device__ float3 bodyBodyCoulomb(float3 bi, float3 bj, float3 ai){
    float3 rij;
    // components of rij
    rij.x = bj.x - bi.x;
    rij.y = bj.y - bi.y;
    rij.z = bj.z - bi.z;
    // squared distance with softening so the self-interaction term is finite
    float distSquared = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z + epsilon;
    if(distSquared > 10){
        ai.x = -1;
        ai.y = -1;
        ai.z = -1;
        return ai;
    }
    // inverse distance cubed times the charge
    float inverseDist = 1.0f*Qe/sqrtf(distSquared*distSquared*distSquared);
    // finish the force equation by multiplying by Ke and the other charge
    float kernel = Ke*Qe*inverseDist;
    ai.x += rij.x*kernel;
    ai.y += rij.y*kernel;
    ai.z += rij.z*kernel;
    return ai;
}

// Accumulates the contributions of `count` bodies staged in shared memory.
// FIX: takes the valid element count instead of always reading blockDim.x
// entries — the last tile is shorter when numberOfBodies % blockSize != 0,
// and the original read uninitialized shared memory there.
__device__ float3 tileFunction(float3 position, float3 acceleration, float3* shared, int count){
    for(int i = 0; i < count; i++){
        acceleration = bodyBodyCoulomb(position, shared[i], acceleration);
    }
    return acceleration;
}

// One thread per body; positions are processed in shared-memory tiles.
// FIXES vs. the original:
//  * the tile loop now visits every body (the original iterated
//    `i < gridDim.x` stepping by blockSize, which covers the wrong number
//    of tiles);
//  * shared-memory loads are bounds-checked so the tail tile never reads
//    past the end of X;
//  * __syncthreads() is no longer inside the divergent
//    `if (tid < numberOfBodies)` branch (threads past N skipped the
//    barrier — undefined behavior).
__global__ void find_forces(float3* X, float3* A, int numberOfBodies){
    __shared__ float3 sharedPosition[blockSize];
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    float3 position = {0.0f, 0.0f, 0.0f};
    float3 acc = {0.0f, 0.0f, 0.0f};
    if(tid < numberOfBodies)
        position = X[tid];
    // gridDim.x tiles cover all numberOfBodies positions
    for(int tile = 0; tile < gridDim.x; tile++){
        int base = tile*(int)blockDim.x;
        int count = numberOfBodies - base;
        if(count > (int)blockDim.x) count = (int)blockDim.x;
        if(threadIdx.x < count)
            sharedPosition[threadIdx.x] = X[base + threadIdx.x];
        __syncthreads(); // tile fully staged before anyone reads it
        if(tid < numberOfBodies)
            acc = tileFunction(position, acc, sharedPosition, count);
        __syncthreads(); // keep next tile from overwriting pCache too early
    }
    // write accelerations back to global memory for the integration step
    if(tid < numberOfBodies)
        A[tid] = acc;
}

//main
int main(const int argc, const char** argv){
    // NOTE(review): the original selects device 10 unconditionally; on a
    // machine with fewer than 11 GPUs this fails and every later CUDA call
    // errors out. Kept as-is — confirm the target machine.
    cudaSetDevice(10);

    // dt, number of steps and body count from the command line
    float dt = atof(argv[1]);
    int numberOfSteps = atoi(argv[2]);
    int numberOfBodies = atoi(argv[3]);

    // host arrays: position, velocity, acceleration
    float3* x;
    x = (float3*)malloc(numberOfBodies*sizeof(float3));
    float3* v;
    v = (float3*)malloc(numberOfBodies*sizeof(float3));
    float3* a;
    a = (float3*)malloc(numberOfBodies*sizeof(float3));

    srand (time(NULL));
    // random starting positions, velocities and accelerations in [-1, 1]
    for(int i = 0; i < numberOfBodies; i++){
        x[i].x = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        x[i].y = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        x[i].z = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        v[i].x = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        v[i].y = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        v[i].z = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        a[i].x = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        a[i].y = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
        a[i].z = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
    }

    // device arrays
    float3 *d_x;
    cudaMalloc((void**)&d_x, numberOfBodies*sizeof(float3));
    float3 *d_a;
    cudaMalloc((void**)&d_a, numberOfBodies*sizeof(float3));

    // one thread per body
    int gridSize = (numberOfBodies+blockSize-1)/(blockSize);

    // loop over time steps
    for(int k = 0; k < numberOfSteps; k++){
        // copy position, acceleration to the device
        cudaMemcpy(d_x, x, numberOfBodies*sizeof(float3), cudaMemcpyHostToDevice);
        cudaCheckError();
        cudaMemcpy(d_a, a, numberOfBodies*sizeof(float3), cudaMemcpyHostToDevice);
        cudaCheckError();

        find_forces<<<gridSize, blockSize>>>(d_x, d_a, numberOfBodies);
        cudaCheckError(); // FIX: launch errors were previously unchecked

        // copy position, acceleration off the device
        cudaMemcpy(x, d_x, numberOfBodies*sizeof(float3), cudaMemcpyDeviceToHost);
        cudaCheckError();
        cudaMemcpy(a, d_a, numberOfBodies*sizeof(float3), cudaMemcpyDeviceToHost);
        cudaCheckError();

        // host-side integration; the (-1,...) sentinel means "no force"
        for(int i = 0; i < numberOfBodies; i++){
            if(a[i].x == -1){
                v[i].x += 0;
                v[i].y += 0;
                v[i].z += 0;
            }
            else{
                v[i].x += 0.5*a[i].x*dt*dt/mass;
                v[i].y += 0.5*a[i].y*dt*dt/mass;
                v[i].z += 0.5*a[i].z*dt*dt/mass;
                x[i].x += v[i].x*dt;
                x[i].y += v[i].y*dt;
                x[i].z += v[i].z*dt;
            }
        }
    }

    // read out some results just for fun
    for(int i = 0; i < 10; i++){
        std::cout << x[i].x << " " << a[i].x << std::endl;
    }

    free(x);
    free(v); // FIX: v was leaked in the original
    free(a);
    cudaFree(d_x);
    cudaFree(d_a);
}
11,366
// REFERENCE: https://github.com/NVIDIA/thrust/blob/master/examples/histogram.cu // Code for histogram is copied from above reference. #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/inner_product.h> #include <thrust/binary_search.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> // dense histogram using binary search template <typename Vector1, typename Vector2> void dense_histogram(Vector1& input, Vector2& histogram) { typedef typename Vector1::value_type ValueType; // input value type typedef typename Vector2::value_type IndexType; // histogram index type // copy input data (could be skipped if input is allowed to be modified) thrust::device_vector<ValueType> data(input); // print the initial data // print_vector("initial data", data); // sort data to bring equal elements together // thrust::sort(data.begin(), data.end()); // print the sorted data // print_vector("sorted data", data); // number of histogram bins is equal to the maximum value plus one IndexType num_bins = data.back() + 1; // resize histogram storage histogram.resize(num_bins); // find the end of each bin of values thrust::counting_iterator<IndexType> search_begin(0); thrust::upper_bound(data.begin(), data.end(), search_begin, search_begin + num_bins, histogram.begin()); // print the cumulative histogram // print_vector("cumulative histogram", histogram); // compute the histogram by taking differences of the cumulative histogram thrust::adjacent_difference(histogram.begin(), histogram.end(), histogram.begin()); // print the histogram // print_vector("histogram", histogram); }
11,367
extern "C" {

#define MAX_AREA_SIZE 7
#define MEDIAN_BUFFER_SIZE (MAX_AREA_SIZE * MAX_AREA_SIZE + 1)

// In-place quicksort of arr[left..right] (both bounds INCLUSIVE).
// Recursion depth is bounded by the window size (<= 49 elements).
__device__ void quickSort(unsigned char *arr, int left, int right)
{
    int i = left, j = right;
    int tmp;
    int pivot = arr[(left + right) / 2];

    /* partition */
    while (i <= j) {
        while (arr[i] < pivot)
            i++;
        while (arr[j] > pivot)
            j--;
        if (i <= j) {
            tmp = arr[i];
            arr[i] = arr[j];
            arr[j] = tmp;
            i++;
            j--;
        }
    };

    /* recursion */
    if (left < j)
        quickSort(arr, left, j);
    if (i < right)
        quickSort(arr, i, right);
}

// Adaptive median filter: grows the window from 3x3 up to
// MAX_AREA_SIZE x MAX_AREA_SIZE until the window median is strictly between
// the window min and max, then outputs either the original pixel (if it is
// also strictly between min and max) or the median. One thread per pixel;
// medianBuffer provides MEDIAN_BUFFER_SIZE scratch bytes per pixel.
__global__ void adaptive_median_filter_kernel(unsigned char *imageData, unsigned char *filteredImageData, int width,int height, unsigned char *medianBuffer)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    // FIX: the original had no bounds guard, so threads in the grid tail
    // read and wrote out of bounds when the grid overshoots the image.
    if (x >= width || y >= height)
        return;

    bool processed = false;
    int pixelOffset = y * width + x;
    unsigned int pixel = imageData[pixelOffset];
    int n = 3; // current window side length (odd)
    unsigned char *median = medianBuffer + ((y * width) + x) * MEDIAN_BUFFER_SIZE;

    while (!processed) {
        double zMin = 255;
        double zMax = 0;
        double zMed = 0;
        int sDelta = (n - 1) / 2;
        int processedPixelCount = 0;

        // gather the window (clipped at the image border) and track min/max
        for (int sx = x - sDelta; sx <= x + sDelta; sx++) {
            for (int sy = y - sDelta; sy <= y + sDelta; sy++) {
                if (sx < 0 || sy < 0 || sx >= width || sy >= height) {
                    continue;
                }
                unsigned int currentPixel = imageData[sy * width + sx];
                if (currentPixel < zMin) {
                    zMin = currentPixel;
                }
                if (currentPixel > zMax) {
                    zMax = currentPixel;
                }
                median[processedPixelCount] = currentPixel;
                processedPixelCount++;
            }
        }

        // FIX: quickSort bounds are inclusive, so the right bound is
        // count-1; the original passed `processedPixelCount` and sorted one
        // uninitialized byte past the gathered window, corrupting the
        // median.
        quickSort(median, 0, processedPixelCount - 1);
        zMed = median[processedPixelCount / 2];

        double a1 = zMed - zMin;
        double a2 = zMed - zMax;
        if (a1 > 0 && a2 < 0) {
            // median is not an extreme: decide between pixel and median
            double b1 = pixel - zMin;
            double b2 = pixel - zMax;
            if (b1 > 0 && b2 < 0) {
                filteredImageData[pixelOffset] = pixel;
            }
            else {
                filteredImageData[pixelOffset] = zMed;
            }
            processed = true;
        }
        else {
            // median is an extreme (likely impulse noise): grow the window
            n += 2;
            if (n > MAX_AREA_SIZE) { // same limit as the buffer sizing
                filteredImageData[pixelOffset] = zMed;
                processed = true;
            }
        }
    }
}

}
11,368
#include "includes.h" __global__ void cudaUpdateBatchFiringRate_kernel(unsigned int * firingRate, unsigned int * batchFiringRate, unsigned int inputsDimX, unsigned int inputsDimY, unsigned int inputsDimZ, unsigned int batchSize) { const unsigned int inputSize = inputsDimZ * inputsDimX * inputsDimY; for (unsigned int channel = blockIdx.x; channel < inputsDimZ; channel += gridDim.x){ for (unsigned int sy = 0; sy < inputsDimY; sy+=blockDim.y){ for (unsigned int sx = 0; sx < inputsDimX; sx+=blockDim.x) { const unsigned int inputsIdx = channel*inputsDimX*inputsDimY + sy*inputsDimX + sx; unsigned int batchSum = 0; for(unsigned int batch=0; batch<batchSize; ++batch) { const unsigned int batchInputOffset = batch * inputSize; batchSum += firingRate[inputsIdx + batchInputOffset]; } batchFiringRate[inputsIdx] = batchSum; } } } }
11,369
/***************************************************************************//** * \file * \author Christopher Minar (minarc@oregonstate.edu) * \brief kernels to generate the right hand side for the initial velocity solve */ #include "CFL.h" /** * \namespace kernels * \brief Contains all the custom-written CUDA kernels. */ namespace kernels { //size p __global__ void calculateCFL(double *cfl, double *u, double *dx, double *dy, int nx, int ny, double dt) { if (threadIdx.x + blockDim.x * blockIdx.x >= nx*ny) return; int ip = threadIdx.x + blockDim.x * blockIdx.x, I = ip % nx, J = ip / nx, iu = (nx-1)*J + I, iv = (nx-1)*ny + nx*J +I; if (I==nx-1||J==ny-1) return; cfl[ip] = dt*(abs(u[iu])/dx[I] + abs(u[iv])/dy[J]); } __global__ void testDistance(double *distance,int *ghostTagsUV, int *ghostTagsP, double *xu, double *xv, double *yu, double *yv, double midX, double midY, int *i_start, int *j_start, int width, int nx, int ny) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iu = J*(nx-1) + I, iv = (nx-1)*ny + nx*J +I, ip = nx*J +I; if (idx >= (ny-1)*(nx-1)) //return if we're out of bound return; if (ghostTagsP[ip]==-1) //return if we're outside body return; distance[ip] = sqrt(pow(xv[I]-midX,2) + pow(yu[J]-midY,2)); } }
11,370
// Copyright (c) OpenMMLab. All rights reserved.
#include <cuda_runtime.h>

#include <cstdint>

namespace mmdeploy {
namespace operation {
namespace cuda {
namespace impl {

// Per-pixel normalization: output = (src - mean) * std, where `std` already
// holds reciprocals (see Normalize below). When to_rgb is set, channels are
// read in reversed order (BGR <-> RGB swap). One thread per pixel.
template <typename T, int channels>
__global__ void normalize(const T* src, int height, int width, int stride, float* output,
                          const float3 mean, const float3 std, bool to_rgb) {
  const int x = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
  const int y = static_cast<int>(blockIdx.y * blockDim.y + threadIdx.y);
  if (x >= width || y >= height) return;

  const int loc = y * stride + x * channels;
  // float3 members are laid out contiguously, so index them as an array.
  const float* mean_ptr = &mean.x;
  const float* std_ptr = &std.x;
  for (int c = 0; c < channels; ++c) {
    const int src_c = to_rgb ? (channels - 1 - c) : c;
    output[loc + c] = ((float)src[loc + src_c] - mean_ptr[c]) * std_ptr[c];
  }
}

// Host launcher: tiles the image with 16x16 thread blocks and passes the
// per-channel std as reciprocals so the kernel multiplies instead of
// dividing.
template <typename T, int channels>
void Normalize(const T* src, int height, int width, int stride, float* output, const float* mean,
               const float* std, bool to_rgb, cudaStream_t stream) {
  const dim3 thread_block(16, 16);
  const dim3 num_blocks((width + thread_block.x - 1) / thread_block.x,
                        (height + thread_block.y - 1) / thread_block.y);
  const float3 _mean{mean[0], mean[1], mean[2]};
  const float3 _std{float(1. / std[0]), float(1. / std[1]), float(1. / std[2])};
  normalize<T, channels><<<num_blocks, thread_block, 0, stream>>>(src, height, width, stride,
                                                                  output, _mean, _std, to_rgb);
}

template void Normalize<uint8_t, 3>(const uint8_t* src, int height, int width, int stride,
                                    float* output, const float* mean, const float* std, bool to_rgb,
                                    cudaStream_t stream);
template void Normalize<uint8_t, 1>(const uint8_t* src, int height, int width, int stride,
                                    float* output, const float* mean, const float* std, bool to_rgb,
                                    cudaStream_t stream);
template void Normalize<float, 3>(const float* src, int height, int width, int stride,
                                  float* output, const float* mean, const float* std, bool to_rgb,
                                  cudaStream_t stream);
template void Normalize<float, 1>(const float* src, int height, int width, int stride,
                                  float* output, const float* mean, const float* std, bool to_rgb,
                                  cudaStream_t stream);

}  // namespace impl
}  // namespace cuda
}  // namespace operation
}  // namespace mmdeploy
11,371
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
using namespace std;

#define N 1024
#define INF 0x03F3F3F
// Adjacency matrix (dense, N x N) and shortest-path predecessor array.
// NOTE: the locals of the same names inside main() shadow these globals;
// only `map` is actually used via the global.
int map[N * N];
int path[N];

// Relaxation pass of a parallel Bellman-Ford-style shortest path.
// One thread per vertex; every "frontier" vertex (visited[tid]) tries to
// improve tmp_cost of all its neighbours.
// NOTE(review): several frontier vertices may update tmp_cost[i]/tmp_path[i]
// concurrently without atomics — the winner is nondeterministic, so tmp_path
// may not match the cost that was finally stored. TODO confirm acceptable.
__global__ void kernel1(int *nv,int *map,int *cost,int *tmp_cost,bool *visited,int * tmp_path)
{
    const int nn = 1024;  // same value as host-side N
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    //fetch all its neighbour
    if(visited[tid])
    {
        visited[tid] = false;  // consume this vertex from the frontier
        for(int i = 0; i < *nv; ++i)
        {
            if(cost[tid] + map[tid * nn + i] < tmp_cost[i])
            {
                tmp_path[i] = tid;
                tmp_cost[i] = cost[tid] + map[tid * N + i];
            }
        }
    }
    return;
}

// Commit pass: any vertex whose tentative cost improved joins the frontier
// for the next iteration; tmp_* are resynchronized with the committed values.
__global__ void kernel2(int *cost,int *tmp_cost,bool *visited,bool *flag,int *tmp_path,int *path)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    //some vertex can be updated,means that it is the new one to the queue
    if(cost[tid] > tmp_cost[tid])
    {
        path[tid] = tmp_path[tid];
        visited[tid] = true;
        cost[tid] = tmp_cost[tid];
        *flag = true;  // at least one improvement -> iterate again
    }
    tmp_cost[tid] = cost[tid];//if cost[tid] <= tmp_cost[tid]
    tmp_path[tid] = path[tid];
}

// Recursively prints the path from the source to `node` (1-based output).
// NOTE(review): path[] entries of vertices unreachable from the source are
// never written — calling this for such a vertex reads indeterminate data.
void print_path(int node,int path[])
{
    if(path[node] == -1)
    {
        printf("%d ",node + 1);
        return;
    }
    else
    {
        print_path(path[node],path);
        printf("%d ",node + 1);
    }
    return;
}

int main()
{
    // Graph is read from a fixed input file via redirected stdin.
    freopen("input_dijkstra","r",stdin);
    int cost[N],tmp_cost[N],path[N],tmp_path[N];  // shadow the globals above
    bool visited[N],flag;
    int *dev_cost,*dev_tmp_cost,*dev_map,*dev_nv,*dev_path,*dev_tmp_path;
    bool *dev_visited,*dev_flag;
    int nv,ne;
    scanf("%d%d",&nv,&ne);
    //initialize: all costs "infinite", no vertex on the frontier
    for(int i = 0; i < nv; ++i)
    {
        cost[i] = INF;
        tmp_cost[i] = INF;
        visited[i] = false;
        for(int j = 0; j < nv; ++j)
            map[i * N + j] = map[j * N + i] = INF;
    }
    //read all the edges (undirected, weighted)
    for(int i = 0; i < ne; ++i)
    {
        int p,q,w;
        scanf("%d%d%d",&p,&q,&w);
        map[p * N + q] = map[q * N + p] = w;
    }
    int source;
    scanf("%d",&source);
    cost[source] = tmp_cost[source] = 0;
    visited[source] = true;  // frontier starts with the source only
    flag = true;
    path[source] = -1;
    tmp_path[source] = -1;
    cout<<0<<endl;  // progress marker (debug output)
    cudaMalloc((void**)&dev_cost,N * sizeof(int));
    cout<<1<<endl;  // progress marker (debug output)
    cudaMalloc((void**)&dev_tmp_cost,N * sizeof(int));
    cudaMalloc((void**)&dev_visited,N * sizeof(bool));
    cudaMalloc((void**)&dev_map,N * N * sizeof(int));
    cudaMalloc((void**)&dev_flag,sizeof(bool));
    cudaMalloc((void**)&dev_nv,sizeof(int));
    cudaMalloc((void**)&dev_path,N * sizeof(int));
    cudaMalloc((void**)&dev_tmp_path,N * sizeof(int));
    cudaMemcpy(dev_cost,cost,N * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_tmp_cost,tmp_cost,N * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_visited,visited,N * sizeof(bool),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_map,map,N * N * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_flag,&flag,sizeof(bool),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_nv,&nv,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_path,path,N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_tmp_path,tmp_path,N*sizeof(int),cudaMemcpyHostToDevice);
    // Iterate relax/commit until no vertex improves. A single block of nv
    // threads is launched, so this assumes nv <= 1024.
    while(flag)
    {
        flag = false;
        kernel1<<<1,nv>>>(dev_nv,dev_map,dev_cost,dev_tmp_cost,dev_visited,dev_tmp_path);
        cudaMemcpy(dev_flag,&flag,sizeof(bool),cudaMemcpyHostToDevice);
        kernel2<<<1,nv>>>(dev_cost,dev_tmp_cost,dev_visited,dev_flag,dev_tmp_path,dev_path);
        cudaMemcpy(&flag,dev_flag,sizeof(bool),cudaMemcpyDeviceToHost);
    }
    cudaMemcpy(cost,dev_cost,N*sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(path,dev_path,N*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i = 0; i < nv; ++i)
    {
        printf("Node %d cost = %2d path: ",i+1,cost[i]);
        print_path(i,path);
        printf("\n");
    }
    cout << endl;
    return 0;
}
11,372
#include <stdio.h> __global__ void multiplyNumbersByAScalar(float numbers[], float scalar) { int x = blockIdx.x; numbers[x] = numbers[x] * scalar; } int main(int argc, char** args) { float numbersInSystemMemory[] = { 0, 1, 2 , 3 , 4 , 5 , 6 ,7 ,8 , 9}; float* numbersInDeviceMemory; cudaMalloc( (void**)&numbersInDeviceMemory, sizeof(float) * 10); cudaMemcpy( numbersInDeviceMemory, numbersInSystemMemory, sizeof(float) * 10, cudaMemcpyHostToDevice ); multiplyNumbersByAScalar<<<10,1>>>(numbersInDeviceMemory, 2.0f); cudaMemcpy( numbersInSystemMemory, numbersInDeviceMemory, sizeof(float) * 10, cudaMemcpyDeviceToHost ); cudaFree( numbersInDeviceMemory ); for(int x = 0; x < 10 ; x++){ printf("%f ", numbersInSystemMemory[x]); } return 1; }
11,373
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>

#define CUDA_ERROR_EXIT(str) do{\
    cudaError err = cudaGetLastError();\
    if( err != cudaSuccess){\
        printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
        exit(-1);\
    }\
}while(0);

#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))

// XOR of two 64-bit values.
// BUG FIX: the original reimplemented XOR bit-by-bit with `1 << i` for i up
// to 63 (undefined behaviour on 32-bit int) and accumulated into a 32-bit
// result, silently dropping the upper 32 bits. The operator does the same
// thing correctly at full width.
__device__ unsigned long myxor(unsigned long x, unsigned long y)
{
    return x ^ y;
}

// One pass of a tree XOR-reduction with combine distance `iter`:
// mem[j] ^= mem[j + iter] for every j that is a multiple of 2*iter.
// BUG FIX: the original paired element 2*i with 2*i+iter for every thread i,
// so for iter >= 2 neighbouring threads read and wrote the same slots
// concurrently (a data race) and combined the wrong pairs. Spacing the
// targets by 2*iter makes each pass race-free and a correct reduction step.
__global__ void calculate(unsigned long *mem, unsigned long num, int iter)
{
    unsigned long tid = (unsigned long)blockDim.x * blockIdx.x + threadIdx.x;
    unsigned long idx = tid * 2UL * (unsigned long)iter;
    if(idx + (unsigned long)iter < num)
        mem[idx] = myxor(mem[idx], mem[idx + (unsigned long)iter]);
}

int main(int argc, char **argv)
{
    struct timeval start, end, t_start, t_end;
    int i;
    unsigned long *ptr;
    unsigned long *gpu_mem;
    unsigned long num, SEED;
    int blocks = 0;

    // BUG FIX: num/SEED were read uninitialized when argc != 3.
    if(argc != 3){
        fprintf(stderr, "Usage: %s <num_elements> <seed>\n", argv[0]);
        exit(-1);
    }
    num = atoi(argv[1]);
    SEED = atoi(argv[2]);

    /* Allocate host (CPU) memory and initialize (one padding element) */
    srand(SEED);
    ptr = (unsigned long*) malloc((num+1) * sizeof(unsigned long));
    for(i=0; i<num; ++i){
        ptr[i] = random();
    }
    ptr[i] = 0;

    gettimeofday(&t_start, NULL);

    /* Allocate GPU memory and copy from CPU --> GPU */
    cudaMalloc(&gpu_mem, (num+1) * sizeof(unsigned long));
    CUDA_ERROR_EXIT("cudaMalloc");
    cudaMemcpy(gpu_mem, ptr, (num+1) * sizeof(unsigned long), cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("cudaMemcpy");

    gettimeofday(&start, NULL);
    // One kernel launch per reduction level; `step` doubles each pass, so
    // after ceil(log2(num)) passes the full XOR ends up in mem[0]. Using an
    // integer doubling loop avoids the float log()/pow() round-off of the
    // original and naturally handles non-power-of-two num.
    for(unsigned long step = 1; step < num; step <<= 1){
        unsigned long pairs = (num + 2*step - 1) / (2*step);  // active threads
        blocks = (int)((pairs + 1023) / 1024);
        calculate<<<blocks, 1024>>>(gpu_mem, num, (int)step);
    }
    CUDA_ERROR_EXIT("kernel invocation");
    gettimeofday(&end, NULL);

    /* Copy back result */
    cudaMemcpy(ptr, gpu_mem, (num+1) * sizeof(unsigned long), cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&t_end, NULL);

    printf("Total time = %ld microsecs Processsing =%ld microsecs\n",
           TDIFF(t_start, t_end), TDIFF(start, end));

    cudaFree(gpu_mem);
    /* Print the reduction result (element 0) for sanity check */
    printf("XOR sum=%lu\n", ptr[0]);
    free(ptr);
}
11,374
#ifndef __CUDA_KERNELHEADER__
#define __CUDA_KERNELHEADER__
/********************************************/
/* Added codes for OpenACC2CUDA translation */
/********************************************/
#ifdef __cplusplus
#define restrict __restrict__
#endif
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38
#endif
#ifndef FLT_MIN
#define FLT_MIN 1.175494351e-38
#endif
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#ifndef DBL_MIN
#define DBL_MIN 2.2250738585072014e-308
#endif
#endif

// All kernels below are element-wise over `noutput_items` and expect a 1D
// launch with at least that many threads; each thread guards its own index.
// BUG FIX (all kernels): the float data was passed to the double-precision
// math functions (log10, atan2, sqrt, sin, cos), forcing a silent promotion
// to double and back per element. The single-precision variants (log10f,
// atan2f, sqrtf, sinf, cosf) are the idiomatic choice for float kernels.

// out[i] = n_val * log10(in[i]) + k_val  (e.g. dB conversion)
extern "C" __global__ void accLog_kernel_kernel0(float * in, float * out, float k_val, float n_val, int noutput_items)
{
    int lwpriv__i;
    lwpriv__i = (threadIdx.x + (blockIdx.x * blockDim.x));
    if (lwpriv__i < noutput_items) {
        out[lwpriv__i] = ((n_val * log10f(in[lwpriv__i])) + k_val);
    }
}

struct FComplexStruct {
    float real;
    float imag;
};
typedef struct FComplexStruct FComplex;

// out[i] = arg(in[i])  (phase angle in radians)
extern "C" __global__ void accComplexToArg_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
    int lwpriv__i;
    lwpriv__i = (threadIdx.x + (blockIdx.x * blockDim.x));
    if (lwpriv__i < noutput_items) {
        out[lwpriv__i] = atan2f(in[lwpriv__i].imag, in[lwpriv__i].real);
    }
}

// out[i] = |in[i]|  (complex magnitude)
extern "C" __global__ void accComplexToMag_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
    int lwpriv__i;
    float aval;
    float bval;
    lwpriv__i = (threadIdx.x + (blockIdx.x * blockDim.x));
    if (lwpriv__i < noutput_items) {
        aval = in[lwpriv__i].imag;
        bval = in[lwpriv__i].real;
        out[lwpriv__i] = sqrtf(((aval * aval) + (bval * bval)));
    }
}

// out0[i] = |in[i]|, out1[i] = arg(in[i])
extern "C" __global__ void accComplexToMagPhase_kernel_kernel0(FComplex * in, float * out0, float * out1, int noutput_items)
{
    int lwpriv__i;
    float aval;
    float bval;
    lwpriv__i = (threadIdx.x + (blockIdx.x * blockDim.x));
    if (lwpriv__i < noutput_items) {
        aval = in[lwpriv__i].imag;
        bval = in[lwpriv__i].real;
        out0[lwpriv__i] = sqrtf(((aval * aval) + (bval * bval)));
        out1[lwpriv__i] = atan2f(aval, bval);
    }
}

// out[i] = |in[i]|^2  (no sqrt needed for the squared magnitude)
extern "C" __global__ void accComplexToMagSquared_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
    int lwpriv__i;
    float aval;
    float bval;
    lwpriv__i = (threadIdx.x + (blockIdx.x * blockDim.x));
    if (lwpriv__i < noutput_items) {
        aval = in[lwpriv__i].imag;
        bval = in[lwpriv__i].real;
        out[lwpriv__i] = ((aval * aval) + (bval * bval));
    }
}

// c[i] = a[i] * exp(j*b[i])  (magnitude/phase -> rectangular)
extern "C" __global__ void accMagPhaseToComplex_kernel_kernel0(float * a, float * b, FComplex * c, int noutput_items)
{
    int lwpriv__i;
    float mag;
    float phase;
    float real;
    float imag;
    lwpriv__i = (threadIdx.x + (blockIdx.x * blockDim.x));
    if (lwpriv__i < noutput_items) {
        mag = a[lwpriv__i];
        phase = b[lwpriv__i];
        real = mag * cosf(phase);
        imag = mag * sinf(phase);
        c[lwpriv__i].real = real;
        c[lwpriv__i].imag = imag;
    }
}
11,375
// // Created by Peter Rigole on 2019-05-10. // #ifndef AXONBITS_TESTCONTAINER_H #define AXONBITS_TESTCONTAINER_H class TestContainer { public: __host__ TestContainer() {} __host__ TestContainer(float a_init, float b_init, float c_init, int x_init, int y_init, int z_init) : a(a_init), b(b_init), c(c_init), x(x_init), y(y_init), z(z_init) {} __host__ ~TestContainer() {} __device__ float addAB() { return a + b; } __host__ __device__ float getC() { return c; } __device__ void setC(float c_upd) { c = c_upd; } __device__ int addXY() { return x + y; } __host__ __device__ int getZ() { return z; } __device__ void setZ(int z_upd) { z = z_upd; } private: float a; float b; float c; int x; int y; int z; }; #endif //AXONBITS_TESTCONTAINER_H
11,376
//#ifndef _MATRIXMUL_KERNEL_H_
//#define _MATRIXMUL_KERNEL_H_

/*
(define (gpu-info)
  (let* ([info (cuGPUinfo)])
    (values (gridDim-x info) ......)))

(: test_kernel ((Listof Float) Integer -> (Listof Float) (Listof Integer) Integer)
(define (test_kernel d_array_in d_single_in)
  (let*-values ([(d_array_out) (take d_array_in 0)]
                [(memstruct) (gpu-info)]
                [(d_single_out) d_single_in])
    (values d_array_out memstruct d_single_out)))
*/

extern "C"
/* Signature: float* d_array_in, int count, uint single_in -> float* d_array_out, uint* d_array_len, int* memstruct, uint* single_out */
/* In typed/Racket, (test_kernel) has type:
(Vectorof Float) Integer Integer -> (Vectorof Float) Integer Integer */
// Experimental pass-through kernel for a Racket<->CUDA bridge: echoes the
// single scalar and the first five array elements back to the caller.
// NOTE(review): only elements [0..4] are copied regardless of `count`; the
// general loop over `count` is still commented out below. `uint` is
// presumably a typedef supplied by the toolchain — confirm it is in scope.
__global__ void test_kernel( float* d_array_in, uint count, int d_single_in, float* d_array_out, uint* d_array_out_len, int* d_single_out )
{
    // copy - single value
    *d_single_out = d_single_in;
    // fixed-size copy of the first 5 elements (see commented loop below)
    *(d_array_out+0) = d_array_in[0];
    *(d_array_out+1) = d_array_in[1];
    *(d_array_out+2) = d_array_in[2];
    *(d_array_out+3) = d_array_in[3];
    *(d_array_out+4) = d_array_in[4];
    *d_array_out_len = count;
    *d_single_out = d_single_in;  // NOTE(review): redundant repeat of the write above

    // what's problem in my for loop?
    // // copy of array variables
    // for(int j = 0 ; j < count ; j++)
    // {
    //     *(d_array_out+j) = d_array_in[j];
    //     // *(d_array_out+j) = j;
    //     // *(d_array_in+j) = j;
    // }
    *d_array_out_len = count;  // NOTE(review): also redundant (already set above)

    /*
    // copy of default variables
    unsigned int gdm_x = gridDim.x;
    unsigned int gdm_y = gridDim.y;
    unsigned int gdm_z = gridDim.z;
    unsigned int bdm_x = blockDim.x;
    unsigned int bdm_y = blockDim.y;
    unsigned int bdm_z = blockDim.z;
    unsigned int bid_x = blockIdx.x;
    unsigned int bid_y = blockIdx.y;
    unsigned int bid_z = blockIdx.z;
    unsigned int tid_x = threadIdx.x;
    unsigned int tid_y = threadIdx.y;
    unsigned int tid_z = threadIdx.z;

    *memstruct = gdm_x;
    *(memstruct+1) = gdm_y;
    *(memstruct+2) = gdm_z;
    *(memstruct+3) = bdm_x;
    *(memstruct+4) = bdm_y;
    *(memstruct+5) = bdm_z;
    *(memstruct+6) = bid_x;
    *(memstruct+7) = bid_y;
    *(memstruct+8) = bid_z;
    *(memstruct+9) = tid_x;
    *(memstruct+10) = tid_y;
    *(memstruct+11) = tid_z;
    */
}
11,377
#include "includes.h" extern "C" { } #define IDX2C(i, j, ld) ((j)*(ld)+(i)) #define SQR(x) ((x)*(x)) // x^2 __global__ void weighting_kernel_transposed(double const* matrices, double const* weights, double* results) { int grid_index = blockIdx.x * blockDim.x * blockDim.y; int block_index = blockDim.y * threadIdx.x + threadIdx.y; int matrix_index = grid_index + block_index; int weighting_index = blockIdx.x * blockDim.x + threadIdx.x; results[matrix_index] = matrices[block_index] * weights[weighting_index]; }
11,378
# include <cuda.h>
# include <cuda_runtime.h>

extern "C" unsigned char * convolutionGPU(unsigned char * rowdata, int width, int height, signed char * kernell, int kernelSize, int nchann);

// One thread per byte (channel sample) of the image. Convolves the sample's
// channel plane with the kernelSize x kernelSize filter, normalizing by the
// sum of the kernel coefficients (clamped to >= 1), and clamps to [0, 255].
__global__ void convolute_1PixelGPU(unsigned char * data_dev, unsigned char * convData_dev, int width, int height, signed char * kernellDev, int kernelSize, int nchann){
    int posThread = blockIdx.x*blockDim.x + threadIdx.x;
    if(posThread < width*height*nchann){
        // Sum of the kernel coefficients, used as the normalization factor.
        int sumKernell = 0;
        for(int i = 0; i < kernelSize*kernelSize; i++)
            sumKernell += kernellDev[i];
        sumKernell = (sumKernell <= 0) ? 1 : sumKernell;

        // Accumulate the weighted neighbourhood, skipping taps that fall
        // outside the flat buffer.
        int acumm = 0;
        int midSZ = (kernelSize - 1)/2;
        for(int ky = 0; ky < kernelSize; ky++){
            for(int kx = 0; kx < kernelSize; kx++){
                int pdata = posThread + nchann*width*(ky - midSZ) - midSZ*nchann + kx*nchann;
                // BUG FIX: the original used `pdata > 0`, wrongly excluding
                // the very first sample of the image.
                if(pdata >= 0 && pdata < width*height*nchann)
                    acumm += data_dev[pdata]*kernellDev[kx + ky*kernelSize];
            }
        }
        int ans = acumm/sumKernell;
        if(ans > 255) ans = 255;
        if(ans < 0) ans = 0;
        convData_dev[posThread] = (unsigned char) ans;
    }
}

// Host wrapper: uploads the image and filter, runs the convolution on the
// GPU and returns a newly allocated host buffer (caller owns/deletes it).
unsigned char * convolutionGPU(unsigned char * rowdata, int width, int height, signed char * kernell, int kernelSize, int nchann){
    int size = nchann*width*height;
    unsigned char * convolutedData = new unsigned char[size];

    unsigned char * dataDev;
    unsigned char * convDataDev;
    signed char * kernellDev;

    cudaMalloc((void**)&dataDev, size*sizeof(unsigned char));
    cudaMalloc((void**)&convDataDev, size*sizeof(unsigned char));
    // BUG FIX: the kernel reads kernelSize*kernelSize coefficients, but the
    // original allocated and copied only kernelSize bytes, so every launch
    // read past the end of the device buffer.
    cudaMalloc((void**)&kernellDev, kernelSize*kernelSize*sizeof(signed char));

    cudaMemcpy(dataDev, rowdata, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(kernellDev, kernell, kernelSize*kernelSize*sizeof(signed char), cudaMemcpyHostToDevice);

    int nThreads = 1024;
    int nBlocks = (size % nThreads > 0) ? size/nThreads + 1 : size/nThreads;

    convolute_1PixelGPU<<<nBlocks, nThreads>>>(dataDev, convDataDev, width, height, kernellDev, kernelSize, nchann);

    cudaMemcpy(convolutedData, convDataDev, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);

    cudaFree(dataDev);
    cudaFree(convDataDev);
    cudaFree(kernellDev);
    return convolutedData;
}

int main(){

}
11,379
#include "includes.h" __global__ void cunnx_LazyKBest_updateGradInput_kernel( float *gradInput, const float *indice, const float *gradOutput, int inputSize, int outputSize) { int tx = threadIdx.x; int step = blockDim.x; int k = blockIdx.x; float *gradInput_k = gradInput + k*inputSize; const float *gradOutput_k = gradOutput + k*outputSize; const float *indice_k = indice + k*outputSize; for (int i=tx; i<outputSize; i+=step) gradInput_k[(int)(indice_k[i] - 1)] = gradOutput_k[i]; }
11,380
/** File: svmPredict.cu
 * Purpose: Parallel Programming 2017 Final Project: Training Support Vector Machine on multiprocessors and GPUs
 *          use to get accuracy of training result on GPU
 *
 * Compile: nvcc -o svmPredict svmPredict.cu
 * Run: ./svmPredict ./data/test-mnist ./data/train-mnist.model 1500 784
 *      ./data/test-mnist: input test data set
 *      ./data/train-mnist.model: input model data
 *      1500 : number of training data
 *      784  : dimension of feature space
 *
 * Notes:
 * 1. Need modified_SMO executable to train first in order to get model file.
 * 2. The test file shouldn't be the same with the training data.
 *
 * Output: percentage of prediction accuracy of input data
 *
 * Author: Wei-Hsiang Teng
 * History: 2017/6/9     created
 *          2017/6/13    change datatype from float to double
 */
#include <string.h> /* for memset */
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h> // for estimate elapsed time
#include <math.h>

#define STR_SIZE 8192

#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if (error != cudaSuccess) \
    { \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
        fprintf(stderr, "code: %d, reason: %s\n", error, \
                cudaGetErrorString(error)); \
        exit(-1); \
    } \
}

#define GET_TIME(now) { \
    struct timeval t; \
    gettimeofday(&t, NULL); \
    now = t.tv_sec + t.tv_usec/1000000.0; \
}

/**
 * name: rbf_kernel
 *
 * description: kernel generates kernel function K(X_i, X_j) of X which is gaussian.
 * input: x1[]: coordinates of testing data set
 *        x2[]: coordinates of support vectors
 *        dim: number of dimension of coordinates
 *        i, j: index of kernel function K(X_i, X_j)
 *        gamma: parameter for guassian kernel: exp(-gamma*|X_i - X_j|^2)
 *
 * output: K(X_i, X_j)
 */
__device__ double rbf_kernel(double x1[], double x2[], int i, int j, int dim, double gamma)
{
    double ker = 0.0;
    int m;
    for (m = 0; m < dim; m++)
    {
        ker += (x1[i * dim + m] - x2[j * dim + m]) * (x1[i * dim + m] - x2[j * dim + m]);
    }
    ker = exp(-1 * gamma * ker);
    return ker;
}

// One thread per test sample: evaluate the SVM decision function against all
// support vectors and atomically bump *num when the prediction matches the
// label. `num` MUST be zero-initialized by the caller before launch.
__global__ void svmPredict(double* devX1, double* devX2, int* devY, double* devAlphas,
                           int size, int total_sv, int dim, double gamma, double b, int* num)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j, result;
    double dual = 0;
    if (i < size)
    {
        for (j = 0; j < total_sv; j++)
        {
            dual += devAlphas[j] * rbf_kernel(devX1, devX2, i, j, dim, gamma);
        }
        dual += b;
        result = 1;
        if (dual < 0) result = -1;
        if (result == devY[i]) atomicAdd(num, 1);
    }
}

// Parses a libsvm-style data file: per line "label idx:val idx:val ...".
// Fills x (row-major, size x dim, 1-based indices in the file) and y.
void read_data(char* file, double x[], int y[], int size, int dim)
{
    int i;
    char s[STR_SIZE];
    const char* delim = ":";
    char *token;
    int index = 0, pre_index = 0;
    FILE *pFile;

    pFile = fopen(file, "r");
    if (pFile == NULL)
    {
        printf("can't open %s\n", file);
        exit(-1);
    }
    for (i = 0; i < size; i++)
    {
        int cnt = 0;
        fgets(s, sizeof(s), pFile);
        /* get the first token */
        token = strtok(s, delim);
        sscanf(token, "%d %d", &y[i], &index);
        /* walk through other tokens */
        while (token != NULL)
        {
            if (cnt == 0)
            {
                token = strtok(NULL, delim);
            }
            if (index > 0) sscanf(token, "%lf %d", &x[i * dim + index - 1], &pre_index);
            index = pre_index;
            token = strtok(NULL, delim);
            cnt++;
        }
    }
    fclose(pFile);
}

// Parses the model file: first line holds the header (consumed by the caller
// and skipped here), then one "alpha idx:val ..." line per support vector.
void read_model(char* file, double x[], double alphas[], int dim, int total_sv)
{
    FILE *pFile;
    int i;
    char s[STR_SIZE];
    const char* delim = ":";
    char *token;
    int index = 0, pre_index = 0;

    pFile = fopen(file, "r");
    if (pFile == NULL)
    {
        printf("can't open %s\n", file);
        exit(-1);
    }
    fgets(s, sizeof(s), pFile);  // skip the header line
    for (i = 0; i < total_sv; i++)
    {
        int cnt = 0;
        fgets(s, sizeof(s), pFile);
        /* get the first token */
        token = strtok(s, delim);
        sscanf(token, "%lf %d", &alphas[i], &index);
        /* walk through other tokens */
        while (token != NULL)
        {
            if (cnt == 0)
            {
                token = strtok(NULL, delim);
            }
            if (index > 0) sscanf(token, "%lf %d", &x[i * dim + index - 1], &pre_index);
            index = pre_index;
            token = strtok(NULL, delim);
            cnt++;
        }
    }
    fclose(pFile);
}

int main(int argc, char* argv[])
{
    int size, dim, total_sv, correct_num;
    double* x1, *x2;
    double gamma, b;
    int *y1;
    double* alphas;
    double start, end;

    /* device variables */
    double* devX1;
    double* devX2;
    int* devY;
    double* devAlphas;
    int* devNum;

    if (argc < 5)
    {
        printf("%s data_file model_file data_size data_dim\n", argv[0]);
        exit(-1);
    }
    size = atoi(argv[3]);
    dim = atoi(argv[4]);

    GET_TIME(start);

    x1 = (double *)malloc(size*dim*sizeof(double));
    memset(x1, 0, sizeof(double)*size*dim);
    /* BUG FIX: was malloc(size*sizeof(double)) — y1 is an int array */
    y1 = (int *)malloc(size*sizeof(int));

    /* read files */
    read_data(argv[1], x1, y1, size, dim);

    /* read model header: #support vectors, gamma, bias */
    FILE *fp;
    fp = fopen(argv[2], "r");
    if (fp == NULL)
    {
        printf("can't open file %s\n", argv[2]);
        exit(-1);
    }
    fscanf(fp, "%d %lf %lf", &total_sv, &gamma, &b);
    fclose(fp);

    x2 = (double *)malloc(total_sv*dim*sizeof(double));
    memset(x2, 0, sizeof(double)*total_sv*dim);
    alphas = (double *)malloc(total_sv*sizeof(double));
    read_model(argv[2], x2, alphas, dim, total_sv);

    /* allocate device memory */
    CHECK(cudaMalloc((void**)&devX1, size * dim * sizeof(double)));
    CHECK(cudaMalloc((void**)&devY, size * sizeof(int)));
    CHECK(cudaMalloc((void**)&devNum, sizeof(int)));
    /* BUG FIX: devNum was never initialized, so the atomicAdd accumulated
     * onto garbage and the reported accuracy was undefined. */
    CHECK(cudaMemset(devNum, 0, sizeof(int)));
    CHECK(cudaMemcpy(devX1, x1, size * dim * sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(devY, y1, size * sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMalloc((void**)&devX2, total_sv * dim * sizeof(double)));
    CHECK(cudaMalloc((void**)&devAlphas, total_sv * sizeof(double)));
    CHECK(cudaMemcpy(devX2, x2, total_sv * dim * sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(devAlphas, alphas, total_sv * sizeof(double), cudaMemcpyHostToDevice));

    dim3 block(32);
    dim3 grid((size + block.x - 1)/block.x);
    svmPredict<<<grid, block>>>(devX1, devX2, devY, devAlphas, size, total_sv, dim, gamma, b, devNum);
    CHECK(cudaGetLastError());  // surface launch-configuration errors

    /* blocking copy also synchronizes with the kernel */
    CHECK(cudaMemcpy(&correct_num, devNum, sizeof(int), cudaMemcpyDeviceToHost));

    GET_TIME(end);
    printf("accuracy (%d/%d): %1.5f\n", correct_num, size, (double)correct_num/size);
    printf("elapsed time is %lf seconds\n", end - start);

    free(x1);  /* BUG FIX: x1 was leaked */
    free(y1);
    free(x2);
    free(alphas);
    cudaFree(devX1);
    cudaFree(devX2);
    cudaFree(devY);
    cudaFree(devAlphas);
    cudaFree(devNum);
    return 0;
}
11,381
#include "includes.h" #define UPPERTHRESHOLD 90 #define LOWERTHRESHOLD 30 const float G_x[3 * 3] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; const float G_y[3 * 3] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 }; const float gaussian[5 * 5] = { 2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159, 4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159, 5.f/159, 12.f/159, 15.f/159, 12.f/159, 2.f/159, 4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159, 2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159 }; __global__ void hysteresis(int N, int width, int height, unsigned char * in) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x >= width || y >= height) { return; } int idx = y * width + x; if (in[idx] > UPPERTHRESHOLD) { in[idx] = 255; } else if (in[idx] < LOWERTHRESHOLD) { in[idx] = 0; } else { for (int dy = -1; dy <= 1; dy++) { for (int dx = -1; dx <= 1; dx++) { int nidx = (y + dy) * width + (x + dx); if(0 <= (y + dy) && (y + dy) < height && 0 <= (x + dx) && (x + dx) < width && in[nidx] > LOWERTHRESHOLD) { in[nidx] = 255; } } } } }
11,382
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 1024
#define NUM_OF_ELEMS 1024

#define funcCheck(stmt) { \
    cudaError_t err = stmt; \
    if (err != cudaSuccess) \
    { \
        printf( "Failed to run stmt %d ", __LINE__); \
        printf( "Got CUDA error ... %s ", cudaGetErrorString(err)); \
        return -1; \
    } \
}

// Partial sum reduction: each block loads 2*BLOCK_SIZE elements into shared
// memory (zero-padding past `len`) and tree-reduces them; thread 0 writes
// one partial sum per block to output[blockIdx.x]. The host adds the
// per-block partials afterwards.
__global__ void total(float * input, float * output, int len)
{
    __shared__ float partialSum[2*BLOCK_SIZE];
    int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;  // block's 2*BLOCK_SIZE window

    // each thread loads two elements, zero-filling beyond the input length
    if ((start + t) < len)
    {
        partialSum[t] = input[start + t];
    }
    else
    {
        partialSum[t] = 0.0;
    }
    if ((start + blockDim.x + t) < len)
    {
        partialSum[blockDim.x + t] = input[start + blockDim.x + t];
    }
    else
    {
        partialSum[blockDim.x + t] = 0.0;
    }

    // tree reduction; the barrier at the top of each iteration orders the
    // loads (first pass) and the previous pass's adds before the next adds
    for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
    {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    __syncthreads();
    if (t == 0 && (globalThreadId*2) < len)
    {
        output[blockIdx.x] = partialSum[t];
    }
}

int main(int argc, char ** argv)
{
    float * hostInput;
    float * hostOutput;
    float * deviceInput;
    float * deviceOutput;

    int numInputElements = NUM_OF_ELEMS;
    int numOutputElements;  // one partial sum per block

    hostInput = (float *) malloc(sizeof(float) * numInputElements);
    for (int i=0; i < NUM_OF_ELEMS; i++)
    {
        hostInput[i] = 1.0;  // all ones -> expected total equals element count
    }

    numOutputElements = numInputElements / (BLOCK_SIZE<<1);
    if (numInputElements % (BLOCK_SIZE<<1))
    {
        numOutputElements++;
    }
    hostOutput = (float*) malloc(numOutputElements * sizeof(float));

    funcCheck(cudaMalloc((void **)&deviceInput, numInputElements * sizeof(float)));
    funcCheck(cudaMalloc((void **)&deviceOutput, numOutputElements * sizeof(float)));

    cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);

    dim3 DimGrid( numOutputElements, 1, 1);
    dim3 DimBlock(BLOCK_SIZE, 1, 1);

    total<<<DimGrid, DimBlock>>>(deviceInput, deviceOutput, numInputElements);

    // blocking copy; also synchronizes with the kernel
    cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);

    // final reduction of the per-block partial sums on the host
    for (int i = 1; i < numOutputElements; i++)
    {
        hostOutput[0] += hostOutput[i];
    }
    printf("Reduced Sum = %f\n", hostOutput[0]);

    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    free(hostInput);
    free(hostOutput);
    return 0;
}
11,383
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ #include <stdio.h> // Device code extern "C" __global__ void vectorAdd_kernel(const float *A, const float *B, float *C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) { C[i] = A[i] + B[i]; } }
11,384
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <cstring>  // BUG FIX: memset was used without including <cstring>
#include <cstdlib>  // srand/rand/calloc
#include <ctime>    // BUG FIX: time() was used without including <ctime>

using namespace std;

// CPU reference: element-wise product ris[i] = a[i] * b[i].
void calcolaOnCPU(int *a, int *b, int *ris, int n){
    for(int i = 0; i < n; i++)
        ris[i] = a[i] * b[i];
}

// GPU kernel: one thread per element, guarded against the grid tail.
__global__ void calcolaOnGPU(int *a, int *b, int *ris, int n){
    // global thread index = block index * threads per block + local index
    int indiceThreadInGriglia = blockIdx.x * blockDim.x + threadIdx.x;
    if(indiceThreadInGriglia < n)
        ris[indiceThreadInGriglia] = a[indiceThreadInGriglia] * b[indiceThreadInGriglia];
}

// Dispatcher: with the default dim3(0,0,0) arguments the product is computed
// on the CPU (the pointers must then be host memory); with a non-default
// grid configuration the kernel is launched (the pointers must then be
// device memory).
void wrapperCalcolaProdottoPuntuale(int *a, int *b, int *ris, int n,
                                    const dim3 &nBlocchi = dim3(0,0,0),
                                    const dim3 &nThreads = dim3(0,0,0)){
    if(nBlocchi.x == 0)  // default arguments received -> CPU path
        calcolaOnCPU(a, b, ris, n);
    else                 // launch the kernel with the supplied configuration
        calcolaOnGPU<<<nBlocchi, nThreads>>>(a, b, ris, n);
}

int main(int argc, char *argv[]){
    srand((unsigned int)time(NULL));
    int nThread;
    int sizeArray;
    int nBlocchi;
    int nThreadPerBlocco;
    dim3 nBlocchi_d3;
    dim3 nThreadPerBlocco_d3;

    // array size and threads-per-block from the command line, with defaults
    if(argc != 3){
        sizeArray = 200;
        nThread = sizeArray;
        nThreadPerBlocco = 5;
    }
    else{
        sscanf(argv[1], "%d", &sizeArray);
        nThread = sizeArray;
        sscanf(argv[2], "%d", &nThreadPerBlocco);
    }
    // ceil-divide so every element gets a thread
    nBlocchi = nThread / nThreadPerBlocco;
    if(nThread % nThreadPerBlocco != 0)
        nBlocchi++;
    nBlocchi_d3.x = nBlocchi;
    nThreadPerBlocco_d3.x = nThreadPerBlocco;

    // allocate and initialize host-side buffers
    int *h_a = (int *)malloc(sizeArray * sizeof(int));
    int *h_b = (int *)malloc(sizeArray * sizeof(int));
    int *h_ris = (int *)malloc(sizeArray * sizeof(int));
    for(int i = 0; i < sizeArray; i++){
        h_a[i] = 1 + rand() % (i + 1);
        h_b[i] = 1 + rand() % (i + 1);
    }
    memset(h_ris, 0, sizeArray * sizeof(int));

    // allocate and initialize device-side buffers
    int *d_a, *d_b, *d_ris;
    cudaMalloc((void **)&d_a, sizeArray * sizeof(int));
    cudaMalloc((void **)&d_b, sizeArray * sizeof(int));
    cudaMalloc((void **)&d_ris, sizeArray * sizeof(int));
    cudaMemcpy(d_a, h_a, sizeArray * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeArray * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_ris, 0, sizeof(int) * sizeArray);

    // compute locally and print the reference result
    wrapperCalcolaProdottoPuntuale(h_a, h_b, h_ris, sizeArray);
    for(int i = 0; i < sizeArray; i++)
        cout << h_ris[i] << "-";
    cout << endl << "-----------------------" << endl;

    // compute on the GPU
    wrapperCalcolaProdottoPuntuale(d_a, d_b, d_ris, sizeArray, nBlocchi_d3, nThreadPerBlocco_d3);

    // the result lives in device memory; copy it back so the host can read it
    int *risFromGPU = (int *)calloc(sizeArray, sizeof(int));
    cudaMemcpy(risFromGPU, d_ris, sizeArray * sizeof(int), cudaMemcpyDeviceToHost);

    // print the GPU result
    for(int i = 0; i < sizeArray; i++)
        cout << risFromGPU[i] << "-";

    // BUG FIX: the original leaked every allocation (host and device)
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_ris);
    free(h_a);
    free(h_b);
    free(h_ris);
    free(risFromGPU);
    return 0;
}
11,385
#include<stdio.h> #include<math.h> #define N 64 #define Block 2 #define thread 64 __global__ void exclusive_scan(int *d_in) { //Phase 1 (Uptree) int s = 1; int tid = blockIdx.x * blockDim.x + threadIdx.x; for(; s<=N-1; s<<=1) { int i = 2*s*(tid+1)-1; if((i-s >= 0) && (i<N)) { int a = d_in[i]; int b = d_in[i-s]; __syncthreads(); d_in[i] = a+b; __syncthreads(); } __syncthreads(); } //Phase 2 (Downtree) if(tid == 0) d_in[N-1] = 0; for(s = s/2; s >= 1; s>>=1) { int i = 2*s*(tid+1)-1; if((i-s >= 0) && (i<N)) { int r = d_in[i]; int l = d_in[i-s]; __syncthreads(); d_in[i] = l+r; d_in[i-s] = r; __syncthreads(); } __syncthreads(); } } int main() { int h_in[N]; int h_out[N]; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); for(int i=0; i < N; i++) h_in[i] = 1; int *d_in; cudaMalloc((void**) &d_in, N*sizeof(int)); cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice); //Implementing kernel call cudaEventRecord(start); exclusive_scan<<<Block, thread/2>>>(d_in); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cudaMemcpy(&h_out, d_in, N*sizeof(int), cudaMemcpyDeviceToHost); for(int i=0; i<N; i++) printf("out[%d] = %d\n", i, h_out[i]); printf("%f milliseconds\n", milliseconds); cudaFree(d_in); return -1; }
11,386
#include "includes.h" __global__ void cunn_MSECriterion_updateOutput_kernel(float* output, float *input, float *target, int nframe, int dim, int sizeAverage) { __shared__ float buffer[MSECRITERION_THREADS]; int k = blockIdx.x; float *input_k = input + k*dim; float *target_k = target + k*dim; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // mse buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = input_k[i] - target_k[i]; buffer[threadIdx.x] += z*z; } __syncthreads(); //reduce if (threadIdx.x == 0) { *output = 0; for (int i=0; i<blockDim.x; i++) { *output += buffer[i]; } if (sizeAverage) *output /= dim; } }
11,387
// --- Headers ---
#include <cuda.h>
#include <stdio.h>

// --- Macros ---
// Data is processed CHUNK elements at a time; SIZE is the total workload.
#define CHUNK (1024 * 1024)
#define SIZE (CHUNK * 20)

// --- Variable Declaration ---
// Host buffers are page-locked (cudaHostAlloc below) so cudaMemcpyAsync can
// run asynchronously; device buffers hold a single CHUNK at a time.
int *hostInput1 = NULL;
int *hostInput2 = NULL;
int *hostOutput = NULL;
int *deviceInput1 = NULL;
int *deviceInput2 = NULL;
int *deviceOutput = NULL;
cudaEvent_t start, stop;
cudaStream_t stream;

// --- CUDA KERNEL DEFINITION ---
// For each element of the current chunk, averages three nearby samples of
// each input (the +1/+2 neighbor indices wrap inside a 256-element window)
// and stores the mean of the two averages, truncated to int.
__global__ void kernel(int *input1, int *input2, int *output)
{
    //variable declaration
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    //code
    if(tid < CHUNK)
    {
        int tid1 = (tid + 1) % 256;
        int tid2 = (tid + 2) % 256;
        float input1_avg = (input1[tid] + input1[tid1] + input1[tid2]) / 3.0f;
        float input2_avg = (input2[tid] + input2[tid1] + input2[tid2]) / 3.0f;
        output[tid] = (input1_avg + input2_avg) / 2;
    }
}

// --- main() ---
// Streams a 20-chunk workload through the GPU on one non-default stream
// (H2D copy, kernel, D2H copy per chunk) and reports the elapsed time.
int main(void)
{
    //function declaration
    void cleanup(void);

    //variable declaration
    cudaError_t err = cudaSuccess;
    cudaDeviceProp prop;
    int deviceID;
    float elapsedTime;

    //code
    //check for device overlap capability
    err = cudaGetDevice(&deviceID);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaGetDevice() failed : %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaGetDeviceProperties(&prop, deviceID);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaGetDeviceProperties() failed : %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    if(!prop.deviceOverlap)
    {
        printf("Device will not handle overlaps, so no speed up from streams\n");
        return (0);
    }

    //create cuda events
    err = cudaEventCreate(&start);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaEventCreate() failed for start: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaEventCreate(&stop);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaEventCreate() failed for stop : %s\n", cudaGetErrorString(err));
        cudaEventDestroy(start);
        exit(EXIT_FAILURE);
    }

    //initialize the stream
    // NOTE(review): on failure, cleanup() below calls cudaStreamDestroy on
    // the still-uninitialized `stream` handle -- confirm intended.
    err = cudaStreamCreate(&stream);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaStreamCreate() failed for stream : %s\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    //allocate host memory (page-locked)
    err = cudaHostAlloc((void **)&hostInput1, SIZE * sizeof(int), cudaHostAllocDefault);
    if(err != cudaSuccess)
    {
        printf("CPU Memory Fatal Error - cudaHostAlloc() failed for hostInput1 : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    err = cudaHostAlloc((void **)&hostInput2, SIZE * sizeof(int), cudaHostAllocDefault);
    if(err != cudaSuccess)
    {
        printf("CPU Memory Fatal Error - cudaHostAlloc() failed for hostInput2 : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    err = cudaHostAlloc((void **)&hostOutput, SIZE * sizeof(int), cudaHostAllocDefault);
    if(err != cudaSuccess)
    {
        printf("CPU Memory Fatal Error - cudaHostAlloc() failed for hostOutput : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    //allocate device memory
    err = cudaMalloc((void **)&deviceInput1, CHUNK * sizeof(int));
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error - cudaMalloc() failed for deviceInput1 : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    err = cudaMalloc((void **)&deviceInput2, CHUNK * sizeof(int));
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error - cudaMalloc() failed for deviceInput2 : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    err = cudaMalloc((void **)&deviceOutput, CHUNK * sizeof(int));
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error - cudaMalloc() failed for deviceOutput : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    //fill the host input memory
    // NOTE(review): (1/RAND_MAX)*rand() is in [0,1]; stored into an int
    // array it truncates to 0 (or rarely 1) -- possibly rand() alone was
    // intended. Confirm; behavior kept as-is.
    for(int i = 0; i < SIZE; i++)
    {
        hostInput1[i] = (float)((1.0f / (float)RAND_MAX) * rand());
        hostInput2[i] = (float)((1.0f / (float)RAND_MAX) * rand());
    }

    //start timer
    err = cudaEventRecord(start, 0);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaEventRecord() failed for start : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    //cuda kernel configuration
    // CHUNK/256 blocks of 256 threads covers exactly CHUNK elements.
    dim3 DimGrid = dim3(CHUNK / 256, 1, 1);
    dim3 DimBlock = dim3(256, 1, 1);

    //now loop over full data, in bite-sized chunks
    for(int i = 0; i < SIZE; i += CHUNK)
    {
        //copy the locked memory to the device, async
        err = cudaMemcpyAsync(deviceInput1, hostInput1 + i, CHUNK * sizeof(int), cudaMemcpyHostToDevice, stream);
        if(err != cudaSuccess)
        {
            printf("GPU Error - cudaMemcpyAsync() failed for Input1 : %s.\n", cudaGetErrorString(err));
            cleanup();
            exit(EXIT_FAILURE);
        }

        err = cudaMemcpyAsync(deviceInput2, hostInput2 + i, CHUNK * sizeof(int), cudaMemcpyHostToDevice, stream);
        if(err != cudaSuccess)
        {
            printf("GPU Error - cudaMemcpyAsync() failed for Input2 : %s.\n", cudaGetErrorString(err));
            cleanup();
            exit(EXIT_FAILURE);
        }

        kernel<<<DimGrid, DimBlock, 0, stream>>>(deviceInput1, deviceInput2, deviceOutput);

        //copy the data from device to locked memory
        err = cudaMemcpyAsync(hostOutput + i, deviceOutput, CHUNK * sizeof(int), cudaMemcpyDeviceToHost, stream);
        if(err != cudaSuccess)
        {
            printf("GPU Error - cudaMemcpyAsync() failed for Output : %s.\n", cudaGetErrorString(err));
            cleanup();
            exit(EXIT_FAILURE);
        }
    }

    // Wait for all queued copies/kernels on the stream to finish.
    err = cudaStreamSynchronize(stream);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaStreamSynchronize() failed : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    //stop timer
    err = cudaEventRecord(stop, 0);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaEventRecord() failed for stop : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    err = cudaEventSynchronize(stop);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaEventSynchronize() failed for stop : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    err = cudaEventElapsedTime(&elapsedTime, start, stop);
    if(err != cudaSuccess)
    {
        printf("GPU Error - cudaEventElapsedTime() failed : %s.\n", cudaGetErrorString(err));
        cleanup();
        exit(EXIT_FAILURE);
    }

    printf("Time taken for single stream : %3.1f ms\n", elapsedTime);

    //total cleanup
    cleanup();
    return (0);
}

// Releases every resource this program may have acquired; all pointers are
// NULL-guarded and reset so it is safe to call from any error path.
void cleanup(void)
{
    //code
    //free device memory
    if(deviceOutput)
    {
        cudaFree(deviceOutput);
        deviceOutput = NULL;
    }
    if(deviceInput2)
    {
        cudaFree(deviceInput2);
        deviceInput2 = NULL;
    }
    if(deviceInput1)
    {
        cudaFree(deviceInput1);
        deviceInput1 = NULL;
    }

    //free host memory
    if(hostOutput)
    {
        cudaFreeHost(hostOutput);
        hostOutput = NULL;
    }
    if(hostInput2)
    {
        cudaFreeHost(hostInput2);
        hostInput2 = NULL;
    }
    if(hostInput1)
    {
        cudaFreeHost(hostInput1);
        hostInput1 = NULL;
    }

    //destroy stream
    cudaStreamDestroy(stream);

    //destroy events
    cudaEventDestroy(stop);
    cudaEventDestroy(start);
}
11,388
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <string>
#include <cmath>
#include "SAP.cuh"

using namespace std;

// Driver for the SAP algorithm: reads data points from
// "example_data_points.txt" and an initial projection from
// "initial_projection.txt", runs SAP (declared in SAP.cuh), and writes the
// resulting projection to "output_projection.txt".
int main(int argc, char ** argv){
    // Key parameters of the SAP algorithm, queried from the user.
    int iterations;
    float step_size;
    cout << "Number of iterations (a positive integer): \n";
    cin >> iterations;
    cout << "Step size (real number between 0 and .5): \n";
    cin >> step_size;
    if (iterations <= 0){
        cerr << "Number of iterations must be a positive integer.\n";
        exit(1);
    }

    // Parameters characterizing the data, read from the input files.
    int input_dim;
    int numb_points;
    int proj_dim;

    /*--------------- Begin importing data --------------*/
    ifstream inFile;
    inFile.open("example_data_points.txt");
    if (!inFile){
        cerr << "Unable to open file with data points.\n";
        exit(1);
    }

    // First two entries give the input data dimension and the number of points.
    inFile >> input_dim;
    inFile >> numb_points;

    // Data points, stored flat: input_dim * numb_points entries.
    float * h_points_in = new float[input_dim*numb_points];
    for (int i = 0; i < input_dim*numb_points; i++){
        inFile >> h_points_in[i];
    }
    inFile.close();

    // Read in the initial projection.
    inFile.open("initial_projection.txt");
    if (!inFile){
        cerr << "Unable to open initial projection file.\n";
        exit(1);
    }
    // Projection dimension is stored as the first entry of the file.
    inFile >> proj_dim;

    // Initial projection, stored flat: input_dim * proj_dim entries.
    float * h_proj = new float[input_dim*proj_dim];
    for (int i = 0; i < proj_dim*input_dim; i++){
        inFile >> h_proj[i];
    }
    inFile.close();

    // Smallest secant norm per iteration. Was a variable-length array
    // (`float a[iterations]`) -- not standard C++; heap-allocate instead.
    float * h_smallest_secant_norms = new float[iterations];

    // Call the SAP algorithm.
    SAP(input_dim, numb_points, h_points_in, proj_dim, h_proj,
        h_smallest_secant_norms, iterations, step_size);

    // Write the output projection.
    // NOTE(review): only the first proj_dim entries are written, although
    // h_proj holds input_dim*proj_dim values -- confirm whether the full
    // matrix should be dumped here. Behavior kept as-is.
    ofstream myFile;
    myFile.open("output_projection.txt");
    for (int j = 0; j < proj_dim; j++){
        myFile << h_proj[j] << "\n";
    }
    myFile.close();

    // Arrays allocated with new[] must be released with delete[]
    // (the original used plain delete, which is undefined behavior).
    delete[] h_points_in;
    delete[] h_proj;
    delete[] h_smallest_secant_norms;
    return 0;
}
11,389
#include "includes.h"

#define IDX2D(a, i, stride, j) ((a)[(i)*(stride) + (j)])

// One time step of a 2D second-order finite-difference update: each interior
// mesh point accelerates by the discrete Laplacian of z, then v and z are
// advanced. Each block stages its z values into a dynamic shared-memory tile
// (blockDim.x * blockDim.y doubles); tile-edge threads fetch the missing
// neighbor straight from global memory.
__global__ void sim_kernel_tiled(double *z, double *v, size_t nx, size_t ny, double dx2inv, double dy2inv, double dt) {
    extern __shared__ double z_tile[];

    // Mesh coordinates; the +1 skips the first (boundary) row/column.
    const int block_mesh_x = blockDim.x*blockIdx.x + 1;
    const int block_mesh_y = blockDim.y*blockIdx.y + 1;
    const int mesh_xx = block_mesh_x + threadIdx.x;
    const int mesh_xy = block_mesh_y + threadIdx.y;

    // We have to read into the tile BEFORE dropping threads so that it's
    // actually fully initialized.
    // NOTE(review): this read is unconditional, so the launch must be sized
    // such that mesh_xx < nx and mesh_xy < ny for every thread -- confirm.
    const double z_val = IDX2D(z_tile, threadIdx.y, blockDim.x, threadIdx.x)
                       = IDX2D(z, mesh_xy, nx, mesh_xx);

    // BUG FIX: the barrier must come BEFORE the early return. __syncthreads()
    // has to be reached by every thread of the block; the original returned
    // first, leaving the remaining threads at a divergent barrier (UB).
    __syncthreads();
    if (mesh_xx >= nx-1 || mesh_xy >= ny-1) return;

    double ax, ay;

    // Second difference in x: interior tile columns read both neighbors from
    // shared memory; edge columns fetch the out-of-tile neighbor from global.
    if (1 <= threadIdx.x && threadIdx.x <= blockDim.x-2)
        ax = dx2inv*(IDX2D(z_tile, threadIdx.y, blockDim.x, threadIdx.x-1)
                   + IDX2D(z_tile, threadIdx.y, blockDim.x, threadIdx.x+1) - 2.0*z_val);
    else {
        const int n = threadIdx.x == 0 ? -1 : +1;
        ax = dx2inv*(IDX2D(z, mesh_xy, nx, mesh_xx+n)
                   + IDX2D(z_tile, threadIdx.y, blockDim.x, threadIdx.x-n) - 2.0*z_val);
    }

    // Second difference in y, analogous to x.
    if (1 <= threadIdx.y && threadIdx.y <= blockDim.y-2)
        ay = dy2inv*(IDX2D(z_tile, threadIdx.y-1, blockDim.x, threadIdx.x)
                   + IDX2D(z_tile, threadIdx.y+1, blockDim.x, threadIdx.x) - 2.0*z_val);
    else {
        const int n = threadIdx.y == 0 ? -1 : +1;
        // BUG FIX: was dx2inv -- the y second difference must be scaled by
        // dy2inv, as in the interior branch above.
        ay = dy2inv*(IDX2D(z, mesh_xy+n, nx, mesh_xx)
                   + IDX2D(z_tile, threadIdx.y-n, blockDim.x, threadIdx.x) - 2.0*z_val);
    }

    // Semi-implicit-style update: advance v by the acceleration, then z by v.
    const double v_val = IDX2D(v, mesh_xy, nx, mesh_xx) += (ax+ay)/2.0*dt;
    IDX2D(z, mesh_xy, nx, mesh_xx) += dt*v_val;
}
11,390
#include <stdio.h>
#include <stdlib.h>

#define MATRIX_SIZE 100
#define BLOCK_DIM 16

// Element-wise square of a MATRIX_SIZE x MATRIX_SIZE int matrix:
// finalMatrix[i] = initialMatrix[i]^2. 2D launch; threads outside the
// matrix (from the rounded-up grid) are guarded out.
__global__ void matrixSquared(int *initialMatrix, int *finalMatrix)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (col < MATRIX_SIZE && row < MATRIX_SIZE) {
        const int idx = row * MATRIX_SIZE + col;
        finalMatrix[idx] = initialMatrix[idx] * initialMatrix[idx];
    }
}

int main(int argc, char ** argv)
{
    const int MATRIX_BYTES = MATRIX_SIZE * MATRIX_SIZE * sizeof(int);

    // Host matrices: random input, result buffer.
    int h_in[MATRIX_SIZE][MATRIX_SIZE];
    int h_out[MATRIX_SIZE][MATRIX_SIZE];

    // Fill the input with small random values and echo it.
    printf("Initial matrix\n");
    for (int r = 0; r < MATRIX_SIZE; ++r) {
        for (int c = 0; c < MATRIX_SIZE; ++c) {
            h_in[r][c] = rand() % 10;
            printf("%d ", h_in[r][c]);
        }
        printf("\n");
    }

    // Device buffers.
    int *d_in = NULL;
    int *d_out = NULL;
    cudaMalloc((void**) &d_in, MATRIX_BYTES);
    cudaMalloc((void**) &d_out, MATRIX_BYTES);

    // Upload the input matrix.
    cudaMemcpy(d_in, h_in, MATRIX_BYTES, cudaMemcpyHostToDevice);

    // Ceil-divide so the grid covers every cell.
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((MATRIX_SIZE + dimBlock.x - 1) / dimBlock.x,
                 (MATRIX_SIZE + dimBlock.y - 1) / dimBlock.y);

    // Launch the kernel and fetch the result.
    matrixSquared<<<dimGrid, dimBlock>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, MATRIX_BYTES, cudaMemcpyDeviceToHost);

    // Print the squared matrix.
    printf("Result matrix\n");
    for (int r = 0; r < MATRIX_SIZE; ++r) {
        for (int c = 0; c < MATRIX_SIZE; ++c)
            printf("%d ", h_out[r][c]);
        printf("\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
11,391
#include <stdio.h>
#include <stdlib.h>

#define SRAND_VALUE 1985
#define BLOCK_SIZE_x 32
#define BLOCK_SIZE_y 16

// Copies the first/last real rows into the opposite ghost rows so the grid
// wraps vertically. Launch: 1D, at least `dim` threads.
__global__ void ghostRows(int dim, int* grid)
{
    // We want id in [1,dim]
    int id = blockDim.x * blockIdx.x + threadIdx.x + 1;

    if (id <= dim)
    {
        //Copy first real row to bottom ghost row
        grid[(dim+2)*(dim+1)+id] = grid[(dim+2)+id];
        //Copy last real row to top ghost row
        grid[id] = grid[(dim+2)*dim + id];
    }
}

// Copies the first/last real columns into the opposite ghost columns so the
// grid wraps horizontally (including the corner ghost cells).
// Launch: 1D, at least dim+2 threads.
__global__ void ghostCols(int dim, int* grid)
{
    // We want id in [0,dim+1]
    int id = blockDim.x * blockIdx.x + threadIdx.x;

    if (id <= dim+1)
    {
        //Copy first real column to right most ghost column
        grid[id*(dim+2)+dim+1] = grid[id*(dim+2)+1];
        //Copy last real column to left most ghost column
        grid[id*(dim+2)] = grid[id*(dim+2) + dim];
    }
}

// One Game of Life generation. Blocks overlap by a one-cell halo: the
// effective stride is blockDim-2 in each direction, the whole block stages
// its (BLOCK_SIZE_y x BLOCK_SIZE_x) window into shared memory, and only the
// interior threads of the tile write a new cell state.
__global__ void GOL(int dim, int *grid, int *newGrid)
{
    // Mesh coordinates into the (dim+2)^2 grid (ghost cells included).
    int iy = (blockDim.y -2) * blockIdx.y + threadIdx.y;
    int ix = (blockDim.x -2) * blockIdx.x + threadIdx.x;
    int id = iy * (dim+2) + ix;

    int i = threadIdx.y;
    int j = threadIdx.x;
    int numNeighbors;

    // Declare the shared memory on a per block level
    __shared__ int s_grid[BLOCK_SIZE_y][BLOCK_SIZE_x];

    // Copy cells into shared memory (halo threads only stage, never write out)
    if (ix <= dim+1 && iy <= dim+1)
        s_grid[i][j] = grid[id];

    //Sync all threads in block -- barrier is outside the conditional, so
    //every thread reaches it.
    __syncthreads();

    if (iy <= dim && ix <= dim) {
        // Only the interior of the tile computes; the outer ring is halo.
        if(i != 0 && i !=blockDim.y-1 && j != 0 && j !=blockDim.x-1) {

            // Get the number of neighbors for a given grid point
            numNeighbors = s_grid[i+1][j] + s_grid[i-1][j]     //upper lower
                         + s_grid[i][j+1] + s_grid[i][j-1]     //right left
                         + s_grid[i+1][j+1] + s_grid[i-1][j-1] //diagonals
                         + s_grid[i-1][j+1] + s_grid[i+1][j-1];

            int cell = s_grid[i][j];

            // Here we have explicitly all of the game rules
            if (cell == 1 && numNeighbors < 2)
                newGrid[id] = 0;
            else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3))
                newGrid[id] = 1;
            else if (cell == 1 && numNeighbors > 3)
                newGrid[id] = 0;
            else if (cell == 0 && numNeighbors == 3)
                newGrid[id] = 1;
            else
                newGrid[id] = cell;
        }
    }
}

// Runs maxIter generations of Conway's Game of Life on a dim x dim torus
// (ghost-cell wrap-around) and prints the final live-cell count.
int main(int argc, char* argv[])
{
    int i,j,iter;
    int *h_grid;    //Grid on host
    int *d_grid;    //Grid on device
    int *d_newGrid; //Second grid used on device only
    int *d_tmpGrid; //tmp grid pointer used to switch between grid and newGrid

    int dim = 1024;     //Linear dimension of our grid - not counting ghost cells
    int maxIter = 1<<10; //Number of game steps

    size_t bytes = sizeof(int)*(dim+2)*(dim+2);
    // Allocate host Grid used for initial setup and read back from device
    h_grid = (int*)malloc(bytes);

    // Allocate device grids
    cudaMalloc(&d_grid, bytes);
    cudaMalloc(&d_newGrid, bytes);

    // Assign initial population randomly (fixed seed for reproducibility)
    srand(SRAND_VALUE);
    for(i = 1; i<=dim; i++) {
        for(j = 1; j<=dim; j++) {
            h_grid[i*(dim+2)+j] = rand() % 2;
        }
    }

    cudaFuncSetCacheConfig(GOL, cudaFuncCachePreferShared);

    // Copy over initial game grid (Dim-1 threads)
    cudaMemcpy(d_grid, h_grid, bytes, cudaMemcpyHostToDevice);

    // GOL blocks advance by blockDim-2 cells because of the halo overlap.
    dim3 blockSize(BLOCK_SIZE_x, BLOCK_SIZE_y,1);
    int linGrid_x = (int)ceil(dim/(float)(BLOCK_SIZE_x-2));
    int linGrid_y = (int)ceil(dim/(float)(BLOCK_SIZE_y-2));
    dim3 gridSize(linGrid_x,linGrid_y,1);

    dim3 cpyBlockSize(BLOCK_SIZE_x,1,1);
    dim3 cpyGridRowsGridSize((int)ceil(dim/(float)cpyBlockSize.x),1,1);
    dim3 cpyGridColsGridSize((int)ceil((dim+2)/(float)cpyBlockSize.x),1,1);

    // Main game loop: kernels are issued on the same (default) stream, so
    // each one sees the previous one's writes.
    for (iter = 0; iter<maxIter; iter++) {
        ghostRows<<<cpyGridRowsGridSize, cpyBlockSize>>>(dim, d_grid);
        ghostCols<<<cpyGridColsGridSize, cpyBlockSize>>>(dim, d_grid);
        GOL<<<gridSize, blockSize>>>(dim, d_grid, d_newGrid);

        // Swap our grids and iterate again
        d_tmpGrid = d_grid;
        d_grid = d_newGrid;
        d_newGrid = d_tmpGrid;
    }//iter loop

    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
        printf("CUDA error %s\n",cudaGetErrorString(error));

    // Copy back results and sum
    cudaMemcpy(h_grid, d_grid, bytes, cudaMemcpyDeviceToHost);

    // Sum up alive cells and print results
    int total = 0;
    for (i = 1; i<=dim; i++) {
        for (j = 1; j<=dim; j++) {
            total += h_grid[i*(dim+2)+j];
        }
    }
    printf("Total Alive: %d\n", total);

    cudaFree(d_grid);
    cudaFree(d_newGrid);
    free(h_grid);

    return 0;
}
11,392
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>

#define SAMPLES 10

/* Utility function, use to do error checking.

   Use this function like this:

   checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));

   And to check the result of a kernel invocation:

   checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
    if (result != cudaSuccess) {
        printf("CUDA error: %s \n", cudaGetErrorString(result));
        exit(1);
    }
}

// Writes each element's own global index into A; guarded so n need not be a
// multiple of the block size.
__global__ void helloKernel(int n, int* A) {
    size_t id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        A[id]=id;
}

// Fills a[0..n) with 0..n-1 on the GPU.
void helloCuda(int n, int* a) {
    int threadBlockSize = 512;

    // allocate the vector on the GPU
    int* deviceA = NULL;
    checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(int)));
    if (deviceA == NULL) {
        printf("Could not allocate memory for A. \n");
        return;
    }

    checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(int), cudaMemcpyHostToDevice));

    // BUG FIX: round the grid up. The original used n/threadBlockSize, which
    // silently leaves the tail unprocessed whenever n is not a multiple of
    // the block size.
    int numBlocks = (n + threadBlockSize - 1) / threadBlockSize;
    helloKernel<<<numBlocks, threadBlockSize>>>(n, deviceA);
    cudaDeviceSynchronize();

    // check whether the kernel invocation was successful
    checkCudaCall(cudaGetLastError());

    // copy result back
    checkCudaCall(cudaMemcpy(a, deviceA, n * sizeof(int), cudaMemcpyDeviceToHost));

    checkCudaCall(cudaFree(deviceA));
}

int main(int argc, char* argv[]) {
    int n = 655360;

    // BUG FIX: parse the requested size BEFORE allocating. The original
    // allocated `a` with the default n and then overwrote n from argv,
    // overflowing the buffer for any larger command-line value.
    if (argc > 1) n = atoi(argv[1]);

    int* a = new int[n];

    printf("Testing CUDA! \n");

    // initialize the vector.
    for(int i=0; i<n; i++) {
        a[i] = 0;
    }

    helloCuda(n, a);

    srand(n);
    // verify the results on SAMPLES random positions
    for(int i=0; i<SAMPLES; i++) {
        int j = rand() % n;
        if (j!=a[j]) {
            // BUG FIX: was (j, j, a[j]) -- printed the index twice instead
            // of the actual (wrong) value.
            printf("Error in results! Element %d is %d, but should be %d! \n", j, a[j], j);
            exit(1);
        }
    }
    printf("results OK! \n");

    delete[] a;
    return 0;
}
11,393
#include <stdio.h>

// Define constants for array and thread block dimensions
// You don't need to change these
#define ARRAY_ELEMENTS 10
#define BLOCK_SIZE 10

// Stage 3: Implement the kernel
// Intended behavior: copy arraySrc_d into arrayDest_d, one thread per element.
__global__ void copyArray(int *arraySrc_d, int *arrayDest_d)
{
    // There's only a single thread block in this example
    // so we can just use the thread index to access the array

    // To be implemented
}

// Exercise skeleton: until Stages 1-5 are implemented the verification loop
// below compares uninitialized host memory and will report an error.
int main( int argc, char** argv)
{
    // pointer for host memory
    int *arraySrc_h, *arrayDest_h;

    // pointer for device memory -- NULL so the cudaFree calls at the end are
    // safe no-ops until Stage 1 is implemented.
    int *arraySrc_d = NULL, *arrayDest_d = NULL;

    size_t memSize = ARRAY_ELEMENTS * sizeof(int);
    arraySrc_h = (int *) malloc(memSize);
    arrayDest_h = (int *) malloc(memSize);
    if (arraySrc_h == NULL || arrayDest_h == NULL){
        printf("\nError in host memory allocation. Exiting");
        exit(1);
    }

    // Stage 1: Allocate device memory
    // To be implemented

    // Define Grid and Block dimensions - in this example there's only one block
    dim3 dimGrid(1);
    dim3 dimBlock(BLOCK_SIZE);

    //Set host source array with some test values
    for (int j = 0; j < ARRAY_ELEMENTS; j++)
    {
        arraySrc_h[j] = j;
    }

    //Stage 2: copy the source array to the device
    // To be implemented

    //Stage 4: Call the kernel function
    // To be implemented

    // block until the device has completed
    // (cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // current API with the same semantics here)
    cudaDeviceSynchronize();

    //Stage 5: Copy arrayDest_d back to the host arrayDest_h
    // To be implemented

    // Verify the data was copied correctly
    for (int j = 0; j < ARRAY_ELEMENTS; j++)
    {
        if (arrayDest_h[j] != arraySrc_h[j])
        {
            printf("\nERROR: Destination array is not equal to Source array\n");
            exit(-1);
        }
    }

    // free device memory
    cudaFree(arraySrc_d);
    cudaFree(arrayDest_d);

    // free host memory
    free(arraySrc_h);
    free(arrayDest_h);

    printf("Correct!\n");
    return 0;
}
11,394
#include<stdio.h>
#include<time.h>

#define N (64*64)
#define THREADS_PER_BLOCK 512

// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, n).
// Guarded so a rounded-up grid is safe for any n (the original had no bounds
// check and relied on N dividing evenly by the block size).
__global__ void add(int *a, int *b, int *c, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

int main( void )
{
    int *a, *b, *c;              // host copies
    int *dev_a, *dev_b, *dev_c;  // device copies
    int size = N * sizeof( int );

    // allocate device memory
    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_c, size );

    // allocate host memory and fill the inputs with small random values
    a = (int*)malloc( size );
    b = (int*)malloc( size );
    c = (int*)malloc( size );
    for(int i=0;i<N;i++)
    {
        a[i]=rand()%10;
        b[i]=rand()%10;
    }

    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice );

    // Ceil-divide so every element is covered even if N is not a multiple
    // of THREADS_PER_BLOCK (the original truncated the division).
    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    add<<< blocks, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c, N );

    // blocking copy also synchronizes with the kernel
    cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );

    for(int i=0;i<N;i++)
    {
        printf("%d ",c[i]);
    }

    free( a );
    free( b );
    free( c );
    cudaFree(dev_a );
    cudaFree(dev_b );
    cudaFree(dev_c );
    return 0;
}
11,395
#include <stdio.h>
#include <stdlib.h>
#include <cstring>

// Abort with a readable message if a CUDA call fails.
#define CSC(call) do { \
    cudaError_t res = call; \
    if (res != cudaSuccess) { \
        fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
        exit(1); \
    } \
} while (0)

// (The unused `mult`/`calc` helper macros from the original were dead code --
// they referenced struct fields that do not exist on these arrays -- and
// have been removed.)

// Per-class inverse covariance matrices and mean colors (up to 32 classes),
// uploaded once from the host via cudaMemcpyToSymbol.
__constant__ double cov[32][3][3];
__constant__ double av[32][3];

// Assigns every pixel the id of the class whose mean color is closest in
// (squared) Mahalanobis distance, storing the id in the alpha channel.
// Grid-stride loops in both dimensions cover the whole w x h image.
__global__ void kernel(uchar4 *dst, int w, int h, int nc) {
    int offsetx = blockDim.x * gridDim.x;
    int offsety = blockDim.y * gridDim.y;
    for (int x = blockDim.x * blockIdx.x + threadIdx.x; x < w; x += offsetx) {
        for (int y = blockDim.y * blockIdx.y + threadIdx.y; y < h; y += offsety) {
            double mn = 0;
            int id = -1;
            uchar4 p = dst[y * w + x];
            for (int i = 0; i < nc; ++i) {
                // cur = pixel - class mean
                double cur[3] = {p.x - av[i][0], p.y - av[i][1], p.z - av[i][2]};
                // val = cur^T * cov_i
                double val[3] = {0, 0, 0};
                for (int j = 0; j < 3; ++j) {
                    for (int k = 0; k < 3; ++k) {
                        val[j] += cur[k] * cov[i][k][j];
                    }
                }
                // vval = cur^T * cov_i * cur  (squared Mahalanobis distance)
                double vval = 0;
                for (int j = 0; j < 3; ++j) {
                    vval += val[j] * cur[j];
                }
                // BUG FIX: test `id == -1` FIRST. The original wrote
                // `vval < mn || id == -1`, which evaluates `vval < mn` on an
                // uninitialized `mn` for the first class (UB).
                if (id == -1 || vval < mn) {
                    mn = vval;
                    id = i;
                }
            }
            dst[y * w + x].w = id;
        }
    }
}

// stdin protocol: input file name, output file name, number of classes nc,
// then for each class the count np followed by np (x, y) training pixels.
// The image file is binary: int w, int h, then w*h RGBA bytes.
int main() {
    int w, h;

    // Input file name (first line of stdin, newline stripped).
    size_t sz = 256;
    char* name = (char*) malloc(sizeof(char) * sz);
    name[getline(&name, &sz, stdin) - 1] = '\0';

    FILE *in = fopen(name, "rb");
    fread(&w, sizeof(int), 1 , in);
    fread(&h, sizeof(int), 1 , in);
    uchar4 *data = (uchar4*)malloc(sizeof(uchar4) * h * w);
    fread(data, sizeof(uchar4), h * w, in);
    fclose(in);

    // Output file name (second line of stdin).
    name[getline(&name, &sz, stdin) - 1] = '\0';

    int nc;
    scanf("%d", &nc);

    // Training pixel coordinates for the current class.
    // NOTE(review): ~4 MB of stack -- fine with the usual 8 MB main stack,
    // but worth heap-allocating if this ever moves off the main thread.
    int x[1 << 19];
    int y[1 << 19];
    double matr[32][3][3];
    double inv[32][3][3];
    double avg[32][3];

    for (int i = 0; i < nc; ++i) {
        int np;
        scanf("%d", &np);

        // Mean color of the class's training pixels.
        int avgt[3] = {0, 0, 0};
        for (int j = 0; j < np; ++j) {
            scanf("%d %d", x + j, y + j);
            avgt[0] += data[y[j] * w + x[j]].x;
            avgt[1] += data[y[j] * w + x[j]].y;
            avgt[2] += data[y[j] * w + x[j]].z;
        }
        for (int j = 0; j < 3; ++j) {
            avg[i][j] = avgt[j] / double(np);
        }

        // Sample covariance matrix (unbiased, / (np - 1)).
        for (int k = 0; k < 3; ++k) {
            for (int l = 0; l < 3; ++l) {
                matr[i][k][l] = 0;
            }
        }
        for (int j = 0; j < np; ++j) {
            double cur[3] = {avg[i][0] - data[y[j] * w + x[j]].x,
                             avg[i][1] - data[y[j] * w + x[j]].y,
                             avg[i][2] - data[y[j] * w + x[j]].z};
            for (int k = 0; k < 3; ++k) {
                for (int l = 0; l < 3; ++l) {
                    matr[i][l][k] += cur[k] * cur[l];
                }
            }
        }
        for (int k = 0; k < 3; ++k) {
            for (int l = 0; l < 3; ++l) {
                matr[i][k][l] /= np - 1;
            }
        }

        // Invert the 3x3 covariance via the adjugate / determinant.
        double det = matr[i][0][0] * (matr[i][1][1] * matr[i][2][2] - matr[i][1][2] * matr[i][2][1])
                   - matr[i][0][1] * (matr[i][1][0] * matr[i][2][2] - matr[i][1][2] * matr[i][2][0])
                   + matr[i][0][2] * (matr[i][1][0] * matr[i][2][1] - matr[i][1][1] * matr[i][2][0]);
        inv[i][0][0] = (matr[i][1][1] * matr[i][2][2] - matr[i][1][2] * matr[i][2][1]) / det;
        inv[i][1][0] = -(matr[i][1][0] * matr[i][2][2] - matr[i][1][2] * matr[i][2][0]) / det;
        inv[i][2][0] = (matr[i][1][0] * matr[i][2][1] - matr[i][1][1] * matr[i][2][0]) / det;
        inv[i][0][1] = -(matr[i][0][1] * matr[i][2][2] - matr[i][0][2] * matr[i][2][1]) / det;
        inv[i][1][1] = (matr[i][0][0] * matr[i][2][2] - matr[i][0][2] * matr[i][2][0]) / det;
        inv[i][2][1] = -(matr[i][0][0] * matr[i][2][1] - matr[i][0][1] * matr[i][2][0]) / det;
        inv[i][0][2] = (matr[i][0][1] * matr[i][1][2] - matr[i][0][2] * matr[i][1][1]) / det;
        inv[i][1][2] = -(matr[i][0][0] * matr[i][1][2] - matr[i][0][2] * matr[i][1][0]) / det;
        inv[i][2][2] = (matr[i][0][0] * matr[i][1][1] - matr[i][0][1] * matr[i][1][0]) / det;
    }

    // Upload per-class statistics to constant memory (first nc entries only).
    CSC(cudaMemcpyToSymbol(cov, inv, sizeof(double) * nc * 9));
    CSC(cudaMemcpyToSymbol(av, avg, sizeof(double) * nc * 3));

    uchar4 *dev_data;
    CSC(cudaMalloc(&dev_data, sizeof(uchar4) * h * w));
    CSC(cudaMemcpy(dev_data, data, sizeof(uchar4) * h * w, cudaMemcpyHostToDevice));
    kernel<<<dim3(16, 16), dim3(16, 16)>>>(dev_data, w, h, nc);
    CSC(cudaMemcpy(data, dev_data, sizeof(uchar4) * h * w, cudaMemcpyDeviceToHost));

    // Write the labeled image back out.
    FILE *out = fopen(name, "wb");
    fwrite(&w, sizeof(int), 1, out);
    fwrite(&h, sizeof(int), 1, out);
    fwrite(data, sizeof(uchar4), w * h, out);
    fclose(out);

    CSC(cudaFree(dev_data));
    free(data);
    free(name);
    return 0;
}
11,396
//nvcc -ptx EM3_2.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"

// Updates the azimuthal magnetic field Hphi from the curl of (Er, Ez):
//   Hphi -= 0.5 * ( dt/(mu*dr) * dEz - dt/(mu*dz) * dEr )
// i.e. one half-step of an FDTD-style update in (r, z) coordinates.
//
// Indexing: nz is the linear block index, nr the linear thread index within
// the block; n_r indexes Hphi/Er (row length = threads per block) and n_z
// indexes Ez (row length = threads per block + 1, one extra column so
// Ez[n_z+1] is the neighboring r sample).
// NOTE(review): this layout implies the caller sizes Er/Ez/Hphi to match the
// launch configuration exactly (no bounds checks here) -- confirm against
// the host-side (MATLAB?) launch that allocates these arrays.
__device__ void EM1(
    double * Er,
    double * Ez,
    double * Hphi,
    const double mu,
    const double dr,
    const double dz,
    const double dt )
{
    int nz = blockIdx.x + blockIdx.y * gridDim.x;      // linear block index
    int nr = threadIdx.x + blockDim.x * threadIdx.y;   // linear thread index
    int threadsPerBlock = blockDim.x*blockDim.y;
    int n_r = nr + nz*threadsPerBlock;                 // index into Er / Hphi
    int n_z = nr + nz*(threadsPerBlock+1);             // index into Ez (wider rows)
    // Half-weighted curl update of Hphi.
    Hphi[n_r] = Hphi[n_r] -0.5* (dt/(mu*dr)*( Ez[n_z+1]-Ez[n_z] ) - dt/(mu*dz)*( Er[n_r + threadsPerBlock]-Er[n_r] ));
}

// Kernel entry point: forwards straight to EM1.
// NOTE(review): the name looks copy-pasted from a Mandelbrot template; it is
// kept because external callers (e.g. a PTX loader) bind to it by name.
__global__ void processMandelbrotElement(
    double * Er,
    double * Ez,
    double * Hphi,
    const double mu,
    const double dr,
    const double dz,
    const double dt )
{
    EM1(Er,Ez,Hphi,mu,dr,dz,dt);
}
11,397
/***
 Copyright 2012 Injung Kim

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
***/

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void Propagate_kernel(float *pDevInput, float *pDevWeight, int inDim, int outDim, float *pDevOutput);

// Forward-propagates one fully-connected sigmoid layer on the GPU.
// pWeight holds outDim rows of (inDim weights followed by 1 bias);
// pOutput receives outDim sigmoid activations.
void PropagateOnDevice(float *pInput, float *pWeight, int inDim, int outDim, float *pOutput)
{
    float *pDevInput = NULL, *pDevOutput = NULL, *pDevWeight = NULL;

    // allocate device memory
    cudaMalloc((void**)&pDevInput, inDim * sizeof(float));
    cudaMalloc((void**)&pDevOutput, outDim * sizeof(float));
    cudaMalloc((void**)&pDevWeight, (inDim+1)*outDim * sizeof(float));

    // copy input and weight to device memory
    cudaMemcpy(pDevInput, pInput, inDim * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(pDevWeight, pWeight, (inDim+1)*outDim * sizeof(float), cudaMemcpyHostToDevice);

    // compute output on CUDA device, one thread per output unit
    // NOTE(review): a single block of outDim threads fails to launch for
    // outDim > 1024 (max threads per block) -- confirm layer sizes.
    Propagate_kernel<<<1, outDim>>>(pDevInput, pDevWeight, inDim, outDim, pDevOutput);

    // copy output to host memory (blocking copy also syncs with the kernel)
    cudaMemcpy(pOutput, pDevOutput, outDim * sizeof(float), cudaMemcpyDeviceToHost);

    // deallocate device memory
    cudaFree(pDevInput);
    cudaFree(pDevOutput);
    cudaFree(pDevWeight);
}

// Kernel: pDevOutput[idx] = sigmoid(w_idx . input + bias_idx).
// Grid-stride loop, so any launch configuration covers all outDim units.
__global__ void Propagate_kernel(float *pDevInput, float *pDevWeight, int inDim, int outDim, float *pDevOutput)
{
    // idx is thread index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    while(idx < outDim){
        // Row idx of the weight matrix: inDim weights, then the bias.
        float *w = &pDevWeight[idx * (inDim + 1)];

        float net = 0.F;
        for(int i = 0; i < inDim; i++)
            net += pDevInput[i] * w[i];
        net += w[inDim];        // add bias

        // Logistic sigmoid. expf is the float overload -- the original used
        // (float)exp(-net), silently promoting to double and back.
        pDevOutput[idx] = 1.F/(1.F + expf(-net));

        idx += blockDim.x * gridDim.x;  // blockDim.x * gridDim.x is # of threads
    }
}
11,398
/*!
 * \brief Walk-through of Thrust's "fancy" iterators:
 *        constant, counting, transform, permutation and zip iterators.
 * \see   https://docs.nvidia.com/cuda/thrust/
 */
#include <iostream>

#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>

// constant_iterator: dereferences to the same value at every position.
void TestConstantIterator()
{
    std::cout << "TestConstantIterator: " << std::endl;

    auto first = thrust::make_constant_iterator<int>(10);
    auto last = first + 3;

    std::cout << "first[0] = " << first[0] << std::endl;     // returns 10
    std::cout << "first[1] = " << first[1] << std::endl;     // returns 10
    std::cout << "first[100] = " << first[100] << std::endl; // returns 10

    // sum over [first, last): 10 * 3
    int sum = thrust::reduce(first, last);
    std::cout << "sum = " << sum << std::endl;
}

// counting_iterator: position i dereferences to start + i.
void TestCountingIterator()
{
    std::cout << "TestCountingIterator: " << std::endl;

    auto first = thrust::make_counting_iterator<int>(10);
    auto last = first + 3;

    std::cout << "first[0] = " << first[0] << std::endl;     // returns 10
    std::cout << "first[1] = " << first[1] << std::endl;     // returns 11=10+1
    std::cout << "first[100] = " << first[100] << std::endl; // returns 110=10+100

    // sum over [first, last): 10 + 11 + 12
    int sum = thrust::reduce(first, last);
    std::cout << "sum = " << sum << std::endl;
}

// transform_iterator: applies a functor lazily on dereference.
void TestTransformIterator()
{
    std::cout << "TestTransformIterator: " << std::endl;

    thrust::device_vector<int> vec(3);
    vec[0] = 10;
    vec[1] = 20;
    vec[2] = 30;

    thrust::negate<int> negate_op;
    auto first = thrust::make_transform_iterator(vec.begin(), negate_op);
    auto last = thrust::make_transform_iterator(vec.end(), negate_op);

    std::cout << "first[0] = " << first[0] << std::endl; // returns -10
    std::cout << "first[1] = " << first[1] << std::endl; // returns -20
    std::cout << "first[2] = " << first[2] << std::endl; // returns -30

    int sum = thrust::reduce(first, last);
    std::cout << "sum = " << sum << std::endl; // returns -60 (-10 + -20 + -30)
}

// permutation_iterator: indirect access source[map[i]], fused with reduce.
void TestPermutationIterator()
{
    std::cout << "TestPermutationIterator: " << std::endl;

    // Gather locations
    thrust::device_vector<int> map(4);
    map[0] = 3;
    map[1] = 1;
    map[2] = 0;
    map[3] = 5;

    // Array to gather from: 10, 20, ..., 60
    thrust::device_vector<int> source(6);
    for (int i = 0; i < 6; ++i)
        source[i] = 10 * (i + 1);

    // Fuse gather with reduction:
    // sum = source[map[0]] + source[map[1]] + ...
    auto gather_begin = thrust::make_permutation_iterator(source.begin(), map.begin());
    auto gather_end = thrust::make_permutation_iterator(source.begin(), map.end());
    int sum = thrust::reduce(gather_begin, gather_end);
    std::cout << "sum = " << sum << std::endl;
}

// zip_iterator: iterates two sequences in lockstep as tuples.
void TestZipIterator()
{
    std::cout << "TestZipIterator: " << std::endl;

    thrust::device_vector<int> A(3);
    thrust::device_vector<char> B(3);
    A[0] = 10;
    A[1] = 20;
    A[2] = 30;
    B[0] = 'x';
    B[1] = 'y';
    B[2] = 'z';

    auto zfirst = thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin()));
    auto zlast = thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end()));

    std::cout << "first[0] = (" << thrust::get<0>(zfirst[0]) << ", " << thrust::get<1>(zfirst[0]) << ")" << std::endl; // returns 10,x
    std::cout << "first[1] = (" << thrust::get<0>(zfirst[1]) << ", " << thrust::get<1>(zfirst[1]) << ")" << std::endl; // returns 20,y
    std::cout << "first[2] = (" << thrust::get<0>(zfirst[2]) << ", " << thrust::get<1>(zfirst[2]) << ")" << std::endl; // returns 30,z

    // maximum of [zfirst, zlast) under lexicographic tuple ordering
    thrust::maximum< thrust::tuple<int, char> > binary_op;
    thrust::tuple<int, char> init = zfirst[0];
    auto res = thrust::reduce(zfirst, zlast, init, binary_op); // returns tuple(30, 'z')
    std::cout << "res = (" << thrust::get<0>(res) << ", " << thrust::get<1>(res) << ")" << std::endl; // returns 30,z
}

int main(void)
{
    // Run every demo, printing the same separator banner after each.
    void (*tests[])() = {
        TestConstantIterator,
        TestCountingIterator,
        TestTransformIterator,
        TestPermutationIterator,
        TestZipIterator,
    };
    for (auto test : tests) {
        test();
        std::cout << "##############################" << std::endl << std::endl;
    }
    return 0;
}
11,399
#include "includes.h"
/** Modified version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
 * The modifications are
 * removed texture memory usage
 * removed split query KNN computation
 * added feature extraction with bilinear interpolation
 *
 * Last modified by Christopher B. Choy <chrischoy@ai.stanford.edu> 12/23/2016
 */
// Includes
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
//                                            KERNELS                                            //
//-----------------------------------------------------------------------------------------------//
/**
 * Computes the distance between two matrices A (reference points) and
 * B (query points) containing respectively wA and wB points.
 *
 * @param A      pointer on the matrix A
 * @param wA     width of the matrix A = number of points in A
 * @param B      pointer on the matrix B
 * @param wB     width of the matrix B = number of points in B
 * @param dim    dimension of points = height of matrices A and B
 * @param AB     pointer on the matrix containing the wA*wB distances computed
 */
/**
 * Gathers, at the top of each column of the distance matrix, the k smallest
 * distances for that column.
 *
 * @param dist    distance matrix
 * @param ind     index matrix
 * @param width   width of the distance matrix and of the index matrix
 * @param height  height of the distance matrix and of the index matrix
 * @param k       number of neighbors to consider
 */
/**
 * Computes the square root of the first line (width-th first element)
 * of the distance matrix.
 *
 * @param dist   distance matrix
 * @param width  width of the distance matrix
 * @param k      number of neighbors to consider
 */
//-----------------------------------------------------------------------------------------------//
//                                   K-th NEAREST NEIGHBORS                                      //
//-----------------------------------------------------------------------------------------------//
/**
 * Prints the error message returned during the memory allocation.
* * @param error error value return by the memory allocation function * @param memorySize size of memory tried to be allocated */ __global__ void cuComputeDistanceGlobal( float* A, int wA, float* B, int wB, int dim, float* AB){ // Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B __shared__ float shared_A[BLOCK_DIM][BLOCK_DIM]; __shared__ float shared_B[BLOCK_DIM][BLOCK_DIM]; // Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step) __shared__ int begin_A; __shared__ int begin_B; __shared__ int step_A; __shared__ int step_B; __shared__ int end_A; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Other variables float tmp; float ssd = 0; // Loop parameters begin_A = BLOCK_DIM * blockIdx.y; begin_B = BLOCK_DIM * blockIdx.x; step_A = BLOCK_DIM * wA; step_B = BLOCK_DIM * wB; end_A = begin_A + (dim-1) * wA; // Conditions int cond0 = (begin_A + tx < wA); // used to write in shared memory int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix // Loop over all the sub-matrices of A and B required to compute the block sub-matrix for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) { // Load the matrices from device memory to shared memory; each thread loads one element of each matrix if (a/wA + ty < dim){ shared_A[ty][tx] = (cond0)? A[a + wA * ty + tx] : 0; shared_B[ty][tx] = (cond1)? 
B[b + wB * ty + tx] : 0; } else{ shared_A[ty][tx] = 0; shared_B[ty][tx] = 0; } // Synchronize to make sure the matrices are loaded __syncthreads(); // Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix if (cond2 && cond1){ for (int k = 0; k < BLOCK_DIM; ++k){ tmp = shared_A[k][ty] - shared_B[k][tx]; ssd += tmp*tmp; } } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; each thread writes one element if (cond2 && cond1) AB[(begin_A + ty) * wB + begin_B + tx] = ssd; }
11,400
#define EIGEN_USE_GPU
#include <cuda.h>
#include <stdio.h>
#include <algorithm>

#define THREADS_PER_BLOCK 1024
#define MAX_STREAMS 100

using namespace std;

/**
 * For one sample of the batch, resolves the flat buffer index of every
 * (cell, kernel-offset) pair, writing -1 where the neighbour falls outside
 * the sample or wraps across a feature-map row boundary.
 *
 * Launch layout: <<<1, N>>> with one thread per (cell, offset) pair; when a
 * sample needs more than THREADS_PER_BLOCK threads it is launched in several
 * chunks distinguished by threadOffset.
 *
 * @param block                sample index within the batch
 * @param input_feature_width  width (== height) of the square feature map
 * @param kernal_width         width of the square kernel (assumed odd)
 * @param kernal_deltas        unused by this kernel (kept for signature compatibility)
 * @param kernal_postions      flat-index delta for each kernel offset
 * @param kernel_pointers      out: resolved neighbour index, or -1 if invalid
 * @param threadOffset         base thread index for chunked launches
 */
__global__ void getValidKernalPositions(
    const int block,
    const int input_feature_width,
    const int kernal_width,
    const int *kernal_deltas,
    const int *kernal_postions,
    int *kernel_pointers,
    int threadOffset = 0)
{
    const int threadID = threadOffset + threadIdx.x;
    // Flat index (within the whole batch buffer) of the cell this thread serves.
    const int ix = block * (input_feature_width*input_feature_width) + (threadID / (kernal_width*kernal_width));
    // [kernal_start, kernal_end) bounds this sample's slice of the buffer.
    const int kernal_start = block * (input_feature_width*input_feature_width);
    const int kernal_end   = (block+1) * (input_feature_width*input_feature_width);
    // Kernel row/column handled by this thread.
    const int row    = (threadID % (kernal_width*kernal_width)) / kernal_width;
    const int column = (threadID % (kernal_width*kernal_width)) % kernal_width;
    // Index of this kernel row's centre column — used to derive the bounds of
    // the feature-map row it lands in.
    const int row_iX = ix + kernal_postions[(row * kernal_width) + int(kernal_width/2)];

    if ( (kernal_start <= row_iX) && (kernal_end > row_iX) ) {
        // First index of the feature-map row containing row_iX.
        const int row_start = (row_iX < input_feature_width) ? 0 : (row_iX - ( (row_iX - input_feature_width) % input_feature_width ));
        const int row_end   = row_start + input_feature_width;
        const int p         = ((row * kernal_width) + column);
        const int pointer   = (ix * ( kernal_width*kernal_width ) ) + p;
        const int location  = ix + kernal_postions[p];
        // Neighbour is valid only if it stays inside that same row (no wrap).
        if ( (row_start <= location) && (row_end > location) ) {
            kernel_pointers[pointer] = location;
        } else {
            kernel_pointers[pointer] = -1;
        }
    } else {
        // The row centre fell outside this sample entirely: mark invalid.
        const int pointer = (ix * ( kernal_width*kernal_width ) ) + ((row * kernal_width) + column);
        kernel_pointers[pointer] = -1;
    }
}

/**
 * Recurrent update for one sample, one thread per cell: each iteration, every
 * cell scatters (value * weight/100) into each of its valid neighbours in
 * output_clones via atomicAdd, then copies the accumulated clone back into
 * output.  A ReLU is applied once after the final iteration.
 *
 * Used when units * kernal_width^2 exceeds THREADS_PER_BLOCK, so the
 * per-(cell,offset) variant cannot fit in one block.  Launched <<<1, units>>>;
 * __syncthreads() is a full barrier only because the grid is a single block.
 * atomicAdd on double requires SM60+.
 *
 * @param block            sample index (selects the weight/cell slices)
 * @param inputs           unused here (initial state is preloaded into output)
 * @param weights          per-sample kernel weights, scaled by 1/100 on use
 * @param iterations       number of recurrent steps
 * @param kernel_pointers  neighbour table built by getValidKernalPositions
 * @param output           in: initial state; out: final activations
 * @param output_clones    scratch accumulator (preloaded like output)
 */
__global__ void RecurrentKernel_splitThreaded(
    const int block,
    const double* inputs,
    const double* weights,
    const int iterations,
    const int batch_samples,
    const int units,
    const int input_feature_width,
    const int kernal_width,
    const int *kernal_deltas,
    int *kernel_pointers,
    double *output,
    double *output_clones)
{
    const int cell = (block * units) + threadIdx.x;
    for (int i = 0; i < iterations; i++) {
        __syncthreads();
        // This thread walks all kernel offsets of its cell serially.
        for (int j = 0; j < (kernal_width*kernal_width); j++) {
            const int cell_pointer  = (cell * (kernal_width*kernal_width)) + j;
            const int cell_position = kernel_pointers[cell_pointer];
            // Fix: cell_position is an int; compare against 0, not 0.0.
            if (cell_position >= 0) {
                const int weight_pointer = (block * (kernal_width*kernal_width)) + j;
                const double weight = weights[ weight_pointer ]/100;
                const double value  = output[cell];
                const double o      = value * weight;
                atomicAdd(&output_clones[ cell_position ], o);   // requires SM60+
            }
        }
        __syncthreads();
        output[cell] = output_clones[cell];
    }
    // RELU
    output[cell] = (output[cell] > 0) ? output[cell] : 0;
}

/**
 * Same recurrence as RecurrentKernel_splitThreaded, but with one thread per
 * (cell, kernel-offset) pair — each thread contributes a single atomicAdd per
 * iteration.  The copy-back and final ReLU are performed redundantly by all
 * kernal_width^2 threads of a cell (same value, benign).
 *
 * Launched <<<1, units * kernal_width^2>>>, so only usable when that product
 * fits in one block.  atomicAdd on double requires SM60+.
 * Parameters match RecurrentKernel_splitThreaded.
 */
__global__ void RecurrentKernel(
    const int block,
    const double* inputs,
    const double* weights,
    const int iterations,
    const int batch_samples,
    const int units,
    const int input_feature_width,
    const int kernal_width,
    const int *kernal_deltas,
    int *kernel_pointers,
    double *output,
    double *output_clones)
{
    // Cell served by this thread, and which kernel offset it owns.
    const int cell   = block * (input_feature_width*input_feature_width) + (threadIdx.x / (kernal_width*kernal_width));
    const int offset = (threadIdx.x % (kernal_width*kernal_width));
    const int cell_pointer  = (cell * (kernal_width*kernal_width)) + offset;
    const int cell_position = kernel_pointers[cell_pointer];
    for (int i = 0; i < iterations; i++) {
        __syncthreads();
        // Fix: cell_position is an int; compare against 0, not 0.0.
        if (cell_position >= 0) {
            const int weight_pointer = (block * (kernal_width*kernal_width)) + offset;
            const double weight = weights[weight_pointer]/100;
            const double value  = output[cell];
            const double o      = value * weight;
            atomicAdd(&output_clones[cell_position], o);   // requires SM60+
        }
        __syncthreads();
        output[cell] = output_clones[cell];
    }
    // RELU
    output[cell] = (output[cell] > 0) ? output[cell] : 0;
}

/**
 * Host driver: builds the kernel-offset table, uploads all buffers, resolves
 * neighbour indices with getValidKernalPositions, then runs the recurrent
 * update for every sample of the batch across up to MAX_STREAMS CUDA streams,
 * and copies the result back into `output`.
 *
 * @param inputs               host buffer, units * batch_samples doubles
 * @param weights              host buffer, kernal_width^2 * batch_samples doubles
 * @param iterations           number of recurrent steps per sample
 * @param batch_samples        number of samples in the batch
 * @param units                cells per sample (== input_feature_width^2 — TODO confirm)
 * @param input_feature_width  width of the square feature map
 * @param kernal_width         width of the square kernel (odd)
 * @param output               host buffer receiving units * batch_samples doubles
 */
void RecurrentKernelLauncher(
    const double* inputs,
    const double* weights,
    const int iterations,
    const int batch_samples,
    const int units,
    const int input_feature_width,
    const int kernal_width,
    double* output)
{
    const int numOfThreads = (units*(kernal_width*kernal_width));
    const size_t inputBytes = (units * batch_samples) * sizeof(double);
    const size_t kernalCellPositionBytes = (( units * batch_samples ) * (kernal_width * kernal_width )) * sizeof(int);
    const size_t kernalBytes = ( kernal_width * kernal_width ) * sizeof(int);
    const size_t weightBytes = (( kernal_width * kernal_width ) * batch_samples) * sizeof(double);
    size_t sharedMemory = (inputBytes*3) + kernalCellPositionBytes + kernalBytes + weightBytes;
    // Fix: printing a size_t with %i is undefined behaviour; %zu is correct.
    printf("sharedMemory: %zu bytes\n", sharedMemory);

    // blocks are limited to THREADS_PER_BLOCK threads;
    // if threads go beyond this, the work is broken up into chunks.
    int threadSplit = 0;
    int threadIntervals = 0;
    int endingThread = 0;
    int numOfStreams = 0;
    if (numOfThreads > THREADS_PER_BLOCK) {
        threadSplit = (int)(numOfThreads/THREADS_PER_BLOCK)+1;
        threadIntervals = numOfThreads/threadSplit;
        // Last chunk picks up the remainder so all numOfThreads are covered.
        endingThread = numOfThreads - ((threadSplit-1) * threadIntervals);
    }
    if (batch_samples > MAX_STREAMS) { numOfStreams = MAX_STREAMS; } else { numOfStreams = batch_samples; }

    // allocate and initialize an array of stream handles
    cudaStream_t *streams;
    int *kernal_deltas, *d_kernal_deltas, *kernel_pointers, *d_kernel_pointers;
    double *d_output, *d_output_clones, *d_inputs, *d_weights;
    cudaMallocHost(&streams,(numOfStreams * sizeof(cudaStream_t)));
    for (int i = 0; i < numOfStreams; i++) cudaStreamCreate(&(streams[i]));

    // Build the kernel-offset delta table: delta = fx*width + fy for each
    // (fx, fy) in the centred kernal_width x kernal_width window.
    cudaMallocHost(&kernal_deltas, kernalBytes);
    int strtPos = -(kernal_width/2);
    int endPos = kernal_width - abs(strtPos);
    int pointer = 0;
    for(int fx=strtPos; fx < endPos; fx++)
        for(int fy=strtPos; fy < endPos; fy++) {
            kernal_deltas[pointer] = ((fx * input_feature_width) + fy);
            pointer++;
        }

    cudaMalloc( (void **)&d_kernal_deltas, kernalBytes );
    cudaMemcpy(d_kernal_deltas, kernal_deltas, kernalBytes, cudaMemcpyHostToDevice);
    // output and output_clones both start as a copy of the inputs.
    cudaMalloc( (void **)&d_output, inputBytes );
    cudaMemcpy(d_output, inputs, inputBytes, cudaMemcpyHostToDevice);
    cudaMalloc( (void **)&d_output_clones, inputBytes );
    cudaMemcpy(d_output_clones, inputs, inputBytes, cudaMemcpyHostToDevice);
    cudaMalloc( (void **)&d_inputs, inputBytes );
    cudaMemcpy(d_inputs, inputs, inputBytes, cudaMemcpyHostToDevice);
    cudaMalloc( (void **)&d_weights, weightBytes );
    cudaMemcpy(d_weights, weights, weightBytes, cudaMemcpyHostToDevice);
    // NOTE(review): kernel_pointers is copied to the device before being
    // initialized on the host; every slot is overwritten by
    // getValidKernalPositions before any kernel reads it, so the garbage is
    // never observed, but initcheck will flag this copy.
    cudaMallocHost( (void **)&kernel_pointers, kernalCellPositionBytes );
    cudaMalloc( (void **)&d_kernel_pointers, kernalCellPositionBytes );
    cudaMemcpy(d_kernel_pointers, kernel_pointers, kernalCellPositionBytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    // Resolve the neighbour table, one launch (or several chunks) per sample,
    // round-robined over the stream pool.
    // Fix: the unused kernal_deltas kernel argument used to receive the HOST
    // pointer; pass the device copy so no host address ever reaches a kernel.
    int stream = 0;
    if (numOfThreads > THREADS_PER_BLOCK) {
        for (int i = 0; i < batch_samples; i++) {
            for (int j = 0; j < threadSplit; j++) {
                int k_threads = (j == ( threadSplit-1 )) ? endingThread : threadIntervals;
                getValidKernalPositions<<<1, k_threads, 0, streams[stream]>>>(i, input_feature_width, kernal_width, d_kernal_deltas, d_kernal_deltas, d_kernel_pointers, j*threadIntervals);
                stream++;
                if ((stream % numOfStreams) == 0) stream = 0;
            }
        }
    } else {
        for (int i = 0; i < batch_samples; i++) {
            getValidKernalPositions<<<1, numOfThreads, 0, streams[stream]>>>(i, input_feature_width, kernal_width, d_kernal_deltas, d_kernal_deltas, d_kernel_pointers);
            stream++;
            if ((stream % numOfStreams) == 0) stream = 0;
        }
    }
    cudaDeviceSynchronize();
    // Fix: a device->host->device round-trip copy of d_kernal_deltas stood
    // here; the kernels only ever read that buffer (const), so the copies
    // were a no-op and have been removed.

    // batch_samples is the number of instances of the kernel;
    // units (or units * k^2) is the number of threads within each instance.
    stream = 0;
    if (numOfThreads > THREADS_PER_BLOCK) {
        for (int i = 0; i < batch_samples; i++) {
            RecurrentKernel_splitThreaded<<<1, units, 0, streams[stream]>>>(i, d_inputs, d_weights,iterations, batch_samples, units, input_feature_width,kernal_width, d_kernal_deltas, d_kernel_pointers, d_output, d_output_clones);
            stream++;
            if ((stream % numOfStreams) == 0) stream = 0;
        }
    } else {
        for (int i = 0; i < batch_samples; i++) {
            RecurrentKernel<<<1, numOfThreads, 0, streams[stream]>>>(i, d_inputs, d_weights, iterations, batch_samples, units, input_feature_width,kernal_width, d_kernal_deltas, d_kernel_pointers, d_output, d_output_clones);
            stream++;
            if ((stream % numOfStreams) == 0) stream = 0;
        }
    }
    cudaError_t cudaerr = cudaDeviceSynchronize();
    cudaMemcpy(output, d_output, inputBytes, cudaMemcpyDeviceToHost);

    // Fix: the streams were previously leaked — destroy each handle and
    // release the pinned array that held them.
    for (int i = 0; i < numOfStreams; i++) cudaStreamDestroy(streams[i]);
    cudaFreeHost(streams);
    cudaFreeHost(kernal_deltas);
    cudaFreeHost(kernel_pointers);
    cudaFree(d_kernal_deltas);
    cudaFree(d_kernel_pointers);
    cudaFree(d_output);
    cudaFree(d_output_clones);
    cudaFree(d_inputs);
    cudaFree(d_weights);
    if (cudaerr != cudaSuccess) {
        printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
    }
}