serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
23,601
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
#define NTAB 32
#define NDIV (1+(IM-1)/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)

/*
 * Minimal-standard Park-Miller generator with Bays-Durham shuffle
 * (Numerical Recipes "ran1"). Returns a uniform deviate in (0, RNMX].
 *
 * idum: pointer to the seed; pass a NEGATIVE value to (re)initialize,
 *       afterwards the value is updated in place and must not be changed
 *       between successive calls. State is kept in function statics, so
 *       this host function is not reentrant.
 */
double rand(long *idum)
{
    static long iy = 0;          /* last shuffled output */
    static long iv[NTAB];        /* shuffle table */
    int tab;
    long quot;
    double deviate;

    /* (Re)initialize when a non-positive seed is supplied or on first use. */
    if (*idum <= 0 || !iy) {
        *idum = (-(*idum) < 1) ? 1 : -(*idum);   /* guard against seed 0 */

        /* Warm up the generator and fill the shuffle table. */
        for (tab = NTAB + 7; tab >= 0; tab--) {
            quot  = (*idum) / IQ;
            *idum = IA * (*idum - quot * IQ) - IR * quot;  /* Schrage trick */
            if (*idum < 0)
                *idum += IM;
            if (tab < NTAB)
                iv[tab] = *idum;
        }
        iy = iv[0];
    }

    /* One multiplicative step without overflow (Schrage's method). */
    quot  = (*idum) / IQ;
    *idum = IA * (*idum - quot * IQ) - IR * quot;
    if (*idum < 0)
        *idum += IM;

    /* Bays-Durham shuffle: swap the new value into a random table slot. */
    tab     = iy / NDIV;
    iy      = iv[tab];
    iv[tab] = *idum;

    deviate = AM * iy;
    return (deviate > RNMX) ? RNMX : deviate;    /* never return exactly 1.0 */
}
23,602
#include <iostream>
using namespace std;

const int N = 16;        // buffer length (string + padding)
const int CORES = 16;    // one single-thread block per character

// Device kernel: block b owns character s[b]; converts ASCII
// lowercase letters to uppercase in place, leaves everything else alone.
__global__ void hello(char* s)
{
    char c = s[blockIdx.x];
    if (c >= 'a' && c <= 'z')
        s[blockIdx.x] = c - 32;   // 'a'-'A' == 32 in ASCII
}

// Host driver: copies a string to the device, uppercases it with one
// block per character, copies it back and prints the result.
int main(int argc, char const *argv[])
{
    char cpu_string[N] = "hello world!";
    char* gpu_string;

    cudaMalloc((void**)&gpu_string, N * sizeof(char));
    cudaMemcpy(gpu_string, cpu_string, N * sizeof(char),
               cudaMemcpyHostToDevice);

    hello<<<CORES, 1>>>(gpu_string);

    // Blocking copy also synchronizes with the kernel launch above.
    cudaMemcpy(cpu_string, gpu_string, N * sizeof(char),
               cudaMemcpyDeviceToHost);
    cudaFree(gpu_string);

    cout << cpu_string << endl;
    return 0;
}
23,603
#include<stdio.h>
#include<math.h>
#include<cuda.h>

// Matrix multiply kernel: C (MxK) = A (MxN) * B (NxK), all row-major,
// one thread per output element. blockIdx.x/threadIdx.x walk COLUMNS,
// blockIdx.y/threadIdx.y walk ROWS; out-of-range threads exit early.
__global__ void matMul(int *d_a, int *d_b, int *d_c, int M, int N, int K){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(row < M && col < K){
        int sum = 0;
        for(int k = 0; k < N; k++){
            sum += d_a[row*N + k] * d_b[k*K + col];
        }
        d_c[row*K + col] = sum;
    }
}

int main(int argc, char const *argv[])
{
    // const so the host arrays below are ordinary arrays, not VLAs
    // (VLAs are a compiler extension in C++/CUDA, not standard).
    const int N=8, M=10, K=9, TW=4;
    int h_a[M][N], h_b[N][K], h_c[M][K];
    int *d_a, *d_b, *d_c;

    // Fill A (MxN) and B (NxK) with random values in [0,99].
    for(int i=0;i<M;i++){
        for(int j=0;j<N;j++){
            h_a[i][j]=rand()%100;
        }
    }
    for(int i=0;i<N;i++){
        for(int j=0;j<K;j++){
            h_b[i][j]=rand()%100;
        }
    }

    printf("\n A Matrix\n" );
    for(int i=0;i<M;i++){
        for(int j=0;j<N;j++){ printf("%d ",h_a[i][j] ); }
        printf("\n" );
    }
    printf("\n B Matrix\n" );
    for(int i=0;i<N;i++){
        for(int j=0;j<K;j++){ printf("%d ",h_b[i][j] ); }
        printf("\n" );
    }

    // TW x TW thread tiles. BUG FIX: the grid's x extent must cover the
    // K output columns and its y extent the M output rows (the original
    // had them swapped and only worked because ceil(M/TW)==ceil(K/TW)
    // for these particular sizes).
    dim3 dimBlock(TW,TW);
    dim3 dimGrid((K-1)/TW + 1, (M-1)/TW + 1, 1);

    // allocating device memory
    cudaMalloc(&d_a, M*N*sizeof(int));
    cudaMalloc(&d_b, N*K*sizeof(int));
    cudaMalloc(&d_c, M*K*sizeof(int));

    // copying data in device memory
    cudaMemcpy( d_a, h_a, M*N*sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, h_b, N*K*sizeof(int), cudaMemcpyHostToDevice );

    //calling kernel function
    matMul<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,M,N,K);

    // Blocking copy-back synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, M*K*sizeof(int), cudaMemcpyDeviceToHost );

    printf("\n Multiplication of A and B Matrix:\n" );
    for(int i=0;i<M;i++){
        for(int j=0;j<K;j++){ printf("%d ",h_c[i][j] ); }
        printf("\n" );
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
23,604
#include "includes.h"

// Element-wise product over an (nrad x nsec) row-major grid:
//   ExtLabel[i][j] = Dens[i][j] * Label[i][j]
// Expects a 2-D launch where x covers the nsec (inner/column) dimension
// and y covers the nrad (outer/row) dimension.
// NOTE(review): i/j presumably index radial sectors of a disc simulation
// (FARGO-style naming) -- confirm against the caller.
__global__ void ExtQtyKernel (double *ExtLabel, double *Dens, double *Label, int nsec, int nrad)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;   // column within a row
    int i = threadIdx.y + blockDim.y*blockIdx.y;   // row

    // Bounds guard: the grid rarely divides the data evenly.
    if (i<nrad && j<nsec) ExtLabel[i*nsec + j] = Dens[i*nsec + j]*Label[i*nsec + j];
}
23,605
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

// Only used by the disabled templated kernels (reduce5/reduce6) that were
// commented out below; the active kernels size themselves from blockDim.
#define blockSize 128

// reduce0: interleaved addressing with a modulo test. Each thread loads one
// element into shared memory, then pairs are summed at strides 1,2,4,...
// The tid%(2*s) test makes most of each warp idle (highly divergent) --
// this is the naive baseline variant.
__global__ void reduce0(int *g_idata,int *g_odata){
    extern __shared__ int sdata[];
    //each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
    sdata[tid]=g_idata[i];
    __syncthreads();
    //do reduction in shared mem
    for(unsigned int s=1 ;s<blockDim.x; s*=2){
        if(tid%(2*s)==0){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();   // barrier is outside the branch, as required
    }
    // write the result for this block to global mem
    if(tid ==0){
        g_odata[blockIdx.x]=sdata[0];
    }
}

// reduce1: same interleaved scheme but the participating threads are
// compacted (index = 2*s*tid), removing the divergent modulo. The strided
// shared-memory access pattern introduces bank conflicts instead.
__global__ void reduce1(int *g_idata,int *g_odata){
    extern __shared__ int sdata[];
    //each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
    sdata[tid]=g_idata[i];
    __syncthreads();
    //do reduction in shared mem
    for(unsigned int s=1 ;s<blockDim.x; s*=2){
        int index=2*s*tid;
        if(index<blockDim.x){
            sdata[index]+=sdata[index+s];
        }
        __syncthreads();
    }
    // write the result for this block to global mem
    if(tid ==0){
        g_odata[blockIdx.x]=sdata[0];
    }
}

// reduce2: sequential addressing -- the active half of the block adds the
// upper half onto the lower half (stride blockDim/2, /4, ...). Conflict-free
// and non-divergent within the active range.
__global__ void reduce2(int *g_idata,int *g_odata){
    extern __shared__ int sdata[];
    //each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
    sdata[tid]=g_idata[i];
    __syncthreads();
    //do reduction in shared mem
    for(unsigned int s=blockDim.x/2; s>0;s>>=1){
        if(tid<s){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();
    }
    // write the result for this block to global mem
    if(tid ==0){
        g_odata[blockIdx.x]=sdata[0];
    }
}

// reduce3: like reduce2, but each thread performs its first add during the
// global load (two elements per thread), halving the number of blocks needed.
__global__ void reduce3(int *g_idata,int *g_odata){
    extern __shared__ int sdata[];
    //each thread loads TWO elements from global and pre-sums them
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2)+threadIdx.x;
    sdata[tid]=g_idata[i]+g_idata[i+blockDim.x];
    __syncthreads();
    //do reduction in shared mem
    for(unsigned int s=blockDim.x/2; s>0;s>>=1){
        if(tid<s){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();
    }
    // write the result for this block to global mem
    if(tid ==0){
        g_odata[blockIdx.x]=sdata[0];
    }
}

// Unrolled final warp: relies on the `volatile` qualifier and implicit
// warp-synchronous execution instead of __syncthreads().
// NOTE(review): implicit warp synchrony is no longer guaranteed on Volta+
// (independent thread scheduling) -- __syncwarp() or shuffle intrinsics
// would be needed there; confirm the target architecture.
__device__ void warpReduce(volatile int* sdata,int tid ){
    sdata[tid]+=sdata[tid +32];
    sdata[tid]+=sdata[tid +16];
    sdata[tid]+=sdata[tid +8];
    sdata[tid]+=sdata[tid +4];
    sdata[tid]+=sdata[tid +2];
    sdata[tid]+=sdata[tid +1];
}

// reduce4: reduce3 plus the unrolled last warp -- the tree loop stops at
// s==64 and the final 64->1 stage runs without block-level barriers.
__global__ void reduce4(int *g_idata,int *g_odata){
    extern __shared__ int sdata[];
    //each thread loads two elements from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2)+threadIdx.x;
    sdata[tid]=g_idata[i]+g_idata[i+blockDim.x];
    __syncthreads();
    //do reduction in shared mem, stopping once a single warp remains
    for(unsigned int s=blockDim.x/2; s>32;s>>=1){
        if(tid<s){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid<32){ warpReduce(sdata,tid);}
    // write the result for this block to global mem
    if(tid ==0){
        g_odata[blockIdx.x]=sdata[0];
    }
}

/* The fully-unrolled templated variants (reduce5 with a compile-time block
 * size, and reduce6 adding a grid-stride accumulation loop) from the same
 * tutorial series were present here but commented out, together with the
 * per-block-size launch switches in main(); they are omitted as dead code.
 * See NVIDIA's "Optimizing Parallel Reduction in CUDA" for the originals. */

// Driver: builds an array of N = 2^argv[1] alternating 0/1 values, then runs
// each kernel variant as a multi-pass reduction (each pass shrinks the data
// by a factor of Threads, or 2*Threads for reduce3/4) and compares the device
// sum against a host-side reference.
// NOTE(review): argv is used unchecked; the tail-pass logic (the extra
// <<<1,...>>> launch when the remainder drops below Threads) looks fragile
// for sizes that are not powers of Threads -- verify before reuse. Device
// and host buffers are never freed (leaks at exit).
int main(int argc, char **argv) {
    int N= 1<<atoi(argv[1]);
    int Blocks= atoi(argv[2]);
    int Threads= atoi(argv[3]);
    printf("Doing a parallel reduction for N= %d NumberOfBlocks=%d Threads=%d \n",N,Blocks,Threads );
    // Define input output
    int *h_idata;
    int *g_idata,*g_odata;
    int d_output;
    h_idata=(int *)malloc( sizeof(int)*N);
    for(unsigned int i=0 ;i<N;i++){
        h_idata[i]=i%2;   // alternating 0,1 -> expected sum N/2
    }
    cudaMalloc(&g_idata, N*sizeof(int));
    cudaMalloc(&g_odata, Blocks*sizeof(int));
    cudaMemcpy(g_idata, h_idata, N*sizeof(int), cudaMemcpyHostToDevice);
    Blocks=N/Threads;   // overrides the command-line block count

    // ---- reduce0: first pass reads g_idata, later passes fold g_odata in place
    int Level=Threads;
    while(N/Level>0){
        if(Level==Threads){
            reduce0<<<N/Level,Threads,Threads*sizeof(int)>>>(g_idata,g_odata);
        }else{
            reduce0<<<N/Level,Threads,Threads*sizeof(int)>>>(g_odata,g_odata);
        }
        cudaDeviceSynchronize();
        if(N/Level<Threads){
            // tail pass: fewer than Threads partials remain -> one small block
            reduce0<<<1,N/Level,N/Level*sizeof(int)>>>(g_odata,g_odata);
        }
        Level=Level*Threads;
    }
    cudaMemcpy(&d_output, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    // host reference sum, reused for all later comparisons
    int s=0;
    for(unsigned int i=0;i<N;i++){
        s=s+h_idata[i];
    }
    printf("reduce0 Device %d Host %d\n",d_output,s );

    // ---- reduce1
    Level=Threads;
    while(N/Level>0){
        if(Level==Threads){
            reduce1<<<N/Level,Threads,Threads*sizeof(int)>>>(g_idata,g_odata);
        }else{
            reduce1<<<N/Level,Threads,Threads*sizeof(int)>>>(g_odata,g_odata);
        }
        cudaDeviceSynchronize();
        if(N/Level<Threads){
            reduce1<<<1,N/Level,N/Level*sizeof(int)>>>(g_odata,g_odata);
        }
        Level=Level*Threads;
    }
    cudaMemcpy(&d_output, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    printf("reduce1 Device %d Host %d\n",d_output,s );

    // ---- reduce2
    Level=Threads;
    while(N/Level>0){
        if(Level==Threads){
            reduce2<<<N/Level,Threads,Threads*sizeof(int)>>>(g_idata,g_odata);
        }else{
            reduce2<<<N/Level,Threads,Threads*sizeof(int)>>>(g_odata,g_odata);
        }
        cudaDeviceSynchronize();
        if(N/Level<Threads){
            reduce2<<<1,N/Level,N/Level*sizeof(int)>>>(g_odata,g_odata);
        }
        Level=Level*Threads;
    }
    cudaMemcpy(&d_output, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    printf("reduce2 Device %d Host %d\n",d_output,s );

    // ---- reduce3: each block consumes 2*Threads inputs, hence the extra
    // Level=Level*2 step in the pass accounting
    Level=Threads;
    while(N/Level>0){
        if(Level==Threads){
            reduce3<<<N/(Level*2),Threads,Threads*sizeof(int)>>>(g_idata,g_odata);
        }else {
            reduce3<<<N/(Level*2),Threads,Threads*sizeof(int)>>>(g_odata,g_odata);
        }
        cudaDeviceSynchronize();
        Level=Level*2;
        if(N/Level<=Threads){
            reduce3<<<1,N/(Level*2),N/(Level*2)*sizeof(int)>>>(g_odata,g_odata);
        }
        Level=Level*Threads;
    }
    cudaMemcpy(&d_output, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    printf("reduce3 Device %d Host %d\n",d_output,s );

    // ---- reduce4: same pass structure as reduce3
    Level=Threads;
    while(N/Level>0){
        if(Level==Threads){
            reduce4<<<N/(Level*2),Threads,Threads*sizeof(int)>>>(g_idata,g_odata);
        }else {
            reduce4<<<N/(Level*2),Threads,Threads*sizeof(int)>>>(g_odata,g_odata);
        }
        cudaDeviceSynchronize();
        Level=Level*2;
        if(N/Level<=Threads){
            reduce4<<<1,N/(Level*2),N/(Level*2)*sizeof(int)>>>(g_odata,g_odata);
        }
        Level=Level*Threads;
    }
    cudaMemcpy(&d_output, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    printf("reduce4 Device %d Host %d\n",d_output,s );

    /* (commented-out reduce5 driver with per-block-size template dispatch
     * switches removed as dead code -- see note above the main() header) */
    return 0;
}
23,606
/* The MIT License (MIT) * * Copyright (c) 2013 Johannes Reinhardt <jreinhardt@ist-dein-freund.de> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 * */

/* Monte Carlo Sudoku solver in CUDA.
 *
 * N_THREADS independent Metropolis chains each hold one candidate filling of
 * the unknown cells; the "energy" of a filling counts constraint violations
 * (duplicates / missing symbols per row, column and block), and random
 * single-cell changes are accepted or rejected via a fixed-temperature
 * exp(-dE/T) lookup table. Energy 0 means a solved grid. */
#include <stdio.h>
#include <malloc.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
//this must be smaller than 16 (but is realisticly 3 or 4 or maybe 5)
#define BLK_SIZE 3
#define SUD_SIZE (BLK_SIZE*BLK_SIZE)
#define N_ENTRIES (SUD_SIZE*SUD_SIZE)
#define THREADS_PER_BLOCK 256
#define N_BLOCKS 256
#define N_THREADS (THREADS_PER_BLOCK*N_BLOCKS)
#define N_STEPS 100

// Standard CUDA error-check wrapper: prints file/line and exits on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

//TODO: Why does this not work
//__constant__ int lut_exp[256];

/* A sudoku in a form suitable for fast per-thread access. Two parts:
 * a constant structure (positions of unknowns and hints, shared by all
 * chains) and a per-chain variable part (unknown values, current energy,
 * PRNG state, counting scratch). Per-thread data is laid out so that
 * index [i][thread_idx] puts consecutive threads on consecutive addresses
 * (coalescing-friendly). */
typedef struct {
    // Constant part, applies to all chains:
    // For unknown #u: the row / column / 3x3-block it belongs to.
    char* unk_row;
    char* unk_col;
    char* unk_blk;
    // Hint occurrence counts per entity and symbol.
    // NOTE(review): add_hin() below fills these as hnt_*[val*SUD_SIZE+entity],
    // while mc_step() copies hnt_*[i] straight into cnt_*[i] whose increments
    // use entity*SUD_SIZE+val -- the two layouts are transposed relative to
    // each other. Verify whether this is a latent bug or deliberate.
    char* hnt_row;
    char* hnt_col;
    char* hnt_blk;
    //Number of unknowns
    unsigned int n_unk;

    // Variable part: per-chain state.
    //The energy of the current configuration
    unsigned int* energy;
    //prng state (simple LCG, see mc_step)
    unsigned int* random;
    //unk_val[u][thread] = current value of unknown u in chain `thread`
    char** unk_val;
    //Scratch counters: cnt_*[entity*SUD_SIZE + sym][thread]
    char** cnt_row;
    char** cnt_col;
    char** cnt_blk;
} sudoku_t;

/* One Metropolis step for chain `thread_idx`: pick a random unknown and a
 * random new value, recount all constraint violations from scratch, and
 * accept or revert based on the energy change and the exp LUT.
 * Runs on both host and device (sudoku_t is passed by value; its pointers
 * must belong to the executing side). */
__device__ __host__ void mc_step(sudoku_t sud, int* lut_exp, unsigned int thread_idx){
    char val,old_val;
    unsigned int energy,rnd;
    int i,delta,idx;

    //generate a new random number (Numerical-Recipes LCG constants)
    rnd = sud.random[thread_idx];
    rnd = 1664525*rnd + 1013904223;
    sud.random[thread_idx] = rnd;

    //dissect the random number: cell index, new value, acceptance bits
    idx = (rnd >> 0) % sud.n_unk;
    val = (rnd >> 8) % SUD_SIZE;
    rnd = (rnd >> 16);

    //save old value, put new value in
    //TODO: Suboptimal access pattern
    old_val = sud.unk_val[idx][thread_idx];
    sud.unk_val[idx][thread_idx] = val;

    //seed the counters with the hint counts
    for(i=0;i<N_ENTRIES;i++){
        sud.cnt_row[i][thread_idx] = sud.hnt_row[i];
        sud.cnt_col[i][thread_idx] = sud.hnt_col[i];
        sud.cnt_blk[i][thread_idx] = sud.hnt_blk[i];
    }

    //count the unknowns TODO: Suboptimal access pattern.
    for(i=0;i<sud.n_unk;i++){
        sud.cnt_row[sud.unk_row[i]*SUD_SIZE + sud.unk_val[i][thread_idx]][thread_idx]++;
        sud.cnt_col[sud.unk_col[i]*SUD_SIZE + sud.unk_val[i][thread_idx]][thread_idx]++;
        sud.cnt_blk[sud.unk_blk[i]*SUD_SIZE + sud.unk_val[i][thread_idx]][thread_idx]++;
    }

    //energy: each (entity,symbol) contributes (count-1) duplicates, or 1 if missing
    energy = 0;
    for(i=0;i<N_ENTRIES;i++){
        energy += (sud.cnt_row[i][thread_idx] == 0) ? 1 : sud.cnt_row[i][thread_idx] - 1;
        energy += (sud.cnt_col[i][thread_idx] == 0) ? 1 : sud.cnt_col[i][thread_idx] - 1;
        energy += (sud.cnt_blk[i][thread_idx] == 0) ? 1 : sud.cnt_blk[i][thread_idx] - 1;
    }

    delta = energy - sud.energy[thread_idx];
    delta = delta > 255 ? 255 : delta;   // clamp for the LUT (re-clamped below, redundant)
    // Metropolis test: reject uphill moves when exp(-dE/T) loses against the
    // 16 leftover random bits.
    // NOTE(review): the (energy == 0) term makes a SOLVING move take the
    // revert branch too (old value restored, stored energy untouched) --
    // looks wrong, or relies on never overwriting a solved state; verify.
    if(((delta > 0) && (lut_exp[delta > 255 ? 255 : delta] < rnd)) || (energy == 0)){
        //discard step TODO: suboptimal access pattern
        sud.unk_val[idx][thread_idx] = old_val;
    } else {
        sud.energy[thread_idx] = energy;
    }
}

/* Allocate and initialize a host-side sudoku_t for n_unk unknowns:
 * per-chain PRNG seeds, random initial unknown values, and a huge initial
 * energy so the first step is always accepted.
 * NOTE(review): the lut_exp parameter is never used here. */
void sudoku_alloc_host(sudoku_t* sud, int* lut_exp, unsigned int n_unk){
    int i,j;
    sud->unk_row = (char*) malloc(n_unk*sizeof(char));
    sud->unk_col = (char*) malloc(n_unk*sizeof(char));
    sud->unk_blk = (char*) malloc(n_unk*sizeof(char));
    sud->hnt_row = (char*) malloc(N_ENTRIES*sizeof(char));
    sud->hnt_col = (char*) malloc(N_ENTRIES*sizeof(char));
    sud->hnt_blk = (char*) malloc(N_ENTRIES*sizeof(char));
    sud->n_unk = n_unk;
    sud->energy = (unsigned int*) malloc(N_THREADS*sizeof(unsigned int));
    sud->random = (unsigned int*) malloc(N_THREADS*sizeof(unsigned int));
    sud->unk_val = (char**) malloc(n_unk*sizeof(char*));
    for(i=0;i<n_unk;i++){
        sud->unk_val[i] = (char*) malloc(N_THREADS*sizeof(char));
    }
    sud->cnt_row = (char**) malloc(N_ENTRIES*sizeof(char*));
    sud->cnt_col = (char**) malloc(N_ENTRIES*sizeof(char*));
    sud->cnt_blk = (char**) malloc(N_ENTRIES*sizeof(char*));
    for(i=0;i<N_ENTRIES;i++){
        sud->cnt_row[i] = (char*) malloc(N_THREADS*sizeof(char));
        sud->cnt_col[i] = (char*) malloc(N_THREADS*sizeof(char));
        sud->cnt_blk[i] = (char*) malloc(N_THREADS*sizeof(char));
    }
    //Setup a simple prng (coefficients from Numerical recipies)
    for(i=0;i<N_THREADS;i++){
        sud->random[i] = 1664525*i + 1013904223;
        //run it a few times
        for(j=0;j<5;j++){
            sud->random[i] = 1664525*sud->random[i] + 1013904223;
        }
    }
    //Fill the unknowns with random symbols
    for(i=0;i<sud->n_unk;i++){
        for(j=0;j<N_THREADS;j++){
            sud->unk_val[i][j] = sud->random[j] % SUD_SIZE;
            sud->random[j] = 1664525*sud->random[j] + 1013904223;
        }
    }
    //Set energy to a really high arbitrary value, this will cause the first step to be accepted
    for(i=0;i<N_THREADS;i++){
        sud->energy[i] = 1<<20;
    }
}

// Release every host allocation made by sudoku_alloc_host.
void sudoku_free_host(sudoku_t* sud){
    int i;
    free(sud->unk_row);
    free(sud->unk_col);
    free(sud->unk_blk);
    free(sud->hnt_row);
    free(sud->hnt_col);
    free(sud->hnt_blk);
    free(sud->energy);
    free(sud->random);
    for(i=0; i<sud->n_unk;i++){
        free(sud->unk_val[i]);
    }
    free(sud->unk_val);
    for(i=0;i<N_ENTRIES;i++){
        free(sud->cnt_row[i]);
        free(sud->cnt_col[i]);
        free(sud->cnt_blk[i]);
    }
    free(sud->cnt_row);
    free(sud->cnt_col);
    free(sud->cnt_blk);
}

/* Release device allocations. The nested arrays hold device pointers stored
 * in device memory, so each pointer table is copied back to a host temp
 * before its entries can be cudaFree'd. */
void sudoku_free_device(sudoku_t* sud){
    int i;
    char **tmp_val, **tmp_row, **tmp_col, **tmp_blk;
    cudaFree(sud->unk_row);
    cudaFree(sud->unk_col);
    cudaFree(sud->unk_blk);
    cudaFree(sud->hnt_row);
    cudaFree(sud->hnt_col);
    cudaFree(sud->hnt_blk);
    cudaFree(sud->energy);
    cudaFree(sud->random);
    tmp_val = (char**) malloc(sud->n_unk*sizeof(char*));
    cudaMemcpy(tmp_val,sud->unk_val,sud->n_unk*sizeof(char*),cudaMemcpyDeviceToHost);
    for(i=0; i<sud->n_unk;i++){
        cudaFree(tmp_val[i]);
    }
    free(tmp_val);
    cudaFree(sud->unk_val);
    tmp_col = (char**) malloc(N_ENTRIES*sizeof(char*));
    tmp_row = (char**) malloc(N_ENTRIES*sizeof(char*));
    tmp_blk = (char**) malloc(N_ENTRIES*sizeof(char*));
    cudaMemcpy(tmp_row,sud->cnt_row,N_ENTRIES*sizeof(char*),cudaMemcpyDeviceToHost);
    cudaMemcpy(tmp_col,sud->cnt_col,N_ENTRIES*sizeof(char*),cudaMemcpyDeviceToHost);
    cudaMemcpy(tmp_blk,sud->cnt_blk,N_ENTRIES*sizeof(char*),cudaMemcpyDeviceToHost);
    for(i=0;i<N_ENTRIES;i++){
        cudaFree(tmp_row[i]);
        cudaFree(tmp_col[i]);
        cudaFree(tmp_blk[i]);
    }
    free(tmp_col);
    free(tmp_row);
    free(tmp_blk);
    cudaFree(sud->cnt_row);
    cudaFree(sud->cnt_col);
    cudaFree(sud->cnt_blk);
}

/* Mirror a host sudoku_t on the device. The inner arrays are allocated via
 * host-side temp pointer tables (cudaMalloc results cannot be written
 * directly into device memory), then every constant and variable buffer is
 * copied over. NOTE(review): tmp_* tables are freed but never the error
 * paths; also nothing here is ever unwound on gpuErrchk failure (it exits). */
void sudoku_alloc_and_copy_device(sudoku_t* d_sud, sudoku_t* h_sud){
    int i;
    char **tmp_val, **tmp_row, **tmp_col, **tmp_blk;
    int n_unk = h_sud->n_unk;
    //alloc
    gpuErrchk(cudaMalloc(&d_sud->unk_row,n_unk*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_sud->unk_col,n_unk*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_sud->unk_blk,n_unk*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_sud->hnt_row,N_ENTRIES*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_sud->hnt_col,N_ENTRIES*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_sud->hnt_blk,N_ENTRIES*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_sud->energy, N_THREADS*sizeof(unsigned int)));
    gpuErrchk(cudaMalloc(&d_sud->random, N_THREADS*sizeof(unsigned int)));
    //Nested array setup is a bit annoying. The result of cudaMalloc can not
    //directly written to device memory, so we have to cache it in tmp and then
    //copy it to the device
    gpuErrchk(cudaMalloc(&d_sud->unk_val, n_unk*sizeof(char*)));
    tmp_val = (char**) malloc(n_unk*sizeof(char*));
    for(i=0;i<n_unk;i++){
        gpuErrchk(cudaMalloc(&tmp_val[i],N_THREADS*sizeof(char)));
    }
    gpuErrchk(cudaMemcpy(d_sud->unk_val,tmp_val,n_unk*sizeof(char*),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_sud->cnt_row,N_ENTRIES*sizeof(char*)));
    gpuErrchk(cudaMalloc(&d_sud->cnt_col,N_ENTRIES*sizeof(char*)));
    gpuErrchk(cudaMalloc(&d_sud->cnt_blk,N_ENTRIES*sizeof(char*)));
    tmp_row = (char**) malloc(N_ENTRIES*sizeof(char*));
    tmp_col = (char**) malloc(N_ENTRIES*sizeof(char*));
    tmp_blk = (char**) malloc(N_ENTRIES*sizeof(char*));
    for(i=0;i<N_ENTRIES;i++){
        gpuErrchk(cudaMalloc(&tmp_row[i],N_THREADS*sizeof(char)));
        gpuErrchk(cudaMalloc(&tmp_col[i],N_THREADS*sizeof(char)));
        gpuErrchk(cudaMalloc(&tmp_blk[i],N_THREADS*sizeof(char)));
    }
    gpuErrchk(cudaMemcpy(d_sud->cnt_row,tmp_row, N_ENTRIES*sizeof(char*),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->cnt_col,tmp_col, N_ENTRIES*sizeof(char*),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->cnt_blk,tmp_blk, N_ENTRIES*sizeof(char*),cudaMemcpyHostToDevice));
    //copy
    //constant
    d_sud->n_unk = h_sud->n_unk;
    gpuErrchk(cudaMemcpy(d_sud->unk_row,h_sud->unk_row,sizeof(char)*n_unk,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->unk_col,h_sud->unk_col,sizeof(char)*n_unk,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->unk_blk,h_sud->unk_blk,sizeof(char)*n_unk,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->hnt_row,h_sud->hnt_row,sizeof(char)*N_ENTRIES,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->hnt_col,h_sud->hnt_col,sizeof(char)*N_ENTRIES,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->hnt_blk,h_sud->hnt_blk,sizeof(char)*N_ENTRIES,cudaMemcpyHostToDevice));
    //variable
    gpuErrchk(cudaMemcpy(d_sud->energy,h_sud->energy,sizeof(unsigned int)*N_THREADS,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_sud->random,h_sud->random,sizeof(unsigned int)*N_THREADS,cudaMemcpyHostToDevice));
    for(i=0;i<h_sud->n_unk;i++){
        gpuErrchk(cudaMemcpy(tmp_val[i],h_sud->unk_val[i],sizeof(char)*N_THREADS,cudaMemcpyHostToDevice));
    }
    for(i=0;i<N_ENTRIES;i++){
        gpuErrchk(cudaMemcpy(tmp_row[i],h_sud->cnt_row[i],sizeof(char)*N_THREADS,cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(tmp_col[i],h_sud->cnt_col[i],sizeof(char)*N_THREADS,cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(tmp_blk[i],h_sud->cnt_blk[i],sizeof(char)*N_THREADS,cudaMemcpyHostToDevice));
    }
    free(tmp_val);
    free(tmp_row);
    free(tmp_col);
    free(tmp_blk);
}

// One MC step per GPU thread; expects a 1-D launch covering N_THREADS.
__global__ void step_gpu(sudoku_t sud, int* lut_exp){
    unsigned int thread_idx = blockIdx.x*blockDim.x + threadIdx.x;
    mc_step(sud, lut_exp, thread_idx);
}

// CPU reference: runs the same step sequentially for every chain.
void step_cpu(sudoku_t sud, int* lut_exp){
    unsigned int thread_idx;
    for(thread_idx=0; thread_idx < N_THREADS; thread_idx++){
        mc_step(sud, lut_exp, thread_idx);
    }
}

// Register cell (i,j) as an unknown: record its row/col/block and bump n_unk.
void add_unk(sudoku_t* sud, int i, int j){
    int blk = (i / BLK_SIZE)*BLK_SIZE + (j / BLK_SIZE);
    sud->unk_row[sud->n_unk] = i;
    sud->unk_col[sud->n_unk] = j;
    sud->unk_blk[sud->n_unk] = blk;
    sud->n_unk++;
}

// Register a hint of value `val` (0-based) at cell (i,j).
// NOTE(review): indexes as [val*SUD_SIZE + entity] -- transposed relative to
// the cnt_* layout in mc_step; see the struct comment.
void add_hin(sudoku_t* sud, int i, int j, int val){
    int blk = (i / BLK_SIZE)*BLK_SIZE + (j / BLK_SIZE);
    sud->hnt_row[val*SUD_SIZE + i]++;
    sud->hnt_col[val*SUD_SIZE + j]++;
    sud->hnt_blk[val*SUD_SIZE + blk]++;
}

// Load a hard-coded 9x9 puzzle (0 = unknown, 1..9 = hint).
void hint1(sudoku_t* sud){
    sud->n_unk = 0;
    int i,j;
    for(i=0;i<SUD_SIZE*SUD_SIZE;i++){
        sud->hnt_row[i] = 0;
        sud->hnt_col[i] = 0;
        sud->hnt_blk[i] = 0;
    }
    int vals[81] = {
        0, 9, 0, 7, 0, 3, 0, 6, 0,
        0, 5, 0, 4, 0, 8, 0, 1, 0,
        1, 0, 8, 0, 5, 0, 9, 0, 4,
        7, 0, 0, 1, 8, 2, 0, 0, 3,
        0, 4, 2, 0, 6, 0, 5, 8, 0,
        3, 0, 0, 9, 4, 5, 0, 0, 2,
        8, 0, 6, 0, 7, 0, 1, 0, 9,
        0, 2, 0, 8, 0, 1, 0, 5, 0,
        0, 1, 0, 6, 0, 9, 0, 4, 0};
    for(i=0;i<SUD_SIZE;i++){
        for(j=0;j<SUD_SIZE;j++){
            if(vals[SUD_SIZE*i+j] == 0){
                add_unk(sud,i,j);
            } else {
                add_hin(sud,i,j,vals[SUD_SIZE*i+j]-1);   // hints stored 0-based
            }
        }
    }
}

// Completely empty grid: every cell is an unknown, no hints.
void empty(sudoku_t* sud){
    sud->n_unk = 0;
    int i,j;
    for(i=0;i<SUD_SIZE*SUD_SIZE;i++){
        sud->hnt_row[i] = 0;
        sud->hnt_col[i] = 0;
        sud->hnt_blk[i] = 0;
    }
    for(i=0;i<SUD_SIZE;i++){
        for(j=0;j<SUD_SIZE;j++){
            add_unk(sud,i,j);
        }
    }
}

// Load a hard-coded 16x16 puzzle (only meaningful when BLK_SIZE==4).
void hint16(sudoku_t* sud){
    sud->n_unk = 0;
    int i,j;
    for(i=0;i<SUD_SIZE*SUD_SIZE;i++){
        sud->hnt_row[i] = 0;
        sud->hnt_col[i] = 0;
        sud->hnt_blk[i] = 0;
    }
    int vals[256] = {
        0, 2, 0, 1, 0, 9, 0, 0, 0, 0, 11, 0, 0, 15, 14, 0,
        0, 13, 11, 0, 0, 0, 0, 0, 9, 0, 7, 0, 16, 8, 0, 12,
        4, 0, 10, 7, 15, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 9,
        0, 0, 0, 0, 10, 0, 0, 12, 13, 0, 0, 0, 0, 4, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0, 0, 16, 9, 11, 0, 0, 0, 0,
        15, 5, 0, 16, 0, 0, 0, 4, 1, 0, 3, 0, 0, 10, 0, 0,
        1, 0, 9, 12, 13, 15, 0, 0, 0, 6, 10, 0, 4, 0, 0, 0,
        0, 11, 0, 0, 2, 6, 0, 0, 0, 0, 0, 0, 13, 0, 9, 8,
        7, 10, 0, 6, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 8, 0,
        0, 0, 0, 11, 0, 13, 7, 0, 0, 0, 6, 10, 1, 12, 0, 4,
        0, 0, 12, 0, 0, 5, 0, 16, 7, 0, 0, 0, 11, 0, 10, 13,
        0, 0, 0, 0, 6, 8, 14, 0, 0, 0, 0, 0, 0, 0, 0, 2,
        0, 0, 3, 0, 0, 0, 0, 13, 16, 0, 0, 12, 0, 0, 0, 0,
        13, 0, 0, 4, 0, 0, 0, 0, 11, 0, 0, 14, 2, 1, 0, 3,
        9, 0, 2, 5, 0, 3, 0, 15, 0, 0, 0, 0, 0, 14, 7, 0,
        0, 7, 1, 0, 0, 14, 0, 0, 0, 0, 5, 0, 8, 0, 16, 0};
    for(i=0;i<SUD_SIZE;i++){
        for(j=0;j<SUD_SIZE;j++){
            if(vals[SUD_SIZE*i+j] == 0){
                add_unk(sud,i,j);
            } else {
                add_hin(sud,i,j,vals[SUD_SIZE*i+j]-1);
            }
        }
    }
}

// Print mean/min/max energy across all chains for one round (carriage-return
// overwrite, so the line updates in place).
void energy_stats(int round, unsigned int *energies, unsigned int n_threads){
    int max_energy=0, min_energy=100000, total_energy=0;
    int i;
    for(i=0;i<n_threads;i++){
        max_energy = (energies[i] > max_energy) ? energies[i] : max_energy;
        min_energy = (energies[i] < min_energy) ? energies[i] : min_energy;
        total_energy += energies[i];
    }
    printf("Round: %03d Mean: %3.2f Min: %03d Max: %03d\r",round,float(total_energy)/n_threads, min_energy, max_energy);
    fflush(stdout);
}

/* Driver: builds the exp(-dE/T) lookup table, sets up the puzzle (empty grid
 * for 9x9, 16x16 hints otherwise), runs the GPU chains, then the CPU
 * reference, printing energy statistics each round.
 * NOTE(review): the GPU loop runs 15000 iterations but n_steps is computed
 * from N_STEPS (=100), so the reported GPU Steps/s is inconsistent with the
 * work actually performed. Timing also mixes clock_t with float. The
 * gpuErrchk(cudaMemcpy(...,cudaMemcpyHostToHost)) in the CPU loop is just a
 * host-to-host copy through the CUDA runtime. */
int main(){
    int i,j;
    long int n_steps;
    float time;
    int *h_lut_exp, *d_lut_exp;
    h_lut_exp = (int*) malloc(256*sizeof(int));
    gpuErrchk(cudaMalloc(&d_lut_exp,256*sizeof(int)));
    unsigned int* energies;
    energies = (unsigned int*) malloc(N_THREADS*sizeof(unsigned int));
    dim3 threads_per_block(THREADS_PER_BLOCK);
    dim3 num_blocks(N_BLOCKS);
    sudoku_t d_sudoku, h_sudoku;

    //set up exp(delta_E/T) lut, scaled to 16.16 fixed point
    //256 is a safe upper bound for SUD_SIZE 9 and we clamp delta E for
    //SUD_SIZE 16. But anyway, for useful temperatures it is zero very quickly
    //anyway
    float temp = 0.7;
    for(i=0;i<256;i++){
        h_lut_exp[i] = exp(-i/temp)*(1<<16);
    }
    gpuErrchk(cudaMemcpy(d_lut_exp,h_lut_exp,256*sizeof(int),cudaMemcpyHostToDevice));

    //set up sudoku description
    if(SUD_SIZE==9){
        sudoku_alloc_host(&h_sudoku,h_lut_exp,81);
        empty(&h_sudoku);
    } else {
        sudoku_alloc_host(&h_sudoku, h_lut_exp, 165);
        hint16(&h_sudoku);
    }
    printf("Unknowns: %d\n",h_sudoku.n_unk);
    sudoku_alloc_and_copy_device(&d_sudoku,&h_sudoku);
    n_steps = N_THREADS*N_STEPS;

    //GPU Run
    time = clock();
    for(j=0;j<15000;j++){
        step_gpu<<<num_blocks,threads_per_block>>>(d_sudoku,d_lut_exp);
        gpuErrchk(cudaMemcpy(energies,d_sudoku.energy,N_THREADS*sizeof(unsigned int),cudaMemcpyDeviceToHost));
        energy_stats(j,energies,N_THREADS);
    }
    time = (clock() - time)/CLOCKS_PER_SEC;
    printf("\nGPU: Steps: %ld, Time: %f s, %.1f Steps/s\n",n_steps,time,n_steps/time);

    //CPU Run
    time = clock();
    for(j=0;j<N_STEPS;j++){
        step_cpu(h_sudoku,h_lut_exp);
        gpuErrchk(cudaMemcpy(energies,h_sudoku.energy,N_THREADS*sizeof(unsigned int),cudaMemcpyHostToHost));
        energy_stats(j,energies,N_THREADS);
    }
    time = (clock() - time)/CLOCKS_PER_SEC;
    printf("\nCPU: Steps: %ld, Time: %f s, %.1f Steps/s\n",n_steps,time,n_steps/time);

    sudoku_free_host(&h_sudoku);
    sudoku_free_device(&d_sudoku);
    free(energies);
    cudaFree(d_lut_exp);
}
23,607
#include "includes.h" __global__ void calcConvolutionForwardPaddedInGPU( float *in, float *padded_in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int padding) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if( id < batch_size * in_size_x * in_size_y * in_size_z ){ int in_index = id; int x = id % in_size_x; id /= in_size_x; int y = id % in_size_y; id /= in_size_y; int z = id % in_size_z; id /= in_size_z; int b = id; int pad_index = b * (in_size_z * (in_size_x + 2*padding) * (in_size_y + 2*padding) ) + z * ((in_size_x + 2*padding) * (in_size_y + 2*padding)) + (y+padding) * (in_size_x + 2*padding) + (x+padding) ; padded_in[pad_index] = in[in_index]; } /* original code for ( int b = 0; b < in.size.b; ++b ){ for ( int z = 0; z < in.size.z; ++z ){ for ( int y = 0; y < in.size.y; ++y ){ for ( int x = 0; x < in.size.x; ++x ){ padded_in( b, padding+x, padding+y, z ) = in( b, x, y, z ); } } } } */ }
23,608
#include "includes.h" __global__ void prova3() { //auto A = NQfrontier<32>(F_array, 5, Adj_array); //for (auto it : A) //Ouptput[threadIdx.x] = it.start; // printf("threadIdx.x %d \t %d\n", threadIdx.x, it.end); //printf("threadIdx.x %d \t %d\n", threadIdx.x, (*A.begin()).start); }
23,609
#include "includes.h"

// Signed point-to-plane residual of the disparity-reconstructed point
// (Xd,Yd,Zd) against the model point (Xm,Ym,Zm) with normal (nx,ny,nz),
// after applying the incremental motion (T0..T2 translation, R0..R2
// small-angle rotation terms); scaled by fx*b/Zm^2 to convert metric
// distance into (stereo) pixel units. Returns the absolute value.
__device__ static float disp_absolute_residual(float Xd, float Yd, float Zd, float Xm, float Ym, float Zm, float nx, float ny, float nz, float T0, float T1, float T2, float R0, float R1, float R2, float fx, float b)
{
    float r = -Xd * nx + Xm * nx - Yd * ny + Ym * ny - Zd * nz + Zm * nz +
              nx * T0 + ny * T1 + nz * T2 + Xm * ny * R2 - Xm * nz * R1 -
              Ym * nx * R2 + Ym * nz * R0 + Zm * nx * R1 - Zm * ny * R0;
    // weight to convert distance units to pixels
    r *= fx * b / (Zm * Zm);
    return fabsf(r);
}

// Accumulates weighted normal equations for point-to-plane registration from
// disparity data: 21 unique entries of the symmetric 6x6 A matrix plus 6
// entries of the b vector = 27 accumulators. blockIdx.y selects a segment;
// each thread sums a strided share of that segment's samples, applying a
// robust (Tukey-biweight-shaped) weight, and writes its 27 partial sums into
// d_CD with stride n_val_accum per entry, for a later reduction pass.
__global__ void normal_eqs_disparity_weighted_GPU(
    float *d_CD, const float *d_disparity_compact,
    const float4 *d_Zbuffer_normals_compact, const int *d_ind_disparity_Zbuffer,
    float fx, float fy, float ox, float oy, float b, int n_cols,
    const int *d_n_values_disparity, const int *d_start_ind_disparity,
    const float *d_abs_res_scales, float w_disp, const float *d_dTR)
{
    int n_val_accum = gridDim.x * blockDim.x;
    // n_val_accum may not be multiple of blocksize
    int n_disparity = d_n_values_disparity[blockIdx.y];
    int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
    int start_ind = d_start_ind_disparity[blockIdx.y];

    // initialize accumulators
    float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
          A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
          A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f,
          A17 = 0.0f, A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f,
          A22 = 0.0f, A23 = 0.0f, A24 = 0.0f, A25 = 0.0f, A26 = 0.0f;

    // Each thread handles n_accum samples, strided by blockDim.x.
    for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
         in_ind < blockDim.x * (blockIdx.x + 1) * n_accum;
         in_ind += blockDim.x) {
        if (in_ind < n_disparity) { // is this a valid sample?
            // fetch disparity, Zbuffer and normal from global memory
            float disp = d_disparity_compact[in_ind + start_ind];
            float4 tmp = d_Zbuffer_normals_compact[in_ind + start_ind];
            float Zbuffer = tmp.x;
            float nx = tmp.y;
            float ny = tmp.z;
            float nz = tmp.w;

            // compute coordinates (normalized image coordinates)
            int pixel_ind = d_ind_disparity_Zbuffer[in_ind + start_ind];
            float y = floorf(__fdividef((float)pixel_ind, n_cols));
            float x = (float)pixel_ind - y * n_cols;
            x = __fdividef((x - ox), fx);
            y = __fdividef((y - oy), fy);

            // reconstruct 3D point from disparity
            float Zd = -(fx * b) / disp; // arbitrary use of fx
            float Xd = x * Zd;
            float Yd = y * Zd;

            // reconstruct 3D point from model
            float Zm = Zbuffer;
            float Xm = x * Zm;
            float Ym = y * Zm;

            // determine M-estimation weight
            // disparity residual weighed by rel. importance disp vs flow
            int s6 = blockIdx.y * 6;
            float w = w_disp * disp_absolute_residual(
                Xd, Yd, Zd, Xm, Ym, Zm, nx, ny, nz, d_dTR[s6],
                d_dTR[s6 + 1], d_dTR[s6 + 2], d_dTR[s6 + 3], d_dTR[s6 + 4],
                d_dTR[s6 + 5], fx, b);
            w /= d_abs_res_scales[blockIdx.y];
            // Tukey-style taper: (1 - w^2)^2 inside the cutoff, 0 outside.
            w = (w > 1) ? 0 : (1.0f - 2.0f * w * w + w * w * w * w);
            // multiply m estimation weight with distance->pixel conversion weight
            // (squared)
            w *= (fx * fx * b * b) / (Zm * Zm * Zm * Zm);

            /************************/
            /* evaluate constraints */
            /************************/
            // unique values A-matrix
            A0 += w * (nx * nx);
            A1 += w * (nx * ny);
            A2 += w * (nx * nz);
            A3 += w * (Ym * nx * nz - Zm * nx * ny);
            A4 += w * (Zm * (nx * nx) - Xm * nx * nz);
            A5 += w * (-Ym * (nx * nx) + Xm * nx * ny);
            A6 += w * (ny * ny);
            A7 += w * (ny * nz);
            A8 += w * (-Zm * (ny * ny) + Ym * ny * nz);
            A9 += w * (-Xm * ny * nz + Zm * nx * ny);
            A10 += w * (Xm * (ny * ny) - Ym * nx * ny);
            A11 += w * (nz * nz);
            A12 += w * (Ym * (nz * nz) - Zm * ny * nz);
            A13 += w * (-Xm * (nz * nz) + Zm * nx * nz);
            A14 += w * (Xm * ny * nz - Ym * nx * nz);
            A15 += w * ((Ym * Ym) * (nz * nz) + (Zm * Zm) * (ny * ny) -
                        Ym * Zm * ny * nz * 2.0f);
            A16 += w * (-Xm * Ym * (nz * nz) - (Zm * Zm) * nx * ny +
                        Xm * Zm * ny * nz + Ym * Zm * nx * nz);
            A17 += w * (-Xm * Zm * (ny * ny) - (Ym * Ym) * nx * nz +
                        Xm * Ym * ny * nz + Ym * Zm * nx * ny);
            A18 += w * ((Xm * Xm) * (nz * nz) + (Zm * Zm) * (nx * nx) -
                        Xm * Zm * nx * nz * 2.0f);
            A19 += w * (-Ym * Zm * (nx * nx) - (Xm * Xm) * ny * nz +
                        Xm * Ym * nx * nz + Xm * Zm * nx * ny);
            A20 += w * ((Xm * Xm) * (ny * ny) + (Ym * Ym) * (nx * nx) -
                        Xm * Ym * nx * ny * 2.0f);
            // B-vector
            A21 += w * (Xd * (nx * nx) - Xm * (nx * nx) + Yd * nx * ny -
                        Ym * nx * ny + Zd * nx * nz - Zm * nx * nz);
            A22 += w * (Yd * (ny * ny) - Ym * (ny * ny) + Xd * nx * ny -
                        Xm * nx * ny + Zd * ny * nz - Zm * ny * nz);
            A23 += w * (Zd * (nz * nz) - Zm * (nz * nz) + Xd * nx * nz -
                        Xm * nx * nz + Yd * ny * nz - Ym * ny * nz);
            A24 += w * (-Yd * Zm * (ny * ny) + Ym * Zd * (nz * nz) +
                        Ym * Zm * (ny * ny) - Ym * Zm * (nz * nz) -
                        (Ym * Ym) * ny * nz + (Zm * Zm) * ny * nz +
                        Xd * Ym * nx * nz - Xm * Ym * nx * nz -
                        Xd * Zm * nx * ny + Yd * Ym * ny * nz +
                        Xm * Zm * nx * ny - Zd * Zm * ny * nz);
            A25 += w * (Xd * Zm * (nx * nx) - Xm * Zd * (nz * nz) -
                        Xm * Zm * (nx * nx) + Xm * Zm * (nz * nz) +
                        (Xm * Xm) * nx * nz - (Zm * Zm) * nx * nz -
                        Xd * Xm * nx * nz - Xm * Yd * ny * nz +
                        Xm * Ym * ny * nz + Yd * Zm * nx * ny -
                        Ym * Zm * nx * ny + Zd * Zm * nx * nz);
            A26 += w * (-Xd * Ym * (nx * nx) + Xm * Yd * (ny * ny) +
                        Xm * Ym * (nx * nx) - Xm * Ym * (ny * ny) -
                        (Xm * Xm) * nx * ny + (Ym * Ym) * nx * ny +
                        Xd * Xm * nx * ny - Yd * Ym * nx * ny +
                        Xm * Zd * ny * nz - Xm * Zm * ny * nz -
                        Ym * Zd * nx * nz + Ym * Zm * nx * nz);
        }
    }

    /**************************/
    /* write out accumulators */
    /**************************/
    // d_CD holds 27 strided planes of per-thread partial sums per segment.
    int out_ind = 27 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
    w_disp *= w_disp; // weight relative to flow
    d_CD[out_ind] = w_disp * A0;
    d_CD[out_ind + n_val_accum] = w_disp * A1;
    d_CD[out_ind + 2 * n_val_accum] = w_disp * A2;
    d_CD[out_ind + 3 * n_val_accum] = w_disp * A3;
    d_CD[out_ind + 4 * n_val_accum] = w_disp * A4;
    d_CD[out_ind + 5 * n_val_accum] = w_disp * A5;
    d_CD[out_ind + 6 * n_val_accum] = w_disp * A6;
    d_CD[out_ind + 7 * n_val_accum] = w_disp * A7;
    d_CD[out_ind + 8 * n_val_accum] = w_disp * A8;
    d_CD[out_ind + 9 * n_val_accum] = w_disp * A9;
    d_CD[out_ind + 10 * n_val_accum] = w_disp * A10;
    d_CD[out_ind + 11 * n_val_accum] = w_disp * A11;
    d_CD[out_ind + 12 * n_val_accum] = w_disp * A12;
    d_CD[out_ind + 13 * n_val_accum] = w_disp * A13;
    d_CD[out_ind + 14 * n_val_accum] = w_disp * A14;
    d_CD[out_ind + 15 * n_val_accum] = w_disp * A15;
    d_CD[out_ind + 16 * n_val_accum] = w_disp * A16;
    d_CD[out_ind + 17 * n_val_accum] = w_disp * A17;
    d_CD[out_ind + 18 * n_val_accum] = w_disp * A18;
    d_CD[out_ind + 19 * n_val_accum] = w_disp * A19;
    d_CD[out_ind + 20 * n_val_accum] = w_disp * A20;
    d_CD[out_ind + 21 * n_val_accum] = w_disp * A21;
    d_CD[out_ind + 22 * n_val_accum] = w_disp * A22;
    d_CD[out_ind + 23 * n_val_accum] = w_disp * A23;
    d_CD[out_ind + 24 * n_val_accum] = w_disp * A24;
    d_CD[out_ind + 25 * n_val_accum] = w_disp * A25;
    d_CD[out_ind + 26 * n_val_accum] = w_disp * A26;
}
23,610
#include <stdio.h> #define cudaErrorCheck(call) \ do { \ cudaError_t cuErr = call; \ if (cudaSuccess != cuErr) { \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, \ cudaGetErrorString(cuErr)); \ exit(0); \ } \ } while (0) #define BLOCK_NUM 100 #define BLOCK_DIM 512 __device__ int count = 0; __global__ void getThreadNum() { int oldVal, returnVal; do { oldVal = count; returnVal = atomicCAS(&count, oldVal, oldVal + 1); } while (oldVal != returnVal); } int main() { int result = -1; getThreadNum<<<BLOCK_NUM, BLOCK_DIM>>>(); cudaErrorCheck(cudaGetLastError()); cudaDeviceSynchronize(); int *count_addr; cudaErrorCheck(cudaGetSymbolAddress((void**)&count_addr, count)); cudaErrorCheck(cudaMemcpy(&result, count_addr, sizeof(int), cudaMemcpyDeviceToHost)); printf("thread num, expected = %d, actual = %d\n", BLOCK_NUM * BLOCK_DIM, result); return 0; }
23,611
//This micro-kernel currently does not use shared memory //It could be improved by adding this caching. //This micro-kernel currently uses a selection sort. //This is done for simplicity of testing and should be replaced // before using it for serious testing with a better sort. __device__ void Sort( void* param) { float* paramIn = (float*)param; int N = (int)(*paramIn); float* a = paramIn+1; //input data, will currently be trashed float* b = a + N*sizeof(float); //location for result array int warp_size = 32; int tid = threadIdx.x%warp_size; float *sub = a + tid*N/warp_size; //Sub list that each warp will sort //Selection Sort, eventually do Merge or something faster int i, j; for(i=0; i<N/warp_size-1; i++){ //Find min is remaining list int min = i; for(j=i+1; j<N/warp_size; j++){ if(sub[j]<sub[min])min = j; } //Swap ith place with min float temp = sub[i]; sub[i]=sub[min]; sub[min]=temp; } //Merge loops int subs; //number of sub lists I currently have for(subs=warp_size; subs!=1; subs=subs/2){ if(tid<subs/2){ //Merge our two lists int sub_size = N/subs; float *sub1 = a + 2*tid*sub_size; float *sub2 = sub1 + sub_size; float *ret = b + 2*tid*sub_size; //Merge our two lists into their location in ret int p1 = 0; //place in first list int p2 = 0; //place in second list int cur= 0; while(p1<sub_size && p2<sub_size){ if(sub1[p1]<sub[p2]){ ret[cur++] = sub1[p1++]; }else{ ret[cur++] = sub2[p2++]; } } //Copy any elements left in our lists after the first list runs out while(p1<sub_size) ret[cur++]=sub1[p1++]; while(p2<sub_size) ret[cur++]=sub2[p2++]; int k; for(k=0;k<2*sub_size;k++)sub1[k]=ret[k]; } /* //Copy the now sorted sub arrays back into sub to be merged again int k; for(k=tid; k<N; k+=warp_size) a[k]=b[k]; */ } }
23,612
// Square helper.
__device__ inline double sq(double x) { return x*x;}

// Fixed-point/descent solver on one 2x2 cell: removes the current dual
// contribution from u, runs `steps` averaging iterations on xi with a
// projection onto the ball ||xi||^2 <= w, then re-applies xi to u.
// Updates u and xi in place; always returns 0.0.
__device__ double optimsquare_eps_2d_descent(double u[4], double xi[4], double epsilon, double w, int steps)
{
    double no, nxi[4], r;
    r = 1./(1+epsilon/4.);   // relaxation factor from the epsilon term
    // Subtract the divergence of the current xi from u.
    u[0] -= xi[0]-xi[3];
    for (int i=1;i<=3;i++) { u[i] -= xi[i]-xi[i-1]; }
    for (int it=0; it<steps; it++) {
        // One averaging sweep over the 4 edges, then project onto the w-ball.
        if ((no = sq(nxi[0]=r*(.5*xi[0]+.25*(xi[3]+xi[1]+ u[1]-u[0])))
                + sq(nxi[1]=r*(.5*xi[1]+.25*(xi[0]+xi[2]+ u[2]-u[1])))
                + sq(nxi[2]=r*(.5*xi[2]+.25*(xi[1]+xi[3]+ u[3]-u[2])))
                + sq(nxi[3]=r*(.5*xi[3]+.25*(xi[2]+xi[0]+ u[0]-u[3])))) > w) {
            no = sqrt(w/no);
        } else {
            no = 1;
        }
        xi[0]= nxi[0]*no; xi[1]= nxi[1]*no; xi[2]= nxi[2]*no; xi[3]= nxi[3]*no;
    }
    // Re-apply the divergence of the updated xi to u.
    u[0] += xi[0]-xi[3];
    for (int i=1;i<=3;i++) { u[i] += xi[i]-xi[i-1]; }
    return 0.0;
}

// Closed-form/Newton variant of the 2x2 cell solve: finds the Lagrange
// multiplier mu (>= epsilon) of the constraint ||xi||^2 <= w by Newton
// iteration, reconstructs xi from it, then re-applies xi to u.
// Updates u and xi in place; returns the final multiplier tmu.
__device__ double optimsquare_eps_2d(double u[4], double xi[4], double epsilon, double w, int steps)
{
    double z0, z1, z2;
    double t0, t1, t2;
    double a, b;
    double dno, no, noa, nob; // Derivative of the objective
    double tm, tp;
    double tmu;
    // Subtract the divergence of the current xi from u.
    u[0] -= xi[0]-xi[3];
    for (int i=1;i<=3;i++) { u[i] -= xi[i]-xi[i-1]; }
    // Invariants of the cell: z0/z1 are diagonal differences, z2 the
    // checkerboard combination; only a = z0^2+z1^2 and b = z2^2 matter.
    z0 = u[3]-u[1];
    z1 = u[2]-u[0];
    z2 = (u[0]+u[2])-(u[3]+u[1]);
    a = z0*z0 + z1*z1;
    b = z2*z2;
    // If the current no is bigger than w, we do a Newton descent
    // for solving f_{a,b} = w. Otherwise, we keep mu = 0.
    tmu = epsilon;
    no = a/((2+tmu)*(2+tmu))+b/((4+tmu)*(4+tmu));
    if (no > w) {
        for (int i=0; i < steps; i++) {
            noa = a/((2+tmu)*(2+tmu));
            nob = b/((4+tmu)*(4+tmu));
            dno = noa/(2+tmu)+nob/(4+tmu);
            tmu += .5*(noa+nob-w)/dno;
            tmu = max(epsilon, tmu);   // keep the multiplier feasible
        }
    }
    // Recover the values of xi and u after finding mu.
    t0 = .5*z0/(2.+tmu);
    t1 = .5*z1/(2.+tmu);
    t2 = .5*z2/(4.+tmu);
    tm = t0-t1;
    tp = t0+t1;
    xi[1] = tp+t2;
    xi[2] = tm-t2;
    xi[3] = t2-tp;
    xi[0] = -tm-t2;
    // Safety projection onto ||xi||^2 <= w (Newton may not fully converge).
    double s = sq(xi[0]) + sq(xi[1]) + sq(xi[2]) + sq(xi[3]);
    if (s > w) {
        double factor = sqrt(w/s);
        for (int i=0; i<4; i++) { xi[i] *= factor; }
    }
    // Re-apply the divergence of the updated xi to u.
    u[0] += xi[0]-xi[3];
    for (int i=1;i<=3;i++) { u[i] += xi[i]-xi[i-1]; }
    return tmu;
}

// One checkerboard half-step: each thread handles one 2x2 cell (even or odd
// lattice depending on is_even_step), loading its 4 u values and 4 dual
// values, running the chosen per-cell solver, and storing both back.
// sx is the row stride of u; L x K is the cell grid for this parity.
__global__ void opt_eps_split(int sx, double *xi, double *u, double epsilon, double w, double ws, double we2, int L, int K, int steps, int use_newton, int is_even_step)
{
    int i,kk;
    int l = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if ((l < L) && (k < K)) {
        if (is_even_step) {
            i = l * 2 * sx + 2 * k;
            kk = l * K + k;
        } else {
            i = (1 + l * 2) * sx + 1 + 2 * k;
            kk = l * K + k;
        }
        // Gather the cell's corners (counter-clockwise) and its 4 duals.
        double aru[4];
        aru[0]=u[i]; aru[1]=u[i+1]; aru[2]=u[i+1+sx]; aru[3]=u[i+sx];
        double axi[4];
        axi[0] = xi[kk*4]; axi[1] = xi[kk*4 + 1];
        axi[2] = xi[kk*4 + 2]; axi[3] = xi[kk*4 + 3];
        if (use_newton) {
            optimsquare_eps_2d(aru, axi, epsilon, w, steps);
        } else {
            optimsquare_eps_2d_descent(aru, axi, epsilon, w, steps);
        }
        // Scatter results back.
        u[i]=aru[0]; u[i+1]=aru[1]; u[i+1+sx]=aru[2]; u[i+sx]=aru[3];
        xi[kk*4] = axi[0]; xi[kk*4 + 1] = axi[1];
        xi[kk*4 + 2] = axi[2]; xi[kk*4 + 3] = axi[3];
    }
}

// Over-relaxation: xio <- xiobar + theta*(xiobar - xio), with the matching
// divergence correction applied to u so the pair stays consistent.
__global__ void over_relax_eps_2d(int sx, double *xio, double *xiobar, double *u, double theta, int Lo, int Ko, int is_even_step)
{
    int l = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if ((l < Lo) && (k < Ko)) {
        int i, kk;
        if (is_even_step) {
            i = l * 2 * sx + 2 * k;
            kk = l * Ko + k;
        } else {
            i = (1 + l * 2) * sx + 1 + 2 * k;
            kk = l * Ko + k;
        }
        double dx[4];
        int m;
        int kx = kk * 4;
        for (m=0; m < 4; m++) dx[m] = theta * (xiobar[kx+m] - xio[kx+m]);
        // Apply the divergence of the extrapolation step to u.
        u[i] += dx[0]-dx[3];
        u[i+1] += dx[1]-dx[0];
        u[i+1+sx] += dx[2]-dx[1];
        u[i+sx] += dx[3]-dx[2];
        for (m=0; m<4; m++) xio[kx+m] = xiobar[kx+m] + dx[m];
    }
}

// Per-cell primal-dual gap: TV_eps(Du) - <xi,Du> + eps*|xi|^2/2, using the
// quadratic branch of the Huber-type regularizer when |Du|^2 < we2.
// One gap value per cell is written to gl.
__global__ void gap_arr_eps_2d(int sx, double* gl, double *xie, double *u, double epsilon, double w, double ws, double we2, int Le, int Ke, int is_even_step)
{
    int l = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if ((l < Le) && (k < Ke)) {
        int i;
        if (is_even_step) {
            i = 2 * l * sx + 2 * k;
        } else {
            i = (2 * l + 1) * sx + 1 + 2 * k;
        }
        int kx = 4*(l*Ke + k);
        double aru[4], a[4], b, c, d, gap = 0;
        double *xi = xie + kx;
        aru[0]=u[i]; aru[1]=u[i+1]; aru[2]=u[i+1+sx]; aru[3]=u[i+sx];
        // TV_e(Du) - <xi,Du> + e*xi^2/2
        a[3] = aru[0]-aru[3];
        b = a[3]*a[3];      // |Du|^2 accumulator
        c = xi[3]*a[3];     // <xi,Du> accumulator
        d = xi[3]*xi[3];    // |xi|^2 accumulator
        for (int m=0;m<3;m++) {
            a[m] = aru[m+1]-aru[m];
            b += a[m]*a[m];
            c += xi[m]*a[m];
            d += xi[m]*xi[m];
        }
        gap += epsilon*.5*d-c;
        if (b < we2) {
            gap += .5*b/epsilon; // here epsilon>0
        } else {
            gap += ws*sqrt(b)-.5*epsilon*w; // TV_eps
        }
        gl[l*Ke + k] = gap;
    }
}
23,613
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>

__global__ void additionGPU(float *a, float *b, float *r, int n);
void additionCPU(float *a, float *b, float *r, int n);

// Benchmarks element-wise vector addition (c = a + b) on CPU and GPU for a
// vector length given on the command line, then verifies the GPU result
// against the CPU reference.
int main(int argc, char* argv[])
{
    if(argc < 2) {
        puts("Usage: matmul [N]");
        return 0;
    }
    int N = atoi(argv[1]);
    printf("N: %d\n", N);

    size_t sz = sizeof(float) * N;   // bytes per vector

    struct timeval start, end, timer;

    // Host buffers; h_c holds the CPU reference result.
    float *h_a = (float *)malloc(sz);
    float *h_b = (float *)malloc(sz);
    float *h_c = (float *)malloc(sz);
    for(int i = 0; i < N; i++) {
        h_a[i] = i;
        h_b[i] = N - i;
        h_c[i] = 0.0;
    }

    // Device buffers.
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sz);
    cudaMalloc((void **) &d_b, sz);
    cudaMalloc((void **) &d_c, sz);

    float *h_result = (float *)malloc(sz);

    // Host-to-device copies of the inputs.
    cudaMemcpy(d_a, h_a, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz, cudaMemcpyHostToDevice);

    // CPU timing.
    gettimeofday(&start, NULL);
    additionCPU(h_a, h_b, h_c, N);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("CPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));

    // GPU timing: ceil(N / threads) blocks of 256 threads each.
    int threads = 256;
    int grid = (N % threads) ? N/threads+1 : N/threads;
    gettimeofday(&start, NULL);
    additionGPU<<< grid, threads >>>(d_a, d_b, d_c, N);
    // Kernel launches are asynchronous; wait before stopping the clock.
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);

    cudaMemcpy(h_result, d_c, sz, cudaMemcpyDeviceToHost);
    timersub(&end, &start, &timer);
    printf("GPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));

    // Compare the GPU output against the CPU reference.
    for(int i=0;i<N;i++) {
        if(h_c[i] != h_result[i]) {
            printf("Failed at %d, [CPU]: %f, [GPU]: %f\n", i, h_c[i], h_result[i]);
            break;
        }
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_result);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}

// One thread per element; the guard protects the partial last block when N
// is not a multiple of the block size.
__global__ void additionGPU(float *a, float *b, float *c, int n)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if(gid < n) {
        c[gid] = a[gid] + b[gid];
    }
}

// Sequential reference implementation.
void additionCPU(float *a, float *b, float *r, int n)
{
    for(int i = 0; i < n; i++) {
        r[i] = a[i] + b[i];
    }
}
23,614
#include "includes.h" __global__ void reduce_sum_kernel(const float *input, float *d_out, int size) { int tid = threadIdx.x; // Local thread index int myId = blockIdx.x*(blockDim.x*2) + threadIdx.x; // Global thread index extern __shared__ float tempsum[]; //shared memory // --- Loading data to shared memory. All the threads contribute to loading the data to shared memory. tempsum[tid] = (myId < size) ? input[myId] + input[myId+blockDim.x] : 0.0f; // --- make sure that all the shared memory loads have been completed __syncthreads(); // --- Reduction in shared memory. Only half of the threads contribute to reduction. for (unsigned int s=blockDim.x/2; s>0; s>>=1){ if (tid < s) { tempsum[tid] += tempsum[tid + s]; } // --- make sure that all memory operations have been completed __syncthreads(); } if (tid == 0) { d_out[blockIdx.x] = tempsum[0]; } }
23,615
#define EXPORT_KERNEL(k) extern "C" void* kern_##k = (void*)k // Super simple vector kernel extern "C" __global__ void add_vector(int* a, int* b, int* c) { int i = blockIdx.x * blockDim.x + threadIdx.x; c[i] = a[i] + b[i]; } EXPORT_KERNEL(add_vector);
23,616
#include "includes.h" #define N 24 __global__ void muestraIndice(float *a, float *b, float *c){ int global = blockIdx.x * blockDim.x + threadIdx.x; if(global < N){ a[global] = threadIdx.x; b[global] = blockIdx.x; c[global] = global; } }
23,617
#include "Tools.cuh"
#include <math_functions.h>

// operator of Vector3D: component-wise arithmetic helpers usable from both
// host and device code.
__device__ __host__ inline Vector3D operator+(const Vector3D& param1, const Vector3D& param2){
    Vector3D result;
    result.x = param1.x + param2.x;
    result.y = param1.y + param2.y;
    result.z = param1.z + param2.z;
    return result;
}

__device__ __host__ inline Vector3D operator-(const Vector3D& param1, const Vector3D& param2){
    Vector3D result;
    result.x = param1.x - param2.x;
    result.y = param1.y - param2.y;
    result.z = param1.z - param2.z;
    return result;
}

// Scalar * vector.
__device__ __host__ inline Vector3D operator*(const float param1,const Vector3D& param2){
    Vector3D result;
    result.x = param1 * param2.x;
    result.y = param1 * param2.y;
    result.z = param1 * param2.z;
    return result;
}

// Vector * scalar.
__device__ __host__ inline Vector3D operator*(const Vector3D& param1,const float param2){
    Vector3D result;
    result.x = param1.x * param2;
    result.y = param1.y * param2;
    result.z = param1.z * param2;
    return result;
}

__device__ __host__ inline Vector3D operator/(const Vector3D& param1,const float param2){
    Vector3D result;
    result.x = param1.x / param2;
    result.y = param1.y / param2;
    result.z = param1.z / param2;
    return result;
}

//__global__ Vector3D& operator=(const Vector3D &param);

// Squared Euclidean norm.
__device__ __host__ inline float Exponentiation(const Vector3D &param){
    return param.x * param.x + param.y * param.y + param.z * param.z;
}

// Euclidean norm.
__device__ __host__ inline float Norm(const Vector3D &param){
    return sqrt( Exponentiation( param ));
}

// Dot product.
__device__ __host__ inline float operator*(const Vector3D &param1,const Vector3D &param2){
    return param1.x * param2.x + param1.y * param2.y + param1.z * param2.z;
}

__device__ __host__ inline Vector3D CrossProduct(const Vector3D &param1, const Vector3D &param2){
    Vector3D result;
    result.x = param1.y * param2.z - param1.z * param2.y;
    result.y = param1.z * param2.x - param1.x * param2.z;
    result.z = param1.x * param2.y - param1.y * param2.x;
    return result;
}

// Unit vector in the direction of param (undefined for the zero vector).
__device__ __host__ inline Vector3D Normalize(const Vector3D &param){
    Vector3D result;
    float norm = Norm(param);
    result.x = param.x / norm;
    result.y = param.y / norm;
    result.z = param.z / norm;
    return result;
}

__device__ __host__ inline float Distance(const Point3D &param1,const Point3D &param2){
    return ( sqrtf( (param1.x - param2.x ) * ( param1.x - param2.x ) +
                    ( param1.y - param2.y ) * ( param1.y - param2.y ) +
                    ( param1.z - param2.z ) * ( param1.z - param2.z ) ));
}

/*
// operator of Point
__device__ __host__ inline Point3D operator+(const Point3D &param1, const Vector3D& param2){ Point3D result; result.x = param1.x + param2.x; result.y = param1.y + param2.y; result.z = param1.z + param2.z; return result; }
__device__ __host__ inline Point3D operator-(const Point3D &param1, const Vector3D& param2){ Point3D result; result.x = param1.x - param2.x; result.y = param1.y - param2.y; result.z = param1.z - param2.z; return result; }
__device__ __host__ inline Vector3D operator-(const Point3D& param1, const Point3D& param2){ Vector3D result; result.x = param1.x - param2.x; result.y = param1.y - param2.y; result.z = param1.z - param2.z; return result; }
*/
/*
__device__ __host__ inline RGBColor operator+(const RGBColor& param1, const RGBColor &param2){ RGBColor result; result.x = param1.x + param2.x; result.y = param1.y + param2.y; result.z = param1.z + param2.z; return result; }
__device__ __host__ inline RGBColor operator-(const RGBColor& param1, const RGBColor &param2){ RGBColor result; result.x = param1.x - param2.x; result.y = param1.y - param2.y; int tempZ = param1.z - param2.z; return result; }
*//*
__device__ __host__ inline RGBColor operator*(const RGBColor& param1, const float &param2){ RGBColor result; result.x = param1.x * param2; result.y = param1.y * param2; result.z = param1.z * param2; return result; }
*/

// Component-wise product of two colors (despite the "pow" name -- this is a
// modulation/multiply, not exponentiation).
__device__ __host__ inline RGBColor powc(const RGBColor& param1, const RGBColor &param2){
    RGBColor result;
    result.x = param1.x * param2.x;
    result.y = param1.y * param2.y;
    result.z = param1.z * param2.z;
    return result;
}

/*
__device__ __host__ inline RGBColor operator/(const RGBColor& param1, const float &param2){ RGBColor result; result.x = param1.x / param2; result.y = param1.y / param2; result.z = param1.z / param2; return result; }
*/

// Maps a [0,1] float color to an 8-bit uchar3, clamping each channel.
__device__ __host__ inline uchar3 MapToUchar(const RGBColor& param){
    uchar3 result;
    result.x = param.x * 255;
    result.y = param.y * 255;
    result.z = param.z * 255;
    // Clamp each channel independently.
    // BUGFIX: the y and z branches below originally tested param.x, so out
    // of range green/blue channels were clamped based on the red channel.
    if( param.x > 1.0 ){
        result.x = 255;
    }else if( param.x < 0.0 ){
        result.x = 0;
    }
    if( param.y > 1.0 ){
        result.y = 255;
    }else if( param.y < 0.0 ){
        result.y = 0;
    }
    if( param.z > 1.0 ){
        result.z = 255;
    }else if( param.z < 0.0 ){
        result.z = 0;
    }
    return result;
}

/* pseudo random number generator */
/* linear congruential */
// NOTE(review): a single device-global seed shared by all threads -- not
// thread-safe; concurrent kernels will race on it.
__device__ static unsigned int prng_seed = 0xcdcdcdcd;

__device__ void rand_seed(unsigned int seed){
    prng_seed = seed;
}

__device__ inline unsigned int uintRand(){
    prng_seed = ( prng_seed * 0xababa ) % ( MAX_RANDOM_NUM );
    return prng_seed;
}

// Uniform float in [0,1), derived from the same generator state.
__device__ inline float floatRand(){
    prng_seed = ( prng_seed * 0xababa ) % ( MAX_RANDOM_NUM );
    float result = float(prng_seed) / float(MAX_RANDOM_NUM);
    return result;
}
23,618
__device__ void imageconvolution(void *param) { float *input = (float *) param; int IW = (int)input[0]; //Image Width int MW = (int)input[1]; //MASK_WIDTH; float* image = input+2; float* mask = image + IW; float* imageout = image + MW + IW; int warp_size=32; int threadId = threadIdx.x % warp_size; float value =0; int start; int index; //printf("%d - %d \n", MW, IW); //this function includes 2 floating point operations while(threadId < IW) { start = threadId - (MW/2); for(int i=0; i<MW;i++){ index= start + i; if(index >=0 && index < IW) value = value + image[index] * mask[i]; } imageout[threadId] = value; //printf("%d - %f \n", threadId, imageout[threadId]); threadId = threadId + warp_size; } }
23,619
#include "includes.h"

// HOG-style cell histogram kernel: for each pixel, the gradient magnitude is
// split between the two orientation bins adjacent to its (normalized [0,1])
// phase, partial per-cell histograms are built in shared memory, and the
// results are atomically added into the global `histograms` buffer.
// The shared buffer sizes assume 64 threads per block covering 8 cells of
// 8 pixels each, with num_bins == 9 -- TODO(review): confirm launch config.
//
// NOTE(review): the early `return`s below exit before the __syncthreads()
// calls; for blocks straddling the image border some threads skip the
// barrier while others wait on it, which is undefined behavior. Consider
// guarding the work instead of returning.
__global__ void histogram_kernel(float* magnitude, float* phase, float* histograms, int input_width, int input_height, int cell_grid_width, int cell_grid_height, int magnitude_step, int phase_step, int histograms_step, int cell_width, int cell_height, int num_bins)
{
    //TODO: make the buffer sizes dependent on an input or template parameter.
    // Each thread block needs to store intermediate results for 64 gradients
    // and also 8 different histograms, each with 9 bins.
    __shared__ int s_lbin_pos[64];
    __shared__ float s_lbin[64];
    __shared__ int s_rbin_pos[64];
    __shared__ float s_rbin[64];
    __shared__ float s_hist[9 * 8];

    // The columns of the image are mapped to the first dimension of the block
    // grid and the first dimension of the thread block.
    int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
    // If current position is outside the image, stop here
    if(pixel_x >= input_width) { return; }

    // The rows of the image are mapped to the second dimension of the block
    // grid and the second dimension of the thread block.
    int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
    // If current position is outside the image, stop here
    if(pixel_y >= input_height) { return; }

    // Each row has magnitude_step size
    int mag_pixel_idx = pixel_y * magnitude_step + pixel_x;
    // Each row has phase_step size
    int phase_pixel_idx = pixel_y * phase_step + pixel_x;

    // The phase was previously normalized to [0,1]
    float bin_size = 1.0f / (float)num_bins;

    // By dividing by the bin size and taking the integer part, you find out
    // inside which bin the gradient is at. If it's greater than the middle of the bin
    // it will be divided between this one and the next, if it's lesser it will
    // be divided between this and the previous one. By subtracting 0.5 before
    // taking the integer part, the division will always be between this bin and
    // the next.
    int left_bin = (int)floor((phase[phase_pixel_idx] / bin_size) - 0.5f);
    // The result of the previous operation might be negative. If so, the next
    // bit fixes that. Otherwise that changes nothing.
    left_bin = (left_bin + num_bins) % num_bins;

    // Take the next bin as the right bin.
    // If the left bin is the last one, this will be outside range. Wait a bit
    // before taking the remainder, because this value needs to be used in the
    // formula below.
    int right_bin = (left_bin + 1);

    // Calculate the distance between the gradient phase and the limit between
    // the left and right bins. Normalized by the bin size, the limit is equal
    // to the right bin identifier.
    float delta = (phase[phase_pixel_idx] / bin_size) - right_bin;
    if(delta < -0.5) { delta += num_bins; }

    //Fix range for right_bin now
    right_bin = right_bin % num_bins;

    // Store the bin positions and amounts for each bin on shared buffers.
    // Magnitude is split linearly between the two bins by the distance delta.
    s_lbin_pos[threadIdx.x] = left_bin;
    s_lbin[threadIdx.x] = (0.5 - delta) * magnitude[mag_pixel_idx];
    s_rbin_pos[threadIdx.x] = right_bin;
    s_rbin[threadIdx.x] = (0.5 + delta) * magnitude[mag_pixel_idx];

    // Wait for other threads.
    __syncthreads();

    // Initialize histograms shared buffer (72 floats, by 64 threads + 8 extra).
    s_hist[threadIdx.x] = 0.0f;
    if(threadIdx.x < 8) { s_hist[threadIdx.x + 64] = 0.0f; }

    int cell_y = pixel_y / cell_height;

    // Each partial histogram will be calculated by only one thread.
    // NOTE(review): this loop starts at i = 1, so element 0 of each group of
    // 8 gradients is never accumulated -- looks like an off-by-one; confirm
    // against a reference implementation.
    if(threadIdx.x < 8) {
        int s_hist_idx = 9 * threadIdx.x;
        for(int i = 1; i < 8; ++i) {
            s_hist[s_hist_idx + s_lbin_pos[8 * threadIdx.x + i]] += s_lbin[8 * threadIdx.x + i];
            s_hist[s_hist_idx + s_rbin_pos[8 * threadIdx.x + i]] += s_rbin[8 * threadIdx.x + i];
        }
    }

    // Wait until all threads finish.
    __syncthreads();

    // Add to the complete histogram sum using atomic operations.
    // NOTE(review): out_idx depends only on cell_y and threadIdx.x (no
    // horizontal cell coordinate) -- verify this matches the layout the
    // caller expects for histograms_step.
    int out_idx = cell_y * histograms_step + threadIdx.x;
    atomicAdd(&(histograms[out_idx]), s_hist[threadIdx.x]);
    if(threadIdx.x < 8) {
        atomicAdd(&(histograms[out_idx + 64]), s_hist[threadIdx.x + 64]);
    }
}
23,620
// Jin Pyo Jeon
// Times
// N Thread/Block seconds
// 1 << 24 512 0.60
// 1 << 24 480 0.61
// 1 << 24 272 0.61
// 1 << 22 128 0.15
// 1 << 20 32 0.05
// 1 << 20 64 0.047
// 1 << 20 128 0.048
// 1 << 18 32 0.02
// 1 << 17 32 0.013
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>

#define MASK_WIDTH 5

// 1-D convolution, one output element per thread; out-of-range taps at the
// borders are skipped (equivalent to zero padding).
__global__ void convolution_1D_basic_kernel(float *N, float *M, float *P, long Width)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float pValue = 0;
    int nStartPoint = i - (MASK_WIDTH / 2);   // leftmost tap, mask centered on i
    if (i < Width) {
        for (int j = 0; j < MASK_WIDTH; j++) {
            if (nStartPoint + j >= 0 && nStartPoint + j < Width) {
                pValue += N[nStartPoint + j] * M[j];
            }
        }
        P[i] = pValue;
    }
}

// Fills an height x width matrix with random integers in [0, 100).
void generateMat(float *m, size_t height, size_t width){
    int i, j;
    for (i = 0; i < height; i++){
        for (j = 0; j < width; j++) {
            m[i*width+j] = rand() % 100;
        }
    }
}

// Debug helper: prints a matrix row by row.
void printMat(float *m, size_t height, size_t width) {
    int i, j;
    for (i = 0; i < height; i++){
        for (j = 0; j < width; j++) {
            printf("%f ", m[i*width+j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Driver: convolves a random 2^24-element vector with an averaging mask and
// reports the elapsed time (measured with CUDA events; note the timed span
// also includes allocation and host<->device copies).
int main(int argc, char**argv){
    long width = 1 << 24;
    int THREAD_COUNT = 17; // Due to seeming Grid Dim x limitation of 65536
    srand(time(NULL));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    float * m, *n, *p;
    float * d_m, *d_p, *d_n;
    long mSize = MASK_WIDTH * sizeof(float);
    long nSize = width * sizeof(float);
    long pSize = width * sizeof(float);
    cudaMalloc((void**)&d_m, mSize);
    cudaMalloc((void**)&d_n, nSize);
    cudaMalloc((void**)&d_p, pSize);
    m = (float *)malloc(mSize);
    n = (float *)malloc(nSize);
    p = (float *)malloc(pSize);

    for (int i = 0; i < MASK_WIDTH; i++) {
        m[i] = 1.0/MASK_WIDTH; // averaging mask
    }
    generateMat(n, 1, width);
    cudaMemcpy(d_m, m, mSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, n, nSize, cudaMemcpyHostToDevice);
    cudaError err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("%d: Error %d %s\n", __LINE__, err, cudaGetErrorString(err));
        exit(-1);
    }

    // Grow the block size until the grid fits under the 65535 block limit.
    long blocks = ceil(width / (float) THREAD_COUNT);
    while (blocks >= 65535) {
        THREAD_COUNT *= 2;
        blocks = ceil(width / (float) THREAD_COUNT);
    }
    assert(THREAD_COUNT <= 1024);

    dim3 DimBlock(THREAD_COUNT, 1, 1);
    dim3 DimGrid(blocks, 1, 1);
    convolution_1D_basic_kernel<<<DimGrid, DimBlock>>>(d_n, d_m, d_p, width);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("%d: Error %d %s\n", __LINE__, err, cudaGetErrorString(err));
        exit(-1);
    }
    // Blocking copy also synchronizes with the kernel before timing stops.
    cudaMemcpy(p, d_p, pSize, cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("The elapsed time is %f s with %d threads/block\n", elapsedTime / 1000.0, THREAD_COUNT);

    free(n); free(m); free(p);
    cudaFree(d_n); cudaFree(d_m); cudaFree(d_p);
}
23,621
#include<stdio.h> #include<math.h> const int lena = 83; const int lenb = 1543; float a[ lena ], b[ lenb ], x[ lena ], xnew[ lena ], tau[ lenb ]; int De[ lena * lenb ]; int tDe[ lenb * lena ]; inline float sum(float* a, const int lena) { float res = *a; for(int i = 1; i < lena; ++i) { res += a[ i ]; } return res; } void transpose(int* tm, int* m, const int nrow, const int ncol) { for(int i = 0; i < nrow; ++i) { for(int j = 0; j < ncol; ++j) { tm[ j * nrow + i ] = m[ i * ncol + j ]; } } } float M_v(int* M, int row, float* v, const int len) { float res = 0.; int* p = M + row * len; for(int i = 0; i < len; ++i) { if(*p++) res += v[ i ]; } return res; } void cpuweaver(float tol) { float sa = sum(a, lena); for(int i = 0; i < lena; ++i) x[ i ] = a[ i ] / sa; float m = sa + sum(b, lenb); int iter = 0; float e = 999, tmp; for(iter = 0; e > 1e-15; ++iter) { for(int j = 0; j < lenb; ++j) { tau[ j ] = b[ j ] / M_v(tDe, j, x, lena); } for(int i = 0; i < lena; ++i) { xnew[ i ] = a[ i ] / (m - M_v(De, i, tau, lenb)); } e = 0.; for(int i = 0; i < lena; ++i) { tmp = fabs(xnew[ i ] - x[ i ]); if(e < tmp) e = tmp; }; for(int i = 0; i < lena; ++i) { x[ i ] = xnew[ i ]; } } } void test_weaver() { FILE* fp; //load a fp = fopen("C:/Users/easttiger/Dropbox/Subjects/W/Weaver Paper Reading list/Hunter matlab code/a.txt", "r"); for(int i = 0; i < lena; ++i) { fscanf(fp, "%f\n", a + i); } fclose(fp); //load b fp = fopen("C:/Users/easttiger/Dropbox/Subjects/W/Weaver Paper Reading list/Hunter matlab code/b.txt", "r"); for(int i = 0; i < lenb; ++i) { fscanf(fp, "%f\n", b + i); } fclose(fp); //load b fp = fopen("C:/Users/easttiger/Dropbox/Subjects/W/Weaver Paper Reading list/Hunter matlab code/tDe.txt", "r"); for(int j = 0; j < lenb; ++j) { for(int i = 0; i < lena - 1; ++i) { fscanf(fp, "%d ", tDe + j * lena + i); } fscanf(fp, "%d\n", tDe + (j + 1) * lena - 1); } fclose(fp); //initialize De as transpose of tDe transpose(De, tDe, lenb, lena); cpuweaver(0.00000001); for(int i = 0; i < lena; ++i) { 
printf("%f,", x[ i ]); } }
23,622
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <fstream> #include <string> #include <vector> #include <algorithm> #include <time.h> #include <cmath> #include <limits.h> #define MAXHOPS 4 #define MAX_WAITING_TIME 420 #define BLOCK_LENGTH 512 #define END_OF_ARRAY 2147483647 #define BUFFER_LENGTH 50 #define AIRPORT_PATH "C:/Users/acer/Desktop/Semester 7/Project/AA_airports.txt" //"C:/Users/acer/Desktop/Semester 7/Project/Data/AA_airports.txt" #define FLIGHT_PATH "C:/Users/acer/Desktop/Semester 7/Project/AA_data1.txt" //"C:/Users/acer/Desktop/Semester 7/Project/Data/OAGX_data_num_1.txt" bool bool1 = true; bool bool2 = false; using namespace std; cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size); // for cuda error checking #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ return 1; \ } \ } while (0) int ADJ_MATRIX_DIM; __device__ int DEV_ADJ_MATRIX_DIM; // FYP_BFS.cpp : Defines the entry point for the console application. 
// ///////////////////Global Variables/////////////////// struct Flight{ int flightNumber; int source; int destination; int arrivalTime; int departureTime; int price; string code; }; vector<string> Airport_List; vector<Flight> Flight_List; vector<int>** AdjMatrix; ////////////////////////////////////////////////////// //////////////////Data Read/////////////////////////// int readAirports(){ ifstream myFile; myFile.open(AIRPORT_PATH); int numberOfAirports=0; if(myFile.is_open()){ string line; cout<<"Reading Airports"<<endl; while(myFile.good()){ //------------------------------changed-------------------// //myFile.ignore(256,' '); string s; myFile>>s; Airport_List.push_back(s); numberOfAirports++; } } myFile.close(); ADJ_MATRIX_DIM = Airport_List.size(); //cudaMemcpy(DEV_ADJ_MATRIX_DIM,&ADJ_MATRIX_DIM,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpyToSymbol(DEV_ADJ_MATRIX_DIM,&ADJ_MATRIX_DIM,sizeof(int),0,cudaMemcpyHostToDevice); cudaCheckErrors("Error copying adj matrix dim to device"); cout<<Airport_List.size()<<" Airports Found"<<endl; return 1; } void readFlights(){ //this is a bloody array of pointers AdjMatrix = new vector<int>*[Airport_List.size()]; for(int i=0;i<Airport_List.size();i++){ //thisi is a bloody array of vectors AdjMatrix[i] = new vector<int>[Airport_List.size()]; } ifstream myFile; myFile.open(FLIGHT_PATH); int numOfFlights = 0; if(myFile.is_open()){ string line; Flight tempFlight; while(myFile.good()){ //---------------------------------------changed----------------------------------------// tempFlight.flightNumber= numOfFlights; /*myFile>>tempFlight.source; myFile>>tempFlight.destination; myFile>>tempFlight.departureTime; myFile>>tempFlight.arrivalTime; if(tempFlight.arrivalTime<tempFlight.departureTime) tempFlight.arrivalTime+=10080; myFile>>tempFlight.price; myFile>>tempFlight.code;*/ myFile>>tempFlight.source; myFile>>tempFlight.destination; myFile>>tempFlight.price; myFile>>tempFlight.departureTime; myFile>>tempFlight.arrivalTime; 
if(tempFlight.arrivalTime<tempFlight.departureTime) tempFlight.arrivalTime+=10080; myFile>>tempFlight.code; //add this flight to the adjmatrix; Flight_List.push_back(tempFlight); AdjMatrix[tempFlight.source][tempFlight.destination].push_back(tempFlight.flightNumber); numOfFlights++; if(numOfFlights%10000==0) cout<<"*"; } cout<<endl; } myFile.close(); cout<<Flight_List.size()<<" Flights Found"<<endl; } ///////////////////////////////////////////////////////////////////////////////////// struct route{ vector<int> flights; int weight; }; int initializeFlightListInDevice(Flight* &dev_flight_list){ //allocate space for the flight list in cuda cudaMalloc((void**)&dev_flight_list, Flight_List.size()*sizeof(Flight)); cudaCheckErrors("Failed to allocate memory to flight list"); cudaMemcpy(dev_flight_list,&Flight_List[0],Flight_List.size()*sizeof(Flight),cudaMemcpyHostToDevice); cudaCheckErrors("Failed to copy flight list"); return 1; } int initializeAdjMatrixInDevice(int** &dev_adj_list, int ** &host_adj_vector){ size_t size = ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int*); //the vector in host that records the pointers in device memory host_adj_vector = (int **)malloc(size); //i indicates rows and j indicates columns of the adjacency matrix //allocate device memory for the boolean vector //allocate memory for each manhattan in device and store the pointer in memory for(int i=0;i<ADJ_MATRIX_DIM;i++){ for(int j=0;j<ADJ_MATRIX_DIM;j++){ cudaMalloc((void **)&host_adj_vector[i*ADJ_MATRIX_DIM+j],AdjMatrix[i][j].size()*sizeof(int)); cudaCheckErrors("Failed to allocate memory to airport list manhattan:"); cudaMemcpy(host_adj_vector[i*ADJ_MATRIX_DIM+j],&AdjMatrix[i][j][0],AdjMatrix[i][j].size()*sizeof(int),cudaMemcpyHostToDevice); cudaCheckErrors("Failed to copy data to airport list manhattan:"); } if(i%100==0) cout<<"&"; } cout<<endl; cudaMalloc((void***)&dev_adj_list,size); cudaCheckErrors("Failed to allocate memory to pointer list in device"); 
cudaMemcpy(dev_adj_list,host_adj_vector,size,cudaMemcpyHostToDevice); cudaCheckErrors("Failed to allocate data to pointer list in device"); return 1; } int initializeBooleanMatrixInDevice(int* &boolean_matrix){ int* host_bool_matrix= new int[ADJ_MATRIX_DIM*ADJ_MATRIX_DIM]; for(int i=0;i<ADJ_MATRIX_DIM;i++){ for(int j=0;j<ADJ_MATRIX_DIM;j++){ host_bool_matrix[i*ADJ_MATRIX_DIM+j] = (AdjMatrix[i][j].size() !=0); } } size_t size_bool =ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int); cudaMalloc((void**)&boolean_matrix,size_bool); cudaCheckErrors("Failed to allocate memory to boolean adj matrix"); cudaMemcpy(boolean_matrix,host_bool_matrix,size_bool,cudaMemcpyHostToDevice); cudaCheckErrors("Failed to move data to boolean adj matrix"); delete(host_bool_matrix); return 1; } int initializeBuffer(int* &buffer){ //buffer size int* host_bool_buffer= new int[ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*MAXHOPS]; // for(int k=0;k<MAXHOPS;k++){ for(int i=0;i<ADJ_MATRIX_DIM;i++){ for(int j=0;j<ADJ_MATRIX_DIM;j++){ host_bool_buffer[k*ADJ_MATRIX_DIM*ADJ_MATRIX_DIM+ i*ADJ_MATRIX_DIM+j] = false; } } } size_t size_bool =MAXHOPS*ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int); cudaMalloc((void**)&buffer,size_bool); cudaCheckErrors("Failed to allocate memory to boolean buffer"); cudaMemcpy(buffer,host_bool_buffer,size_bool,cudaMemcpyHostToDevice); cudaCheckErrors("Failed to move data to boolean buffer"); delete(host_bool_buffer); return 1; } __global__ void testBuffer(int* buffer,int* result, int size){ int id =blockIdx.x*blockDim.x+threadIdx.x; if(id<size){ if(buffer[id]) result[id] = 1234345; else result[id] = 0; } } __global__ void testMatrix(int** devVector,int size, int* result, Flight* flights){ //block dimension is the number of threads in a block. since blockid is zero based multiplying gets you somewhre close. 
//to gt the correct position all u have to do then is to add the thread id int i = blockIdx.x*blockDim.x+threadIdx.x; result[i] = 0; if(i<size*size && devVector[i]!= NULL ) result[i] = flights[devVector[i][0]].source; } __global__ void testMatrixBoolean(int* devMatrixBoolean,int size, int* result){ int i = blockIdx.x*blockDim.x+threadIdx.x; result[i] = 0; //put 1 if a manhattan exists for the particular position if(i<size*size && devMatrixBoolean[i]) result[i] = 1; } //initialize buffer to end of array value so that as values are filled the array size will change, but will still be //indicated by the first end of array value //__global__ void initializeBuffer(bool* buffer, int size){ // int id = blockIdx.x*blockDim.x+threadIdx.x; // if(id<size) // buffer[id] = false; //} //give enough threads to span the source row //maximum id should be adj_matrix_dimension __global__ void firstExpansion(int* buffer, int*dev_boolean_matrix, int source){ int id = blockIdx.x*blockDim.x+threadIdx.x; //the source row. 
//if(id<DEV_ADJ_MATRIX_DIM*DEV_ADJ_MATRIX_DIM){ // //if(dev_adj_matrix[DEV_ADJ_MATRIX_DIM*(source-1)+id]!=NULL){ // // //set source to the precedant node list of each relevant airport // // buffer[id*BUFFER_LENGTH] = source; // //} //} } //max id is number of airports __global__ void expansion(int* dev_buffer1,int* boolean_matrix, int* dev_source_vector,int matrix_dimension){ int id = blockIdx.x*blockDim.x+threadIdx.x; int row = (int) floor((double)id/matrix_dimension); int column = id%matrix_dimension; for(int k=0;k<MAXHOPS;k++){ if(row<matrix_dimension && column<matrix_dimension ){ //for the source row if the matrix row column position has a manhattan set the buffer position to true dev_buffer1[k*matrix_dimension*matrix_dimension+id] = (dev_source_vector[row] && boolean_matrix[id]); } __syncthreads(); //set the 'next source vector' positions to zero by the first of each row if(row<matrix_dimension && column<matrix_dimension&& column==0) dev_source_vector[row]= 0; __syncthreads(); //if the relevant cell in the frame has been set, contribute to making sure the relevant cell in the 'next source vector' is set to 1 if((row<matrix_dimension && column<matrix_dimension) && dev_buffer1[k*matrix_dimension*matrix_dimension+id]){ dev_source_vector[column] = 1; } __syncthreads(); } } //__global__ void copyNextSource(bool* next_source_array, bool* current_array, int size){ // int id = blockDim.x*blockIdx.x+threadIdx.x; // if(id<size) // cudaMemcpy //} int main(int argc) { readAirports(); readFlights(); int source = 344; int destination = 190; Flight* dev_flight_list; int** dev_adj_list; int* dev_adj_matrix_boolean; int** host_adj_vector; int* dev_level1; int* dev_level2; int* frames; //boolean array containing source airports in the next expansion int* dev_next_source_array; size_t matrixSize = ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int); size_t bufferSize = ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int); //add the flight array to GPU memory cout<<"Initializing Flights"<<endl; 
initializeFlightListInDevice(dev_flight_list); cout<<"finished initializing FLights"<<endl; //add the adjacency matrix with manhattans to GPU cout<<"Initializing Matrix"<<endl; initializeAdjMatrixInDevice(dev_adj_list,host_adj_vector); cout<<"Finished with adj matrix"<<endl; //add the boolean adjacency matrix (without manhattans) to GPU cout<<"Initializing Boolean Matrix"<<endl; initializeBooleanMatrixInDevice(dev_adj_matrix_boolean); cout<<"Finished with boolean matrix"<<endl; //allocate memory for the 'next source array' in device cudaMalloc((void**)&dev_next_source_array,ADJ_MATRIX_DIM*sizeof(int)); cudaCheckErrors("Failed to allocate memory to next source list"); int* source_vector = new int [ADJ_MATRIX_DIM]; //initialize the 'next source vector' with the source row of the adjacency matrix for(int i=0;i<ADJ_MATRIX_DIM;i++){ source_vector[i] = AdjMatrix[source][i].size()!=0; } //intialize 'next source array' in device cudaMemcpy(dev_next_source_array,source_vector,ADJ_MATRIX_DIM*sizeof(int),cudaMemcpyHostToDevice); cudaCheckErrors("Failed to move data to next source list"); delete(source_vector); //////////////////////initialize the buffers for all the levels///////////////// cout<<"initializing Buffers"<<endl; initializeBuffer(dev_level1); //initializeBuffer(dev_level2); cout<<"initialized buffers"<<endl; ///////////////////////////////////////Interations/////////////////////////////////////// int numBlocks = ceil((double)ADJ_MATRIX_DIM*ADJ_MATRIX_DIM/BLOCK_LENGTH); ofstream myFile; myFile.open("nextSource.txt"); int* myArray = (int*) malloc(ADJ_MATRIX_DIM*sizeof(int)); cudaMemcpy(myArray,dev_next_source_array,ADJ_MATRIX_DIM*sizeof(int),cudaMemcpyDeviceToHost); cudaCheckErrors("Failed to copy data from buffer array to host array"); for(int i=0;i<ADJ_MATRIX_DIM;i++){ //if(myArray[i]!= NULL) myFile<<myArray[i]; } myFile<<endl<<endl; free(myArray); cout<<"moving into first expansion"<<endl; ofstream myFile2; myFile2.open("Frame1.txt"); int* myArray2 = 
(int*)malloc(MAXHOPS*ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int)); expansion<<<numBlocks,BLOCK_LENGTH>>>(dev_level1,dev_adj_matrix_boolean,dev_next_source_array,ADJ_MATRIX_DIM); cudaThreadSynchronize(); cudaCheckErrors("Error occured in expansion"); cout<<"finished expansion"<<endl; cudaMemcpy(myArray2,dev_level1,MAXHOPS*ADJ_MATRIX_DIM*ADJ_MATRIX_DIM*sizeof(int),cudaMemcpyDeviceToHost); cudaCheckErrors("Failed to retrieve memory from first frame"); for(int j=0;j<MAXHOPS;j++){ for(int i=0;i<ADJ_MATRIX_DIM*ADJ_MATRIX_DIM;i++){ //if(myArray[i]!= NULL) myFile2<<myArray2[j*ADJ_MATRIX_DIM*ADJ_MATRIX_DIM+i]; } myFile2<<endl; } myFile2<<endl<<endl; myFile2.close(); free(myArray2); int* myArray1 = (int*)malloc(ADJ_MATRIX_DIM*sizeof(int)); /* expansion<<<numBlocks,BLOCK_LENGTH>>>(dev_level1,dev_adj_matrix_boolean,dev_next_source_array,ADJ_MATRIX_DIM); cudaThreadSynchronize(); cudaCheckErrors("Error occured in expansion"); cout<<"finished expansion"<<endl;*/ cudaMemcpy(myArray1,dev_next_source_array,ADJ_MATRIX_DIM*sizeof(int),cudaMemcpyDeviceToHost); cudaCheckErrors("Failed to retrieve memory from buffer array 1.2 to host"); for(int i=0;i<ADJ_MATRIX_DIM;i++){ //if(myArray[i]!= NULL) myFile<<myArray1[i]; } myFile<<endl<<endl; free(myArray1); myFile.close(); //cudaFree(dev_next_source_array); cudaFree(dev_level1); cudaFree(dev_level2); cudaFree(dev_flight_list); //for(int i=0;i<ADJ_MATRIX_DIM*ADJ_MATRIX_DIM;i++){ // //cout<<i<<endl; // if(host_adj_vector[i] !=NULL) // cudaFree(host_adj_vector[i]); //} cudaFree(dev_adj_list); free(host_adj_vector); return 0; }
23,623
#ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <ctime> // includes, project // includes, kernels #include <cuda.h> #include <cuda_runtime.h> #define MAX_TILE_SIZE 1024 //////////////////////////////////////////////////////////////////////////////// // declaration, forward double* read_array(const char* filename, int len) { double *x = (double*) malloc(len * sizeof(double)); FILE *fp = fopen(filename, "r"); for (int i = 0; i < len; i++) { fscanf(fp, "%lf", &x[i]); } fclose(fp); return x; } __global__ void computeOnDevice(double* dA,double* dB, double* dC, int nRows, int tileSize, float* incTime) { __shared__ float ds_M[MAX_TILE_SIZE]; __shared__ float ds_N[MAX_TILE_SIZE]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * tileSize + ty, Col = bx * tileSize + tx; double Pvalue = 0; for (int m = 0; m < (nRows-1)/tileSize+1; ++m) { if (Row < nRows && m*tileSize+tx < nRows) ds_M[ty * tileSize + tx] = dA[Row*nRows + m*tileSize+tx]; else ds_M[ty * tileSize + tx] = 0; if (Col < nRows && m*tileSize+ty < nRows) ds_N[ty * tileSize + tx] = dB[(m*tileSize+ty)*nRows+Col]; else ds_N[ty * tileSize + tx] = 0; __syncthreads(); for (int k = 0; k < tileSize; ++k) Pvalue += ds_M[ty * tileSize + k] * ds_N[k * tileSize + tx]; __syncthreads(); } if (Row < nRows && Col < nRows) dC[Row*nRows+Col] = Pvalue; return;//Placeholder } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { if(argc!=2) { printf("Usage: ./problem2 N\n"); return 0; } int nRows = 1024; int num_elements = nRows*nRows; int tileSize = atoi(argv[1]); //change this for scaling analysis float incTime=0; // Time for GPU double* hA = read_array("inputA.inp",num_elements); double* hB = read_array("inputB.inp",num_elements); double* hC = 
(double*) malloc(num_elements * sizeof(double)); dim3 dimGrid((nRows - 1) / tileSize + 1, (nRows - 1) / tileSize + 1, 1); dim3 dimBlock(tileSize, tileSize, 1); double * dA, *dB, *dC; cudaError error = cudaMalloc((void**)&dA, sizeof(double)*num_elements); error = cudaMalloc((void**)&dB, sizeof(double)*num_elements); error = cudaMalloc((void**)&dC, sizeof(double)*num_elements); cudaMemcpy(dA, hA, sizeof(double)*num_elements, cudaMemcpyHostToDevice); cudaMemcpy(dB, hB, sizeof(double)*num_elements, cudaMemcpyHostToDevice); cudaEvent_t startEvent_inc, stopEvent_inc; cudaEventCreate(&startEvent_inc); cudaEventCreate(&stopEvent_inc); cudaEventRecord(startEvent_inc,0); // starting timing for inclusive // **===-------- Modify the body of this function -----------===** computeOnDevice<<<dimGrid, dimBlock>>>(dA, dB, dC, nRows, tileSize, &incTime); // **===-----------------------------------------------------------===** cudaThreadSynchronize(); cudaMemcpy(hC, dC, sizeof(double)*num_elements, cudaMemcpyDeviceToHost); cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive cudaEventSynchronize(stopEvent_inc); cudaEventElapsedTime(&incTime, startEvent_inc, stopEvent_inc); printf("%lf\n%f\n%d\n",hC[num_elements - 1],incTime,tileSize); // cleanup memory free(hA); free(hB); free(hC); return 0; }
23,624
#include <stdio.h> #include <cuda.h> #include <time.h> #define N 128 __global__ void kernel_add(int *d_a,int *d_b,int *d_c){ //int i = threadIdx.x; //int i = blockIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < N) d_c[i] = d_a[i] + d_b[i]; } void addVector(int *h_a,int *h_b,int *h_c){ for(int i = 0; i < N; i++){ h_c[i] = h_a[i] + h_b[i]; } } bool compareTo(int *h_c,int *h_result){ bool flag = true; for(int i = 0; i < N; i++){ if(h_c[i] != h_result[i]){ flag = false; break; } } return flag; } int main(){ clock_t start, end; double cpu_time_used, gpu_time_used; int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result; //Asignar memoria en el host h_a = (int*)malloc(N*sizeof(int)); h_b = (int*)malloc(N*sizeof(int)); h_c = (int*)malloc(N*sizeof(int)); h_result = (int*)malloc(N*sizeof(int)); //Inicializar los vectores for(int i = 0; i < N; i++){ h_a[i] = i; h_b[i] = i+1; h_c[i] = 0; } start = clock(); //Llamar funcion que sume dos vectores y retorne el resultado en h_c addVector(h_a, h_b, h_c); end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Tiempo invertido CPU = %lf s\n", cpu_time_used); //Asignacion de memoria en el device cudaMalloc(&d_a, N*sizeof(int)); cudaMalloc(&d_b, N*sizeof(int)); cudaMalloc(&d_c, N*sizeof(int)); //Copiar los datos del host al device cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, h_c, N * sizeof(int), cudaMemcpyHostToDevice); dim3 threads_per_block(10, 1, 1); dim3 number_of_blocks((N / threads_per_block.x) + 1, 1, 1); start = clock(); //Lanzar el kernel //kernel_add<<<1, N>>>(d_a, d_b, d_c); //kernel_add<<<N, 1>>>(d_a, d_b, d_c); kernel_add<<< number_of_blocks, threads_per_block >>>(d_a, d_b, d_c); cudaMemcpy(h_result, d_c, N*sizeof(int), cudaMemcpyDeviceToHost); end = clock(); gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Tiempo invertido GPU = %lf s\n", gpu_time_used); if(compareTo(h_c, 
h_result)){ printf("Vectores Iguales"); } else{ printf("Vectores Diferentes"); } return 0; }
23,625
#include <iostream> #include <fstream> #include <bits/stdc++.h> using namespace std; //Graph class for CPU implementation // CPU connected components implementation with DFs //from https://www.geeksforgeeks.org/program-to-count-number-of-connected-components-in-an-undirected-graph/ class Graph { // No. of vertices int V; // Pointer to an array containing adjacency lists list<int>* adj; // A function used by DFS void DFSUtil(int v, bool visited[]); public: // Constructor Graph(int V); void addEdge(int v, int w); int NumberOfconnectedComponents(); }; int Graph::NumberOfconnectedComponents() { // Mark all the vertices as not visited bool* visited = new bool[V]; // To store the number of connected components int count = 0; for (int v = 0; v < V; v++) visited[v] = false; for (int v = 0; v < V; v++) { if (visited[v] == false) { DFSUtil(v, visited); count += 1; } } return count; } void Graph::DFSUtil(int v, bool visited[]) { // Mark the current node as visited visited[v] = true; // Recur for all the vertices // adjacent to this vertex list<int>::iterator i; for (i = adj[v].begin(); i != adj[v].end(); ++i) if (!visited[*i]) DFSUtil(*i, visited); } Graph::Graph(int V) { this->V = V; adj = new list<int>[V]; } // Add an undirected edge void Graph::addEdge(int v, int w) { adj[v].push_back(w); adj[w].push_back(v); } //kernel functions __global__ void bfs(int* vertices, int* edges, bool* frontier, bool* next_frontier, bool* visited) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(frontier[tid]){ frontier[tid] = false; visited[tid] = true; for(int i=vertices[tid]; i<vertices[tid+1]; i++){ int vtx = edges[i]; if(!visited[vtx]){ next_frontier[vtx] = true; } } } } __global__ void no_memcpy_bfs(int* vertices, int* edges, bool* frontier, bool* next_frontier, bool* visited) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(frontier[tid]){ frontier[tid] = false; visited[tid] = true; for(int i=vertices[tid]; i<vertices[tid+1]; i++){ int vtx = edges[i]; if(!visited[vtx]){ 
next_frontier[vtx] = true; } } } } __global__ void next_to_visit(bool* visited, int* tovisit){ int tid = threadIdx.x + blockDim.x*blockIdx.x; if(!visited[tid]){ tovisit[0] = tid; } } __global__ void new_frontier(int* tovisit, bool* frontier){ int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid==tovisit[0]){ frontier[tovisit[0]] = true; } } __global__ void check_frontier(bool* frontier, bool* checkfrontier){ int tid = threadIdx.x + blockDim.x*blockIdx.x; if(frontier[tid]){ checkfrontier[0] = true; } } //Final GPU solver void Test_GPU_Solver_BFS(char* arg1, char*arg2) { FILE *fdata = fopen(arg2, "r"); int num_vertex, num_edge; char str[100]; fgets(str,99, fdata); sscanf(str, "%d %d", &num_vertex, &num_edge); //printf("vetex: %d, edges %d\n", num_vertex, num_edge); cudaEvent_t start,end; cudaEventCreate(&start); cudaEventCreate(&end); float gpu_time=0.0f; cudaDeviceSynchronize(); cudaEventRecord(start); //host vectors int* vertices = new int[num_vertex+1]; int* edges = new int[num_edge]; bool* frontier = new bool[num_vertex]; bool* visited = new bool[num_vertex]; //fill frontier and visited with false for(int i=0; i<num_vertex; i++){ frontier[i]=false; visited[i]=false; } //parse input FILE *f = fopen(arg1, "r"); int row, col; int prevrow = -1; float value; int vertex_count = 0; int edge_count = 0; while(fgets(str, 99, f)){ sscanf(str, "%d %d %f", &row, &col, &value); if(prevrow!=row){ vertices[vertex_count] = edge_count; vertex_count++; } edges[edge_count] = col; edge_count++; prevrow = row; } vertices[vertex_count] = edge_count; //printing vertices and edges arrays /* printf("\nvertices array: "); for(int i=0; i<vertex_count+1; i++){ printf("%d ", vertices[i]); } printf("\nedges array: "); for(int i=0; i<edge_count; i++){ printf("%d ", edges[i]); }*/ //copying vertices, edges, visited from host to device int* vertices_on_dev=0; int* edges_on_dev=0; bool* frontier_b_on_dev=0; bool* frontier_a_on_dev=0; bool* visited_on_dev=0; cudaMalloc((void**)&vertices_on_dev, 
(num_vertex+1)*sizeof(int)); cudaMalloc((void**)&edges_on_dev, num_edge*sizeof(int)); cudaMalloc((void**)&frontier_a_on_dev, num_vertex*sizeof(bool)); cudaMalloc((void**)&frontier_b_on_dev, num_vertex*sizeof(bool)); cudaMalloc((void**)&visited_on_dev, num_vertex*sizeof(bool)); cudaMemcpy(vertices_on_dev, vertices,(num_vertex+1)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(edges_on_dev, edges, num_edge*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(frontier_b_on_dev, frontier, num_vertex*sizeof(bool),cudaMemcpyHostToDevice); cudaMemcpy(visited_on_dev, frontier, num_vertex*sizeof(bool),cudaMemcpyHostToDevice); //calculating block dim and grid dim based on vertex count //large dataset factorization - 3 * 5 * 5 * 11 * 31 * 41 dim3 blocks(num_vertex, 1, 1); dim3 threadsPerBlock(1,1,1); //main bfs algorithm int next_unvisited_idx = 0; bool finished = false; int number_of_ccs = 0; int print_count = 0; while(!finished){ for(int i=std::min(next_unvisited_idx,num_vertex-1); i<num_vertex; i++){ if(!visited[i]){ frontier[i] = true; next_unvisited_idx = i+1; if(print_count>100){ double progress = (100.0*i)/((double)num_vertex); printf("Progress: %.2f%%\n", progress); print_count=0; } print_count++; break; } if(i==num_vertex-1){ finished=true; } } if(finished) break; number_of_ccs++; //copy new frontier array to device cudaMemcpy(frontier_a_on_dev, frontier, num_vertex*sizeof(bool),cudaMemcpyHostToDevice); int count = 0; bool is_frontier = true; while(is_frontier){ if(count%2==0){ //call kernel bfs<<<blocks,threadsPerBlock>>>(vertices_on_dev, edges_on_dev, frontier_a_on_dev, frontier_b_on_dev, visited_on_dev); cudaMemcpy(frontier, frontier_b_on_dev, num_vertex*sizeof(bool),cudaMemcpyDeviceToHost); } else{ //call kernel bfs<<<blocks,threadsPerBlock>>>(vertices_on_dev, edges_on_dev, frontier_b_on_dev, frontier_a_on_dev, visited_on_dev); cudaMemcpy(frontier, frontier_a_on_dev, num_vertex*sizeof(bool),cudaMemcpyDeviceToHost); } for(int i=0; i<num_vertex; i++){ if(frontier[i]) 
break; if(i==num_vertex-1) is_frontier = false; } count++; } cudaMemcpy(visited,visited_on_dev, num_vertex*sizeof(bool),cudaMemcpyDeviceToHost); } //printf("\nNumber of CCs: %d", number_of_ccs); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&gpu_time,start,end); //printf("\nGPU runtime: %.4f ms\n",gpu_time); printf("%.4f, ", gpu_time); cudaEventDestroy(start); cudaEventDestroy(end); } //Failed optimizations GPU solver void Test_GPU_Solver_BFS_reduce_memcpy(char* arg1, char*arg2) { FILE *fdata = fopen(arg2, "r"); int num_vertex, num_edge; char str[100]; fgets(str,99, fdata); sscanf(str, "%d %d", &num_vertex, &num_edge); printf("vetex: %d, edges %d", num_vertex, num_edge); cudaEvent_t start,end; cudaEventCreate(&start); cudaEventCreate(&end); float gpu_time=0.0f; cudaDeviceSynchronize(); cudaEventRecord(start); //host vectors int* vertices = new int[num_vertex+1]; int* edges = new int[num_edge]; bool* frontier = new bool[num_vertex]; bool* visited = new bool[num_vertex]; //fill frontier and visited with false for(int i=0; i<num_vertex; i++){ frontier[i]=false; visited[i]=false; } //parse input FILE *f = fopen(arg1, "r"); int row, col; int prevrow = -1; float value; int vertex_count = 0; int edge_count = 0; while(fgets(str, 99, f)){ sscanf(str, "%d %d %f", &row, &col, &value); if(prevrow!=row){ vertices[vertex_count] = edge_count; vertex_count++; } edges[edge_count] = col; edge_count++; prevrow = row; } vertices[vertex_count] = edge_count; //printing vertices and edges arrays /* printf("\nvertices array: "); for(int i=0; i<vertex_count+1; i++){ printf("%d ", vertices[i]); } printf("\nedges array: "); for(int i=0; i<edge_count; i++){ printf("%d ", edges[i]); }*/ dim3 blocks(num_vertex, 1, 1); dim3 threadsPerBlock(1,1,1); //false array bool false_array[1]; false_array[0] = false; int zero_array[1]; zero_array[0] = 0; //copying vertices, edges, visited from host to device int* vertices_on_dev=0; int* edges_on_dev=0; bool* frontier_b_on_dev=0; 
bool* frontier_a_on_dev=0; bool* visited_on_dev=0; bool* checkfrontier=0; int* visitnext=0; cudaMalloc((void**)&vertices_on_dev, (num_vertex+1)*sizeof(int)); cudaMalloc((void**)&edges_on_dev, num_edge*sizeof(int)); cudaMalloc((void**)&frontier_a_on_dev, num_vertex*sizeof(bool)); cudaMalloc((void**)&frontier_b_on_dev, num_vertex*sizeof(bool)); cudaMalloc((void**)&visited_on_dev, num_vertex*sizeof(bool)); cudaMalloc((void**)&checkfrontier, sizeof(bool)); cudaMalloc((void**)&visitnext, sizeof(bool)); cudaMemcpy(vertices_on_dev, vertices,(num_vertex+1)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(edges_on_dev, edges, num_edge*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(frontier_b_on_dev, frontier, num_vertex*sizeof(bool),cudaMemcpyHostToDevice); cudaMemcpy(visited_on_dev, frontier, num_vertex*sizeof(bool),cudaMemcpyHostToDevice); cudaMemcpy(checkfrontier, false_array, sizeof(bool),cudaMemcpyHostToDevice); cudaMemcpy(visitnext, zero_array, sizeof(int),cudaMemcpyHostToDevice); //calculating block dim and grid dim based on vertex count //main bfs algorithm int next_unvisited_idx = 0; bool finished = false; int number_of_ccs = 0; int print_count = 0; while(!finished){ for(int i=std::min(next_unvisited_idx,num_vertex-1); i<num_vertex; i++){ if(!visited[i]){ frontier[i] = true; next_unvisited_idx = i+1; if(print_count>100){ double progress = (100.0*i)/((double)num_vertex); printf("Progress: %.2f%%\n", progress); print_count=0; } print_count++; break; } if(i==num_vertex-1){ finished=true; } } if(finished) break; /* cudaMemcpy(visited,visited_on_dev, num_vertex*sizeof(bool),cudaMemcpyDeviceToHost); printf("\n"); for(int i=0; i<2000; i++){ printf("%d ", visited[i]); } printf("\n"); next_to_visit<<<blocks,threadsPerBlock>>>(visited_on_dev,visitnext); cudaDeviceSynchronize(); if(print_count>100){ next_unvisited_idx+=.05; printf("approximate progress: %.2f%%\n", next_unvisited_idx); print_count = 0; } print_count++; cudaMemcpy(current_visit_next, visitnext, 
sizeof(int),cudaMemcpyDeviceToHost); printf("current visit next: %d, previous visit next %d\n",current_visit_next[0], previous_visit_next); if(current_visit_next[0]==previous_visit_next){ finished = true; break; } previous_visit_next = current_visit_next[0]; new_frontier<<<blocks,threadsPerBlock>>>(visitnext, frontier_a_on_dev); cudaDeviceSynchronize(); */ number_of_ccs++; //copy new frontier array to device int count = 0; bool is_frontier[1]; is_frontier[0] = true; while(is_frontier[0]){ bool lastcall; lastcall= false; if(count%2==0){ //call kernel bfs<<<blocks,threadsPerBlock>>>(vertices_on_dev, edges_on_dev, frontier_a_on_dev, frontier_b_on_dev, visited_on_dev); } else{ //call kernel bfs<<<blocks,threadsPerBlock>>>(vertices_on_dev, edges_on_dev, frontier_b_on_dev, frontier_a_on_dev, visited_on_dev); lastcall=true; } if(lastcall){ check_frontier<<<blocks,threadsPerBlock>>>(frontier_a_on_dev, checkfrontier); } else{ check_frontier<<<blocks,threadsPerBlock>>>(frontier_b_on_dev, checkfrontier); } cudaDeviceSynchronize(); cudaMemcpy(is_frontier, checkfrontier, sizeof(bool),cudaMemcpyDeviceToHost); cudaMemcpy(checkfrontier, false_array, sizeof(bool),cudaMemcpyHostToDevice); count++; } cudaMemcpy(visited,visited_on_dev, num_vertex*sizeof(bool),cudaMemcpyDeviceToHost); } printf("\nNumber of CCs: %d", number_of_ccs); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&gpu_time,start,end); printf("\nGPU runtime: %.4f ms\n",gpu_time); cudaEventDestroy(start); cudaEventDestroy(end); } //CPU solver void Test_CPU_Solver_BFS(char* arg1, char*arg2){ cudaEvent_t start,end; cudaEventCreate(&start); cudaEventCreate(&end); float gpu_time=0.0f; cudaDeviceSynchronize(); cudaEventRecord(start); FILE *fdata = fopen(arg2, "r"); int num_vertex, num_edge; char str[100]; fgets(str,99, fdata); sscanf(str, "%d %d", &num_vertex, &num_edge); //printf("vetex: %d, edges %d\n", num_vertex, num_edge); Graph g(num_vertex); //parse input FILE *f = fopen(arg1, "r"); int row, col; 
float value; while(fgets(str, 99, f)){ sscanf(str, "%d %d %f", &row, &col, &value); g.addEdge(row, col); } int number_of_ccs = g.NumberOfconnectedComponents(); //printf("\nNumber of CCs: %d", number_of_ccs); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&gpu_time,start,end); //printf("\nCPU runtime: %.4f ms\n",gpu_time); printf("%.4f, ", gpu_time); cudaEventDestroy(start); cudaEventDestroy(end); } int main(int argc, char* argv[]) { printf("Testing GPU solver:\n"); for(int i=0; i<20; i++){ Test_GPU_Solver_BFS(argv[1], argv[2]); } printf("\nTesting CPU solver:\n"); for(int i=0; i<20; i++){ Test_CPU_Solver_BFS(argv[1], argv[2]); } return 0; }
23,626
/**
 * block loading rho calculation. should be much faster
 * system('nvcc -ptx citydist_rho4.cu')
 * iA is multiple of CHUNK (16)
 */

#include <cuda_runtime.h>
// #include "cublas_v2.h"
#include <math.h>

// NOTE(review): these macros are not wrapped in outer parentheses and
// evaluate their argument twice — safe in the usages below, but fragile
// if reused in larger expressions or with side-effecting arguments.
#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define NC (1+6*2) // number of Channels
#define CHUNK 16 //previously defined as CHUNK
#define SINGLE_INF (3.402E+38)

/** Main entry point.
 * Works out where the current thread should read/write to global memory
 * and calls doIterations to do the actual work.
 * Step through one B at a time
 *
 * For each of CHUNK consecutive points starting at iA, finds the nearest
 * neighbour (L1 distance over nC channels of A, row-major nC x nA) among
 * candidates iB whose sorted index I[iB] is smaller than the point's own
 * I value, restricted to |iA+iA1 - iB| <= nneigh (nneigh==0 disables the
 * window). Writes sqrtf(min distance) to D and the argmin index to N.
 * Expects blockDim.x == NTHREADS (shared arrays are sized by NTHREADS)
 * and nC <= NTHREADS. Grid may be 2D; blocks are linearised below.
 */
__global__ void citydist_sorted_delta(float const *A, unsigned int const *I, float *D, unsigned int *N, int const nA, int const nneigh, int const nC){
    // int iA = blockIdx.x * CHUNK;
    int iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK;
    int iA1;
    int tx = threadIdx.x;
    float vr_minDist1[CHUNK];          // per-thread running minima, one per point in the chunk
    unsigned int vi_minIdx1[CHUNK];    // matching argmin indices
    __shared__ unsigned int svi_I_A1[CHUNK];
    __shared__ float smr_A1[NC][CHUNK];             // cached columns of A for this chunk
    __shared__ float smr_delta1[NTHREADS][CHUNK];   // per-thread partial minima for final reduction
    __shared__ unsigned int smi_nneigh1[NTHREADS][CHUNK];
    // cache A
    int iC = tx;
    if (iC < nC){ //use tx as iC
        for (iA1 = 0; iA1 < CHUNK; ++iA1){
            if (iA + iA1 < nA){
                smr_A1[iC][iA1] = A[iC + (iA+iA1)*nC];
            }else{
                // pad past-the-end points with +inf so they never win comparisons
                smr_A1[iC][iA1] = SINGLE_INF;
            }
        }
    }
    iA1 = tx; // batch index
    if (iA1 < CHUNK){
        if (iA + iA1 < nA){
            svi_I_A1[iA1] = I[iA + iA1];
        }else{
            svi_I_A1[iA1] = nA + 1; // out of range
        }
    }
    for (iA1 = 0; iA1 < CHUNK; ++iA1){
        vr_minDist1[iA1] = SINGLE_INF;
        vi_minIdx1[iA1] = iA + iA1;    // default: a point is its own nearest neighbour
    }
    __syncthreads(); // fill in the shared memory A

    // candidate window around the chunk (whole range when nneigh==0)
    int iB_min = MAX(iA - nneigh, 0);
    int iB_max = MIN(iA + nneigh + CHUNK - 1, nA-1);
    if (nneigh==0){
        iB_min = 0;
        iB_max = nA-1;
    }
    // threads stride over candidates; each thread keeps CHUNK running minima
    int iB = iB_min + tx;
    while (iB <= iB_max){
        float vr_dist1[CHUNK];
        for (iA1 = 0; iA1 < CHUNK; ++iA1) vr_dist1[iA1] = 0.0f;
        // accumulate L1 distance channel by channel against the cached chunk
        for (iC = 0; iC < nC; ++iC){
            float Btemp = A[iC + iB*nC];
            for (iA1 = 0; iA1 < CHUNK; ++iA1){
                float temp = Btemp - smr_A1[iC][iA1];
                vr_dist1[iA1] += ABS(temp);
            }
        }
        unsigned int IiB = I[iB];
        for (iA1 = 0; iA1 < CHUNK; ++iA1){
            if (vr_dist1[iA1] < vr_minDist1[iA1]){
                // only candidates with a strictly smaller sorted index may win
                if (IiB < svi_I_A1[iA1]){
                    //if (vr_dist1[iA1] < vr_minDist1[iA1] && vr_dist1[iA1]>0){
                    //if (IiB < svi_I_A1[iA1] && iB != iA+iA1){
                    int dab = ABS(iA + iA1 - iB);
                    if (dab <= nneigh || nneigh==0){
                        vr_minDist1[iA1] = vr_dist1[iA1];
                        vi_minIdx1[iA1] = iB;
                    }
                }
            }
        }
        iB += blockDim.x;
    } // while

    // collect result from each thread
    for (iA1 = 0; iA1 < CHUNK; ++iA1){
        smr_delta1[tx][iA1] = vr_minDist1[iA1];
        smi_nneigh1[tx][iA1] = vi_minIdx1[iA1];
    }
    __syncthreads();

    // final count: the first CHUNK threads each reduce one point's partials
    iA1 = tx;
    if (iA1 < CHUNK && iA + iA1 < nA){
        float minDist1 = SINGLE_INF;
        unsigned int minIdx1 = iA + iA1;
        for (int tx1=0; tx1<blockDim.x; ++tx1){
            if (smr_delta1[tx1][iA1] < minDist1){
                minDist1 = smr_delta1[tx1][iA1];
                minIdx1 = smi_nneigh1[tx1][iA1];
            }
        }
        D[iA + iA1] = sqrtf(minDist1);
        N[iA + iA1] = minIdx1;
    }
} // func
23,627
#include "test_probe.cuh"

/**
 * Build a test ProbeConfig with n_active sites divided evenly into
 * n_groups channel groups.
 *
 * Channels are numbered 0..n_active-1 in order; site labels are channel+1.
 * Site coordinates follow a zig-zag: after an odd-numbered site in a group,
 * x advances by 25.0; after an even-numbered one, y advances by 20.0.
 * The running (x, y) is NOT reset between groups.
 *
 * Throws std::domain_error when n_groups > n_active or when n_groups does
 * not evenly divide n_active.
 */
ProbeConfig make_probe_config(unsigned n_channels, unsigned n_active, unsigned n_groups, double srate_hz) {
    // don't check that n_total >= n_active for test purposes
    if (n_groups > n_active) {
        throw std::domain_error(
            "Number of groups cannot exceed number of active sites.");
    } else if (n_active % n_groups != 0) {
        throw std::domain_error(
            "Number of groups must evenly divide number of active sites.");
    }

    ProbeConfig cfg;
    cfg.n_total = n_channels;
    cfg.spatial_extent = 50.0;  // fixed extent for test probes
    cfg.srate_hz = srate_hz;

    // divide n_active evenly into n_groups
    auto chans_per_group = n_active / n_groups;
    auto k = 0;                 // running channel counter across all groups
    double x = 0.0, y = 0.0;    // running site coordinates (carry over between groups)

    for (auto i = 0; i < n_groups; i++) {
        ChannelGroup grp = ChannelGroup{
            std::vector<unsigned>(chans_per_group), // channels
            std::vector<unsigned>(chans_per_group), // site_labels
            std::vector<double>(chans_per_group),   // x_coords
            std::vector<double>(chans_per_group),   // y_coords
        };

        for (auto j = 0; j < chans_per_group; j++) {
            grp.site_labels.at(j) = k + 1;  // labels are 1-based
            grp.channels.at(j) = k++;
            grp.x_coords.at(j) = x;
            grp.y_coords.at(j) = y;

            if (j % 2 == 1) {
                x += 25.0;
            } else {
                y += 20.0;
            }
        }

        cfg.channel_groups.insert(std::pair<unsigned, ChannelGroup>(i, grp));
    }

    return cfg;
}

/**
 * Convenience wrapper: construct a Probe directly from the test
 * configuration produced by make_probe_config (same argument semantics
 * and the same std::domain_error conditions).
 */
Probe make_probe(unsigned n_channels, unsigned n_active, unsigned n_groups, double srate_hz) {
    return Probe(make_probe_config(n_channels, n_active, n_groups, srate_hz));
}
23,628
//
// Created by goforbroke on 26.12.2020.
//

#include <iostream>
#include <cstdlib>
#include <cuda_profiler_api.h>

// Element-wise vector addition wrapped in an artificial outer loop.
// NOTE(review): the stride uses only blockDim.x and the index only
// threadIdx.x, so every one of the launched blocks repeats identical
// work, and the outer "load" loop multiplies that again — per the
// original comment this appears to be a deliberate profiling load,
// not a correctness bug; confirm before "fixing".
__global__ void vector_add(float *out, float *a, float *b, int n) {
    size_t index = threadIdx.x;
    size_t stride = blockDim.x;
    for (int load = index; load < n; load += stride) {
        // fake addition load
        for (int i = index; i < n; i += stride) {
            out[i] = a[i] + b[i];
        }
    }
}

// Usage: prog <NSAMPLES>. Adds two constant vectors on the GPU and
// checks for a launch error before stopping the profiler.
int main(int argc, char **argv) {
    // NOTE(review): argv[1] is read without checking argc — running with
    // no argument is undefined behaviour; callers must pass NSAMPLES.
    size_t NSAMPLES = atoi(argv[1]);

    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    // Allocate memory (host and device)
    a = (float *) malloc(sizeof(float) * NSAMPLES);
    b = (float *) malloc(sizeof(float) * NSAMPLES);
    out = (float *) malloc(sizeof(float) * NSAMPLES);
    cudaMalloc((void **) &d_a, sizeof(float) * NSAMPLES);
    cudaMalloc((void **) &d_b, sizeof(float) * NSAMPLES);
    cudaMalloc((void **) &d_out, sizeof(float) * NSAMPLES);

    // Initialize array with constants so out should be all 3.0f
    for (long i = 0; i < NSAMPLES; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }
    cudaMemcpy(d_a, a, sizeof(float) * NSAMPLES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * NSAMPLES, cudaMemcpyHostToDevice);

    vector_add<<<256, 256>>>(d_out, d_a, d_b, NSAMPLES);

    // Launch-configuration errors surface via cudaGetLastError()
    cudaError_t err = cudaGetLastError(); // add
    if (err != cudaSuccess) std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; // add
    cudaProfilerStop();

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(out, d_out, sizeof(float) * NSAMPLES, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(a);
    free(b);
    free(out);
}
23,629
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<assert.h>
#include<unistd.h>

/* Number of vector elements. Was `1E6` (a double literal), which forced
 * silent double<->int conversions in array sizes, loop bounds and the
 * grid computation; an integer constant keeps all of that exact. */
#define NUMELEMENT 1000000

/* Reference CPU implementation: h_C[i] = h_A[i] + h_B[i] for i in [0, n). */
void vecADD(float* h_A, float* h_B, float* h_C, int n)
{
    for (int i = 0 ; i < n ; i++)
        h_C[i] = h_A[i] + h_B[i];
}

/* GPU implementation: one thread per element, guarded so a grid that
 * over-covers n does not write out of bounds. */
__global__ void vecADDKernel(float* d_A, float* d_B, float* d_C, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n)
        d_C[i] = d_A[i] + d_B[i];
}

/* Elapsed time between two CLOCK_REALTIME stamps, in milliseconds.
 * Bug fix: the original divided the nanosecond difference by 1E3, which
 * yields MICROseconds, while every caller printed the result labelled
 * "msec". Dividing by 1E6 makes the value match the label. */
double get_mesc (struct timespec t1, struct timespec t2){
    return ((t2.tv_sec - t1.tv_sec) * 1E9 + (t2.tv_nsec - t1.tv_nsec)) / 1E6;
}

struct timespec t_start, t_end;

/* Fills two random vectors, adds them on CPU and GPU, reports both times
 * and asserts the results are bit-identical (same single-precision
 * additions in both implementations, so exact equality is expected). */
int main()
{
    float *h_A, *h_B, *h_C;
    h_A = (float *)malloc(NUMELEMENT*sizeof(float));
    h_B = (float *)malloc(NUMELEMENT*sizeof(float));
    h_C = (float *)malloc(NUMELEMENT*sizeof(float));

    srand(222);  /* fixed seed for reproducibility */
    for (int i = 0 ; i < NUMELEMENT ; i++) {
        h_A[i] = (float)(rand()%1000)/1000;
        h_B[i] = (float)(rand()%1000)/1000;
        h_C[i] = 0.0;
    }

    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, NUMELEMENT*sizeof(float));
    cudaMalloc((void**)&d_B, NUMELEMENT*sizeof(float));
    cudaMalloc((void**)&d_C, NUMELEMENT*sizeof(float));
    cudaMemcpy(d_A, h_A, NUMELEMENT*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, NUMELEMENT*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, NUMELEMENT*sizeof(float), cudaMemcpyHostToDevice);

    clock_gettime(CLOCK_REALTIME, &t_start);
    vecADD(h_A, h_B, h_C, NUMELEMENT);
    clock_gettime(CLOCK_REALTIME, &t_end);
    printf("CPU Time :: %.0f msec\n", get_mesc(t_start, t_end));

    clock_gettime(CLOCK_REALTIME, &t_start);
    /* integer ceiling division replaces ceil() on a double expression;
     * cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize */
    vecADDKernel<<<(NUMELEMENT + 63) / 64, 64>>>(d_A, d_B, d_C, NUMELEMENT);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &t_end);
    printf("GPU Time :: %.0f msec\n", get_mesc(t_start, t_end));

    float* h_R = (float *)malloc(NUMELEMENT*sizeof(float));
    cudaMemcpy(h_R, d_C, NUMELEMENT*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0 ; i < NUMELEMENT ; i++)
        assert(h_R[i] == h_C[i]);

    free(h_A); free(h_B); free(h_C); free(h_R);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}
23,630
/*TODO: rewrite timeit.cu to something like this */
#include <string>
#include <list>

/* Wraps a pair of CUDA events to time a named span of GPU work.
 * Bug fix: the destructor was declared but never defined (a link error
 * for any user) and the cudaEvent_t handles were leaked; it is now
 * defined inline and releases both events. */
class Event {
public:
    Event (std::string name): name(name) {
        cudaEventCreate( &(_start) );
        cudaEventCreate( &(_stop) );
    };
    /* Record the start event on the default stream. */
    void start() {
        cudaEventRecord( _start, 0 );
    };
    /* Record the stop event, wait for it, and cache the elapsed time (ms). */
    void stop() {
        cudaEventRecord( _stop, 0 );
        cudaEventSynchronize( _stop );
        cudaEventElapsedTime( &(_elapsedTime), _start, _stop );
    }
    virtual ~Event () {
        cudaEventDestroy( _start );
        cudaEventDestroy( _stop );
    }

protected:
    float _elapsedTime;   // milliseconds, valid only after stop()
    cudaEvent_t _start;
    cudaEvent_t _stop;
    std::string name;
};

/* A named group of Events, most recently started first.
 * Bug fix: destructor defined (was declared only) and the heap-allocated
 * list plus its Events are now released instead of leaked. */
class EventBlock {
public:
    EventBlock (std::string name): name(name) {
        eventList = new std::list<Event *> ();
    };
    virtual ~EventBlock () {
        for (Event *v : *eventList) delete v;
        delete eventList;
    }
    /* Create a new named event and start it immediately. */
    void startEvent(std::string name) {
        Event *v = new Event(name);
        v->start();
        this->eventList->push_front(v);
    };
    /* Stop the most recently started event.
     * NOTE(review): undefined behaviour if no event has been started. */
    void stopEvent() {
        Event *v = this->eventList->front();
        v->stop();
    }

private:
    std::string name;
    std::list <Event *> *eventList;
};

/* Top-level logger holding a stack of EventBlocks.
 * Bug fix: destructor defined (was declared only); blocks and the list
 * itself are now freed. */
class PerformanceLogger {
public:
    PerformanceLogger (std::string name): name(name) {
        eventBlockList = new std::list<EventBlock *> ();
    };
    virtual ~PerformanceLogger () {
        for (EventBlock *v : *eventBlockList) delete v;
        delete eventBlockList;
    }
    /* Open a new named block; subsequent events go into it. */
    void startBlock(std::string name) {
        EventBlock *v = new EventBlock(name);
        this->eventBlockList->push_front(v);
    };
    /* Start a named event in the current (most recent) block. */
    void startEvent(std::string name) {
        this->eventBlockList->front()->startEvent(name);
    }
    /* Stop the current block's most recent event.
     * The name parameter is kept for interface compatibility but unused. */
    void stopEvent(std::string name) {
        this->eventBlockList->front()->stopEvent();
    }

private:
    std::string name;
    std::list <EventBlock *> *eventBlockList;
};

/*class PerformanceFileLogger*/
/*{*/
/*public:*/
/*PerformanceLogger (std::string fname){*/
/*logFile.open (fname);*/
/*}*/
/*~PerformanceLogger (){*/
/*if (logFile.is_open()) {*/
/*logFile << std::endl << std::endl;*/
/*logFile.close();*/
/*} */
/*}*/
/*friend FileLogger &operator << (FileLogger &logger, const char *text) {*/
/*logger.logFile << text << std::endl;*/
/*return logger;*/
/*}*/
/*// Make it Non Copyable (or you can inherit from sf::NonCopyable if you want)*/
/*FileLogger (const FileLogger &) = delete;*/
/*FileLogger &operator= (const FileLogger &) = delete;*/
/*private:*/
/*std::ofstream logFile;*/
/*};*/
23,631
#include "includes.h"

// Component-wise difference of two double2 values: returns a - b.
__device__ double2 subtract(double2 a, double2 b){
    double2 result;
    result.x = a.x - b.x;
    result.y = a.y - b.y;
    return result;
}

// Minimal device-side harness: stores subtract(a[0], b[0]) into c[0].
__global__ void subtract_test(double2 *a, double2 *b, double2 *c){
    double2 diff = subtract(a[0], b[0]);
    c[0] = diff;
}
23,632
/*
 * Copyright (c) 2019 Opticks Team. All Rights Reserved.
 *
 * This file is part of Opticks
 * (see https://bitbucket.org/simoncblyth/opticks).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Demo: printing device_vector contents with thrust::for_each and
// device-side printf functors (one for int, one for float4).

#include <thrust/for_each.h>
#include <thrust/device_vector.h>

// Prints a single int per line (runs on the device).
struct printf_functor_i
{
  __host__ __device__
  void operator()(int x)
  {
      printf("%d\n", x);
  }
};

// Prints the four components of a float4 on one line (runs on the device).
struct printf_functor_f4
{
  __host__ __device__
  void operator()(float4 v)
  {
      printf("%10.4f %10.4f %10.4f %10.4f \n", v.x, v.y, v.z, v.w);
  }
};

int main()
{
    // element-wise assignment to a device_vector performs tiny H2D copies
    thrust::device_vector<int> ivec(3);
    ivec[0] = 0;
    ivec[1] = 1;
    ivec[2] = 2;
    thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());

    thrust::device_vector<float4> fvec(3);
    fvec[0] = make_float4( 1.f, 2.f, 3.f, 4.f );
    fvec[1] = make_float4( 1.f, 2.f, 3.f, 4.f );
    fvec[2] = make_float4( 1.f, 2.f, 3.f, 4.f );
    thrust::for_each(fvec.begin(), fvec.end(), printf_functor_f4());

    cudaDeviceSynchronize();

    // Without the sync the process will typically terminate before
    // any output stream gets pumped out to the terminal when
    // iterating over device_ptr.
    // Curiously that doesnt seem to happen with device_vector ?
    // Maybe their dtors are delayed by the dumping
}
23,633
/* Per-thread weighted selection kernel.
 *
 * For thread idx: walks the element list of length size[idx], accumulating
 * frac[idx][i] * xs[idx][i], and records in selection[idx] the index at
 * which the loop stopped (at most size[idx]-1; the last element is chosen
 * by fall-through when the loop completes).
 *
 * Preconditions (not checked): ran, total, size, frac, xs, selection all
 * have at least gridDim.x*blockDim.x entries, and size[idx] >= 1.
 */
__global__ void select(const double* ran, const double* total, const int* size, double** frac, double** xs, int* selection)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // threshold drawn uniformly in [0, total[idx]) assuming ran[idx] in [0,1)
    double cutoff = total[idx] * ran[idx];
    double accum = 0;
    const double* el_frac = frac[idx];
    const double* el_xs = xs[idx];
    unsigned int i = 0;
    unsigned int imax = size[idx] - 1;
    for (; i != imax; ++i) {
        accum += el_frac[i] * el_xs[i];
        // NOTE(review): inverse-CDF sampling conventionally stops when the
        // running sum reaches the threshold (accum >= cutoff); this stops
        // when cutoff >= accum, which looks inverted and will usually break
        // on the first iteration — confirm the intended semantics.
        if (cutoff >= accum) break;
    }
    selection[idx] = i;
}
23,634
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>

// int pos = threadIdx.x + blockIdx.x * blockDim.x;
#define BLOCKS 8
#define THREADS 32

// Emits one line of device-side printf output per thread, but only for
// the first 10 threads of each block; the rest return immediately.
__global__ void kernelVacio( void )
{
    if (threadIdx.x >= 10) {
        return;  // threads 10..THREADS-1 stay silent
    }
    printf("Data: %s Id Thread: %d Id block : %d Num threads block : %d\n",
           "helloWorld!", threadIdx.x, blockIdx.x, blockDim.x);
}

// Launches a BLOCKS x THREADS grid and blocks until the device printf
// buffer has been flushed to the host.
int main( void )
{
    kernelVacio<<<BLOCKS, THREADS>>>();
    cudaDeviceSynchronize();
    return 0;
}
23,635
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "cudaHelpers.cuh"

// Checks for a pending launch error, then blocks until all previously
// issued device work has completed. Both results go through the
// CUDA_CHECK_RETURN macro (defined in cudaHelpers.cuh).
void waitForKernel()
{
    CUDA_CHECK_RETURN(cudaGetLastError());
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
}

// Destroys all allocations and state on the current device.
void resetDevice()
{
    CUDA_CHECK_RETURN(cudaDeviceReset());
}

// Selects the CUDA device used by subsequent runtime calls on this host thread.
void setDevice(int device)
{
    CUDA_CHECK_RETURN(cudaSetDevice(device));
}
23,636
#include <cuda_runtime_api.h>
#include <stdio.h>

// Prints each thread's 2D global coordinates, computed from a 2D
// block/grid launch. Note the printf lists (y, x) order.
__global__ void Foo()
{
    int tid_x = threadIdx.x + blockIdx.x * blockDim.x;
    int tid_y = threadIdx.y + blockIdx.y * blockDim.y;
    printf("Hello world, thread:(%d, %d) \n", tid_y, tid_x);
}

/// same code
// Same coordinates derived from a 1D launch: the flat global id is
// decomposed into (row, column) of a logical `width`-column grid.
__global__ void Fuck(int width)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int tid_x = tid % width;
    int tid_y = tid / width;
    printf("fuck the world, thread id: %d = (%d, %d) \n", tid, tid_y, tid_x);
}

// Launches both kernels with equivalent 36-thread configurations
// (2x2 blocks of 3x3 threads vs 4 blocks of 9 threads) and waits for
// the device printf output to flush.
int main(int argc, char* argv[])
{
    cudaSetDevice(0);

    dim3 block (3, 3);
    dim3 grid (2, 2);
    Foo<<<grid, block>>>();

    Fuck<<<4, 9>>>(6);

    //cudaDeviceReset();
    cudaDeviceSynchronize();
    return 0;
}
23,637
#include<stdio.h>
#include<math.h>
#include<cuda.h>

#define TWW 32 // Setting TileWidth (blockDim is TWW x TWW)

/*----------Kernel Function------------*/
/* Tiled matrix multiply: C (MxK) = A (MxN) * B (NxK), row-major.
 * Precondition: M, N and K are all multiples of TWW — tiles are loaded
 * and the result stored without bounds checks. */
__global__ void matMul(double *d_a, double *d_b, double *d_c, int M, int N, int K){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    __shared__ double ds_a[TWW][TWW];
    __shared__ double ds_b[TWW][TWW];

    double cval = 0.0;
    for(int t = 0; t < N/TWW; t++){
        // Loading data from Global memory to Shared memory (one tile each)
        ds_a[threadIdx.y][threadIdx.x] = d_a[row*N + t*TWW + threadIdx.x];
        ds_b[threadIdx.y][threadIdx.x] = d_b[(t*TWW + threadIdx.y)*K + col];
        __syncthreads();           // all loads done before any thread reads
        for(int k = 0; k < TWW; k++){
            cval += ds_a[threadIdx.y][k] * ds_b[k][threadIdx.x];
        }
        __syncthreads();           // all reads done before tiles are overwritten
    }
    d_c[row*K + col] = cval;
}
/*------------------------------*/

int main(int argc, char const *argv[])
{
    /* Matrix A size = M X N and Matrix B size = N X K (multiples of TWW) */
    int N = 800, M = N, K = N;

    /* Bug fix: the original declared h_a/h_b/h_c as ~5 MB automatic
     * arrays (3 x 800*800 doubles = ~15 MB total), overflowing the
     * default stack. Allocate them on the heap, stored row-major. */
    double *h_a = (double *)malloc((size_t)M * N * sizeof(double));
    double *h_b = (double *)malloc((size_t)N * K * sizeof(double));
    double *h_c = (double *)malloc((size_t)M * K * sizeof(double));
    if (h_a == NULL || h_b == NULL || h_c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    double *d_a, *d_b, *d_c;
    cudaEvent_t start, stop;
    float ms;

    // Generating matrices (same rand() call order as before)
    for(int i = 0; i < M; i++){
        for(int j = 0; j < N; j++){
            h_a[i*N + j] = rand() % 100;
        }
    }
    for(int i = 0; i < N; i++){
        for(int j = 0; j < K; j++){
            h_b[i*K + j] = rand() % 100;
        }
    }

    // taking block dimension as TWW X TWW
    dim3 dimBlock(TWW, TWW);
    dim3 dimGrid(K/TWW, M/TWW);

    // allocating device memory
    cudaMalloc(&d_a, M*N*sizeof(double));
    cudaMalloc(&d_b, N*K*sizeof(double));
    cudaMalloc(&d_c, M*K*sizeof(double));

    // copying data in device memory
    cudaMemcpy(d_a, h_a, M*N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N*K*sizeof(double), cudaMemcpyHostToDevice);

    // Creating timestamp events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Recording Kernel start time
    cudaEventRecord(start, 0);

    // calling kernel function
    matMul<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, M, N, K);

    // Recording Kernel stop time
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    printf("\nTime:%f ", ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    /* Bug fix: the result was copied back to the host twice; once suffices. */
    cudaMemcpy(h_c, d_c, M*K*sizeof(double), cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
23,638
#define DIM 8192

#include<stdio.h>
#include<stdlib.h>
#include <chrono>

// Print an entire DIM x DIM row-major matrix to stdout (debug helper).
void printMatrix(double *M)
{
    int i, j;
    for (i = 0; i < DIM; i++) {
        for (j = 0; j < DIM; j++) {
            printf("%f ", M[i * DIM + j]);
        }
        printf("\n");
    }
}

// Fill a DIM x DIM row-major matrix with deterministic values (i + j).
// (The original comment said "random", but the values are not random.)
void populateMatrix(double *M)
{
    int i, j;
    for (i = 0; i < DIM; i++) {
        for (j = 0; j < DIM; j++) {
            M[i * DIM + j] = (double)(i + j);
        }
    }
}

// Naive O(DIM^3) host matrix multiply: C = A * B, all row-major.
// Bug fix: the accumulator was declared `int`, truncating every double
// product during accumulation and overflowing for large sums; it now
// accumulates in double as the element type requires.
void MatrixMulHost(double *A, double *B, double *C)
{
    int c, d, k;
    for (c = 0; c < DIM; c++) {
        for (d = 0; d < DIM; d++) {
            double Pvalue = 0.0;
            for (k = 0; k < DIM; k++) {
                Pvalue += A[c * DIM + k] * B[k * DIM + d];
            }
            C[c * DIM + d] = Pvalue;
        }
    }
}

// Times a single DIM x DIM host multiplication and prints the elapsed
// seconds. Note: DIM = 8192 means ~512 MB per matrix and 8192^3 FLOPs.
int main(){
    double *A = (double *)malloc(DIM * DIM * sizeof(double));
    double *B = (double *)malloc(DIM * DIM * sizeof(double));
    double *C = (double *)malloc(DIM * DIM * sizeof(double));

    populateMatrix(A);
    populateMatrix(B);

    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    MatrixMulHost(A, B, C);
    std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();

    double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
    printf("%f\n", tempo);
    // printMatrix(C);
}
23,639
#include "includes.h"

/* Forward convolution, one thread per output element.
 *
 * The flat thread id is decomposed (innermost first) into
 * (x, y, filter, batch) over an output tensor of
 * batch_size * out_size_z * out_size_y * out_size_x elements, then the
 * kernel window is applied to the zero-padded input at
 * (x*stride, y*stride). `filter_size` is the per-filter weight count
 * (stride between filters in the flat `filters` array).
 * Preconditions (not checked): padded_in is large enough that
 * mapped_x + kernel_size <= padded_in_size_x (same for y).
 */
__global__ void calcConvolutionForwardGPU( float *out, float *padded_in, float *filters, int padded_in_size_x, int padded_in_size_y, int padded_in_size_z, int batch_size, int out_size_x, int out_size_y, int out_size_z, int kernel_size, int stride, int filter_size)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    int id_out = id;  // keep the undecomposed id for the final store

    if (id_out < batch_size * out_size_x * out_size_y * out_size_z) {
        // peel coordinates off the flat id: x fastest, then y, filter, batch
        int x = id % out_size_x;
        id /= out_size_x;
        int y = id % out_size_y;
        id /= out_size_y;
        int filter = id % out_size_z;
        id /= out_size_z;
        int b = id;

        // top-left corner of the receptive field in the padded input
        int mapped_x = x * stride;
        int mapped_y = y * stride;

        float sum = 0.0;
        for ( int z = 0; z < padded_in_size_z; ++z ){ // padded_in_size_z = in_size_z
            for ( int j = 0; j < kernel_size; ++j ){
                for ( int i = 0; i < kernel_size; ++i ){
                    int padded_in_index =
                        b * (padded_in_size_x * padded_in_size_y * padded_in_size_z) +
                        z * (padded_in_size_x * padded_in_size_y) +
                        (mapped_y + j) * (padded_in_size_x) +
                        (mapped_x + i);
                    int filter_index = z * (kernel_size * kernel_size) + j * kernel_size + i;
                    sum += filters[filter * filter_size + filter_index] * padded_in[padded_in_index];
                }
            }
        }
        out[id_out] = sum;
    }

    /* original code
    for ( int b = 0; b < in.size.b; ++b ){
        int filters_size = filters.size();
        for ( int filter = 0; filter < filters_size; ++filter ){
            TensorObject<float> filter_data = filters[filter];
            for ( int y = 0; y < out.size.y; ++y ){
                for ( int x = 0; x < out.size.x; ++x ){
                    TensorCoordinate mapped = map_to_input( { 0, (uint16_t)x, (uint16_t)y, 0 }, 0 );
                    float sum = 0;
                    for ( int z = 0; z < in.size.z; ++z ){
                        for ( int j = 0; j < kernel_size; ++j ){
                            for ( int i = 0; i < kernel_size; ++i ){
                                sum += filter_data( 0, i, j, z ) * padded_in( b, mapped.x + i, mapped.y + j, z );
                            }
                        }
                    }
                    out( b, x, y, filter ) = sum;
                }
            }
        }
    }*/
}
23,640
#include <stdio.h>

#define BLOCKDIM 512
#define RSIZE 10
#define JFACTOR 1

// Minimum-image distance under a non-orthogonal (triclinic) cell; defined
// later in this file.
__device__ double NonOrtho_dist(double a0, double a1, double a2, double b0, double b1, double b2, double *ucell);

//------------------------------------------------------------------------------------------------------------------------------------------------
//try thread coarsening
/* One thread per solvent molecule: D_[mol] = min over the molecule's
 * NAtoms atoms of the squared distance to maskCenter, starting from maxD.
 * No periodic imaging. SolventMols_ holds NAtoms*3 doubles per molecule.
 * active_size is unused in this variant. */
__global__ void Action_noImage_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, int Nmols , int NAtoms, int active_size)
{
  //__shared__ double dist_array[BLOCKDIM];
  //int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
  //int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
  //int mol_in_block = threadIdx.x/NAtoms;
  int mol = blockIdx.x*BLOCKDIM + threadIdx.x;
  //advantage of register
  double a0 = maskCenter[0];
  double a1 = maskCenter[1];
  double a2 = maskCenter[2];
  if ( mol < Nmols ) {
    int sIndex = mol*NAtoms*3;
    double min_val = maxD;
    for(int offset = 0 ; offset < NAtoms*3 ; offset+=3 ) {
      //double x = a0 - SolventMols_[sIndex++];
      //double y = a1 - SolventMols_[sIndex++];
      //double z = a2 - SolventMols_[sIndex++];
      double x = a0 - SolventMols_[sIndex+ offset + 0 ];
      double y = a1 - SolventMols_[sIndex+offset + 1];
      double z = a2 - SolventMols_[sIndex+offset + 2];
      min_val = min(min_val, x*x + y*y + z*z);
    }
    D_[mol] = min_val;
  }
}

//------------------------------------------------------------------------------------------------------------------------------------------------
/* One thread per (molecule, atom) pair; each thread scans the solute
 * atoms in RSIZE-sized chunks, tracking the minimum squared distance to
 * its own atom, then the atom-0 thread of each molecule reduces over the
 * molecule's NAtoms entries in shared memory. No periodic imaging.
 * Assumes a molecule's atoms map to consecutive threads in one block. */
__global__ void Action_noImage_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, int Nmols , int NAtoms,int NSAtoms , int active_size)
{
  __shared__ double dist_array[BLOCKDIM];
  //__shared__ double sAtom_shared[RSIZE];
  int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
  int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
  //int mol_in_block = threadIdx.x/NAtoms;

  //handling the chunks for solute_atoms
  int chunksize,start,end, NChunks,i,j;
  if(NSAtoms*3 > RSIZE) {
    chunksize = (RSIZE/3)*3;
    NChunks = ceil(double(NSAtoms*3)/chunksize);
    start = 0;
    end = chunksize;
  }
  else {
    chunksize = NSAtoms*3;
    NChunks = 1;
    start = 0;
    end = NSAtoms*3;
  }

  // if(threadIdx.x == 0 && blockIdx.x == 0 )
  //   printf("chunkszize = %d ; Nchunk = %d; start = %d; end = %d\n ",
  //          chunksize,NChunks,start,end);

  if ( threadIdx.x < active_size && mol*NAtoms + atom < Nmols*NAtoms ) {
    // if(atom == 0 )
    //   D_[mol] = maxD;
    //__syncthreads();
    double min_val = maxD;
    double dist;
    int sIndex = mol*NAtoms*3 + atom*3;
    double a0 = SolventMols_[sIndex + 0];
    double a1 = SolventMols_[sIndex + 1];
    double a2 = SolventMols_[sIndex + 2];
    for(i = 0 ; i < NChunks ; i++) {
      //copying to shared
      //if (threadIdx.x < (end - start))
      //  sAtom_shared[threadIdx.x] = Solute_atoms[start + threadIdx.x];
      //__syncthreads();
      //TODO - add skew per thread
      for (j = start ; j < end; j+=3 ) {
        //int offset = start + (j + threadIdx.x)%(end - start);
        double x = Solute_atoms[j + 0] - a0;
        double y = Solute_atoms[j + 1] - a1;
        double z = Solute_atoms[j + 2] - a2;
        dist = x*x + y*y + z*z;
        //if (mol == 11)
        //  printf("min = %f\n",min_val);
        min_val = min(min_val,dist);
      }
      start = end;
      end = min(end + chunksize, NSAtoms*3);
    }
    dist_array[threadIdx.x] = min_val;
    //if (threadIdx.x == 0)
    //  printf("min_val = %f\n",min_val);
    //printf(" dist = %f\n", Dist);
    __syncthreads();
    //first thread
    //naive approach to a reduction algorithm
    //this works if NAtoms is small other wise you need split
    //and do some of log(n) parallel reduction
    //min_val = maxD;
    // NOTE(review): __syncthreads() above sits inside a divergent branch;
    // safe only if the guard is uniform across the block — confirm launch
    // configuration guarantees that.
    if( atom ==0 ) {
      for(i = 0 ; i < NAtoms ; i++ ){
        //sIndex = mol*NAtoms*3 + i*3;
        //if (dist_array[threadIdx.x + i] < min_val)
        //  min_val = dist_array[threadIdx.x + i] ;
        min_val = min(min_val, dist_array[threadIdx.x + i]);
      }
      D_[mol] = min_val;
    }
    //if(tx == 0 && bx == 0 )
    //  printf("end of kernel");
  }
}

//------------------------------------------------------------------------------------------------------------------------------------------------
/* Like Action_noImage_center_GPU, but applies orthorhombic periodic
 * imaging: coordinate deltas are folded into the box and the shorter of
 * the two periodic images is used per axis. A zero-length box axis makes
 * the distance -1.0 (error sentinel). box = {lx, ly, lz}. */
__global__ void Action_ImageOrtho_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, double *box, int Nmols , int NAtoms, int active_size)
{
  //__shared__ double dist_array[BLOCKDIM];
  //int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
  //int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
  //int mol_in_block = threadIdx.x/NAtoms;
  int mol = blockIdx.x*BLOCKDIM + threadIdx.x;
  //advantage of register
  double a0 = maskCenter[0];
  double a1 = maskCenter[1];
  double a2 = maskCenter[2];
  if ( mol < Nmols ) {
    int sIndex = mol*NAtoms*3;
    double min_val = maxD;
    double dist;
    for(int offset = 0 ; offset < NAtoms ; offset++ ) {
      double x = a0 - SolventMols_[sIndex++];
      double y = a1 - SolventMols_[sIndex++];
      double z = a2 - SolventMols_[sIndex++];
      // Get rid of sign info
      if (x<0) x=-x;
      if (y<0) y=-y;
      if (z<0) z=-z;
      // Get rid of multiples of box lengths
      //TODO WIERD that should be a way to simplify it
      while (x > box[0]) x = x - box[0];
      while (y > box[1]) y = y - box[1];
      while (z > box[2]) z = z - box[2];
      // Find shortest distance in periodic reference
      double D = box[0] - x;
      if (D < x) x = D;
      D = box[1] - y;
      if (D < y) y = D;
      D = box[2] - z;
      if (D < z) z = D;
      //Dist = x*x + y*y + z*z;
      dist = x*x + y*y + z*z;
      if (box[0]==0.0 || box[1]==0.0 || box[2]==0.0) dist= -1.0;
      min_val = min(min_val, dist);
    }
    D_[mol] = min_val;
  }
}

//------------------------------------------------------------------------------------------------------------------------------------------------
/* Per-(molecule, atom) variant with orthorhombic imaging: same chunked
 * solute scan and shared-memory reduction as the no-image version, with
 * the periodic fold applied to each delta. */
__global__ void Action_ImageOrtho_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, double *box, int Nmols , int NAtoms,int NSAtoms , int active_size)
{
  __shared__ double dist_array[BLOCKDIM];
  //__shared__ double sAtom_shared[RSIZE];
  int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
  int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
  //int mol_in_block = threadIdx.x/NAtoms;

  //handling the chunks for solute_atoms
  int chunksize,start,end, NChunks,i,j;
  if(NSAtoms*3 > RSIZE) {
    chunksize = (RSIZE/3)*3;
    NChunks = ceil(double(NSAtoms*3)/chunksize);
    start = 0;
    end = chunksize;
  }
  else {
    chunksize = NSAtoms*3;
    NChunks = 1;
    start = 0;
    end = NSAtoms*3;
  }

  // if(threadIdx.x == 0 && blockIdx.x == 0 )
  //   printf("chunkszize = %d ; Nchunk = %d; start = %d; end = %d\n ",
  //          chunksize,NChunks,start,end);

  if ( threadIdx.x < active_size && mol*NAtoms + atom < Nmols*NAtoms ) {
    // if(atom == 0 )
    //   D_[mol] = maxD;
    //__syncthreads();
    double min_val = maxD;
    double dist;
    int sIndex = mol*NAtoms*3 + atom*3;
    double a0 = SolventMols_[sIndex + 0];
    double a1 = SolventMols_[sIndex + 1];
    double a2 = SolventMols_[sIndex + 2];
    for(i = 0 ; i < NChunks ; i++) {
      //copying to shared
      //if (threadIdx.x < (end - start))
      //  sAtom_shared[threadIdx.x] = Solute_atoms[start + threadIdx.x];
      //__syncthreads();
      //TODO - add skew per thread
      for (j = start ; j < end; j+=3 ) {
        //int offset = start + (j + threadIdx.x)%(end - start);
        double x = Solute_atoms[j + 0] - a0;
        double y = Solute_atoms[j + 1] - a1;
        double z = Solute_atoms[j + 2] - a2;
        // Get rid of sign info
        if (x<0) x=-x;
        if (y<0) y=-y;
        if (z<0) z=-z;
        // Get rid of multiples of box lengths
        //TODO WIERD that should be a way to simplify it
        while (x > box[0]) x = x - box[0];
        while (y > box[1]) y = y - box[1];
        while (z > box[2]) z = z - box[2];
        //below is actually slower!
        //x = x - box[0]*((int)x/box[0]);
        //y = y - box[0]*((int)y/box[1]);
        //z = z - box[0]*((int)z/box[2]);
        // Find shortest distance in periodic reference
        double D = box[0] - x;
        if (D < x) x = D;
        D = box[1] - y;
        if (D < y) y = D;
        D = box[2] - z;
        if (D < z) z = D;
        //Dist = x*x + y*y + z*z;
        dist = x*x + y*y + z*z;
        if (box[0]==0.0 || box[1]==0.0 || box[2]==0.0) dist = -1.0;
        //if (mol == 11)
        //  printf("min = %f\n",min_val);
        min_val = min(min_val,dist);
      }
      start = end;
      end = min(end + chunksize, NSAtoms*3);
    }
    dist_array[threadIdx.x] = min_val;
    //if (threadIdx.x == 0)
    //  printf("min_val = %f\n",min_val);
    //printf(" dist = %f\n", Dist);
    __syncthreads();
    //first thread
    //naive approach to a reduction algorithm
    //this works if NAtoms is small other wise you need split
    //and do some of log(n) parallel reduction
    //min_val = maxD;
    if( atom ==0 ) {
      for(i = 0 ; i < NAtoms ; i++ ){
        //sIndex = mol*NAtoms*3 + i*3;
        //if (dist_array[threadIdx.x + i] < min_val)
        //  min_val = dist_array[threadIdx.x + i] ;
        min_val = min(min_val, dist_array[threadIdx.x + i]);
      }
      D_[mol] = min_val;
    }
    //if(tx == 0 && bx == 0 )
    //  printf("end of kernel");
  }
}

//------------------------------------------------------------------------------------------------------------------------------------------------
/* Center variant for non-orthogonal (triclinic) cells: coordinates are
 * first transformed to fractional space with the 3x3 recip matrix
 * (row-major), then NonOrtho_dist computes the minimum-image squared
 * distance using the ucell matrix. */
__global__ void Action_ImageNonOrtho_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, double *ucell, double *recip, int Nmols , int NAtoms, int active_size)
{
  //__shared__ double dist_array[BLOCKDIM];
  //int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
  //int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
  //int mol_in_block = threadIdx.x/NAtoms;
  int mol = blockIdx.x*BLOCKDIM + threadIdx.x;
  //advantage of register
  double a0 = recip[0]*maskCenter[0] + recip[1]*maskCenter[1] + recip[2]*maskCenter[2];
  double a1 = recip[3]*maskCenter[0] + recip[4]*maskCenter[1] + recip[5]*maskCenter[2];
  double a2 = recip[6]*maskCenter[0] + recip[7]*maskCenter[1] + recip[8]*maskCenter[2];
  if ( mol < Nmols ) {
    int sIndex = mol*NAtoms*3;
    double min_val = maxD;
    for(int offset = 0 ; offset < NAtoms*3 ; offset+=3 ) {
      double x = recip[0]*SolventMols_[sIndex + offset + 0] + recip[1]*SolventMols_[sIndex + offset + 1] + recip[2]*SolventMols_[sIndex + offset + 2];
      double y = recip[3]*SolventMols_[sIndex + offset + 0] + recip[4]*SolventMols_[sIndex + offset + 1] + recip[5]*SolventMols_[sIndex + offset + 2];
      double z = recip[6]*SolventMols_[sIndex + offset + 0] + recip[7]*SolventMols_[sIndex + offset + 1] + recip[8]*SolventMols_[sIndex + offset + 2];
      double dist = NonOrtho_dist(x,y,z,a0,a1,a2,ucell);
      // if (mol == 0)
      //   printf("dist = %f\n",dist);
      min_val = min(min_val, dist);
    }
    D_[mol] = min_val;
  }
}

//------------------------------------------------------------------------------------------------------------------------------------------------
/* Per-(molecule, atom) variant for triclinic cells: chunked solute scan
 * with all coordinates transformed to fractional space via recip, pair
 * distances via NonOrtho_dist, then the same shared-memory reduction by
 * the atom-0 thread of each molecule. */
__global__ void Action_ImageNonOrtho_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, double *ucell, double *recip, int Nmols , int NAtoms,int NSAtoms , int active_size)
{
  __shared__ double dist_array[BLOCKDIM];
  //__shared__ double sAtom_shared[RSIZE];
  int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
  int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
  //int mol_in_block = threadIdx.x/NAtoms;

  //handling the chunks for solute_atoms
  int chunksize,start,end, NChunks,i,j;
  if(NSAtoms*3 > RSIZE) {
    chunksize = (RSIZE/3)*3;
    NChunks = ceil(double(NSAtoms*3)/chunksize);
    start = 0;
    end = chunksize;
  }
  else {
    chunksize = NSAtoms*3;
    NChunks = 1;
    start = 0;
    end = NSAtoms*3;
  }

  // if(threadIdx.x == 0 && blockIdx.x == 0 )
  //   printf("chunkszize = %d ; Nchunk = %d; start = %d; end = %d\n ",
  //          chunksize,NChunks,start,end);

  if ( threadIdx.x < active_size && mol*NAtoms + atom < Nmols*NAtoms ) {
    // if(atom == 0 )
    //   D_[mol] = maxD;
    //__syncthreads();
    double min_val = maxD;
    double dist;
    int sIndex = mol*NAtoms*3 + atom*3;
    double a0 = recip[0]*SolventMols_[sIndex + 0] + recip[1]*SolventMols_[sIndex + 1] + recip[2]*SolventMols_[sIndex + 2];
    double a1 = recip[3]*SolventMols_[sIndex + 0] + recip[4]*SolventMols_[sIndex + 1] + recip[5]*SolventMols_[sIndex + 2];
    double a2 = recip[6]*SolventMols_[sIndex + 0] + recip[7]*SolventMols_[sIndex + 1] + recip[8]*SolventMols_[sIndex + 2];
    for(i = 0 ; i < NChunks ; i++) {
      //copying to shared
      //if (threadIdx.x < (end - start))
      //  sAtom_shared[threadIdx.x] = Solute_atoms[start + threadIdx.x];
      //__syncthreads();
      //TODO - add skew per thread
      for (j = start ; j < end; j+=3 ) {
        //int offset = start + (j + threadIdx.x)%(end - start);
        double x = recip[0]*Solute_atoms[j + 0] + recip[1]*Solute_atoms[j + 1] + recip[2]*Solute_atoms[j + 2] ;
        double y = recip[3]*Solute_atoms[j + 0] + recip[4]*Solute_atoms[j + 1] + recip[5]*Solute_atoms[j + 2] ;
        double z = recip[6]*Solute_atoms[j + 0] + recip[7]*Solute_atoms[j + 1] + recip[8]*Solute_atoms[j + 2] ;
        dist = NonOrtho_dist(x,y,z,a0,a1,a2,ucell);
        //if (mol == 11)
        //  printf("min = %f\n",min_val);
        min_val = min(min_val,dist);
      }
      start = end;
      end = min(end + chunksize, NSAtoms*3);
    }
    dist_array[threadIdx.x] = min_val;
    //if (threadIdx.x == 0)
    //  printf("min_val = %f\n",min_val);
    //printf(" dist = %f\n", Dist);
    __syncthreads();
    //first thread
    //naive approach to a reduction algorithm
    //this works if NAtoms is small other wise you need split
    //and do some of log(n) parallel reduction
    //min_val = maxD;
    if( atom ==0 ) {
      for(i = 0 ; i < NAtoms ; i++ ){
        //sIndex = mol*NAtoms*3 + i*3;
        //if (dist_array[threadIdx.x + i] < min_val)
        //  min_val = dist_array[threadIdx.x + i] ;
        min_val = min(min_val, dist_array[threadIdx.x + i]);
      }
      D_[mol] = min_val;
    }
    //if(tx == 0 && bx == 0 )
    //  printf("end of kernel");
  }
}

//--------------------------------------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------------------------------------
// Minimum-image squared distance under a general (non-orthogonal) unit cell.
// (a0,a1,a2) and (b0,b1,b2) are points in fractional coordinates; ucell is the
// 3x3 cell matrix stored row-major in 9 doubles. Both points are wrapped back
// into the primary cell, then the central cell and its 26 neighbour images of
// the first point are examined and the smallest squared Cartesian distance to
// the second point is returned. Device-only helper.
__device__ double NonOrtho_dist(double a0, double a1, double a2, double b0, double b1, double b2, double *ucell)
{
  // Wrap both points into [0,1) along each fractional axis.
  double fx = a0 - floor(a0);
  double fy = a1 - floor(a1);
  double fz = a2 - floor(a2);
  double gx = b0 - floor(b0);
  double gy = b1 - floor(b1);
  double gz = b2 - floor(b2);
  // Second point converted to Cartesian space once.
  double bX = gx*ucell[0] + gy*ucell[3] + gz*ucell[6];
  double bY = gx*ucell[1] + gy*ucell[4] + gz*ucell[7];
  double bZ = gx*ucell[2] + gy*ucell[5] + gz*ucell[8];
  // Sweep all 27 periodic images (the same set the unrolled original covered).
  double best = -1.0;   // sentinel: squared distances are never negative
  for (int ix = -1; ix <= 1; ix++) {
    double sx = fx + (double)ix;
    for (int iy = -1; iy <= 1; iy++) {
      double sy = fy + (double)iy;
      for (int iz = -1; iz <= 1; iz++) {
        double sz = fz + (double)iz;
        // Image of the first point in Cartesian space, minus the second point.
        double x = (sx*ucell[0] + sy*ucell[3] + sz*ucell[6]) - bX;
        double y = (sx*ucell[1] + sy*ucell[4] + sz*ucell[7]) - bY;
        double z = (sx*ucell[2] + sy*ucell[5] + sz*ucell[8]) - bZ;
        double d2 = (x*x) + (y*y) + (z*z);
        if (best < 0.0 || d2 < best) best = d2;
      }
    }
  }
  // NOTE: returns the *squared* distance, exactly as the original did.
  return best;
}
23,641
//// gol.cu
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <assert.h>

// Game of Life rules
// global memory only

typedef bool GolCell;

// Fetch a neighbour cell by (x,y) offset relative to the cell at mapCellIdx (row-major map).
inline GolCell GetNeighbourCell (GolCell *input, int mapCellIdx, int mapWidth, int x_off, int y_off)
{
  return input[mapCellIdx + (mapWidth * y_off) + x_off];
}

// Accumulate one neighbour's state (alive contributes 1) into the running count.
inline void UpdateNeighbourhood(int &neighbourhood, GolCell &neighbourValue)
{
  neighbourhood += neighbourValue;
}

// Row-major accessor: cell at column x, row y.
inline GolCell GetCell(GolCell *grid, int x, int y, int gridWidth)
{
  return grid[x + (y * gridWidth)];
}

inline bool IsAlive(GolCell &cell)
{
  return (true == cell);
}

// A cell is alive the next generation if it is currently alive and has
// either 2 or 3 neighbours OR if it is dead and has 3 neighbours.
inline void UpdateState(GolCell &thisCell, int &neighbourhood)
{
  if(IsAlive(thisCell)) {
    thisCell = (neighbourhood == 2 || neighbourhood == 3);
  } else {
    thisCell = (neighbourhood == 3);
  }
}

// Run `iterations` generations of Conway's Game of Life on the CPU.
// input/output are gridWidth*gridHeight row-major buffers; buffers are swapped
// (locally — the pointers are passed by value) after every generation, so after
// an odd number of iterations the result is in the caller's `output` buffer and
// after an even number it is in the caller's `input` buffer.
// NOTE: wrapAround is currently ignored — toroidal wrapping is always applied.
void RunGoL(GolCell *input, GolCell *output, int gridWidth, int gridHeight, int iterations, bool wrapAround)
{
  (void)wrapAround;  // see note above
  for(int iter = 0; iter < iterations; iter = iter + 1) {
    for(int glbl_x = 0; glbl_x < gridWidth; glbl_x = glbl_x + 1) {
      for(int glbl_y = 0; glbl_y < gridHeight; glbl_y = glbl_y + 1) {
        // Assume row-major here.
        int mapCell = (gridWidth * glbl_y) + glbl_x;
        GolCell thisCell = input[mapCell];
        // Count of live neighbours around this cell.
        int neighbourhood = 0;
        // (0,0) is treated as the top-left of the grid.
        // Toroidal wrap on all four edges.
        int x_left  = (glbl_x == 0) ? gridWidth - 1 : glbl_x - 1;
        int x_right = (glbl_x == gridWidth - 1) ? 0 : glbl_x + 1;
        int y_above = (glbl_y == 0) ? gridHeight - 1 : glbl_y - 1;
        int y_below = (glbl_y == gridHeight - 1) ? 0 : glbl_y + 1;
        GolCell neighbourValue;
        // TOP LEFT
        neighbourValue = GetCell(input, x_left, y_above, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // TOP
        neighbourValue = GetCell(input, glbl_x, y_above, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // TOP RIGHT
        neighbourValue = GetCell(input, x_right, y_above, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // RIGHT
        neighbourValue = GetCell(input, x_right, glbl_y, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // BOTTOM RIGHT
        neighbourValue = GetCell(input, x_right, y_below, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // BOTTOM
        neighbourValue = GetCell(input, glbl_x, y_below, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // BOTTOM LEFT
        neighbourValue = GetCell(input, x_left, y_below, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        // LEFT
        neighbourValue = GetCell(input, x_left, glbl_y, gridWidth);
        UpdateNeighbourhood(neighbourhood, neighbourValue);
        UpdateState(thisCell, neighbourhood);
        output[mapCell] = thisCell;
      }
    }
    // Ping-pong the buffers for the next generation (local swap only).
    GolCell *bufferSwap = input;
    input = output;
    output = bufferSwap;
  }
}

// Fill `input` either from a text file (characters '1'/'X' = alive, '0'/' '/'_' = dead,
// anything else skipped) or randomly (~1/3 alive) when bGenGridFromScratch is set.
void InitializeBoard(GolCell *input, int gridWidth, int gridHeight, char *startingFile, bool bGenGridFromScratch)
{
  FILE *file = NULL;
  if(!bGenGridFromScratch) {
    // BUGFIX: the fopen() result used to be discarded, leaving `file` NULL
    // and tripping the assert below on every file-based run.
    file = fopen(startingFile, "r");
    assert(file);
  }
  for(int i = 0; i < gridHeight; i = i + 1) {
    for(int j = 0; j < gridWidth; j = j + 1) {
      int cell;  // int so EOF is representable
      if(!bGenGridFromScratch) {
        // BUGFIX: the first fgetc() result used to be thrown away, silently
        // consuming (and skipping) one character per cell.
        cell = fgetc(file);
        // Skip newline/whitespace noise until a recognised cell character;
        // stop at EOF so a short file cannot loop forever.
        while(cell != EOF && cell != '1' && cell != 'X' && cell != '0' && cell != ' ' && cell != '_') {
          cell = fgetc(file);
        }
      } else {
        cell = (rand() % 3 == 0) ? '1' : '0';
      }
      int index = j + gridWidth * i;
      if(cell == '1' || cell == 'X') {
        input[index] = 1;
      } else if(cell == '0' || cell == ' ' || cell == '_') {
        input[index] = 0;
      }
      // Cells past EOF are left untouched, matching the original's behaviour
      // for unrecognised input.
    }
  }
  if(!bGenGridFromScratch) {
    fclose(file);
  }
}

// Usage: gol <width> <height> <iterations> [startingFile]
// With 3 arguments the board is generated randomly; with 4 it is read from startingFile.
int main (int argc, char *argv[])
{
  if(argc != 5 && argc != 4) {
    return 0;
  }
  int gridWidth  = atoi(argv[1]);
  int gridHeight = atoi(argv[2]);
  int iterations = atoi(argv[3]);
  int gridSize = gridWidth * gridHeight;
  char *startingFile = argv[4];  // NULL when argc == 4 (argv[argc] is NULL)
  GolCell *input  = (GolCell *)malloc(gridSize * sizeof(GolCell));
  GolCell *output = (GolCell *)malloc(gridSize * sizeof(GolCell));
  // Keep the original allocations so both can be freed after the pointer shuffle below.
  GolCell *allocA = input;
  GolCell *allocB = output;
  InitializeBoard(input, gridWidth, gridHeight, startingFile, argc == 4);
  // CUDA events are used purely as a timer around the (host-side) simulation.
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  RunGoL(input, output, gridWidth, gridHeight, iterations, true);
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  float time_ms;
  cudaEventElapsedTime(&time_ms, start, end);
  std::cout <<"time: "<<time_ms<<std::endl;
  if(0 == (iterations & 0x1)) {
    // even number of iterations -> true output buffer is stored at input
    output = input;
  }
  // std::cout <<"Ending BOARD"<<std::endl;
  // for(int j = 0; j < gridHeight; j = j + 1) {
  //   for(int i = 0; i < gridWidth; i = i + 1) {
  //     std::cout << (output[j * gridWidth + i] ? '#' : ' ');
  //   }
  //   std::cout << "|" << std::endl;
  // }
  // BUGFIX: events and heap buffers were previously leaked.
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  free(allocA);
  free(allocB);
}
23,642
#include "includes.h" /// /// Copyright (c) 2018, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. 
////////////////////////////////////////////////////////////////////// /// /// NAME: dgemm /// /// PURPOSE: This program tests the efficiency with which a dense matrix /// dense multiplication is carried out /// /// USAGE: The program takes as input the matrix order, /// the number of times the matrix-matrix multiplication /// is carried out, and, optionally, a tile size for matrix /// blocking /// /// <progname> <# iterations> <matrix order> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than OpenMP or standard C functions, the following /// functions are used in this program: /// /// cblasDgemm() /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, December, 2017. /// ////////////////////////////////////////////////////////////////////// __global__ void init(int order, double * C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { C[i*order+j] = 0; } }
23,643
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/*
 Array layout
  node 0:state 1:height 2:out_root_id 3:in_root_id 4:flag for whether the node is connected to source/sink
  edge 0:flow 1:reverse_id 2:in_node_id 3:out_node_id 4:in_link_id 5:out_link_id 6:id
  NOTE(review): this comment appears to predate the #define lists below (e.g. ACTIVE/N_ROUTE/ROUTE);
  treat the #defines as authoritative.
*/
// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
  if (code != cudaSuccess)
  {
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}
// Number of int fields stored per node / per edge.
#define NODE 7
#define EDGE 7
// Column-major-style addressing: field EL of node ID in an N_NUM-node table.
#define NADR(NUM, ID, EL) (NUM * EL + ID)
// Same for edges; the table holds 2*E_NUM edges (forward + reverse).
#define EADR(NUM, ID, EL) ((NUM * 2) * EL + ID)
//node
#define STATE 0
#define HEIGHT 1
#define OUT_ROOT 2
#define IN_ROOT 3
#define ST_FLG 4
#define ACTIVE 5
#define N_ROUTE 6
//edge
#define FLOW 0
#define ROUTE 1
#define REVERSE 2
#define IN_NODE 3
#define IN_LINK 4
#define OUT_NODE 5
#define OUT_LINK 6
// Host-side globals filled in by load(): counts, node/edge tables, terminal ids.
int *N_NUM;
int *E_NUM;
int *n_table;
int *e_table;
int *source;
int *sink;
// NOTE(review): stale forward declaration — the definition further below has a
// different signature, so this declares an overload that is never defined or called.
void link(int e_table, int from, int to, int r_edge, int flow);
int load(FILE *fp);
// Initial labelling: source gets state 1, sink state 2; nodes directly attached
// to a terminal (ST_FLG != -1) start at height 1 in the matching tree.
// 1D launch, one thread per node.
__global__ void first_reset(int *n_table, int *e_table, int *source, int *sink, int *N_NUM, int *E_NUM)
{
  int total_id = blockDim.x * blockIdx.x + threadIdx.x;
  if (total_id >= N_NUM[0]) return;
  if (total_id == source[0]) {
    n_table[NADR(N_NUM[0], total_id, STATE)] = 1;
    n_table[NADR(N_NUM[0], total_id, HEIGHT)] = 0;
    n_table[NADR(N_NUM[0], total_id, ACTIVE)] = 0;
    n_table[NADR(N_NUM[0], total_id, N_ROUTE)] = 0;
    return;
  } else if (total_id == sink[0]) {
    n_table[NADR(N_NUM[0], total_id, STATE)] = 2;
    n_table[NADR(N_NUM[0], total_id, HEIGHT)] = 0;
    n_table[NADR(N_NUM[0], total_id, ACTIVE)] = 0;
    n_table[NADR(N_NUM[0], total_id, N_ROUTE)] = 0;
    return;
  } else if (n_table[NADR(N_NUM[0], total_id, ST_FLG)] != -1) {
    // Node is directly connected to a terminal via edge ST_FLG.
    int f_flg = n_table[NADR(N_NUM[0], total_id, ST_FLG)];
    int in_node = e_table[EADR(E_NUM[0], f_flg, IN_NODE)];
    n_table[NADR(N_NUM[0], total_id, HEIGHT)] = 1;
    n_table[NADR(N_NUM[0], total_id, ACTIVE)] = 1;
    n_table[NADR(N_NUM[0], total_id, N_ROUTE)] = 1;
    // Join the source tree (state 1) or sink tree (state 2) accordingly.
    if (in_node == source[0]) n_table[NADR(N_NUM[0], total_id, STATE)] = 1;
    else n_table[NADR(N_NUM[0], total_id, STATE)] = 2;
  }
}
// Per-round reset: detach nodes whose terminal edge is saturated (flow == 0),
// reactivate the rest. 1D launch, one thread per node.
__global__ void node_reset(int *n_table, int *e_table, int *source, int *sink, int *N_NUM, int *E_NUM, int *e_flg)
{
  int total_id = blockDim.x * blockIdx.x + threadIdx.x;
  if (total_id >= N_NUM[0]) return;
  if (total_id == source[0] || total_id == sink[0]) return;
  if (n_table[NADR(N_NUM[0], total_id, ST_FLG)] == -1) {
    // Not attached to a terminal: return to the free state.
    n_table[NADR(N_NUM[0], total_id, HEIGHT)] = N_NUM[0];
    n_table[NADR(N_NUM[0], total_id, STATE)] = 0;
    n_table[NADR(N_NUM[0], total_id, ACTIVE)] = 0;
    n_table[NADR(N_NUM[0], total_id, N_ROUTE)] = 0;
    return;
  } else {
    n_table[NADR(N_NUM[0], total_id, ACTIVE)] = 1;
    int f_flg = n_table[NADR(N_NUM[0], total_id, ST_FLG)];
    if (e_table[EADR(E_NUM[0], f_flg, FLOW)] == 0) {
      // Terminal edge saturated: detach from the tree entirely.
      n_table[NADR(N_NUM[0], total_id, HEIGHT)] = N_NUM[0];
      n_table[NADR(N_NUM[0], total_id, STATE)] = 0;
      n_table[NADR(N_NUM[0], total_id, ACTIVE)] = 0;
      n_table[NADR(N_NUM[0], total_id, N_ROUTE)] = 0;
      n_table[NADR(N_NUM[0], total_id, ST_FLG)] = -1;
    }
  }
}
// Growth stage: each active node expands its tree along residual edges.
// Free neighbours are claimed via atomicAdd on N_ROUTE (first claimer wins);
// edges meeting the opposite tree are queued in que3 for the augmentation stage.
// flg1 signals that at least one node was newly claimed this pass.
__global__ void trace_cu(int *n_table, int *e_table, int *flg3, int *que3, int *N_NUM, int *E_NUM, int *flg1, int *e_flg)
{
  // NOTE(review): every thread writes flg1[0] = 0 unconditionally; relies on
  // the subsequent claims setting it back to 1 before the host reads it.
  flg1[0] = 0;
  int node_id = blockDim.x * blockIdx.x + threadIdx.x;
  if (node_id >= N_NUM[0]) return;
  // Block-shared copies of the counts to avoid repeated global reads.
  __shared__ int n_num[1];
  n_num[0] = N_NUM[0];
  __shared__ int e_num[1];
  e_num[0] = E_NUM[0];
  if (n_table[NADR(n_num[0], node_id, ACTIVE)] != 1) return;
  // Source-tree nodes walk outgoing edges; sink-tree nodes walk incoming ones.
  int state, look_n, root, link;
  if (n_table[NADR(n_num[0], node_id, STATE)] == 1) {
    state = 1; look_n = OUT_NODE; root = OUT_ROOT; link = OUT_LINK;
  } else {
    state = 2; look_n = IN_NODE; root = IN_ROOT; link = IN_LINK;
  }
  int old;
  int lok_node;
  int edge_id = n_table[NADR(n_num[0], node_id, root)];
  for (;;) {
    // Advance to the next edge with residual capacity; deactivate when exhausted.
    for (;;) {
      if (edge_id == -1) {
        n_table[NADR(n_num[0], node_id, ACTIVE)] = 0;
        return;
      }
      if (e_table[EADR(e_num[0], edge_id, FLOW)] > 0) break;
      edge_id = e_table[EADR(e_num[0], edge_id, link)];
    }
    lok_node = e_table[EADR(e_num[0], edge_id, look_n)];
    if (n_table[NADR(n_num[0], lok_node, STATE)] == 0) {
      //old = atomicExch(&e_flg[lok_node], 1);
      // Atomically claim the free neighbour; only the first claimer initialises it.
      old = atomicAdd(&n_table[NADR(n_num[0], lok_node, N_ROUTE)], 1);
      if (old == 0) {
        n_table[NADR(n_num[0], lok_node, STATE)] = state;
        n_table[NADR(n_num[0], lok_node, HEIGHT)] = n_table[NADR(n_num[0], node_id, HEIGHT)] + 1;
        n_table[NADR(n_num[0], lok_node, ACTIVE)] = 1;
        flg1[0] = 1;
      }
    } else if (n_table[NADR(n_num[0], lok_node, STATE)] != state) {
      // The trees met on this edge: enqueue it as an augmentation candidate.
      old = atomicAdd(&(flg3[0]), 1);
      que3[old] = edge_id;
    }
    edge_id = e_table[EADR(e_num[0], edge_id, link)];
  }
}
// Push `flow` units along the discovered path: subtract from each forward edge,
// add to its reverse edge, and decrement N_ROUTE on nodes whose edge saturated.
// flg1 is bumped when a path segment became saturated at the meeting edge `tag`.
__device__ void flow_t(int *n_table, int *e_table, int flow, int pre_edge, int edge, int N_NUM, int E_NUM, int tag, int *flg1)
{
  int link = IN_NODE;
  int old = atomicSub(&e_table[EADR(E_NUM, edge, FLOW)], flow);
  int reverse = e_table[EADR(E_NUM, edge, REVERSE)];
  atomicAdd(&e_table[EADR(E_NUM, reverse, FLOW)], flow);
  int l_node = e_table[EADR(E_NUM, edge, link)];
  if (old == flow) {
    // This edge just saturated: drop one claim from the upstream node.
    old = atomicSub(&n_table[NADR(N_NUM, l_node, N_ROUTE)], 1);
    if (old == 1) atomicAdd(&flg1[0], 1);
  }
  // Walk the recorded ROUTE chain back to the meeting edge `tag`.
  for (;;) {
    old = atomicSub(&e_table[EADR(E_NUM, pre_edge, FLOW)], flow);
    reverse = e_table[EADR(E_NUM, pre_edge, REVERSE)];
    atomicAdd(&e_table[EADR(E_NUM, reverse, FLOW)], flow);
    if (pre_edge == tag) {
      if (old == flow) atomicAdd(&flg1[0], 1);
      break;
    }
    l_node = e_table[EADR(E_NUM, edge, link)];
    // Switch direction once we pass the meeting edge's side of the path.
    if (l_node == e_table[EADR(E_NUM, tag, link)]) link = OUT_NODE;
    if (old == flow) {
      old = atomicSub(&n_table[NADR(N_NUM, l_node, N_ROUTE)], 1);
      if (old == 1) atomicAdd(&flg1[0], 1);
    }
    pre_edge = e_table[EADR(E_NUM, pre_edge, ROUTE)];
  }
}
// Augmentation stage: one thread per queued meeting edge. Each thread tries to
// trace a source->sink path through its edge (claiming edges via e_flg so paths
// stay disjoint), determines the bottleneck capacity, and pushes it with flow_t.
__global__ void aug_cu(int *n_table, int *e_table, int *flg3, int *que3, int *source, int *sink, int *e_flg, int *flow_sum, int *N_NUM, int *E_NUM, int *flg1)
{
  int total_id = blockDim.x * blockIdx.x + threadIdx.x;
  if (total_id >= flg3[0]) return;
  int tag = que3[total_id];
  // Claim the meeting edge; losers bail out.
  int old = atomicExch(&e_flg[tag], 1);
  if (old != 0) return;
  int pre_edge = tag;
  __shared__ int n_num[1];
  n_num[0] = N_NUM[0];
  __shared__ int e_num[1];
  e_num[0] = E_NUM[0];
  // Bottleneck starts as the meeting edge's residual capacity.
  int flow = e_table[EADR(e_num[0], tag, FLOW)];
  if (flow == 0) return;
  // First trace back through the source tree (decreasing heights).
  int node = e_table[EADR(e_num[0], tag, IN_NODE)];
  int state = 1;
  int height = n_table[NADR(N_NUM[0], node, HEIGHT)] - 1;
  int root = IN_ROOT;
  int look_n = IN_NODE;
  int link = IN_LINK;
  for (;;) {
    int edge_id = n_table[NADR(n_num[0], node, root)];
    int flow1;
    int lok_node;
    // Find an unclaimed residual edge one level closer to the terminal.
    for (;;) {
      if (edge_id == -1) return;
      flow1 = e_table[EADR(e_num[0], edge_id, FLOW)];
      lok_node = e_table[EADR(e_num[0], edge_id, look_n)];
      if (flow1 > 0) {
        if (n_table[NADR(n_num[0], lok_node, HEIGHT)] == height) {
          if (n_table[NADR(n_num[0], lok_node, STATE)] == state) {
            old = atomicExch(&e_flg[edge_id], 1);
            if (old == 0) {
              // Tighten the bottleneck and take this edge.
              flow = ((flow > flow1) ? flow1 : flow);
              break;
            }
          }
        }
      }
      edge_id = e_table[EADR(e_num[0], edge_id, link)];
    }
    if (lok_node == sink[0]) {
      // Full path found: push the bottleneck and record the gained flow.
      flow_t(n_table, e_table, flow, pre_edge, edge_id, n_num[0], e_num[0], tag, flg1);
      atomicAdd(&flow_sum[0], flow);
      return;
    } else if (lok_node == source[0]) {
      // Reached the source: switch to tracing forward through the sink tree.
      node = e_table[EADR(e_num[0], tag, OUT_NODE)];
      height = n_table[NADR(n_num[0], node, HEIGHT)] - 1;
      state = 2;
      root = OUT_ROOT;
      look_n = OUT_NODE;
      link = OUT_LINK;
    } else {
      node = lok_node;
      height--;
    }
    // Remember the path so flow_t can walk it back later.
    e_table[EADR(e_num[0], edge_id, ROUTE)] = pre_edge;
    pre_edge = edge_id;
  }
}
// Reset a single device-side counter/flag to zero (launched <<<1,1>>>).
__global__ void flg_reset(int *flg)
{
  flg[0] = 0;
}
// Driver: load the graph, run growth/augmentation rounds until no boundary
// edges remain, then report the max flow and elapsed time.
int main(int argc, char **argv)
{
  if (argc != 2) {
    printf("Usage: ./bk.exe file.inp\n");
    return 1;
  }
  FILE *fp = fopen(argv[1], "r");
  if (fp == NULL) {
    printf("Can't open file [%s]\n", argv[1]);
    return 1;
  }
  int max = load(fp);
  fclose(fp);
  if (max == -1) return 0; // return early when the file format is invalid
  clock_t start, end; // for timing
  // Allocate (pinned) host variables.
  int *flg1, *flg3;
  gpuErrchk ( cudaMallocHost(&flg1, sizeof(int)) );
  gpuErrchk ( cudaMallocHost(&flg3, sizeof(int)) );
  int *flow_sum;
  gpuErrchk ( cudaMallocHost(&flow_sum, sizeof(int)) );
  size_t C_SIZE = sizeof(int);
  size_t N_SIZE = sizeof(int) * N_NUM[0] * NODE;
  size_t E_SIZE = sizeof(int) * E_NUM[0] * EDGE * 2;
  // Allocate device variables.
  int *DN_NUM;
  int *DE_NUM;
  int *nd_table;
  int *ed_table;
  int *d_flg1, *d_flg3;
  int *d_que3;
  int *d_source, *d_sink;
  int *dflow_sum;
  int *de_flg;
  int *d_cnt;
  gpuErrchk( cudaMalloc((void**)&DN_NUM, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&DE_NUM, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&d_flg1, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&d_flg3, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&d_que3, sizeof(int) * E_NUM[0]) );
  gpuErrchk( cudaMalloc((void**)&nd_table, N_SIZE) );
  gpuErrchk( cudaMalloc((void**)&ed_table, E_SIZE) );
  gpuErrchk( cudaMalloc((void**)&d_source, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&d_sink, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&dflow_sum, C_SIZE) );
  gpuErrchk( cudaMalloc((void**)&de_flg, sizeof(int) * E_NUM[0] * 2) );
  gpuErrchk (cudaMalloc((void**)&d_cnt, sizeof(int)) );
  // Copy inputs to the device.
  gpuErrchk( cudaMemcpy(nd_table, n_table, N_SIZE, cudaMemcpyHostToDevice) );
  gpuErrchk( cudaMemcpy(ed_table, e_table, E_SIZE, cudaMemcpyHostToDevice) );
  gpuErrchk( cudaMemcpy(d_source, source, C_SIZE, cudaMemcpyHostToDevice) );
  gpuErrchk( cudaMemcpy(d_sink, sink, C_SIZE, cudaMemcpyHostToDevice) );
  gpuErrchk( cudaMemcpy(DN_NUM, N_NUM, C_SIZE, cudaMemcpyHostToDevice) );
  gpuErrchk( cudaMemcpy(DE_NUM, E_NUM, C_SIZE, cudaMemcpyHostToDevice) );
  // Start timing.
  start = clock();
  flg_reset<<<1, 1>>>(dflow_sum); // initialize flow_sum
  first_reset<<<(N_NUM[0] / 32) + 1, 32>>>(nd_table, ed_table, d_source, d_sink, DN_NUM, DE_NUM);
  for (;;) {
    flg_reset<<<1, 1>>>(d_flg3);
    node_reset<<<(N_NUM[0] / 32) + 1, 32>>>(nd_table, ed_table, d_source, d_sink, DN_NUM, DE_NUM, de_flg);
    gpuErrchk( cudaThreadSynchronize() );
    //growth stage
    for (;;) {
      trace_cu<<<(N_NUM[0] / 56) + 1, 56>>>(nd_table, ed_table, d_flg3, d_que3, DN_NUM, DE_NUM, d_flg1, de_flg);
      gpuErrchk( cudaThreadSynchronize() );
      gpuErrchk( cudaMemcpy(flg1, d_flg1, C_SIZE, cudaMemcpyDeviceToHost) );
      if (flg1[0] == 0) break;
    }
    // Check whether the two trees met on any edge; if not, we are done.
    gpuErrchk( cudaMemcpy(flg3, d_flg3, C_SIZE, cudaMemcpyDeviceToHost) );
    if (flg3[0] == 0) break;
    //augmentation stage
    for (int i = 0; i < 3; i++) {
      gpuErrchk( cudaMemset((void**)de_flg, 0, sizeof(int) * E_NUM[0] * 2) );
      aug_cu<<<(flg3[0] / 32) + 1, 32>>>(nd_table, ed_table, d_flg3, d_que3, d_source, d_sink, de_flg, dflow_sum, DN_NUM, DE_NUM, d_flg1);
      gpuErrchk( cudaThreadSynchronize() );
    }
    gpuErrchk( cudaMemcpy(flg1, d_flg1, C_SIZE, cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(flow_sum, dflow_sum, C_SIZE, cudaMemcpyDeviceToHost) );
    printf("current_flow : %d\n", flow_sum[0]);
  }
  printf("flow_sum : %d\n", flow_sum[0]);
  end = clock(); // stop timing
  printf("time:%.2f[s]\n", (double)(end - start) / CLOCKS_PER_SEC);
  cudaFree(DN_NUM);
  cudaFree(DE_NUM);
  cudaFree(nd_table);
  cudaFree(ed_table);
  cudaFree(d_flg1);
  cudaFree(d_flg3);
  cudaFree(d_que3);
  cudaFree(d_source);
  cudaFree(d_sink);
  cudaFree(dflow_sum);
  cudaFree(d_cnt);
  cudaFree(de_flg);
  cudaFreeHost(flg1);
  cudaFreeHost(flg3);
  cudaFreeHost(n_table);
  cudaFreeHost(e_table);
  cudaFreeHost(source);
  cudaFreeHost(sink);
  cudaFreeHost(flow_sum);
  cudaFreeHost(N_NUM);
  cudaFreeHost(E_NUM);
  return 0;
}
// Insert a directed edge from->to with capacity `flow` into the adjacency lists.
// If a parallel edge already exists its capacity is increased instead (and
// count[0] is bumped so the caller can compact edge ids). Otherwise slots
// edge_id (forward) and edge_id+1 (reverse, capacity 0) are filled in.
void link(int *n_table, int *e_table, int from, int to, int flow, int edge_id, int *count)
{
  // Duplicate check: merge parallel edges instead of adding a new pair.
  int edge = n_table[NADR(N_NUM[0], from, OUT_ROOT)];
  for (;;) {
    if (edge == -1) break;
    if (e_table[EADR(E_NUM[0], edge, OUT_NODE)] == to) {
      e_table[EADR(E_NUM[0], edge, FLOW)] += flow;
      count[0]++;
      return;
    }
    edge = e_table[EADR(E_NUM[0], edge, OUT_LINK)];
  }
  // Forward edge.
  e_table[EADR(E_NUM[0], edge_id, REVERSE)] = edge_id + 1;
  e_table[EADR(E_NUM[0], edge_id, FLOW)] = flow;
  e_table[EADR(E_NUM[0], edge_id, IN_NODE)] = from;
  e_table[EADR(E_NUM[0], edge_id, OUT_NODE)] = to;
  e_table[EADR(E_NUM[0], edge_id, OUT_LINK)] = n_table[NADR(N_NUM[0], from, OUT_ROOT)];
  n_table[NADR(N_NUM[0], from, OUT_ROOT)] = edge_id;
  e_table[EADR(E_NUM[0], edge_id, IN_LINK)] = n_table[NADR(N_NUM[0], to, IN_ROOT)];
  n_table[NADR(N_NUM[0], to, IN_ROOT)] = edge_id;
  // Remember the terminal edge on nodes adjacent to source/sink.
  if (from == source[0]) {
    n_table[NADR(N_NUM[0], to, ST_FLG)] = edge_id;
  }
  if (to == sink[0]) {
    n_table[NADR(N_NUM[0], from, ST_FLG)] = edge_id;
  }
  // Reverse edge (zero initial capacity).
  e_table[EADR(E_NUM[0], (edge_id + 1), REVERSE)] = edge_id;
  e_table[EADR(E_NUM[0], (edge_id + 1), FLOW)] = 0;
  e_table[EADR(E_NUM[0], (edge_id + 1), IN_NODE)] = to;
  e_table[EADR(E_NUM[0], (edge_id + 1), OUT_NODE)] = from;
  e_table[EADR(E_NUM[0], (edge_id + 1), OUT_LINK)] = n_table[NADR(N_NUM[0], to, OUT_ROOT)];
  n_table[NADR(N_NUM[0], to, OUT_ROOT)] = edge_id + 1;
  e_table[EADR(E_NUM[0], (edge_id + 1), IN_LINK)] = n_table[NADR(N_NUM[0], from, IN_ROOT)];
  n_table[NADR(N_NUM[0], from, IN_ROOT)] = edge_id + 1;
}
// Parse a DIMACS-like max-flow file into the global tables. Returns the largest
// capacity seen, or -1 when the header/terminal lines are malformed.
// Allocates the pinned-host globals (N_NUM, E_NUM, n_table, e_table, source, sink).
int load(FILE *fp)
{
  int max = 0;
  char s1[10], s2[10];
  gpuErrchk( cudaMallocHost(&N_NUM, sizeof(int)) );
  gpuErrchk( cudaMallocHost(&E_NUM, sizeof(int)) );
  // Header: "<word> <word> <#nodes> <#edges>".
  int result = fscanf(fp, "%s %s %d %d\n", s1, s2, &N_NUM[0], &E_NUM[0]);
  if (result == EOF) return -1;
  gpuErrchk( cudaMallocHost(&n_table, sizeof(int) * N_NUM[0] * NODE) );
  gpuErrchk( cudaMallocHost(&e_table, sizeof(int) * E_NUM[0] * EDGE * 2) );
  // Initialise adjacency roots / edge slots to the "empty" sentinel.
  for (int i = 0; i < N_NUM[0]; i++) {
    n_table[NADR(N_NUM[0], i, OUT_ROOT)] = -1;
    n_table[NADR(N_NUM[0], i, IN_ROOT)] = -1;
    n_table[NADR(N_NUM[0], i, ST_FLG)] = -1;
  }
  for (int i = 0; i < E_NUM[0] * 2; i++) {
    e_table[EADR(E_NUM[0], i, REVERSE)] = -1;
    e_table[EADR(E_NUM[0], i, IN_NODE)] = -1;
    e_table[EADR(E_NUM[0], i, OUT_NODE)] = -1;
    e_table[EADR(E_NUM[0], i, IN_LINK)] = -1;
    e_table[EADR(E_NUM[0], i, OUT_LINK)] = -1;
  }
  int t;
  // Source line, then sink line: "<word> <node-id> <word>".
  result = fscanf(fp, "%s %d %s\n", s1, &t, s2);
  if (result == EOF) return -1;
  cudaMallocHost(&source, sizeof(int));
  source[0] = t;
  result = fscanf(fp, "%s %d %s\n", s1, &t, s2);
  if (result == EOF) return -1;
  cudaMallocHost(&sink, sizeof(int));
  sink[0] = t;
  // count tracks merged duplicate edges so edge ids stay densely packed.
  int *count;
  count = new int[1]();
  for (int i = 0; i < E_NUM[0]; i++) {
    int from, to, flow;
    result = fscanf(fp, "%s %d %d %d\n", s1, &from, &to, &flow);
    if (result == EOF) break;
    if (flow > max) max = flow;
    link(n_table, e_table, from, to, flow, (i - count[0]) * 2, count);
  }
  delete[] count;
  return max;
}
23,644
#include <iostream> #include <math.h> // Kernel function to color the buffer according to the gradient __global__ void color(float *buffer) { int x = threadIdx.x; int y = blockIdx.x; int nx = blockDim.x; int ny = gridDim.x; float r = (float) x / nx; float g = (float) y / ny; float b = 0.2; float* triplet = buffer + 3*(blockIdx.x*blockDim.x + threadIdx.x); triplet[0] = r; triplet[1] = g; triplet[2] = b; } int main(void) { int nx = 200; int ny = 100; float *buffer; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&buffer, 3*nx*ny*sizeof(float)); // Run kernel on nx*ny elements on the GPU color<<<ny, nx>>>(buffer); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Print the buffer out to command std::cout << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny-1; j >= 0; j--) for (int i = 0; i < nx; i++) { float* triplet = buffer + 3*(j*nx + i); int ir = int(255.99f * triplet[0]); int ig = int(255.99f * triplet[1]); int ib = int(255.99f * triplet[2]); std::cout << ir << " " << ig << " " << ib << "\n"; } // Free memory cudaFree(buffer); return 0; }
23,645
#include "includes.h" __global__ void add(int *a,int *b,int *c) { int tid = threadIdx.x; if(tid < N) { c[tid]=a[tid]+b[tid]; } }
23,646
#include "CudaComputing.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int vvmain(int* c) { const int arraySize = 5; const int a[arraySize] = {1, 2, 3, 4, 5 }; const int b[arraySize] = {10, 20, 30, 40, 50 }; // Add vectors inparallel. cudaError_t cudaStatus =addWithCuda(c, a, b, arraySize); if (cudaStatus !=cudaSuccess) { return 1; } // cudaDeviceReset must becalled before exiting in order for profiling and // tracing tools such asNsight and Visual Profiler to show complete traces. cudaStatus =cudaDeviceReset(); if (cudaStatus !=cudaSuccess) { return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to runon, change this on a multi-GPU system cudaStatus =cudaSetDevice(0); if (cudaStatus !=cudaSuccess) { goto Error; } // Allocate GPU buffersfor three vectors (two input, one output) . cudaStatus =cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus !=cudaSuccess) { goto Error; } cudaStatus =cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus !=cudaSuccess) { goto Error; } cudaStatus =cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus !=cudaSuccess) { goto Error; } // Copy input vectors fromhost memory to GPU buffers. cudaStatus =cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus !=cudaSuccess) { goto Error; } cudaStatus =cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus !=cudaSuccess) { goto Error; } // Launch a kernel on theGPU with one thread for each element. 
addKernel<<<(size-1)/128+1,128>>>(dev_c, dev_a, dev_b); // cudaDeviceSynchronizewaits for the kernel to finish, and returns // any errors encounteredduring the launch. cudaStatus =cudaDeviceSynchronize(); if (cudaStatus !=cudaSuccess) { goto Error; } // Copy output vector fromGPU buffer to host memory. cudaStatus = cudaMemcpy(c,dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus !=cudaSuccess) { goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
23,647
#include <iostream> #include <cuComplex.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/system_error.h> #include <math.h> struct zaxpy_functor { const cuDoubleComplex x; zaxpy_functor(cuDoubleComplex _x) : x(_x) {} __host__ __device__ cuDoubleComplex operator()(const cuDoubleComplex& A, const cuDoubleComplex& B) const { return cuCadd(cuCmul(A,x),B); } }; int main() { int numElements = pow(2,20); thrust::device_vector<cuDoubleComplex> A(numElements); thrust::device_vector<cuDoubleComplex> B(numElements); cuDoubleComplex CDouble = make_cuDoubleComplex(2.25,2.25); thrust::fill(A.begin(), A.end(), CDouble); thrust::fill(B.begin(), B.end(), CDouble); thrust::transform(A.begin(), A.end(), B.begin(), B.begin(), zaxpy_functor(CDouble)); return 0; }
23,648
#include <iostream> using std::cout; using std::endl; void printDeviceInfo(cudaDeviceProp prop, int idx){ cout << "[" << idx << "]\n"; cout << " Name: " << prop.name << endl; cout << " Major: " << prop.major << endl; cout << " Minor: " << prop.major << endl; cout << " Total Global Memory: " << prop.totalGlobalMem << endl; cout << " Total Shared Memory per Block: " << prop.sharedMemPerBlock << endl; int i; int dim = 3; cout << " Maximun Block Dim: \n"; for(i=0; i<dim; i++){ cout << " Dim " << i << ": " << prop.maxThreadsDim[i] << endl; } cout << " Maximun Grid Dim: \n"; for(i=0; i<dim; i++){ cout << " Dim " << i << ": " << prop.maxGridSize[i] << endl; } cout << " Warp Size: " << prop.warpSize << endl; cout << " Maximun Threads per Block: " << prop.maxThreadsPerBlock << endl; cout << " Number of Multiprocessors: " << prop.multiProcessorCount << endl; } int main(int argc, char const *argv[]){ cudaDeviceProp prop; int count = 0; int i; cudaGetDeviceCount(&count); for(i=0; i<count ;i++ ){ cudaGetDeviceProperties(&prop, i); printDeviceInfo(prop, i); cout << endl; } return 0; }
23,649
#include <stdio.h> //#include <cuda.h> //#include "cuda_runtime_api.h" //#include <stdint.h> //#include <stdlib.h> //This is the working matrix multiplication code - very basic /**** Done: - printing of matrix in a more pleasant manner using printMatrix function - command line arguments - opens matrix files and reads the matrix successfully - working array passing from main to auxiliary (loadMatrixFile) function :) - fixed printing of matrix - fixed erroneous matrix values by moving loading into host matrix multiplication function! - basic move towards SN P simulation: multiplication of s0 and Msnp - moving from multiplication to finally simulating an SN P (sort of) in a very basic manner Problems: - (fixed) MatA and MatB values are overlapping and erroneous TODOS: - error checking of switch case input ( scanf of int and char ) - use multiple files + make file - see code comments ****/ /*** **** START of AUXILIARY functions ***/ /* START of KERNEL functions */ //START vector addition kernel function __global__ void MatrixAddKernel ( int *Md, int *Nd, int *Pd, int Width ){ // MatrixAddKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width ); //dim3 dimBlock( Width, Width ); dim3 dimGrid( 1, 1 ); //int tx = threadIdx.x; int ty = threadIdx.y; //due to row-major ordering of matrix elements //int Pvalue = 0; for ( int k = 0; k < Width; ++k ){ int Mdelement = Md[ ty * Width + k ]; int Ndelement = Nd[ ty * Width + k ]; Pd[ ty * Width + k ] = Mdelement + Ndelement; } //Pd[ ty * Width + tx ] = Pvalue; } //END of kernel addition //START of kernel multiplication __global__ void MatrixMulKernel ( int *Md, int *Nd, int *Pd, int Width ){ int tx = threadIdx.x; int ty = threadIdx.y; //due to row-major ordering of matrix elements int Pvalue = 0; for ( int k = 0; k < Width; ++k ){ int Mdelement = Md[ ty * Width + k ]; int Ndelement = Nd[ k * Width + tx ]; Pvalue += Mdelement * Ndelement; } Pd[ ty * Width + tx ] = Pvalue; } //END of kernel multiplication /* END of KERNEL functions */ 
//function to print matrix void printMatrix ( int *M, int rows, int columns ){ //assumes matrix is in row-major format int index; printf ( "\n \n " ); for ( int v = 0; v < rows; v++ ){ //assumes a square matrix for ( int w = 0; w < columns; w++ ) { index = v * columns + w; printf ( " %02d", M[ index ] ); } printf ( " \n\n " ); } }//End of printMatrix function //START of loadMatrixFile void loadMatrixFile( char *filename, int *z, int matWidth, int matHeight ){ int y = 0; int w = 0; int x; int offset = 0; FILE *ptr1 = fopen( filename, "r" ); fscanf( ptr1, " %d", &x ); while( !feof( ptr1 ) && y < ( matWidth * matHeight ) + 1 ){ if ( y > offset ){ fscanf( ptr1, " %d", &z[ w - offset ] ); w++; } else{ fscanf( ptr1, " %d", &x ); } y++; } fclose( ptr1 ); } //END of loadMatrixFile //Start of matrix multiplication host function MatrixMul void MatrixMul( char *filename0, char *filename1, char *filename2, int Width ){ int size = Width * Width * sizeof( int ); int *Md, *Nd, *Pd; dim3 dimBlock( Width, Width ); dim3 dimGrid( 1, 1 ); int *matA = ( int * )malloc( size ); //printf( "Width and height of Matrix A: %d %d and init values are\n", Width, Width ); //printMatrix( matA, Width, Width ); loadMatrixFile( filename1, matA, Width, Width ); printf( " \n%s after loading from file: \n", filename1 ); printMatrix( matA, Width, Width ); int *matB = ( int * )malloc( size ); loadMatrixFile( filename2, matB, Width, Width ); printf( " \n%s after loading from file: \n", filename2 ); printMatrix( matB, Width, Width ); //assumes a square matrix int *matC = ( int * )malloc( size ); cudaMalloc( (void**) &Md, size ); cudaMemcpy( Md, matA, size, cudaMemcpyHostToDevice ); cudaMalloc( (void**) &Nd, size ); cudaMemcpy( Nd, matB, size, cudaMemcpyHostToDevice ); cudaMalloc( (void**) &Pd, size ); //MatrixMulKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width ); MatrixAddKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width ); cudaMemcpy( matC, Pd, size, cudaMemcpyDeviceToHost ); printf( " \n %s * %s : \n", 
filename1, filename2 ); printMatrix( matC, Width, Width ); free( matA ); free( matB ); free( matC ); cudaFree( Md ); cudaFree( Nd ); cudaFree ( Pd ); } //End of Matrix multiplication function MatrixMul /*** ****END of AUXILIARY functions ****/ /*** ****START of MAIN function ****/ int main ( void ) { int x; while( x != 2 ) { printf( "\n Type \n 1 to enter filenames < 20 in length \n 2 to quit \n: " ); scanf( "%d", &x ); switch( x ){ case 1: char a[ 20 ], b[ 20 ], c[ 20 ]; int d; printf( " Please enter spiking vector file: \n" ); scanf( " %s", &a ); printf( " Please enter configuration vector file: \n" ); scanf( " %s", &b ); printf( " Please enter spiking transition matrix file: \n" ); scanf( " %s", &c ); printf( " Please enter the square matrix width: \n" ); scanf( " %d", &d ); if( ( strlen( a ) ) > 20 && ( strlen( b ) ) > 20 && ( strlen( c ) ) > 20 ) { printf( " Filename/s was/were too long ( > 20 char ) " ); // TODO: Do something about segmentation fault here when input filename is > 20 chars //spikVec = { "\0" }; // doesn't work //*confVec = NULL; // doesn't work break; } else { printf( " You entered the file %s for the spiking vector \n", a ); printf( " You entered the file %s for the configuration vector \n", b ); printf( " You entered the file %s for the spiking transition matrix \n ", c ); char *confVec = b; char *spikVec = a; char *spikTransMat = c; int width = d; printf( "\nYou have entered files %s, %s, and %s and square matrix width %d \n", spikVec, confVec, spikTransMat, width ); //load matrices from files FILE *ptr1 = fopen( confVec, "r" ); FILE *ptr2 = fopen( spikVec, "r" ); FILE *ptr3 = fopen( spikTransMat, "r" ); if ( ptr1 == 0 && ptr2 == 0 && ptr3 == 0 ) { printf( "\n could not open one of the following files: %s %s %s \n", a, b, c ); break; } else { MatrixMul( confVec, spikVec, spikTransMat, width ); } fclose( ptr1 ); fclose( ptr2 ); fclose( ptr3 ); break; } case 2: printf( " You entered quit. Bye! 
\n\n" ); break; default: printf( " You entered an invalid choice \n\n" ); break; } } } /*** ****END of MAIN function ***/
23,650
#include "cuda.h" #include "stdio.h" #include "stdlib.h" #define LBLK 8 __device__ int get_index(int N, int C, int H, int W, int a, int b, int c, int d) { return a * C * H * W + b * W * H + c * W + d; } __device__ int RM(int i, int j, int N) { return i * N + j; } __global__ void conv_kernel(float *weight, float *inp, float *bias, float *out, int N, int C, int H, int W, int num_filter, int filter_depth, int filter_H, int filter_W, int out_size, int stride) { int h = blockIdx.y * blockDim.y + threadIdx.y; int w = blockIdx.x * blockDim.x + threadIdx.x; int k_d = blockIdx.z * blockDim.z + threadIdx.z; int n, c, k_h, k_w; if (h < out_size && w < out_size && k_d < num_filter) { float my_bias = bias[k_d]; int kernel_x = w * stride; int kernel_y = h * stride; for (n = 0; n < N; n ++) { float cur_val = my_bias; for (c = 0; c < filter_depth; c ++) { for (k_h = 0; k_h < filter_H; k_h ++) { for (k_w = 0; k_w < filter_W; k_w ++) { float target_weight = weight[get_index(num_filter, filter_depth, filter_H, filter_W, k_d, c, k_h, k_w)]; float target_inp = inp[get_index(N, C, H, W, n, c, kernel_y + k_h, kernel_x + k_w)]; cur_val += target_inp * target_weight; } } } out[get_index(N, num_filter, out_size, out_size, n, k_d, h, w)] = cur_val; } } } __global__ void matmul_kernel(float *A, float *B, float *out, int Am, int An, int Bm, int Bn) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; __shared__ float subA[LBLK * LBLK]; __shared__ float subB[LBLK * LBLK]; float sum = 0.0; int k; for (k = 0; k < An; k += LBLK) { if (i < Am && k + bj < An) { subA[RM(bi, bj, LBLK)] = A[RM(i, k + bj, An)]; } else { subA[RM(bi, bj, LBLK)] = 0.0; } if (k + bi < Bm && j < Bn) { subB[RM(bi, bj, LBLK)] = B[RM(k + bi, j, Bn)]; } else { subB[RM(bi, bj, LBLK)] = 0.0; } __syncthreads(); for (int bk = 0; bk < LBLK; bk++) { sum += subA[RM(bi, bk, LBLK)] * subB[RM(bk, bj, LBLK)]; } __syncthreads(); } if (i < Am && j < Bn) { 
out[RM(i, j, Bn)] = sum; } } extern "C" int conv_CUDA_main(float *inp, float *weight, float *bias, float *out, int stride, float *dims) { int N = (int)dims[0]; int C = (int)dims[1]; int H = (int)dims[2]; int W = (int)dims[3]; int num_filter = (int)dims[4]; int filter_depth = (int)dims[5]; int filter_H = (int)dims[6]; int filter_W = (int)dims[7]; int out_size = (int)dims[8]; float *dev_weight, *dev_inp, *dev_bias, *dev_out; cudaMalloc(&dev_weight, num_filter * filter_depth * filter_H * filter_W * sizeof(float)); cudaMalloc(&dev_inp, N * C * H * W * sizeof(float)); cudaMalloc(&dev_bias, num_filter * sizeof(float)); cudaMalloc(&dev_out, N * num_filter * out_size * out_size * sizeof(float)); cudaMemcpy(dev_weight, weight, num_filter * filter_depth * filter_H * filter_W * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_inp, inp, N * C * H * W * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_bias, bias, num_filter * sizeof(float), cudaMemcpyHostToDevice); dim3 blockDim(8, 8, 8); dim3 gridDim((out_size + blockDim.x - 1) / blockDim.x, (out_size + blockDim.y - 1) / blockDim.y, (num_filter + blockDim.z - 1) / blockDim.z); conv_kernel<<<gridDim, blockDim>>>(dev_weight, dev_inp, dev_bias, dev_out, N, C, H, W, num_filter, filter_depth, filter_H, filter_W, out_size, stride); cudaMemcpy(out, dev_out, N * num_filter * out_size * out_size * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dev_weight); cudaFree(dev_inp); cudaFree(dev_bias); cudaFree(dev_out); return 0; } extern "C" int matmul_CUDA_main(float *A, float *B, float *out, int Am, int An, int Bm, int Bn) { float *dev_A, *dev_B, *dev_bias, *dev_out; cudaMalloc(&dev_A, Am * An * sizeof(float)); cudaMalloc(&dev_B, Bm * Bn * sizeof(float)); cudaMalloc(&dev_out, Am * Bn * sizeof(float)); cudaMemcpy(dev_A, A, Am * An * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_B, B, Bm * Bn * sizeof(float), cudaMemcpyHostToDevice); dim3 blockDim(8, 8); dim3 gridDim((Bn + blockDim.x - 1) / blockDim.x, (Am + blockDim.y 
- 1) / blockDim.y); matmul_kernel<<<gridDim, blockDim>>>(dev_A, dev_B, dev_out, Am, An, Bm, Bn); cudaMemcpy(out, dev_out, Am * Bn * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_out); return 0; }
23,651
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #define NUM 100 __constant__ int constant_f; __constant__ int constant_g; __global__ void gpuAtomicAdd(float* d_a){ int threadId = blockDim.x*blockIdx.x + threadIdx.x; extern __shared__ float sh_arr[]; //just for __shared__ memory practice if(threadId<NUM){ sh_arr[threadId] = d_a[threadId]; //it seems no necessary for __syncthreads() atomicAdd(&sh_arr[threadId], 10); d_a[threadId] = sh_arr[threadId]; } return; } __global__ void gpuMemConst(float* d_a){ int threadId = blockDim.x*blockIdx.x + threadIdx.x; if(threadId<NUM){ d_a[threadId] = constant_f*d_a[threadId] + constant_g; } } int main(){ float* h_a = (float*) malloc(NUM*sizeof(float)); for(int i=0;i<NUM;i++){ h_a[i] = i; } float* res_a = (float*) malloc(NUM*sizeof(float)); //device mem float* d_a; cudaMalloc(&d_a, NUM*sizeof(float)); //mem copy: host -> device cudaMemcpy(d_a, h_a, NUM*sizeof(float), cudaMemcpyHostToDevice); int f = 3; int g = 2; //mem copy: host -> device; constant memory cudaMemcpyToSymbol(constant_f, &f, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(constant_g, &g, sizeof(int)); //call kernel gpuMemConst<<<2,64>>>(d_a); //rew copy: device -> host cudaMemcpy(res_a, d_a, NUM*sizeof(float), cudaMemcpyDeviceToHost); for(int i=0;i<NUM;i++){ if((res_a[i] - h_a[i])!=10){ printf("res_a - h_a = %lf\n", (res_a[i]-h_a[i])); } } cudaFree(d_a); printf("Finish!\n"); }
23,652
#include "includes.h" __global__ void add(int *a, int *b, int *c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < N) { c[tid] = a[tid] + b[tid]; tid += blockDim.x * gridDim.x; } }
23,653
#include "includes.h" __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {}
23,654
#include <error.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <cuda_runtime.h> #define DEVICE_NUMBER (0) typedef struct { int nofThreads; int nofBlocks; int32_t nof_repetitions; int data_size; int buffer_length; unsigned int *targetMeasOH; unsigned int hostMeasOH; int *hostBuffer; int *targetBuffer; uint64_t *target_realSum; uint64_t host_realSum; unsigned int *target_times; unsigned int *host_times; FILE *fd; } param_t; // Prints a message and returns zero if the given value is not cudaSuccess #define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__)) // Called internally by CheckCUDAError static int InternalCheckCUDAError(cudaError_t result, const char *fn, const char *file, int line) { if (result == cudaSuccess) return 0; printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line, fn, cudaGetErrorString(result)); return -1; } static void createSequentialArrayHost(param_t params){ // Link sequentially for(int i = 0; i < params.buffer_length; i++){ params.hostBuffer[i]=(i+params.nofThreads*params.nofBlocks)%params.buffer_length; } } static __global__ void getMeasurementOverhead(param_t params) { unsigned int start, stop; uint64_t sum = 0; start = clock(); for (int j = 0; j < params.buffer_length; j++){ sum +=j; } stop = clock(); *params.targetMeasOH = (stop-start)/params.buffer_length; *params.target_realSum = sum; } static __global__ void sequentialWalk(param_t params) { int current; unsigned int time_start, time_end, time_acc; uint64_t sum; int tindex = blockDim.x*blockIdx.x*params.nof_repetitions + params.nof_repetitions *threadIdx.x; // Warm up data cache for(int i = 0; i < params.buffer_length; i++){ sum += params.targetBuffer[i%params.buffer_length]; } // Run experiment multiple times. 
First iteration (-1) is to warm up icache for (int i = -2; i < params.nof_repetitions; i++){ sum = 0; time_acc = 0; current = (blockDim.x*blockIdx.x + threadIdx.x)%params.buffer_length; __syncthreads(); time_start = clock(); // Strided access to array for(int j = 0; j < params.buffer_length; j++){ current = params.targetBuffer[current]; sum += current; } time_end = clock(); time_acc += (time_end - time_start); __syncthreads(); *params.target_realSum = sum; // Do not write time for warm up iteration if (i>=0){ // Write element access time with measurement overhead params.target_times[tindex+i] = time_acc/params.buffer_length-(*params.targetMeasOH); } } } #if 0 static void printArray(int* buffer, int size){ printf("%d\n",size); for( int row = 0; row <= ceil(size/10); row++){ for(int i = row*10; i< row*10+10 && i<size; i++) printf("[%4d] ",i); printf("\n"); for(int i = row*10; i< row*10+10 && i<size; i++) printf(" %4d, ",buffer[i]); printf("\n"); } } #endif static int initializeTest(param_t *params){ //allocate buffer params->hostBuffer = NULL; params->hostBuffer = (int *) malloc(params->buffer_length*sizeof(int)); if (!params->hostBuffer) { perror("Failed allocating host buffer: "); return -1; } createSequentialArrayHost(*params); //allocate device random buffer if (CheckCUDAError(cudaMalloc(&params->targetBuffer, \ params->buffer_length*sizeof(int)))) return -1; /* createSequentialArray<<<params->nofBlocks,params->nofThreads>>>(*params); if (CheckCUDAError(cudaDeviceSynchronize())) return -1; if (CheckCUDAError(cudaMemcpy(params->hostBuffer, \ params->targetBuffer, \ params->buffer_length*sizeof(int), \ cudaMemcpyDeviceToHost))) return -1; */ #if 0 printArray(params->hostBuffer, params->buffer_length); #endif //allocate device times int size_time = params->nof_repetitions \ * params->nofThreads \ * params->nofBlocks \ * sizeof(unsigned int); if (CheckCUDAError(cudaMalloc(&params->target_times, \ size_time))) return -1; //allocate host times params->host_times = 
NULL; params->host_times = (unsigned int *) malloc(size_time); if (!params->host_times) { perror("Failed allocating host_times buffer: "); return -1; } memset(params->host_times,1, size_time); // Allocate device accumulator if (CheckCUDAError(cudaMalloc(&params->target_realSum, \ sizeof(uint64_t)))) return -1; // Allocate device measOH if (CheckCUDAError(cudaMalloc(&params->targetMeasOH, \ sizeof(unsigned int)))) return -1; return 0; } static int runTest(param_t *params){ // Get measurement overhead getMeasurementOverhead<<<1,1>>>(*params); // Launch kernel sequentialWalk<<<params->nofBlocks,params->nofThreads>>>(*params); // Synchronize with device if (CheckCUDAError(cudaDeviceSynchronize())) return -1; // Copyback times int size_time = params->nof_repetitions \ * params->nofThreads \ * params->nofBlocks \ * sizeof(unsigned int); if (CheckCUDAError(cudaMemcpy(params->host_times, \ params->target_times, \ size_time, \ cudaMemcpyDeviceToHost))) return -1; // Copyback sum params->host_realSum=0; if (CheckCUDAError(cudaMemcpy(&params->host_realSum, \ params->target_realSum, \ sizeof(uint64_t), \ cudaMemcpyDeviceToHost))) return -1; // Copyback target meas oh params->hostMeasOH=0; if (CheckCUDAError(cudaMemcpy(&params->hostMeasOH, \ params->targetMeasOH, \ sizeof(unsigned int), \ cudaMemcpyDeviceToHost))) return -1; return 0; } static int writeResults(param_t *params){ if (fprintf(params->fd,"{\n") < 0 ) return -1; // Write device info cudaDeviceProp deviceProp; if (CheckCUDAError(cudaGetDeviceProperties(&deviceProp, DEVICE_NUMBER))) return -1; int driverVersion = 0; if (CheckCUDAError(cudaDriverGetVersion(&driverVersion))) return -1; int runtimeVersion = 0; if (CheckCUDAError(cudaRuntimeGetVersion(&runtimeVersion))) return -1; if (fprintf(params->fd,"\"driverVer\": \"%d\",\n", driverVersion) < 0 ) return -1; if (fprintf(params->fd,"\"runTimeVer\": \"%d\",\n", runtimeVersion) < 0 ) return -1; if (fprintf(params->fd,"\"clockRate\": \"%d\",\n", deviceProp.clockRate) < 0 
) return -1; if (fprintf(params->fd,"\"globalL1CacheSupported\": \"%d\",\n", deviceProp.globalL1CacheSupported) < 0 ) return -1; if (fprintf(params->fd,"\"localL1CacheSupported\": \"%d\",\n", deviceProp.localL1CacheSupported) < 0 ) return -1; if (fprintf(params->fd,"\"l2CacheSize\": \"%d\",\n", deviceProp.l2CacheSize) < 0 ) return -1; if (fprintf(params->fd,"\"memoryBusWidth\": \"%d\",\n", deviceProp.memoryBusWidth) < 0 ) return -1; if (fprintf(params->fd,"\"memoryClockRate\": \"%d\",\n", deviceProp.memoryClockRate) < 0 ) return -1; if (fprintf(params->fd,"\"multiProcessorCount\": \"%d\",\n", deviceProp.multiProcessorCount) < 0 ) return -1; if (fprintf(params->fd,"\"regsPerBlock\": \"%d\",\n", deviceProp.regsPerBlock) < 0 ) return -1; if (fprintf(params->fd,"\"regsPerMultiprocessor\": \"%d\",\n", deviceProp.regsPerMultiprocessor) < 0 ) return -1; if (fprintf(params->fd,"\"sharedMemPerBlock\": \"%zu\",\n", deviceProp.sharedMemPerBlock) < 0 ) return -1; if (fprintf(params->fd,"\"sharedMemPerMultiprocessor\": \"%zu\",\n", deviceProp.sharedMemPerMultiprocessor) < 0 ) return -1; if (fprintf(params->fd,"\"warpSize\": \"%d\",\n", deviceProp.warpSize) < 0 ) return -1; cudaFuncCache config; if (CheckCUDAError(cudaDeviceGetCacheConfig ( &config ) )) return -1; if (fprintf(params->fd,"\"cacheConfig\": \"%d\",\n", config) < 0 ) return -1; // Write header if (fprintf(params->fd,"\"nofThreads\": \"%u\",\n", params->nofThreads) < 0 ) return -1; if (fprintf(params->fd,"\"nofBlocks\": \"%u\",\n", params->nofBlocks) < 0 ) return -1; if (fprintf(params->fd,"\"nof_repetitions\": \"%d\",\n", params->nof_repetitions) < 0 ) return -1; if (fprintf(params->fd,"\"data_size\": \"%d\",\n", params->data_size) < 0 ) return -1; if (fprintf(params->fd,"\"buffer_length\": \"%d\",\n", params->buffer_length) < 0 ) return -1; if (fprintf(params->fd,"\"real_sum\": \"%llu\",\n", (unsigned long long)params->host_realSum) < 0 ) return -1; if (fprintf(params->fd,"\"exp_sum\": \"%llu\",\n", ((unsigned long 
long)(params->buffer_length-1)*(unsigned long long)params->buffer_length)/2) < 0 ) return -1; if (fprintf(params->fd,"\"measOH\": \"%u\",\n", params->hostMeasOH) < 0 ) return -1; // Write times int size_time = params->nof_repetitions \ * params->nofThreads \ * params->nofBlocks; if (fprintf(params->fd,"\"times\":[\n") < 0 ) return -1; for (int i = 0; i < size_time-1; i++){ if (fprintf(params->fd,"\"%u\",\n",params->host_times[i]) < 0 ) return -1; } if (fprintf(params->fd,"\"%u\"]\n}", params->host_times[size_time-1]) < 0 ) return -1; if (fclose(params->fd) < 0) return -1; return 0; } static int cleanUp(param_t *params){ // Free target buffers cudaFree(params->targetBuffer); cudaFree(params->target_times); // Free host buffers free(params->hostBuffer); free(params->host_times); return 0; } static void PrintUsage(const char *name) { printf("Usage: %s <#threads> <#blocks> <# of intervals> <size in KB>" "<output JSON file name>\n", name); } int main(int argc, char **argv) { if (argc != 6) { PrintUsage(argv[0]); return 1; } param_t params; // Parse input parameter int nof_threads = atoi(argv[1]); if (nof_threads <= 0) { printf("Min one thread. Got %s threads\n", argv[1]); return EXIT_FAILURE; } int nof_blocks = atoi(argv[2]); if (nof_blocks <= 0) { printf("Min 1 block. Got %s blocks\n", argv[2]); return EXIT_FAILURE; } params.nofThreads = nof_threads; params.nofBlocks = nof_blocks; int nof_repetitions = atoi(argv[3]); if (nof_repetitions <= 0) { printf("More than 0 repetitions need to be used. Got %s repetitions\n", argv[3]); return EXIT_FAILURE; } int data_size = atoi(argv[4]); if (data_size <= 0) { printf("The buffer must be 1 or more KB. 
Got %s KB\n", argv[4]); return EXIT_FAILURE; } params.nof_repetitions = nof_repetitions; params.data_size = data_size*1024; params.buffer_length = data_size*1024/sizeof(int); params.fd = NULL; params.fd = fopen(argv[5],"w"); if (params.fd == NULL) { perror("Error opening output file:"); return EXIT_FAILURE; } // Set CUDA device if (CheckCUDAError(cudaSetDevice(DEVICE_NUMBER))) { return EXIT_FAILURE; } // Initialize parameters if (initializeTest(&params) < 0) return EXIT_FAILURE; // Run test if (runTest(&params) < 0) return EXIT_FAILURE; // Write results if (writeResults(&params) < 0){ perror("Error while writing outpufile: "); return EXIT_FAILURE; } // Clean up if (cleanUp(&params) < 0) return EXIT_FAILURE; printf("Finished testrun\n"); cudaDeviceReset(); return 0; }
23,655
#include <unistd.h> #include <stdio.h> #include "cuda.h" #include <sys/time.h> #define threshold 1e-2 #define n (4096) #define m (3) void init(void); void ref(void); #define TILE_SIZE 4 #define KS_DIV_2 (KERNEL_SIZE >> 1) #define KERNEL_SIZE 3 __constant__ double Mc[KERNEL_SIZE*KERNEL_SIZE]; void compare(int N, double *wref, double *w); __global__ void ConvolutionKernel(double* N, double* P, int inp_size){ __shared__ float tileNs[TILE_SIZE][TILE_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KS_DIV_2; int col_i = col_o - KS_DIV_2; // Load tile elements if(row_i >= 0 && row_i < inp_size && col_i >= 0 && col_i < inp_size) tileNs[ty][tx] = N[row_i*inp_size + col_i]; else tileNs[ty][tx] = 0.0f; // Wait until all tile elements are loaded __syncthreads(); // only compute if you're an output tile element if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++){ pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx]; } // only write values if you are inside matrix bounds if(row_o < inp_size && col_o < inp_size) P[row_o*inp_size + col_o] = pValue; } } double rtclock(void); double a[n*n],b[m*m],c[n*n],cref[n*n]; int main(){ int i,j; cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop,0); printf("dev_prop.totalConstMem = %lu\n",dev_prop.totalConstMem); double clkbegin, clkend, t; double *Nd,*Md,*Pd; dim3 blkDim(TILE_SIZE, TILE_SIZE); dim3 grdDim(n/TILE_SIZE, n/TILE_SIZE); int size_input, size_mask; int M=m, N=n; printf("Input Size = %dx%d\n",n,n); printf("Mask size = %dx%d\n",m,m); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Seq: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); size_input = sizeof(double)*n*n; size_mask = sizeof(double)*m*m; cudaMalloc((void **) 
&Nd,size_input); cudaMalloc((void **) &Md,size_mask); cudaMalloc((void **) &Pd,size_input); cudaMemcpyToSymbol(Mc, b, size_mask); cudaMemcpy(Nd,a,size_input,cudaMemcpyHostToDevice); cudaMemcpy(Md,b,size_mask,cudaMemcpyHostToDevice); clkbegin = rtclock(); //conv1d_basic<<<grid, threads>>>(Nd,Md,Pd,m,n); ConvolutionKernel<<< blkDim , grdDim >>>(Nd,Pd,n); if (cudaDeviceSynchronize() != cudaSuccess) printf ("Error return for test_kernel\n"); else{ clkend = rtclock(); t = clkend-clkbegin; cudaMemcpy(c,Pd,size_input,cudaMemcpyDeviceToHost); cudaFree(Nd); cudaFree(Md); cudaFree(Pd); printf("GPU: Approx GFLOPS: %.6f ; Time = %.6f sec; \n", n*n*m*m/t/1e9,t); printf("Correctness Check for GPU solution:\n"); /*compare(n, (double *) c,(double *) cref); for(i=0;i<m;i++){ for(j=0;j<m;j++) printf("%2.0lf ",b[i*m+j]); printf("\n"); } printf("\n\n"); for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%2.0lf ",a[i*n+j]); printf("\n"); } for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%3.0lf ",c[i*n+j]); printf("\n"); } */ printf("Correct!\n"); } } void ref(void){ int i,j,k,l,x,y; for(i=0;i<n;i++) for(j=0;j<n;j++){ k = i-m/2; l = j-m/2; for(x=0;x<m;x++) for(y=0;y<m;y++) if((k+x >= 0 && k+x < m) && (l+y >= 0 && l+y < m)) cref[i*n+j] += a[(k+x)*n + (l+y)]*b[x*m + m]; } } void init(void){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) a[i*n+j] = i+j; //drand48() for(i=0;i<m;i++) for(j=0;j<m;j++) b[i*m+j] = i+j; } void compare(int N, double *wref, double *w){ double maxdiff,this_diff; int numdiffs; int i; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) { this_diff = wref[i]-w[i]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void){ struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 
0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } /* __global__ void test_kernel(int N, double *A, double *B, double *C){ //int x=threadIdx.y+blockIdx.y*blockDim.y; //int y=threadIdx.x+blockIdx.x*blockDim.x; double sum; sum=0; __shared__ double Ads[TILE_WIDTH][TILE_WIDTH]; __shared__ double Bds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*TILE_WIDTH + ty; int col = bx*TILE_WIDTH + tx; for(int m=0; m<N/TILE_WIDTH; ++m){ if(row < N && (m*TILE_WIDTH +tx) < N) Ads[ty][tx] = A[row*N + TILE_WIDTH*m + tx]; else Ads[ty][tx] = 0; if(m*TILE_WIDTH + ty < N && col < N) Bds[ty][tx] = B[(m*TILE_WIDTH + ty)*N + col]; else Bds[ty][tx] = 0; __syncthreads(); for(int k=0; k<TILE_WIDTH; ++k) sum += Ads[ty][k]*Bds[k][tx]; __syncthreads(); } if(row < N && col < N) C[row*N + col] = sum; /* if((x<N)&&(y<N)) for (int k=0;k<N;k+=4){ sum += A[x*N+k]*B[y*N+k]; sum += A[x*N+k+1]*B[y*N+k+1]; sum += A[x*N+k+2]*B[y*N+k+2]; sum += A[x*N+k+3]*B[y*N+k+3]; } C[x*N+y]=sum; }*/
23,656
//gpu_bench.cu
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

/* Abort with a readable message whenever a CUDA API call fails.
 * Evaluates its argument exactly once (the original referenced the caller's
 * `err` variable from inside the macro). */
#define CHECK_ERR(x)                                        \
    do {                                                    \
        cudaError_t err_ = (x);                             \
        if (err_ != cudaSuccess) {                          \
            fprintf(stderr, "%s in %s at line %d\n",        \
                    cudaGetErrorString(err_),               \
                    __FILE__, __LINE__);                    \
            exit(-1);                                       \
        }                                                   \
    } while (0)

unsigned long MAX_OPS = 20000000;
const long MEGABYTE = 1048576;

/* Placeholder kernel kept for a (currently unused) device-IOPS benchmark. */
__global__ void gpu_iops(unsigned long max_ops)
{
    // int a = blockDim.x * blockIdx.x + threadIdx.x;
}

/*
 * Host<->device copy micro-benchmark.
 *
 * Options:
 *   -r R|W   direction: R = device-to-host reads, W = host-to-device writes
 *   -t B|K|M transfer granularity: single Bytes, 1 KiB chunks, 1 MiB chunks
 *
 * Prints "<rw>\t<test>\t<throughput>" where throughput is ops (B),
 * MiB fraction (K) or MiB (M) per second.
 */
int main(int argc, char *argv[])
{
    int c;              /* getopt() returns int; char mis-compares with -1 on
                         * platforms where char is unsigned */
    char test = 'B';
    char rw = 'R';

    while ((c = getopt(argc, argv, "r:t:")) != -1) {
        switch (c) {
        case 'r':
            rw = optarg[0];
            break;
        case 't':
            test = optarg[0];
            break;
        default:
            printf("Usage: ./benchCPU -n [number of threads]\n");
            return -1;
        }
    }

    struct timeval tv;
    long long start, stop;
    double secs;
    cudaError_t err = cudaSuccess;          /* defined even if rw is invalid */
    unsigned char *d_mem_pointer = NULL;    /* NULL so the final cudaFree is */
    unsigned char *mem_pointer = NULL;      /* safe when no test branch ran  */

    cudaMemcpyKind dir = cudaMemcpyHostToDevice;
    if (rw == 'R') {
        dir = cudaMemcpyDeviceToHost;
    } else if (rw == 'W') {
        dir = cudaMemcpyHostToDevice;   /* was "dir - ...": a no-op expression */
    }

    if (test == 'B') {
        /* Single-byte copies across 1 MiB: measures per-transfer overhead. */
        err = cudaMalloc((void **)&d_mem_pointer, sizeof(unsigned char) * MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char) * 1);

        gettimeofday(&tv, NULL);
        start = tv.tv_sec * 1000000LL + tv.tv_usec;
        for (unsigned long i = 0; i < MEGABYTE; i++) {
            if (rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[i], (void *)mem_pointer, 1, dir);
            else if (rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i], 1, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec * 1000000LL + tv.tv_usec;

        secs = (stop - start) / 1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, 1.0 / (secs));
    } else if (test == 'K') {
        /* 1 KiB copies across 256 MiB. */
        err = cudaMalloc((void **)&d_mem_pointer, sizeof(unsigned char) * 256 * MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char) * 1024);

        gettimeofday(&tv, NULL);
        start = tv.tv_sec * 1000000LL + tv.tv_usec;
        for (unsigned long i = 0; i < 256 * MEGABYTE / 1024; i++) {
            if (rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[i * 1024], (void *)mem_pointer,
                                 1024, dir);
            else if (rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i * 1024],
                                 1024, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec * 1000000LL + tv.tv_usec;

        secs = (stop - start) / 1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, (256.0 / 1024.0) / (secs));
    } else if (test == 'M') {
        /* 1 MiB copies, 5120 of them, wrapping around a 512 MiB buffer. */
        err = cudaMalloc((void **)&d_mem_pointer, sizeof(unsigned char) * 512 * MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char) * MEGABYTE);

        gettimeofday(&tv, NULL);
        start = tv.tv_sec * 1000000LL + tv.tv_usec;
        for (unsigned long i = 0; i < 512 * 10; i++) {
            if (rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[(i * MEGABYTE) % (512 * MEGABYTE)],
                                 (void *)mem_pointer, MEGABYTE, dir);
            else if (rw == 'R')
                err = cudaMemcpy((void *)mem_pointer,
                                 (void *)&d_mem_pointer[(i * MEGABYTE) % (512 * MEGABYTE)],
                                 MEGABYTE, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec * 1000000LL + tv.tv_usec;

        secs = (stop - start) / 1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, (512 * 10) / (secs));
    }

    err = cudaFree(d_mem_pointer);  /* cudaFree(NULL) is a valid no-op */
    CHECK_ERR(err);
    free(mem_pointer);              /* was leaked */
    return 0;
}
23,657
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#define WIDTH 1024
#define HEIGHT 768
#define CORE_COUNT 128

/* 16-entry palette; a pixel's color is colors[count >> 3] with the escape
 * count in 0..127, so each entry covers 8 consecutive counts. */
static int colors[] =
{
  0xff0000,  // f
  0xee3300,  // e
  0xcc5500,  // d
  0xaa5500,  // c
  0xaa3300,  // b
  0x666600,  // a
  0x999900,  // 9
  0x669900,  // 8
  0x339900,  // 7
  0x0099aa,  // 6
  0x0066aa,  // 5
  0x0033aa,  // 4
  0x0000aa,  // 3
  0x000099,  // 2
  0x000066,  // 1
  0x000000,  // 0
};

/* Same palette for device code, in constant memory so all threads share one
 * cached copy instead of each kernel building a per-thread local array. */
__constant__ int dev_colors[16] =
{
  0xff0000, 0xee3300, 0xcc5500, 0xaa5500,
  0xaa3300, 0x666600, 0x999900, 0x669900,
  0x339900, 0x0099aa, 0x0066aa, 0x0033aa,
  0x0000aa, 0x000099, 0x000066, 0x000000,
};

struct MandelInfo
{
  float r_step;
  float i_step;
  float real_start;
  float imaginary_start;
  int width;
  int height;
};

struct CoreInfo
{
  MandelInfo mandel_info;
  int *picture;
  uint8_t signal_start;
  uint8_t signal_done;
  int core_id;
};

/* Escape-time iteration for one point c = (r, i): iterate z = z*z + c and
 * return the iteration count (0..127) at which |z|^2 first exceeds 4, or
 * 127 if it never escapes.  Shared by the host path and both kernels. */
__host__ __device__ static inline int mandel_iterations(float r, float i)
{
  const int max_count = 127;
  float zr = 0, zi = 0;
  int count;

  for (count = 0; count < max_count; count++)
  {
    float tr = ((zr * zr) - (zi * zi));
    float ti = (2 * zr * zi);
    zr = tr + r;
    zi = ti + i;
    if ((zr * zr) + (zi * zi) > 4) { break; }
  }

  return count;
}

/* CPU renderer: fills picture (width*height row-major ints) with palette
 * colors for the rectangle [real_start,real_end] x [imag_start,imag_end]. */
int mandel_calc(
  int *picture,
  int width, int height,
  float real_start, float real_end,
  float imaginary_start, float imaginary_end)
{
  float r_step = (real_end - real_start) / (float)width;
  float i_step = (imaginary_end - imaginary_start) / (float)height;
  int ptr = 0;

  float i = imaginary_start;
  for (int y = 0; y < height; y++)
  {
    float r = real_start;
    for (int x = 0; x < width; x++)
    {
      picture[ptr++] = colors[mandel_iterations(r, i) >> 3];
      r = r + r_step;
    }
    i = i + i_step;
  }

  return 0;
}

/* Single-thread GPU renderer (launch <<<1,1>>>): same loop as the CPU
 * version, used as a baseline. */
__global__ void mandel_calc_cuda_single(
  int *picture,
  int width, int height,
  float real_start, float real_end,
  float imaginary_start, float imaginary_end)
{
  float r_step = (real_end - real_start) / (float)width;
  float i_step = (imaginary_end - imaginary_start) / (float)height;
  int ptr = 0;

  float i = imaginary_start;
  for (int y = 0; y < height; y++)
  {
    float r = real_start;
    for (int x = 0; x < width; x++)
    {
      picture[ptr++] = dev_colors[mandel_iterations(r, i) >> 3];
      r = r + r_step;
    }
    i = i + i_step;
  }
}

/* Multi-thread GPU renderer (launch with one block): each thread renders a
 * contiguous band of height / blockDim.x rows.  Generalized from the
 * hard-coded CORE_COUNT to blockDim.x; identical result for the existing
 * <<<1,128>>> launch.  Assumes blockDim.x divides height. */
__global__ void mandel_calc_cuda_multi(
  int *picture,
  int width, int height,
  float real_start, float real_end,
  float imaginary_start, float imaginary_end)
{
  int index = threadIdx.x;

  float r_step = (real_end - real_start) / (float)width;
  float i_step = (imaginary_end - imaginary_start) / (float)height;

  int rows = height / blockDim.x;
  int y_start = index * rows;
  int ptr = y_start * width;
  float i = imaginary_start + (i_step * y_start);

  for (int y = 0; y < rows; y++)
  {
    float r = real_start;
    for (int x = 0; x < width; x++)
    {
      picture[ptr++] = dev_colors[mandel_iterations(r, i) >> 3];
      r = r + r_step;
    }
    i = i + i_step;
  }
}

/* Write n as 4 little-endian bytes. */
int write_int32(FILE *out, int n)
{
  putc((n & 0xff), out);
  putc(((n >> 8) & 0xff), out);
  putc(((n >> 16) & 0xff), out);
  putc(((n >> 24) & 0xff), out);
  return 0;
}

/* Write n as 2 little-endian bytes. */
int write_int16(FILE *out, int n)
{
  putc((n & 0xff), out);
  putc(((n >> 8) & 0xff), out);
  return 0;
}

/* Dump picture as a 24-bit BMP named "out.bmp".  Rows are padded to a
 * 4-byte multiple as the format requires; BMP stores rows bottom-up, so the
 * image appears vertically flipped relative to the render order. */
void write_bmp(int *picture, int width, int height)
{
  FILE *out;
  int bmp_width;
  int bmp_size;
  int padding;
  int offset;
  int color;
  int x, y;

  out = fopen("out.bmp", "wb");
  if (out == NULL)
  {
    printf("Can't open file for writing.");
    return;
  }

  bmp_width = width * 3;
  bmp_width = (bmp_width + 3) & (~0x3);   /* pad each row to 4 bytes */
  bmp_size = (bmp_width * height) + 14 + 40;
  padding = bmp_width - (width * 3);

  /* File header: 14 bytes */
  putc('B', out);
  putc('M', out);
  write_int32(out, bmp_size);
  write_int16(out, 0);
  write_int16(out, 0);
  write_int32(out, 54);                   /* head1: 14  head2: 40 */

  /* BITMAPINFOHEADER: 40 bytes */
  write_int32(out, 40);                   /* biSize */
  write_int32(out, width);
  write_int32(out, height);
  write_int16(out, 1);
  write_int16(out, 24);
  write_int32(out, 0);                    /* compression */
  write_int32(out, bmp_width * height);
  write_int32(out, 0);                    /* biXPelsperMetre */
  write_int32(out, 0);                    /* biYPelsperMetre */
  write_int32(out, 0);
  write_int32(out, 0);

  for (y = 0; y < height; y++)
  {
    offset = y * width;

    for (x = 0; x < width; x++)
    {
      color = picture[offset++];
      putc(color & 0xff, out);
      putc((color >> 8) & 0xff, out);
      putc((color >> 16) & 0xff, out);
    }

    for (x = 0; x < padding; x++) { putc(0, out); }
  }

  fclose(out);
}

/*
 * Usage: prog <normal|cuda|cuda128>
 *   normal  - CPU render
 *   cuda    - single GPU thread
 *   cuda128 - 128 GPU threads in one block
 * Renders a fixed window of the Mandelbrot set, prints timing, writes out.bmp.
 */
int main(int argc, char *argv[])
{
  struct timeval tv_start, tv_end;
  int *picture;
  float real_start = 0.37 - 0.00;
  float real_end = 0.37 + 0.04;
  float imaginary_start = -0.2166 - 0.02;
  float imaginary_end = -0.2166 + 0.02;
  int do_cuda = 0;

  if (argc != 2)
  {
    printf("Usage: %s <normal/cuda/cuda128>\n", argv[0]);
    exit(0);
  }

  if (strcmp(argv[1], "normal") == 0) { do_cuda = 0; }
  else if (strcmp(argv[1], "cuda") == 0) { do_cuda = 1; }
  else if (strcmp(argv[1], "cuda128") == 0) { do_cuda = 2; }

  const int length = WIDTH * HEIGHT * sizeof(int);

  switch (do_cuda)
  {
    case 0:
      picture = (int *)malloc(length);
      break;
    case 1:
    case 2:
      cudaMallocManaged(&picture, length);
      break;
  }

  gettimeofday(&tv_start, NULL);

  if (do_cuda == 1)
  {
    mandel_calc_cuda_single<<<1, 1>>>(picture, WIDTH, HEIGHT,
      real_start, real_end, imaginary_start, imaginary_end);
    cudaDeviceSynchronize();
  }
  else if (do_cuda == 2)
  {
    mandel_calc_cuda_multi<<<1, 128>>>(picture, WIDTH, HEIGHT,
      real_start, real_end, imaginary_start, imaginary_end);
    cudaDeviceSynchronize();
  }
  else
  {
    mandel_calc(picture, WIDTH, HEIGHT,
      real_start, real_end, imaginary_start, imaginary_end);
  }

  gettimeofday(&tv_end, NULL);

  printf("%ld %ld\n", tv_end.tv_sec, tv_end.tv_usec);
  printf("%ld %ld\n", tv_start.tv_sec, tv_start.tv_usec);

  long time_diff = tv_end.tv_usec - tv_start.tv_usec;
  while (time_diff < 0)
  {
    tv_end.tv_sec--;
    time_diff += 1000000;
  }
  time_diff += (tv_end.tv_sec - tv_start.tv_sec) * 1000000;
  printf("time=%f\n", (float)time_diff / 1000000);

  switch (do_cuda)
  {
    case 0:
    {
      write_bmp(picture, WIDTH, HEIGHT);
      free(picture);
      break;
    }
    case 1:
    case 2:
    {
      int *image = (int *)malloc(length);
      cudaMemcpy(image, picture, length, cudaMemcpyDeviceToHost);
      write_bmp(image, WIDTH, HEIGHT);
      free(image);             /* was leaked */
      cudaFree(picture);
      break;
    }
  }

  return 0;
}
23,658
#include "includes.h" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; for(long i=0;i<1024*500;i++){ c[i] = a[i]*10 + b[i] * 5; } //printf("addKernel::threadIdx: %d, %d, %d\n", threadIdx.x, threadIdx.y, threadIdx.z); }
23,659
#include <stdlib.h> #include <stdio.h> int main(){ int *ptr = 0; cudaError_t error = cudaMalloc((void**)&ptr, UINT_MAX); if(error != cudaSuccess){ printf("CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } return 0; }
23,660
namespace mynamespace { namespace subnamespace { class Foo { public: int a; Foo() { } ~Foo() { } void somefunc() { } }; } class Bar : public subnamespace::Foo { public: Foo foo; int b; Bar() { } ~Bar() { } void somefunc2() { } }; template<typename T> class Templated : public subnamespace::Foo { public: T someattribute; void doSomething(T val) { } }; } template<typename T> __global__ void doSomething(mynamespace::Bar bar, mynamespace::Templated<T> t, T *data, int *int_data) { bar.a = 123; data[0] = t.someattribute; int_data[0] = bar.b; } __host__ void doSomething_host(mynamespace::Bar bar, mynamespace::Templated<float> t, float *data, int *int_data) { doSomething<<<dim3(32), dim3(32)>>>(bar, t, data, int_data); } __host__ void doSomething_host2(mynamespace::Bar bar, mynamespace::Templated<int> t, int *data, int *int_data) { doSomething<<<dim3(32), dim3(32)>>>(bar, t, data, int_data); } __host__ void doSomething_host2(mynamespace::Bar bar, mynamespace::Templated<char> t, char *data, int *int_data) { doSomething<<<dim3(32), dim3(32)>>>(bar, t, data, int_data); }
23,661
#include<stdio.h> #include<stdlib.h> #include<cuda.h> __global__ void oddEven(int *d,int I, int n) { int id=threadIdx.x; if(I==0 &&((id*2+1)<n)) { if(d[id*2] > d[id*2+1]) { int temp = d[id*2]; d[id*2] = d[id*2+1]; d[id*2+1] = temp; } } if(I==1 &&((id*2+2)<n)) { if(d[id*2+1] > d[id*2+2]) { int temp = d[id*2+1]; d[id*2+1] = d[id*2+2]; d[id*2+2] = temp; } } } int main() { int input[100] , output[100], n, i; int *device; printf("\n\nEnter number of elements :"); scanf("%d",&n); int size=n*sizeof(int); cudaMalloc(&device,size); printf("\n\nEnter numbers :"); for(i=0 ;i<n ;i++) { scanf("%d",&input[i]); } printf("\n\nArray Before Sorting =>"); for(i=0 ;i<n ;i++) { printf("%d ",input[i]); } cudaMemcpy(device,input,size,cudaMemcpyHostToDevice); for(i=0 ;i<n ;i++) { oddEven<<<1,n>>>(device,i%2,n); } cudaMemcpy(output,device,size,cudaMemcpyDeviceToHost); printf("\n\nArray After Sorting =>"); for(i=0 ;i<n ;i++) { printf("%d ",output[i]); } cudaFree(device); return 0; }
23,662
/* reduce-integer.cu */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* memcpy was used without its defining header */
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>

/*
 * Check kernel performance using the following commands
 *
 * nvcc reduce-integer.cu -o bin/reduce-integer
 *
 * su
 * nvprof --metrics inst_per_warp bin/reduce-integer
 * nvprof --metrics gld_throughput bin/reduce-integer
 * nvprof --metrics gld_efficiency bin/reduce-integer
 * nvprof --metrics gst_efficiency bin/reduce-integer
 * nvprof --metrics dram_read_throughput bin/reduce-integer
 * nvprof --metrics stall_sync bin/reduce-integer
 */

#define CHECK_CUDA_CALL(call)                                           \
    {                                                                   \
        const cudaError_t error = call;                                 \
                                                                        \
        if (error != cudaSuccess) {                                     \
            fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n",    \
                    __FILE__, __LINE__,                                 \
                    error, cudaGetErrorString(error));                  \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    }

/* CPU reference: in-place pairwise reduction, halving the array each call.
 * Assumes size is a power of two (an odd trailing element would be lost). */
int recursiveReduceHost(int* inArray, size_t size)
{
    int i;
    int stride;

    if (size == 1)
        return inArray[0];

    stride = (int)(size / 2);

    /* Execute in-place reduction */
    for (i = 0; i < stride; ++i)
        inArray[i] += inArray[i + stride];

    /* Recursive call */
    return recursiveReduceHost(inArray, stride);
}

/* Naive neighbored-pair reduction; the modulo test makes every warp diverge.
 * All kernels below assume size is a multiple of blockDim.x (the early
 * return would otherwise desynchronize __syncthreads()). */
__global__ void reduceNeighbored(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    int* pArray = inArray + blockIdx.x * blockDim.x;
    int stride;

    if (i >= size)
        return;

    for (stride = 1; stride < blockDim.x; stride *= 2) {
        if ((id % (2 * stride)) == 0)
            pArray[id] += pArray[id + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* Neighbored-pair reduction with the working threads packed into the low
 * warps, avoiding intra-warp divergence. */
__global__ void reduceNeighboredLess(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int j;
    int* pArray = inArray + blockIdx.x * blockDim.x;
    int stride;

    if (i >= size)
        return;

    for (stride = 1; stride < blockDim.x; stride *= 2) {
        /* Calculate local array index from the thread index */
        /* This prevents warp divergences to occur */
        j = 2 * stride * id;

        if (j < blockDim.x)
            pArray[j] += pArray[j + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* Interleaved-pair reduction: stride halves each step, keeping global
 * accesses contiguous across the active threads. */
__global__ void reduceInterleaved(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    int* pArray = inArray + blockIdx.x * blockDim.x;
    int stride;

    if (i >= size)
        return;

    for (stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (id < stride)
            pArray[id] += pArray[id + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* Interleaved reduction where each block first folds in a second data block;
 * launch with grid.x / 2 blocks. */
__global__ void reduceUnrolling2(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + (blockIdx.x * 2) * blockDim.x;
    int* pArray = inArray + (blockIdx.x * 2) * blockDim.x;
    int stride;

    /* Each thread processes 2 data blocks (cyclic) */
    if (i + blockDim.x < size)
        inArray[i] += inArray[i + blockDim.x];

    __syncthreads();

    for (stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (id < stride)
            pArray[id] += pArray[id + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* As above, folding in 4 data blocks per block; launch with grid.x / 4. */
__global__ void reduceUnrolling4(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + (blockIdx.x * 4) * blockDim.x;
    int* pArray = inArray + (blockIdx.x * 4) * blockDim.x;
    int stride;

    /* Each thread processes 4 data blocks (cyclic) */
    if (i + blockDim.x * 3 < size) {
        inArray[i] += inArray[i + blockDim.x];
        inArray[i] += inArray[i + blockDim.x * 2];
        inArray[i] += inArray[i + blockDim.x * 3];
    }

    __syncthreads();

    for (stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (id < stride)
            pArray[id] += pArray[id + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* As above, folding in 8 data blocks per block; launch with grid.x / 8. */
__global__ void reduceUnrolling8(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + (blockIdx.x * 8) * blockDim.x;
    int* pArray = inArray + (blockIdx.x * 8) * blockDim.x;
    int stride;

    /* Each thread processes 8 data blocks (cyclic) */
    if (i + blockDim.x * 7 < size) {
        inArray[i] += inArray[i + blockDim.x];
        inArray[i] += inArray[i + blockDim.x * 2];
        inArray[i] += inArray[i + blockDim.x * 3];
        inArray[i] += inArray[i + blockDim.x * 4];
        inArray[i] += inArray[i + blockDim.x * 5];
        inArray[i] += inArray[i + blockDim.x * 6];
        inArray[i] += inArray[i + blockDim.x * 7];
    }

    __syncthreads();

    for (stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (id < stride)
            pArray[id] += pArray[id + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* 8-way unrolled reduction whose final 64->1 step is unrolled over a single
 * warp through a volatile pointer (relies on implicit warp synchrony;
 * pre-Volta idiom).  Launch with grid.x / 8. */
__global__ void reduceUnrollingWarps8(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + (blockIdx.x * 8) * blockDim.x;
    int* pArray = inArray + (blockIdx.x * 8) * blockDim.x;
    volatile int* pvArray;
    int stride;

    /* Each thread processes 8 data blocks (cyclic) */
    if (i + blockDim.x * 7 < size) {
        inArray[i] += inArray[i + blockDim.x];
        inArray[i] += inArray[i + blockDim.x * 2];
        inArray[i] += inArray[i + blockDim.x * 3];
        inArray[i] += inArray[i + blockDim.x * 4];
        inArray[i] += inArray[i + blockDim.x * 5];
        inArray[i] += inArray[i + blockDim.x * 6];
        inArray[i] += inArray[i + blockDim.x * 7];
    }

    __syncthreads();

    for (stride = blockDim.x / 2; stride > 32; stride >>= 1) {
        if (id < stride)
            pArray[id] += pArray[id + stride];

        /* Synchronize between all threads in the thread block */
        __syncthreads();
    }

    /* Reduction using warp unrolling */
    /* Unnecessary __syncthreads() calls are eliminated */
    if (id < 32) {
        pvArray = pArray;
        pvArray[id] += pvArray[id + 32];
        pvArray[id] += pvArray[id + 16];
        pvArray[id] += pvArray[id + 8];
        pvArray[id] += pvArray[id + 4];
        pvArray[id] += pvArray[id + 2];
        pvArray[id] += pvArray[id + 1];
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* Fully unrolled reduction: the stride loop is replaced by explicit steps
 * guarded on blockDim.x, plus the warp-unrolled tail.  Launch grid.x / 8. */
__global__ void reduceCompleteUnrollingWarps8(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + (blockIdx.x * 8) * blockDim.x;
    int* pArray = inArray + (blockIdx.x * 8) * blockDim.x;
    volatile int* pvArray;

    /* Each thread processes 8 data blocks (cyclic) */
    if (i + blockDim.x * 7 < size) {
        inArray[i] += inArray[i + blockDim.x];
        inArray[i] += inArray[i + blockDim.x * 2];
        inArray[i] += inArray[i + blockDim.x * 3];
        inArray[i] += inArray[i + blockDim.x * 4];
        inArray[i] += inArray[i + blockDim.x * 5];
        inArray[i] += inArray[i + blockDim.x * 6];
        inArray[i] += inArray[i + blockDim.x * 7];
    }

    __syncthreads();

    /* In-place reduction and complete unrolling */
    /* Maximum number of the threads in the thread block is 1024 */
    if (blockDim.x >= 1024 && id < 512)
        pArray[id] += pArray[id + 512];
    __syncthreads();

    if (blockDim.x >= 512 && id < 256)
        pArray[id] += pArray[id + 256];
    __syncthreads();

    if (blockDim.x >= 256 && id < 128)
        pArray[id] += pArray[id + 128];
    __syncthreads();

    if (blockDim.x >= 128 && id < 64)
        pArray[id] += pArray[id + 64];
    __syncthreads();

    /* Warp unrolling */
    /* Unnecessary __syncthreads() calls are eliminated */
    if (id < 32) {
        pvArray = pArray;
        pvArray[id] += pvArray[id + 32];
        pvArray[id] += pvArray[id + 16];
        pvArray[id] += pvArray[id + 8];
        pvArray[id] += pvArray[id + 4];
        pvArray[id] += pvArray[id + 2];
        pvArray[id] += pvArray[id + 1];
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* Same as reduceCompleteUnrollingWarps8, but the block size is a template
 * parameter so the guards become compile-time constants. */
template <unsigned int BlockSize>
__global__ void reduceCompleteUnrollingWarps(int* inArray, int* outArray, unsigned int size)
{
    unsigned int id = threadIdx.x;
    unsigned int i = threadIdx.x + (blockIdx.x * 8) * blockDim.x;
    int* pArray = inArray + (blockIdx.x * 8) * blockDim.x;
    volatile int* pvArray;

    /* Each thread processes 8 data blocks (cyclic) */
    if (i + blockDim.x * 7 < size) {
        inArray[i] += inArray[i + blockDim.x];
        inArray[i] += inArray[i + blockDim.x * 2];
        inArray[i] += inArray[i + blockDim.x * 3];
        inArray[i] += inArray[i + blockDim.x * 4];
        inArray[i] += inArray[i + blockDim.x * 5];
        inArray[i] += inArray[i + blockDim.x * 6];
        inArray[i] += inArray[i + blockDim.x * 7];
    }

    __syncthreads();

    /* In-place reduction and complete unrolling */
    /* Maximum number of the threads in the thread block is 1024 */
    if (BlockSize >= 1024 && id < 512)
        pArray[id] += pArray[id + 512];
    __syncthreads();

    if (BlockSize >= 512 && id < 256)
        pArray[id] += pArray[id + 256];
    __syncthreads();

    if (BlockSize >= 256 && id < 128)
        pArray[id] += pArray[id + 128];
    __syncthreads();

    if (BlockSize >= 128 && id < 64)
        pArray[id] += pArray[id + 64];
    __syncthreads();

    /* Warp unrolling */
    /* Unnecessary __syncthreads() calls are eliminated */
    if (id < 32) {
        pvArray = pArray;
        pvArray[id] += pvArray[id + 32];
        pvArray[id] += pvArray[id + 16];
        pvArray[id] += pvArray[id + 8];
        pvArray[id] += pvArray[id + 4];
        pvArray[id] += pvArray[id + 2];
        pvArray[id] += pvArray[id + 1];
    }

    if (id == 0)
        outArray[blockIdx.x] = pArray[0];
}

/* Wall-clock difference in seconds between two gettimeofday() samples. */
static double elapsedSeconds(const struct timeval* s, const struct timeval* e)
{
    return ((double)e->tv_sec + (double)e->tv_usec * 1.0e-6)
         - ((double)s->tv_sec + (double)s->tv_usec * 1.0e-6);
}

/*
 * Run one reduction kernel and report its timing and correctness.
 * Factored out of main(), which previously repeated this ~20-line
 * copy/time/launch/verify sequence once per kernel.
 *
 * name      - label used in the report lines
 * kernel    - reduction kernel; launched with grid.x / divisor blocks
 * divisor   - data-blocks-per-thread-block unrolling factor (1, 2, 4 or 8);
 *             the execution-configuration line is printed only for
 *             divisor > 1, matching the original output
 * The remaining parameters carry the shared buffers, sizes and the host
 * reference sum computed in main().
 */
static void benchmarkKernel(const char* name,
                            void (*kernel)(int*, int*, unsigned int),
                            int divisor, dim3 grid, dim3 block,
                            int* devInput, int* devResult,
                            const int* hostInput, int* hostResult,
                            size_t numOfBytes, int numOfElements, int hostSum)
{
    struct timeval startTime;
    struct timeval endTime;
    int blocks = grid.x / divisor;
    int devSum = 0;
    int i;

    /* Restore the (kernel-clobbered) input array on the device */
    CHECK_CUDA_CALL(cudaMemcpy(devInput, hostInput, numOfBytes, cudaMemcpyHostToDevice));
    CHECK_CUDA_CALL(cudaDeviceSynchronize());

    gettimeofday(&startTime, NULL);
    kernel<<<blocks, block>>>(devInput, devResult, numOfElements);
    CHECK_CUDA_CALL(cudaDeviceSynchronize());
    gettimeofday(&endTime, NULL);

    /* Check kernel error */
    CHECK_CUDA_CALL(cudaGetLastError());

    /* Copy kernel result */
    CHECK_CUDA_CALL(cudaMemcpy(hostResult, devResult, blocks * sizeof(int),
                               cudaMemcpyDeviceToHost));
    CHECK_CUDA_CALL(cudaDeviceSynchronize());

    /* Finish the reduction over per-block partial sums on the host */
    for (i = 0; i < blocks; ++i)
        devSum += hostResult[i];

    if (divisor > 1)
        printf("Execution configuration: <<<(%d, %d), (%d, %d)>>>\n",
               blocks, grid.y, block.x, block.y);

    printf("Device (%s) execution time: %.6f, result: %d\n",
           name, elapsedSeconds(&startTime, &endTime), devSum);

    if (hostSum == devSum)
        printf("Test (%s) passed!\n", name);
    else
        printf("Test (%s) failed!\n", name);
}

/*
 * Benchmark all reduction variants against a CPU reference.
 * Optional argv[1] overrides the block size (default 512).
 */
int main(int argc, char** argv)
{
    int i;
    int dev;
    cudaDeviceProp deviceProp;
    int numOfElements;
    size_t numOfBytes;
    int blockSize;
    int* hostInput;
    int* hostTmp;
    int* hostResult;
    int* devInput;
    int* devResult;
    int hostSum;
    struct timeval startTime;
    struct timeval endTime;

    /* Setup device */
    dev = 0;
    CHECK_CUDA_CALL(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using device %d: %s\n", dev, deviceProp.name);
    CHECK_CUDA_CALL(cudaSetDevice(dev));

    /* Set array size (power of two; required by the reduction kernels) */
    numOfElements = 1 << 24;
    numOfBytes = numOfElements * sizeof(int);
    printf("Array size: %d\n", numOfElements);

    /* Set execution configuration */
    blockSize = 512;

    if (argc > 1)
        blockSize = atoi(argv[1]);

    dim3 block(blockSize, 1);
    dim3 grid((numOfElements + block.x - 1) / block.x, 1);
    printf("Execution configuration: <<<(%d, %d), (%d, %d)>>>\n",
           grid.x, grid.y, block.x, block.y);

    /* Allocate host memory */
    hostInput = (int*)calloc(numOfElements, sizeof(int));
    hostTmp = (int*)calloc(numOfElements, sizeof(int));
    hostResult = (int*)calloc(grid.x, sizeof(int));

    /* Allocate device memory */
    CHECK_CUDA_CALL(cudaMalloc((void**)&devInput, numOfBytes));
    CHECK_CUDA_CALL(cudaMalloc((void**)&devResult, grid.x * sizeof(int)));

    /* Initialize array */
    for (i = 0; i < numOfElements; ++i)
        hostInput[i] = (int)(rand() & 0xFF);

    memcpy(hostTmp, hostInput, numOfBytes);

    /* Execute reduction operation in host */
    gettimeofday(&startTime, NULL);
    hostSum = recursiveReduceHost(hostTmp, numOfElements);
    gettimeofday(&endTime, NULL);
    printf("Host execution time: %.6f, result: %d\n",
           elapsedSeconds(&startTime, &endTime), hostSum);

    benchmarkKernel("reduceNeighbored", reduceNeighbored, 1, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceNeighboredLess", reduceNeighboredLess, 1, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceInterleaved", reduceInterleaved, 1, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceUnrolling2", reduceUnrolling2, 2, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceUnrolling4", reduceUnrolling4, 4, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceUnrolling8", reduceUnrolling8, 8, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceUnrollingWarps8", reduceUnrollingWarps8, 8, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);
    benchmarkKernel("reduceCompleteUnrollingWarps8", reduceCompleteUnrollingWarps8,
                    8, grid, block,
                    devInput, devResult, hostInput, hostResult,
                    numOfBytes, numOfElements, hostSum);

    /* Select the template instantiation matching the runtime block size.
     * Unsupported sizes are now skipped explicitly instead of silently
     * reporting stale timing and result data. */
    void (*completeUnrolling)(int*, int*, unsigned int) = NULL;

    switch (blockSize) {
        case 1024:
            completeUnrolling = reduceCompleteUnrollingWarps<1024>;
            break;
        case 512:
            completeUnrolling = reduceCompleteUnrollingWarps<512>;
            break;
        case 256:
            completeUnrolling = reduceCompleteUnrollingWarps<256>;
            break;
        case 128:
            completeUnrolling = reduceCompleteUnrollingWarps<128>;
            break;
        case 64:
            completeUnrolling = reduceCompleteUnrollingWarps<64>;
            break;
    }

    if (completeUnrolling != NULL)
        benchmarkKernel("reduceCompleteUnrollingWarps", completeUnrolling,
                        8, grid, block,
                        devInput, devResult, hostInput, hostResult,
                        numOfBytes, numOfElements, hostSum);
    else
        printf("Skipping reduceCompleteUnrollingWarps: unsupported block size %d\n",
               blockSize);

    /* Free device memory */
    CHECK_CUDA_CALL(cudaFree(devInput));
    CHECK_CUDA_CALL(cudaFree(devResult));

    /* Free host memory */
    free(hostInput);
    free(hostTmp);
    free(hostResult);

    /* Reset device */
    CHECK_CUDA_CALL(cudaDeviceReset());

    return EXIT_SUCCESS;
}
23,663
#include<stdio.h>
#include<stdlib.h>

/*
 * Odd-even transposition sort: one thread per element, one block.
 * Phase i compares/swaps the pairs whose left index has the same parity
 * as i, so the pairs touched within a single phase are disjoint.
 * BUG FIX: a __syncthreads() barrier is required after every phase so
 * that phase i+1 reads the values written in phase i; the original
 * kernel omitted it and raced.
 */
__global__ void srt(int* a, int n)
{
    int idx = threadIdx.x;
    for (int i = 0; i < n - 1; i++) {
        if (i % 2 == idx % 2 && idx + 1 < n) {
            if (a[idx] > a[idx + 1]) {
                int t = a[idx];
                a[idx] = a[idx + 1];
                a[idx + 1] = t;
            }
        }
        /* Outside the divergent branch: every thread in the block
           reaches this barrier, as required. */
        __syncthreads();
    }
}

int main()
{
    int n;
    /* Validate input; the kernel uses one thread per element and a CUDA
       block holds at most 1024 threads. */
    if (scanf("%d", &n) != 1 || n <= 0 || n > 1024) return 1;

    int *a_h = (int*)malloc(n * sizeof(int));
    if (!a_h) return 1;
    for (int i = 0; i < n; i++)
        a_h[i] = rand() % 1000;

    printf("earlier:\n");
    for (int i = 0; i < n; i++) printf("%d ", a_h[i]);
    printf("\n");

    int *a_d;
    cudaMalloc((void**)&a_d, n * sizeof(int));
    cudaMemcpy(a_d, a_h, n * sizeof(int), cudaMemcpyHostToDevice);

    /* Single block of n threads, matching the kernel's assumptions. */
    srt<<<1, n>>>(a_d, n);

    cudaMemcpy(a_h, a_d, n * sizeof(int), cudaMemcpyDeviceToHost);

    printf("sorted\n");
    printf("after:\n");
    for (int i = 0; i < n; i++) printf("%d ", a_h[i]);

    cudaFree(a_d);   /* cleanup was missing in the original */
    free(a_h);
    return 0;
}
23,664
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <time.h>
#include <sstream>
#include <iostream>
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__   // workaround kept from the original build setup
#endif
#include <device_functions.h>

using namespace std;

#define imin(a, b) (a<b? a:b)

const int N = 33 * 1024;
// Must remain a power of two: the in-block reduction halves it each step.
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);

/*
 * Dot product of a and b (length N): each thread accumulates a grid-stride
 * partial sum, the block reduces those partials through shared memory, and
 * thread 0 of each block writes one partial result into c[blockIdx.x].
 * The host must sum the blocksPerGrid entries of c to finish.
 */
__global__ void dot(float *a, float *b, float *c)
{
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0.0f;   // float literal: avoid accidental double math
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    __syncthreads();     // partials must be visible before the reduction

    // Tree reduction; the barrier is outside the divergent branch.
    for (int i = blockDim.x / 2; i != 0; i /= 2) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main(void)
{
    float *a, *b, *c, result, cpu_result;
    float *dev_a, *dev_b, *dev_c;

    a = new float[N];
    b = new float[N];
    c = new float[N];   // only blocksPerGrid entries are used; size kept for compatibility
    result = 0;
    cpu_result = 0;

    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
        c[i] = 0;
    }

    cout << "start" << endl;

    // CPU reference value for comparison.
    for (int i = 0; i < N; i++) {
        cpu_result += a[i] * b[i];
    }
    cout << cpu_result << endl;

    cudaMalloc((void**)&dev_a, sizeof(float) * N);
    cudaMalloc((void**)&dev_b, sizeof(float) * N);
    cudaMalloc((void**)&dev_c, sizeof(float) * blocksPerGrid);

    cudaMemcpy(dev_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemset(dev_c, 0, sizeof(float) * blocksPerGrid);

    dot <<< blocksPerGrid, threadsPerBlock >>> (dev_a, dev_b, dev_c);

    // BUG FIX: surface launch-configuration errors; the original never checked.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(c, dev_c, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost);

    // Final host-side reduction over the per-block partials.
    for (int j = 0; j < blocksPerGrid; j++) {
        result += c[j];
    }
    std::cout << result << std::endl;

    system("pause");

    // BUG FIX: free device buffers explicitly (was missing before reset).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaDeviceReset();

    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
23,665
/* Matrix addition with too-large matrices for the device memory, without
   utilising streams. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

// Grid size
#define B 100
// Block size
#define T 512
// Matrix dimension
#define C 51200L

// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call)                                                  \
do{                                                                           \
    cudaError_t cuErr = call;                                                 \
    if(cudaSuccess != cuErr){                                                 \
        printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
        exit(0);                                                              \
    }                                                                         \
}while(0)

int size = sizeof(int64_t);

// Host pointers
int64_t *a;
int64_t *b;
int64_t *c;
// Device pointers
int64_t *d_a;
int64_t *d_b;
int64_t *d_c;

// Host initialisation: a and b are all ones, c is zeroed, so every
// correctly-computed output element must equal 2 (see check()).
int init(){
    a = (int64_t *) malloc(C*C*size);
    b = (int64_t *) malloc(C*C*size);
    c = (int64_t *) malloc(C*C*size);
    for(int64_t i=0; i<C; i++){
        for(int64_t j=0; j<C; j++){
            b[j+i*C]=1;
            a[j+i*C]=1;
            c[j+i*C]=0;
        }
    }
    return 0;
}

// Kernel: element-wise ADDITION despite the name "Mult".
// Each thread handles exactly one element; the launch below covers only
// B*T elements, far fewer than C*C (apparently deliberate for this demo).
__global__ void Mult(int64_t* d_a, int64_t* d_b, int64_t* d_c){
    int64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
    d_c[tid] = d_a[tid] + d_b[tid];
}

// Verifying results: reports whether any output element differs from 2.
int check(){
    bool test = false;
    for(int64_t i=0; i<C*C; i++){
        if(c[i]!=2){
            test = true;
        }
    }
    printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n");
    return 0;
}

// Allocating device memory and copying matrices a and b from the host to
// d_a and d_b on the device
void initcuda(){
    cudaErrorCheck( cudaMalloc(&d_a, C*C*size));
    cudaErrorCheck( cudaMalloc(&d_b, C*C*size));
    cudaErrorCheck( cudaMalloc(&d_c, C*C*size));
    cudaErrorCheck( cudaMemcpy(d_a,a,C*C*size,cudaMemcpyHostToDevice));
    cudaErrorCheck( cudaMemcpy(d_b,b,C*C*size,cudaMemcpyHostToDevice));
}

// Main program
int main(){
    // Initialisation
    init();
    initcuda();

    // Launch Kernel. NOTE: the grid covers only B*T = 51200 of the C*C
    // elements — check() is expected to flag the untouched remainder.
    Mult<<<B,T>>>(d_a,d_b,d_c);

    // Check for errors in kernel launch (e.g. invalid execution
    // configuration parameters)
    cudaErrorCheck( cudaGetLastError());

    // Check for errors on the GPU after control is returned to CPU
    cudaErrorCheck( cudaDeviceSynchronize());

    // Copying back the result d_c from the device to c on the host.
    // BUG FIX: the original passed cudaMemcpyHostToDevice here, which
    // copies in the wrong direction and leaves c untouched.
    cudaErrorCheck( cudaMemcpy(c,d_c,C*C*size,cudaMemcpyDeviceToHost));

    // Verifying results
    check();

    // Freeing device memory
    cudaErrorCheck( cudaFree(d_a));
    cudaErrorCheck( cudaFree(d_b));
    cudaErrorCheck( cudaFree(d_c));

    // Freeing host memory
    free(a);
    free(b);
    free(c);
    return 0;
}
23,666
#include "includes.h"

// In-place Laplacian-style transform of one column per thread.
// d is an n-by-n matrix whose column `col` is stored contiguously at
// d + col*n. For each column, the diagonal entry accumulates the sum of
// the other entries in the column, and each off-diagonal entry is then
// negated. One thread owns one whole column; no inter-thread sharing.
__global__ void ComputeLaplacianInPlace(float* d, int n)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= n)
        return;                      // guard the grid tail

    float* column = d + col * n;     // base of this thread's column
    for (int row = 0; row < n; ++row) {
        if (row == col)
            continue;                // diagonal is the accumulator
        column[col] += column[row];  // add original value first...
        column[row] = -column[row];  // ...then negate it in place
    }
}
23,667
#include "includes.h"

// Element-wise product C[i] = A[i] * B[i].
// The flat index is rebuilt from a 3-D grid of 1-D blocks: blocks are
// linearised in (x, y, z) order and each block covers blockDim.x
// consecutive elements. There is no bounds guard, so the launch
// configuration must tile the arrays exactly.
__global__ void FloatMul(float *A, float *B, float *C)
{
    const unsigned int blockRank =
        (blockIdx.x * gridDim.y + blockIdx.y) * gridDim.z + blockIdx.z;
    const unsigned int i = blockRank * blockDim.x + threadIdx.x;
    C[i] = A[i] * B[i];
}
23,668
/*** fir.gpu.cu ***/
#include <stdio.h>

/*
 * Tool function: fill a[0..N-1] with pseudo-random digits in [0, 10).
 */
void random_ints(int* a, int N){
    int i;
    for(i = 0; i < N; ++i)
        a[i] = (int)(rand() / (RAND_MAX + 1.0) * 10.0);
}

/* CUDA parameters */
#define BLOCK_SIZE 512

/* FIR coefficients: one copy in constant memory for the kernels and one
   host copy for the CPU reference check. */
#define COEF_NBR 5
__constant__ int C[]={1, -2, 4, -2, 1};
int h_C[]={1, -2, 4, -2, 1}; // host fir coefficients

/*
 * FIR kernel: every thread computes one output sample directly from
 * global memory. D must hold at least gridDim.x*blockDim.x + COEF_NBR - 1
 * samples so that D[index + i] never runs past the end.
 * IMPROVEMENT: accumulate in a register instead of read-modify-writing
 * O[index] in global memory on every tap (same result, fewer transactions).
 */
__global__ void fir(int *D, int *O)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int acc = 0;
    for (int i = 0; i < COEF_NBR; i++)
        acc += C[i] * D[index + i];
    O[index] = acc;
}

/*
 * FIR kernel that stages the block's input window (including the
 * COEF_NBR-1 overlapped samples) and the coefficients in shared memory.
 * Requires blockDim.x == BLOCK_SIZE.
 */
__global__ void fir_fast(int *D, int *O)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ int tmpD[BLOCK_SIZE + COEF_NBR-1]; // block window + overlap
    __shared__ int tmpC[COEF_NBR];
    __shared__ int tmpO[BLOCK_SIZE];
    int i;

    // Copy block of input data to shared memory
    tmpD[threadIdx.x] = D[index];
    // Copy overlapped input data (first COEF_NBR-1 threads fetch the tail)
    if( threadIdx.x < COEF_NBR-1 )
        tmpD[blockDim.x + threadIdx.x] = D[(blockIdx.x+1) * blockDim.x + threadIdx.x];
    // Copy fir coefficients
    if( threadIdx.x < COEF_NBR)
        tmpC[threadIdx.x] = C[threadIdx.x];
    __syncthreads(); // window must be complete before anyone reads it

    tmpO[threadIdx.x] = 0;
    for(i=0; i<COEF_NBR; i++)
        tmpO[threadIdx.x] += tmpC[i] * tmpD[threadIdx.x + i];
    __syncthreads();

    O[index] = tmpO[threadIdx.x];
}

/*
 * Host program: generates random input, runs and times the plain `fir`
 * kernel, then verifies every output sample against a CPU reference.
 */
int main(void)
{
    int *h_D, *h_O;   // host copies of D, O
    int *d_D, *d_O;   // device copies of D, O
    int nBlk = 512;
    int nThx = BLOCK_SIZE;
    int N = nBlk * nThx;
    int sizeD = (N + COEF_NBR) * sizeof(int);
    int sizeO = N * sizeof(int);
    int i, j;
    int result;

    // Alloc space for device copies of D, O
    cudaMalloc((void **)&d_D, sizeD);
    cudaMalloc((void **)&d_O, sizeO);

    // Alloc space for host copies of D, O and set up input values
    h_D = (int *)malloc(sizeD);
    random_ints(h_D, N+COEF_NBR);
    h_O = (int *)malloc(sizeO);

    // Copy input data to device
    cudaMemcpy(d_D, h_D, sizeD, cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Launch the FIR kernel on the GPU with N threads
    fir<<<nBlk,nThx>>>(d_D, d_O);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("GPU time is %f ms\n", time);

    // BUG FIX: surface any launch error; the original never checked.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    // Copy result back to host
    cudaMemcpy(h_O, d_O, sizeO, cudaMemcpyDeviceToHost);

    // Check every output sample against the CPU reference
    for(i=0; i<N; i++){
        result=0;
        for(j=0; j<COEF_NBR; j++)
            result += h_C[j] * h_D[i + j];
        if( result != h_O[i] ){
            printf("Error at %d. %d != %d\n", i, h_O[i], result);
            goto cleanup;
        }
    }
    printf("Success\n");

cleanup:
    // BUG FIX: destroy the timing events (leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_D);
    free(h_O);
    cudaFree(d_D);
    cudaFree(d_O);
    return 0;
}
23,669
#include "includes.h"

// Swaps element (ny/2, c) with element (ny/2, c + nx/2 + xodd) of the
// row-major, nx-wide array `data`, i.e. exchanges positions between the
// left and right halves of the middle row. The column c comes from a 1-D
// launch: blockIdx.x selects a chunk of `num_threads` columns and
// `offset` shifts the whole range. `yodd` is accepted for signature
// parity with a companion kernel but is not used here.
__global__ void swap_middle_row(float* data, const int num_threads, const int nx, const int ny, const int xodd, const int yodd, const int offset)
{
    const uint col = blockIdx.x * num_threads + threadIdx.x + offset;
    const int midRow = ny / 2;

    const int left  = midRow * nx + col;              // left-half slot
    const int right = left + nx / 2 + xodd;           // mirrored slot

    const float held = data[left];
    data[left]  = data[right];
    data[right] = held;
}
23,670
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

/*
 * Cube each element: d_out[i] = d_in[i]^3.
 * IMPROVEMENT: use the global thread index and a bounds guard so the
 * kernel is correct for any launch configuration, not only a single
 * block that exactly matches the array length (the original indexed
 * with threadIdx.x alone and had no guard).
 */
__global__ void cube(float *d_out, float *d_in, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) {
        float v = d_in[id];
        d_out[id] = v * v * v;
    }
}

int main(int argc, char **argv)
{
    // General CUDA flow:
    // 1. allocate device memory, 2. copy host -> device,
    // 3. launch the kernel,      4. copy device -> host.
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Generate the input array on the host.
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // Device pointers and allocation.
    float *d_in;
    float *d_out;
    cudaMalloc((void**)&d_in, ARRAY_BYTES);
    cudaMalloc((void**)&d_out, ARRAY_BYTES);

    // Transfer the input to the GPU.
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // One block of ARRAY_SIZE threads; the guard makes other
    // configurations safe too.
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in, ARRAY_SIZE);

    // Copy the result back to the CPU (blocking, so no explicit sync needed).
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Print four values per line.
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
23,671
#include <stdlib.h>
#include <stdio.h>

/* Abort-on-error wrapper for CUDA runtime calls. */
#define MY_CUDA_CHECK( call) {                                               \
    cudaError err = call;                                                    \
    if( cudaSuccess != err) {                                                \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",        \
                __FILE__, __LINE__, cudaGetErrorString( err) );              \
        exit(EXIT_FAILURE);                                                  \
    } }

/* Report (and abort on) the last pending CUDA error, e.g. after a kernel
   launch. */
#define MY_CHECK_ERROR(errorMessage) {                                       \
    cudaError_t err = cudaGetLastError();                                    \
    if( cudaSuccess != err) {                                                \
        fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",    \
                errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\
        exit(EXIT_FAILURE);                                                  \
    }                                                                        \
}

/* Defined elsewhere in the project; logs a message and, judging by its use
   below, terminates when the first argument is TRUE -- confirm against its
   definition. */
void writelog(int, int, const char *, ...);

#define MAKEMATR_RC 1
#if !defined(TRUE)
enum {FALSE, TRUE};
#endif
/* NOTE(review): dead guard -- MAKEMATR_RC is unconditionally defined just
   above, so this #if can never fire and the value 12 is never used. */
#if !defined(MAKEMATR_RC)
#define MAKEMATR_RC 12
#endif

/*
 * mmcuda - allocate an r x c matrix in device memory as a table of row
 * pointers, dispatching on the element size s (char / short / int /
 * double).
 *
 * On return:
 *   - the function's return value is a HOST array of r DEVICE row
 *     pointers (useful for host-side per-row cudaMemcpy calls);
 *   - rp[0] receives a DEVICE copy of that same pointer table, suitable
 *     for passing to kernels that index matrix[row][col].
 * If init is non-zero every row is zero-filled with cudaMemset.
 * Any allocation failure aborts via writelog or MY_CUDA_CHECK.
 *
 * CAVEAT: the switch keys on sizeof, so types with equal sizes (e.g.
 * int vs float on common platforms) share a case; only the four listed
 * element types are supported, anything else hits the default and logs.
 */
void **mmcuda(void ***rp, int r, int c, int s, int init)
{
  int i;
  /* Host-side row-pointer tables, one per supported element type. */
  char **pc;
  short int **psi;
  int **pi;
  double **pd;
  /* Device-side copies of the row-pointer tables. */
  char **d_pc;
  short int **d_psi;
  int **d_pi;
  double **d_pd;

  switch(s) {
  case sizeof(char):
    /* Host table of r device row pointers. */
    pc=(char **)malloc(r*sizeof(char *));
    if(!pc) writelog(TRUE, MAKEMATR_RC, "error in makematr 1\n");
    /* Device copy of the table itself. */
    MY_CUDA_CHECK( cudaMalloc( (void **) &d_pc, r*sizeof(char*) ) );
    for(i=0; i<r; i++) {
      /* One device allocation per row; optionally zeroed. */
      MY_CUDA_CHECK( cudaMalloc( (void **) &pc[i], c*sizeof(char) ) );
      if(init) {
        MY_CUDA_CHECK( cudaMemset( pc[i], 0, c*sizeof(char) ) );
      }
    }
    /* Publish the row pointers to the device-side table. */
    MY_CUDA_CHECK( cudaMemcpy( d_pc, pc, r*sizeof(char *), cudaMemcpyHostToDevice ) );
    rp[0]=(void **)d_pc;
    return (void **)pc;

  case sizeof(short int):
    psi=(short int **)malloc(r*sizeof(short int*));
    if(!psi) writelog(TRUE, MAKEMATR_RC, "error in makematr 2\n");
    MY_CUDA_CHECK( cudaMalloc( (void **) &d_psi, r*sizeof(short int*) ) );
    for(i=0; i<r; i++) {
      MY_CUDA_CHECK( cudaMalloc( (void **) &psi[i], c*sizeof(short int) ) );
      if(init) {
        MY_CUDA_CHECK( cudaMemset( psi[i], 0, c*sizeof(short int) ) );
      }
    }
    MY_CUDA_CHECK( cudaMemcpy( d_psi, psi, r*sizeof(short int*), cudaMemcpyHostToDevice ) );
    rp[0]=(void **)d_psi;
    return (void **)psi;

  case sizeof(int):
    pi=(int **)malloc(r*sizeof(int*));
    if(!pi) writelog(TRUE, MAKEMATR_RC, "error in makematr 3\n");
    MY_CUDA_CHECK( cudaMalloc( (void **) &d_pi, r*sizeof(int*) ) );
    for(i=0; i<r; i++) {
      MY_CUDA_CHECK( cudaMalloc( (void **) &pi[i], c*sizeof(int) ) );
      if(init) {
        MY_CUDA_CHECK( cudaMemset( pi[i], 0, c*sizeof(int) ) );
      }
    }
    MY_CUDA_CHECK( cudaMemcpy( d_pi, pi, r*sizeof(int *), cudaMemcpyHostToDevice ) );
    rp[0]=(void **)d_pi;
    return (void **)pi;

  case sizeof(double):
    pd=(double **)malloc(r*sizeof(double*));
    if(!pd) writelog(TRUE, MAKEMATR_RC, "error in makematr 4 for %d rows\n",r);
    MY_CUDA_CHECK( cudaMalloc( (void **) &d_pd, r*sizeof(double*) ) );
    for(i=0; i<r; i++) {
      MY_CUDA_CHECK( cudaMalloc( (void **) &pd[i], c*sizeof(double) ) );
      if(init) {
        MY_CUDA_CHECK( cudaMemset( pd[i], 0, c*sizeof(double) ) );
      }
    }
    MY_CUDA_CHECK( cudaMemcpy( d_pd, pd, r*sizeof(double *), cudaMemcpyHostToDevice ) );
    rp[0]=(void **)d_pd;
    return (void **)pd;

  default:
    /* Unsupported element size: log and fall through to the NULL return. */
    writelog(TRUE,MAKEMATR_RC,"Unexpected size: %d\n",s);
    break;
  }
  return NULL;
}
23,672
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3) { for (int i=2; i<=N-3; i++) { double _t_27_; double _t_105_; double _t_8_; double _t_87_; double _t_102_; double _t_84_; double _t_24_; double _t_5_; double _t_104_; double _t_103_; double _t_123_; double _t_122_; double _t_141_; double _t_140_; double _t_86_; double _t_85_; double _t_101_; double _t_110_; double _t_108_; double _t_115_; double _t_113_; double _t_83_; double 
_t_92_; double _t_90_; double _t_97_; double _t_95_; double _t_129_; double _t_134_; double _t_120_; double _t_127_; double _t_132_; double _t_147_; double _t_152_; double _t_150_; double _t_138_; double _t_145_; double _t_106_; double _t_66_; double _t_100_; double _t_47_; double _t_88_; double _t_82_; double _t_44_; double _t_63_; double _t_81_; double _t_111_; double _t_32_; double _t_109_; double _t_13_; double _t_93_; double _t_91_; double _t_11_; double _t_30_; double _t_112_; double _t_71_; double _t_107_; double _t_52_; double _t_94_; double _t_89_; double _t_50_; double _t_69_; double _t_116_; double _t_38_; double _t_114_; double _t_19_; double _t_98_; double _t_96_; double _t_17_; double _t_36_; double _t_117_; double _t_77_; double _t_58_; double _t_99_; double _t_80_; double _t_56_; double _t_75_; double _t_124_; double _t_28_; double _t_121_; double _t_9_; double _t_142_; double _t_139_; double _t_125_; double _t_67_; double _t_119_; double _t_118_; double _t_48_; double _t_143_; double _t_137_; double _t_130_; double _t_33_; double _t_128_; double _t_14_; double _t_148_; double _t_146_; double _t_131_; double _t_72_; double _t_126_; double _t_53_; double _t_149_; double _t_144_; double _t_135_; double _t_39_; double _t_133_; double _t_20_; double _t_153_; double _t_151_; double _t_136_; double _t_78_; double _t_59_; double _t_154_; double _t_79_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_26_; double _t_25_; double _t_178_; double _t_23_; double _t_22_; double _t_21_; double _t_1_; double _t_176_; double _t_37_; double _t_35_; double _t_34_; double _t_31_; double _t_173_; double _t_29_; double _t_171_; double _t_46_; double _t_45_; double _t_191_; double _t_43_; double _t_42_; double _t_189_; double _t_41_; double _t_40_; double _t_57_; double _t_55_; double _t_54_; double _t_51_; double _t_186_; double _t_49_; double _t_184_; double _t_65_; double _t_64_; double _t_203_; double _t_62_; double _t_61_; double _t_201_; double _t_60_; double _t_76_; 
double _t_74_; double _t_73_; double _t_70_; double _t_198_; double _t_68_; double _t_196_; double _t_0_; double _t_7_; double _t_6_; double _t_166_; double _t_4_; double _t_3_; double _t_2_; double _t_164_; double _t_18_; double _t_16_; double _t_15_; double _t_12_; double _t_161_; double _t_10_; double _t_159_; double _t_162_; double _t_160_; double _t_163_; double _t_158_; double _t_157_; double _t_156_; double _t_167_; double _t_165_; double _t_168_; double _t_174_; double _t_172_; double _t_175_; double _t_170_; double _t_169_; double _t_179_; double _t_177_; double _t_180_; double _t_155_; double _t_187_; double _t_185_; double _t_188_; double _t_183_; double _t_182_; double _t_181_; double _t_192_; double _t_190_; double _t_193_; double _t_199_; double _t_197_; double _t_200_; double _t_195_; double _t_194_; double _t_204_; double _t_202_; double _t_205_; _t_27_ = -u1[i-2][j][k-2]; _t_105_ = -u1[i-2][j][k-2]; _t_105_ += u1[i-2][j][k+2]; _t_8_ = -u1[i-2][j][k+2]; _t_27_ += u1[i+2][j][k-2]; _t_87_ = -u1[i+2][j][k-2]; _t_8_ += u1[i+2][j][k+2]; _t_87_ += u1[i+2][j][k+2]; _t_102_ = c2 * _t_105_; _t_84_ = c2 * _t_87_; _t_24_ = c2 * _t_27_; _t_5_ = c2 * _t_8_; _t_104_ = 2.0 * mu[i-2][j][k]; _t_104_ += la[i-2][j][k]; _t_103_ = _t_104_ * met2[i-2][j][k]; _t_123_ = 2.0 * mu[i+1][j][k]; _t_123_ += la[i+1][j][k]; _t_122_ = _t_123_ * met2[i+1][j][k]; _t_141_ = 2.0 * mu[i-1][j][k]; _t_141_ += la[i-1][j][k]; _t_140_ = _t_141_ * met2[i-1][j][k]; _t_86_ = 2.0 * mu[i+2][j][k]; _t_86_ += la[i+2][j][k]; _t_85_ = _t_86_ * met2[i+2][j][k]; _t_101_ = _t_103_ * met1[i-2][j][k]; _t_110_ = la[i-2][j][k] * met3[i-2][j][k]; _t_108_ = _t_110_ * met1[i-2][j][k]; _t_115_ = la[i-2][j][k] * met4[i-2][j][k]; _t_113_ = _t_115_ * met1[i-2][j][k]; _t_83_ = _t_85_ * met1[i+2][j][k]; _t_92_ = la[i+2][j][k] * met3[i+2][j][k]; _t_90_ = _t_92_ * met1[i+2][j][k]; _t_97_ = la[i+2][j][k] * met4[i+2][j][k]; _t_95_ = _t_97_ * met1[i+2][j][k]; _t_129_ = la[i+1][j][k] * met3[i+1][j][k]; _t_134_ = 
la[i+1][j][k] * met4[i+1][j][k]; _t_120_ = _t_122_ * met1[i+1][j][k]; _t_127_ = _t_129_ * met1[i+1][j][k]; _t_132_ = _t_134_ * met1[i+1][j][k]; _t_147_ = la[i-1][j][k] * met3[i-1][j][k]; _t_152_ = la[i-1][j][k] * met4[i-1][j][k]; _t_150_ = _t_152_ * met1[i-1][j][k]; _t_138_ = _t_140_ * met1[i-1][j][k]; _t_145_ = _t_147_ * met1[i-1][j][k]; _t_106_ = -u1[i-2][j][k-1]; _t_66_ = -u1[i-2][j][k-1]; _t_106_ += u1[i-2][j][k+1]; _t_102_ += c1 * _t_106_; _t_100_ = _t_101_ * _t_102_; _t_47_ = -u1[i-2][j][k+1]; _t_66_ += u1[i+2][j][k-1]; _t_88_ = -u1[i+2][j][k-1]; _t_47_ += u1[i+2][j][k+1]; _t_88_ += u1[i+2][j][k+1]; _t_84_ += c1 * _t_88_; _t_82_ = _t_83_ * _t_84_; _t_44_ = c2 * _t_47_; _t_63_ = c2 * _t_66_; _t_81_ = _t_100_ * strx[i]; _t_81_ += _t_82_ * strx[i]; _t_111_ = -u2[i-2][j][k-2]; _t_32_ = -u2[i-2][j][k-2]; _t_111_ += u2[i-2][j][k+2]; _t_109_ = c2 * _t_111_; _t_13_ = -u2[i-2][j][k+2]; _t_32_ += u2[i+2][j][k-2]; _t_93_ = -u2[i+2][j][k-2]; _t_13_ += u2[i+2][j][k+2]; _t_93_ += u2[i+2][j][k+2]; _t_91_ = c2 * _t_93_; _t_11_ = c2 * _t_13_; _t_30_ = c2 * _t_32_; _t_112_ = -u2[i-2][j][k-1]; _t_71_ = -u2[i-2][j][k-1]; _t_112_ += u2[i-2][j][k+1]; _t_109_ += c1 * _t_112_; _t_107_ = _t_108_ * _t_109_; _t_81_ += _t_107_ * stry[j]; _t_52_ = -u2[i-2][j][k+1]; _t_71_ += u2[i+2][j][k-1]; _t_94_ = -u2[i+2][j][k-1]; _t_52_ += u2[i+2][j][k+1]; _t_94_ += u2[i+2][j][k+1]; _t_91_ += c1 * _t_94_; _t_89_ = _t_90_ * _t_91_; _t_81_ += _t_89_ * stry[j]; _t_50_ = c2 * _t_52_; _t_69_ = c2 * _t_71_; _t_116_ = -u3[i-2][j][k-2]; _t_38_ = -u3[i-2][j][k-2]; _t_116_ += u3[i-2][j][k+2]; _t_114_ = c2 * _t_116_; _t_19_ = -u3[i-2][j][k+2]; _t_38_ += u3[i+2][j][k-2]; _t_98_ = -u3[i+2][j][k-2]; _t_19_ += u3[i+2][j][k+2]; _t_98_ += u3[i+2][j][k+2]; _t_96_ = c2 * _t_98_; _t_17_ = c2 * _t_19_; _t_36_ = c2 * _t_38_; _t_117_ = -u3[i-2][j][k-1]; _t_77_ = -u3[i-2][j][k-1]; _t_117_ += u3[i-2][j][k+1]; _t_114_ += c1 * _t_117_; _t_81_ += _t_113_ * _t_114_; _t_58_ = -u3[i-2][j][k+1]; _t_77_ += u3[i+2][j][k-1]; _t_99_ = 
-u3[i+2][j][k-1]; _t_58_ += u3[i+2][j][k+1]; _t_99_ += u3[i+2][j][k+1]; _t_96_ += c1 * _t_99_; _t_81_ += _t_95_ * _t_96_; _t_80_ = c2 * _t_81_; _t_56_ = c2 * _t_58_; _t_75_ = c2 * _t_77_; _t_124_ = -u1[i+1][j][k-2]; _t_28_ = u1[i+1][j][k-2]; _t_124_ += u1[i+1][j][k+2]; _t_121_ = c2 * _t_124_; _t_9_ = u1[i+1][j][k+2]; _t_28_ -= u1[i-1][j][k-2]; _t_24_ += c1 * _t_28_; _t_142_ = -u1[i-1][j][k-2]; _t_9_ -= u1[i-1][j][k+2]; _t_5_ += c1 * _t_9_; _t_142_ += u1[i-1][j][k+2]; _t_139_ = c2 * _t_142_; _t_125_ = -u1[i+1][j][k-1]; _t_67_ = u1[i+1][j][k-1]; _t_125_ += u1[i+1][j][k+1]; _t_121_ += c1 * _t_125_; _t_119_ = _t_120_ * _t_121_; _t_118_ = _t_119_ * strx[i]; _t_48_ = u1[i+1][j][k+1]; _t_67_ -= u1[i-1][j][k-1]; _t_63_ += c1 * _t_67_; _t_143_ = -u1[i-1][j][k-1]; _t_48_ -= u1[i-1][j][k+1]; _t_44_ += c1 * _t_48_; _t_143_ += u1[i-1][j][k+1]; _t_139_ += c1 * _t_143_; _t_137_ = _t_138_ * _t_139_; _t_118_ += _t_137_ * strx[i]; _t_130_ = -u2[i+1][j][k-2]; _t_33_ = u2[i+1][j][k-2]; _t_130_ += u2[i+1][j][k+2]; _t_128_ = c2 * _t_130_; _t_14_ = u2[i+1][j][k+2]; _t_33_ -= u2[i-1][j][k-2]; _t_30_ += c1 * _t_33_; _t_148_ = -u2[i-1][j][k-2]; _t_14_ -= u2[i-1][j][k+2]; _t_11_ += c1 * _t_14_; _t_148_ += u2[i-1][j][k+2]; _t_146_ = c2 * _t_148_; _t_131_ = -u2[i+1][j][k-1]; _t_72_ = u2[i+1][j][k-1]; _t_131_ += u2[i+1][j][k+1]; _t_128_ += c1 * _t_131_; _t_126_ = _t_127_ * _t_128_; _t_118_ += _t_126_ * stry[j]; _t_53_ = u2[i+1][j][k+1]; _t_72_ -= u2[i-1][j][k-1]; _t_69_ += c1 * _t_72_; _t_149_ = -u2[i-1][j][k-1]; _t_53_ -= u2[i-1][j][k+1]; _t_50_ += c1 * _t_53_; _t_149_ += u2[i-1][j][k+1]; _t_146_ += c1 * _t_149_; _t_144_ = _t_145_ * _t_146_; _t_118_ += _t_144_ * stry[j]; _t_135_ = -u3[i+1][j][k-2]; _t_39_ = u3[i+1][j][k-2]; _t_135_ += u3[i+1][j][k+2]; _t_133_ = c2 * _t_135_; _t_20_ = u3[i+1][j][k+2]; _t_39_ -= u3[i-1][j][k-2]; _t_36_ += c1 * _t_39_; _t_153_ = -u3[i-1][j][k-2]; _t_20_ -= u3[i-1][j][k+2]; _t_17_ += c1 * _t_20_; _t_153_ += u3[i-1][j][k+2]; _t_151_ = c2 * _t_153_; _t_136_ = 
-u3[i+1][j][k-1]; _t_78_ = u3[i+1][j][k-1]; _t_136_ += u3[i+1][j][k+1]; _t_133_ += c1 * _t_136_; _t_118_ += _t_132_ * _t_133_; _t_59_ = u3[i+1][j][k+1]; _t_78_ -= u3[i-1][j][k-1]; _t_75_ += c1 * _t_78_; _t_154_ = -u3[i-1][j][k-1]; _t_59_ -= u3[i-1][j][k+1]; _t_56_ += c1 * _t_59_; _t_154_ += u3[i-1][j][k+1]; _t_151_ += c1 * _t_154_; _t_118_ += _t_150_ * _t_151_; _t_80_ += c1 * _t_118_; _t_79_ = _t_80_ * stry[j]; r1ic0jc0kc0 += _t_79_; _t_26_ = 2.0 * mu[i][j][k-2]; _t_26_ += la[i][j][k-2]; _t_25_ = _t_26_ * met2[i][j][k-2]; _t_178_ = la[i][j][k-2] * met2[i][j][k-2]; _t_23_ = _t_25_ * met1[i][j][k-2]; _t_22_ = _t_23_ * _t_24_; _t_21_ = _t_22_ * strx[i]; _t_1_ = _t_21_ * stry[j]; _t_176_ = _t_178_ * met1[i][j][k-2]; _t_37_ = mu[i][j][k-2] * met4[i][j][k-2]; _t_35_ = _t_37_ * met1[i][j][k-2]; _t_34_ = _t_35_ * _t_36_; _t_1_ += _t_34_ * stry[j]; _t_31_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_173_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_29_ = _t_31_ * met1[i][j][k-2]; _t_1_ += _t_29_ * _t_30_; _t_171_ = _t_173_ * met1[i][j][k-2]; _t_46_ = 2.0 * mu[i][j][k+1]; _t_46_ += la[i][j][k+1]; _t_45_ = _t_46_ * met2[i][j][k+1]; _t_191_ = la[i][j][k+1] * met2[i][j][k+1]; _t_43_ = _t_45_ * met1[i][j][k+1]; _t_42_ = _t_43_ * _t_44_; _t_189_ = _t_191_ * met1[i][j][k+1]; _t_41_ = _t_42_ * strx[i+2]; _t_40_ = _t_41_ * stry[j]; _t_57_ = mu[i][j][k+1] * met4[i][j][k+1]; _t_55_ = _t_57_ * met1[i][j][k+1]; _t_54_ = _t_55_ * _t_56_; _t_40_ += _t_54_ * stry[j]; _t_51_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_186_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_49_ = _t_51_ * met1[i][j][k+1]; _t_40_ += _t_49_ * _t_50_; _t_184_ = _t_186_ * met1[i][j][k+1]; _t_65_ = 2.0 * mu[i][j][k-1]; _t_65_ += la[i][j][k-1]; _t_64_ = _t_65_ * met2[i][j][k-1]; _t_203_ = la[i][j][k-1] * met2[i][j][k-1]; _t_62_ = _t_64_ * met1[i][j][k-1]; _t_61_ = _t_62_ * _t_63_; _t_201_ = _t_203_ * met1[i][j][k-1]; _t_60_ = _t_61_ * strx[i-2]; _t_40_ += _t_60_ * stry[j]; _t_76_ = mu[i][j][k-1] * met4[i][j][k-1]; _t_74_ = _t_76_ * 
met1[i][j][k-1]; _t_73_ = _t_74_ * _t_75_; _t_40_ += _t_73_ * stry[j]; _t_70_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_198_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_68_ = _t_70_ * met1[i][j][k-1]; _t_40_ += _t_68_ * _t_69_; _t_196_ = _t_198_ * met1[i][j][k-1]; _t_0_ = c1 * _t_40_; _t_7_ = 2.0 * mu[i][j][k+2]; _t_7_ += la[i][j][k+2]; _t_6_ = _t_7_ * met2[i][j][k+2]; _t_166_ = la[i][j][k+2] * met2[i][j][k+2]; _t_4_ = _t_6_ * met1[i][j][k+2]; _t_3_ = _t_4_ * _t_5_; _t_2_ = _t_3_ * strx[i]; _t_1_ += _t_2_ * stry[j]; _t_164_ = _t_166_ * met1[i][j][k+2]; _t_18_ = mu[i][j][k+2] * met4[i][j][k+2]; _t_16_ = _t_18_ * met1[i][j][k+2]; _t_15_ = _t_16_ * _t_17_; _t_1_ += _t_15_ * stry[j]; _t_12_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_161_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_10_ = _t_12_ * met1[i][j][k+2]; _t_1_ += _t_10_ * _t_11_; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_159_ = _t_161_ * met1[i][j][k+2]; _t_162_ = -u1[i][j-2][k+2]; _t_162_ += u1[i][j+2][k+2]; _t_160_ = c2 * _t_162_; _t_163_ = -u1[i][j-1][k+2]; _t_163_ += u1[i][j+1][k+2]; _t_160_ += c1 * _t_163_; _t_158_ = _t_159_ * _t_160_; _t_157_ = _t_158_ * stry[j+2]; _t_156_ = _t_157_ * strx[i]; _t_167_ = -u2[i][j-2][k+2]; _t_167_ += u2[i][j+2][k+2]; _t_165_ = c2 * _t_167_; _t_168_ = -u2[i][j-1][k+2]; _t_168_ += u2[i][j+1][k+2]; _t_165_ += c1 * _t_168_; _t_156_ += _t_164_ * _t_165_; _t_174_ = -u1[i][j-2][k-2]; _t_174_ += u1[i][j+2][k-2]; _t_172_ = c2 * _t_174_; _t_175_ = -u1[i][j-1][k-2]; _t_175_ += u1[i][j+1][k-2]; _t_172_ += c1 * _t_175_; _t_170_ = _t_171_ * _t_172_; _t_169_ = _t_170_ * stry[j]; _t_156_ += _t_169_ * strx[i]; _t_179_ = -u2[i][j-2][k-2]; _t_179_ += u2[i][j+2][k-2]; _t_177_ = c2 * _t_179_; _t_180_ = -u2[i][j-1][k-2]; _t_180_ += u2[i][j+1][k-2]; _t_177_ += c1 * _t_180_; _t_156_ += _t_176_ * _t_177_; _t_155_ = c2 * _t_156_; _t_187_ = -u1[i][j-2][k+1]; _t_187_ += u1[i][j+2][k+1]; _t_185_ = c2 * _t_187_; _t_188_ = -u1[i][j-1][k+1]; _t_188_ += u1[i][j+1][k+1]; _t_185_ += c1 * _t_188_; _t_183_ = _t_184_ * _t_185_; 
_t_182_ = _t_183_ * stry[j-2]; _t_181_ = _t_182_ * strx[i]; _t_192_ = -u2[i][j-2][k+1]; _t_192_ += u2[i][j+2][k+1]; _t_190_ = c2 * _t_192_; _t_193_ = -u2[i][j-1][k+1]; _t_193_ += u2[i][j+1][k+1]; _t_190_ += c1 * _t_193_; _t_181_ += _t_189_ * _t_190_; _t_199_ = -u1[i][j-2][k-1]; _t_199_ += u1[i][j+2][k-1]; _t_197_ = c2 * _t_199_; _t_200_ = -u1[i][j-1][k-1]; _t_200_ += u1[i][j+1][k-1]; _t_197_ += c1 * _t_200_; _t_195_ = _t_196_ * _t_197_; _t_194_ = _t_195_ * stry[j]; _t_181_ += _t_194_ * strx[i]; _t_204_ = -u2[i][j-2][k-1]; _t_204_ += u2[i][j+2][k-1]; _t_202_ = c2 * _t_204_; _t_205_ = -u2[i][j-1][k-1]; _t_205_ += u2[i][j+1][k-1]; _t_202_ += c1 * _t_205_; _t_181_ += _t_201_ * _t_202_; _t_155_ += c1 * _t_181_; r1ic0jc0kc0 += _t_155_; r1[i][j][k] = r1ic0jc0kc0; r1[i][j][k] += c2*( mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*( c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) + c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i] + mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*( c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) + c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) ) + ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*( c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) + c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i] + mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*( c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) + c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) ) ) + c1*( mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*( c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) + c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i] + mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*( c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) + c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) ) + ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*( c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) + c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i] + mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*( c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) + c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) ); r1[i][j][k] += c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*( c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) 
+ c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) ) + mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*( c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+ c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) ) ) + c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*( c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) + c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) ) + mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*( c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) + c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k]))) + c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*( c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) + c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) ) + la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*( c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+ c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) ) ) + c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*( c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) + c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) ) + la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*( c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) + c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k]))); } } } extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) { double *r1; cudaMalloc (&r1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for r1\n"); cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u1; cudaMalloc (&u1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u1\n"); cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u2; cudaMalloc (&u2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u2\n"); cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u3; cudaMalloc (&u3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u3\n"); cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to 
allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met1; cudaMalloc (&met1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met1\n"); cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met2; cudaMalloc (&met2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met2\n"); cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met3; cudaMalloc (&met3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met3\n"); cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met4; cudaMalloc (&met4, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met4\n"); cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); }
23,673
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <chrono>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>

// Blocked Floyd-Warshall all-pairs shortest paths on two GPUs.
// The distance matrix is stored pitched on each device; work in phase III is
// split by block-rows: device 0 computes block-rows [0, round/2), device 1
// computes [round/2, round).
#define BlockSize 32
const int INF = 1000000000;

void input(char *inFileName);
void output(char *outFileName);
void block_FW(int B,char*);
int ceil(int a, int b);
// NOTE(review): `cal` and `cpu` are declared but no definition is visible in
// this translation unit.
void cal(char* d,size_t pitch,int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height,cudaStream_t stream);
void cpu(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);

int n, m;            // vertex count / edge count, set by input()
int* d;              // n*n row-major distance matrix, allocated in input()
double io_time = 0;  // accumulated wall-clock timings printed before exit
double comp_time = 0;
double mem_time = 0;

// argv[1] = binary input graph, argv[2] = output file.
// Prints "comp_time mem_time io_time" to stdout.
int main(int argc, char* argv[]) {
    auto io_beg = std::chrono::high_resolution_clock::now();
    input(argv[1]);
    auto io_end = std::chrono::high_resolution_clock::now();
    io_time += std::chrono::duration<double>(io_end-io_beg).count();
    int B = BlockSize;
    block_FW(B,argv[2]);
    io_beg = std::chrono::high_resolution_clock::now();
    output(argv[2]);
    io_end = std::chrono::high_resolution_clock::now();
    io_time += std::chrono::duration<double>(io_end-io_beg).count();
    std::cout<< comp_time <<" "<<mem_time<<" "<<io_time;
    // NOTE(review): `d` was allocated with new[]; this should be `delete[] d`
    // (plain delete on an array is undefined behavior).
    delete d;
    return 0;
}

// Reads the binary graph: int n, int m, then m (src, dst, weight) triples.
// Initializes d[i][j] = 0 on the diagonal and INF elsewhere, then applies
// the edge list.
void input(char* infile) {
    FILE* file = fopen(infile, "rb");
    fread(&n, sizeof(int), 1, file);
    fread(&m, sizeof(int), 1, file);
    int *buf = new int[m*3];
    d = new int[n*n];
    fread(buf, sizeof(int), 3*m, file);
#pragma omp parallel for
    for (int i = 0; i < n; ++ i) {
        for (int j = 0; j < n; ++ j) {
            if (i == j) {
                d[i*n+j] = 0;
            } else {
                d[i*n+j] = INF;
            }
        }
    }
#pragma omp parallel for
    for (int i = 0; i < m; ++ i) {
        int pair[3];
        for(int j=0;j<3;j++) pair[j]=buf[i*3+j];
        d[pair[0]*n+pair[1]] = pair[2];
    }
    fclose(file);
    // NOTE(review): should be `delete[] buf` (new[]'d array).
    delete buf;
}

// Dumps the n*n result matrix as raw ints.
// NOTE(review): opened with "w" rather than "wb" -- harmless on POSIX,
// corrupts the binary output on Windows.
void output(char *outFileName) {
    FILE *outfile = fopen(outFileName, "w");
    fwrite(d, sizeof(int), n*n, outfile);
    fclose(outfile);
}

// Ceiling division for positive ints.
int ceil(int a, int b) {
    return (a + b - 1) / b;
}

__global__ void kernel_I(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r);
__global__ void kernel_II(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r);
__global__ void kernel_III(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r);

// Synchronously copies block (x, y) of the pitched matrix from GPU `src` to
// GPU `dst`, one B-int row segment at a time.  (Currently unused.)
inline void moveBlock(char** ptr,int dst, int src,int x,int y,int B,size_t pitch){
    for(int k=B*x;k<B*(x+1);k++)
        cudaMemcpyPeer(ptr[dst]+pitch*B*k+sizeof(int)*B*y,dst,
                       ptr[src]+pitch*B*k+sizeof(int)*B*y,src,
                       sizeof(int)*B);
}

// Runs the blocked Floyd-Warshall rounds on both GPUs and copies the result
// back into the host matrix `d`.  `outFileName` is unused here.
void block_FW(int B, char* outFileName) {
    int round = ceil(n, B);
    char *device_d[2];   // per-GPU pitched copy of the (padded) matrix
    size_t pitch[2];
    auto mem_beg = std::chrono::high_resolution_clock::now();
    // Each GPU gets a full copy; allocation is padded up to round*B rows/cols.
    for(int dev=0;dev<2;dev++){
        cudaSetDevice(dev);
        cudaDeviceEnablePeerAccess(!dev,0);
        cudaMallocPitch(&device_d[dev],&pitch[dev],sizeof(int)*round*B,round*B);
        cudaMemcpy2DAsync(device_d[dev],pitch[dev],
                          d,sizeof(int)*n,
                          sizeof(int)*n,n,cudaMemcpyHostToDevice);
    }
    auto mem_end = std::chrono::high_resolution_clock::now();
    mem_time += std::chrono::duration<double>(mem_end-mem_beg).count();
    for(int dev=0;dev<2;dev++){
        cudaSetDevice(dev);
        cudaDeviceSynchronize();
    }
    auto comp_beg = std::chrono::high_resolution_clock::now();
    // NOTE(review): blocked FW needs rounds r = 0..round-1; `r <= round` runs
    // one extra round whose kernels are fully masked out by the i<n/j<n guards
    // but whose peer copies still execute -- wasted work at best.
    for (int r = 0; r <= round; ++r) {
        dim3 dimBlock(B,B);
        dim3 dimGrid(1,1);
        for(int dev=0;dev<2;dev++){
            cudaSetDevice(dev);
            cudaDeviceSynchronize();
        }
        // NOTE(review): this is a GNU variable-length-array extension, and the
        // streams are created every round but never destroyed -- a per-round
        // resource leak.  Create once outside the loop and cudaStreamDestroy.
        cudaStream_t streams[2][round-round/2];
        for(int i=0;i<round-round/2;i++)
            for(int j=0;j<2;j++)
                cudaStreamCreate(&streams[j][i]);
#pragma omp parallel sections
        {
            // NOTE(review): `dimBlock`/`dimGrid` are shared between the two
            // OMP sections and reassigned by both -- a data race; each section
            // should use its own local dim3 variables.
#pragma omp section
            {
                // Device 0: pivot block (phase I), pivot row+column (phase II),
                // then its half of the remaining block-rows (phase III).
                cudaSetDevice(0);
                kernel_I <<<dimGrid,dimBlock,0,0>>>(device_d[0],pitch[0],r,r,n,B,r);
                dimGrid = dim3(2,round-1);
                kernel_II <<<dimGrid,dimBlock,0,0>>>(device_d[0],pitch[0],0,0,n,B,r);
                dimGrid = dim3(1,round);
                for(int i=0;i<round/2;i++){
                    kernel_III<<<dimGrid,dimBlock,0,streams[0][i]>>>(device_d[0],pitch[0],i,0,n,B,r);
                    // NOTE(review): this copies block-row i FROM device 1 INTO
                    // device 0 -- but device 0 just computed row i itself, and
                    // device 1's copy of that row is stale.  The exchange
                    // direction (or the row range) looks inverted; verify
                    // against a reference result.
                    cudaMemcpy2DAsync(device_d[0]+i*pitch[0]*B,pitch[0],
                                      device_d[1]+i*pitch[1]*B,pitch[1],
                                      sizeof(int)*n,B,cudaMemcpyDefault,streams[0][i]);
                }
                cudaDeviceSynchronize();
            }
#pragma omp section
            {
                // Device 1: same phases, upper half of the block-rows.
                cudaSetDevice(1);
                kernel_I <<<dimGrid,dimBlock,0,0>>>(device_d[1],pitch[1],r,r,n,B,r);
                dimGrid = dim3(2,round-1);
                kernel_II <<<dimGrid,dimBlock,0,0>>>(device_d[1],pitch[1],0,0,n,B,r);
                dimGrid = dim3(1,round);
                for(int i=round/2;i<round;i++){
                    kernel_III<<<dimGrid,dimBlock,0,streams[1][i-round/2]>>>(device_d[1],pitch[1],i,0,n,B,r);
                    // NOTE(review): same suspected inversion as in section 0.
                    cudaMemcpy2DAsync(device_d[1]+i*pitch[1]*B,pitch[1],
                                      device_d[0]+i*pitch[0]*B,pitch[0],
                                      sizeof(int)*n,B,cudaMemcpyDefault,streams[1][i-round/2]);
                }
                cudaDeviceSynchronize();
            }
        }
        for(int dev=0;dev<2;dev++){
            cudaSetDevice(dev);
            cudaDeviceSynchronize();
        }
    }
    auto comp_end = std::chrono::high_resolution_clock::now();
    comp_time += std::chrono::duration<double>(comp_end-comp_beg).count();
    mem_beg = std::chrono::high_resolution_clock::now();
    // Gather: device 0 supplies rows [0, round/2*B), device 1 the rest.
    cudaSetDevice(0);
    cudaMemcpy2DAsync(d,sizeof(int)*n,
                      device_d[0],pitch[0],
                      sizeof(int)*n,round/2*B,cudaMemcpyDeviceToHost);
    cudaSetDevice(1);
    cudaMemcpy2DAsync(d+round/2*B*n,sizeof(int)*n,
                      device_d[1]+round/2*B*pitch[1],pitch[1],
                      sizeof(int)*n,n-round/2*B,cudaMemcpyDeviceToHost);
    for(int dev=0;dev<2;dev++){
        cudaSetDevice(dev);
        cudaDeviceSynchronize();
    }
    mem_end = std::chrono::high_resolution_clock::now();
    mem_time += std::chrono::duration<double>(mem_end-mem_beg).count();
    for(int dev=0;dev<2;dev++)
        cudaFree(device_d[dev]);
}

// Branchless min for device code.
__device__ inline int gmin(int a,int b){
    return (a>b)*b+(a<=b)*a;
}

// Phase I: relaxes the pivot block (block_x, block_y) == (r, r) entirely in
// shared memory.  Launch with one B x B block.  __syncthreads() at the top of
// each k iteration orders the read-modify-write rounds.
__global__ void kernel_I(char* d,size_t pitch,int block_x, int block_y,int n,int B, int r){
    __shared__ int d_i_j[BlockSize][BlockSize+1];   // +1 pad avoids bank conflicts
    const unsigned int i = block_x*B+threadIdx.x;
    const unsigned int j = block_y*B+threadIdx.y;
    int* d_i = (int*)(d+pitch*i);
    // Out-of-range lanes participate with INF so barriers stay uniform.
    unsigned int origin_path = i<n&&j<n? __ldg(&d_i[j]) : INF;
    d_i_j[threadIdx.x][threadIdx.y] = origin_path;
    const unsigned int k_max = gmin((r+1) * B,n);
#pragma unroll
    for (unsigned int k = r * B; k < k_max; ++k) {
        __syncthreads();
        int new_d = d_i_j[threadIdx.x][k-r*B]+d_i_j[k-r*B][threadIdx.y];
        if(d_i_j[threadIdx.x][threadIdx.y]>new_d){
            d_i_j[threadIdx.x][threadIdx.y]=new_d;
        }
    }
    // Write back only if improved (saves a global store otherwise).
    if(origin_path>d_i_j[threadIdx.x][threadIdx.y]&&i<n&&j<n){
        d_i[j]=d_i_j[threadIdx.x][threadIdx.y];
    }
}

// Phase III: relaxes block-row (block_x + blockIdx.x) against the pivot row
// and column of round r.  Launched with grid (1, round) so blockIdx.y walks
// the block-columns.
__global__ void kernel_III(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r){
    __shared__ int d_i_k[BlockSize][BlockSize+1];
    __shared__ int d_k_j[BlockSize][BlockSize+1];
    int i = (block_x+blockIdx.x)*B+threadIdx.x;
    int j = (block_y+blockIdx.y)*B+threadIdx.y;
    int* d_i = ((int*)(d+pitch*i));
    int path = i<n&&j<n? __ldg(&d_i[j]) : INF;
    int origin_path = path;
    // Stage the pivot-column block (d[i][rB..]) and pivot-row block
    // (d[rB..][j]) into shared memory, padding out-of-range entries with INF.
    if(r*B+threadIdx.y < n && i < n)
        d_i_k[threadIdx.x][threadIdx.y] = __ldg(&d_i[r*B+threadIdx.y]);
    else
        d_i_k[threadIdx.x][threadIdx.y] = INF;
    if(r*B+threadIdx.x < n && j < n)
        d_k_j[threadIdx.x][threadIdx.y] = __ldg(&((int*)(d+pitch*(r*B+threadIdx.x)))[j]);
    else
        d_k_j[threadIdx.x][threadIdx.y] = INF;
    __syncthreads();
    const unsigned int k_max = gmin((r+1) * B,n);
#pragma unroll
    for (unsigned int k = r * B; k < k_max; ++k) {
        int new_path = d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y];
        if(path>new_path) path = new_path;
    }
    if(origin_path>path&&i<n&&j<n){
        d_i[j]=path;
    }
}

// Phase II: relaxes the pivot row (blockIdx.x == 0) and pivot column
// (blockIdx.x == 1) blocks of round r.  Launched with grid (2, round-1).
// NOTE(review): blockIdx.y only covers block indices 0..round-2, so block
// round-1 of the pivot row/column is never updated in this phase (and the
// pivot block r is redundantly reprocessed).  The commented-out remapping in
// the original suggests an index shift around the pivot was intended; verify.
__global__ void kernel_II(char* d,size_t pitch,int block_x, int block_y,int n,int B,int r){
    __shared__ int d_i_k[BlockSize][BlockSize+1];
    __shared__ int d_k_j[BlockSize][BlockSize+1];
    unsigned int i, j;
    if(blockIdx.x==0){
        // pivot row: i fixed in block-row r
        i = r*B + threadIdx.x;
        j = blockIdx.y * B + threadIdx.y ;
    }
    else{
        // pivot column: j fixed in block-column r
        i = blockIdx.y * B + threadIdx.x ;
        j = r*B + threadIdx.y;
    }
    int* d_i = (int*)(d+pitch*i);
    int path = i<n&&j<n? d_i[j] : INF;
    int origin_path = path;
    d_i_k[threadIdx.x][threadIdx.y] = i < n && r*B+threadIdx.y < n ? __ldg(&d_i[r*B+threadIdx.y]) : INF;
    d_k_j[threadIdx.x][threadIdx.y] = j < n && r*B+threadIdx.x < n ? __ldg(&((int*)(d+pitch*(r*B+threadIdx.x)))[j]) : INF;
    __syncthreads();
    const unsigned int k_max = gmin((r+1) * B , n);
#pragma unroll
    for (unsigned int k = r * B; k < k_max; ++k) {
        int new_path = d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y];
        if(path>new_path) path = new_path;
    }
    if(origin_path>path&&i<n&&j<n){
        d_i[j]=path;
    }
}
23,674
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#define LIST_SIZE 10000

// Device-side accumulators for comparison-value profiling; defined in the
// instrumented kernels' translation unit.
extern "C" __device__ unsigned long long icmpValue1List[LIST_SIZE];
extern "C" __device__ unsigned long long icmpValue2List[LIST_SIZE];
extern "C" __device__ double fcmpValue1List[LIST_SIZE];
extern "C" __device__ double fcmpValue2List[LIST_SIZE];
extern "C" __device__ unsigned long long icmpCountList[LIST_SIZE];
extern "C" __device__ unsigned long long fcmpCountList[LIST_SIZE];
extern "C" __device__ unsigned long long record_flag;

// Clears the device-side recording flag so instrumentation stops sampling.
void bambooLogRecordOff(){
    long long local_record = 0;
    cudaMemcpyToSymbol(record_flag, &local_record, sizeof(long long), 0, cudaMemcpyHostToDevice);
}

// Enables device-side recording.  The parameter value is ignored: the flag is
// always set to 1 (kept for ABI compatibility with existing call sites).
void bambooLogKernelBegin(long long i) {
    i = 1;
    cudaMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, cudaMemcpyHostToDevice);
}

// Copies the profiling accumulators back from the device and writes the
// per-site mean comparison operands and hit counts to
// "profile_cmp_value_result.txt".
void bambooLogKernelEnd() {
#ifdef KERNELTRACE
    cudaDeviceSynchronize();
#endif
    // Fix: the six snapshot buffers total ~480 KB; they were previously
    // stack-allocated local arrays, which risks stack exhaustion.  Allocate
    // them on the heap instead.
    std::vector<long long> icmpValue1ListLocal(LIST_SIZE);
    std::vector<long long> icmpValue2ListLocal(LIST_SIZE);
    std::vector<double> fcmpValue1ListLocal(LIST_SIZE);
    std::vector<double> fcmpValue2ListLocal(LIST_SIZE);
    std::vector<long long> icmpCountListLocal(LIST_SIZE);
    std::vector<long long> fcmpCountListLocal(LIST_SIZE);
    cudaMemcpyFromSymbol(icmpValue1ListLocal.data(), icmpValue1List, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(icmpValue2ListLocal.data(), icmpValue2List, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(fcmpValue1ListLocal.data(), fcmpValue1List, LIST_SIZE * sizeof(double), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(fcmpValue2ListLocal.data(), fcmpValue2List, LIST_SIZE * sizeof(double), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(icmpCountListLocal.data(), icmpCountList, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(fcmpCountListLocal.data(), fcmpCountList, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    FILE *profileFile = fopen("profile_cmp_value_result.txt", "w");
    for(long long i=0; i < LIST_SIZE; i++){
        // Each site id is either an integer or a float comparison; report the
        // running mean of each operand plus the sample count.
        if(icmpCountListLocal[i] != 0) {
            fprintf(profileFile, "icmp %lld: %lld %lld %lld\n", i,
                    icmpValue1ListLocal[i]/icmpCountListLocal[i],
                    icmpValue2ListLocal[i]/icmpCountListLocal[i],
                    icmpCountListLocal[i]);
        } else if(fcmpCountListLocal[i] != 0) {
            fprintf(profileFile, "fcmp %lld: %f %f %lld\n", i,
                    fcmpValue1ListLocal[i]/fcmpCountListLocal[i],
                    fcmpValue2ListLocal[i]/fcmpCountListLocal[i],
                    fcmpCountListLocal[i]);
        }
    }
    fclose(profileFile);
}
23,675
#include "includes.h" __global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P,int width){ __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * TILE_WIDTH + ty; int col = bx * TILE_WIDTH + tx; float Pvalue = 0; //printf("%f\n", width/TILE_WIDTH ); for (int i = 0; i < width/TILE_WIDTH; ++i){ //printf("%d\n", i ); Mds[ty][tx] = d_M[row*width + i*TILE_WIDTH + tx]; Nds[ty][tx] = d_N[(i*TILE_WIDTH + ty)*width + col]; __syncthreads(); for (int j = 0; j < TILE_WIDTH; ++j){ Pvalue += Mds[ty][j] * Nds[j][tx]; } __syncthreads(); } d_P[row*width + col] = Pvalue; }
23,676
// #######################################################
//
// CUDA matrix-multiplication exercise
// Course: OPRP001 - Parallel Programming
// Prof.: Mauricio Pillon
// Pair: Beatriz and Geremias
//
// #######################################################
#include <cuda.h>
#include <stdio.h>
#include <math.h>

// Square matrix (number of rows == number of columns).
#define N 4

// GPU: multiplies matrices (a) and (b), result in (c).
//
// The launch supplies dimBlock * dimThreads threads in total
// (1 * 1 * 4 * 4 = 16 for N = 4); any block/thread shape whose product is
// N*N works.  The alternative sqrt-based configuration commented out in
// main() is more restrictive: it requires N to be a perfect square.
//
// Each thread computes one output cell (i, j) by accumulating
// c[i][j] += a[i][k] * b[k][j] over k = 0..N-1.  dirtyMem() guarantees the
// accumulator cells of dc start at zero.
__global__ void matMult (int *da, int *db, int *dc) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // row index
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // column index
    // Dot product of row i of (a) with column j of (b).
    for(int k = 0; k < N; k++){
        dc[i*N+j] += (da[i*N+k] * db[k*N+j]);
    }
}

// GPU: prints the (i, j) index mapping of every launched thread (debug aid).
__global__ void printIndex (void) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y);
}

// GPU: initializes the (a), (b) and (c) arrays in global memory
// (sentinel values for a/b, zero accumulator for c).
__global__ void dirtyMem (int *da, int *db, int *dc) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    da[i] = -1;
    db[i] = -2;
    dc[i] = 0;
}

// CPU: fills (a) and (b) with complementary test patterns so that
// a[i][j] + b[i][j] == N*N for every cell.
__host__ void initvet(int *host_a, int *host_b) {
    for (int i=0; i < N; i++) {
        for (int j=0; j < N; j++) {
            host_b[i*N+j] = (i+j)+((N-1)*i);
            host_a[i*N+j] = (N*N)-host_b[i*N+j];
        }
    }
}

// CPU: prints an N x N matrix with row/column headers.
__host__ void printMat (int *mat){
    for (int j =0; j < N; j++)
        printf("\t(%d)", j);
    printf("\n");
    for (int i=0; i < N; i++) {
        printf("(%d)", i);
        for (int j=0; j < N; j++){
            printf("\t%d", mat[i*N+j]);
        }
        printf("\n");
    }
}

// CPU: entry point -- allocates, initializes, launches and prints.
int main(int argc, char const *argv[]) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size;
    // Square matrix allocation.
    size = N * N * sizeof(int);
    // Pinned host allocations.
    cudaMallocHost((void **) &a, size);
    cudaMallocHost((void **) &b, size);
    cudaMallocHost((void **) &c, size);
    // GPU allocations for vectors (a, b and c).
    cudaMalloc ((void **) &dev_a, size);
    cudaMalloc ((void **) &dev_b, size);
    cudaMalloc ((void **) &dev_c, size);
    // Assign initial values to the vectors on the GPU
    // (N blocks of N threads covers all N*N cells).
    dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c);
    // Copy GPU -> CPU.
    cudaMemcpy (a, dev_a, size, cudaMemcpyDeviceToHost);
    cudaMemcpy (b, dev_b, size, cudaMemcpyDeviceToHost);
    cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost);
    // Print the GPU-initialized values.
    printf ("\t ### Valores Inicializados na GPU ###\n");
    printf ("\t ### Matriz (a) ### \n");
    printMat(a);
    printf ("\t ### Matriz (b) ### \n");
    printMat(b);
    printf ("\t ### Matriz (c) ### \n");
    printMat(c);
    // Initialize vectors (a) and (b) on the host.
    initvet(a,b);
    // Copy the CPU-generated vectors to GPU memory.
    cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice);
    // Block/thread counts for (x, y) dimensions -- layout 1:
    dim3 dimBlock (1, 1);
    dim3 dimThreads(N, N);
    // Layout 2 (more restrictive: N must have an exact integer square root):
    //dim3 dimBlock ((int) sqrt(N), (int) sqrt(N));
    //dim3 dimThreads((int) sqrt(N), (int) sqrt(N));
    // Print the positions visited under dimBlock/dimThreads.
    printIndex<<< dimBlock, dimThreads>>>();
    // Run the matMult kernel on the GPU.
    matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();
    // Copy vector (c) from GPU global memory back to the CPU.
    cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost);
    // Print the vectors after GPU processing.
    printf ("\t ### Valores após processamento em GPU ###\n");
    printf ("\t ### Matriz (a) ### \n");
    printMat(a);
    printf ("\t ### Matriz (b) ### \n");
    printMat(b);
    printf ("\t ### Matriz (c) ### \n");
    printMat(c);
    // Free GPU global memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    // Free pinned host memory.
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
23,677
#include "includes.h" __global__ void LinearValuesKernel(const float min, const float max, float* output, const int size, const int shift) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; __shared__ float delta; if (threadIdx.x == 0) delta = (max-min)/fmaxf((size-1), 1); __syncthreads(); if(id < size) { output[(id + shift) % size] = min + id * delta; } }
23,678
#include<stdio.h> #include<cuda.h> #include<time.h> __global__ void hello() { printf("GPU:: Hello world!!\n"); } int main() { hello<<<1,10>>>(); cudaDeviceSynchronize(); return 0; }
23,679
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cstring> #include <time.h> __global__ void calculate_unique_3d_idx(int * input, int size) { int tid = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x; int block_id = blockIdx.x + (blockIdx.y * gridDim.x) + (blockIdx.z * gridDim.x * gridDim.y); int global_index = block_id * blockDim.x * blockDim.y * blockDim.z + tid; printf ("tid: %d, block_id : %d, global_index : %d, value: %d \n", tid, block_id, global_index, input[global_index]); } int main() { int size = 64; int byte_size = sizeof(int) * size; int * h_data; h_data = (int *) malloc(byte_size); time_t t; srand((unsigned) time(&t)); for (int i =0; i < size; i++) { h_data[i] = (int) (rand() && 0xff); } int * d_data; cudaMalloc((void **)&d_data, byte_size); cudaMemcpy(d_data, h_data, byte_size, cudaMemcpyHostToDevice); int nx, ny, nz; nx = 4; ny = 4; nz = 4; dim3 block(2, 2, 2); dim3 grid(nx/block.x, ny/block.y, nz/block.z); calculate_unique_3d_idx<<<grid, block>>>(d_data, size); cudaDeviceSynchronize(); cudaFree(d_data); free(h_data); cudaDeviceReset(); return 0; }
23,680
/******* Test of an array-of-struct-of-device-pointers layout *******/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

#define REGIONS 20
#define YEARS 5

// Aborts (by default) with a diagnostic if a CUDA call failed.
// Fix: `file` is now const char* -- string literals such as __FILE__ may not
// bind to a mutable char* in standard C++.
__inline __host__ void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

// One simulated year: a flag plus per-region population arrays
// (device pointers while on the GPU, host pointers after the copy-back).
struct AnimalPopulationForYear_s
{
    bool isYearEven;
    int * rabbits;
    int * hyenas;
};

AnimalPopulationForYear_s * dev_pop;

// Computes synthetic rabbit/hyena populations for `year`; thread idx handles
// region idx.  Thread (0,0) also records the year's parity.
__global__ void RunSim(AnimalPopulationForYear_s dev_pop[], int year)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    int rabbits, hyenas;
    int arrEl = year-1;   // struct array is 0-based, years are 1-based

    rabbits = (idx+1) * year * year;
    hyenas = rabbits / 10;

    // Clamp to the model's limits.
    if ( rabbits > 100000 ) rabbits = 100000;
    if ( hyenas < 2 ) hyenas = 2;

    if ( idx < REGIONS ) dev_pop[arrEl].rabbits[idx] = rabbits;
    if ( idx < REGIONS ) dev_pop[arrEl].hyenas[idx] = hyenas;

    // Fix: was `year & 0x01 == 0x0`, which parses as `year & (0x01 == 0x0)`
    // == `year & 0` and therefore always stored false.  Parenthesize so the
    // flag actually reflects the year's parity.
    if (threadIdx.x == 0 && blockIdx.x == 0)
        dev_pop[arrEl].isYearEven = ((year & 0x01) == 0x0);
}

int main()
{
    // Various reused sizes.
    const size_t fullArrSz = size_t(YEARS) * size_t(REGIONS) * sizeof(int);
    const size_t structArrSz = size_t(YEARS) * sizeof(AnimalPopulationForYear_s);

    // Vars to hold the struct array and the merged sub-array storage.
    AnimalPopulationForYear_s * h_pop;
    int * dev_hyenas, * dev_rabbits, * h_hyenas, * h_rabbits, arrEl;

    // Allocate memory.
    h_pop = (AnimalPopulationForYear_s *) malloc(structArrSz);
    h_rabbits = (int *) malloc(fullArrSz);
    h_hyenas = (int *) malloc(fullArrSz);
    gpuErrchk(cudaMalloc((void **) &dev_pop,structArrSz));
    gpuErrchk(cudaMalloc((void **) &dev_rabbits,fullArrSz));
    gpuErrchk(cudaMalloc((void **) &dev_hyenas,fullArrSz));

    // Point each year's struct at its slice of the flat device arrays.
    for (int i = 0; i < YEARS; i++)
    {
        h_pop[i].rabbits = dev_rabbits+i*REGIONS;
        h_pop[i].hyenas = dev_hyenas+i*REGIONS;
    }

    // Copy the host struct array (carrying device pointers) to the device.
    gpuErrchk(cudaMemcpy(dev_pop,h_pop, structArrSz, cudaMemcpyHostToDevice));

    // Run one kernel launch per simulated year.
    for(int i=1; i < YEARS+1; i++)
        RunSim<<<REGIONS/128+1,128>>>(dev_pop,i);

    // Make sure nothing went wrong.
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(h_pop,dev_pop,structArrSz, cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_rabbits, dev_rabbits,fullArrSz, cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_hyenas,dev_hyenas,fullArrSz, cudaMemcpyDeviceToHost));

    // Re-point the structs at the host copies before reading them on the CPU.
    for(int i=0; i < YEARS; i++)
    {
        h_pop[i].rabbits = h_rabbits + i*REGIONS;
        h_pop[i].hyenas = h_hyenas + i*REGIONS;
    }

    for(int i=1; i < YEARS+1; i++)
    {
        arrEl = i-1;
        printf("\nYear %i\n=============\n\n", i);
        printf("Rabbits\n-------------\n");
        for (int j=0; j < REGIONS; j++)
            printf("Region: %i Pop: %i\n", j, h_pop[arrEl].rabbits[j]);
        printf("Hyenas\n-------------\n");
        for (int j=0; j < REGIONS; j++)
            printf("Region: %i Pop: %i\n", j, h_pop[arrEl].hyenas[j]);
    }

    // Free on device and host.
    cudaFree(dev_pop);
    cudaFree(dev_rabbits);
    cudaFree(dev_hyenas);
    free(h_pop);
    free(h_rabbits);
    free(h_hyenas);
    return 0;
}
23,681
#include "includes.h" __global__ void updateGradientsKernel(float4 *D, float4 *TD, unsigned int nVertices) { int vidx = 4*(blockIdx.x * blockDim.x) + threadIdx.x; int idx; for (idx=0; idx<4*BLOCK_SIZE_AVGG; idx+=BLOCK_SIZE_AVGG) { D[vidx+idx] = TD[vidx+idx]; } }
23,682
#include<iostream> #include <cuda_runtime.h> #include<cuda.h> #include "device_launch_parameters.h" using namespace std; #define s 96 __global__ void square(int *a, int *b) { int i = threadIdx.x; if(i<s) b[i] = a[i] * a[i] * a[i]; } int main() { int *a,*b, i; a = (int *)malloc(s * sizeof(int)); b = (int *)malloc(s * sizeof(int)); int *d_a, *d_b; cudaMalloc(&d_a, s * sizeof(int)); cudaMalloc(&d_b, s * sizeof(int)); for (i = 0; i < s; i++) { a[i] = i; } cudaMemcpy(d_a, a, s * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, s * sizeof(int), cudaMemcpyHostToDevice); square<<< 1, s >>>(d_a,d_b); cudaMemcpy(a, d_a, s * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(b, d_b, s * sizeof(int), cudaMemcpyDeviceToHost); for (i = 0; i < s; i++) { cout <<i<<":"<< b[i] << ","; } free(a); free(b); return 0; }
23,683
#include "includes.h" __global__ void transposeNaive(float *odata, const float *idata,int idata_rows,int idata_cols) { int x = blockIdx.x * TILE_SIZE + threadIdx.x; int y = blockIdx.y * TILE_SIZE + threadIdx.y; //int width = gridDim.x * TILE_SIZE; if(y<idata_rows && x<idata_cols) odata[x*idata_rows+y] = idata[y*idata_cols+x]; }
23,684
#include <stdio.h> #include <stdlib.h> void cudaHandleError( cudaError_t err,const char *file,int line ) { if (err != cudaSuccess) { printf( "CUDA Error\n%s in %s at line %d\n", cudaGetErrorString( err ),file, line ); exit( EXIT_FAILURE ); } }
23,685
#include <stdio.h>

// Max-pooling over stride x stride windows.
// Launch layout (from the indexing below): blockIdx.x = channel d,
// blockIdx.y = image n; the threads of a block divide the H_out*W_out output
// regions among themselves. Input layout (from indIN):
// index = n*(H*W*C) + d*(H*W) + u*H + v, i.e. column-major in the spatial dims.
__global__ void my_pooling(float* const out, float const* const data, size_t const H, size_t const W, size_t const C, size_t const N, size_t const stride)
{
    const size_t d = blockIdx.x;   // channel
    const size_t n = blockIdx.y;   // image number

    // Output spatial dimensions. Round up in double precision: the original
    // ceilf(H/stride) truncated first (size_t division), so non-divisible
    // H or W silently dropped the last partial window.
    const size_t H_out = (size_t) ceil(double(H) / double(stride));
    const size_t W_out = (size_t) ceil(double(W) / double(stride));
    const size_t R = H_out * W_out;
    // Number of output regions assigned to each thread (same fix as above).
    const size_t regTi = (size_t) ceil(double(R) / double(blockDim.x));

    // for each of the regions assigned to the current thread
    for (size_t reg = regTi * threadIdx.x; reg < min((int)regTi*(threadIdx.x+1), (int) R); reg++) {
        // base (v,u) position of this pooling window in the input image
        size_t vIN = stride * ( reg % H_out );
        size_t uIN = (size_t)stride * floor( double(reg) / double(H_out) );

        float max_ = __int_as_float(0xff800000);   // -inf
        for (size_t v_ = vIN; v_ < min(int(vIN + stride), int(H)); v_++) {
            for (size_t u_ = uIN; u_ < min(int(uIN + stride), int(W)); u_++) {
                // BUGFIX: index with the loop variables (u_, v_); the original
                // used the window corner (uIN, vIN), so the "max" was always
                // just the corner element of each window.
                size_t indIN = n*(H*W*C) + d*(H*W) + u_*(H) + v_;
                if (data[indIN] > max_)
                    max_ = data[indIN];
            }
        }

        // assign result to output
        size_t vOUT = reg % H_out;
        size_t uOUT = (size_t)floor(double(reg) / double(H_out));
        size_t indOUT = n*(H_out*W_out*C) + d*(H_out*W_out) + uOUT*(H_out) + vOUT;
        out[indOUT] = max_;
    }
}

// Same pooling as my_pooling, but also records the flat input index of each
// window maximum in outIndices (e.g. for the backward pass / unpooling).
__global__ void my_poolingIndices(float* const outMax, int* const outIndices, float const* const data, size_t const H, size_t const W, size_t const C, size_t const N, size_t const stride)
{
    const size_t d = blockIdx.x;   // channel
    const size_t n = blockIdx.y;   // image number

    // spatial dimensions of the output array
    const size_t H_out = (size_t) ceil(double(H)/double(stride));
    const size_t W_out = (size_t) ceil(double(W)/double(stride));
    const size_t R = H_out * W_out;
    const size_t regTi = (size_t) ceil(double(R) / double(blockDim.x));

    // for each of the regions assigned to the current thread
    for (size_t reg=regTi*threadIdx.x; reg < min((int)regTi*(threadIdx.x+1),(int) R); reg++) {
        // get the base (v,u) positions of the input image
        size_t vIN = stride * ( reg % H_out );
        size_t uIN = (size_t)stride * floor( double(reg) / double(H_out) );

        float max_ = __int_as_float(0xff800000);   // -inf
        int maxIdx_ = -1;
        for (size_t v_ = vIN; v_ < min(int(vIN + stride), int(H)); v_++) {
            for (size_t u_ = uIN; u_ < min(int(uIN + stride), int(W)); u_++) {
                size_t indIN = n*(H*W*C) + d*(H*W) + u_*(H) + v_;
                if (data[indIN] > max_) {
                    max_ = data[indIN];
                    maxIdx_ = indIN;
                }
            }
        }

        // assign result to output
        size_t vOUT = reg % H_out;
        size_t uOUT = (size_t)floor(double(reg) / double(H_out));
        size_t indOUT = n*(H_out*W_out*C) + d*(H_out*W_out) + uOUT*(H_out) + vOUT;
        outMax[indOUT] = max_;
        outIndices[indOUT] = maxIdx_;
    }
}

// Debug kernel: checks the uIN/H_out arithmetic for a hard-coded case.
__global__ void test(int* const deb, float const* const data)
{
    size_t const stride = 2;
    size_t reg = 93;
    const size_t H_out = (size_t) ceil(double(360)/double(stride));
    size_t uIN = (size_t)stride * floor( double(reg) / double(H_out) );
    // BUGFIX: %d with size_t arguments is undefined behavior in device printf;
    // use %llu with explicit widening casts.
    printf("uIN = %llu, H_out = %llu\n", (unsigned long long)uIN, (unsigned long long)H_out);
    deb[0] = uIN;
    deb[1] = H_out;
}
23,686
/* lenet_old.cu
 *
 * Older LeNet-style layer kernels (convolution, 2x2 max pooling, fully
 * connected classifiers), all sized at compile time via template parameters.
 * Several kernels hard-code launch assumptions (see the "Hack" comments):
 * KernelSize == 5 and blockDim.x == blockDim.y == 4. NOTE(review): kernels
 * that stage data in shared memory here return early for out-of-range
 * threads *before* __syncthreads(); that is only safe if the grid exactly
 * covers the output — confirm launch configs at the call sites.
 */

/* Direct convolution: one thread per output pixel (ocol, orow), one block-z
 * slice per output channel. The entire input volume is staged into shared
 * memory in four KernelSize-shifted passes, then each thread accumulates
 * bias + full kernel dot product. */
template <int InputSize, int InputChannels, int OutputSize, int OutputChannels, int KernelSize>
__global__ void convolution_gpu_shared_memory(
    float* devInput, float* devOutput, float* devWeight, float* devBias)
{
    int ocol = threadIdx.x + blockIdx.x * blockDim.x;
    int orow = threadIdx.y + blockIdx.y * blockDim.y;
    int och = blockIdx.z;
    int icol;
    int irow;
    int kcol;
    int krow;
    int kch;
    int outputIdx = och * OutputSize * OutputSize + orow * OutputSize + ocol;
    int ochOffset = och * InputChannels * KernelSize * KernelSize;
    float* pWeight = devWeight + ochOffset;   /* weights for this output channel */
    float sum;
    __shared__ float sharedInput[InputChannels][InputSize][InputSize];

    /* NOTE(review): early return before the barrier below — see file header. */
    if (ocol >= OutputSize || orow >= OutputSize || och >= OutputChannels)
        return;

    /* Stage the input: each thread loads its own pixel plus up to three
     * KernelSize-shifted halo pixels, for every input channel. */
    icol = ocol;
    irow = orow;
    for (kch = 0; kch < InputChannels; ++kch)
        sharedInput[kch][irow][icol] = devInput[kch * InputSize * InputSize + irow * InputSize + icol];
    icol = ocol + KernelSize;
    irow = orow;
    if (icol < InputSize)
        for (kch = 0; kch < InputChannels; ++kch)
            sharedInput[kch][irow][icol] = devInput[kch * InputSize * InputSize + irow * InputSize + icol];
    icol = ocol;
    irow = orow + KernelSize;
    if (irow < InputSize)
        for (kch = 0; kch < InputChannels; ++kch)
            sharedInput[kch][irow][icol] = devInput[kch * InputSize * InputSize + irow * InputSize + icol];
    icol = ocol + KernelSize;
    irow = orow + KernelSize;
    if (icol < InputSize && irow < InputSize)
        for (kch = 0; kch < InputChannels; ++kch)
            sharedInput[kch][irow][icol] = devInput[kch * InputSize * InputSize + irow * InputSize + icol];

    __syncthreads();

    /* Accumulate bias + full cross-channel kernel window. */
    sum = devBias[och];
    for (kch = 0; kch < InputChannels; ++kch)
        for (krow = 0; krow < KernelSize; ++krow)
            for (kcol = 0; kcol < KernelSize; ++kcol)
                sum += pWeight[kch * KernelSize * KernelSize + krow * KernelSize + kcol]
                     * sharedInput[kch][orow + krow][ocol + kcol];

    devOutput[outputIdx] = sum;
}

/* Convolution variant: threadIdx.z indexes the input channel, so per-channel
 * partial sums land in sharedResult and thread z==0 reduces across channels.
 * Stages only a (BlockSize + 2*KernelRadius)^2 tile per channel. */
template <int BlockSize, int InputSize, int InputChannels, int OutputSize, int OutputChannels, int KernelSize>
__global__ void convolution_gpu_shared_memory_2(
    float* devInput, float* devOutput, float* devWeight, float* devBias)
{
    int i;
    int ocol = threadIdx.x + blockIdx.x * blockDim.x;
    int orow = threadIdx.y + blockIdx.y * blockDim.y;
    int och = blockIdx.z;
    int ich = threadIdx.z;   /* input channel handled by this thread */
    int icol;
    int irow;
    int kcol;
    int krow;
    const int outputIdx = och * OutputSize * OutputSize + orow * OutputSize + ocol;
    const int ochOffset = och * InputChannels * KernelSize * KernelSize;
    const int inputOffset = ich * InputSize * InputSize;
    const int kernelOffset = ich * KernelSize * KernelSize;
    float* pWeight = devWeight + ochOffset;
    float sum;
    const int KernelRadius = KernelSize / 2;
    const int SharedInputSize = BlockSize + KernelRadius * 2;
    __shared__ float sharedInput[InputChannels][SharedInputSize][SharedInputSize];
    __shared__ float sharedWeight[InputChannels][KernelSize][KernelSize];
    __shared__ float sharedResult[InputChannels][BlockSize][BlockSize];

    /* NOTE(review): early return before the barriers below — see file header. */
    if (ocol >= OutputSize || orow >= OutputSize)
        return;

    /* Stage the input tile plus its right/bottom halo (2*KernelRadius wide). */
    icol = ocol;
    irow = orow;
    sharedInput[ich][threadIdx.y][threadIdx.x] = devInput[inputOffset + irow * InputSize + icol];
    icol = ocol + KernelRadius * 2;
    irow = orow;
    if (icol < InputSize)
        sharedInput[ich][threadIdx.y][threadIdx.x + KernelRadius * 2] = devInput[inputOffset + irow * InputSize + icol];
    icol = ocol;
    irow = orow + KernelRadius * 2;
    if (irow < InputSize)
        sharedInput[ich][threadIdx.y + KernelRadius * 2][threadIdx.x] = devInput[inputOffset + irow * InputSize + icol];
    icol = ocol + KernelRadius * 2;
    irow = orow + KernelRadius * 2;
    if (icol < InputSize && irow < InputSize)
        sharedInput[ich][threadIdx.y + KernelRadius * 2][threadIdx.x + KernelRadius * 2] = devInput[inputOffset + irow * InputSize + icol];

    /*
     * Hack: this code works because KernelSize is 5,
     * blockDim.x is 4, and blockDim.y is also 4
     * (each thread loads a 2x2 patch of the 5x5 kernel; edges overlap).
     */
    sharedWeight[ich][threadIdx.y][threadIdx.x] = pWeight[kernelOffset + threadIdx.y * KernelSize + threadIdx.x];
    sharedWeight[ich][threadIdx.y][threadIdx.x + 1] = pWeight[kernelOffset + threadIdx.y * KernelSize + threadIdx.x + 1];
    sharedWeight[ich][threadIdx.y + 1][threadIdx.x] = pWeight[kernelOffset + (threadIdx.y + 1) * KernelSize + threadIdx.x];
    sharedWeight[ich][threadIdx.y + 1][threadIdx.x + 1] = pWeight[kernelOffset + (threadIdx.y + 1) * KernelSize + threadIdx.x + 1];

    __syncthreads();

    /* Per-channel partial convolution into sharedResult. */
    sharedResult[ich][threadIdx.y][threadIdx.x] = 0.0f;
    for (krow = 0; krow < KernelSize; ++krow)
        for (kcol = 0; kcol < KernelSize; ++kcol)
            sharedResult[ich][threadIdx.y][threadIdx.x] +=
                sharedWeight[ich][krow][kcol] * sharedInput[ich][threadIdx.y + krow][threadIdx.x + kcol];

    __syncthreads();

    /* Thread z==0 reduces the channel partials and writes the output. */
    if (ich == 0) {
        sum = devBias[och];
        for (i = 0; i < InputChannels; ++i)
            sum += sharedResult[i][threadIdx.y][threadIdx.x];
        devOutput[outputIdx] = sum;
    }
}

/* 2x2 max pooling with a configurable Stride: one thread per output pixel,
 * one block-z slice per channel. Single-block spatial launch (indices come
 * from threadIdx only). */
template <int InputSize, int InputChannels, int OutputSize, int Stride>
__global__ void maxpooling_gpu_kernel_2x2_template(
    float* devInput, float* devOutput)
{
    int ocol = threadIdx.x;
    int orow = threadIdx.y;
    int och = blockIdx.z;
    float tmp0;
    float tmp1;
    float tmp2;
    float tmp3;
    float tmp4;
    float tmp5;
    int outputIdx = och * OutputSize * OutputSize + orow * OutputSize + ocol;
    int inputOffset = och * InputSize * InputSize + (orow * Stride) * InputSize + (ocol * Stride);

    if (ocol >= OutputSize || orow >= OutputSize || och >= InputChannels)
        return;

    /* Max over the 2x2 window at (orow*Stride, ocol*Stride). */
    tmp0 = devInput[inputOffset];
    tmp1 = devInput[inputOffset + 1];
    tmp2 = devInput[inputOffset + InputSize];
    tmp3 = devInput[inputOffset + InputSize + 1];
    tmp4 = fmaxf(tmp0, tmp1);
    tmp5 = fmaxf(tmp2, tmp3);
    devOutput[outputIdx] = fmaxf(tmp4, tmp5);
}

/* Fully connected layer + ReLU, blocked over the input: BlockSize input
 * values are staged into shared memory per iteration, each thread owns one
 * output neuron (row of devWeight). NOTE(review): devBias is never added
 * here — confirm whether that is intentional. */
template <int BlockSize, int InputSize, int OutputSize>
__global__ void classifier_gpu_blocked_and_relu_template(
    float* devInput, float* devOutput, float* devWeight, float* devBias)
{
    int i;
    int j;
    int k;
    int weightIdxBegin = InputSize * (BlockSize * blockIdx.y);
    int weightIdxEnd = weightIdxBegin + InputSize;
    int outputIdx = threadIdx.y + blockDim.y * blockIdx.y;
    float tmp = 0.0f;
    __shared__ float subInput[BlockSize];

    for (i = weightIdxBegin, j = 0; i < weightIdxEnd; i += BlockSize, j += BlockSize) {
        /* Cooperative load of the next BlockSize inputs (zero-padded). */
        if (j + threadIdx.y < InputSize)
            subInput[threadIdx.y] = devInput[j + threadIdx.y];
        else
            subInput[threadIdx.y] = 0.0f;
        __syncthreads();
#pragma unroll
        for (k = 0; k < BlockSize; ++k)
            tmp += devWeight[i + InputSize * threadIdx.y + k] * subInput[k];
        __syncthreads();
    }

    /* ReLU on write-out. */
    if (outputIdx < OutputSize)
        if (tmp > 0.0f)
            devOutput[outputIdx] = tmp;
        else
            devOutput[outputIdx] = 0.0f;
}

/* Fully connected + ReLU, variant 3: threadIdx.x computes a BlockSize-long
 * partial dot product; thread x==0 reduces the partials, adds bias, and
 * applies ReLU via multiplication by the (tmp > 0) predicate. */
template <int BlockSize, int InputSize, int OutputSize>
__global__ void classifier_gpu_blocked_and_relu_template_3(
    float* devInput, float* devOutput, float* devWeight, float* devBias)
{
    int k;
    int outputIdx = threadIdx.y + blockDim.y * blockIdx.y;
    float* pInput = devInput + BlockSize * threadIdx.x;
    float* pWeight = devWeight + InputSize * outputIdx + BlockSize * threadIdx.x;
    float tmp = 0.0f;
    __shared__ float subOutput[BlockSize][InputSize / BlockSize];

#pragma unroll
    for (k = 0; k < BlockSize; ++k)
        tmp += pWeight[k] * pInput[k];
    subOutput[threadIdx.y][threadIdx.x] = tmp;

    __syncthreads();

    if (threadIdx.x == 0) {
#pragma unroll
        for (k = 1; k < InputSize / BlockSize; ++k)
            subOutput[threadIdx.y][0] += subOutput[threadIdx.y][k];
        subOutput[threadIdx.y][0] += devBias[outputIdx];
        /* branchless ReLU */
        devOutput[outputIdx] = subOutput[threadIdx.y][0] * (subOutput[threadIdx.y][0] > 0);
    }
}

/* Fully connected layer + softmax, intended for a single block covering all
 * OutputSize neurons (indices use threadIdx.y only; grid terms are commented
 * out). NOTE(review): devBias is unused, weightIdxBegin is dead, and the
 * softmax has no max-subtraction, so expf may overflow for large logits. */
template <int BlockSize, int InputSize, int OutputSize>
__global__ void classifier_gpu_blocked_and_softmax_template(
    float* devInput, float* devOutput, float* devWeight, float* devBias)
{
    int i;
    int k;
    // int weightIdxBegin = InputSize * (BlockSize * blockIdx.y);
    // int outputIdx = threadIdx.y + blockDim.y * blockIdx.y;
    int weightIdxBegin = 0;
    int outputIdx = threadIdx.y;
    // float* pWeight = devWeight + weightIdxBegin + InputSize * threadIdx.y;
    float* pWeight = devWeight + InputSize * threadIdx.y;
    float tmp = 0.0f;
    float sum = 0.0f;
    __shared__ float subInput[BlockSize];
    __shared__ float subOutput[OutputSize];

    for (i = 0; i < InputSize; i += BlockSize) {
        /* Cooperative load of the next BlockSize inputs (zero-padded). */
        if (i + threadIdx.y < InputSize)
            subInput[threadIdx.y] = devInput[i + threadIdx.y];
        else
            subInput[threadIdx.y] = 0.0f;
        __syncthreads();
#pragma unroll
        for (k = 0; k < BlockSize; ++k)
            tmp += pWeight[i + k] * subInput[k];
    }

    if (outputIdx < OutputSize)
        subOutput[outputIdx] = expf(tmp);

    __syncthreads();

    /* Every thread sums all exponentials, then normalizes its own entry. */
#pragma unroll
    for (k = 0; k < OutputSize; ++k)
        sum += subOutput[k];

    if (outputIdx < OutputSize)
        devOutput[outputIdx] = subOutput[outputIdx] / sum;
}

/* Fused convolution + 2x2 max pooling ("old" version): same channel-per-
 * threadIdx.z scheme as convolution_gpu_shared_memory_2, then threads with
 * even (x, y) pool the 2x2 neighborhood of the block's result tile.
 * NOTE(review): the __syncthreads() inside "if (ich == 0)" is executed only
 * by the ich==0 slice of the block — a barrier in divergent control flow;
 * confirm this is safe for the intended launch configuration. */
template <int BlockSize, int InputSize, int InputChannels, int OutputSize, int OutputChannels, int PoolOutputSize>
__global__ void convolution_gpu_shared_memory_2_maxpooling_2x2_old(
    float* devInput, float* devOutput, float* devWeight, float* devBias, float* devPoolOutput)
{
    /* Assumptions: blockDim.x == 4, blockDim.y == 4 */
    const int KernelSize = 5;
    const int ocol = threadIdx.x + blockIdx.x * blockDim.x;
    const int orow = threadIdx.y + blockIdx.y * blockDim.y;
    const int och = blockIdx.z;
    const int ich = threadIdx.z;
    /* const int outputIdx = och * OutputSize * OutputSize + orow * OutputSize + ocol; */
    const int ochOffset = och * InputChannels * KernelSize * KernelSize;
    const int inputOffset = ich * InputSize * InputSize;
    const int kernelOffset = ich * KernelSize * KernelSize;
    const int tmpOffset = inputOffset + orow * InputSize + ocol;
    const int KernelRadius = KernelSize / 2;
    const int SharedInputSize = BlockSize + KernelRadius * 2;
    int i;
    int icol;
    int irow;
    int kcol;
    int krow;
    float* pWeight = devWeight + ochOffset;
    float tmp = 0.0f;
    float sum = 0.0f;
    __shared__ float sharedInput[InputChannels][SharedInputSize][SharedInputSize];
    __shared__ float sharedWeight[InputChannels][KernelSize][KernelSize];
    __shared__ float sharedResult[InputChannels][BlockSize][BlockSize];

    /* NOTE(review): early return before the barriers below — see file header. */
    if (ocol >= OutputSize || orow >= OutputSize)
        return;

    /*
     * Bring input data to shared memory
     */
    sharedInput[ich][threadIdx.y][threadIdx.x] = devInput[tmpOffset];
    icol = ocol + KernelRadius * 2;
    if (icol < InputSize)
        sharedInput[ich][threadIdx.y][threadIdx.x + KernelRadius * 2] = devInput[tmpOffset + KernelRadius * 2];
    irow = orow + KernelRadius * 2;
    if (irow < InputSize)
        sharedInput[ich][threadIdx.y + KernelRadius * 2][threadIdx.x] = devInput[tmpOffset + InputSize * KernelRadius * 2];
    if (icol < InputSize && irow < InputSize)
        sharedInput[ich][threadIdx.y + KernelRadius * 2][threadIdx.x + KernelRadius * 2] = devInput[tmpOffset + InputSize * KernelRadius * 2 + KernelRadius * 2];

    /*
     * Bring weight data to shared memory
     */
    /*
     * Hack: this code works because KernelSize == 5,
     * blockDim.x == blockDim.y == 4
     */
    pWeight += kernelOffset + threadIdx.y * KernelSize + threadIdx.x;
    sharedWeight[ich][threadIdx.y][threadIdx.x] = *pWeight;
    sharedWeight[ich][threadIdx.y][threadIdx.x + 1] = *(pWeight + 1);
    pWeight += KernelSize;
    sharedWeight[ich][threadIdx.y + 1][threadIdx.x] = *pWeight;
    sharedWeight[ich][threadIdx.y + 1][threadIdx.x + 1] = *(pWeight + 1);

    __syncthreads();

    /* Per-channel partial convolution. */
#pragma unroll
    for (krow = 0; krow < KernelSize; ++krow)
#pragma unroll
        for (kcol = 0; kcol < KernelSize; ++kcol)
            tmp += sharedWeight[ich][krow][kcol] * sharedInput[ich][threadIdx.y + krow][threadIdx.x + kcol];
    sharedResult[ich][threadIdx.y][threadIdx.x] = tmp;

    __syncthreads();

    if (ich == 0) {
        /* Reduce channel partials, then pool the block's 4x4 result tile. */
        sum = devBias[och];
#pragma unroll
        for (i = 0; i < InputChannels; ++i)
            sum += sharedResult[i][threadIdx.y][threadIdx.x];
        sharedResult[0][threadIdx.y][threadIdx.x] = sum;
        __syncthreads();
        /* Max pooling */
        if (threadIdx.x % 2 == 0 && threadIdx.y % 2 == 0) {
            float tmp0;
            float tmp1;
            tmp0 = fmaxf(sharedResult[0][threadIdx.y][threadIdx.x],
                         sharedResult[0][threadIdx.y][threadIdx.x + 1]);
            tmp1 = fmaxf(sharedResult[0][threadIdx.y + 1][threadIdx.x],
                         sharedResult[0][threadIdx.y + 1][threadIdx.x + 1]);
            devPoolOutput[och * PoolOutputSize * PoolOutputSize + (orow / 2) * PoolOutputSize + (ocol / 2)] = fmaxf(tmp0, tmp1);
        }
        /* Alternative pooling implementations, kept for reference: */
        /* if (threadIdx.x < BlockSize / 2 && threadIdx.y < BlockSize / 2) {
            float tmp0; float tmp1;
            tmp0 = fmaxf(sharedResult[0][threadIdx.y * 2][threadIdx.x * 2],
                         sharedResult[0][threadIdx.y * 2][threadIdx.x * 2 + 1]);
            tmp1 = fmaxf(sharedResult[0][threadIdx.y * 2 + 1][threadIdx.x * 2],
                         sharedResult[0][threadIdx.y * 2 + 1][threadIdx.x * 2 + 1]);
            devPoolOutput[och * PoolOutputSize * PoolOutputSize
                + ((blockDim.y * blockIdx.y) / 2 + threadIdx.y) * PoolOutputSize
                + ((blockDim.x * blockIdx.x) / 2 + threadIdx.x)] = fmaxf(tmp0, tmp1);
        } */
        /* if (threadIdx.x % 2 == 0) {
            sharedResult[0][threadIdx.y][threadIdx.x] = fmaxf(
                sharedResult[0][threadIdx.y][threadIdx.x],
                sharedResult[0][threadIdx.y][threadIdx.x + 1]);
        }
        __syncthreads();
        if (threadIdx.y % 2 == 0) {
            sharedResult[0][threadIdx.y][threadIdx.x] = fmaxf(
                sharedResult[0][threadIdx.y][threadIdx.x],
                sharedResult[0][threadIdx.y + 1][threadIdx.x]);
        }
        __syncthreads();
        if (threadIdx.x % 2 == 0 && threadIdx.y % 2 == 0) {
            devPoolOutput[och * PoolOutputSize * PoolOutputSize
                + (orow / 2) * PoolOutputSize + (ocol / 2)]
                = sharedResult[0][threadIdx.y][threadIdx.x];
        } */
    }
}

/* Fully connected + ReLU, variant 2: stages both inputs and a BlockSize x
 * BlockSize weight tile in shared memory; thread x==0 of each row reduces
 * the row's partials, adds bias, and applies ReLU. */
template <int BlockSize, int InputSize, int OutputSize>
__global__ void classifier_gpu_blocked_and_relu_template_2(
    float* devInput, float* devOutput, float* devWeight, float* devBias)
{
    int i;
    int k;
    int weightIdxBegin = InputSize * (BlockSize * blockIdx.y);
    int outputIdx = threadIdx.y + blockDim.y * blockIdx.y;
    float* pWeight = devWeight + weightIdxBegin + InputSize * threadIdx.y + threadIdx.x;
    float tmp = 0.0f;
    __shared__ float subInput[BlockSize];
    __shared__ float subWeight[BlockSize][BlockSize];
    __shared__ float subOutput[BlockSize][BlockSize];

    subOutput[threadIdx.y][threadIdx.x] = 0.0f;
    __syncthreads();

    for (i = 0; i < InputSize; i += BlockSize) {
        /* This implementation wastes so many threads */
        subInput[threadIdx.y] = devInput[i + threadIdx.y];
        subWeight[threadIdx.y][threadIdx.x] = pWeight[i];
        __syncthreads();
        subOutput[threadIdx.y][threadIdx.x] += subWeight[threadIdx.y][threadIdx.x] * subInput[threadIdx.x];
    }

    __syncthreads();

    // if (threadIdx.x == 0 && outputIdx < OutputSize) {
    if (threadIdx.x == 0) {
#pragma unroll 4
        for (k = 0; k < BlockSize; ++k)
            tmp += subOutput[threadIdx.y][k];
        tmp += devBias[outputIdx];
        devOutput[outputIdx] = fmaxf(tmp, 0.0f);
    }
}
23,687
#include <iostream>

// Skeleton queue class: carries no state yet; the constructor and
// destructor are declared and defined empty as placeholders.
class queue
{
public:
    queue();
    ~queue();
};

queue::queue()
{
}

queue::~queue()
{
}
23,688
#include "matrix.cuh"
#include "vector.cuh"
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <chrono>
#include <ctime>
// #include <ratio>
#include <functional>

using namespace gpu_thrust;

// RAII wrapper over a pair of CUDA events, used to time one solver call.
class TimeRecord {
private:
    cudaEvent_t start;
    cudaEvent_t stop;
    float ms;   // elapsed GPU time of the last execute(), in milliseconds

public:
    TimeRecord()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~TimeRecord()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    // Run solver f bracketed by the two events; k receives the iteration
    // count and 1e-6 is the convergence tolerance handed to the solver.
    void execute(std::function<void(matrix&, vector&, vector&, vector&, int&, double)> f,
                 matrix& A, vector& b, vector& x0, vector& x, int& k)
    {
        cudaEventRecord(start);
        f(A, b, x0, x, k, 1e-6);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&ms, start, stop);
    }

    // Elapsed milliseconds of the most recent execute() call.
    auto get_duration() { return ms; }
};

// Fill A (1-based indexing via matrix::set) with uniform values in [0, 1].
void fill_matrix(matrix& A)
{
    for (int i = 1; i <= rows(A); ++i) {
        for (int j = 1; j <= columns(A); ++j) {
            A.set(i, j, (double)rand() / (double)RAND_MAX);
        }
    }
}

// Fill b with all-ones (1-based indexing via vector::set).
void fill_vector(vector& b)
{
    for (int i = 1; i <= rows(b); ++i) {
        b.set(i, 1);
    }
}

// Build a random n x n system, solve it with GMRES, and report timing.
void run(int n)
{
    matrix A(n, n);
    fill_matrix(A);
    vector b(n);
    fill_vector(b);
    vector x0(n);
    vector x(n);
    int iter_count;
    TimeRecord time_record;
    time_record.execute(GMRES, A, b, x0, x, iter_count);
    std::cout << "execution time: " << time_record.get_duration() << "\n";
    std::cout << "iteration count: " << iter_count << "\n";
}

int main(int argc, char* argv[])
{
    // Validate the command line before touching argv[1]; the original
    // dereferenced argv[1] unconditionally (undefined behavior when run
    // without an argument).
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <n>\n";
        return 1;
    }
    int n = std::atoi(argv[1]);
    if (n <= 0) {
        std::cerr << "n must be a positive integer\n";
        return 1;
    }
    srand(time(NULL));
    run(n);
    return 0;
}
23,689
// #include <stdio.h> // #include <iostream> // #include <fstream> // #include <time.h> // #include <random> // #include "reduction_fcm.cuh" // using namespace std; // __host__ void init_membership(float *i_membership, int i_rows, int i_cols, int i_num_clutsers) { // // cout << i_cols << endl; // // cout << i_rows << endl; // // cout << "Init m start" << endl; // // 1d-configuration // int len = i_rows * i_cols * i_num_clutsers; // i_membership = new float[len]; // for (int i = 0; i < len; ++i) { // i_membership[i] = 1 / (float)i_num_clutsers; // } // // cout << "Init m done" << endl; // // 3d-configuration // // i_membership = new float** [i_rows]; // // for (int i = 0; i < i_rows; ++i) { // // i_membership[i] = new float* [i_cols]; // // // i_new_membership[i] = new float* [i_cols]; // // for (int j = 0; j < i_cols; ++j) { // // i_membership[i][j] = new float[i_num_clutsers]; // // // i_new_membership[i][j] = new float[i_num_clutsers]; // // } // // } // // for (int i = 0; i < i_rows; ++i) { // // for (int j = 0; j < i_cols; ++j) { // // for (int k = 0; k < i_num_clutsers; ++k) { // // i_membership[i][j][k] = 1 / (float)i_num_clutsers; // // // i_new_membership[i][j][k] = 99999; // // } // // } // // } // } // __host__ void init_centers(float *i_cluster_centers, int i_num_clutsers) { // // cout << "Init c start" << endl; // i_cluster_centers = new float[i_num_clutsers]; // for (int i = 0; i < i_num_clutsers; ++i) { // // randomly select i_num_clutsers points as cluster centers // // random generator // random_device rd; // mt19937 eng(rd()); // uniform_real_distribution<> dist(0, 1); // i_cluster_centers[i] = dist(eng); // } // // cout << "Init c done" << endl; // // cout << "Centers: " << endl; // // for (int i = 0; i < i_num_clutsers; ++i) { // // cout << i_cluster_centers[i] << endl; // // } // } // __host__ void init_final_cluster(int* i_final_cluster, int rows, int cols) { // // cout << "Init f start" << endl; // // i_final_cluster = new int[rows * cols]; // 
for (int i = 0; i < rows * cols; ++i) { // i_final_cluster[i] = -1; // } // // cout << "Init f done" << endl; // } // __device__ float eucl_distance(float center, float val) { // // val: data point value // // i: cluster center point value // return sqrt(pow(val - center, 2)); // } // __global__ void update_centers_numerator_kernel(float *i_image, float *i_membership, float *i_cluster_centers, int i_rows, int i_cols, int i_num_clutsers, int i_m, float* numerator, int c) { // extern __shared__ float shared_d[]; // int global_idx = blockIdx.x * blockDim.x + threadIdx.x; // int i = global_idx % i_rows; // int j = (global_idx - i) / i_rows; // int k = c; // // printf("1\n"); // // each thread loads one element into shared memory // if (global_idx < i_rows * i_cols) { // shared_d[threadIdx.x] = i_image[i + i_rows * j] * pow(i_membership[k * i_rows * i_cols + j * i_rows + i], i_m); // } // else { // shared_d[threadIdx.x] = 0; // } // __syncthreads(); // // do reduction in shared memory // for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { // if (threadIdx.x < s) { // shared_d[threadIdx.x] += shared_d[threadIdx.x + s]; // } // __syncthreads(); // } // // write result back to global memory // if (threadIdx.x == 0) { // numerator[blockIdx.x] = shared_d[0]; // } // } // __global__ void update_centers_denominator_kernel(float *i_image, float *i_membership, float *i_cluster_centers, int i_rows, int i_cols, int i_num_clutsers, int i_m, float* denominator, int c) { // // printf("222\n"); // extern __shared__ float shared_d[]; // int global_idx = blockIdx.x * blockDim.x + threadIdx.x; // int i = global_idx % i_rows; // int j = (global_idx - i) / i_rows; // int k = c; // // printf("2\n"); // // each thread loads one element into shared memory // if (global_idx < i_rows * i_cols) { // shared_d[threadIdx.x] = pow(i_membership[k * i_rows * i_cols + j * i_rows + i], i_m); // } // else { // shared_d[threadIdx.x] = 0; // } // __syncthreads(); // // do reduction in shared memory // 
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { // if (threadIdx.x < s) { // shared_d[threadIdx.x] += shared_d[threadIdx.x + s]; // } // __syncthreads(); // } // // write result back to global memory // if (threadIdx.x == 0) { // denominator[blockIdx.x] = shared_d[0]; // } // } // // __global__ void update_centers_kernel(float *i_image, float *i_membership, float *i_cluster_centers, int i_rows, int i_cols, int i_num_clutsers, int i_m, int threads_per_block) { // // int global_idx = blockIdx.x * blockDim.x + threadIdx.x; // // unsigned int blks = 1 + (i_rows * i_cols - 1) / threads_per_block; // // unsigned int in_size = i_rows * i_cols; // // unsigned int shared_size = threads_per_block * sizeof(float); // // float *res_num = new float[blks]; // // float *res_den = new float[blks]; // // float *device_in_num, *device_in_den, *device_out_num, *device_out_den; // // float numerator, denominator; // // // cudaMalloc((void **)&device_in_num, in_size * sizeof(float)); // // // cudaMalloc((void **)&device_in_den, in_size * sizeof(float)); // // // cudaMalloc((void **)&device_out_num, blks * sizeof(float)); // // // cudaMalloc((void **)&device_out_den, blks * sizeof(float)); // // // cudaMemcpyAsync(device_in_num, i_membership, in_size * sizeof(float), cudaMemcpyDeviceToDevice); // // // cudaMemcpyAsync(device_in_den, i_membership, in_size * sizeof(float), cudaMemcpyDeviceToDevice); // // // while (true) { // // // update_centers_numerator_kernel<<<blks, threads_per_block, shared_size>>>(i_image, device_in_num, i_cluster_centers, i_rows, i_cols, i_num_clutsers, i_m, device_out_num, global_idx); // // // printf("g\n"); // // // cudaDeviceSynchronize(); // // // update_centers_denominator_kernel<<<blks, threads_per_block, shared_size>>>(i_image, device_in_den, i_cluster_centers, i_rows, i_cols, i_num_clutsers, i_m, device_out_den, global_idx); // // // cudaDeviceSynchronize(); // // // printf("gg\n"); // // // if (blks == 1) { // // // break; // // // } // // // 
in_size = blks; // // // // device_out now holds the reduce result, copy to device in and reduce again (next time) // // // cudaMemcpyAsync(device_in_num, device_out_num, in_size * sizeof(float), cudaMemcpyDeviceToDevice); // // // cudaMemcpyAsync(device_in_den, device_out_den, in_size * sizeof(float), cudaMemcpyDeviceToDevice); // // // blks = 1 + ((blks - 1) / threads_per_block); // // // } // // // cudaMemcpyAsync(res_num, device_out_num, sizeof(float), cudaMemcpyDeviceToDevice); // // // cudaMemcpyAsync(res_den, device_out_den, sizeof(float), cudaMemcpyDeviceToDevice); // // // numerator = res_num[0]; // // // denominator = res_den[0]; // // // update the cluster center (finally!) // // // i_cluster_centers[global_idx] = numerator / denominator; // // __syncthreads(); // // delete [] res_num; // // delete [] res_den; // // cudaFree(device_in_num); // // cudaFree(device_in_den); // // cudaFree(device_out_num); // // cudaFree(device_out_den); // // } // __global__ void update_membership_kernel(float *i_image, float *i_cluster_centers, float *i_membership, int i_rows, int i_cols, int i_num_clutsers, int i_m) { // // calculate degree of membership of each data point (image) regarding each cluster // // for (int i = 0; i < i_rows; ++i) { // // for (int j = 0; j < i_cols; ++j) { // // for (int k = 0; k < i_num_clutsers; ++k) { // // // cout << "Hi" << endl; // // // i_new_membership[i][j][k] = calculate_membership_point(i, j, k); // // // FIX!! 
// // // i_membership[i][j][k] = calculate_membership_point_kernel(i_image, i_cluster_centers, i, j, k, i_num_clutsers, i_m); // // } // // } // // } // // std::printf("Dafuq?\n"); // // shared memory that stores: memberships for that pixel, // extern __shared__ float shared_d[]; // // std::printf("%f???\n", i_image[0]); // // global index // int global_idx = blockIdx.x * blockDim.x + threadIdx.x; // // std::printf("2?\n"); // // chunk of shared memory each thread gets to use // int per_thread_len = 2 + i_num_clutsers; // // x, y, z indices in original membership matrix // int x = global_idx % i_rows ; // int y = ((global_idx - x) / i_rows) % i_cols; // int z = ((global_idx - x - y * i_rows) / (i_rows * i_cols)); // // std::printf("3?\n"); // // load data into shared memory (each thread loads info for a **single** pixel membership) // if (global_idx < i_rows * i_cols * i_num_clutsers) { // // load pixel (1) // // std::printf("fxxk this sxit %f\n", i_image[global_idx]); // shared_d[threadIdx.x * per_thread_len] = i_image[x + y * i_rows]; // // std::printf("fxxk this sxit %f\n", i_image[global_idx]); // // load membership (1) // shared_d[threadIdx.x * per_thread_len + 1] = i_membership[(z * i_rows * i_cols) + y * i_rows + x]; // // load centers (i_num_clutsers) // for (int i = 0; i < i_num_clutsers; ++i) { // // shared_d[threadIdx.x * per_thread_len + (i + 1)] = i_membership[(x + y * i_rows) * i_num_clutsers + i]; // shared_d[threadIdx.x * per_thread_len + 1 + (i + 1)] = i_cluster_centers[i]; // } // } // else { // // std::printf("else???? 
// ---------------------------------------------------------------------------
// Fuzzy C-Means (FCM) clustering of a single-channel image on the GPU.
//
// Layout conventions used throughout this file:
//   image      : rows x cols,  pixel (x, y)           -> image[x + y*rows]
//   membership : rows x cols x num_clusters, (x,y,z)  -> membership[z*rows*cols + y*rows + x]
//
// fcm_step() alternates a membership-update kernel with per-cluster
// numerator/denominator block reductions for T epochs.
// ---------------------------------------------------------------------------

#include <stdio.h>
#include <iostream>
#include <fstream>
#include <time.h>
#include <random>

#include "reduction_fcm.cuh"

using namespace std;

// Fill the caller-provided membership buffer (rows*cols*num_clusters floats)
// with the uniform initial value 1/num_clusters.
// BUG FIX: the previous version allocated into its local pointer copy
// (`i_membership = new float[...]`), which the caller could never observe and
// which leaked; the buffer is now expected to be allocated by the caller,
// matching init_final_cluster().
__host__ void init_membership(float *i_membership, int i_rows, int i_cols, int i_num_clutsers) {
    const int len = i_rows * i_cols * i_num_clutsers;
    const float uniform = 1.0f / (float)i_num_clutsers;
    for (int i = 0; i < len; ++i) {
        i_membership[i] = uniform;
    }
}

// Fill the caller-provided center array with random values in [0, 1).
// Same allocate-into-local-copy bug fixed as in init_membership; the RNG is
// also constructed once instead of being re-seeded for every center.
__host__ void init_centers(float *i_cluster_centers, int i_num_clutsers) {
    random_device rd;
    mt19937 eng(rd());
    uniform_real_distribution<> dist(0, 1);
    for (int i = 0; i < i_num_clutsers; ++i) {
        i_cluster_centers[i] = dist(eng);
    }
}

// Mark every pixel as "no cluster assigned yet" (-1).
__host__ void init_final_cluster(int *i_final_cluster, int rows, int cols) {
    for (int i = 0; i < rows * cols; ++i) {
        i_final_cluster[i] = -1;
    }
}

// 1-D Euclidean distance between a pixel value and a cluster center.
// sqrt(pow(d, 2)) == |d| for scalars, so fabsf is equivalent and cheaper.
__device__ float eucl_distance(float center, float val) {
    return fabsf(val - center);
}

// Per-block partial sums of image[p] * membership[p, c]^m over all pixels p
// (numerator of the center update for cluster c).
// Launch with dynamic shared memory = blockDim.x * sizeof(float); requires
// blockDim.x to be a power of two. One partial sum per block is written to
// `numerator`; the host accumulates the partials.
__global__ void update_centers_numerator_kernel(float *i_image, float *i_membership, float *i_cluster_centers, int i_rows, int i_cols, int i_num_clutsers, int i_m, float *numerator, int c) {
    extern __shared__ float shared_d[];
    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i = global_idx % i_rows;   // row index inside the image
    int j = global_idx / i_rows;   // column index inside the image
    // Guarded load: out-of-range threads contribute 0 so the tree reduction
    // below stays correct for a partially filled last block.
    if (global_idx < i_rows * i_cols) {
        shared_d[threadIdx.x] = i_image[i + i_rows * j] *
            powf(i_membership[c * i_rows * i_cols + j * i_rows + i], (float)i_m);
    } else {
        shared_d[threadIdx.x] = 0.0f;
    }
    __syncthreads();
    // Standard shared-memory tree reduction.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            shared_d[threadIdx.x] += shared_d[threadIdx.x + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        numerator[blockIdx.x] = shared_d[0];
    }
}

// Per-block partial sums of membership[p, c]^m over all pixels p
// (denominator of the center update for cluster c). Same launch contract as
// update_centers_numerator_kernel.
__global__ void update_centers_denominator_kernel(float *i_image, float *i_membership, float *i_cluster_centers, int i_rows, int i_cols, int i_num_clutsers, int i_m, float *denominator, int c) {
    extern __shared__ float shared_d[];
    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i = global_idx % i_rows;
    int j = global_idx / i_rows;
    if (global_idx < i_rows * i_cols) {
        shared_d[threadIdx.x] = powf(i_membership[c * i_rows * i_cols + j * i_rows + i], (float)i_m);
    } else {
        shared_d[threadIdx.x] = 0.0f;
    }
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            shared_d[threadIdx.x] += shared_d[threadIdx.x + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        denominator[blockIdx.x] = shared_d[0];
    }
}

// Recompute every membership value from the current centers:
//   u[z][y][x] = 1 / sum_c ( d(center_z, px) / d(center_c, px) )^(2/(m-1))
// One thread per (pixel, cluster) entry.
// BUG FIXES vs. the previous version:
//  * the exponent 2/(i_m-1) was evaluated in integer arithmetic (0 for m>3);
//  * the shared-memory staging zeroed the wrong slots in its else-branch and
//    bought nothing (every value was read exactly once), so it was removed.
// NOTE(review): a pixel exactly equal to a center gives d_all == 0 and a
// division by zero, exactly as in the original — confirm inputs avoid this.
__global__ void update_membership_kernel(float *i_image, float *i_cluster_centers, float *i_membership, int i_rows, int i_cols, int i_num_clutsers, int i_m) {
    int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (global_idx >= i_rows * i_cols * i_num_clutsers) {
        return;
    }
    int x = global_idx % i_rows;
    int y = (global_idx / i_rows) % i_cols;
    int z = global_idx / (i_rows * i_cols);

    float pixel = i_image[x + y * i_rows];
    float d_center = eucl_distance(i_cluster_centers[z], pixel);
    float expo = 2.0f / (float)(i_m - 1);
    float aggr = 0.0f;
    for (int c = 0; c < i_num_clutsers; ++c) {
        float d_all = eucl_distance(i_cluster_centers[c], pixel);
        aggr += powf(d_center / d_all, expo);
    }
    i_membership[(z * i_rows * i_cols) + y * i_rows + x] = 1.0f / aggr;
}

// NOTE(review): incomplete helper kept only for interface compatibility —
// d_center is never assigned (the 2-D image indexing was left as a FIX in the
// original), so the result is meaningless. Unused by the active code path.
__device__ float calculate_membership_point_kernel(float *i_image, float *i_cluster_centers, int i, int j, int k, int i_num_clutsers, int i_m) {
    float d_center = 0, d_all = 0, aggr = 0.0;
    for (int c = 0; c < i_num_clutsers; ++c) {
        aggr += pow((d_center / d_all), 2 / (i_m - 1));
    }
    return 1.0 / aggr;
}

// Run T epochs of FCM. On return, out_membership holds the final membership
// matrix and i_cluster_centers the final centers (both on the host).
// BUG FIXES vs. the previous version:
//  * centers were written to i_cluster_centers[i] (the epoch index) instead
//    of [j] (the cluster index);
//  * the updated centers were never copied back to the device, so every
//    epoch's membership update used the initial centers;
//  * the multi-pass reduction relaunched the kernels on the raw membership
//    array each pass (ignoring the partial results) — replaced by one kernel
//    pass plus a host-side accumulation of the per-block partials;
//  * the reduction scratch buffers were leaked.
__host__ void fcm_step(float *i_image, float *i_membership, float *i_cluster_centers, int rows, int cols, int T, int i_num_clutsers, int i_m, int threads_per_block, float *out_membership) {
    const int n_pixels = rows * cols;

    // One thread per (pixel, cluster) entry for the membership kernel,
    // one thread per pixel for the center reductions.
    const unsigned int blks_m = 1 + (n_pixels * i_num_clutsers - 1) / threads_per_block;
    const unsigned int blks_c = 1 + (n_pixels - 1) / threads_per_block;
    const unsigned int shared_size_c = threads_per_block * sizeof(float);

    // Device copies of the image, membership matrix and centers.
    float *d_image, *d_membership, *d_cluster_centers;
    cudaMalloc((void**)&d_image, n_pixels * sizeof(float));
    cudaMalloc((void**)&d_membership, n_pixels * i_num_clutsers * sizeof(float));
    cudaMalloc((void**)&d_cluster_centers, i_num_clutsers * sizeof(float));
    cudaMemcpy(d_image, i_image, n_pixels * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_membership, i_membership, n_pixels * i_num_clutsers * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_cluster_centers, i_cluster_centers, i_num_clutsers * sizeof(float), cudaMemcpyHostToDevice);

    // Per-block partial sums for the center update.
    float *d_partial_num, *d_partial_den;
    cudaMalloc((void**)&d_partial_num, blks_c * sizeof(float));
    cudaMalloc((void**)&d_partial_den, blks_c * sizeof(float));
    float *h_partial_num = new float[blks_c];
    float *h_partial_den = new float[blks_c];

    for (int t = 0; t < T; ++t) {
        update_membership_kernel<<<blks_m, threads_per_block>>>(d_image, d_cluster_centers, d_membership, rows, cols, i_num_clutsers, i_m);

        for (int j = 0; j < i_num_clutsers; ++j) {
            update_centers_numerator_kernel<<<blks_c, threads_per_block, shared_size_c>>>(d_image, d_membership, d_cluster_centers, rows, cols, i_num_clutsers, i_m, d_partial_num, j);
            update_centers_denominator_kernel<<<blks_c, threads_per_block, shared_size_c>>>(d_image, d_membership, d_cluster_centers, rows, cols, i_num_clutsers, i_m, d_partial_den, j);
            cudaMemcpy(h_partial_num, d_partial_num, blks_c * sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(h_partial_den, d_partial_den, blks_c * sizeof(float), cudaMemcpyDeviceToHost);
            float numerator = 0.0f, denominator = 0.0f;
            for (unsigned int b = 0; b < blks_c; ++b) {
                numerator += h_partial_num[b];
                denominator += h_partial_den[b];
            }
            i_cluster_centers[j] = numerator / denominator;
        }
        // Push the updated centers back so the next epoch actually sees them.
        cudaMemcpy(d_cluster_centers, i_cluster_centers, i_num_clutsers * sizeof(float), cudaMemcpyHostToDevice);
    }

    cudaMemcpy(out_membership, d_membership, n_pixels * i_num_clutsers * sizeof(float), cudaMemcpyDeviceToHost);

    delete[] h_partial_num;
    delete[] h_partial_den;
    cudaFree(d_partial_num);
    cudaFree(d_partial_den);
    cudaFree(d_image);
    cudaFree(d_membership);
    cudaFree(d_cluster_centers);
}

// Retained for interface compatibility: originally intended to fuse the
// membership and center updates into a single kernel; never implemented.
__global__ void fcm_step_kernel(float *i_image, float *i_membership, float *i_cluster_centers, int rows, int cols, int i_num_clutsers, int i_m) {
}

// Hard assignment: each pixel receives the index of the cluster with the
// highest membership value (ties resolved in favour of the last cluster).
__host__ void calculate_final_cluster(float *i_membership, int *i_final_cluster, int i_num_clutsers, int i_rows, int i_cols) {
    for (int i = 0; i < i_rows; ++i) {
        for (int j = 0; j < i_cols; ++j) {
            float best = -9999;
            for (int k = 0; k < i_num_clutsers; ++k) {
                float m = i_membership[(k * i_rows * i_cols) + j * i_rows + i];
                if (m >= best) {
                    best = m;
                    i_final_cluster[j * i_rows + i] = k;
                }
            }
        }
    }
}
23,690
#define BLOCK_SIZE 16

// Tiled (shared-memory) matrix multiply: C = A * B.
// NOTE(review): all three matrices are addressed with the single stride
// `width` (the host wrapper passes width_A and uses it for B and C too), so
// only square inputs were ever supported — confirm callers pass square
// matrices. Guarded loads now let `width` be any size, not only a multiple
// of BLOCK_SIZE.
__global__ void MatMulKernel(float* A, float* B, float* C, int width)
{
    __shared__ float Ads[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bds[BLOCK_SIZE][BLOCK_SIZE];

    int tx = threadIdx.x, ty = threadIdx.y;
    int row = blockIdx.y * BLOCK_SIZE + ty;
    int col = blockIdx.x * BLOCK_SIZE + tx;

    float acc = 0.0f;
    // BUG FIX: width/BLOCK_SIZE dropped the remainder tile; ceil-divide.
    int numTiles = (width + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for (int m = 0; m < numTiles; m++)
    {
        int aCol = m * BLOCK_SIZE + tx;
        int bRow = m * BLOCK_SIZE + ty;
        // Out-of-range tile entries load 0 so they do not affect the sum.
        Ads[ty][tx] = (row < width && aCol < width) ? A[row * width + aCol] : 0.0f;
        Bds[ty][tx] = (bRow < width && col < width) ? B[bRow * width + col] : 0.0f;
        __syncthreads();   // tile fully loaded before it is consumed

        for (int k = 0; k < BLOCK_SIZE; k++)
            acc += Ads[ty][k] * Bds[k][tx];
        __syncthreads();   // tile fully consumed before it is overwritten
    }

    if (row < width && col < width)
        C[row * width + col] = acc;
}

// Host wrapper: copies A and B to the device, runs the tiled kernel, copies
// C back into the caller's buffer.
// BUG FIX: the previous version allocated and copied sizeof(A) bytes — the
// size of the POINTER (typically 8 bytes), not the matrix — so almost none
// of the data ever reached the device.
void MatMul(const float* A, const float* B, float* C, int width_A, int height_A, int width_B, int height_B)
{
    size_t size_A = (size_t)width_A * height_A * sizeof(float);
    size_t size_B = (size_t)width_B * height_B * sizeof(float);
    size_t size_C = (size_t)height_A * width_B * sizeof(float);

    float* d_A;
    cudaMalloc(&d_A, size_A);
    cudaMemcpy(d_A, A, size_A, cudaMemcpyHostToDevice);

    float* d_B;
    cudaMalloc(&d_B, size_B);
    cudaMemcpy(d_B, B, size_B, cudaMemcpyHostToDevice);

    float* d_C;
    cudaMalloc(&d_C, size_C);

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-divide so partial tiles at the right/bottom edges are covered.
    dim3 dimGrid((width_B + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (height_A + BLOCK_SIZE - 1) / BLOCK_SIZE);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, width_A);

    cudaMemcpy(C, d_C, size_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
23,691
#include <stdio.h>

// Write each element's own global thread index into the array.
__global__ void kernel(int* array)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    array[index] = index;
}

// Fill a 1024-element device array with indices, copy it back and print it.
int main()
{
    const int num_element = 1024;
    const size_t bytes = num_element * sizeof(int);

    int* host_array = (int*)malloc(bytes);
    int* device_array;
    cudaMalloc((void**)&device_array, bytes);

    // 1024 elements / 128 threads per block = 8 blocks; divides evenly, so
    // no bounds guard is needed in the kernel.
    const int block_size = 128;
    kernel<<<num_element / block_size, block_size>>>(device_array);

    cudaMemcpy(host_array, device_array, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < num_element; i++)
        printf("%d ", host_array[i]);
    printf("\n");

    free(host_array);
    cudaFree(device_array);
    return 0;
}
23,692
#include "includes.h"

// GPU grayscale conversion (implements the TO-DO #4.2 stub): one thread per
// pixel on a 2-D grid.
// NOTE(review): assumes `image` stores 3 interleaved float channels per pixel
// and `image_out` one float per pixel, with the same channel order and luma
// weights as the CPU reference in this lab — confirm against cpu_grayscale().
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Grid may overshoot the image; guard every access.
    if (x < width && y < height)
    {
        float *pixel = &image[(y * width + x) * 3];
        // ITU-R BT.709 luma weights, channel order assumed B, G, R.
        image_out[y * width + x] = pixel[0] * 0.0722f +  // B
                                   pixel[1] * 0.7152f +  // G
                                   pixel[2] * 0.2126f;   // R
    }
}
23,693
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>   // BUG FIX: std::cout was used while relying on a transitive include
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>

// Fill a 200-element host vector with random ints, sum it on the GPU with a
// Thrust reduction, and print the total.
int main2(void)
{
    //@@ generate random data serially
    thrust::host_vector<int> h_vec(200);
    std::generate(h_vec.begin(), h_vec.end(), rand);

    //@@ transfer to device and compute sum
    thrust::device_vector<int> d_vec = h_vec;   // host -> device copy
    int x = thrust::reduce(d_vec.begin(), d_vec.end(), 0, thrust::plus<int>());

    //@@ Display the sum
    std::cout << x << std::endl;
    return 0;
}
23,694
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/sort.h>

// Per-pixel Mandelbrot escape-iteration count.
// Pixel (x, y) maps to c = (x0 + x*xD, y0 - y*yD). iterations[x + y*nCols]
// receives the first iteration index at which |z| exceeds the escape radius
// 2, or -1 if the point never escapes within limitIter iterations.
__global__ void getIterationCounts(double x0, double y0, double xD, double yD, int nCols, int nRows, int limitIter, int* iterations)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= nCols || y >= nRows) return;   // grid may overshoot the image

    double zx = 0.0, zy = 0.0;
    double cx = x0 + (double)x * xD;
    double cy = y0 - (double)y * yD;
    int escIter = -1;
    for (int i = 0; i < limitIter; i++) {
        double zxNew = (zx * zx) - (zy * zy) + cx;
        double zyNew = (2.0 * zx * zy) + cy;
        // Escape radius is 2, so compare against its square.
        if ((zxNew * zxNew + zyNew * zyNew) > 4.0) { escIter = i; break; }
        zx = zxNew;
        zy = zyNew;
    }
    iterations[x + y * nCols] = escIter;
}

// Blockwise min or max of inputArray; negative inputs are clamped to 0
// (matching the -1 "never escaped" sentinel from getIterationCounts).
// One result per block is written to blockwiseExtrema.
// Dynamic shared memory required: blockDim.x * sizeof(T).
//
// BUG FIXES vs. the previous version:
//  * __syncthreads() sat inside a divergent branch (threads with
//    localIdx < stride skipped it) — undefined behaviour; all barriers are
//    now reached by every thread in the block;
//  * out-of-range threads returned before the barriers (same problem);
//  * shared memory was declared float regardless of the template type T.
template<class T>
__global__ void getBlockwiseExtrema(const T* inputArray, T* blockwiseExtrema, int inputLength, int numBlocks, bool min)
{
    extern __shared__ unsigned char smemRaw[];
    T* vals = reinterpret_cast<T*>(smemRaw);

    int globalIdx = threadIdx.x + blockIdx.x * blockDim.x;
    int localIdx = threadIdx.x;
    int numBlockThreads = (blockIdx.x != numBlocks - 1)
        ? blockDim.x
        : (inputLength - ((numBlocks - 1) * blockDim.x));
    bool active = (globalIdx < inputLength);

    if (active) {
        T v = inputArray[globalIdx];
        vals[localIdx] = (v < 0) ? (T)0 : v;   // clamp the -1 sentinel
    }
    __syncthreads();

    // Hillis-Steele-style sweep: after the loop the last active element
    // holds the extremum of the whole block.
    for (int stride = 1; stride < numBlockThreads; stride *= 2) {
        T combined = (T)0;
        if (active) {
            combined = vals[localIdx];
            if (localIdx >= stride) {
                T left = vals[localIdx - stride];
                combined = min ? (left < combined ? left : combined)
                               : (left > combined ? left : combined);
            }
        }
        __syncthreads();                 // everyone reads before anyone writes
        if (active) vals[localIdx] = combined;
        __syncthreads();
    }

    if (localIdx == numBlockThreads - 1)
        blockwiseExtrema[blockIdx.x] = vals[localIdx];
}

// Map an escape-iteration count to a BGR colour via HSV.
// Hue is interpolated between minHue and maxHue over [minIter, maxIter];
// pixels that never escaped (count < 0) are left untouched (black).
__global__ void colorImage(unsigned char* image, int* iterations, int nCols, int inputSize, double minIter, double maxIter, double minHue, double maxHue)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int globalIdx = x + y * nCols;
    // BUG FIX: threads with x >= nCols used to alias pixels of the next row
    // (x + y*nCols wraps into row y+1); guard the column explicitly.
    if (x >= nCols || globalIdx >= inputSize) return;
    if (iterations[globalIdx] < 0) return;   // never escaped -> stays black

    double H = minHue + ((iterations[globalIdx] - minIter) / (maxIter - minIter)) * (maxHue - minHue);
    double S = 1;
    double V = 1;
    while (H < 0)    { H += 360; }
    while (H >= 360) { H -= 360; }

    double R, G, B;
    if (V <= 0) {
        R = 0; G = 0; B = 0;
    } else if (S <= 0) {
        R = G = B = V;
    } else {
        double hf = H / 60.0;
        int i = (int)floor(hf);
        double f = hf - i;
        double pv = V * (1 - S);
        double qv = V * (1 - S * f);
        double tv = V * (1 - S * (1 - f));
        switch (i) {
        // Red dominant
        case 0:  R = V;  G = tv; B = pv; break;
        // Green dominant
        case 1:  R = qv; G = V;  B = pv; break;
        case 2:  R = pv; G = V;  B = tv; break;
        // Blue dominant
        case 3:  R = pv; G = qv; B = V;  break;
        case 4:  R = tv; G = pv; B = V;  break;
        // Red dominant
        case 5:  R = V;  G = pv; B = qv; break;
        // Guards against floating-point overshoot at the wrap boundaries.
        case 6:  R = V;  G = tv; B = pv; break;
        case -1: R = V;  G = pv; B = qv; break;
        default: R = G = B = V; break;   // unreachable; fall back to grey
        }
    }

    // Channels are stored B, G, R. (R/G/B are in [0,1] here, so the old
    // post-conversion `> 255` checks on unsigned char were dead code.)
    image[3 * globalIdx]     = (unsigned char)(B * 255.0);
    image[3 * globalIdx + 1] = (unsigned char)(G * 255.0);
    image[3 * globalIdx + 2] = (unsigned char)(R * 255.0);
}

// Render the Mandelbrot set into h_image (nCols x nRows, 3 bytes per pixel,
// BGR): compute iteration counts on the device, colour them, copy back.
void render(unsigned char* h_image, double x0, double y0, double xD, double yD, int nCols, int nRows, int limitIter)
{
    const int pointsPerBlock = 32;

    // 2-D launch covering the image, rounding block counts up.
    dim3 threadDims(pointsPerBlock, pointsPerBlock);
    int xBlocks = (nCols + pointsPerBlock - 1) / pointsPerBlock;
    int yBlocks = (nRows + pointsPerBlock - 1) / pointsPerBlock;
    dim3 blockDims(xBlocks, yBlocks);

    int* d_iterations;
    cudaMalloc(&d_iterations, nCols * nRows * sizeof(int));
    getIterationCounts<<<blockDims, threadDims>>>(x0, y0, xD, yD, nCols, nRows, limitIter, d_iterations);
    cudaDeviceSynchronize();
    printf("Calculated iteration counts\n");

    // NOTE(review): the GPU min/max reduction over the iteration counts was
    // disabled upstream in favour of the fixed [0, limitIter] range below;
    // the scratch buffer is kept for when it is re-enabled.
    int nThreads = threadDims.x * threadDims.y;
    // BUG FIX: the round-up test had its operands reversed
    // (`nThreads % (nCols * nRows)`); use a plain ceil-divide instead.
    int nBlocks = (nCols * nRows + nThreads - 1) / nThreads;
    int* blockwiseExtrema;
    cudaMalloc(&blockwiseExtrema, nBlocks * sizeof(int));

    // Manual override of min / max.
    double minIter = 0;
    double maxIter = limitIter;

    unsigned char* d_image;
    cudaMalloc(&d_image, 3 * nCols * nRows * sizeof(unsigned char));
    cudaMemset(d_image, 0, 3 * nCols * nRows * sizeof(unsigned char));
    printf("Allocated memory for image\n");

    colorImage<<<blockDims, threadDims>>>(d_image, d_iterations, nCols, nCols * nRows, minIter, maxIter, 0.0, 200.0);
    cudaDeviceSynchronize();
    printf("Colored image\n");

    cudaMemcpy(h_image, d_image, 3 * nCols * nRows * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    printf("Moved image to host\n");

    // BUG FIX: all three device buffers were leaked.
    cudaFree(d_iterations);
    cudaFree(blockwiseExtrema);
    cudaFree(d_image);
}
23,695
#include "includes.h" __global__ void Crop2DKernel(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int size, int leftMargin, int topMargin, float fillValue) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; if (id < size) { int inputX = id % outputWidth - leftMargin; int inputY = id / outputWidth - topMargin; if (inputX >= 0 && inputX < inputWidth && inputY >= 0 && inputY < inputHeight) output[id] = input[inputX + inputY * inputWidth]; else output[id] = fillValue; } }
23,696
#include <stdio.h> #include <stdlib.h> #define DEBUG __global__ void add(const int* x, const int* y, int* z, const int n) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < n) z[index] = x[index] + y[index]; } void checkCudaError(const char* filename, const int linenum) { cudaThreadSynchronize(); cudaError error = cudaGetLastError(); if(error != cudaSuccess){ printf("File: %s, line: %d, CUDA error: %s\n", __FILE__, __LINE__, cudaGetErrorString(error)); exit(-1); } } void addVec(const int* x, const int*y, int* z, const int N) { const int THREAD_PER_BLOCK = 512; const int nByte = N * sizeof(int); int *dev_x, *dev_y, *dev_z; cudaMalloc((void**)(&dev_x), nByte); cudaMalloc((void**)(&dev_y), nByte); cudaMalloc((void**)(&dev_z), nByte); cudaMemcpy(dev_x, x, nByte, cudaMemcpyHostToDevice); cudaMemcpy(dev_y, y, nByte, cudaMemcpyHostToDevice); add<<<(N + THREAD_PER_BLOCK - 1)/THREAD_PER_BLOCK, THREAD_PER_BLOCK>>>(dev_x, dev_y, dev_z, N); cudaMemcpy(z, dev_z, nByte, cudaMemcpyDeviceToHost); cudaFree(dev_x); cudaFree(dev_y); cudaFree(dev_z); } void random_int(int* array, int N) { if(array){ for(int i = 0; i < N; ++i) array[i] = rand()/1000; } } int main(void) { const int N = 512*512; const int nByte = sizeof(int) * N; int *x = (int*)malloc(nByte); int *y = (int*)malloc(nByte); int *z = (int*)malloc(nByte); random_int(x, N); random_int(y, N); addVec(x, y, z, N); free(x); free(y); free(z); return 0; }
23,697
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define W 25 #define H 25 __global__ void kernel(int* a, size_t pitch) { int x = threadIdx.x; int y = threadIdx.y; int *row_a = (int*)((char*)a + y * pitch); // Clear to zero row_a[x] = 0; } int main() { //int **a; int *dev_a; size_t pitch; dim3 threads(W, H); // allocate memory for array a // a = (int**)malloc(H * sizeof(int*)); int a[H][W]; for (int i = 0; i < H; i++) { //a[i] = (int*)malloc(W * sizeof(int)); } // initialize array a for (int i = 0; i < H; i++) { for (int j = 0; j < W; j++) { a[i][j] = 1; } } for (int i = 0; i < H; i++) { for (int j = 0; j < W; j++) { printf("%d ", a[i][j]); } printf("\n"); } cudaMallocPitch((void**)&dev_a, &pitch, W * sizeof(int), H); cudaMemcpy2D(dev_a, pitch, a, W * sizeof(int), W * sizeof(int), H, cudaMemcpyHostToDevice); kernel<<<1, threads>>>(dev_a, pitch); cudaMemcpy2D(a, W * sizeof(int), dev_a, pitch, W * sizeof(int), H, cudaMemcpyDeviceToHost); for (int i = 0; i < H; i++) { for (int j = 0; j < W; j++) { printf("%d ", a[i][j]); } printf("\n"); } printf("\n"); return 0; }
23,698
#include "cuda.h" /** * ref_dev B,N,D * query_dev B,M,D * ind_dev B,N,K * out_feat_dev B,N,K,D */ __global__ void gather_nn_kernel( float* out_feat_dev, float* ref_dev, float* query_dev, long* ind_dev, int64_t B, int64_t N, int64_t k, int64_t Dim, int64_t M ) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= B*N) return; unsigned int iB = index / N; unsigned int iN = index % N; long *ind_ptr = ind_dev + iB * N * k + iN * k; float *ref_ptr = ref_dev + iB * N * Dim; float *query_ptr = query_dev + iB * M * Dim; float *out_feat_ptr = out_feat_dev + iB * N * k * Dim + iN * k * Dim; for (int64_t i = 0; i < k; i++) { auto ind = *(ind_ptr + i); auto ref = ref_ptr + iN * Dim; auto query = query_ptr + ind * Dim; auto out_feat = out_feat_ptr + i * Dim; for (int64_t j = 0; j < Dim; j++) out_feat[j] = query[j] - ref[j]; } __syncthreads(); } void gather_nn_dev( float* ref_dev, float* query_dev, long* ind_dev, int64_t B, int64_t N, int64_t k, int64_t Dim, int64_t M, float* out_feat_dev ) { int64_t all = B * N; dim3 blockSize; if (all <= 32) blockSize.x = 32; else if (all <= 64) blockSize.x = 64; else if (all <= 512) blockSize.x = 128; else if (all <= 1024) blockSize.x = 256; else blockSize.x = 512; dim3 gridSize((all + blockSize.x - 1) / blockSize.x); gather_nn_kernel<<<gridSize, blockSize>>>( out_feat_dev, ref_dev, query_dev, ind_dev, B, N, k, Dim, M ); cudaDeviceSynchronize(); }
23,699
#include "includes.h" __global__ void _bcnn_dropout_layer_kernel(float *input, int size, float *rand, float prob, float scale) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id < size) { input[id] = (rand[id] < prob) ? 0 : input[id] * scale; } }
23,700
#include "includes.h" /** * Project TACO: Parallel ACO algorithm for TSP * 15-418 Parallel Algorithms - Final Project * Ivan Wang, Carl Lin */ #define MAX_THREADS 128 __device__ static inline int toIndex(int i, int j) { return i * MAX_CITIES + j; } __global__ void updateTrailsAtomic(float *phero, int *paths, float *tourLengths) { int antId = blockIdx.x; int from, to; for (int i = 0; i < MAX_CITIES; i++) { from = paths[toIndex(antId, i)]; if (i < MAX_CITIES - 1) { to = paths[toIndex(antId, i+1)]; } else { to = paths[toIndex(antId, 0)]; } if (from < to) { int tmp = from; from = to; to = tmp; } atomicAdd(&phero[toIndex(from, to)], QVAL / tourLengths[antId]); } }