serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
14,401
// Scaled product: c[i] = alpha * a[0] * b[i].
// Note that only the FIRST element of `a` is used as an extra scalar
// factor -- presumably intentional given the author's note below, but
// worth confirming against callers. There is no bounds guard, so the
// launch must supply exactly one thread per element of b/c.
extern "C" {
__global__ void CalpahGax(const double alpha, const double *a, const double *b, double *c)
{
    // Flat global thread index: one thread per output element.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = alpha*a[0]*b[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
}
}
14,402
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <ctime>

using namespace std;

#define THREADS 8
#define BLOCKS 8
// Parenthesized so the macro survives surrounding operators
// (e.g. "x % n" would otherwise expand to "x % THREADS * BLOCKS").
#define n (THREADS * BLOCKS)

int *d_arr, *h_arr;

// One compare-exchange step of the bitonic sorting network. Each thread
// owns index idx, finds its partner idx^j, and swaps so that runs of
// length k become bitonic; the comparison direction is chosen by idx&k.
__global__ void bitonicsort(int *d_arr, int j, int k)
{
    unsigned int idx, ixj;
    idx = threadIdx.x + blockDim.x * blockIdx.x;
    ixj = idx ^ j;
    if (ixj > idx) { // only the lower partner of each pair does the swap
        if ((idx&k) == 0) { // ascending half of the bitonic sequence
            if (d_arr[idx] > d_arr[ixj]) {
                int temp = d_arr[idx];
                d_arr[idx] = d_arr[ixj];
                d_arr[ixj] = temp;
            }
        }
        if ((idx&k) != 0) { // descending half
            if (d_arr[idx] < d_arr[ixj]) {
                int temp = d_arr[idx];
                d_arr[idx] = d_arr[ixj];
                d_arr[ixj] = temp;
            }
        }
    }
}

// Sorts h_arr (n ints, N bytes) ascending on the device, printing the
// array before and after. Takes ownership of h_arr and frees it.
void runBitonicsort(int *h_arr, size_t N)
{
    dim3 blocks(BLOCKS,1);
    dim3 threads(THREADS,1);
    cout << "Unsorted Array: ";
    for (int i = 0; i < n; i++) cout << h_arr[i] << " ";
    cout << endl;
    cudaMalloc((void **) &d_arr, N);
    cudaMemcpy(d_arr, h_arr, N, cudaMemcpyHostToDevice);
    // Outer loop doubles the bitonic run length k; the inner loop HALVES
    // the compare distance j (k/2, k/4, ..., 1), as required by the
    // bitonic network. BUG FIX: the original decremented j by 1 per
    // step, contradicting its own "move right by 1 bit" comment and the
    // standard network structure. Each launch acts as a global barrier.
    for (int k = 2; k <= n; k <<= 1) {
        for (int j = k >> 1; j > 0; j >>= 1) {
            bitonicsort<<<blocks, threads>>>(d_arr, j, k);
        }
    }
    cudaDeviceSynchronize();
    cudaMemcpy(h_arr, d_arr, N, cudaMemcpyDeviceToHost);
    cout << "Sorted Array: ";
    for (int i = 0; i < n; i++) cout << h_arr[i] << " ";
    cout << endl;
    cudaFree(d_arr);
    free(h_arr);
}

// Fills an n-element array with random values in [0, 99] and sorts it
// on the GPU (runBitonicsort frees the host buffer).
int main(void)
{
    srand(time(0));
    size_t N = n * sizeof(int);
    h_arr = (int*) malloc(N);
    cout << "Filling array with random numbers..." << endl;
    for (int i = 0; i < n; i++) h_arr[i] = rand() % 100;
    runBitonicsort(h_arr, N);
    return 0;
}
14,403
#include "includes.h"

// Ising-style update step: each thread processes a tile of the n x n
// spin grid gpuG. For every moment it sums the weighted influence of its
// 24 neighbours (5x5 window minus the centre, toroidal boundaries) and
// writes the new spin into gpuTempGrid. *flag is set to 1 whenever any
// spin changes sign, letting the host detect convergence.
__global__ void cudaKernel(int n, double* gpuWeights, int* gpuG, int* gpuTempGrid, int *flag)
{
    // Moment's coordinates in the grid.
    int momentCol = blockIdx.x*blockDim.x + threadIdx.x;
    int momentRow = blockIdx.y*blockDim.y + threadIdx.y;
    int gridRowIdx, gridColIdx;

    // Total neighbourhood influence on the current moment.
    double weightFactor = 0.0;

    // Grid-stride loops in both dimensions: any launch geometry covers
    // the whole grid.
    for(int i=momentRow; i<n; i+=blockDim.y*gridDim.y) {
        for(int j=momentCol; j<n; j+=blockDim.x*gridDim.x) {
            weightFactor = 0.0;
            // Accumulate the influence of the 24 neighbours.
            for(int weightsRow=0; weightsRow<5; weightsRow++) {
                for(int weightsCol=0; weightsCol<5; weightsCol++) {
                    if(weightsCol==2 && weightsRow==2) continue; // skip the moment itself
                    // Neighbour coordinates in G, wrapped with modulus to
                    // satisfy the periodic boundary conditions.
                    gridRowIdx = (weightsRow - 2 + i + n) % n;
                    gridColIdx = (weightsCol - 2 + j + n) % n;
                    weightFactor += gpuG[gridRowIdx * n + gridColIdx] * gpuWeights[weightsRow*5+weightsCol];
                }
            }
            // Update the moment's spin; raise *flag on any sign transition.
            if(weightFactor < 0.0001 && weightFactor > -0.0001) {
                // Influence effectively zero: keep the previous spin.
                gpuTempGrid[n*i+j] = gpuG[n*i+j];
            }else if(weightFactor > 0.0) {
                gpuTempGrid[n*i+j] = 1;
                if (gpuG[n*i+j] == -1) {
                    *flag = 1;
                }
            }else {
                gpuTempGrid[n*i+j] = -1;
                // BUG FIX: a transition TO -1 occurs when the old spin was
                // +1. The original tested for -1 here, so downward flips
                // never raised the convergence flag.
                if (gpuG[n*i+j] == 1) {
                    *flag = 1;
                }
            }
        }
    }
}
14,404
#include "includes.h"

// One-atomic-per-thread histogram: thread t folds d_in[t] into bin
// d_in[t] % BINS_COUNT of d_out. d_out must be zeroed beforehand.
// NOTE(review): there is no guard on tid, so the launch must supply
// exactly one thread per input element or d_in is read out of bounds.
__global__ void histogramSimple(int* d_out, const int* d_in, const int BINS_COUNT)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    // Modulo folds arbitrary values into the bin range; atomicAdd makes
    // concurrent increments of the same bin safe.
    atomicAdd(&(d_out[d_in[tid] % BINS_COUNT]), 1);
}
14,405
// Element-wise matrix summation computed on both CPU and GPU, with timing.
// (header comment translated from Chinese)
// #include "../common.h"
// only for linux
#include "sys/time.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Wall-clock time in seconds (microsecond resolution).
double cpuSecond(){
    struct timeval tp;
    gettimeofday(&tp,NULL);
    return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}

// Fills ip with mildly-random ascending values.
void initialInt(int *ip, int size){
    for (int i=0; i<size; i++){
        ip[i] = i + rand()%10;
    }
}

// Debug helper: prints an nx-by-ny int matrix row by row.
void printMatrix(int *C, const int nx, const int ny){
    int *ic = C;
    printf("\nMatrix: (%d.%d)\n", nx, ny);
    for (int iy=0; iy<ny; iy++){
        for (int ix=0; ix<nx; ix++){
            printf("%3d", ic[ix]);
        }
        ic += nx;
        printf("\n");
    }
    printf("\n");
}

// 2D grid / 2D block kernel: one thread per matrix element, guarded for
// grids that round up past the matrix edge.
__global__ void sumMatrixOnGPU2D(int *MatA, int *MatB, int *MatC, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    if(ix < nx && iy < ny)
        MatC[idx] = MatA[idx] + MatB[idx];
}

// CPU reference implementation of the same element-wise sum.
void sumMatrixOnHost(int *MatA, int *MatB, int *MatC, int nx, int ny){
    int *ia = MatA;
    int *ib = MatB;
    int *ic = MatC;
    for (int iy=0; iy<ny; iy++){
        for (int ix=0; ix<nx; ix++){
            ic[ix] = ia[ix] + ib[ix];
        }
        ia += nx;
        ib += nx;
        ic += nx;
    }
}

// Element-by-element comparison of GPU and CPU results; reports the
// first mismatch.
bool checkResult(int *MatC, int *h_C, int nxy){
    for (int i=0; i<nxy; i++){
        if (MatC[i] != h_C[i]){
            printf("Matc[%d]: %d != h_C[%d]: %d\n", i, MatC[i], i, h_C[i]);
            return false;
        }
    }
    return true;
}

// Main: parses matrix dimensions from argv, runs and times both the host
// and device versions, and verifies they agree.
int main(int argc, char* argv[]){
    if(argc!=3) {
        printf("Usage : %s maxtrix_dim_x maxtrix_dim_y\n", argv[0]);
        return 0;
    }
    int nx = atoi(argv[1]);
    int ny = atoi(argv[2]);
    if(nx<1 || ny<1) {
        printf("Fatal error : nx <= 1 or ny <= 1!\n");
        return 0;
    }
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(int);
    int *h_A, *h_B, *h_C, *h_MatC;
    h_A = (int *)malloc(nBytes);
    h_B = (int *)malloc(nBytes);
    h_C = (int *)malloc(nBytes);    // CPU result
    h_MatC = (int *)malloc(nBytes); // GPU result copied back
    initialInt(h_A, nxy);
    initialInt(h_B, nxy);
    memset(h_C, 0, nBytes);
    memset(h_MatC, 0, nBytes);

    // CPU pass, timed.
    double iStart = cpuSecond();
    sumMatrixOnHost(h_A, h_B, h_C, nx, ny);
    double iElaps = cpuSecond() - iStart;
    printf("sumMatrixOnHost elapsed %f sec\n", iElaps);

    // Device buffers and host-to-device transfers.
    int *d_MatA, *d_MatB, *d_MatC;
    cudaMalloc((void **)&d_MatA, nBytes);
    cudaMalloc((void **)&d_MatB, nBytes);
    cudaMalloc((void **)&d_MatC, nBytes);
    cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice);

    // Up-to-32x32 blocks (clamped to the matrix size), grid rounded up.
    int dimx = nx<32? nx:32;
    int dimy = ny<32? ny:32;
    dim3 block(dimx, dimy);
    dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);

    // GPU pass, timed. The launch is asynchronous, so synchronize before
    // reading the clock (translated from the original Chinese note).
    iStart = cpuSecond();
    sumMatrixOnGPU2D <<< grid, block >>>(d_MatA, d_MatB, d_MatC, nx, ny);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("sumMatrixOnGPU2D <<<(%d, %d), (%d, %d)>>> elapsed %f sec\n", grid.x, grid.y, block.x, block.y, iElaps);

    cudaMemcpy(h_MatC, d_MatC, nBytes, cudaMemcpyDeviceToHost);
    if (checkResult(h_MatC, h_C, nxy)){
        printf("Arrays match!\n");
    }
    else{
        printf("Arrays don't match!\n");
    }

    free(h_A);
    free(h_B);
    free(h_C);
    free(h_MatC); // BUG FIX: h_MatC was leaked in the original
    cudaFree(d_MatA);
    cudaFree(d_MatB);
    cudaFree(d_MatC);
    cudaDeviceReset();
    return 0;
}
14,406
#include <stdio.h>
#include <stdlib.h>

using namespace std;

// Prints a greeting to standard output and exits with success.
int main(int argc, char **argv){
    fputs("Hello World!\n", stdout);
    return 0;
}
14,407
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Minimal device-side printf smoke test.
__global__ void test(){
    printf("Hi Cuda World");
}

int main( int argc, char** argv )
{
    // Single-thread launch. cudaDeviceSynchronize() both waits for the
    // kernel and flushes the device printf buffer to the host.
    test<<<1,1>>>();
    cudaDeviceSynchronize();
    return 0;
}
14,408
#include "includes.h"

// Naive dense matrix multiply C = A * B for square n x n matrices stored
// row-major; one thread per output element.
__global__ void matrixMulCUDA2(float *C, float *A, float *B, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // BUG FIX: bounds guard added. Grids are usually rounded up to whole
    // blocks, so threads past the matrix edge previously read and wrote
    // out of bounds.
    if (row >= n || col >= n)
        return;

    float C_val = 0;
    for (int k = 0; k < n; ++k) {
        float A_elem = A[row * n + k];
        float B_elem = B[k * n + col];
        C_val += A_elem * B_elem;
    }
    C[row*n + col] = C_val;
}
14,409
#include "includes.h"

// Copies a d3-by-d4 2D slab `xx` into slice i2 of the 3D array `xxx`
// (leading dimensions d0 x d1; element (i, j) of slice s lives at
// i + j*d0 + s*d0*d1). One thread per (i, j) element, guarded on the
// SOURCE extents d3/d4.
// NOTE(review): assumes d3 <= d0 and d4 <= d1 so destination indices
// stay in range -- confirm against callers.
__global__ void saveTheWhalesXX ( const int d0, const int d1, const int i2, float *xxx, const int d3, const int d4, const float *xx )
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if ( i < d3 && j < d4 ) {
        xxx[i+j*d0+i2*d0*d1] = xx[i+j*d3];
    }
}
14,410
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <time.h>

// NOTE (translated from the original Italian): the shared-memory
// algorithm presented in class does NOT work when the stencil radius k
// exceeds the number of threads that actually do work in a block (e.g.
// only 2 working threads but 3 left-hand pad cells to fill), nor when
// the number of blocks is smaller than the radius.
//
// 1-D stencil of a vector, computed both without and with shared memory.
// Input: n = vector size, t = threads per block (block size),
//        k = stencil radius.

using namespace std;

// Reference CPU implementation: b[i] = sum of a[i-k .. i+k], clipped at
// the array edges.
__host__ void stencilCPU(int *a,int *b,int n,int k){
    for(int i=0;i<n;i++){
        int v=0;
        for(int j=-k;j<=k;j++)
            if(i+j>=0 && i+j<n)
                v+=a[i+j];
        b[i]=v;
    }
}

// Prints the array preceded by a separator line.
__host__ void stampaArray(int *a,int n){
    cout<<"-------------------------"<<endl;
    for(int i=0;i<n;i++)
        cout<<a[i]<<" ";
    cout<<endl;
}

// Fills the array with random values in [1, 10].
__host__ void inizializzaArray(int *a,int n){
    for(int i=0;i<n;i++)
        a[i]=1+rand()%10;
}

// Global-memory stencil: one thread per output element, guarded for a
// grid rounded up past n.
__global__ void stencilGPU(int *a,int *b,int n,int k){
    int iGlob = threadIdx.x + blockIdx.x * blockDim.x;
    if(iGlob>= n) return;
    b[iGlob]=0;
    for(int j=-k;j<=k;j++)
        if(iGlob+j>=0 && iGlob+j<n)
            b[iGlob]+=a[iGlob+j];
}

// Shared-memory stencil: each block stages its tile of `a` plus k halo
// cells on each side into shared memory, then sums from there.
__global__ void stencilGPU_CONSHAREDMEMORY (int *a,int *b,int n,int k){
    // Dynamically-sized shared buffer: its size (blockDim.x + 2*k ints)
    // is supplied as the third <<< >>> launch parameter. The `extern`
    // keyword is what makes the allocation dynamic; without it the array
    // could only be sized statically.
    extern __shared__ int sh_buffer[];
    // Index of the element this thread's stencil is centred on.
    int globId = threadIdx.x + blockIdx.x * blockDim.x;
    // Matching index in the padded shared buffer (k pad cells per side).
    int iPadd = threadIdx.x + k;
    /* if(globId >= n) return ; */
    // NOTE(review): the guard above is commented out (an early return
    // would diverge at __syncthreads below), so when n is not a multiple
    // of blockDim.x the load and final store below run past the end of
    // a/b -- confirm callers always use matching sizes.
    // Stage the centre element.
    sh_buffer[iPadd] = a[globId];
    // The first k threads of the block also fill the left and right pad
    // zones; cells with no counterpart in `a` (array edges) are zeroed.
    if(threadIdx.x < k){
        if(globId-k >= 0)
            sh_buffer[iPadd-k] = a[globId-k];
        else
            sh_buffer[iPadd-k] = 0;
        if(globId + blockDim.x <n)
            sh_buffer[iPadd + blockDim.x] = a[globId + blockDim.x];
        else
            sh_buffer[iPadd + blockDim.x] = 0;
    }
    // Barrier: the whole tile must be staged before anyone reads it.
    __syncthreads();
    // Sum the stencil window entirely from shared memory.
    int v=0;
    for(int j=-k;j<=k;j++)
        v+=sh_buffer[iPadd+j];
    b[globId]= v;
}

// Driver: reads n, block size and radius from argv (defaults 10/4/3),
// runs the CPU version and both GPU versions, printing each result.
int main(int argc,char *argv[]){
    srand((unsigned int)time(NULL));
    int n;
    dim3 sizeGriglia;
    dim3 sizeBlocco;
    int k; // stencil radius
    if(argc!=4){
        n=10;
        sizeBlocco.x=4; // 4 threads per block (1-D blocks)
        k = 3;
    }
    else{
        sscanf(argv[1],"%d",&n);
        sscanf(argv[2],"%d",& sizeBlocco.x);
        sscanf(argv[3],"%d",& k);
    }
    // Round the grid up so every element is covered.
    sizeGriglia.x = n / sizeBlocco.x ;
    if(n % sizeBlocco.x != 0)
        sizeGriglia.x ++;
    int *h_a = (int *)malloc(n*sizeof(int));
    inizializzaArray(h_a,n);
    stampaArray(h_a,n);
    int *risCPU= (int *)malloc(n*sizeof(int));
    stencilCPU(h_a,risCPU,n,k);
    stampaArray(risCPU,n);
    // Device global memory.
    int *d_a,*d_b;
    cudaMalloc(&d_a,n*sizeof(int));
    cudaMalloc(&d_b,n*sizeof(int));
    cudaMemcpy(d_a,h_a,n*sizeof(int),cudaMemcpyHostToDevice);
    // Launch the plain global-memory kernel.
    stencilGPU<<<sizeGriglia,sizeBlocco>>>(d_a,d_b,n,k);
    // Copy the result back to the host.
    int *ris = (int *)malloc(n*sizeof(int));
    cudaMemcpy(ris,d_b,n*sizeof(int),cudaMemcpyDeviceToHost);
    stampaArray(ris,n);
    // Output buffer for the second kernel (shared-memory version).
    int *d_b2;
    cudaMalloc(&d_b2,n*sizeof(int));
    // Per-block dynamic shared-memory size: the tile plus 2*k pad cells.
    int sizeBufferShared = (sizeBlocco.x + 2 * k ) * sizeof(int);
    stencilGPU_CONSHAREDMEMORY<<<sizeGriglia,sizeBlocco,sizeBufferShared>>>(d_a,d_b2,n,k);
    cudaThreadSynchronize();
    // Copy the results back.
    int *ris2= (int *)malloc(n*sizeof(int));
    cudaMemcpy(ris2,d_b2,n*sizeof(int),cudaMemcpyDeviceToHost);
    stampaArray(ris2,n);
}
14,411
#include "includes.h"

// One Jacobi relaxation step on a (block_size+2)^2 grid with a one-cell
// halo: every interior point becomes the DIVIDEBY5-scaled sum of itself
// and its four neighbours, written into new_temperature (DIVIDEBY5 is
// defined in includes.h). Indices start at 1 so the halo is read but
// never written.
__global__ void jacobiKernel(double* temperature, double* new_temperature, int block_size)
{
    int i = (blockDim.x * blockIdx.x + threadIdx.x) + 1; // +1 skips the left halo column
    int j = (blockDim.y * blockIdx.y + threadIdx.y) + 1; // +1 skips the top halo row
    if (i <= block_size && j <= block_size) {
        // 5-point stencil: left, right, up, down, centre.
        new_temperature[j * (block_size + 2) + i] = (temperature[j * (block_size + 2) + (i - 1)] + temperature[j * (block_size + 2) + (i + 1)] + temperature[(j - 1) * (block_size + 2) + i] + temperature[(j + 1) * (block_size + 2) + i] + temperature[j * (block_size + 2) + i]) * DIVIDEBY5;
    }
}
14,412
#include "includes.h"

// Computes the entropy contained in every control volume of the model
// atmosphere: entropy density Cp*ln(potT) times the cell volume, written
// to Entropy_d[id*nv+lev]. Grid mapping: x covers horizontal columns
// (id < num), blockIdx.y/gridDim.y covers the nv vertical levels.
// NOTE(review): Altitude_d, lonlat_d and func_r_d are unused here;
// `A` appears to be the planetary radius -- confirm against callers.
__global__ void CalcEntropy(double *Entropy_d, double *pressure_d, double *temperature_d, double Cp, double Rd, double A, double P_Ref, double *Altitude_d, double *Altitudeh_d, double *lonlat_d, double *areasT, double *func_r_d, int num, bool DeepModel)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int nv = gridDim.y;    // number of vertical levels == grid's y extent
    int lev = blockIdx.y;
    if (id < num) {
        double kappa = Rd / Cp; // exponent of the potential-temperature relation
        // Potential temperature referenced to P_Ref.
        double potT = temperature_d[id * nv + lev] * pow(P_Ref / pressure_d[id * nv + lev], kappa);
        double Sdens = Cp * log(potT); // entropy density
        // Control volume between the interfaces bounding this level.
        double zup, zlow, Vol;
        zup = Altitudeh_d[lev + 1] + A;
        zlow = Altitudeh_d[lev] + A;
        if (DeepModel) {
            // Spherical shell segment: area scaled by (r/A)^2, integrated in r.
            Vol = areasT[id] / pow(A, 2) * (pow(zup, 3) - pow(zlow, 3)) / 3;
        }
        else {
            // Shallow approximation: column area times layer thickness.
            Vol = areasT[id] * (zup - zlow);
        }
        // Total entropy in the control volume (original comment said
        // "energy"; the quantity stored is Sdens * Vol).
        Entropy_d[id * nv + lev] = Sdens * Vol;
    }
}
14,413
#include "includes.h"

// Zero-fills a (block_size+2) x (block_size+2) temperature grid,
// halo cells included. One thread per cell.
__global__ void initKernel(double* temperature, int block_size)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int width = block_size + 2; // grid edge including both halos

    // Threads past the padded edge do nothing.
    if (col >= width || row >= width)
        return;

    temperature[width * row + col] = 0.0;
}
14,414
// Smoke test for the CUDA allocator: grabs a tiny device buffer
// (room for two ints) and immediately releases it.
extern "C" void test_func()
{
    int *dev_a = NULL;
    const size_t nbytes = 2 * sizeof(int);

    // allocate the memory on the GPU (array of 2 ints)
    cudaMalloc( (void**)&dev_a, nbytes );
    cudaFree( dev_a );
}
14,415
#include "includes.h"

// Element-wise difference C = A - B over `size` elements. A grid-stride
// loop lets any launch configuration cover the whole array.
__global__ void MatrixSubtract(const float* A_elements, const float* B_elements, float* C_elements, const int size)
{
    const int step = blockDim.x * gridDim.x; // total threads in the grid
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += step) {
        C_elements[idx] = A_elements[idx] - B_elements[idx];
    }
}
14,416
// Kernel is declared up front so the extern "C" wrapper can reference it;
// the definition follows below.
__global__ void spotrf_batched_kernel(int n, int batch, float *dA);
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: launches the batched Cholesky kernel on `stream`.
// NOTE(review): launched with a 1x1x1 grid and block, so the entire
// batch is factorized by a single GPU thread -- correct but fully serial.
extern "C" void spotrf_batched(int n, int batch, float *dA, cudaStream_t stream)
{
    dim3 dimBlock(1, 1, 1);
    dim3 dimGrid(1, 1, 1);
    spotrf_batched_kernel<<<dimGrid, dimBlock, 0, stream>>>(n, batch, dA);
}
////////////////////////////////////////////////////////////////////////////////
// Unblocked in-place Cholesky factorization of `batch` N x N float
// matrices stored contiguously in dA (stride N*N each). Each matrix is
// overwritten with its Cholesky factor; no positive-definiteness check
// is performed (a non-positive pivot yields NaN via sqrtf).
__global__ void spotrf_batched_kernel(int N, int batch, float *dA)
{
    int m;
    int n;
    int k;
    int i;
    // Batched Cholesky factorization.
    for (i = 0; i < batch; i++) {
        float *pA = &dA[i*N*N];
        // Single Cholesky factorization.
        for (k = 0; k < N; k++) {
            // Panel factorization: pivot square root, then scale the
            // remainder of the column.
            pA[k*N+k] = sqrtf(pA[k*N+k]);
            for (m = k+1; m < N; m++)
                pA[k*N+m] /= pA[k*N+k];
            // Rank-1 update of the trailing submatrix.
            for (n = k+1; n < N; n++)
                for (m = n; m < N; m++)
                    pA[n*N+m] -= (pA[k*N+n]*pA[k*N+m]);
        }
    }
}
14,417
// Element-wise sum of two 3-vectors: c = a + b (device helper, runs in
// the calling thread -- not a kernel despite the name).
extern __device__ void simple_summator_kernel(int *a, int *b, int *c)
{
    int idx = 0;
    while (idx < 3) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
}
14,418
// Device-side helpers for float[3] vectors.

// dest = src.
__device__ void vecCopy(float *dest, float *src) {
    for(int i = 0; i < 3; i ++)
        dest[i] = src[i];
}

// Euclidean distance between two points.
__device__ float dist3d(float *p1, float *p2) {
    return sqrtf( pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 2) + pow(p1[2] - p2[2], 2));
}

// ret = p2 - p1.
__device__ void vecSub(float *p1, float *p2, float *ret) {
    ret[0] = p2[0] - p1[0];
    ret[1] = p2[1] - p1[1];
    ret[2] = p2[2] - p1[2];
}

// ret = p1 + p2.
__device__ void vecAdd(float *p1, float *p2, float *ret) {
    ret[0] = p2[0] + p1[0];
    ret[1] = p2[1] + p1[1];
    ret[2] = p2[2] + p1[2];
}

// ret = v1 x v2 (cross product).
__device__ void vecCross(float *v1, float *v2, float *ret) {
    ret[0] = v1[1] * v2[2] - v1[2] * v2[1];
    ret[1] = v1[2] * v2[0] - v1[0] * v2[2];
    ret[2] = v1[0] * v2[1] - v1[1] * v2[0];
}

// Euclidean length of p.
__device__ float vecLen(float *p) {
    return sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2]);
}

// vScaledVec = fScale * vOrigVec.
__device__ void vecScale(float *vOrigVec, float fScale, float *vScaledVec) {
    for(int i = 0; i < 3; i ++)
        vScaledVec[i] = fScale * vOrigVec[i];
}

// vEndPoint = vStartPoint + vVec.
__device__ void point2point(float *vStartPoint, float *vVec, float *vEndPoint) {
    for(int i = 0; i < 3; i ++)
        vEndPoint[i] = vStartPoint[i] + vVec[i];
}

// Scalar (dot) product.
__device__ float dot_product(float *vec1, float *vec2) {
    return vec1[0]*vec2[0] + vec1[1]*vec2[1] + vec1[2]*vec2[2];
}

// Normalizes p1 in place; zero-length vectors are left unchanged.
__device__ void normalize(float *p1) {
    float len = vecLen(p1);
    // BUG FIX: the original tested the POINTER (p1 != 0) instead of the
    // length, so a zero-length vector was divided by zero (NaN/inf).
    if(len != 0) {
        p1[0] /= len;
        p1[1] /= len;
        p1[2] /= len;
    }
}

// Reflects vOrigViewVec about vNormal into vReflectViewVec
// (r = v - 2 (v.n̂) n̂, built from the primitives above).
__device__ void reflectVec(float *vOrigViewVec, float *vNormal, float *vReflectViewVec) {
    float vReverseViewVec[3] = {0};
    vecScale(vOrigViewVec, -1, vReverseViewVec);
    float vDiagonalNormalVec[3] = {0};
    // Twice the projection of the reversed view vector onto the normal.
    float fLen = dot_product(vReverseViewVec, vNormal) / vecLen(vNormal) * 2.0f;
    float vNormalizedNormal[3] = {0};
    point2point(vNormalizedNormal, vNormal, vNormalizedNormal); // copy (start is zero)
    normalize(vNormalizedNormal);
    vecScale(vNormalizedNormal, fLen, vDiagonalNormalVec);
    point2point(vDiagonalNormalVec, vOrigViewVec, vReflectViewVec);
}
14,419
#include <stdio.h>
#include <cuda_runtime.h>

#define N 12

// Task 1 (translated from Indonesian): memory allocation and
// device-to-host transfer. The kernel writes each thread's own global
// index into A[idx].
__global__ void kern(int *A)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    A[idx] = idx;
}

/**
 * Host main routine: allocates N ints on host and device, launches the
 * kernel with 3 blocks of 4 threads (= N, so no guard is needed),
 * copies the result back and prints it.
 */
int main(void)
{
    // Host allocation; contents are irrelevant -- the kernel overwrites
    // every element before the copy back.
    int *A = (int *) malloc(N*sizeof(int));
    int *d_A;
    cudaMalloc((void **)&d_A,N*sizeof(int)); // device allocation
    cudaMemcpy(d_A,A,N*sizeof(int),cudaMemcpyHostToDevice);
    //
    dim3 grid,block;
    block.x = 4;
    grid.x = 12/block.x;
    kern<<<grid,block>>>(d_A);
    cudaMemcpy(A,d_A,N*sizeof(int),cudaMemcpyDeviceToHost); // device-to-host copy (blocking, so no explicit sync needed)
    // print the result
    for (int i = 0;i < N;i++)
        printf("A[%d] = %d\n",i,A[i]);
    free(A);
    cudaFree(d_A);
    return 0;
}
14,420
#include "includes.h"

// Block-wise minimum reduction: each block loads blockDim.x consecutive
// elements of device_input into dynamic shared memory and reduces them
// to one minimum, written to device_output[blockIdx.x]. Launch with
// blockDim.x * sizeof(float) bytes of dynamic shared memory.
// NOTE(review): no bounds guard -- the grid must cover the input exactly.
__global__ void compute_min_gpu(float *device_input, float *device_output){
    extern __shared__ float sm[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sm[tid] = device_input[i];
    __syncthreads(); // tile fully staged before anyone reduces
    // Interleaved-addressing tree reduction: at step s, threads whose id
    // is a multiple of 2s fold in their neighbour s away.
    for(int s = 1;s < blockDim.x; s*= 2){
        if(tid % (2 * s) == 0){
            sm[tid] = min(sm[tid], sm[tid+s]);
        }
        __syncthreads();
    }
    // Thread 0 holds the block minimum.
    if(tid == 0)
        device_output[blockIdx.x] = sm[0];
}
14,421
// Applies a 3x3 convolution filter to an image stored as an array of
// row pointers (globalInputData[x][y]), writing a flat result array.
// One thread per output pixel, linearized as tidX = x*height + y.
// NOTE(review): x and y are offset by +1 and neighbours at x-1/x+1,
// y-1/y+1 are read without clamping -- this only stays in bounds if the
// caller supplies an input padded by one pixel on every side; confirm.
// The guard `tidX < width*height - 1` also skips the last pixel.
extern "C" __global__ void any_filter(float** globalInputData, int width, int height, float** filter, float* globalOutputData)
{
    // Get a "unique id" of the thread that corresponds to one pixel.
    const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x;
    if (tidX < width * height - 1) {
        const unsigned int x = tidX / height + 1;
        const unsigned int y = tidX % height + 1;
        // 3x3 weighted sum of the pixel's neighbourhood.
        globalOutputData[tidX] = filter[0][0] * globalInputData[x - 1][y - 1] + filter[1][0] * globalInputData[x][y - 1] + filter[2][0] * globalInputData[x + 1][y - 1] + filter[0][1] * globalInputData[x - 1][y] + filter[1][1] * globalInputData[x][y] + filter[2][1] * globalInputData[x + 1][y] + filter[0][2] * globalInputData[x - 1][y + 1] + filter[1][2] * globalInputData[x][y + 1] + filter[2][2] * globalInputData[x + 1][y + 1];
    }
    // NOTE(review): this barrier has no effect here -- no shared memory
    // is used and nothing follows it.
    __syncthreads();
}
14,422
#include <cuda.h>
#include <iostream>
#define N 1024
using namespace std;

// Dense N x N int matrix multiply launched as a SINGLE block of N
// threads: thread `id` computes the entire row C[id][*]. C must be
// zeroed beforehand because the kernel accumulates with +=.
__global__ void matrix_multiplication(int A[][N],int B[][N],int C[][N])
{
    int id=threadIdx.x;
    // NOTE(review): this barrier at function entry has no effect --
    // nothing is shared between threads before or after it.
    __syncthreads();
    for(int j=0;j<N;j++)
    {
        for(int k=0;k<N;k++)
        {
            C[id][j]+=A[id][k]*B[k][j];
        }
    }
}

// Host matrices (static, so they do not blow the stack).
int A[N][N],B[N][N],C[N][N];

// Fills A and B with simple patterns, multiplies on the device with a
// <<<1, N>>> launch, and copies C back (printing left commented out).
int main(int argc,char *argv[])
{
    for(int i=0;i<N;i++)
    {
        for(int j=0;j<N;j++)
        {
            A[i][j]=2*i+j;
            C[i][j]=0; // kernel accumulates, so C starts at zero
            B[i][j]=2*j+i;
        }
    }
    // Device copies of the 2D arrays (pointer-to-row-of-N types keep the
    // [][N] indexing valid on the device).
    int (*A_D)[N],(*B_D)[N],(*C_D)[N];
    cudaMalloc((void**)&A_D, (N*N)*sizeof(int));
    cudaMalloc((void**)&B_D, (N*N)*sizeof(int));
    cudaMalloc((void**)&C_D, (N*N)*sizeof(int));
    cudaMemcpy(A_D,A,N*N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(B_D,B,N*N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(C_D,C,N*N*sizeof(int),cudaMemcpyHostToDevice);
    matrix_multiplication<<<1,N>>>(A_D,B_D,C_D);
    // Blocking copy back doubles as synchronization with the kernel.
    cudaMemcpy(C,C_D,N*N*sizeof(int),cudaMemcpyDeviceToHost);
    // for(int i=0;i<N;i++)
    // {
    //     for(int j=0;j<N;j++)
    //     {
    //         cout<<C[i][j]<<" ";
    //     }
    //     cout<<endl;
    // }
    cudaFree(A_D);
    cudaFree(B_D);
    cudaFree(C_D);
    return 0;
}
14,423
#include <stdio.h>
#include <cuda_runtime.h>

// Each GPU thread prints its own thread index.
__global__ void helloFromGPU(void)
{
    printf("Hello from GPU thread: %d!\n", threadIdx.x);
}

int main(void){
    // CPU says hi
    printf("Hello from CPU! \n");
    // GPU says hi: one block of four threads. The synchronize both waits
    // for the kernel and flushes the device printf buffer before the
    // context is torn down.
    helloFromGPU<<<1, 4>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
14,424
//pass
//--gridDim=[10,10] --blockDim=1
// Fully-connected layer (layer 4 -> layer 5): each block computes one of
// 10 output neurons (blockIdx.x) for one input sample (blockIdx.y).
// Weight layout: 101 floats per neuron -- bias first, then 100 input
// weights. The result passes through the scaled tanh activation
// 1.7159 * tanh(2/3 * x).
__global__ void executeFourthLayer(float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU)
{
    int blockID=blockIdx.x;
    //int pixelY=threadIdx.y;
    int weightBegin=blockID*101; // 101 = 1 bias + 100 weights per neuron
    float result=0;
    result+=Layer4_Weights_GPU[weightBegin]; // bias term
    ++weightBegin;
    // Dot product of the sample's 100 inputs with this neuron's weights.
    for (int i=0; i<100; ++i )
    {
        result+=Layer4_Neurons_GPU[i+(100*blockIdx.y)]*Layer4_Weights_GPU[weightBegin+i];
    }
    result=(1.7159*tanhf(0.66666667*result));
    Layer5_Neurons_GPU[blockID+(10*blockIdx.y)]=result;
}
14,425
#include <cstdio>
#include <cstdlib>

// Error checking macro: reads (and clears) the last CUDA error and
// aborts with file/line context on failure.
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

// Typed malloc wrapper: allocates num_bytes of host memory into ptr.
template <typename T>
void alloc_bytes(T &ptr, size_t num_bytes){
    ptr = (T)malloc(num_bytes);
}

// Increments every element of `array` by one. The grid-stride loop makes
// the kernel correct for any launch configuration.
__global__ void inc(int *array, size_t n){
    size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
    while (idx < n){
        array[idx]++;
        idx += blockDim.x*gridDim.x; // grid-stride loop
    }
}

// Problem size: 32M ints.
const size_t ds = 32ULL*1024ULL*1024ULL;

// Zeroes a host array, increments a device copy once, copies it back and
// verifies every element equals 1. Every CUDA step is error-checked.
int main(){
    int *h_array, *d_array;
    alloc_bytes(h_array, ds*sizeof(h_array[0]));
    cudaMalloc(&d_array, ds*sizeof(d_array[0]));
    cudaCheckErrors("cudaMalloc Error");
    memset(h_array, 0, ds*sizeof(h_array[0]));
    cudaMemcpy(d_array, h_array, ds*sizeof(h_array[0]), cudaMemcpyHostToDevice);
    cudaCheckErrors("cudaMemcpy H->D Error");
    inc<<<256, 256>>>(d_array, ds);
    cudaCheckErrors("kernel launch error");
    // The blocking copy back also synchronizes with the kernel, so any
    // asynchronous execution error surfaces here.
    cudaMemcpy(h_array, d_array, ds*sizeof(h_array[0]), cudaMemcpyDeviceToHost);
    cudaCheckErrors("kernel execution or cudaMemcpy D->H Error");
    for (int i = 0; i < ds; i++)
        if (h_array[i] != 1) {printf("mismatch at %d, was: %d, expected: %d\n", i, h_array[i], 1); return -1;}
    printf("success!\n");
    return 0;
}
14,426
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include <helper_functions.h>

// Adds two int vectors on the GPU; defined at the bottom of the file.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

// One thread per element: c[i] = a[i] + b[i].
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Prints a matrixSize x matrixSize row-major float matrix, one row per line.
void printMatrix(float* pointerToMatrix, int matrixSize)
{
    for (int rowNumber = 0; rowNumber < matrixSize; rowNumber++) {
        for (int index = 0; index < matrixSize; index++) {
            printf("%f ", pointerToMatrix[rowNumber * matrixSize + index]);
        }
        printf("\r\n");
    }
}

// Reports a CUDA error with file/line context and aborts.
static void checkCudaError(cudaError_t error, const char* file, int line)
{
    if (error != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(error), file, line);
        exit(EXIT_FAILURE);
    }
}

// Fills an existing matrixSize x matrixSize matrix from *counter in
// steps of `step` (ascending for +1, descending for -1); *counter keeps
// its final value so consecutive fills continue the sequence, exactly as
// the original inline loops did.
static void fillMatrix(float* m, int matrixSize, float* counter, float step)
{
    for (int rowNumber = 0; rowNumber < matrixSize; rowNumber++) {
        for (int index = 0; index < matrixSize; index++) {
            m[rowNumber * matrixSize + index] = *counter;
            *counter += step;
        }
    }
}

// Multiplies a * b into c (all matrixSize x matrixSize, row-major).
static void multiplyMatrices(const float* a, const float* b, float* c, int matrixSize)
{
    for (int rowNumber = 0; rowNumber < matrixSize; rowNumber++) {
        for (int columnNumber = 0; columnNumber < matrixSize; columnNumber++) {
            float result = 0;
            // Dot product of row `rowNumber` of a with column `columnNumber` of b.
            for (int element = 0; element < matrixSize; element++) {
                result += a[rowNumber * matrixSize + element] * b[element * matrixSize + columnNumber];
            }
            c[rowNumber * matrixSize + columnNumber] = result;
        }
    }
}

// CPU-only demo: builds two 2x2 matrices, multiplies them, prints all three.
int matMultHost()
{
    printf("Hello, World!\n");
    int matrixSize = 2;

    // Matrix a: ascending values 0, 1, 2, 3.
    float* a = (float*)malloc(matrixSize * matrixSize * sizeof(float));
    if (a == NULL) { printf("malloc failed for matrix a."); exit(1); }
    float counter = 0;
    fillMatrix(a, matrixSize, &counter, 1.0f);
    printMatrix(a, matrixSize);

    // Matrix b: descending values continuing from counter (4, 3, 2, 1).
    float* b = (float*)malloc(matrixSize * matrixSize * sizeof(float));
    if (b == NULL) { printf("malloc failed for matrix b."); exit(1); }
    fillMatrix(b, matrixSize, &counter, -1.0f);
    printMatrix(b, matrixSize);

    // Result matrix c.
    float* c = (float*)malloc(matrixSize * matrixSize * sizeof(float));
    if (c == NULL) { printf("malloc failed for matrix c."); exit(1); }
    multiplyMatrices(a, b, c, matrixSize);
    printMatrix(c, matrixSize);

    // Release host buffers (the original leaked all three).
    free(a);
    free(b);
    free(c);
    return 0;
}

// GPU round-trip demo: builds the same matrices as matMultHost, copies
// them to the device, then multiplies on the host and prints the result.
// BUG FIXES vs. the original: host matrices are now filled BEFORE the
// host-to-device copies (the original copied from the uninitialized
// pointers h_A/h_B and into the uninitialized h_C -- undefined
// behavior), and allocations/copies use matrixSize*matrixSize elements
// instead of just matrixSize bytes' worth. As in the original, no kernel
// is launched; the multiplication itself runs on the CPU.
int matMultCUDA()
{
    printf("Hello, World!\n");
    int matrixSize = 2;
    size_t matrixBytes = (size_t)matrixSize * matrixSize * sizeof(float);

    // Matrix a: ascending values.
    float* a = (float*)malloc(matrixBytes);
    if (a == NULL) { printf("malloc failed for matrix a."); exit(1); }
    float counter = 0;
    fillMatrix(a, matrixSize, &counter, 1.0f);
    printMatrix(a, matrixSize);

    // Matrix b: descending values.
    float* b = (float*)malloc(matrixBytes);
    if (b == NULL) { printf("malloc failed for matrix b."); exit(1); }
    fillMatrix(b, matrixSize, &counter, -1.0f);
    printMatrix(b, matrixSize);

    // Device buffers sized for the whole matrices, filled from the
    // already-initialized host matrices.
    float *d_A, *d_B, *d_C;
    checkCudaError(cudaMalloc((void **)&d_A, matrixBytes), __FILE__, __LINE__);
    checkCudaError(cudaMemcpy(d_A, a, matrixBytes, cudaMemcpyHostToDevice), __FILE__, __LINE__);
    checkCudaError(cudaMalloc((void **)&d_B, matrixBytes), __FILE__, __LINE__);
    checkCudaError(cudaMemcpy(d_B, b, matrixBytes, cudaMemcpyHostToDevice), __FILE__, __LINE__);
    checkCudaError(cudaMalloc((void **)&d_C, matrixBytes), __FILE__, __LINE__);

    // Result matrix c, computed on the host as before.
    float* c = (float*)malloc(matrixBytes);
    if (c == NULL) { printf("malloc failed for matrix c."); exit(1); }
    multiplyMatrices(a, b, c, matrixSize);
    printMatrix(c, matrixSize);

    checkCudaError(cudaFree(d_A), __FILE__, __LINE__);
    checkCudaError(cudaFree(d_B), __FILE__, __LINE__);
    checkCudaError(cudaFree(d_C), __FILE__, __LINE__);
    free(a);
    free(b);
    free(c);
    return 0;
}

// Runs both demos. (SDK timer scaffolding left commented out as in the
// original.)
int main()
{
    //StopWatchInterface *t;
    //if (!sdkCreateTimer(&t)) {
    //    printf("timercreate failed\n");
    //    exit(-1);
    //}
    //sdkStartTimer(&t);
    // code under measurement would go here
    //sdkStopTimer(&t);
    //printf("Zeitdauer: %f\n", sdkGetTimerValue(&t);
    matMultHost();
    matMultCUDA();
    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel <<<1, size >>>(dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
14,427
#include "includes.h"

// Parallel integer sum reduction with 8-way block unrolling (canonical form
// from Cheng et al., "Professional CUDA C Programming").
// Each block first folds 8 consecutive blockDim.x-sized chunks of g_idata into
// its own first chunk, then runs an in-place shared-tree-style reduction over
// global memory, finishing the last warp with a warp-synchronous unroll.
// One partial sum per block is written to g_odata[blockIdx.x].
// Preconditions: blockDim.x is a power of two >= 64; grid sized for n/8 work
// items. NOTE(review): the volatile warp unroll assumes lock-step warps
// (pre-Volta); on Volta+ explicit __syncwarp() would be required — confirm
// target architecture.
__global__ void reduceUnrollWarp8(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    // Bug fix: each block owns 8 chunks, so the per-block global index must
    // advance by blockDim.x * 8 (the original used blockDim.x only, which made
    // the unrolled loads overlap between blocks and disagree with `idata`).
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    // data pointer of this block's first chunk
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // unrolling: fold 8 elements into the first chunk
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // in-place tree reduction down to the last warp; no early returns here,
    // since every thread of the block must reach the barrier each iteration
    for (int stride = blockDim.x/2; stride > 32; stride >>= 1){
        if (tid < stride){
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    // final warp: warp-synchronous unroll via volatile accesses
    if (tid < 32){
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32];
        vmem[tid] += vmem[tid + 16];
        vmem[tid] += vmem[tid + 8];
        vmem[tid] += vmem[tid + 4];
        vmem[tid] += vmem[tid + 2];
        vmem[tid] += vmem[tid + 1];
    }
    // thread 0 publishes this block's partial sum
    if (tid == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
14,428
#include "includes.h"

// Block-wise reduction: for each block, compute the sum of rho and the sums of
// squared field components (Ex^2, Ey^2, Ez^2) over the block's slice of the
// input arrays, writing one partial result per block into d_Rrho/d_REx/...
// Preconditions: blockDim.x == gThreadsAll and blockDim.x is a power of two
// (required by the halving tree below) — TODO confirm at launch sites.
__global__ void reduce_fields(float *d_rho, float *d_Ex, float* d_Ey, float* d_Ez, float *d_Rrho, float* d_REx, float* d_REy, float* d_REz, int N)
{
    __shared__ float rho_array[gThreadsAll];
    __shared__ float Ex_array[gThreadsAll];
    __shared__ float Ey_array[gThreadsAll];
    __shared__ float Ez_array[gThreadsAll];

    int n = blockDim.x * blockIdx.x + threadIdx.x;

    // Bug fix: the shared buffers were never initialized and the reduction
    // read the *global* arrays at block-local offsets. Load each thread's own
    // element first, zero-padding past N so partial blocks reduce correctly.
    rho_array[threadIdx.x] = (n < N) ? d_rho[n]            : 0.0f;
    Ex_array[threadIdx.x]  = (n < N) ? d_Ex[n] * d_Ex[n]   : 0.0f;
    Ey_array[threadIdx.x]  = (n < N) ? d_Ey[n] * d_Ey[n]   : 0.0f;
    Ez_array[threadIdx.x]  = (n < N) ? d_Ez[n] * d_Ez[n]   : 0.0f;
    __syncthreads();

    // Standard shared-memory tree reduction. The barrier is outside the
    // divergent branch (the original had it inside `if (n < N)`, which is
    // undefined behavior when a block straddles N).
    for (int s = blockDim.x / 2; s > 0; s >>= 1){
        if (threadIdx.x < s) {
            rho_array[threadIdx.x] += rho_array[threadIdx.x + s];
            Ex_array[threadIdx.x]  += Ex_array[threadIdx.x + s];
            Ey_array[threadIdx.x]  += Ey_array[threadIdx.x + s];
            Ez_array[threadIdx.x]  += Ez_array[threadIdx.x + s];
        }
        __syncthreads();
    }

    // Thread 0 publishes the block's partial sums.
    if (threadIdx.x == 0){
        d_Rrho[blockIdx.x] = rho_array[0];
        d_REx[blockIdx.x]  = Ex_array[0];
        d_REy[blockIdx.x]  = Ey_array[0];
        d_REz[blockIdx.x]  = Ez_array[0];
    }
}
14,429
// INCLUDES
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>  /* errno */
#include <string.h> /* strerror */
#include <math.h>   // ceil
#include <time.h>   // CLOCKS_PER_SEC
// CUDA
#include <cuda.h>
#include <cuda_runtime.h>

/**
 * PARS
 */
#define TILE_DIM 32

// ctid: current tid
// Returns 0 when ctix is a multiple of mask_len, 1 otherwise.
// Used below to zero out the sliding-window update at mask-centre positions.
__device__ unsigned int fMOD(unsigned int ctix,unsigned int mask_len){
    return ((ctix % mask_len) == 0) ? 0 : 1;
}

// Signed offset from ctix to the nearest mask_len-aligned column:
// backwards when ctix is at or before the mask centre (RADIUS), forwards
// otherwise. NOTE(review): the arithmetic mixes unsigned operands with a
// signed return — relies on modular wraparound converting back to the
// intended negative offset; confirm for the ranges used by callers.
__device__ int fNEAR(unsigned int ctix,unsigned int RADIUS){
    // This function is valid only for blockIdx.x>0
    unsigned int mask_len = RADIUS*2+1;
    return ((ctix % mask_len) <= RADIUS) ? -(ctix % mask_len) : -(ctix % mask_len)+mask_len;
}

// Clamp: returns required_col, capped at available_col.
// NOTE(review): currently unused in this file.
__device__ unsigned int fMAX(unsigned int available_col,unsigned int required_col){
    return (required_col<available_col) ? required_col : available_col;
}

// Out-of-place transpose of a WIDTH x HEIGHT byte image: O = I^T.
// 2D launch; each thread moves one element. No shared-memory tiling.
__global__ void gtranspose_char(unsigned char *O, const unsigned char *I, unsigned WIDTH, unsigned HEIGHT)
{
    unsigned int tix = threadIdx.x;
    unsigned int tiy = threadIdx.y;
    unsigned int bix = blockIdx.x;
    unsigned int biy = blockIdx.y;
    unsigned int bdx = blockDim.x;
    unsigned int bdy = blockDim.y;
    //                   |--grid------|   |-block--|   |-thread--|
    unsigned int itid = WIDTH*bdy*biy  + WIDTH*tiy  + bix*bdx+tix;
    unsigned int otid = HEIGHT*bdx*bix + HEIGHT*tix + biy*bdy+tiy;
    unsigned int xtid = bix*bdx+tix;
    unsigned int ytid = biy*bdy+tiy;
    //if( is32Multiple || (xtid<WIDTH && ytid<HEIGHT) ){
    if( xtid<WIDTH && ytid<HEIGHT ){
        O[ otid ] = I[ itid ];
        //__syncthreads();
    }
}

// Same transpose as gtranspose_char, for double-precision images.
__global__ void gtranspose_double(double *O, const double *I, unsigned WIDTH, unsigned HEIGHT)
{
    unsigned int tix = threadIdx.x;
    unsigned int tiy = threadIdx.y;
    unsigned int bix = blockIdx.x;
    unsigned int biy = blockIdx.y;
    unsigned int bdx = blockDim.x;
    unsigned int bdy = blockDim.y;
    //                   |--grid------|   |-block--|   |-thread--|
    unsigned int itid = WIDTH*bdy*biy  + WIDTH*tiy  + bix*bdx+tix;
    unsigned int otid = HEIGHT*bdx*bix + HEIGHT*tix + biy*bdy+tiy;
    unsigned int xtid = bix*bdx+tix;
    unsigned int ytid = biy*bdy+tiy;
    //if( is32Multiple || (xtid<WIDTH && ytid<HEIGHT) ){
    if( xtid<WIDTH && ytid<HEIGHT ){
        O[ otid ] = I[ itid ];
        //__syncthreads();
    }
}

// COMP = ONE - BIN, element-wise, over a WIDTH x HEIGHT byte image.
__global__ void complementary_to_ONE(const unsigned char *ONE, const unsigned char *BIN, unsigned char *COMP, unsigned WIDTH, unsigned HEIGHT)
{
    /**
     * NOTES
     * In rural fragmentation:  I need COMP to mask out urban pixels
     *                          from being fragmented (because only rural
     *                          pixels can be fragmented).
     * In urban fragmentation:  I need COMP to run the whole fragmentation program
     *                          (instead of BIN as in rural frag.), and I use BIN
     *                          to mask out rural pixels.
     */
    unsigned int r   = threadIdx.x;
    unsigned int c   = threadIdx.y;
    unsigned int bix = blockIdx.x;
    unsigned int biy = blockIdx.y;
    unsigned int bdx = blockDim.x;
    unsigned int bdy = blockDim.y;
    unsigned int tix = bix * bdx + r;
    unsigned int tiy = biy * bdy + c;
    unsigned int tid = tix + tiy*WIDTH;
    if( tix<WIDTH && tiy<HEIGHT ){
        COMP[ tid ] = ONE[tid] - BIN[ tid ];
    }
}

// Tile-local running sum along Y over byte input (see NOTES inside).
// Launch assumption (from the indexing below): 1D blocks along X; one block
// row per (RADIUS*2+1)-tall tile, i.e. gridDim.y == ceil(map_height/mask_len).
__global__ void Vcumsum_char(   const unsigned char *IN         ,
                                unsigned long int map_width     ,
                                unsigned long int map_height    ,
                                double *OUT                     ,
                                unsigned int RADIUS             ){
    /* NOTES:
     * This kernel performs the cumulative sum along the Y axis. The block is
     * made by all threads displaced along X (i.e. [1024,1,1]. Each thread of
     * the block is in charge of summing the (RADIUS*2+1) pixels below thread.
     * So each block is in charge of a tile of size X<--blockDim.x and
     * Y<--mask_len.
     * The first "if" avoids going beyond the last row and the second "if"
     * avoids going beyond the last column.
     *      block = [32*32, 1, 1]
     *      tile  = [32*32, mask_len, 1]
     *
     * If I avoid the MASK in this kernel I avoid the gtransform on ROI.
     * If I don't mask here signifies that all pixels within IN are used to
     * compute the fragmentation of all pixels; further, masking at the end
     * of whole program I exclude all points outside ROI. This seems to be
     * the most wonderful situation.
     */
    unsigned long int ii;
    unsigned long int mask_len = RADIUS*2+1;
    unsigned long int tix = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned long int tiy = blockIdx.y*mask_len;//+ threadIdx.y;
    unsigned long int tid = tix + tiy*map_width;
    if( tix < map_width ){ // && tiy < map_height
        // Here I copy the first row of values within the tile:
        if(tid<map_width*map_height) OUT[tid] = IN[tid];//*MASK[tid];
        /* Here, for every row(i.e. block) and every thread of the row(i.e. block),
         * I sum the current(tid+ii) and the previous(tid+ii-1) values and write
         * the result in the current position(tid+ii) of the output array.
         */
        for(ii=1;ii<mask_len;ii++) if(tiy+ii<map_height) OUT[tid+ii*map_width] = OUT[tid+(ii-1)*map_width] + IN[tid+ii*map_width];//*MASK[tid+ii*map_width]
    }
}

// Same tile-local running sum along Y as Vcumsum_char, for double input.
__global__ void Vcumsum_double( const double *IN                ,
                                unsigned long int map_width     ,
                                unsigned long int map_height    ,
                                double *OUT                     ,
                                unsigned int RADIUS             ){
    /* NOTES:
     * This kernel performs the cumulative sum along the Y axis. The block is
     * made by all threads displaced along X (i.e. [1024,1,1]. Each thread of
     * the block is in charge of summing the (RADIUS*2+1) pixels below thread.
     * So each block is in charge of a tile of size X<--blockDim.x and
     * Y<--mask_len.
     * The first "if" avoids going beyond the last row and the second "if"
     * avoids going beyond the last column.
     *      block = [32*32, 1, 1]
     *      tile  = [32*32, mask_len, 1]
     *
     * If I avoid the MASK in this kernel I avoid the gtransform on ROI.
     * If I don't mask here signifies that all pixels within IN are used to
     * compute the fragmentation of all pixels; further, masking at the end
     * of whole program I exclude all points outside ROI. This seems to be
     * the most wonderful situation.
     */
    unsigned long int ii;
    unsigned long int mask_len = RADIUS*2+1;
    unsigned long int tix = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned long int tiy = blockIdx.y*mask_len;//+ threadIdx.y;
    unsigned long int tid = tix + tiy*map_width;
    if( tix < map_width ){ // && tiy < map_height
        // Here I copy the first row of values within the tile:
        if(tid<map_width*map_height) OUT[tid] = IN[tid];//*MASK[tid];
        /* Here, for every row(i.e. block) and every thread of the row(i.e. block),
         * I sum the current(tid+ii) and the previous(tid+ii-1) values and write
         * the result in the current position(tid+ii) of the output array.
         */
        for(ii=1;ii<mask_len;ii++) if(tiy+ii<map_height) OUT[tid+ii*map_width] = OUT[tid+(ii-1)*map_width] + IN[tid+ii*map_width];//*MASK[tid+ii*map_width]
    }
}

// Sliding-window sum along Y built from the tile-local cumulative sums
// produced by Vcumsum_*. Each output row is assembled from (at most) three
// cumulative-sum rows; boundary tiles are special-cased by blockIdx.y.
// NOTE(review): comments mention "columns" but the indexing below operates on
// rows along Y — presumably written for the transposed pass; confirm.
__global__ void sum_of_3_LINES( const double *IN                ,
                                unsigned int map_width          ,
                                unsigned int map_height         ,
                                double *OUT                     ,
                                unsigned int RADIUS             ){
    /* NOTES:
     * This kernel performs the algebraic sum of three columns:
     *  > the column on the right side        +[tid+ii+radius                 ]
     *  > the column on the left side         -[tid+ii-radius-1               ]
     *  > the nearest terminal column         +[tid+ii+fNEAR(tix+ii+1,mask_len)]
     *
     * Particular cases are figured out according to blockIdx.x position.
     * See later comments!
     */
    unsigned int ii = 0;
    unsigned int mask_len = RADIUS*2+1;
    unsigned int tix = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int tiy = blockIdx.y*mask_len;
    unsigned int tid = tix + tiy*map_width;
    unsigned int latest_row = 0;
    if( tix < map_width ){
        /* Here I distinguish between 4 kind of tiles(i.e. of blockIdx.y):
         *  > 0          The case is particular only for threads before mask centre;
         *  > [1,end-2]  Cases are general;
         *  > end-1      The case is particular only for threads after mask centre;
         *  > end        The case is particular for all threads, because we don't know in advance where the latest column is.
         */
        // ***first tile***
        if(blockIdx.y==0){
            for(ii=0;ii<RADIUS;ii++) OUT[tid+ii*map_width] = IN[tid+(ii+RADIUS)*map_width];
            OUT[tid+RADIUS*map_width] = IN[tid+(mask_len-1)*map_width];
            for(ii=RADIUS+1;ii<mask_len;ii++) OUT[tid+ii*map_width] = IN[tid+(ii+RADIUS)*map_width] - IN[tid+(ii-RADIUS-1)*map_width] + IN[tid+(mask_len-1)*map_width];
        }
        // ***all centre tiles***
        if(blockIdx.y>0 && blockIdx.y<gridDim.y-2){
            /* This is the most general case/formulation:
             *  > fMOD:  It is zero when the thread is at RADIUS+1, i.e. at the centre of the mask;
             *  > fNEAR: It finds the nearest mask_len column, which is in:
             *              -current block,  if thread is beyond the mask centre,
             *              -previous block, if thread is before the mask centre.
             */
            for(ii=0;ii<mask_len;ii++) OUT[tid+ii*map_width] = IN[tid+(ii+fNEAR(tiy+ii+1,RADIUS))*map_width] + ( -IN[tid+(ii-RADIUS-1)*map_width] + IN[tid+(ii+RADIUS)*map_width] )*fMOD(tiy+ii+RADIUS+1,mask_len);
        }
        // ***tile before last one***
        if(blockIdx.y==gridDim.y-2){
            latest_row = map_height-tiy-1;
            for(ii=0;ii<RADIUS;ii++) OUT[tid+ii*map_width] = IN[tid+(ii+RADIUS)*map_width] - IN[tid+(ii-RADIUS-1)*map_width] + IN[tid-1*map_width];
            OUT[tid+RADIUS*map_width] = IN[tid+(mask_len-1)*map_width];
            for(ii=RADIUS+1;ii<mask_len;ii++) OUT[tid+ii*map_width] = IN[tid+(min(ii+RADIUS,latest_row))*map_width] - IN[tid+(ii-RADIUS-1)*map_width] + IN[tid+(mask_len-1)*map_width];
        }
        // ***last tile***
        if(blockIdx.y==gridDim.y-1){
            latest_row = map_height-tiy-1;
            for(ii=0;ii<RADIUS;ii++) if(tiy+ii<map_height) OUT[tid+ii*map_width] = IN[tid+(min(latest_row,ii+RADIUS))*map_width] - IN[tid+(ii-RADIUS-1)*map_width] + IN[tid-1*map_width];
            if(tiy+RADIUS<map_height) OUT[tid+RADIUS*map_width] = IN[tid+latest_row*map_width];
            for(ii=RADIUS+1;ii<mask_len;ii++) if(tiy+ii<map_height) OUT[tid+ii*map_width] = IN[tid+latest_row*map_width] - IN[tid+(ii-RADIUS-1)*map_width];
        }
    }
}

// Zero FRAG wherever the pixel is outside the ROI or masked out by COMP.
// NOTE(review): mask_area is currently unused — the normalization that used
// it survives only in the commented-out line below; confirm before removing
// the parameter.
__global__ void mask_twice(     double *FRAG                    ,   // in/out
                                const unsigned char *ROI        ,
                                const unsigned char *COMP       ,
                                unsigned int map_width          ,
                                unsigned int map_height         ,
                                double mask_area                ){
    /**
     * NOTES
     * I multiply by:
     *  > ROI:  to exclude pixels outside the region of interest.
     *  > COMP: to exclude urban (rural) pixels in rural (urban) fragmentation.
     *          If it is rural fragmentation COMP is the complentary to 1 of BIN,
     *          else if urban fragmentation COMP is BIN.
     */
    unsigned int tix = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int tiy = blockDim.y*blockIdx.y + threadIdx.y;
    unsigned int tid = tix + tiy*map_width;
    if( tix < map_width && tiy < map_height){
        //double FRAG_reg = 0.0;
        //FRAG[tid] = (double)((unsigned int)FRAG[tid] * ROI[tid] * COMP[tid]) / mask_area;
        if(ROI[tid]!=1 || COMP[tid]!=1){
            FRAG[tid] = 0.0;
        }
        //FRAG[tid] = FRAG_reg;
    }
}
14,430
#include <cuda.h>
#include <stdio.h>

#define N 2

// Element-wise square: out[i] = in[i]^2. `size` is the element count; the
// guard protects against grids rounded up past the buffer end.
__global__ void K(int *out, int *in, int size) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < (unsigned)size)
        out[id] = in[id] * in[id];
}

// Demonstrates copy/compute overlap: N streams each own a 1 KiB slice of a
// pinned host buffer and independently do H2D copy -> kernel -> D2H copy.
int main() {
    cudaStream_t stream[N];
    for (unsigned ii = 0; ii < N; ++ii)
        cudaStreamCreate(&stream[ii]);

    int *hptr, *dinptr, *doutptr;
    unsigned nbytesperstream = (1 << 10);                      // bytes per stream slice
    unsigned neltsperstream = nbytesperstream / sizeof(int);   // ints per stream slice
    unsigned nbytes = N * nbytesperstream;
    cudaHostAlloc(&hptr, nbytes, 0);
    cudaMalloc(&dinptr, nbytes);
    cudaMalloc(&doutptr, nbytes);

    for (unsigned ii = 0; ii < N; ++ii) {
        // Bug fix: pointer arithmetic on int* advances in elements, not bytes.
        // The original offset by nbytesperstream *elements*, overrunning the
        // allocation for every stream after the first.
        cudaMemcpyAsync(dinptr + ii * neltsperstream,
                        hptr + ii * neltsperstream,
                        nbytesperstream, cudaMemcpyHostToDevice, stream[ii]);
        // Bug fix: launch one thread per *element* (the original launched one
        // per byte with no bounds check) and pass the element count.
        unsigned threads = 256;
        unsigned blocks = (neltsperstream + threads - 1) / threads;
        K<<<blocks, threads, 0, stream[ii]>>>(doutptr + ii * neltsperstream,
                                              dinptr + ii * neltsperstream,
                                              neltsperstream);
        cudaMemcpyAsync(hptr + ii * neltsperstream,
                        doutptr + ii * neltsperstream,
                        nbytesperstream, cudaMemcpyDeviceToHost, stream[ii]);
    }

    // Wait for all streams before releasing resources (the original exited
    // with the async work still in flight and leaked everything).
    cudaDeviceSynchronize();
    cudaFree(doutptr);
    cudaFree(dinptr);
    cudaFreeHost(hptr);
    for (unsigned ii = 0; ii < N; ++ii)
        cudaStreamDestroy(stream[ii]);
    return 0;
}
14,431
#include "includes.h"

// Zero one slot of the counter array: each thread clears e[threadIdx.x mod n].
// With more than n threads per block several threads target the same slot,
// but every write stores 0, so the outcome is the same.
__global__ void clean(unsigned int * e, int n)
{
    const int slot = threadIdx.x % n;
    e[slot] = 0u;
}
14,432
#include "includes.h"

// Benchmark objective evaluated on the device for PSO.
// Applies the change of variable y_i = 1 + (x_i - 1)/4 and sums
// sin^2(phi*y_1) + (y_n - 1)^2 plus the coupled middle terms
// (y_i - 1)^2 * (1 + 10*sin^2(phi*y_{i+1})).
// NOTE(review): this matches the Levy-style penalized benchmark with phi
// presumably pi — phi and NUM_OF_DIMENSIONS come from includes.h; confirm.
__device__ float fitness_function(float x[])
{
    float y,yp;
    float res=0;
    float y1=1+(x[0]-1)/4;
    float yn=1+(x[NUM_OF_DIMENSIONS-1]-1)/4;
    res+=pow(sin(phi*y1),2)+pow(yn-1,2);
    for(int i=0;i<NUM_OF_DIMENSIONS-1;i++)
    {
        y=1+(x[i]-1)/4;
        yp=1+(x[i+1]-1)/4;
        // coupled term linking dimension i to i+1
        res+=pow(y-1,2)*(1+10*pow(sin(phi*yp),2));
    }
    return res;
}

// One thread per *particle* (threads whose flat index is not a multiple of
// NUM_OF_DIMENSIONS return immediately): if the particle's current position
// beats its personal best, overwrite pBests; if it also beats the global
// best, publish it into gBest.
// NOTE(review): the per-element atomicExch makes each write atomic but the
// gBest vector as a whole can interleave between competing blocks — confirm
// this race is acceptable for the surrounding PSO loop.
__global__ void kernelUpdatePBest(float *positions,float *pBests,float *gBest)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if(i>=NUM_OF_PARTICLES*NUM_OF_DIMENSIONS||i%NUM_OF_DIMENSIONS!=0) return;
    // local copies of this particle's position and personal best
    float tempParticle1[NUM_OF_DIMENSIONS];
    float tempParticle2[NUM_OF_DIMENSIONS];
    for(int j=0;j<NUM_OF_DIMENSIONS;j++)
    {
        tempParticle1[j]=positions[i+j];
        tempParticle2[j]=pBests[i+j];
    }
    if(fitness_function(tempParticle1)<fitness_function(tempParticle2))
    {
        for(int j=0;j<NUM_OF_DIMENSIONS;j++) pBests[i+j]=tempParticle1[j];
        if(fitness_function(tempParticle1)<fitness_function(gBest))
        {
            for(int j=0;j<NUM_OF_DIMENSIONS;j++) atomicExch(gBest+j,tempParticle1[j]);
        }
    }
}
14,433
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>

const int MAX_THREAD_NUMBER = 1000000;

// Per-thread instruction counters maintained by instrumented kernels
// (defined in device code elsewhere).
extern "C" __device__ long long counterArray[MAX_THREAD_NUMBER];

// Monotonic counter of kernel invocations logged so far.
long long dynamicKernelIndex = 0;

// Hook called before an instrumented kernel launch (currently a no-op).
void bambooLogKernelBegin(int staticKernelIndex)
{
}

// Hook called after an instrumented kernel: dump every non-zero per-thread
// counter to bamboo.profile.txt, reset the device-side counters, and advance
// dynamicKernelIndex.
void bambooLogKernelEnd(int staticKernelIndex)
{
#ifdef KERNELTRACE
    cudaDeviceSynchronize();
#endif
    // Bug fix: the original declared long long resultArray[MAX_THREAD_NUMBER]
    // (~8 MB) on the stack — well past typical stack limits. Allocate on the
    // heap instead; calloc also gives the zero-fill the original relied on.
    long long *resultArray = (long long *)calloc(MAX_THREAD_NUMBER, sizeof(long long));
    if (resultArray == NULL) {
        fprintf(stderr, "bambooLogKernelEnd: out of memory\n");
        return;
    }
    cudaMemcpyFromSymbol(resultArray, counterArray, MAX_THREAD_NUMBER * sizeof(long long), 0, cudaMemcpyDeviceToHost);

    // Open the profile once instead of once per non-zero entry.
    FILE *profileFile = fopen("bamboo.profile.txt", "a");
    for (long long i = 0; i < MAX_THREAD_NUMBER; i++) {
        if (resultArray[i] != 0) {
            //printf(" -- index %lld -- counter %lld --\n", i, resultArray[i]);
            if (profileFile != NULL) {
                fprintf(profileFile, " -- threadIndex %lld -- instCount %lld -- dynamicKernelIndex %lld -- staticKernelIndex %d -- \n", i, resultArray[i], dynamicKernelIndex, staticKernelIndex);
            }
        }
    }
    if (profileFile != NULL) {
        fclose(profileFile);
    }

    // Reset the device counters for the next kernel.
    memset(resultArray, 0, MAX_THREAD_NUMBER * sizeof(long long));
    cudaMemcpyToSymbol(counterArray, resultArray, MAX_THREAD_NUMBER * sizeof(long long), 0, cudaMemcpyHostToDevice);
    free(resultArray);
    dynamicKernelIndex++;
}
14,434
#include <stdio.h>
#include <stdlib.h>
#include "kernels.cu"

// Fill the nx*ny*nz grid with the constant value 3, linearized in (z, y, x)
// order. Always returns 0.
static int read_data(float *A0, int nx,int ny,int nz)
{
    int s=0;
    for(int i=0;i<nz;i++)
    {
        for(int j=0;j<ny;j++)
        {
            for(int k=0;k<nx;k++)
            {
                A0[s] = 3;
                s++;
            }
        }
    }
    return 0;
}

// Driver: parse grid dimensions and iteration count from argv, initialize the
// grid on the device, then run `iteration` steps of the stencil kernel
// block2D_hybrid_coarsen_x (defined in kernels.cu), ping-ponging the two
// device buffers between steps.
int main(int argc, char** argv)
{
    int nx,ny,nz;
    int size;
    int iteration;
    // stencil coefficients: centre weight and neighbour weight
    float c0=1.0f/6.0f;
    float c1=1.0f/6.0f/6.0f;

    if (argc<5)
    {
        printf("Usage: probe nx ny nz tx ty t\n"
               "nx: the grid size x\n"
               "ny: the grid size y\n"
               "nz: the grid size z\n"
               "t: the iteration time\n");
        return -1;
    }

    nx = atoi(argv[1]);
    if (nx<1) return -1;
    ny = atoi(argv[2]);
    if (ny<1) return -1;
    nz = atoi(argv[3]);
    if (nz<1) return -1;
    iteration = atoi(argv[4]);
    if(iteration<1) return -1;

    float *h_A0;
    float *h_Anext;
    float *d_A0;
    float *d_Anext;
    size=nx*ny*nz;

    h_A0=(float*)malloc(sizeof(float)*size);
    h_Anext=(float*)malloc(sizeof(float)*size);
    read_data(h_A0, nx,ny,nz);

    cudaMalloc((void **)&d_A0, size*sizeof(float));
    cudaMalloc((void **)&d_Anext, size*sizeof(float));
    cudaMemset(d_Anext,0,size*sizeof(float));
    cudaMemcpy(d_A0, h_A0, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Anext, d_A0, size*sizeof(float), cudaMemcpyDeviceToDevice);

    // 2x thread coarsening along X: each block covers tx*2 columns, hence the
    // grid width divides by tx*2 and shared memory holds tx*2*ty floats.
    int tx=32;
    int ty=4;
    dim3 block (tx, ty, 1);
    dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
    int sh_size = tx*2*ty*sizeof(float);

    for(int t=0;t<iteration;t++)
    {
        block2D_hybrid_coarsen_x<<<grid, block,sh_size>>>(c0,c1, d_A0, d_Anext, nx, ny, nz);
        // ping-pong: the freshly written buffer becomes next step's input
        float *d_temp = d_A0;
        d_A0 = d_Anext;
        d_Anext = d_temp;
    }
    // one extra swap so d_Anext points at the last-written buffer before copy-out
    float *d_temp = d_A0;
    d_A0 = d_Anext;
    d_Anext = d_temp;

    cudaMemcpy(h_Anext, d_Anext,size*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_A0);
    cudaFree(d_Anext);
    free (h_A0);
    free (h_Anext);
    return 0;
}
14,435
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>

// 9 blocks, each with N/2 threads.
// Column-wise minimum over N 3x3 matrices stored interleaved: element (r,c)
// of matrix m lives at S[m*9 + r*3 + c], so block bId reduces the stride-9
// sequence S[bId], S[9+bId], ... in place and writes the winner to min[bId].
// NOTE(review): the reduction mutates S and assumes a single block per cell
// (no cross-block sharing) — the 9 blocks touch disjoint stride-9 slices.
__global__ void findMin(int N, int *S, int *min)
{
    int tId = threadIdx.x; // thread ID
    int bId = blockIdx.x;  // block ID
    // roundAnt tracks the element count of the previous round so an odd
    // leftover element can be folded in by thread 0.
    int roundAnt = N;
    // Each round every thread has a "partner" in the other half of the round;
    // this way every element gets checked.
    for(unsigned int round=N/2; round>0; round/=2, roundAnt/=2)
    {
        if(tId < round)
        {
            if(S[tId*9 + bId] > S[(tId+round)*9+bId]) S[tId*9+bId] = S[(tId+round)*9+bId];
        }
        // odd count: fold the orphan last element into slot 0
        if(tId == 0 && roundAnt%2 != 0){
            if(S[bId] > S[(roundAnt-1)*9+bId]) S[bId] = S[(roundAnt-1)*9+bId];
        }
        __syncthreads();
    }
    if(tId == 0) min[bId] = S[bId];
}

// Reads `numMatrix` 3x3 matrices (each preceded by a label token) from the
// file named in argv[1] into unified memory, launches findMin (one block per
// matrix cell), and prints the 3x3 matrix of per-cell minima.
int main(int argc, char **argv)
{
    FILE *lista;
    int numMatrix, i;
    int *S;   // N 3x3 matrices, flattened
    int *min; // per-cell minima (3x3)
    int threadsPerBlock, blockNum;
    char aux[3]; // label token preceding each matrix in the input file

    // initialization
    if(argc != 2)
    {
        printf("Número errado de argumentos!!\n");
        return 0;
    }
    lista = fopen(argv[1], "r");
    fscanf(lista, "%d", &numMatrix);
    cudaMallocManaged(&S, 9*numMatrix*sizeof(int));
    cudaMallocManaged(&min, 9*sizeof(int));
    for(i=0;i<numMatrix*9; i+=9)
    {
        fscanf(lista, "%s", aux);
        fscanf(lista, "%d %d %d", &S[i], &S[i+1], &S[i+2]);
        fscanf(lista, "%d %d %d", &S[i+3], &S[i+1+3], &S[i+2+3]);
        fscanf(lista, "%d %d %d", &S[i+6], &S[i+1+6], &S[i+2+6]);
    }

    // execution
    threadsPerBlock = numMatrix/2;
    blockNum = 9;
    // TO RUN ON THE LINUX NETWORK
    //cudaSetDevice(0);
    findMin<<< blockNum, threadsPerBlock >>>(numMatrix, S, min);
    cudaDeviceSynchronize();

    printf("%d %d %d\n", min[0], min[1], min[2]);
    printf("%d %d %d\n", min[3], min[4], min[5]);
    printf("%d %d %d\n", min[6], min[7], min[8]);

    // cleanup
    cudaFree(S);
    cudaFree(min);
    return 0;
}
14,436
#include <vector>
#include <cstring> // memset
using std::vector;

// Rotation-by-theta sampling kernel: each thread computes one output pixel by
// sampling the texture at the rotated, normalized coordinate (rotation about
// the image centre; wrap addressing + linear filtering come from the texture
// object set up by the host).
__global__ void transformKernel(float* output,
                                cudaTextureObject_t texObj,
                                int width, int height,
                                float theta)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Bug fix: the grid is rounded up to whole blocks (e.g. 1080 rows / 16 is
    // not integral), so without this guard edge threads wrote past `output`.
    if (x >= (unsigned int)width || y >= (unsigned int)height)
        return;

    // Calculate normalized texture coordinates
    float u = x / (float)width;
    float v = y / (float)height;

    // Transform coordinates (rotate about the centre)
    u -= 0.5f;
    v -= 0.5f;
    float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
    float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;

    // Read from texture and write to global memory
    output[y * width + x] = tex2D<float>(texObj, tu, tv);
}

// Host code: builds a float CUDA array + texture object, rotates the (zero)
// test image by ~pi, and tears everything down.
int main()
{
    int width = 1920;
    int height = 1080;
    float angle = 3.14f;
    vector<float> h_data(width * height);

    // Allocate CUDA array in device memory
    cudaChannelFormatDesc channelDesc =
        cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaArray* cuArray;
    cudaMallocArray(&cuArray, &channelDesc, width, height);

    // Copy host data into the array. cudaMemcpyToArray is deprecated; use the
    // 2D copy with an explicit source pitch (rows are tightly packed here).
    size_t spitch = width * sizeof(float);
    cudaMemcpy2DToArray(cuArray, 0, 0, h_data.data(), spitch, spitch, height,
                        cudaMemcpyHostToDevice);

    // Specify texture
    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = cuArray;

    // Specify texture object parameters
    struct cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeWrap;
    texDesc.addressMode[1] = cudaAddressModeWrap;
    texDesc.filterMode = cudaFilterModeLinear;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;

    // Create texture object
    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);

    // Allocate result of transformation in device memory
    float* output;
    cudaMalloc(&output, width * height * sizeof(float));

    // Invoke kernel
    dim3 dimBlock(16, 16);
    dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
                 (height + dimBlock.y - 1) / dimBlock.y);
    transformKernel<<<dimGrid, dimBlock>>>(output, texObj, width, height, angle);

    // Bug fix: wait for the asynchronous launch before destroying the texture
    // object and freeing the array the kernel is still reading.
    cudaDeviceSynchronize();

    // Destroy texture object
    cudaDestroyTextureObject(texObj);

    // Free device memory
    cudaFreeArray(cuArray);
    cudaFree(output);

    return 0;
}
14,437
#include <stdio.h>

#define TPB 256 // threads per block
#define B 1     // blocks in the grid

// Each thread prints one greeting tagged with its index within the block.
__global__ void hello()
{
    printf("Hello World ! My thread id is %2d \n",threadIdx.x);
}

// Launch B block(s) of TPB threads, then synchronize so the device-side
// printf buffer is flushed before the process exits.
int main()
{
    hello<<<B,TPB>>>();
    cudaDeviceSynchronize();
    return 0;
}
14,438
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>

#define THREADS 1024
#define SIZE (THREADS*250000)

// Statically declared device-global arrays exercised by the benchmark.
__device__ float device_array0[SIZE];
__device__ float device_array1[SIZE];

// Each thread bumps its element of device_array0 by one and folds the running
// value into device_array1.
__global__ void devmem_kernel(){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    device_array0[gid] += 1.0f;
    device_array1[gid] += device_array0[gid];
}

// Seconds elapsed between two gettimeofday() samples.
static float elapsed(struct timeval tv0,struct timeval tv1){
    const float secs  = (float)(tv1.tv_sec  - tv0.tv_sec);
    const float usecs = (float)(tv1.tv_usec - tv0.tv_usec);
    return secs + usecs * 0.000001f;
}

// Benchmark __device__ global arrays: zero both via cudaMemcpyToSymbol, run
// the accumulation kernel `iterations` times, read back device_array1 with
// cudaMemcpyFromSymbol and check each element equals 1+2+...+iterations.
int main(){
    struct timeval t0,t1;
    gettimeofday(&t0,NULL);
    printf("Vector SIZE : %d[Mbyte]\n",sizeof(float)*SIZE >> 20);

    float *host_buf = (float*)malloc(sizeof(float)*SIZE);
    const int iterations = 2000;
    cudaError_t status;

    dim3 threads (THREADS,1,1);
    dim3 blocks (SIZE/THREADS,1,1);

    for(int i = 0 ; i < SIZE ; i ++)
        host_buf[i] = 0.0f;

    status = cudaMemcpyToSymbol(device_array0,host_buf,sizeof(float)*SIZE,0,cudaMemcpyHostToDevice);
    status = cudaMemcpyToSymbol(device_array1,host_buf,sizeof(float)*SIZE,0,cudaMemcpyHostToDevice);
    printf("cudaMemcpyToSymbol(%d)\n",status);

    for(int i = 0 ; i < iterations ; i ++){
        devmem_kernel<<<blocks,threads>>>();
        cudaDeviceSynchronize();
    }

    status = cudaMemcpyFromSymbol(host_buf,device_array1,sizeof(float)*SIZE,0,cudaMemcpyDeviceToHost);
    printf("cudaMemcpyFromSymbol(%d)\n",status);

    // After k kernel calls device_array0 holds k, so device_array1 holds the
    // arithmetic series 1+2+...+iterations.
    const float expected = (float)(((iterations+1)*iterations)/2.0f);
    int pass = 1;
    for(int i = 0 ; i < SIZE ; i ++){
        if(host_buf[i] != expected){
            pass = 0;
            printf("result[%d] : %f\n",i,host_buf[i]);
            break;
        }
    }
    if(pass){
        printf("Result test : PASS\n");
    }else{
        printf("Result test : Failed\n");
    }

    gettimeofday(&t1,NULL);
    printf("TIME RESULT : %f[sec](DEV MEM)\n",elapsed(t0,t1));
    return 0;
}
14,439
/*
 * purpose:     CUDA managed unified memory for >= pascal architectures;
 *              this version just uses cudaMallocManaged() on the host,
 *              then runs kernels on the GPU to add together two arrays
 *              of size 1 GB and save the results into a third array;
 *              n.b. here we want to again separate the initialization
 *              stage but then run the actual calculation in a
 *              loop over 100 iterations to get a little better
 *              statistics from the profiler
 * result:      from profiling via 'nvprof ./a.out' we now see greatly
 *              improved compute performance and much better memory
 *              bandwidth at again almost identical page fault statistics
 * compilation: nvcc ./unified_memory_example_3.cu
 * usage:       ./a.out
 */
#include <stdio.h>

#define ARRAYDIM 268435456

/*
 * GPU kernel doing the initialization: x[i] = i, y[i] = i + 1.
 * (z is unused here; the signature is kept uniform with KrnlDmmyCalc.)
 */
__global__ void KrnlDmmyInit(float *x, float *y, float *z)
{
   int i;

   i = (blockIdx.x * blockDim.x) + threadIdx.x;
   x[i] = (float) i;
   y[i] = (float) (i + 1);

   return;
}

/*
 * GPU kernel doing the calculation, ie adding together two arrays
 */
__global__ void KrnlDmmyCalc(float *x, float *y, float *z)
{
   int i;

   i = (blockIdx.x * blockDim.x) + threadIdx.x;
   z[i] = x[i] + y[i];

   return;
}

/*
 * host main
 */
int main()
{
   int i, cudaRtrn;
   dim3 thrds_per_block, blcks_per_grid;
   float *a, *b, *c;

   /*
    * Let us make use of cudaMallocManaged() to allocate 3 arrays
    * of size 1 GB each for subsequent usage on the GPU.
    *
    * Bug fix: the original wrote `if (cudaRtrn = call() != 0)`, but `!=`
    * binds tighter than `=`, so cudaRtrn received the *comparison result*
    * and the intended error code was lost. The assignment is now
    * parenthesized so cudaRtrn holds the actual return status.
    */
   if ((cudaRtrn = cudaMallocManaged(&a, ARRAYDIM * sizeof(float))) != 0) {
      printf("*** allocation failed for array a[], %d ***\n", cudaRtrn);
   }
   if ((cudaRtrn = cudaMallocManaged(&b, ARRAYDIM * sizeof(float))) != 0) {
      printf("*** allocation failed for array b[], %d ***\n", cudaRtrn);
   }
   if ((cudaRtrn = cudaMallocManaged(&c, ARRAYDIM * sizeof(float))) != 0) {
      printf("*** allocation failed for array c[], %d ***\n", cudaRtrn);
   }

   /*
    * next we want to call simple kernels that (i) initialize array
    * elements a[] and b[] with thread-specific values and (ii) add
    * together these values and store back the results into array c[]
    * the latter within a loop over 100 iterations
    */
   thrds_per_block.x = 256;
   blcks_per_grid.x = ARRAYDIM / thrds_per_block.x;
   KrnlDmmyInit<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
   cudaDeviceSynchronize();
   for (i=0; i<100; i++) {
      KrnlDmmyCalc<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
      cudaDeviceSynchronize();
   }

   cudaFree(c);
   cudaFree(b);
   cudaFree(a);

   return(0);
}
14,440
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdlib.h>

// Tiled GEMM kernel defined elsewhere; each launch computes one ROW_A/4 slice.
__global__ void gpuMatMul(float * A, float * B, float *C, int ROW_A, int COL_A, int COL_B);

// Multiply A (ROW_A x COL_A) by B (COL_A x COL_B) into C across 4 GPUs:
// device d computes rows [d*ROW_A/4, (d+1)*ROW_A/4) of C against a full
// replica of B. Assumes ROW_A is divisible by 4 and COL_B and ROW_A/4 are
// divisible by 16 (the block size) — TODO confirm at call sites.
void mat_mul_cuda_multi(float *A, float *B, float *C, int ROW_A, int COL_A, int COL_B)
{
    /******************** TODO *********************/
    float *d_A[4], *d_B[4], *d_C[4];
    for (int device =0; device < 4; device++){
        cudaSetDevice(device); // select the CUDA device used by the current context
        cudaMalloc(&d_A[device], sizeof(float)*(ROW_A/4)*COL_A); // cudaMalloc allocates on the currently selected device
        cudaMalloc(&d_B[device], sizeof(float)*(COL_A)*COL_B);
        cudaMalloc(&d_C[device], sizeof(float)*(ROW_A/4)*COL_B);
        // each device gets its row slice of A and a full copy of B
        cudaMemcpy(d_A[device], A+(ROW_A/4)*COL_A*device, sizeof(float)*(ROW_A/4)*COL_A, cudaMemcpyHostToDevice);
        cudaMemcpy(d_B[device], B, sizeof(float)*(COL_A)*COL_B, cudaMemcpyHostToDevice);
    }

    // one 16x16 thread block per 16x16 tile of the slice
    dim3 grid(COL_B/16, (ROW_A/4)/16);
    dim3 block(16,16);

    for(int device = 0; device <4; device++){
        // use cudaSetDevice before the kernel launch so it runs on that device
        cudaSetDevice(device);
        gpuMatMul <<< grid, block >>>(d_A[device],d_B[device],d_C[device], ROW_A, COL_A, COL_B);
    }

    // the blocking cudaMemcpy implicitly waits for each device's kernel
    for(int device =0 ; device <4; device++){
        cudaSetDevice(device);
        cudaMemcpy(C+(ROW_A/4)*COL_B*device, d_C[device], sizeof(float)*(ROW_A/4)*COL_B, cudaMemcpyDeviceToHost);
    }

    for (int device=0; device <4; device++){
        cudaFree(d_A[device]);
        cudaFree(d_B[device]);
        cudaFree(d_C[device]); // free device memory
    }
}
14,441
#include <iostream>
#include <stdio.h>
#include <math.h>

#define ni 25088
#define nn 4096
#define ti 32
#define tn 32

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

// Fill a[0..N) with pseudo-random ints (rand(), unseeded).
void random_ints(int* a, int N)
{
   int i;
   for (i = 0; i < N; i++) {
      a[i] = rand();
   }
}

// Zero-fill a[0..N).
void zeros(int* a, int N)
{
   int i;
   for (i = 0; i < N; i++) {
      a[i] = 0;
   }
}

// CURRENT MEMORY PERFORMANCE = 20.85 MB/s
// perform 1 tile of the matrix-vector multiply (subset of input, subset of weights matrix)
// the dimensions of the weights matrix are (ni, nn); the full input vector has
// dimension ni and the full output vector dimension nn (fully-connected layer).
// Uses shared memory as a scratchpad for the input slice and kernel sub-matrix.
// NOTE(review): the tile/index arithmetic (`help`, `k_index`) looks suspect —
// e.g. `help * (tn + ti)` mixes tile counts with element offsets; verify
// against the intended tiling before trusting numeric results.
__global__ void matrix_vector_mult(int *inp, int *outp, int *kern)
{
    // scratchpad memory shared within the block
    __shared__ int temp_inp[ti];        // partial input vector
    __shared__ int temp_kern[ti * tn];  // partial kernel matrix

    // populate shared data structures
    int help = (blockIdx.x % (ni/ti));
    if (threadIdx.x % ti == 0)
    {
        int hold = threadIdx.x / ti;
        int i_index = help + hold;
        temp_inp[hold] = inp[i_index];
    }
    // 1) first element of the submatrix 2) desired row 3) desired element
    int k_index = (help * (tn + ti) + (threadIdx.x % ti) * ti + (threadIdx.x % tn));
    temp_kern[threadIdx.x] = kern[k_index];
    __syncthreads(); // sync all threads to this point

    // populate output
    int n_index = (blockIdx.x % (nn/tn)) + (threadIdx.x % ti);
    // Bug fix: many threads (within a block and across blocks) map to the
    // same n_index, so the plain `+=` was a data race. atomicAdd serializes
    // the accumulation correctly.
    atomicAdd(&outp[n_index], temp_kern[threadIdx.x] * temp_inp[threadIdx.x % ti]);
}

// Driver: allocate host/device buffers, fill inputs with random data, launch
// one block per (ti x tn) tile, and copy the output vector back.
int main(void)
{
    // declare host + device pointers
    int *inp, *outp, *kern;
    int *d_inp, *d_outp, *d_kern;

    // compute array sizes
    int i_size = ni;
    int o_size = nn;
    int k_size = nn*ni;

    // allocate space for each array on the device
    gpuErrchk( cudaMalloc(&d_inp, i_size*sizeof(int)) );
    gpuErrchk( cudaMalloc(&d_outp, o_size*sizeof(int)) );
    gpuErrchk( cudaMalloc(&d_kern, k_size*sizeof(int)) );

    // allocate space and populate each array on the host
    inp = (int*)malloc(i_size*sizeof(int));
    outp = (int*)malloc(o_size*sizeof(int));
    kern = (int*)malloc(k_size*sizeof(int));
    random_ints(inp, i_size);
    zeros(outp, o_size);
    random_ints(kern, k_size);

    // copy populated host arrays to corresponding device arrays
    gpuErrchk( cudaMemcpy(d_inp, inp, i_size*sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_outp, outp, o_size*sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_kern, kern, k_size*sizeof(int), cudaMemcpyHostToDevice) );

    // launch all threads on device
    // # blocks = # submatrices (tiles); # threads/block = # elements per tile
    matrix_vector_mult<<<(ni*nn)/(ti*tn), ti*tn>>>(d_inp, d_outp, d_kern);

    // determine if run succeeded
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    // copy output array back to host
    // Bug fix: the original copied only o_size BYTES (a quarter of the
    // vector); the byte count must be o_size * sizeof(int).
    gpuErrchk( cudaMemcpy(outp, d_outp, o_size*sizeof(int), cudaMemcpyDeviceToHost) );

    // free all memory
    free(inp);
    free(outp);
    free(kern);
    gpuErrchk( cudaFree(d_inp) );
    gpuErrchk( cudaFree(d_outp) );
    gpuErrchk( cudaFree(d_kern) );

    return 0;
}
14,442
#include <iostream>
#include <chrono>

using namespace std;
using namespace std::chrono;

#define BLOCKSIZE 16
#define N 1024

// res = mat * vec for an N x N row-major matrix: one thread accumulates one
// output element by walking its matrix row. The launch uses 2D blocks of
// BLOCKSIZE x BLOCKSIZE threads laid out on a (1, gridDim.y) grid, hence the
// flattened index below.
__global__ void gpu_mat_vec_multiply(double *device_mat, double *device_vec, double *device_res){
    int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    int tidy = blockIdx.y * blockDim.y + threadIdx.y;
    int tindex = tidx + gridDim.x * BLOCKSIZE * tidy;

    if(tindex < N){
        int m = tindex * N; // first element of this thread's matrix row
        device_res[tindex] = 0.0;
        for(int i = 0; i < N; i++){
            device_res[tindex] += device_mat[m + i] * device_vec[i];
        }
    }
    // (removed: an unused shadowed `int i;` declaration and a trailing
    // __syncthreads() — no shared memory is used, so no barrier is needed)
}

// Driver: fill a random N x N matrix and N-vector, multiply on the device,
// and copy the result back. Resources are released before exit.
int main(){
    double *host_mat, *host_vec, *host_res;
    double *device_mat, *device_vec, *device_res;

    host_mat = new double[N * N];
    host_vec = new double[N];
    host_res = new double[N];

    for(int i = 0; i < N; i++){
        host_vec[i] = double(rand()%100);
        for(int j = 0; j < N; j++){
            host_mat[i * N + j] = double(rand()%40);
        }
    }

    cudaMalloc(&device_mat, (N*N)*sizeof(double));
    cudaMalloc(&device_vec, N*sizeof(double));
    cudaMalloc(&device_res, N*sizeof(double));

    cudaMemcpy(device_mat, host_mat, (N*N)*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_vec, host_vec, N*sizeof(double), cudaMemcpyHostToDevice);

    // ceil-division replaces the original "+1 then conditionally -1" dance
    int threadsPerBlock = BLOCKSIZE * BLOCKSIZE;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
    dim3 dimGrid(1, blocksPerGrid);

    gpu_mat_vec_multiply<<<dimGrid, dimBlock>>>(device_mat, device_vec, device_res);

    // Bug fix: the original never retrieved the result nor released any
    // memory. The blocking copy also synchronizes with the kernel.
    cudaMemcpy(host_res, device_res, N*sizeof(double), cudaMemcpyDeviceToHost);

    cudaFree(device_mat);
    cudaFree(device_vec);
    cudaFree(device_res);
    delete[] host_mat;
    delete[] host_vec;
    delete[] host_res;
    return 0;
}
14,443
#include <iostream>
using namespace std;

// Report-and-abort wrapper for CUDA runtime calls (the original code checked
// no return values, so allocation/copy failures went unnoticed).
static void gpuCheck(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        cerr << "CUDA error (" << what << "): " << cudaGetErrorString(err) << endl;
        exit(EXIT_FAILURE);
    }
}

// Linearly rescale every channel of an interleaved RGB float image in place:
// out = new_min + (in - min_val) * scale.  One thread per pixel; each thread
// updates the pixel's three channels.
__global__ void channelScaleKernel(float *ptr, int width, int height,
                                   float min_val, float scale, float new_min)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < width && y < height) {
        int idx = 3 * (width * y + x);   // interleaved RGB: 3 floats per pixel
        ptr[idx]     = new_min + (ptr[idx]     - min_val) * scale;
        ptr[idx + 1] = new_min + (ptr[idx + 1] - min_val) * scale;
        ptr[idx + 2] = new_min + (ptr[idx + 2] - min_val) * scale;
    }
}

// Host wrapper: copies the image to the GPU, rescales it, copies it back.
// ptr points to width*height*3 host floats (interleaved RGB); it is updated
// in place.  min_val/scale/new_min define the linear remap applied per channel.
extern "C" void scaleChannels(float *ptr, int width, int height,
                              float min_val, float scale, float new_min)
{
    // BUG FIX: use size_t for the byte count — `int` overflows for images
    // larger than ~178 megapixels (width*height*3*4 > 2^31).
    size_t image_memory = (size_t)width * height * 3 * sizeof(*ptr);

    float *gpuPtr = NULL;
    gpuCheck(cudaMalloc((void**)&gpuPtr, image_memory), "cudaMalloc");
    gpuCheck(cudaMemcpy(gpuPtr, ptr, image_memory, cudaMemcpyHostToDevice), "H2D copy");

    // 16x16 thread tiles; grid rounded up so any width/height is covered
    // (the kernel bounds-checks the ragged edge).
    dim3 threads(16, 16);
    dim3 blocks((width + threads.x - 1) / threads.x,
                (height + threads.y - 1) / threads.y);
    channelScaleKernel<<<blocks, threads>>>(gpuPtr, width, height, min_val, scale, new_min);
    gpuCheck(cudaGetLastError(), "kernel launch");

    // The blocking D2H copy also synchronizes with the kernel on the default stream.
    gpuCheck(cudaMemcpy(ptr, gpuPtr, image_memory, cudaMemcpyDeviceToHost), "D2H copy");
    gpuCheck(cudaFree(gpuPtr), "cudaFree");
}
14,444
/************************************************
 *
 * Test for DynamicGlobalMemoryAllocationAndOperations function!
 * date:2018-5-16
 * a :zhonghy
 *
 * Demonstrates three in-kernel malloc/free patterns:
 *   1) per-thread allocation,
 *   2) per-block allocation shared through shared memory,
 *   3) allocations persisting between kernel launches.
 * The device heap must be sized with cudaDeviceSetLimit BEFORE
 * any kernel launch.
 *************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

#define NUM_BLOCKS 20

// Pattern 1: every thread allocates, uses and frees its own buffer.
__global__ void mallocTestPerThread()
{
    size_t size = 123;
    char *ptr = (char*)malloc(size);
    // BUG FIX: device malloc returns NULL when the heap is exhausted; the
    // original code called memset on the unchecked pointer.
    if (ptr == NULL)
        return;
    memset(ptr, 0, size);
    printf("Thread %d got pointer: %p\n", threadIdx.x, ptr);
    free(ptr);
}

// Pattern 2: one allocation per block, published via shared memory.
__global__ void mallocTestPerBlock()
{
    __shared__ int *data;

    // The first thread in the block does the allocation and then shares the
    // pointer with all other threads through shared memory so that accesses
    // can easily be coalesced.  64 ints are allocated per thread.
    if (threadIdx.x == 0) {
        size_t size = blockDim.x * 64;
        data = (int *)malloc(size);
        memset(data, 0, size);
    }
    __syncthreads();   // publish `data` to the whole block before anyone reads it

    // Check for allocation failure (all threads see the same NULL).
    if (data == NULL)
        return;

    // Threads index into the memory with a blockDim.x stride, ensuring
    // coalesced accesses.
    int *ptr = data;
    for (int i = 0; i < 64; ++i) {
        ptr[i * blockDim.x + threadIdx.x] = threadIdx.x;
        printf("Thread %d got pointer: %p\n", threadIdx.x, ptr);
        printf("\n");
    }

    // Ensure all threads are done with the buffer before freeing it.
    __syncthreads();

    // Only one thread may free the memory.
    if (threadIdx.x == 0)
        free(ptr);
}

// Pattern 3: allocations persisting between kernel launches.
// One heap allocation per block, remembered in device-global storage.
__device__ int* dataptr[NUM_BLOCKS];

// Allocate one buffer per block (done by the block's first thread only).
__global__ void allocmem()
{
    if (threadIdx.x == 0)
        dataptr[blockIdx.x] = (int *)malloc(blockDim.x * sizeof(int));
    __syncthreads();   // make the pointer visible to the whole block

    // Check for allocation failure.
    if (dataptr[blockIdx.x] == NULL)
        return;

    // Zero the data with all threads in parallel.
    dataptr[blockIdx.x][threadIdx.x] = 0;
}

// Simple example: accumulate the thread ID into each element.
__global__ void usemem()
{
    int *ptr = dataptr[blockIdx.x];
    if (ptr != NULL)
        ptr[threadIdx.x] += threadIdx.x;
}

// Print the content of the buffer before freeing it.
__global__ void freemem()
{
    int *ptr = dataptr[blockIdx.x];
    if (ptr != NULL)
        printf("Block %d, Thread %d: final value = %d\n",
               blockIdx.x, threadIdx.x, ptr[threadIdx.x]);

    // Only free from one thread!
    if (threadIdx.x == 0)
        free(ptr);
}

int main()
{
    /*per thread*/
    // //Set a heap size of 128 megabytes. Note that this must
    // //be done before any kernel is launched
    //cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
    //mallocTestPerThread<<<1, 5>>>();
    //cudaDeviceSynchronize();

    /*per block*/
    //cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
    //mallocTestPerBlock<<<10, 128>>>();
    //cudaDeviceSynchronize();

    /*Allocation Persisting Between Kernel Launches*/
    // Size the device heap (128 MB) before the first launch.
    cudaError_t err = cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSetLimit failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }

    // Allocate memory
    allocmem<<< NUM_BLOCKS, 10 >>>();

    // Use memory (each launch adds threadIdx.x once; values end at 3*threadIdx.x)
    usemem<<< NUM_BLOCKS, 10 >>>();
    usemem<<< NUM_BLOCKS, 10 >>>();
    usemem<<< NUM_BLOCKS, 10 >>>();

    // Free memory
    freemem<<< NUM_BLOCKS, 10 >>>();

    // BUG FIX: check the synchronize result so in-kernel faults are reported
    // instead of silently dropped.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel execution failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
14,445
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3) { for (int i=2; i<=N-3; i++) { double _t_4_; double _t_5_; double _t_2_; double _t_0_; double _t_9_; double _t_10_; double _t_16_; double _t_17_; double _t_14_; double _t_21_; double _t_22_; double _t_29_; double _t_30_; double _t_27_; double _t_34_; double _t_35_; double _t_41_; double _t_42_; double _t_39_; double _t_46_; double _t_47_; double 
r1ic0jc0kc0 = r1[i][j][k]; double _t_58_; double _t_55_; double _t_56_; double _t_53_; double _t_62_; double _t_63_; double _t_67_; double _t_68_; double _t_76_; double _t_73_; double _t_74_; double _t_80_; double _t_81_; double _t_85_; double _t_86_; double _t_51_; double _t_95_; double _t_92_; double _t_93_; double _t_90_; double _t_99_; double _t_100_; double _t_104_; double _t_105_; double _t_113_; double _t_110_; double _t_111_; double _t_117_; double _t_118_; double _t_122_; double _t_123_; double _t_134_; double _t_131_; double _t_132_; double _t_129_; double _t_127_; double _t_137_; double _t_138_; double _t_143_; double _t_144_; double _t_142_; double _t_153_; double _t_150_; double _t_151_; double _t_148_; double _t_156_; double _t_157_; double _t_162_; double _t_163_; double _t_161_; double _t_173_; double _t_170_; double _t_171_; double _t_168_; double _t_176_; double _t_177_; double _t_182_; double _t_183_; double _t_181_; double _t_192_; double _t_189_; double _t_190_; double _t_187_; double _t_195_; double _t_196_; double _t_201_; double _t_202_; double _t_200_; _t_4_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2]; _t_5_ = c2 * u1[i][j+2][k+2]; _t_5_ -= c2 * u1[i][j-2][k+2]; _t_5_ += c1 * u1[i][j+1][k+2]; _t_5_ -= c1 * u1[i][j-1][k+2]; _t_2_ = stry[j+2] * _t_4_ * _t_5_; _t_0_ = c2 * _t_2_ * strx[i]; _t_9_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2]; _t_10_ = c2 * u2[i][j+2][k+2]; _t_10_ -= c2 * u2[i][j-2][k+2]; _t_10_ += c1 * u2[i][j+1][k+2]; _t_10_ -= c1 * u2[i][j-1][k+2]; _t_0_ += c2 * _t_9_ * _t_10_; _t_16_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2]; _t_17_ = c2 * u1[i][j+2][k-2]; _t_17_ -= c2 * u1[i][j-2][k-2]; _t_17_ += c1 * u1[i][j+1][k-2]; _t_17_ -= c1 * u1[i][j-1][k-2]; _t_14_ = stry[j] * _t_16_ * _t_17_; _t_0_ += c2 * _t_14_ * strx[i]; _t_21_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2]; _t_22_ = c2 * u2[i][j+2][k-2]; _t_22_ -= c2 * u2[i][j-2][k-2]; _t_22_ += c1 * u2[i][j+1][k-2]; _t_22_ -= c1 * u2[i][j-1][k-2]; 
_t_0_ += c2 * _t_21_ * _t_22_; _t_29_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1]; _t_30_ = c2 * u1[i][j+2][k+1]; _t_30_ -= c2 * u1[i][j-2][k+1]; _t_30_ += c1 * u1[i][j+1][k+1]; _t_30_ -= c1 * u1[i][j-1][k+1]; _t_27_ = stry[j-2] * _t_29_ * _t_30_; _t_0_ += c1 * _t_27_ * strx[i]; _t_34_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1]; _t_35_ = c2 * u2[i][j+2][k+1]; _t_35_ -= c2 * u2[i][j-2][k+1]; _t_35_ += c1 * u2[i][j+1][k+1]; _t_35_ -= c1 * u2[i][j-1][k+1]; _t_0_ += c1 * _t_34_ * _t_35_; _t_41_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1]; _t_42_ = c2 * u1[i][j+2][k-1]; _t_42_ -= c2 * u1[i][j-2][k-1]; _t_42_ += c1 * u1[i][j+1][k-1]; _t_42_ -= c1 * u1[i][j-1][k-1]; _t_39_ = stry[j] * _t_41_ * _t_42_; _t_0_ += c1 * _t_39_ * strx[i]; _t_46_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1]; _t_47_ = c2 * u2[i][j+2][k-1]; _t_47_ -= c2 * u2[i][j-2][k-1]; _t_47_ += c1 * u2[i][j+1][k-1]; _t_47_ -= c1 * u2[i][j-1][k-1]; _t_0_ += c1 * _t_46_ * _t_47_; r1ic0jc0kc0 += _t_0_; _t_58_ = 2.0 * mu[i+2][j][k]; _t_58_ += la[i+2][j][k]; _t_55_ = met1[i+2][j][k] * _t_58_ * met2[i+2][j][k]; _t_56_ = c2 * u1[i+2][j][k+2]; _t_56_ -= c2 * u1[i+2][j][k-2]; _t_56_ += c1 * u1[i+2][j][k+1]; _t_56_ -= c1 * u1[i+2][j][k-1]; _t_53_ = strx[i] * _t_55_ * _t_56_; _t_62_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k]; _t_63_ = c2 * u2[i+2][j][k+2]; _t_63_ -= c2 * u2[i+2][j][k-2]; _t_63_ += c1 * u2[i+2][j][k+1]; _t_63_ -= c1 * u2[i+2][j][k-1]; _t_53_ += stry[j] * _t_62_ * _t_63_; _t_67_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k]; _t_68_ = c2 * u3[i+2][j][k+2]; _t_68_ -= c2 * u3[i+2][j][k-2]; _t_68_ += c1 * u3[i+2][j][k+1]; _t_68_ -= c1 * u3[i+2][j][k-1]; _t_53_ += _t_67_ * _t_68_; _t_76_ = 2.0 * mu[i-2][j][k]; _t_76_ += la[i-2][j][k]; _t_73_ = met1[i-2][j][k] * _t_76_ * met2[i-2][j][k]; _t_74_ = c2 * u1[i-2][j][k+2]; _t_74_ -= c2 * u1[i-2][j][k-2]; _t_74_ += c1 * u1[i-2][j][k+1]; _t_74_ -= c1 * u1[i-2][j][k-1]; _t_53_ += strx[i] * _t_73_ * _t_74_; _t_80_ = 
met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k]; _t_81_ = c2 * u2[i-2][j][k+2]; _t_81_ -= c2 * u2[i-2][j][k-2]; _t_81_ += c1 * u2[i-2][j][k+1]; _t_81_ -= c1 * u2[i-2][j][k-1]; _t_53_ += stry[j] * _t_80_ * _t_81_; _t_85_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k]; _t_86_ = c2 * u3[i-2][j][k+2]; _t_86_ -= c2 * u3[i-2][j][k-2]; _t_86_ += c1 * u3[i-2][j][k+1]; _t_86_ -= c1 * u3[i-2][j][k-1]; _t_53_ += _t_85_ * _t_86_; _t_51_ = stry[j] * c2 * _t_53_; _t_95_ = 2.0 * mu[i+1][j][k]; _t_95_ += la[i+1][j][k]; _t_92_ = met1[i+1][j][k] * _t_95_ * met2[i+1][j][k]; _t_93_ = c2 * u1[i+1][j][k+2]; _t_93_ -= c2 * u1[i+1][j][k-2]; _t_93_ += c1 * u1[i+1][j][k+1]; _t_93_ -= c1 * u1[i+1][j][k-1]; _t_90_ = strx[i] * _t_92_ * _t_93_; _t_99_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k]; _t_100_ = c2 * u2[i+1][j][k+2]; _t_100_ -= c2 * u2[i+1][j][k-2]; _t_100_ += c1 * u2[i+1][j][k+1]; _t_100_ -= c1 * u2[i+1][j][k-1]; _t_90_ += stry[j] * _t_99_ * _t_100_; _t_104_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k]; _t_105_ = c2 * u3[i+1][j][k+2]; _t_105_ -= c2 * u3[i+1][j][k-2]; _t_105_ += c1 * u3[i+1][j][k+1]; _t_105_ -= c1 * u3[i+1][j][k-1]; _t_90_ += _t_104_ * _t_105_; _t_113_ = 2.0 * mu[i-1][j][k]; _t_113_ += la[i-1][j][k]; _t_110_ = met1[i-1][j][k] * _t_113_ * met2[i-1][j][k]; _t_111_ = c2 * u1[i-1][j][k+2]; _t_111_ -= c2 * u1[i-1][j][k-2]; _t_111_ += c1 * u1[i-1][j][k+1]; _t_111_ -= c1 * u1[i-1][j][k-1]; _t_90_ += strx[i] * _t_110_ * _t_111_; _t_117_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k]; _t_118_ = c2 * u2[i-1][j][k+2]; _t_118_ -= c2 * u2[i-1][j][k-2]; _t_118_ += c1 * u2[i-1][j][k+1]; _t_118_ -= c1 * u2[i-1][j][k-1]; _t_90_ += stry[j] * _t_117_ * _t_118_; _t_122_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k]; _t_123_ = c2 * u3[i-1][j][k+2]; _t_123_ -= c2 * u3[i-1][j][k-2]; _t_123_ += c1 * u3[i-1][j][k+1]; _t_123_ -= c1 * u3[i-1][j][k-1]; _t_90_ += _t_122_ * _t_123_; _t_51_ += stry[j] * c1 * _t_90_; r1ic0jc0kc0 += _t_51_; _t_134_ = 2.0 * 
mu[i][j][k+2]; _t_134_ += la[i][j][k+2]; _t_131_ = met1[i][j][k+2] * _t_134_ * met2[i][j][k+2]; _t_132_ = c2 * u1[i+2][j][k+2]; _t_132_ -= c2 * u1[i-2][j][k+2]; _t_132_ += c1 * u1[i+1][j][k+2]; _t_132_ -= c1 * u1[i-1][j][k+2]; _t_129_ = strx[i] * _t_131_ * _t_132_; _t_127_ = c2 * _t_129_ * stry[j]; _t_137_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2]; _t_138_ = c2 * u2[i+2][j][k+2]; _t_138_ -= c2 * u2[i-2][j][k+2]; _t_138_ += c1 * u2[i+1][j][k+2]; _t_138_ -= c1 * u2[i-1][j][k+2]; _t_127_ += c2 * _t_137_ * _t_138_; _t_143_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2]; _t_144_ = c2 * u3[i+2][j][k+2]; _t_144_ -= c2 * u3[i-2][j][k+2]; _t_144_ += c1 * u3[i+1][j][k+2]; _t_144_ -= c1 * u3[i-1][j][k+2]; _t_142_ = _t_143_ * _t_144_; _t_127_ += c2 * _t_142_ * stry[j]; _t_153_ = 2.0 * mu[i][j][k-2]; _t_153_ += la[i][j][k-2]; _t_150_ = met1[i][j][k-2] * _t_153_ * met2[i][j][k-2]; _t_151_ = c2 * u1[i+2][j][k-2]; _t_151_ -= c2 * u1[i-2][j][k-2]; _t_151_ += c1 * u1[i+1][j][k-2]; _t_151_ -= c1 * u1[i-1][j][k-2]; _t_148_ = strx[i] * _t_150_ * _t_151_; _t_127_ += c2 * _t_148_ * stry[j]; _t_156_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2]; _t_157_ = c2 * u2[i+2][j][k-2]; _t_157_ -= c2 * u2[i-2][j][k-2]; _t_157_ += c1 * u2[i+1][j][k-2]; _t_157_ -= c1 * u2[i-1][j][k-2]; _t_127_ += c2 * _t_156_ * _t_157_; _t_162_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2]; _t_163_ = c2 * u3[i+2][j][k-2]; _t_163_ -= c2 * u3[i-2][j][k-2]; _t_163_ += c1 * u3[i+1][j][k-2]; _t_163_ -= c1 * u3[i-1][j][k-2]; _t_161_ = _t_162_ * _t_163_; _t_127_ += c2 * _t_161_ * stry[j]; _t_173_ = 2.0 * mu[i][j][k+1]; _t_173_ += la[i][j][k+1]; _t_170_ = met1[i][j][k+1] * _t_173_ * met2[i][j][k+1]; _t_171_ = c2 * u1[i+2][j][k+1]; _t_171_ -= c2 * u1[i-2][j][k+1]; _t_171_ += c1 * u1[i+1][j][k+1]; _t_171_ -= c1 * u1[i-1][j][k+1]; _t_168_ = strx[i+2] * _t_170_ * _t_171_; _t_127_ += c1 * _t_168_ * stry[j]; _t_176_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1]; _t_177_ = c2 * u2[i+2][j][k+1]; 
_t_177_ -= c2 * u2[i-2][j][k+1]; _t_177_ += c1 * u2[i+1][j][k+1]; _t_177_ -= c1 * u2[i-1][j][k+1]; _t_127_ += c1 * _t_176_ * _t_177_; _t_182_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1]; _t_183_ = c2 * u3[i+2][j][k+1]; _t_183_ -= c2 * u3[i-2][j][k+1]; _t_183_ += c1 * u3[i+1][j][k+1]; _t_183_ -= c1 * u3[i-1][j][k+1]; _t_181_ = _t_182_ * _t_183_; _t_127_ += c1 * _t_181_ * stry[j]; _t_192_ = 2.0 * mu[i][j][k-1]; _t_192_ += la[i][j][k-1]; _t_189_ = met1[i][j][k-1] * _t_192_ * met2[i][j][k-1]; _t_190_ = c2 * u1[i+2][j][k-1]; _t_190_ -= c2 * u1[i-2][j][k-1]; _t_190_ += c1 * u1[i+1][j][k-1]; _t_190_ -= c1 * u1[i-1][j][k-1]; _t_187_ = strx[i-2] * _t_189_ * _t_190_; _t_127_ += c1 * _t_187_ * stry[j]; _t_195_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1]; _t_196_ = c2 * u2[i+2][j][k-1]; _t_196_ -= c2 * u2[i-2][j][k-1]; _t_196_ += c1 * u2[i+1][j][k-1]; _t_196_ -= c1 * u2[i-1][j][k-1]; _t_127_ += c1 * _t_195_ * _t_196_; _t_201_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1]; _t_202_ = c2 * u3[i+2][j][k-1]; _t_202_ -= c2 * u3[i-2][j][k-1]; _t_202_ += c1 * u3[i+1][j][k-1]; _t_202_ -= c1 * u3[i-1][j][k-1]; _t_200_ = _t_201_ * _t_202_; _t_127_ += c1 * _t_200_ * stry[j]; r1ic0jc0kc0 += _t_127_; r1[i][j][k] = r1ic0jc0kc0; } } } __global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_i= (int)(blockDim.z); int i0 = (int)(blockIdx.z)*(blockdim_i); 
int i = max (i0, 0) + (int)(threadIdx.z); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) { double _t_4_; double _t_5_; double _t_2_; double _t_0_; double _t_9_; double _t_10_; double _t_16_; double _t_17_; double _t_14_; double _t_21_; double _t_22_; double _t_29_; double _t_30_; double _t_27_; double _t_34_; double _t_35_; double _t_41_; double _t_42_; double _t_39_; double _t_46_; double _t_47_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_53_; double _t_54_; double _t_51_; double _t_58_; double _t_59_; double _t_64_; double _t_65_; double _t_69_; double _t_70_; double _t_75_; double _t_76_; double _t_80_; double _t_81_; double _t_86_; double _t_87_; double _t_91_; double _t_92_; _t_4_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k]; _t_5_ = c2 * u1[i][j+2][k+2]; _t_5_ -= c2 * u1[i][j+2][k-2]; _t_5_ += c1 * u1[i][j+2][k+1]; _t_5_ -= c1 * u1[i][j+2][k-1]; _t_2_ = stry[j+1] * _t_4_ * _t_5_; _t_0_ = c2 * _t_2_ * strx[i]; _t_9_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k]; _t_10_ = c2 * u2[i][j+2][k+2]; _t_10_ -= c2 * u2[i][j+2][k-2]; _t_10_ += c1 * u2[i][j+2][k+1]; _t_10_ -= c1 * u2[i][j+2][k-1]; _t_0_ += c2 * _t_9_ * _t_10_; _t_16_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k]; _t_17_ = c2 * u1[i][j-2][k+2]; _t_17_ -= c2 * u1[i][j-2][k-2]; _t_17_ += c1 * u1[i][j-2][k+1]; _t_17_ -= c1 * u1[i][j-2][k-1]; _t_14_ = stry[j] * _t_16_ * _t_17_; _t_0_ += c2 * _t_14_ * strx[i]; _t_21_ = met1[i][j-2][k] * 
mu[i][j-2][k] * met2[i][j-2][k]; _t_22_ = c2 * u2[i][j-2][k+2]; _t_22_ -= c2 * u2[i][j-2][k-2]; _t_22_ += c1 * u2[i][j-2][k+1]; _t_22_ -= c1 * u2[i][j-2][k-1]; _t_0_ += c2 * _t_21_ * _t_22_; _t_29_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k]; _t_30_ = c2 * u1[i][j+1][k+2]; _t_30_ -= c2 * u1[i][j+1][k-2]; _t_30_ += c1 * u1[i][j+1][k+1]; _t_30_ -= c1 * u1[i][j+1][k-1]; _t_27_ = stry[j-1] * _t_29_ * _t_30_; _t_0_ += c1 * _t_27_ * strx[i]; _t_34_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k]; _t_35_ = c2 * u2[i][j+1][k+2]; _t_35_ -= c2 * u2[i][j+1][k-2]; _t_35_ += c1 * u2[i][j+1][k+1]; _t_35_ -= c1 * u2[i][j+1][k-1]; _t_0_ += c1 * _t_34_ * _t_35_; _t_41_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k]; _t_42_ = c2 * u1[i][j-1][k+2]; _t_42_ -= c2 * u1[i][j-1][k-2]; _t_42_ += c1 * u1[i][j-1][k+1]; _t_42_ -= c1 * u1[i][j-1][k-1]; _t_39_ = stry[j] * _t_41_ * _t_42_; _t_0_ += c1 * _t_39_ * strx[i]; _t_46_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k]; _t_47_ = c2 * u2[i][j-1][k+2]; _t_47_ -= c2 * u2[i][j-1][k-2]; _t_47_ += c1 * u2[i][j-1][k+1]; _t_47_ -= c1 * u2[i][j-1][k-1]; _t_0_ += c1 * _t_46_ * _t_47_; r1ic0jc0kc0 += _t_0_; _t_53_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k]; _t_54_ = c2 * u2[i+2][j+2][k]; _t_54_ -= c2 * u2[i-2][j+2][k]; _t_54_ += c1 * u2[i+1][j+2][k]; _t_54_ -= c1 * u2[i-1][j+2][k]; _t_51_ = c2 * _t_53_ * _t_54_; _t_58_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k]; _t_59_ = c2 * u2[i+2][j-2][k]; _t_59_ -= c2 * u2[i-2][j-2][k]; _t_59_ += c1 * u2[i+1][j-2][k]; _t_59_ -= c1 * u2[i-1][j-2][k]; _t_51_ += c2 * _t_58_ * _t_59_; _t_64_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k]; _t_65_ = c2 * u2[i+2][j+1][k]; _t_65_ -= c2 * u2[i-2][j+1][k]; _t_65_ += c1 * u2[i+1][j+1][k]; _t_65_ -= c1 * u2[i-1][j+1][k]; _t_51_ += c1 * _t_64_ * _t_65_; _t_69_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k]; _t_70_ = c2 * u2[i+2][j-1][k]; _t_70_ -= c2 * u2[i-2][j-1][k]; _t_70_ += c1 * u2[i+1][j-1][k]; _t_70_ -= c1 * 
u2[i-1][j-1][k]; _t_51_ += c1 * _t_69_ * _t_70_; _t_75_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k]; _t_76_ = c2 * u2[i+2][j+2][k]; _t_76_ -= c2 * u2[i+2][j-2][k]; _t_76_ += c1 * u2[i+2][j+1][k]; _t_76_ -= c1 * u2[i+2][j-1][k]; _t_51_ += c2 * _t_75_ * _t_76_; _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k]; _t_81_ = c2 * u2[i-2][j+2][k]; _t_81_ -= c2 * u2[i-2][j-2][k]; _t_81_ += c1 * u2[i-2][j+1][k]; _t_81_ -= c1 * u2[i-2][j-1][k]; _t_51_ += c2 * _t_80_ * _t_81_; _t_86_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k]; _t_87_ = c2 * u2[i+1][j+2][k]; _t_87_ -= c2 * u2[i+1][j-2][k]; _t_87_ += c1 * u2[i+1][j+1][k]; _t_87_ -= c1 * u2[i+1][j-1][k]; _t_51_ += c1 * _t_86_ * _t_87_; _t_91_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k]; _t_92_ = c2 * u2[i-1][j+2][k]; _t_92_ -= c2 * u2[i-1][j-2][k]; _t_92_ += c1 * u2[i-1][j+1][k]; _t_92_ -= c1 * u2[i-1][j-1][k]; _t_51_ += c1 * _t_91_ * _t_92_; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0; } } extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) { double *r1; cudaMalloc (&r1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for r1\n"); cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u1; cudaMalloc (&u1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u1\n"); cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u2; cudaMalloc (&u2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u2\n"); cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u3; cudaMalloc (&u3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u3\n"); cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); 
check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met1; cudaMalloc (&met1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met1\n"); cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met2; cudaMalloc (&met2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met2\n"); cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met3; cudaMalloc (&met3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met3\n"); cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met4; cudaMalloc (&met4, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met4\n"); cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); dim3 blockconfig_1 (16, 2, 2); dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z)); curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); }
14,446
/* * Solves the Panfilov model using an explicit numerical scheme. * Based on code orginally provided by Xing Cai, Simula Research Laboratory * and reimplementation by Scott B. Baden, UCSD * * Modified and restructured by Didem Unat, Koc University * * Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU" * https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu * by Xing Cai, Didem Unat and Scott Baden * */ #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <iostream> #include <iomanip> #include <string.h> #include <math.h> #include <sys/time.h> #include <getopt.h> using namespace std; #define BLOCK_SIZE 16 #define THREAD_WORKING_SET_SIZE = 1 // External functions extern "C" void splot(double **E, double T, int niter, int m, int n); void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads); __global__ void calc(double *in, double *out, int size) { int tid = threadIdx.x; if(tid < size){ out[tid] = in [tid]; } } __global__ void copyHorizontalBoundaries(int m, int n, double* E_prev) { /* * Copy data from boundary of the computational box * to the padding region, set up for differencing * on the boundary of the computational box * Using mirror boundaries */ int row_col = (threadIdx.x + 1); E_prev[row_col * (n + 2)] = E_prev[row_col * (n + 2) + 2]; E_prev[row_col * (n + 2) + (n + 1)] = E_prev[row_col * (n + 2) + (n - 1)]; E_prev[row_col] = E_prev[2 * (n + 2) + row_col]; E_prev[(m+1) * (n + 2) + row_col] = E_prev[(m - 1) * (n + 2) + row_col]; } __global__ void pdeSolver(int m, int n, double* E_prev, double* E, double alpha) { // Solve for the excitation, the PDE int row = blockIdx.y*blockDim.y + threadIdx.y + 1; int col = blockIdx.x*blockDim.x + threadIdx.x + 1; E[row * (n + 2) + col] = E_prev[row * (n + 2) + col]+alpha*(E_prev[row * (n + 2) + (col + 1)]+ E_prev[row * (n + 2) + (col - 1)]-4*E_prev[row * (n + 2) + col]+E_prev[(row + 1) * 
(n + 2) + col]+ E_prev[(row - 1) * (n + 2) + col]); } __global__ void swap(double** E_ref, double** E_prev_ref) { for(int i = 0; i< 18; i++){ for(int j=0; j<18; j++){ printf("%f ", (*E_prev_ref)[i * 18 + j]); } printf("\n"); } double *tmp = *E_ref; *E_ref = *E_prev_ref; *E_prev_ref = tmp; for(int i = 0; i< 18; i++){ for(int j=0; j<18; j++){ printf("%f ", (*E_prev_ref)[i * 18 + j]); } printf("\n"); } } __global__ void odeSolver(int m, int n, double *E, double* R, double *E_prev, double epsilon, double M1, double M2, double kk, double dt, double a, double b, double** E_ref, double** E_prev_ref) { // // Solve the ODE, advancing excitation and recovery to the // next timtestep // /* for(int i = 0; i< 18; i++){ for(int j=0; j<18; j++){ printf("%f ", E_prev[i * 18 + j]); } printf("\n"); } */ int row = blockIdx.y*blockDim.y + threadIdx.y + 1; int col = blockIdx.x*blockDim.x + threadIdx.x + 1; E[row * (n + 2) + col] = E[row * (n + 2) + col] -dt*(kk* E[row * (n + 2) + col] * (E[row * (n + 2) + col] - a)*(E[row * (n + 2) + col]-1)+ E[row * (n + 2) + col] *R[row * (n + 2) + col]); R[row * (n + 2) + col] = R[row * (n + 2) + col] + dt*(epsilon+M1* R[row * (n + 2) + col]/ ( E[row * (n + 2) + col]+M2))*(-R[row * (n + 2) + col]-kk* E[row * (n + 2) + col] * (E[row * (n + 2) + col]-b-1)); /* //swap current E with previous E if(row == 1 and col== 1){ } //printf() for(int i = 0; i< 18; i++){ for(int j=0; j<18; j++){ printf("%f ", E_prev[i * 18 + j]); } printf("\n"); } */ } // Utilities // // Timer // Make successive calls and take a difference to get the elapsed time. 
static const double kMicro = 1.0e-6; double getTime() { struct timeval TV; struct timezone TZ; const int RC = gettimeofday(&TV, &TZ); if(RC == -1) { cerr << "ERROR: Bad call to gettimeofday" << endl; return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } // end getTime() // Allocate a 2D array double **alloc2D(int m,int n){ double **E; int nx=n, ny=m; E = (double**)malloc(sizeof(double*)*ny + sizeof(double)*nx*ny); assert(E); int j; for(j=0;j<ny;j++) E[j] = (double*)(E+ny) + j*nx; return(E); } // Reports statistics about the computation // These values should not vary (except to within roundoff) // when we use different numbers of processes to solve the problem double stats(double *E, int m, int n, double *_mx){ double mx = -1; double l2norm = 0; int i, j; for (j=1; j<=m; j++) for (i=1; i<=n; i++) { l2norm += E[j * (n + 2) + i]*E[j * (n + 2) + i]; if (E[j * (n + 2) + i] > mx) mx = E[j * (n + 2) + i]; } *_mx = mx; l2norm /= (double) ((m)*(n)); l2norm = sqrt(l2norm); return l2norm; } void simulate (double* E, double** dev_E_ref, double* E_prev,double** dev_E_prev_ref,double* R, double* dev_R, const double alpha, const int n, const int m, const double kk, const double dt, const double a, const double epsilon, const double M1,const double M2, const double b) { const dim3 block_size(BLOCK_SIZE,BLOCK_SIZE); const dim3 num_blocks(n / block_size.x, m / block_size.y); copyHorizontalBoundaries<<<1,n>>>(n, m, *dev_E_prev_ref); pdeSolver<<<num_blocks, block_size>>>(m, n, *dev_E_prev_ref, *dev_E_ref, alpha); odeSolver<<<num_blocks, block_size>>>(m, n, *dev_E_ref, dev_R, *dev_E_prev_ref, epsilon, M1, M2, kk, dt, a, b, dev_E_ref, dev_E_prev_ref); swap<<<1,1>>>(dev_E_ref, dev_E_prev_ref); } // Main program int main (int argc, char** argv) { /* * Solution arrays * E is the "Excitation" variable, a voltage * R is the "Recovery" variable * E_prev is the Excitation variable for the previous timestep, * and is used in time integration */ double *E, *R, *E_prev; 
// Various constants - these definitions shouldn't change const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5; double T=500.0; int m=1024,n=1024; int plot_freq = 0; int px = 1, py = 1; int no_comm = 0; int num_threads=1; cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads); m = n; // Allocate contiguous memory for solution arrays // The computational box is defined on [1:m+1,1:n+1] // We pad the arrays in order to facilitate differencing on the // boundaries of the computation box E = (double*) malloc((m+2) * (n+2) * sizeof(double)); E_prev = (double*) malloc((m+2) * (n+2) * sizeof(double)); R = (double*) malloc((m+2) * (n+2) * sizeof(double)); int i,j; // Initialization for (j=1; j<=m; j++) for (i=1; i<=n; i++) E_prev[j * (n + 2) + i] = R[j * (n + 2) + i] = 0; for (j=1; j<=m; j++) for (i=n/2+1; i<=n; i++) E_prev[j * (n + 2) + i] = 1.0; for (j=m/2+1; j<=m; j++) for (i=1; i<=n; i++) R[j * (n + 2) + i] = 1.0; double dx = 1.0/n; // For time integration, these values shouldn't change double rp= kk*(b+1)*(b+1)/4; double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk)); double dtr=1/(epsilon+((M1/M2)*rp)); double dt = (dte<dtr) ? 
0.95*dte : 0.95*dtr; double alpha = d*dt/(dx*dx); cout << "Grid Size : " << n << endl; cout << "Duration of Sim : " << T << endl; cout << "Time step dt : " << dt << endl; cout << "Process geometry: " << px << " x " << py << endl; if (no_comm) cout << "Communication : DISABLED" << endl; cout << endl; double *dev_E_prev, *dev_E, *dev_R; double **dev_E_ref, **dev_E_prev_ref, **E_ref, **E_prev_ref; E_ref = (double**)malloc(sizeof(int)); E_prev_ref = (double**)malloc(sizeof(int)); cudaMalloc((void**)&dev_E_prev, (m+2) * (n+2) * sizeof(double)); cudaMalloc((void**)&dev_E, (m+2) * (n+2) * sizeof(double)); cudaMalloc((void**)&dev_R, (m+2) * (n+2) * sizeof(double)); cudaMalloc((void**)&dev_E_prev_ref, sizeof(int*)); cudaMalloc((void**)&dev_E_ref, sizeof(int*)); cudaMemcpy(dev_E_prev, E_prev, (m+2) * (n+2) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_E, E, (m+2) * (n+2) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_R, R, (m+2) * (n+2) * sizeof(double), cudaMemcpyHostToDevice); *E_ref = dev_E; *E_prev_ref = dev_E_prev; cudaMemcpy(dev_E_prev_ref, E_prev_ref, sizeof(double*), cudaMemcpyHostToDevice); cudaMemcpy(dev_E_ref, E_ref, sizeof(double*), cudaMemcpyHostToDevice); // Start the timer double t0 = getTime(); // Simulated time is different from the integer timestep number // Simulated time double t = 0.0; // Integer timestep number int niter=0; while (t<T) { t += dt; niter++; simulate(E, &dev_E, E_prev, &dev_E_prev, R, dev_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b); //cudaMemcpy(E_prev, dev_E_prev, (m+2) * (n+2) * sizeof(double), cudaMemcpyDeviceToHost); //cudaMemcpy(E, dev_E, (m+2) * (n+2) * sizeof(double), cudaMemcpyDeviceToHost); //swap current E with previous E //double *tmp = E; E = E_prev; E_prev = tmp; //cudaMemcpy(dev_E_prev, E_prev, (m+2) * (n+2) * sizeof(double), cudaMemcpyHostToDevice); //cudaMemcpy(dev_E, E, (m+2) * (n+2) * sizeof(double), cudaMemcpyHostToDevice); if (plot_freq){ int k = (int)(t/plot_freq); if ((t - k * plot_freq) < 
dt){ //splot(E,t,niter,m+2,n+2); } } }//end of while loop double time_elapsed = getTime() - t0; cudaMemcpy(E_prev, dev_E_prev, (m+2) * (n+2) * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(E, dev_E, (m+2) * (n+2) * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(R, dev_R, (m+2) * (n+2) * sizeof(double), cudaMemcpyDeviceToHost); double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ; double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed; cout << "Number of Iterations : " << niter << endl; cout << "Elapsed Time (sec) : " << time_elapsed << endl; cout << "Sustained Gflops Rate : " << Gflops << endl; cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl; double mx; double l2norm = stats(E_prev,m,n,&mx); cout << "Max: " << mx << " L2norm: "<< l2norm << endl; if (plot_freq){ cout << "\n\nEnter any input to close the program and the plot..." << endl; getchar(); } free (E); free (E_prev); free (R); cudaFree(dev_E); cudaFree(dev_E_prev); cudaFree(dev_R); return 0; } void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int& num_threads){ /// Command line arguments // Default value of the domain sizes static struct option long_options[] = { {"n", required_argument, 0, 'n'}, {"px", required_argument, 0, 'x'}, {"py", required_argument, 0, 'y'}, {"tfinal", required_argument, 0, 't'}, {"plot", required_argument, 0, 'p'}, {"nocomm", no_argument, 0, 'k'}, {"numthreads", required_argument, 0, 'o'}, }; // Process command line arguments int ac; for(ac=1;ac<argc;ac++) { int c; while ((c=getopt_long(argc,argv,"n:x:y:t:kp:o:",long_options,NULL)) != -1){ switch (c) { // Size of the computational box case 'n': n = atoi(optarg); break; // X processor geometry case 'x': px = atoi(optarg); // Y processor geometry case 'y': py = atoi(optarg); // Length of simulation, in simulated time units case 't': T = atof(optarg); break; // Turn off communication case 'k': no_comm = 1; break; 
// Plot the excitation variable case 'p': plot_freq = atoi(optarg); break; // Plot the excitation variable case 'o': num_threads = atoi(optarg); break; // Error default: printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n"); exit(-1); } } } } /* ********************************************************** * Author : Urvashi R.V. [04/06/2004] * Modified by Didem Unat [03/23/18] *************************************************************/ #include <stdio.h> /* Function to plot the 2D array * 'gnuplot' is instantiated via a pipe and * the values to be plotted are passed through, along * with gnuplot commands */ FILE *gnu=NULL; void splot(double **U, double T, int niter, int m, int n) { int i, j; if(gnu==NULL) gnu = popen("gnuplot","w"); double mx = -1, mn = 32768; for (j=0; j<m; j++) for (i=0; i<n; i++){ if (U[j][i] > mx) mx = U[j][i]; if (U[j][i] < mn) mn = U[j][i]; } fprintf(gnu,"set title \"T = %f [niter = %d]\"\n",T, niter); fprintf(gnu,"set size square\n"); fprintf(gnu,"set key off\n"); fprintf(gnu,"set pm3d map\n"); // Various color schemes fprintf(gnu,"set palette defined (-3 \"blue\", 0 \"white\", 1 \"red\")\n"); // fprintf(gnu,"set palette rgbformulae 22, 13, 31\n"); // fprintf(gnu,"set palette rgbformulae 30, 31, 32\n"); fprintf(gnu,"splot [0:%d] [0:%d][%f:%f] \"-\"\n",m-1,n-1,mn,mx); for (j=0; j<m; j++){ for (i=0; i<n; i++) { fprintf(gnu,"%d %d %f\n", i, j, U[i][j]); } fprintf(gnu,"\n"); } fprintf(gnu,"e\n"); fflush(gnu); return; }
14,447
#include <iostream>
#include <stdlib.h>
using namespace std;

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Launch with at least n threads in total; surplus threads are masked off.
__global__ void add(int *a, int *b, int *c, int n){
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    if (index < n){
        c[index] = a[index] + b[index];
    }
}

int main(int argc, char *argv[]){
    cout<<"****** Array Addition ******\n"<<endl;

    int *a, *b, *c;
    int size = 10;                        // number of elements
    size_t bytes = size * sizeof(int);    // byte count shared by all buffers

    a = (int*) malloc(bytes);
    b = (int*) malloc(bytes);
    c = (int*) malloc(bytes);

    // Give the inputs defined values (they were previously copied to the
    // device uninitialized).
    for (int i = 0; i < size; i++){
        a[i] = i;
        b[i] = 2 * i;
    }

    int *d_a, *d_b, *d_c;
    // BUG FIX: the device buffers were allocated and copied with 'size'
    // BYTES (10 bytes) instead of size * sizeof(int), so the kernel wrote
    // past the end of each allocation.
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);

    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);

    add<<<1,10>>>(d_a, d_b, d_c, size);

    // BUG FIX: the result was never copied back to the host.
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
14,448
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>

/*COLOR*/
#define RED "\x1B[31m"

/*RESET COLOR*/
#define RESET "\x1B[0m"

#define N 5120
#define M 64
#define FILE_NAME "../test-files/5120x5120.txt"
#define STEPS 1000

// Allocate a rows x columns char matrix as a single contiguous buffer plus a
// row-pointer table, with every cell initialized to '0'.
char **allocate2DArray(int rows, int columns) {
    char **block;
    int i;
    block = (char **) malloc(rows * sizeof(char *));
    block[0] = (char *) malloc(rows * columns * sizeof(char));
    for (i = 1; i < rows; i++) {
        // BUG FIX: the row stride is 'columns', not 'rows'. The two only
        // coincide for square matrices (N x N), which is why this went
        // unnoticed; for rows != columns the row pointers were wrong.
        block[i] = &(block[0][i * columns]);
    }
    memset(block[0], '0', rows * columns * sizeof(char));
    return block;
}

// Release a matrix created by allocate2DArray (one data buffer + the table).
void free2DArray(char **block) {
    free(block[0]);
    free(block);
}

// Pretty-print the board; '1' cells are rendered as red squares. When 'split'
// is set, a separator is inserted every localDim rows/columns.
void print_array(char **array, bool split, int dim, int localDim) {
    printf("\n");
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            printf("%s %c ", array[i][j] == '1' ? RED"\u2B1B" RESET : "\u2B1C",
                   (split && (j + 1) % localDim == 0) ? ' ' : '\0');
        }
        printf("\n%c", (split && (i + 1) % localDim == 0) ?
'\n' : '\0'); } printf("\n"); } // Device code __global__ void kernel(const char *old, char *current) { __shared__ char local[M + 2][M + 2]; int sum = 0; unsigned int local_row = threadIdx.x; unsigned int local_col = threadIdx.y; // unsigned int local_thread_id = local_col + local_row * M; unsigned int ix = blockIdx.x * (blockDim.x) + threadIdx.x; unsigned int iy = blockIdx.y * (blockDim.y) + threadIdx.y; unsigned int idx = ix * N + iy; // Initialize 'local' shared array local[local_row + 1][local_col + 1] = old[idx]; // Initialize neighbors if (blockIdx.x > 0 && blockIdx.x < gridDim.x - 1 && blockIdx.y > 0 && blockIdx.y < gridDim.y - 1) { //up if (local_row == 0) { local[local_row][local_col + 1] = old[idx - N]; } //down if (local_row == blockDim.x - 1) { local[local_row + 2][local_col + 1] = old[idx + N]; } //left if (local_col == 0) { local[local_row + 1][local_col] = old[idx - 1]; } //right if (local_col == blockDim.y - 1) { local[local_row + 1][local_col + 2] = old[idx + 1]; } //up left if (local_col == 0 && local_row == 0) { local[local_row][local_col] = old[idx - N - 1]; } //up right if (local_col == blockDim.y - 1 && local_row == 0) { local[local_row][local_col + 2] = old[idx - N + 1]; } //down left if (local_col == 0 && local_row == blockDim.y - 1) { local[local_row + 2][local_col] = old[idx + N - 1]; } //down right if (local_col == blockDim.y - 1 && local_row == blockDim.x - 1) { local[local_row + 2][local_col + 2] = old[idx + N + 1]; } } else { if (blockIdx.x == 0) { //up if (local_row == 0) { local[local_row][local_col + 1] = old[idx + (N - 1) * N]; } //down if (local_row == blockDim.x - 1) { local[local_row + 2][local_col + 1] = old[idx + N]; } //left if (local_col == 0) { if (blockIdx.y == 0) { local[local_row + 1][local_col] = old[idx + N - 1]; } else { local[local_row + 1][local_col] = old[idx - 1]; } } //right if (local_col == blockDim.y - 1) { if (blockIdx.y != gridDim.y - 1) { local[local_row + 1][local_col + 2] = old[idx + 1]; } else { 
local[local_row + 1][local_col + 2] = old[idx - N + 1]; } } //up left if (local_col == 0 && local_row == 0) { if (blockIdx.y == 0) { local[local_row][local_col] = old[idx + N * N - 1]; } else { local[local_row][local_col] = old[idx + (N - 1) * N - 1]; } } //up right if (local_row == 0 && local_col == blockDim.y - 1) { if (blockIdx.y != gridDim.y - 1) { local[local_row][local_col + 2] = old[idx + (N - 1) * N + 1]; } else { local[local_row][local_col + 2] = old[idx + (N - 1) * N - N + 1]; } } //down left if (local_row == blockDim.x - 1 && local_col == 0) { if (blockIdx.y == 0) { local[local_row + 2][local_col] = old[idx + 2 * N - 1]; } else { local[local_row + 2][local_col] = old[idx + 2 * N - 1 - N]; } } //down right if (local_row == blockDim.x - 1 && local_col == blockDim.y - 1) { if (blockIdx.y != gridDim.y - 1) { local[local_row + 2][local_col + 2] = old[idx + 1 * N + 1]; } else { local[local_row + 2][local_col + 2] = old[idx + N + 1]; } } } if (blockIdx.x == gridDim.x - 1) { //up if (local_row == 0) { local[local_row][local_col + 1] = old[idx - N]; } //down if (local_row == blockDim.x - 1) { local[local_row + 2][local_col + 1] = old[idx - N * (N - 1)]; } //left if (local_col == 0) { if (blockIdx.y == 0) { local[local_row + 1][local_col] = old[idx + N - 1]; } else { local[local_row + 1][local_col] = old[idx - 1]; } } //right if (local_col == blockDim.y - 1) { if (blockIdx.y != gridDim.y - 1) { local[local_row + 1][local_col + 2] = old[idx + 1]; } else { local[local_row + 1][local_col + 2] = old[idx - N + 1]; } } //up left if (local_col == 0 && local_row == 0) { if (blockIdx.y == 0) { local[local_row][local_col] = old[idx - 1]; } else { local[local_row][local_col] = old[idx - N - 1]; } } //up right if (local_row == 0 && local_col == blockDim.y - 1) { if (blockIdx.y != gridDim.y - 1) { local[local_row][local_col + 2] = old[idx - N + 1]; } else { local[local_row][local_col + 2] = old[idx - 2 * N + 1]; } } //down left if (local_row == blockDim.x - 1 && local_col == 
0) { if (blockIdx.y == 0) { local[local_row + 2][local_col] = old[idx - (N - 1) * (N - 1)]; } else { local[local_row + 2][local_col] = old[idx - N * (N - 1) - 1]; } } //down right if (local_row == blockDim.x - 1 && local_col == blockDim.y - 1) { if (blockIdx.y != gridDim.y - 1) { local[local_row + 2][local_col + 2] = old[idx - (N - 1) * N + 1]; } else if (blockIdx.y == gridDim.y - 1) { local[local_row + 2][local_col + 2] = old[idx - (N - 1) * N + 1 - N]; } } } if (blockIdx.x > 0 && blockIdx.x < gridDim.x - 1 && blockIdx.y == 0) { //up if (local_row == 0) { local[local_row][local_col + 1] = old[idx - N]; } //down if (local_row == blockDim.x - 1) { local[local_row + 2][local_col + 1] = old[idx + N]; } //right if (local_col == blockDim.y - 1) { local[local_row + 1][local_col + 2] = old[idx + 1]; } //left if (local_col == 0) { local[local_row + 1][local_col] = old[idx + N - 1]; } //up right if (local_col == blockDim.y - 1 && local_row == 0) { local[local_row][local_col + 2] = old[idx - N + 1]; } //down right if (local_col == blockDim.y - 1 && local_row == blockDim.x - 1) { local[local_row + 2][local_col + 2] = old[idx + N + 1]; } //up left if (local_col == 0 && local_row == 0) { if (blockIdx.y == 0) { local[local_row][local_col] = old[idx - 1]; } } //down left if (local_row == blockDim.x - 1 && local_col == 0) { if (blockIdx.y == 0) { local[local_row + 2][local_col] = old[idx + 2 * N - 1]; } } } if (blockIdx.x > 0 && blockIdx.x < gridDim.x - 1 && blockIdx.y == gridDim.y - 1) { //up if (local_row == 0) { local[local_row][local_col + 1] = old[idx - N]; } //down if (local_row == blockDim.x - 1) { local[local_row + 2][local_col + 1] = old[idx + N]; } //left if (local_col == 0) { local[local_row + 1][local_col] = old[idx - 1]; } //up left if (local_col == 0 && local_row == 0) { local[local_row][local_col] = old[idx - N - 1]; } //down left if (local_col == 0 && local_row == blockDim.y - 1) { local[local_row + 2][local_col] = old[idx + N - 1]; } //right if (local_col == 
blockDim.y - 1) { local[local_row + 1][local_col + 2] = old[idx - N + 1]; } //up right if (local_row == 0 && local_col == blockDim.y - 1) { local[local_row][local_col + 2] = old[idx - 2 * N + 1]; } //down right if (local_row == blockDim.x - 1 && local_col == blockDim.y - 1) { local[local_row + 2][local_col + 2] = old[idx + 1]; } } } __syncthreads(); // Calculate cells sum = (local[local_row][local_col] - '0') + (local[local_row][local_col + 1] - '0') + (local[local_row][local_col + 2] - '0') + (local[local_row + 1][local_col] - '0') + (local[local_row + 1][local_col + 2] - '0') + (local[local_row + 2][local_col] - '0') + (local[local_row + 2][local_col + 1] - '0') + (local[local_row + 2][local_col + 2] - '0'); // Is alive if ((local[local_row + 1][local_col + 1]) == '1') { if (sum <= 1 || sum >= 4) { current[idx] = '0'; } else { current[idx] = '1'; } } else if (sum == 3) { current[idx] = '1'; } else { current[idx] = '0'; } } // Host code int main() { char **host_array = nullptr, *device_old = nullptr, *device_current = nullptr, *temp = nullptr;; int i = 0, fd = 0; double time_spent = 0.0; clock_t begin, end; // Threads (2D) per block dim3 m(M, M); // Blocks (2D grid) dim3 n((unsigned int) ((N + (float) M - 1) / (float) M), (unsigned int) ((N + (float) M - 1) / (float) M)); assert(N * N == M * M * (n.x * n.y)); // Array allocations host_array = allocate2DArray(N, N); // Read file fd = open(FILE_NAME, O_RDONLY); assert(fd > 0); i = 0; while (read(fd, &host_array[i++][0], N)); close(fd); // printf("host_array before:\n"); // print_array(host_array, true, N, N); // Allocate 2D 'old' array on device cudaMalloc((void **) &device_old, N * N * sizeof(char)); // Copy 2D 'old' array on device cudaMemcpy(device_old, host_array[0], N * N * sizeof(char), cudaMemcpyHostToDevice); // Allocate 2D 'current' array on device cudaMalloc((void **) &device_current, N * N * sizeof(char)); // Initialize 2D 'current' array on device cudaMemset(device_current, '0', N * N * sizeof(char)); 
begin = clock(); // Computations for (i = 0; i < STEPS; i++) { // Call device function kernel<<<n, m>>>(device_old, device_current); // Copy 2D 'device_current' array from device to host cudaMemcpy(host_array[0], device_current, sizeof(char) * N * N, cudaMemcpyDeviceToHost); cudaMemset(device_old, '0', N * N * sizeof(char)); // printf("host_array on step %d:\n", i); // print_array(host_array, true, N, N); cudaDeviceSynchronize(); // Swap 'device_old' and 'device_current' arrays temp = device_old; device_old = device_current; device_current = temp; } end = clock(); time_spent = (double) (end - begin) / CLOCKS_PER_SEC; printf("time_spent=%f\n", time_spent); // Free memory cudaFree(device_old); cudaFree(device_current); free2DArray(host_array); return 0; }
14,449
#include "includes.h"

// Capacity (element count) of the global staging buffer below.
#define MAX_BUF 100000000

typedef unsigned int UINT;
// Statically allocated staging buffer of MAX_BUF unsigned ints
// (~400 MB; presumably filled elsewhere — TODO confirm against callers).
UINT buffer[MAX_BUF];

// Kernels should uniformly use this name; the parameter list may be
// customized. (Currently an empty placeholder with no parameters.)
__global__ void kernel() {
}
14,450
/*************************************************
** Accelereyes Training Day 1                   **
** Matrix Addition                              **
**                                              **
** This program will add two matrices and store **
** the result in a third matrix using the GPU   **
*************************************************/
#include <iostream>
#include <vector>

#define THREADS 10

using namespace std;

// Element-wise addition of two row-major matrices: c[i] = a[i] + b[i].
// One thread per element; threads past the end are masked off.
__global__ void add(int *a, int *b, int *c,int columns,int rows)
{
    // get the global id for the thread
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // perform addition, guarding the grid tail
    if (index < columns * rows)
        c[index] = a[index] + b[index];
}

int main(void)
{
    int rows = 100;
    int columns = 100;
    int elements = rows * columns;
    size_t size = rows * columns * sizeof(int);

    // create device pointers and allocate memory on the device
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_b, size);
    cudaMalloc(&d_c, size);

    // initalize host variables
    vector<int> h_a(elements, 5);
    vector<int> h_b(elements, 5);
    vector<int> h_c(elements);

    // transfer the host data to the GPU
    cudaMemcpy(d_a, &h_a.front(), size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &h_b.front(), size, cudaMemcpyHostToDevice);

    // FIX: the kernel was never launched, so h_c was printed uninitialized.
    // Ceil-divide so the grid covers every element.
    int blocks = (elements + THREADS - 1) / THREADS;
    add<<<blocks, THREADS>>>(d_a, d_b, d_c, columns, rows);

    // get the results from the GPU (blocking copy also synchronizes)
    cudaMemcpy(&h_c.front(), d_c, size, cudaMemcpyDeviceToHost);

    // FIX: the device allocations were leaked.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // print top left corner
    for(int i = 0; i < 5; i++) {
        for(int j = 0; j < 10; j++)
            cout << h_c[i * rows + j] << " ";
        cout << endl;
    }
}
14,451
#include "includes.h"

// One thread per channel. The data is interleaved in pairs: channel c owns
// elements 2c and 2c+1. For each channel, copy the pair from whichever frame
// has the larger value at element 2c (ties go to frameA via >=).
__global__ void naivekernel(float* output, float* frameA, float* frameB, int chans)
{
    int chan = blockIdx.x * blockDim.x + threadIdx.x;
    if (chan >= chans)
        return;

    int base = chan << 1;
    float* winner = (frameA[base] >= frameB[base]) ? frameA : frameB;
    output[base]     = winner[base];
    output[base + 1] = winner[base + 1];
}
14,452
#include <stdio.h> #include <math.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <time.h> #include <errno.h> /***************************************************************************** * * * * * * Compile with: * nvcc -o 2_3_a 2_3_a.cu * * Dr Kevan Buckley, University of Wolverhampton, 2018 ****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {65.78,84.94},{80.66,124.82},{72.60,113.64},{73.25,118.85}, {87.09,111.60},{73.60,99.61},{67.41,113.22},{72.33,105.25}, {66.66,113.69},{85.69,138.86},{65.88,110.02},{74.81,100.78}, {69.99,119.69},{69.96,109.03},{66.97,95.97},{60.83,102.06}, {48.92,92.22},{31.78,70.11},{88.86,140.37},{11.76,58.21}, {84.69,117.22},{77.05,115.49},{71.92,95.04},{ 9.70,47.44}, {97.37,140.03},{92.21,125.84},{42.88,74.76},{60.45,108.15}, {80.69,101.67},{ 4.33,31.86},{78.79,118.41},{71.97,101.20}, {88.61,116.18},{76.62,111.13},{76.71,115.16},{96.03,134.40}, {92.85,134.32},{79.01,112.20},{ 4.22,21.46},{31.16,42.04}, {76.10,99.37},{93.06,117.73},{88.67,122.71},{96.27,123.05}, {58.19,83.25},{68.07,108.38},{12.86,30.19},{49.04,77.19}, {16.44,44.81},{43.75,65.50},{52.12,93.32},{29.31,69.57}, {53.84,94.57},{96.29,116.55},{50.22,88.41},{69.95,99.83}, {95.20,122.15},{34.44,69.00},{69.47,107.12},{ 0.91,28.02}, {29.66,53.79},{ 4.30,29.31},{12.95,34.18},{ 2.76,28.50}, { 6.19,23.96},{78.09,122.77},{16.65,66.25},{73.03,125.98}, {51.04,82.58},{89.00,138.15},{12.50,49.42},{71.70,114.12}, {10.26,38.81},{78.93,128.07},{30.70,46.10},{ 9.55,19.67}, {79.22,95.02},{60.48,96.79},{82.67,106.82},{57.14,91.51}, {82.19,113.94},{32.98,64.19},{72.45,129.00},{13.19,49.45}, {91.62,120.70},{86.07,114.37},{13.23,41.46},{57.73,119.52}, {63.60,97.62},{ 6.67,20.56},{83.57,119.99},{14.51,44.79}, {65.46,101.29},{69.36,91.20},{35.50,64.60},{48.64,58.92}, {84.29,111.71},{ 5.65,25.65},{54.15,72.92},{29.92,67.69}, 
{19.21,41.51},{91.63,121.28},{95.57,124.52},{24.65,53.85}, {61.85,84.76},{93.80,112.72},{56.37,99.04},{33.41,53.01}, {86.81,112.34},{12.88,39.42},{ 7.05,37.58},{ 5.66,38.58}, {95.20,118.02},{50.13,93.55},{ 7.89,45.65},{15.84,61.16}, { 9.40,34.02},{ 6.69,52.52},{41.84,66.84},{45.61,84.32}, {83.67,119.45},{12.89,35.99},{14.82,44.90},{46.04,81.28}, {76.55,118.50},{ 3.73,41.21},{45.36,67.25},{33.88,64.53}, {92.55,124.17},{39.86,85.34},{88.52,128.15},{49.31,70.56}, {73.49,112.64},{57.96,88.82},{63.00,99.83},{59.79,93.59}, {64.86,118.67},{ 4.18,31.42},{50.59,90.69},{88.98,139.34}, {40.11,66.24},{98.38,123.02},{57.27,105.62},{16.22,41.11}, {30.34,49.43},{87.37,140.74},{18.01,49.80},{ 2.21,11.45}, {75.21,112.46},{26.54,71.74},{74.16,107.29},{30.97,64.03}, {84.37,123.86},{41.01,77.79},{37.72,62.23},{12.93,28.26}, {54.31,73.15},{39.55,81.70},{62.89,100.66},{67.99,109.15}, {87.35,124.70},{ 1.55,31.17},{33.61,50.80},{59.69,105.76}, {32.61,60.19},{63.51,101.36},{ 1.05,23.22},{54.73,92.57}, {56.28,84.65},{80.65,108.52},{89.85,119.70},{28.04,51.66}, {46.12,73.69},{22.41,58.04},{94.15,111.90},{23.99,51.74}, {16.70,38.71},{22.92,70.77},{88.85,129.50},{ 9.37,34.93}, {13.91,49.14},{87.42,120.84},{33.75,90.66},{38.86,60.00}, {95.32,130.46},{52.82,106.97},{23.25,49.61},{70.02,101.12}, {22.57,46.22},{88.25,135.29},{85.46,118.28},{15.36,39.22}, {93.93,119.32},{44.87,72.28},{74.63,117.37},{20.47,38.88}, {58.64,104.16},{77.26,115.23},{73.81,107.74},{44.31,83.05}, {73.33,112.30},{76.13,101.68},{66.14,111.98},{19.30,51.44}, {83.30,126.09},{30.35,58.24},{33.53,68.47},{30.81,55.14}, {94.40,140.26},{16.30,34.96},{15.20,47.46},{41.71,80.03}, {11.08,35.60},{26.14,48.13},{25.37,69.13},{36.07,71.36}, {19.76,33.08},{45.40,68.61},{64.20,111.70},{11.05,43.83}, {35.08,46.97},{23.36,53.34},{76.49,100.85},{20.09,42.43}, {70.47,113.53},{44.40,67.48},{95.32,136.24},{58.11,86.17}, {52.80,93.70},{83.16,107.14},{70.78,122.96},{11.55,32.35}, {58.75,97.71},{52.95,77.08},{30.81,48.93},{95.85,132.94}, 
{44.50,71.55},{ 0.39,32.70},{34.93,71.22},{41.68,91.73}, {42.71,76.66},{87.49,114.97},{81.65,126.86},{35.88,78.86}, {42.78,93.23},{36.62,57.25},{68.42,106.48},{ 1.02,30.03}, { 1.44,23.22},{46.30,79.52},{12.22,46.90},{43.53,75.44}, { 1.50,14.47},{47.36,62.50},{75.81,112.76},{16.45,48.77}, {24.42,53.77},{13.81,35.41},{97.51,128.68},{54.79,79.80}, {47.90,76.49},{25.27,55.69},{73.38,107.88},{48.62,92.85}, {15.50,38.19},{ 2.04,26.79},{23.24,39.56},{18.89,55.69}, {15.46,35.52},{40.33,63.30},{65.85,90.79},{33.30,71.08}, {44.22,73.63},{71.67,103.74},{91.74,129.75},{82.96,106.84}, { 9.99,33.58},{95.07,132.89},{10.11,34.28},{93.17,140.73}, {79.57,113.71},{90.91,123.45},{68.53,100.33},{30.80,67.13}, {75.92,106.00},{25.37,42.29},{24.28,65.42},{12.45,38.50}, {41.10,70.73},{ 0.44,41.25},{36.74,74.70},{79.37,105.60}, {64.98,102.09},{89.88,134.54},{98.48,147.09},{15.10,36.07}, {28.95,58.48},{98.56,145.43},{53.20,89.24},{57.47,101.11}, {80.42,89.49},{99.85,131.47},{59.01,85.63},{19.64,49.58}, {24.15,49.11},{19.15,47.59},{91.22,124.16},{20.49,61.47}, {40.40,61.57},{86.76,127.32},{67.21,104.20},{35.84,59.15}, { 2.66,39.39},{80.93,113.88},{25.54,68.66},{20.10,68.51}, {12.45,43.00},{ 0.98,19.94},{21.20,53.05},{90.33,114.89}, {32.52,65.61},{63.07,102.47},{69.59,120.30},{66.46,88.79}, { 4.45,40.60},{46.32,60.06},{ 7.53,16.18},{ 9.71,42.26}, {27.23,42.86},{27.76,47.88},{21.45,19.98},{52.37,84.64}, {45.27,86.31},{67.86,99.14},{ 3.11,29.83},{72.59,88.63}, {91.71,132.76},{63.40,112.65},{33.54,67.40},{36.53,74.07}, {78.84,100.02},{ 4.10,21.67},{42.67,71.97},{14.22,46.05}, {45.89,66.53},{12.11,36.29},{69.36,117.95},{52.72,83.50}, {53.09,92.41},{28.52,62.94},{52.89,99.94},{14.36,49.70}, {33.54,58.50},{29.36,64.05},{54.32,79.41},{91.29,135.17}, {68.97,95.29},{60.95,93.37},{38.01,52.19},{13.66,36.57}, { 0.13,30.87},{92.37,115.09},{87.79,130.79},{87.56,130.04}, {84.52,119.77},{54.18,91.22},{40.68,84.46},{99.57,143.57}, {43.30,66.25},{81.93,118.24},{27.55,66.43},{47.14,54.18}, { 
8.23,45.57},{12.73,42.70},{36.24,60.69},{88.02,121.50}, {68.71,111.61},{61.03,87.77},{96.08,134.20},{53.94,70.89}, {89.93,130.48},{81.50,108.78},{60.53,80.79},{49.01,79.16}, {75.17,103.32},{41.22,79.69},{29.31,56.01},{48.56,89.38}, {47.35,63.86},{47.29,84.39},{74.83,122.45},{82.09,128.53}, {16.68,57.09},{27.78,69.23},{61.28,82.19},{68.66,111.12}, {34.78,66.88},{ 3.85,12.89},{48.94,78.61},{66.81,97.84}, {36.29,86.04},{27.89,58.21},{91.55,142.66},{15.07,61.46}, {49.66,77.69},{45.34,66.40},{92.36,126.32},{83.72,114.99}, {74.22,98.96},{52.59,63.50},{57.40,89.25},{ 8.11,28.79}, {70.64,98.18},{28.97,64.93},{59.06,97.86},{26.78,55.09}, {30.61,66.34},{86.07,125.23},{63.69,101.98},{34.21,57.52}, {98.15,125.37},{67.61,124.80},{85.06,124.63},{68.99,94.71}, {56.65,88.43},{48.26,76.38},{37.47,109.00},{56.65,123.41}, {24.77,52.15},{37.08,69.44},{57.73,99.72},{ 5.72,30.64}, {53.54,87.65},{59.89,102.05},{85.23,132.20},{52.18,103.33}, {66.06,102.25},{78.83,119.47},{38.97,82.64},{13.94,21.20}, {45.53,48.05},{89.34,131.71},{84.58,130.87},{ 4.58,37.47}, {44.91,54.60},{31.07,66.31},{ 6.41,34.58},{46.37,67.66}, {88.40,120.11},{53.28,75.24},{88.74,128.67},{10.45,37.74}, {65.52,97.32},{62.16,103.38},{72.06,113.98},{53.36,82.72}, {27.69,61.02},{ 5.32,10.52},{22.18,54.06},{32.44,49.68}, { 1.20,38.95},{77.84,119.69},{18.07,37.00},{ 5.27,41.43}, {33.05,65.31},{37.46,55.62},{82.73,124.56},{ 8.11,42.31}, {72.77,103.88},{27.88,44.33},{16.10,29.03},{18.27,59.33}, {54.84,81.40},{68.63,97.16},{ 6.73,34.02},{34.76,73.62}, {43.35,80.93},{82.83,117.96},{22.36,40.83},{ 6.34,46.03}, {52.15,83.33},{52.83,72.35},{26.59,52.77},{38.87,68.02}, {12.23,47.63},{36.72,68.58},{42.18,90.26},{29.34,60.82}, {37.62,86.68},{19.33,27.92},{73.34,108.09},{48.93,73.80}, {72.41,126.86},{54.47,98.81},{35.09,62.59},{46.61,67.81}, {69.08,103.82},{79.73,107.37},{36.27,56.49},{30.85,43.20}, { 8.50,43.86},{59.02,88.38},{69.07,113.97},{17.12,54.82}, {31.09,73.11},{75.12,109.39},{56.58,85.07},{89.64,131.91}, 
{46.48,90.29},{66.02,113.76},{97.52,123.13},{57.37,83.39}, {37.12,72.21},{ 5.99,35.25},{ 2.90,32.47},{67.30,89.07}, {40.80,65.62},{66.03,103.43},{86.40,105.31},{35.97,72.43}, {52.88,92.48},{65.37,99.35},{97.88,123.46},{42.45,64.70}, {82.41,121.35},{25.18,60.12},{47.93,84.65},{70.53,103.81}, {72.03,103.51},{51.72,70.08},{99.56,142.75},{44.50,70.40}, { 7.72,27.82},{91.01,131.50},{ 8.81,43.72},{11.42,45.66}, {48.85,83.83},{32.25,62.82},{63.74,107.36},{70.70,122.82}, {26.26,62.04},{30.79,64.38},{46.16,80.00},{80.20,144.95}, {51.88,92.05},{ 7.65,37.11},{93.77,115.78},{84.86,132.34}, {92.24,127.84},{50.25,67.89},{25.80,65.91},{81.90,116.78}, {62.26,109.25},{16.44,44.66},{41.08,69.20},{40.91,72.34}, {57.65,103.90},{14.23,53.28},{53.61,80.02},{97.67,116.54}, {41.67,62.16},{89.94,129.01},{47.12,83.97},{ 7.64,29.90}, {39.02,64.24},{81.77,131.44},{65.39,97.70},{65.69,98.38}, {67.20,105.36},{ 1.19,22.92},{90.92,131.26},{ 5.65,13.89}, {75.56,104.27},{29.76,67.34},{ 1.91,43.49},{ 8.29,43.45}, {20.48,41.18},{33.63,70.78},{37.38,70.45},{ 5.48,28.78}, {75.75,120.92},{82.28,135.33},{14.77,74.49},{39.10,83.36}, { 8.86,40.33},{75.41,113.16},{57.56,91.58},{89.33,120.10}, {15.92,30.37},{79.65,114.27},{33.63,64.56},{56.81,84.99}, { 0.68,26.24},{79.57,119.83},{22.91,69.49},{ 9.88,42.80}, { 9.15,45.84},{51.81,87.55},{32.13,89.86},{71.92,113.29}, { 4.74,28.94},{21.16,63.23},{45.47,71.86},{84.21,117.65}, {18.69,64.74},{31.99,47.79},{46.30,66.98},{ 1.12,37.93}, {83.86,134.58},{28.49,75.14},{52.66,63.86},{54.97,85.99}, { 4.79,24.49},{58.55,93.05},{67.40,90.61},{ 4.35,25.67}, {21.58,32.08},{75.69,108.42},{69.79,107.03},{40.41,60.86}, {49.51,82.66},{54.77,92.15},{95.14,116.82},{20.44,39.46}, {16.47,45.72},{90.97,115.53},{ 1.27,36.57},{ 4.98,47.82}, { 3.37,42.17},{87.25,110.00},{68.97,91.94},{66.31,109.22}, {67.60,76.86},{17.43,57.61},{68.35,84.18},{84.88,123.97}, {48.31,78.55},{56.03,91.48},{37.08,52.78},{20.44,48.80}, {21.10,32.35},{63.38,81.85},{97.99,121.65},{29.04,63.52}, 
{94.79,126.70},{98.68,131.02},{66.07,94.72},{ 0.58,49.60}, {30.35,70.60},{71.38,102.31},{10.39,32.46},{90.88,125.15}, {54.73,80.33},{18.04,55.48},{54.19,84.82},{87.33,128.97}, {20.11,58.77},{88.69,135.20},{50.73,73.95},{41.50,65.20}, {52.09,76.29},{61.36,85.27},{86.80,127.70},{48.84,71.02}, {86.20,130.25},{28.01,73.72},{28.93,68.22},{ 0.93,17.86}, {60.92,103.87},{34.30,72.21},{ 8.65,41.15},{69.58,109.43}, {33.85,64.67},{64.17,83.59},{ 2.17,29.56},{26.90,71.34}, {17.83,44.07},{38.24,65.62},{77.71,104.20},{75.90,126.26}, {42.28,76.92},{62.47,96.95},{ 4.76,24.44},{15.54,50.13}, {79.25,97.12},{70.46,113.54},{ 7.16,32.16},{36.08,62.26}, {96.38,141.09},{71.33,109.52},{56.85,85.48},{87.62,120.82}, {25.45,67.87},{ 6.92,26.77},{71.90,93.01},{45.46,70.53}, {59.16,81.85},{87.99,108.31},{ 0.24,14.80},{81.47,111.72}, {35.34,40.20},{61.84,76.32},{85.96,128.15},{46.62,85.33}, {38.73,60.24},{22.65,26.64},{45.10,69.32},{10.81,55.39}, {72.16,113.00},{ 3.64,40.11},{44.44,93.04},{59.83,102.08}, {33.07,53.38},{88.30,114.97},{16.94,35.62},{58.77,91.32}, {58.84,80.83},{57.79,109.11},{82.11,99.22},{65.81,97.26}, {56.46,89.24},{98.54,138.29},{48.82,72.65},{11.74,38.78}, {89.73,122.59},{14.75,42.60},{85.57,122.56},{16.54,52.18}, {29.02,59.69},{37.02,62.00},{49.71,85.41},{17.62,39.28}, {35.96,73.54},{22.96,46.01},{28.48,74.16},{63.80,98.66}, {82.33,111.20},{12.18,47.98},{51.29,81.89},{86.83,120.17}, {38.94,75.34},{16.84,60.70},{21.13,56.89},{89.28,102.86}, {49.60,86.24},{96.19,148.10},{55.93,102.19},{93.10,118.80}, {66.87,106.87},{21.23,52.69},{19.15,38.20},{49.28,89.46}, {58.54,82.41},{ 8.61,20.99},{46.80,64.82},{21.45,39.17}, {98.06,162.09},{33.55,76.96},{21.72,47.03},{30.75,63.04}, {70.31,108.72},{11.38,36.57},{16.03,41.65},{89.58,132.84}, {82.09,118.12},{90.12,101.79},{62.86,88.28},{63.01,96.38}, {66.04,96.24},{23.56,53.42},{69.99,103.91},{ 2.29,38.48}, {23.46,55.17},{73.82,111.65},{63.50,106.29},{24.67,50.27}, {93.34,116.02},{49.77,83.52},{65.01,90.02},{74.38,119.88}, 
{98.05,137.44},{60.50,112.18},{17.75,46.40},{48.77,66.10}, { 9.87,36.71},{41.19,60.68},{11.91,42.76},{94.89,143.01}, {38.94,74.93},{18.64,38.54},{66.85,106.14},{46.23,83.12}, {14.12,55.73},{48.36,86.23},{37.79,68.52},{57.58,90.10}, {52.82,103.65},{22.80,54.35},{96.37,128.93},{36.01,71.90}, {56.61,87.72},{28.47,42.19},{97.85,132.03},{54.71,85.06}, {59.97,93.65},{12.80,33.33},{89.46,118.05},{67.74,99.10}, {72.96,110.21},{ 3.37,30.79},{61.08,87.34},{30.27,54.07}, {80.44,113.77},{61.79,107.35},{16.80,36.25},{36.11,87.10}, {23.47,34.65},{42.93,67.18},{59.81,100.74},{ 9.83,31.58}, {20.50,52.04},{69.02,98.83},{68.99,116.05},{86.70,98.10}, {18.14,55.01},{99.32,132.01},{93.04,116.40},{73.44,103.58}, {54.85,93.77},{30.30,73.03},{42.70,69.68},{28.74,47.53}, {36.70,58.24},{ 2.25,20.30},{31.58,69.69},{50.11,73.27}, {15.12,44.77},{94.15,137.43},{56.68,80.90},{96.46,129.14}, { 0.43,29.74},{71.62,117.72},{59.58,96.36},{84.18,129.18}, {41.01,83.94},{37.25,81.88},{52.97,76.90},{82.33,117.77}, {93.01,125.58},{19.82,42.13},{15.34,66.30},{10.41,32.21}, {15.46,27.21},{84.50,112.50},{95.28,139.45},{97.08,127.88}, {47.62,68.52},{22.22,39.32},{35.06,48.35},{98.97,149.30}, {31.27,50.78},{97.44,131.59},{15.17,56.41},{10.45,38.70}, {25.33,58.60},{ 2.11,30.71},{19.70,50.07},{76.22,116.69}, { 3.03, 8.54},{75.53,114.39},{ 7.84,23.87},{38.08,64.11}, {15.47,35.63},{50.02,93.93},{33.98,66.41},{79.23,114.48}, {68.81,96.68},{50.48,97.02},{24.41,57.24},{97.06,131.69}, {27.59,57.96},{ 7.43,36.04},{26.22,54.52},{51.16,91.41}, {55.65,98.61},{84.28,120.89},{42.51,67.14},{28.43,64.76}, { 1.04,56.55},{35.33,73.84},{27.15,59.22},{70.41,102.22}, {88.04,124.07},{19.41,29.02},{22.28,53.20},{88.19,122.10}, {50.02,106.03},{56.03,95.14},{31.24,51.95},{64.07,101.28}, {78.51,103.60},{32.47,68.50},{67.91,110.01},{67.66,93.38}, {63.05,105.49},{ 3.19,43.07},{57.24,101.65},{69.49,97.46}, {49.58,85.88},{94.65,130.13},{54.50,88.30},{43.62,89.43}, {57.97,83.41},{43.13,83.20},{21.00,44.82},{59.35,74.14}, 
{57.82,75.53},{77.34,97.47},{30.29,64.36},{49.88,84.15}, {41.55,81.68},{96.27,142.28},{54.18,64.01},{78.95,122.31}, {96.53,131.50},{96.33,117.12},{59.78,74.57},{25.86,55.66}, {93.50,136.69},{84.76,119.37},{73.14,106.46},{48.07,74.40}, {22.53,54.86},{ 6.39,42.39},{62.43,81.91},{45.44,67.17}, {76.81,116.23},{94.19,127.34},{31.03,55.92},{21.76,36.42}, {32.47,61.13},{70.47,85.93},{23.19,54.75},{81.57,122.35}, {96.74,134.52},{ 9.15,51.97},{89.90,118.11},{ 2.77,33.70}, { 3.36,29.82},{31.95,64.99},{11.11,25.57},{30.51,46.15}, {22.58,56.37},{60.04,86.98},{64.42,92.98},{ 4.02,28.30}, {52.93,105.09},{68.61,100.56},{97.57,140.89},{91.88,132.20}, { 8.89,35.30},{64.23,94.59},{93.45,139.06},{37.62,44.86}, {14.43,51.46},{32.21,84.10},{80.69,127.51},{33.19,73.49}, { 1.40,36.45},{76.65,107.98},{93.43,122.99},{88.91,121.39}, {81.95,120.81},{20.32,42.43},{56.95,87.28},{80.09,111.05}, {83.63,129.54},{75.02,109.92},{73.08,117.07},{35.44,71.25}, { 7.84,30.65},{33.31,72.21},{68.75,95.19},{41.02,77.76}, {69.90,102.93},{80.38,121.94},{77.02,117.53},{47.01,82.60}, {28.49,64.08},{73.36,117.31},{37.29,73.73},{28.05,57.95}, {71.58,118.36},{30.60,59.11},{ 1.13,14.84},{29.99,54.38}, {15.21,47.19},{14.12,21.18},{97.73,133.90},{69.66,95.75} }; double residual_error (double x , double y , double m , double c){ double e = (m*x) +c - y; return e * e; } __device__ double d_residual_error (double x , double y , double m , double c){ double e = (m*x) +c - y; return e*e; } double rms_error (double m , double c){ int i; double mean; double error_sum =0; for (i=0; i<n_data; i++){ error_sum += residual_error(data[i].x,data [i].y,m,c); } mean = error_sum / n_data; return sqrt (mean); } __global__ void d_rms_error (double *m , double *c, double *error_sum_arr, point_t *d_data){ int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds 
= finish->tv_sec - start->tv_sec;
  long long int dn = finish->tv_nsec - start->tv_nsec;
  if(dn < 0 ) {
    ds--;
    dn += 1000000000;
  }
  *difference = ds * 1000000000 + dn;
  return !(*difference > 0);
}

// Hill-climbing linear regression: starting from (bm, bc), probe the 8
// neighbouring (m, c) points at distance 'step' and move to the best one,
// stopping when no neighbour improves the RMS error. Per-point squared
// errors are computed on the GPU (100 blocks x 10 threads = 1000 points).
int main() {
  int i;
  double bm = 1.3;
  double bc = 10;
  double be;
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;

  // Offsets of the 8 neighbours around the current (m, c).
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};

  struct timespec start, finish;
  long long int time_elapsed;

  // Start timing the whole search.
  clock_gettime(CLOCK_MONOTONIC, &start);

  cudaError_t error;

  //Device variables
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;

  be = rms_error(bm, bc);

  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  //Allocate memory for d_dc
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  //Allocate memory for d_data
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  while(!minimum_found) {
    // Candidate (m, c) pairs: the 8 neighbours of the current best.
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }

    //Copy memory for dm to d_dm
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
              cudaGetErrorString(error));
    }

    //Copy memory for dc to d_dc
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
              cudaGetErrorString(error));
    }

    //Copy memory for data to d_data
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
              cudaGetErrorString(error));
    }

    for(i=0;i<8;i++) {
      //Host variable storing the array returned from the kernel function.
      double h_error_sum_arr[1000];
      //Stores the total sum of the values from the error sum array.
      //BUG FIX: this was read via '+=' below without ever being initialized;
      //the old "reset" at the end of the loop body was useless because the
      //variable is re-declared (uninitialized) on every iteration.
      double error_sum_total = 0;
      //Stores the mean of the total sum of the error sums.
      double error_sum_mean;

      //Call the rms_error function using 100 blocks and 10 threads.
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      //cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
      //modern equivalent.
      cudaDeviceSynchronize();

      //Copy memory for d_error_sum_arr
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr,
                         (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                cudaGetErrorString(error));
      }

      //Loop through the error sum array returned from the kernel function
      for(int j=0; j<n_data; j++) {
        //Add each error sum to the error sum total.
        error_sum_total += h_error_sum_arr[j];
      }

      //Calculate the mean for the error sum.
      error_sum_mean = error_sum_total / n_data;
      //Calculate the square root for the error sum mean (the RMS error).
      e[i] = sqrt(error_sum_mean);

      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }

    if(best_error < be) {
      //A neighbour improved the error: move there and keep searching.
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      //No neighbour improved the error: local minimum reached.
      minimum_found = 1;
    }
  }

  //Free memory for d_dm
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  //Free memory for d_dc
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  //Free memory for d_data
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  //Free memory for d_error_sum_arr
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);

  //Get the system time after we have run the linear regression function.
  clock_gettime(CLOCK_MONOTONIC, &finish);
  //Calculate the time spent between the start time and end time.
  time_difference(&start, &finish, &time_elapsed);
  //Output the time spent running the program.
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed/1.0e9));

  return 0;
}
14,453
#include "includes.h"
#include <cfloat>

/*
 * Forward pass of a 2D max-pooling layer, one thread per output element.
 *
 * Layout (x fastest-varying):
 *   in  : [batch][in_size_z][in_size_y][in_size_x]
 *   out : [batch][out_size_z][out_size_y][out_size_x]
 *
 * Launch: a (possibly 2D) grid of 1D blocks; threads are flattened into a
 * single linear id and guarded against the tail, so any grid size that covers
 * batch_size * out_size_x * out_size_y * out_size_z threads is valid.
 *
 * Preconditions (not checked): the pooling window must stay inside the input,
 * i.e. (out-1)*stride + kernel_size <= in_size for both x and y, and the
 * channel counts must match (z indexes the same dimension in both tensors).
 */
__global__ void calcMaxPoolForwardGPU(
    float *in, float *out,
    int in_size_x, int in_size_y, int in_size_z,
    int batch_size,
    int out_size_x, int out_size_y, int out_size_z,
    int stride, int kernel_size)
{
    // Flatten the 2D grid of 1D blocks into one linear thread id.
    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    int id_out = id;

    if (id_out < batch_size * out_size_x * out_size_y * out_size_z)
    {
        // Decompose the linear id into (x, y, z, b) output coordinates.
        int x = id % out_size_x;
        id /= out_size_x;
        int y = id % out_size_y;
        id /= out_size_y;
        int z = id % out_size_z;
        id /= out_size_z;
        int b = id;

        // Top-left corner of the pooling window in the input plane.
        int mapped_x = x * stride;
        int mapped_y = y * stride;

        // Fix: start from -FLT_MAX (as the reference CPU implementation did)
        // instead of the arbitrary -1000000.0, so inputs that are uniformly
        // below -1e6 are still pooled correctly. Also avoids a double literal
        // in a float kernel.
        float mval = -FLT_MAX;
        for (int j = 0; j < kernel_size; ++j)
        {
            for (int i = 0; i < kernel_size; ++i)
            {
                int id_in = b * (in_size_z * in_size_x * in_size_y)
                          + z * (in_size_x * in_size_y)
                          + (mapped_y + j) * in_size_x
                          + (mapped_x + i);
                float v = in[id_in];
                if (v > mval)
                {
                    mval = v;
                }
            }
        }
        out[id_out] = mval;
    }
}
14,454
/*
 * Hello World in CUDA
 *
 * CS3210
 *
 * This program start from "hello world" string and should print "HELLO WORLD"
 *
 */
#include <stdio.h>
#include <string.h> /* fix: strlen was used without including <string.h> */

#define N 32
// #define DISCRETE

/*
 * Uppercase kernel: each thread handles one character.
 * Adding 'A' - 'a' (== -32) maps 'a'-'z' to 'A'-'Z'; it also maps the
 * deliberate '@' (64) separator to ' ' (32), producing "HELLO WORLD".
 * Threads past the string length do nothing.
 */
__global__ void hello(char *a, int len)
{
    int tid = threadIdx.x;
    if (tid >= len)
        return;
    a[tid] += 'A' - 'a';
}

int main()
{
    // original string
    char a[N] = "hello@world";
    // length
    int len = strlen(a);
    // pointer to the string on device
    char* ad;
    // pointer to the final string on host
    char* ah;
    // CUDA returned error code
    cudaError_t rc;

    // allocate space for the string on device (GPU) memory
    // fix: check the return codes that were previously ignored, so an
    // allocation/copy failure is reported instead of silently printing
    // the untransformed string.
    rc = cudaMalloc((void**)&ad, N);
    if (rc != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(rc));
        return 1;
    }
    rc = cudaMemcpy(ad, a, N, cudaMemcpyHostToDevice);
    if (rc != cudaSuccess) {
        printf("cudaMemcpy (H2D) failed: %s\n", cudaGetErrorString(rc));
        return 1;
    }

    // launch the kernel; one block of N threads (len <= N)
    hello<<<1, N>>>(ad, len);
    cudaDeviceSynchronize();

    // for discrete GPUs, get the data from device memory to host memory
    rc = cudaMemcpy(a, ad, N, cudaMemcpyDeviceToHost);
    if (rc != cudaSuccess) {
        printf("cudaMemcpy (D2H) failed: %s\n", cudaGetErrorString(rc));
        return 1;
    }
    ah = a;

    // was there any error during the launch/execution?
    rc = cudaGetLastError();
    if (rc != cudaSuccess)
        printf("Last CUDA error %s\n", cudaGetErrorString(rc));

    // print final string
    printf("%s!\n", ah);

    // free memory
    cudaFree(ad);

    return 0;
}
14,455
#include <iostream>
#include <cuda_runtime.h>

/*
 * HPP-style lattice-gas cellular automaton on an N x M torus.
 * Each node holds 4 occupation bits (X == 4): f0 = right, f1 = up,
 * f2 = left, f3 = down. Two storage layouts are exercised:
 *   AoS: node i stores its 4 directions at f[i*4 + d]
 *   SoA: direction d of node i is stored at f[i + d*N*M]
 */

/* Read the grid from a text file. First line is "N M"; `tipo` selects the
   in-memory layout: tipo != 0 -> AoS, tipo == 0 -> SoA. The caller owns the
   returned buffer (*f, allocated with new[]). */
void Read(int **f, int *M, int *N, const char *filename, int X, int tipo)
{
    FILE *fp;
    fp = fopen(filename, "r");
    fscanf(fp, "%d %d\n", N, M);
    int imsize = (*M) * (*N) * X;
    int* f1 = new int[imsize];
    int Largo = (*M) * (*N);
    if (tipo != 0){ // AoS: interleave the X direction planes per node
        for(int x=0; x<X; x++){
            for(int i = 0; i < Largo; i++){
                fscanf(fp, "%d ", &(f1[i*4 + x]));
                // printf("%d ", i*4 + x);
            }
        }
    }
    else{ // SoA: one contiguous plane of Largo values per direction
        for(int j=0; j<X; j++){
            for(int i = 0; i < Largo; i++){
                fscanf(fp, "%d ", &(f1[i + j*Largo]));
                // printf("%d ", f1[i + j*Largo]);
            }
            // printf("\n");
        }
    }
    fclose(fp);
    *f = f1;
}

/* Write an AoS-layout grid back to a text file (one line per direction). */
void Write_AoS(int *f, int M, int N, const char *filename)
{
    FILE *fp;
    fp = fopen(filename, "w");
    fprintf(fp, "%d %d\n", N, M);
    int Largo = M*N;
    for(int j=0; j<4; j++){
        for(int i = 0; i < Largo-1; i++){
            fprintf(fp, "%d ", f[i*4 + j]);
            // printf("%d ", f[i*4 + j]);
        }
        fprintf(fp, "%d\n", f[(Largo-1)*4 + j]);
        // printf("%d\n", f[(Largo-1)*4 + j]);
    }
    //printf("\n");
    fclose(fp);
}

/* Write a SoA-layout grid back to a text file (one line per direction). */
void Write_SoA(int *f, int M, int N, const char *filename)
{
    FILE *fp;
    fp = fopen(filename, "w");
    fprintf(fp, "%d %d\n", N, M);
    int Largo = M*N;
    for(int j=0; j<4; j++){
        for(int i = 0; i < Largo-1; i++){
            fprintf(fp, "%d ", f[i + j*Largo]);
            // printf("%d ", f[i + j*Largo]);
        }
        fprintf(fp, "%d\n", f[Largo-1 + j*Largo]);
        // printf("%d\n", f[Largo-1 + j*Largo]);
    }
    //printf("\n");
    fclose(fp);
}

/* Sanity check: particle count (sum of all occupation bits) should be
   conserved between the initial (i == 0) and final (i == 1) states. */
void validar(int *f, int N, int M, int i){
    int suma=0;
    for(int i=0; i<N*M*4; i++){
        suma += f[i];
    }
    if (i == 0){
        printf("Cantidad inicial de particulas: %d\n", suma);
    }
    else if (i == 1){
        printf("Cantidad final de particulas: %d\n", suma);
        printf("\n");
    }
}

/* Euclidean-style a % b that also works for negative a, used for the
   periodic (torus) boundary wrap-around.
   NOTE(review): correct only for a >= -b, which holds here since a is at
   most one step outside [0, b). */
__device__ int modulo(int a, int b){ //a%b
    if (a >= 0){
        return a %b;
    }
    return b + a;
}

/* AoS collision step (in-place on f; f_out unused here): a head-on
   horizontal pair (f0,f2) becomes a vertical pair (f1,f3) and vice versa. */
__global__ void kernelAoS_col(int *f, int *f_out, int X, int N, int M){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < M*N){
        int idb = tid*4;
        int f0, f1, f2, f3;
        // Load the node's 4 direction bits into registers
        f0 = f[idb];
        f1 = f[idb+1];
        f2 = f[idb+2];
        f3 = f[idb+3];
        if(f0 && f2 && f1 == 0 && f3 == 0){
            f[idb] = 0;
            f[idb+1] = 1;
            f[idb+2] = 0;
            f[idb+3] = 1;
        }
        else if(f0 == 0 && f2 == 0 && f1 && f3){
            f[idb] = 1;
            f[idb+1] = 0;
            f[idb+2] = 1;
            f[idb+3] = 0;
        }
    }
}

/* AoS streaming step: scatter each particle to the matching direction slot
   of the adjacent node, with periodic wrap. f_out must be zeroed beforehand
   (see f_out_0). */
__global__ void kernelAoS_stream(int *f, int *f_out, int N, int M){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < N*M){
        // f0: right, f1: up, f2: left, f3: down
        int x, y, idb;
        idb = tid*4; // index of f0 in the array
        x = tid % M;
        y = tid / M;
        // Linear ids of the 4 adjacent nodes (right, up, left, down)
        int nd[] = {modulo(x+1,M) + y *M,
                    x + modulo(y+1, N) *M,
                    modulo(x-1, M) + y *M,
                    x + modulo(y-1, N) *M };
        // Walk the 4 directions
        for(int i=0; i<4; i++){
            // (clearing is done by a separate kernel)
            //f_out[idb+i] = 0;
            // If a particle moves in this direction...
            if(f[idb+i] == 1){
                // ...the same direction slot of the neighbour receives it
                f_out[nd[i]*4+i] = 1;
            }
        }
    }
}

/* SoA collision step: same rule as kernelAoS_col, SoA indexing. */
__global__ void kernelSoA_col(int *f, int *f_out, int X, int N, int M){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < M*N){
        int f0, f1, f2, f3, Largo;
        Largo = N*M;
        // Load the node's 4 direction bits into registers
        f0 = f[tid];
        f1 = f[tid+1*Largo];
        f2 = f[tid+2*Largo];
        f3 = f[tid+3*Largo];
        if(f0 && f2 && f1 == 0 && f3 == 0){
            f[tid] = 0;
            f[tid+1*Largo] = 1;
            f[tid+2*Largo] = 0;
            f[tid+3*Largo] = 1;
        }
        else if(f0 == 0 && f2 == 0 && f1 && f3){
            f[tid] = 1;
            f[tid+1*Largo] = 0;
            f[tid+2*Largo] = 1;
            f[tid+3*Largo] = 0;
        }
    }
}

/* SoA streaming step: same rule as kernelAoS_stream, SoA indexing. */
__global__ void kernelSoA_stream(int *f, int *f_out, int X, int N, int M){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < N*M){
        // f0: right, f1: up, f2: left, f3: down
        int x, y, Largo = N*M;
        x = tid % M;
        y = tid / M;
        // Linear ids of the 4 adjacent nodes (right, up, left, down)
        int nd[] = { modulo(x+1,M) + y*M,
                     x + modulo(y+1,N)*M,
                     modulo(x-1,M) + y*M,
                     x + modulo(y-1,N)*M };
        // Walk the 4 directions
        for(int i=0; i<X; i++){
            // (clearing is done by a separate kernel)
            //f_out[tid + i*Largo] = 0;
            // If a particle moves in this direction...
            if(f[tid + i*Largo] == 1){
                // ...the same direction slot of the neighbour receives it
                f_out[nd[i] + i*Largo] = 1;
            }
        }
    }
}

/* Zero the whole destination buffer before a streaming step. */
__global__ void f_out_0(int *f_out, int N, int M){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < N*M*4){
        f_out[tid] = 0;
    }
}

//--------------------------------------------------------------------------------
// Question 2: boundary conditions with AoS; j selects the implementation
// style (0 = if statement, 1 = ternary operator, 2 = boolean arithmetic).

/* AoS collision step that skips collisions on boundary nodes. */
__global__ void kernelAoS_col_borde(int *f, int *f_out, int X, int N, int M, int j){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < M*N){
        int idb = tid*4;
        int f0, f1, f2, f3, x, y;
        x = tid % M;
        y = tid / M;
        // Load the node's 4 direction bits into registers
        f0 = f[idb+0];
        f1 = f[idb+1];
        f2 = f[idb+2];
        f3 = f[idb+3];
        bool borde = (x == 0 || x == M -1 || y == 0 || y == N-1) ;
        bool horizontal = f0 && f2 && f1 == 0 && f3 == 0;
        bool vertical = f0 == 0 && f2 == 0 && f1 && f3;
        // if-statement variant
        if (j == 0){
            if ( !borde ){ // only collide away from the boundary
                if(horizontal){
                    f[idb] = 0;
                    f[idb+1] = 1;
                    f[idb+2] = 0;
                    f[idb+3] = 1;
                }
                else if(vertical){
                    f[idb] = 1;
                    f[idb+1] = 0;
                    f[idb+2] = 1;
                    f[idb+3] = 0;
                }
            }
        }
        // ternary-operator variant
        // NOTE(review): on a boundary node this writes f[idb+1] = 1
        // unconditionally (borde ? 1 : ...) while the j == 0 variant leaves
        // f1 untouched — looks inconsistent with the other branches; confirm
        // against the intended physics.
        else if (j == 1){
            f[idb] = (borde ? f0 : (horizontal ? 0: (vertical ? 1 : f0)));
            f[idb+1] = (borde ? 1 : (horizontal ? 1: (vertical ? 0 : f1)));
            f[idb+2] = (borde ? f2 : (horizontal ? 0: (vertical ? 1 : f2)));
            f[idb+3] = (borde ? f3 : (horizontal ? 1: (vertical ? 0 : f3)));
        }
        // boolean-arithmetic variant (branch-free selects built from
        // bool -> int conversions; abs(b - 1) is the negation of b)
        else if (j == 2){
            f[idb] = (borde) * f0 + abs(borde -1) * ((horizontal) * 0 + abs(horizontal-1) * ((vertical) * 1 + abs(vertical -1) * f0));
            f[idb+ 1] = (borde) * f1 + abs(borde -1) * ((horizontal) * 1 + abs(horizontal-1) * ((vertical) * 0 + abs(vertical -1) * f1));
            f[idb+ 2] = (borde) * f2 + abs(borde -1) * ((horizontal) * 0 + abs(horizontal-1) * ((vertical) * 1 + abs(vertical -1) * f2));
            f[idb+ 3] = (borde) * f3 + abs(borde -1) * ((horizontal) * 1 + abs(horizontal-1) * ((vertical) * 0 + abs(vertical -1) * f3));
        }
    }
}

/* AoS streaming step with reflecting walls: a particle leaving the domain is
   bounced back in the opposite direction instead of wrapping. */
__global__ void kernelAoS_stream_borde(int *f, int *f_out, int N, int M, int j){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < N*M){
        int x, y, idb;
        idb = tid*4; // index of f0 in the array
        x = tid % M;
        y = tid / M;
        // Linear ids of the 4 adjacent nodes (right, up, left, down)
        int nd[] = {modulo(x+1,M) + y*M,
                    x + modulo(y+1, N) *M,
                    modulo(x-1, M) + y*M,
                    x + modulo(y-1, N) *M };
        // if-statement variant
        if (j == 0){
            for(int i=0; i<4; i++){
                // f0: right, f1: up, f2: left, f3: down
                // boundary conditions: true when the particle would exit
                bool der = (x == M-1 && i == 0);
                bool arr = (y == N-1 && i == 1);
                bool izq = (x == 0 && i == 2);
                bool abj = (y == 0 && i==3);
                // If the particle moves in this direction
                if(f[idb+i] == 1){
                    if (abj){ // moving down at the bottom wall
                        f_out[nd[1] * 4 + 1] = 1; // bounce upwards
                    }
                    else if (arr){
                        f_out[nd[3] * 4 + 3] = 1;
                    }
                    else if (izq){
                        f_out[nd[0] *4 + 0] = 1;
                    }
                    else if (der){
                        f_out[nd[2] * 4 + 2] = 1;
                    }
                    else{
                        f_out[nd[i]*4+i] = 1;
                    }
                }
            }
        }
        // ternary-operator variant (assignment chained inside the ternary)
        else if(j == 1){
            for(int i=0; i<4; i++){
                bool der = (x == M-1 && i == 0);
                bool arr = (y == N-1 && i == 1);
                bool izq = (x == 0 && i == 2);
                bool abj = (y == 0 && i==3);
                !(f[idb+i] == 1) ? true : (abj) ? f_out[nd[1] * 4 + 1] = 1 : (arr) ? f_out[nd[3] * 4 + 3] = 1: (izq) ? f_out[nd[0] *4 + 0] = 1 : (der) ? f_out[nd[2] * 4 + 2] = 1 : f_out[nd[i]*4+i] = 1;
            }
        }
        // boolean-arithmetic variant: each destination keeps its old value
        // unless this particle selects it (activo AND the matching wall test)
        else if (j == 2){
            for(int i=0; i<4; i++){
                bool activo = (f[idb+i] == 1);
                bool der = (x == M-1 && i == 0);
                bool arr = (y == N-1 && i == 1);
                bool izq = (x == 0 && i == 2);
                bool abj = (y == 0 && i==3);
                bool pared = (der || arr || izq || abj);
                f_out[nd[i]*4+i] = activo * abs(pared-1) + abs(activo * abs(pared-1) - 1 ) *f_out[nd[i]*4+i];
                f_out[nd[1] * 4 + 1] = activo * abj + abs(activo * abj -1) * f_out[nd[1] * 4 + 1];
                f_out[nd[3] * 4 + 3] = activo * arr + abs(activo * arr -1) * f_out[nd[3] * 4 + 3];
                f_out[nd[2] * 4 + 2] = activo * der + abs(activo * der -1) * f_out[nd[2] * 4 + 2];
                f_out[nd[0] * 4 + 0] = activo * izq + abs(activo * izq -1) * f_out[nd[0] * 4 + 0];
            }
        }
    }
}

// Question 3 kernel
//-----------------------------------------------------------

/* Fused streaming + collision with walls, gather formulation: each node pulls
   the occupation bits pointed at it by its neighbours (including reflections
   from boundary neighbours), then applies the collision rule on f_out. */
__global__ void kernelAoS_stream_col_borde(int *f, int *f_out, int N, int M, int j){
    // For each node, inspect the incoming f bits from its neighbours,
    // then perform the collision.
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < N*M){
        int x, y, idb;
        idb = tid*4; // index of f0 in the array
        x = tid % M;
        y = tid / M;
        int f_i[4]; // incoming particles gathered this step, per direction
        int nd[] = {modulo(x+1,M) + y*M,
                    x + modulo(y+1, N) *M,
                    modulo(x-1, M) + y*M,
                    x + modulo(y-1, N) *M };
        // Distance to the closest wall
        int distancia_x = (x < M - 1 - x ? x : M - 1 -x);
        int distancia_y = (y < N - 1 - y ? y : N - 1 -y);
        int distancia_borde = (distancia_x < distancia_y ? distancia_x : distancia_y);
        for(int i=0; i<4; i++){
            // Is neighbour i itself a boundary node?
            bool vecino_borde = (nd[i]%M == 0 || nd[i]%M == M-1 || nd[i]/M == 0 || nd[i]/M == N-1? true: false);
            // For each of the neighbours
            if (distancia_borde >= 1){ // interior node: no wall handling here
                if (f[nd[i]*4 + (i+2)%4] == 1){ // neighbour points at us
                    f_out[tid * 4 + (i+2)%4] = 1;
                    f_i[i] = 1;
                }
                else if(vecino_borde && f[nd[i]*4 + i%4] == 1){
                    // Boundary neighbour may reflect a particle back to us
                    f_out[tid * 4 + (i+2)%4] = 1;
                    f_i[i] = 1;
                }
                else{
                    f_out[tid * 4 + (i+2)%4] = 0;
                    f_i[i] = 0;
                }
            }
            else if (distancia_borde == 0){ // wall node: only 2-3 real neighbours
                bool der = (x == M-1);
                bool arr = (y == N-1);
                bool izq = (x == 0);
                bool abj = (y == 0);
                if (f[nd[i]*4 + (i+2)%4] == 1 || (vecino_borde && f[nd[i]*4 + i%4] == 1)){
                    // Neighbour sends a particle towards this node; accept it
                    // only from directions that exist for this wall/corner
                    if (abj && i != 3){ // node on the bottom wall
                        if((der && i != 0) || (arr && i != 1)){
                            f_out[tid*4 + (i+2)%4] = 1;
                            f_i[i] = 1;
                        }
                        else if(izq && i != 2){
                            f_out[tid*4 + (i+2)%4] = 1;
                            f_i[i] = 1;
                        }
                        else if(!der && !izq){
                            f_out[tid*4 + (i+2)%4] = 1;
                            f_i[i] = 1;
                        }
                    }
                    else if ((izq && i != 2) || (der && i != 0)){
                        if(arr && i != 1){
                            f_out[tid*4 + (i+2)%4] = 1;
                            f_i[i] = 1;
                        }
                        else if(abj && i != 3){
                            f_out[tid*4 + (i+2)%4] = 1;
                            f_i[i] = 1;
                        }
                        else if(!abj && !arr){
                            f_out[tid*4 + (i+2)%4] = 1;
                            f_i[i] = 1;
                        }
                    }
                }
                else {
                    f_out[tid*4 + (i+2)%4] = 0;
                    f_i[i] = 0;
                }
                // NOTE(review): on the accepting paths above, some i leave
                // f_i[i] unset (and f_out unwritten) when none of the inner
                // branches fire — f_i may then be read uninitialized below;
                // confirm intended.
            }
        }
        // Handle collisions directly on the f_out array
        //----------------------------------------------------------
        bool borde = (x == 0 || x == M -1 || y == 0 || y == N-1);
        bool horizontal = (f_i[(0+2)%4] && f_i[(2+2)%4] && f_i[(1+2)%4] == 0 && f_i[(3+2)%4] == 0);
        bool vertical = (f_i[(0+2)%4] == 0 && f_i[(2+2)%4] == 0 && f_i[(1+2)%4] && f_i[(3+2)%4]);
        // if-statement variant (only j == 0 is implemented for question 3)
        if (j == 0){
            if ( !borde ){ // only collide away from the boundary
                if(horizontal){
                    f_out[idb] = 0;
                    f_out[idb+1] = 1;
                    f_out[idb+2] = 0;
                    f_out[idb+3] = 1;
                }
                else if(vertical){
                    f_out[idb] = 1;
                    f_out[idb+1] = 0;
                    f_out[idb+2] = 1;
                    f_out[idb+3] = 0;
                }
            }
        }
    }
}

/* Main driver: runs and times three experiment groups on "initial.txt"
   (question 1: SoA vs AoS on a torus; question 2: AoS with walls, three
   branching styles; question 3: fused stream+collision kernel), printing
   elapsed times measured with CUDA events. */
int main(int argc, char **argv){
    cudaEvent_t ct1, ct2;
    float dt;
    // N is the y extent, M the x extent
    const char *metodo;
    int M, N;
    int *f_host, *f_hostout, *f, *f_out, *temp;
    int iteraciones[] = {1000};
    char filename[15] = "initial.txt\0";
    int gs, bs = 256;
    int X = 4; // directions per node
    for(int iteracion = 0; iteracion<1; iteracion++){
        std::cout << "Archivo 2000x2000 con " << iteraciones[iteracion] << " iteraciones." << std::endl;
        // Question 1: both layouts, SoA (i == 0) then AoS (i == 1)
        for (int i=0; i<2; i++){
            Read(&f_host, &M, &N, filename, X, i);
            gs = (int)ceil((float) M * N * X / bs);
            cudaMalloc((void**)&f, M * N * X * sizeof(int));
            cudaMemcpy(f, f_host, M * N * X * sizeof(int), cudaMemcpyHostToDevice);
            cudaMalloc((void**)&f_out, M * N * X * sizeof(int));
            cudaMalloc((void**)&temp, M * N * X * sizeof(int));
            // NOTE(review): the buffer allocated into `temp` here leaks —
            // `temp` is immediately reused as a swap variable below.
            cudaEventCreate(&ct1);
            cudaEventCreate(&ct2);
            cudaEventRecord(ct1);
            // Time-step iterations
            for (int j=0; j<iteraciones[iteracion]; j++){
                f_out_0<<<gs, bs>>>(f_out, N, M);
                if (i == 0){
                    kernelSoA_col<<<gs, bs>>>(f, f_out, X, N, M);
                    kernelSoA_stream<<<gs, bs>>>(f, f_out, X, N, M);
                }
                else{
                    kernelAoS_col<<<gs, bs>>>(f, f_out, X, N, M);
                    kernelAoS_stream<<<gs, bs>>>(f, f_out, N, M);
                }
                // pointer swap: f_out becomes the new state
                temp = f;
                f = f_out;
                f_out = temp;
            }
            cudaEventRecord(ct2);
            cudaEventSynchronize(ct2);
            cudaEventElapsedTime(&dt, ct1, ct2);
            f_hostout = new int[M * N * X];
            cudaMemcpy(f_hostout, f, M * N * X * sizeof(int), cudaMemcpyDeviceToHost);
            if (i == 0){
                // Write_SoA(f_hostout, M, N, "initial_S.txt\0");
                metodo = "SoA";
            }
            else{
                // Write_AoS(f_hostout, M, N, "initial_A.txt\0");
                metodo = "AoS";
            }
            std::cout << "Tiempo " << metodo << ": " << dt << "[ms]" << std::endl;
            cudaFree(f);
            cudaFree(temp);
            cudaFree(f_out);
            // NOTE(review): after an odd number of swaps f == temp, so one of
            // these cudaFree calls double-frees and one buffer is never freed.
            delete[] f_host;
            delete[] f_hostout;
        }
        std::cout << "" << std::endl;
        // Question 2: AoS with walls; i selects branching style
        // (0 = if, 1 = ternary, 2 = boolean arithmetic)
        for (int i=0; i<3; i++){
            Read(&f_host, &M, &N, filename, X, 1);
            gs = (int)ceil((float) M * N * X / bs);
            cudaMalloc((void**)&f, M * N * X * sizeof(int));
            cudaMemcpy(f, f_host, M * N * X * sizeof(int), cudaMemcpyHostToDevice);
            cudaMalloc((void**)&f_out, M * N * X * sizeof(int));
            // cudaMalloc((void**)&temp, M * N * X * sizeof(int));
            cudaEventCreate(&ct1);
            cudaEventCreate(&ct2);
            cudaEventRecord(ct1);
            // Time-step iterations
            for (int j=0; j<iteraciones[iteracion]; j++){
                f_out_0<<<gs, bs>>>(f_out, N, M);
                kernelAoS_col_borde<<<gs, bs>>>(f, f_out, X, N, M, i);
                kernelAoS_stream_borde<<<gs, bs>>>(f, f_out, N, M, i);
                // pointer swap
                temp = f;
                f = f_out;
                f_out = temp;
            }
            cudaEventRecord(ct2);
            cudaEventSynchronize(ct2);
            cudaEventElapsedTime(&dt, ct1, ct2);
            f_hostout = new int[M * N * X];
            cudaMemcpy(f_hostout, f, M * N * X * sizeof(int), cudaMemcpyDeviceToHost);
            // Write_AoS(f_hostout, M, N, "initial_A.txt\0");
            if (i == 0){
                metodo = "IF ";
            }
            else if (i == 1){
                metodo = "TERNARIO ";
            }
            else if (i == 2) {
                metodo = "BOOLEANO ";
            }
            std::cout << "Tiempo AoS con bordes y operador: " << metodo << dt << "[ms]" << std::endl;
            cudaFree(f);
            cudaFree(temp);
            cudaFree(f_out);
            delete[] f_host;
            delete[] f_hostout;
        }
        std::cout << "" << std::endl;
        // Question 3
        //-----------------------------------------------------------------------
        // AoS with walls, everything (stream + collision) in a single kernel
        Read(&f_host, &M, &N, filename, X, 1);
        gs = (int)ceil((float) M * N * X / bs);
        cudaMalloc((void**)&f, M * N * X * sizeof(int));
        cudaMemcpy(f, f_host, M * N * X * sizeof(int), cudaMemcpyHostToDevice);
        cudaMalloc((void**)&f_out, M * N * X * sizeof(int));
        cudaEventCreate(&ct1);
        cudaEventCreate(&ct2);
        cudaEventRecord(ct1);
        // Initial collision pass before the fused loop
        kernelAoS_col<<<gs, bs>>>(f, f_out, X, N, M);
        // Time-step iterations
        for (int j=0; j<iteraciones[iteracion]; j++){
            f_out_0<<<gs, bs>>>(f_out, N, M);
            kernelAoS_stream_col_borde<<<gs, bs>>>(f, f_out, N, M, 0);
            // pointer swap
            temp = f;
            f = f_out;
            f_out = temp;
        }
        cudaEventRecord(ct2);
        cudaEventSynchronize(ct2);
        cudaEventElapsedTime(&dt, ct1, ct2);
        f_hostout = new int[M * N * X];
        cudaMemcpy(f_hostout, f, M * N * X * sizeof(int), cudaMemcpyDeviceToHost);
        // Write_AoS(f_hostout, M, N, "initial_A.txt\0");
        std::cout << "Tiempo AoS con bordes y operador if en un solo kernel: " << dt << "[ms]\n" << std::endl;
        cudaFree(f);
        cudaFree(temp);
        cudaFree(f_out);
        delete[] f_host;
        delete[] f_hostout;
    }
    return 0;
}
14,456
/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
 */

#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Normalize.h"
#include "Normalize.cuh"

namespace nts { // namespace nts(NiuTrans.Tensor)

#ifdef USE_CUDA

/*
normalized the data with normal distribution (kernel code).
For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively,
and \epsilon is the adjustment parameter

Thread layout: i (x dimension) walks stride * blockNum "columns", j (y
dimension) walks strideNum items within a column. The per-column values
(mean, var, offsets) are staged into shared memory once by the threads with
threadIdx.y == 0, then shared by the whole block after the barrier.

>> input - the input data array
>> output - the output data array
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to go over for next block
>> blockNum - how many blocks we have
*/
template<class T>
__global__
void KernelNormalizeFloat(T * input, T * output, T * mean, T * var,
                          T * a, T * b, T epsilon,
                          int stride, int strideNum, int blockNum)
{
    __shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int blockSize;

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard against the grid tail in both dimensions
    if (i >= stride * blockNum || j >= strideNum)
        return;

    // One row of threads loads the per-column statistics into shared memory
    if (threadIdx.y == 0) {
        iOffset[threadIdx.x] = i % stride;
        iBlock[threadIdx.x] = i / stride;
        iMean[threadIdx.x] = mean[i];
        iVar[threadIdx.x] = var[i];
        blockSize = stride * strideNum;
    }

    __syncthreads();

    int inBlockOffset = j * stride + iOffset[threadIdx.x];
    int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;

    output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x])) /
                      sqrt((DTYPE)(iVar[threadIdx.x] + epsilon)) + (DTYPE)b[inBlockOffset];
}

/* half-precision variant of KernelNormalizeFloat.
   NOTE(review): unlike the float kernel, no epsilon is added under hsqrt —
   a zero variance divides by zero; confirm whether this is intended. */
template<class T>
__global__
void KernelNormalizeHalf(T * input, T * output, T * mean, T * var,
                         T * a, T * b,
                         int stride, int strideNum, int blockNum)
{
    __shared__ half iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ half iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int blockSize;

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard against the grid tail in both dimensions
    if (i >= stride * blockNum || j >= strideNum)
        return;

    // One row of threads loads the per-column statistics into shared memory
    if (threadIdx.y == 0) {
        iOffset[threadIdx.x] = i % stride;
        iBlock[threadIdx.x] = i / stride;
        iMean[threadIdx.x] = mean[i];
        iVar[threadIdx.x] = var[i];
        blockSize = stride * strideNum;
    }

    __syncthreads();

    int inBlockOffset = j * stride + iOffset[threadIdx.x];
    int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;

    output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])),
                            hsqrt(iVar[threadIdx.x])), b[inBlockOffset]);
}

/*
normalized the data with normal distribution.
For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively,
and \epsilon is the adjustment parameter

>> input - the input tensor
>> output - the output tensor
>> dim - dimension alone which we generate the mean and variance
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
*/
void _CudaNormalize(const XTensor * input, XTensor * output, int dim,
                    const XTensor * mean, const XTensor * var,
                    const XTensor * a, const XTensor * b, DTYPE epsilon)
{
    // Decompose the tensor around `dim`: dims after it form `stride`,
    // dims before it form `blockNum`, `dim` itself is `strideNum`.
    int stride = 1;
    int strideNum = input->dimSize[dim];
    int blockNum = 1;
    for (int i = 0; i < input->order; i++) {
        if (i > dim)
            stride *= input->dimSize[i];
        else if (i < dim)
            blockNum *= input->dimSize[i];
    }

    int cudaGridSize[3];
    int cudaBlockSize[3];

    GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum,
                          MAX_INT, cudaGridSize, cudaBlockSize);

    dim3 blocks(cudaGridSize[1], cudaGridSize[0]);
    dim3 threads(cudaBlockSize[1], cudaBlockSize[0]);

    // Switch to the tensor's device for the launch, restore afterwards
    int devIDBackup;
    ProtectCudaDev(a->devID, devIDBackup);

    if (input->dataType == DEFAULT_DTYPE) {
        KernelNormalizeFloat <DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data,
                                                             (DTYPE*)mean->data, (DTYPE*)var->data,
                                                             (DTYPE*)a->data, (DTYPE*)b->data, epsilon,
                                                             stride, strideNum, blockNum);
    }
    else if (input->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
        KernelNormalizeHalf <half><< <blocks, threads>>> ((__half*)input->data, (__half*)output->data,
                                                          (__half*)mean->data, (__half*)var->data,
                                                          (__half*)a->data, (__half*)b->data,
                                                          stride, strideNum, blockNum);
#else
        ShowNTErrors("Please compile with -DHALF_PRECISION");
#endif
    }

    BacktoCudaDev(a->devID, devIDBackup);
}

/*
normalized the data with normal distribution (kernel code).
For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively,
and \epsilon is the adjustment parameter

L1 variant: divides by a precomputed distance instead of sqrt(var+eps).

>> input - the input data array
>> output - the output data array
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to go over for next block
>> blockNum - how many blocks we have
*/
template<class T>
__global__
void KernelL1NormalizeFloat(T * input, T * output, T * mean, T * distance,
                            T * a, T * b,
                            int stride, int strideNum, int blockNum)
{
    __shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int blockSize;

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard against the grid tail in both dimensions
    if (i >= stride * blockNum || j >= strideNum)
        return;

    // One row of threads loads the per-column statistics into shared memory
    if (threadIdx.y == 0) {
        iOffset[threadIdx.x] = i % stride;
        iBlock[threadIdx.x] = i / stride;
        iMean[threadIdx.x] = mean[i];
        iVar[threadIdx.x] = distance[i];
        blockSize = stride * strideNum;
    }

    __syncthreads();

    int inBlockOffset = j * stride + iOffset[threadIdx.x];
    int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;

    output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x]) /
                     iVar[threadIdx.x]) + (DTYPE)b[inBlockOffset];
}

/* half-precision variant of KernelL1NormalizeFloat. */
template<class T>
__global__
void KernelL1NormalizeHalf(T * input, T * output, T * mean, T * distance,
                           T * a, T * b,
                           int stride, int strideNum, int blockNum)
{
    __shared__ half iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ half iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int blockSize;

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard against the grid tail in both dimensions
    if (i >= stride * blockNum || j >= strideNum)
        return;

    // One row of threads loads the per-column statistics into shared memory
    if (threadIdx.y == 0) {
        iOffset[threadIdx.x] = i % stride;
        iBlock[threadIdx.x] = i / stride;
        iMean[threadIdx.x] = mean[i];
        iVar[threadIdx.x] = distance[i];
        blockSize = stride * strideNum;
    }

    __syncthreads();

    int inBlockOffset = j * stride + iOffset[threadIdx.x];
    int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;

    output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])),
                            iVar[threadIdx.x]), b[inBlockOffset]);
}

/*
normalized the data with normal distribution.
For an input x, y = a * (x-mean)/distance + b
where a and b are the scalar and bias respectively,
and \epsilon is the adjustment parameter

>> input - the input tensor
>> output - the output tensor
>> dim - dimension alone which we generate the mean and variance
>> mean - the mean of the input
>> distance - the distance of the input
>> a - the scalar
>> b - the bias
*/
void _CudaL1Normalize(const XTensor * input, XTensor * output, int dim,
                      const XTensor * mean, const XTensor * distance,
                      const XTensor * a, const XTensor * b)
{
    // Decompose the tensor around `dim` (same scheme as _CudaNormalize)
    int stride = 1;
    int strideNum = input->dimSize[dim];
    int blockNum = 1;
    for (int i = 0; i < input->order; i++) {
        if (i > dim)
            stride *= input->dimSize[i];
        else if (i < dim)
            blockNum *= input->dimSize[i];
    }

    int cudaGridSize[3];
    int cudaBlockSize[3];

    GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum,
                          MAX_INT, cudaGridSize, cudaBlockSize);

    dim3 blocks(cudaGridSize[1], cudaGridSize[0]);
    dim3 threads(cudaBlockSize[1], cudaBlockSize[0]);

    // Switch to the tensor's device for the launch, restore afterwards
    int devIDBackup;
    ProtectCudaDev(a->devID, devIDBackup);

    if (input->dataType == DEFAULT_DTYPE) {
        KernelL1NormalizeFloat <DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data,
                                                               (DTYPE*)mean->data, (DTYPE*)distance->data,
                                                               (DTYPE*)a->data, (DTYPE*)b->data,
                                                               stride, strideNum, blockNum);
    }
    else if (input->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
        KernelL1NormalizeHalf <half><< <blocks, threads>>> ((__half*)input->data, (__half*)output->data,
                                                            (__half*)mean->data, (__half*)distance->data,
                                                            (__half*)a->data, (__half*)b->data,
                                                            stride, strideNum, blockNum);
#else
        ShowNTErrors("Please compile with -DHALF_PRECISION");
#endif
    }

    BacktoCudaDev(a->devID, devIDBackup);
}

#endif // USE_CUDA

} // namespace nts(NiuTrans.Tensor)
14,457
#include "matrices.cuh"
#include <stdlib.h>
#include <assert.h>

// Wraps a CUDA runtime call and aborts with file/line context on failure.
#define checkCUDAError(val) { checkError((val), #val, __FILE__, __LINE__); }

// in-line regular function
void checkError(cudaError_t code, char const * func, const char *file, const int line)
{
    if (code != cudaSuccess)
    {
        std::cerr << "CUDA error returned from \"" << func << "\" at " << file << ":" << line
                  << "\nError code: " << code << "(" << cudaGetErrorString(code) << ")\n";
        cudaDeviceReset();
        exit(code);
    }
}

// Fills `buffer` (height*width floats) with a constant.
// `value` is now a float: the previous `int value` parameter silently
// truncated fractional fill values passed by Mat(height, width, float).
__global__ void mat_init(float* buffer, int height, int width, float value)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= width * height)
        return;
    buffer[i] = value;
}

// Zero-initialised matrix (calloc yields an all-zero buffer).
Mat::Mat(int height, int width)
    : m_height(height)
    , m_width(width)
    , m_buffer((float*) calloc(height * width, sizeof(float)))
{}

// Constant-filled matrix. A custom kernel is needed instead of cudaMemset
// because cudaMemset is byte-wise and cannot write arbitrary float values.
Mat::Mat(int height, int width, float value)
    : m_height{height}
    , m_width{width}
{
    std::size_t buffer_size = height * width;
    this->m_buffer = (float*) malloc(buffer_size * sizeof(float));

    float* d_buffer;
    checkCUDAError(cudaMalloc(&d_buffer, buffer_size * sizeof(float)));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::size_t threadsPerBlock = (buffer_size < (std::size_t)prop.maxThreadsPerBlock) ?
        buffer_size : prop.maxThreadsPerBlock;
    std::size_t nbBlocks = buffer_size / threadsPerBlock + 1;

    mat_init<<<nbBlocks, threadsPerBlock>>>(d_buffer, height, width, value);
    checkCUDAError(cudaGetLastError());   // catch launch-configuration errors
    cudaDeviceSynchronize();

    checkCUDAError(cudaMemcpy(this->m_buffer, d_buffer, buffer_size * sizeof(float),
                              cudaMemcpyDeviceToHost));
    cudaFree(d_buffer);
}

// Matrix initialised from a host array of height*width floats.
Mat::Mat(float* list_init, int height, int width)
    : m_height(height)
    , m_width(width)
{
    std::size_t buffer_size = height * width;
    this->m_buffer = (float*) malloc(buffer_size * sizeof(float));
    checkCUDAError(cudaMemcpy(this->m_buffer, list_init, buffer_size * sizeof(float),
                              cudaMemcpyHostToHost));
}

// Column vector.
Mat::Mat(float* list_init, int height)
    : Mat(list_init, height, 1)
{}

Mat::Mat(const Mat& m)
    : Mat(m.m_buffer, m.m_height, m.m_width)
{}

Mat Mat::copy() const { return Mat(m_buffer, m_height, m_width); }

// Deep-copy assignment.
void Mat::operator=(const Mat& other)
{
    if (this == &other)
        return;
    // Reallocate when element counts differ: the old code copied into the
    // existing buffer, overflowing it whenever `other` was larger.
    if (m_height * m_width != other.m_height * other.m_width)
        m_buffer = (float*) realloc(m_buffer, other.m_height * other.m_width * sizeof(float));
    m_height = other.m_height;
    m_width = other.m_width;
    checkCUDAError(cudaMemcpy(m_buffer, other.m_buffer, m_height * m_width * sizeof(float),
                              cudaMemcpyHostToHost));
}

Mat::~Mat() { free(this->m_buffer); }

// I don't think that using a kernel (with all the overhead needed) will be
// faster than a little for loop.
Mat Mat::eye(int dim)
{
    Mat ret(dim, dim);
    for (int i = 0; i < dim; ++i)
        ret.m_buffer[i * ret.m_width + i] = 1;
    return ret;
}

// One thread per output element; looping over k inside a single thread avoids
// any write concurrency on ret.
__global__ void dot_kernel(float* self, float* other, float* ret,
                           int s_height, int s_width, int o_width)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= s_height * o_width)
        return;
    int i = th / o_width; // 0 to height
    int j = th % o_width; // 0 to width
    // Accumulate in a register and store once: `ret` comes straight from
    // cudaMalloc and is NOT zeroed, so the old `ret[...] += ...` summed
    // into uninitialised memory.
    float acc = 0.0f;
    for (int k = 0; k < s_width; ++k)
        acc += self[i * s_width + k] * other[k * o_width + j];
    ret[i * o_width + j] = acc;
}

// Matrix product: this (m x n) . other (n x p) -> (m x p).
Mat Mat::dot(const Mat& other)
{
    if (m_width != other.m_height)
    {
        printf("Invalid dot product, shapes do not match {%i, %i} vs {%i, %i}",
               m_height, m_width, other.m_height, other.m_width);
        throw "Invalid dot product";
    }

    Mat ret(m_height, other.m_width);
    float* ret_buffer;
    checkCUDAError(cudaMalloc(&ret_buffer, ret.m_height * ret.m_width * sizeof(float)));

    float* self_buffer;
    checkCUDAError(cudaMalloc(&self_buffer, m_height * m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(self_buffer, m_buffer, m_height * m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    float* other_buffer;
    checkCUDAError(cudaMalloc(&other_buffer, other.m_height * other.m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(other_buffer, other.m_buffer,
                              other.m_height * other.m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::size_t buffer_size = ret.m_height * ret.m_width;
    std::size_t threadsPerBlock = (buffer_size < (std::size_t)prop.maxThreadsPerBlock) ?
        buffer_size : prop.maxThreadsPerBlock;
    std::size_t nbBlocks = buffer_size / threadsPerBlock + 1;

    dot_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, other_buffer, ret_buffer,
                                              m_height, m_width, other.m_width);
    checkCUDAError(cudaGetLastError());
    cudaDeviceSynchronize();

    checkCUDAError(cudaMemcpy(ret.m_buffer, ret_buffer,
                              ret.m_height * ret.m_width * sizeof(float),
                              cudaMemcpyDeviceToHost));
    cudaFree(ret_buffer);
    cudaFree(self_buffer);
    cudaFree(other_buffer);
    return ret;
}

__global__ void T_kernel(float* self, float* ret, int s_height, int s_width)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= s_height * s_width)
        return;
    int i = th / s_width; // 0 to height
    int j = th % s_width; // 0 to width
    ret[j * s_height + i] = self[i * s_width + j];
}

// Transpose.
Mat Mat::T()
{
    Mat ret(m_width, m_height);
    float* ret_buffer;
    checkCUDAError(cudaMalloc(&ret_buffer, ret.m_height * ret.m_width * sizeof(float)));

    float* self_buffer;
    checkCUDAError(cudaMalloc(&self_buffer, m_height * m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(self_buffer, m_buffer, m_height * m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::size_t buffer_size = ret.m_height * ret.m_width;
    std::size_t threadsPerBlock = (buffer_size < (std::size_t)prop.maxThreadsPerBlock) ?
        buffer_size : prop.maxThreadsPerBlock;
    std::size_t nbBlocks = buffer_size / threadsPerBlock + 1;

    T_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, ret_buffer, m_height, m_width);
    checkCUDAError(cudaGetLastError());
    cudaDeviceSynchronize();

    checkCUDAError(cudaMemcpy(ret.m_buffer, ret_buffer,
                              ret.m_height * ret.m_width * sizeof(float),
                              cudaMemcpyDeviceToHost));
    cudaFree(ret_buffer);
    cudaFree(self_buffer);
    return ret;
}

// Element-wise addition of two equal-shape matrices.
__global__ void add_kernel(float* self, float* other, float* ret, int s_height, int s_width)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= s_height * s_width)
        return;
    ret[th] = self[th] + other[th];
}

// Adds a 1-row matrix to every row of `self` (row broadcast).
__global__ void add_broadcast_kernel(float* self, float* other, float* ret, int s_height, int s_width)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= s_height * s_width)
        return;
    int i = th / s_width; // 0 to height
    int j = th % s_width; // 0 to width
    ret[i * s_width + j] = self[i * s_width + j] + other[j];
}

// Addition; `other` may be a single row, which is broadcast over all rows.
Mat Mat::operator+(const Mat& other) const
{
    if ((this->m_width != other.m_width) ||
        (m_height != other.m_height && other.m_height != 1))
    {
        printf("Could not add matrices, dimensions do not match {%i, %i} vs {%i, %i}",
               this->m_height, this->m_width, other.m_height, other.m_width);
        throw "Invalid addition";
    }

    Mat ret(m_height, m_width);
    float* ret_buffer;
    checkCUDAError(cudaMalloc(&ret_buffer, ret.m_height * ret.m_width * sizeof(float)));

    float* self_buffer;
    checkCUDAError(cudaMalloc(&self_buffer, m_height * m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(self_buffer, m_buffer, m_height * m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    float* other_buffer;
    checkCUDAError(cudaMalloc(&other_buffer, other.m_height * other.m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(other_buffer, other.m_buffer,
                              other.m_height * other.m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::size_t buffer_size = ret.m_height * ret.m_width;
    std::size_t threadsPerBlock = (buffer_size < (std::size_t)prop.maxThreadsPerBlock) ?
        buffer_size : prop.maxThreadsPerBlock;
    std::size_t nbBlocks = buffer_size / threadsPerBlock + 1;

    if (m_height == other.m_height)
        add_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, other_buffer, ret_buffer,
                                                  m_height, m_width);
    else
        add_broadcast_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, other_buffer, ret_buffer,
                                                            m_height, m_width);
    checkCUDAError(cudaGetLastError());
    cudaDeviceSynchronize();

    checkCUDAError(cudaMemcpy(ret.m_buffer, ret_buffer,
                              ret.m_height * ret.m_width * sizeof(float),
                              cudaMemcpyDeviceToHost));
    cudaFree(ret_buffer);
    cudaFree(self_buffer);
    cudaFree(other_buffer);
    return ret;
}

// Element-wise subtraction of two equal-shape matrices.
__global__ void sub_kernel(float* self, float* other, float* ret, int s_height, int s_width)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= s_height * s_width)
        return;
    ret[th] = self[th] - other[th];
}

// Subtracts a 1-row matrix from every row of `self` (row broadcast).
__global__ void sub_broadcast_kernel(float* self, float* other, float* ret, int s_height, int s_width)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= s_height * s_width)
        return;
    int i = th / s_width; // 0 to height
    int j = th % s_width; // 0 to width
    ret[i * s_width + j] = self[i * s_width + j] - other[j];
}

// Subtraction; `other` may be a single row, which is broadcast over all rows.
Mat Mat::operator-(const Mat& other) const
{
    if ((this->m_width != other.m_width) ||
        (m_height != other.m_height && other.m_height != 1))
    {
        // fixed message: this is subtraction, the old text said "add"
        printf("Could not subtract matrices, dimensions do not match {%i, %i} vs {%i, %i}",
               this->m_height, this->m_width, other.m_height, other.m_width);
        throw "Invalid subtraction";
    }

    Mat ret(m_height, m_width);
    float* ret_buffer;
    checkCUDAError(cudaMalloc(&ret_buffer, ret.m_height * ret.m_width * sizeof(float)));

    float* self_buffer;
    checkCUDAError(cudaMalloc(&self_buffer, m_height * m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(self_buffer, m_buffer, m_height * m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    float* other_buffer;
    checkCUDAError(cudaMalloc(&other_buffer, other.m_height * other.m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(other_buffer, other.m_buffer,
                              other.m_height * other.m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::size_t buffer_size = ret.m_height * ret.m_width;
    std::size_t threadsPerBlock = (buffer_size < (std::size_t)prop.maxThreadsPerBlock) ?
        buffer_size : prop.maxThreadsPerBlock;
    std::size_t nbBlocks = buffer_size / threadsPerBlock + 1;

    if (m_height == other.m_height)
        sub_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, other_buffer, ret_buffer,
                                                  m_height, m_width);
    else
        sub_broadcast_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, other_buffer, ret_buffer,
                                                            m_height, m_width);
    checkCUDAError(cudaGetLastError());
    cudaDeviceSynchronize();

    checkCUDAError(cudaMemcpy(ret.m_buffer, ret_buffer,
                              ret.m_height * ret.m_width * sizeof(float),
                              cudaMemcpyDeviceToHost));
    cudaFree(ret_buffer);
    cudaFree(self_buffer);
    cudaFree(other_buffer);
    return ret;
}

// Divides pivot row x of A and I by the pivot A[x][x]; `diag` selects whether
// the pivot element itself (diag) or the rest of the row (!diag) is scaled.
__global__ void normalize_kernel(float *A, float *I, int n, int x, bool diag)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= n * n)
        return;
    int i = th / n; // 0 to height
    int j = th % n; // 0 to width
    if ((!diag && (i == x && i != j)) || (diag && (i == x && i == j)))
    {
        I[i * n + j] /= A[x * n + x];
        A[i * n + j] /= A[x * n + x];
    }
}

// Eliminates column x from all rows other than the pivot row.
__global__ void gaussjordan_kernel(float *A, float *I, int n, int x)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= n * n)
        return;
    int i = th / n; // 0 to height
    int j = th % n; // 0 to width
    if (i != x)
    {
        I[i * n + j] -= I[x * n + j] * A[i * n + x];
        if (j != x)
        {
            A[i * n + j] -= A[x * n + j] * A[i * n + x];
        }
    }
}

// Explicitly zeroes column x below/above the pivot after elimination.
__global__ void zero_kernel(float *A, int n, int x)
{
    int th = blockDim.x * blockIdx.x + threadIdx.x;
    if (th >= n * n)
        return;
    int i = th / n; // 0 to height
    int j = th % n; // 0 to width
    if (i != x && j == x)
    {
        A[i * n + j] = 0;
    }
}

// Gauss-Jordan inverse. NOTE(review): assumes a square, invertible matrix
// (no pivoting, no singularity check) — TODO confirm callers guarantee this.
// Only device-side copies are mutated, so the const qualifier is honest.
Mat Mat::inverse() const
{
    Mat ret = eye(m_height);
    float* ret_buffer;
    checkCUDAError(cudaMalloc(&ret_buffer, ret.m_height * ret.m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(ret_buffer, ret.m_buffer,
                              ret.m_height * ret.m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    float* self_buffer;
    checkCUDAError(cudaMalloc(&self_buffer, m_height * m_width * sizeof(float)));
    checkCUDAError(cudaMemcpy(self_buffer, m_buffer, m_height * m_width * sizeof(float),
                              cudaMemcpyHostToDevice));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::size_t buffer_size = ret.m_height * ret.m_width;
    std::size_t threadsPerBlock = (buffer_size < (std::size_t)prop.maxThreadsPerBlock) ?
        buffer_size : prop.maxThreadsPerBlock;
    std::size_t nbBlocks = buffer_size / threadsPerBlock + 1;

    for (int i = 0; i < m_height; ++i)
    {
        normalize_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, ret_buffer, m_height, i, 0);
        normalize_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, ret_buffer, m_height, i, 1);
        gaussjordan_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, ret_buffer, m_height, i);
        zero_kernel<<<nbBlocks, threadsPerBlock>>>(self_buffer, m_height, i);
    }
    checkCUDAError(cudaGetLastError());

    checkCUDAError(cudaMemcpy(ret.m_buffer, ret_buffer,
                              ret.m_height * ret.m_width * sizeof(float),
                              cudaMemcpyDeviceToHost));
    cudaFree(self_buffer);
    cudaFree(ret_buffer);
    return ret;
}

// Pretty-prints the matrix as nested brace lists.
void Mat::print() const
{
    std::cout << "{\n";
    for (int i = 0; i < this->m_height; ++i)
    {
        std::cout << "  { ";
        for (int j = 0; j < this->m_width;)
        {
            std::cout << this->m_buffer[i * this->m_width + j];
            if (++j < this->m_width)
                std::cout << ", ";
        }
        std::cout << " }\n";
    }
    std::cout << "}\n";
}
14,458
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

// Microseconds per second, as an unsigned 64-bit constant.
#define USECPSEC 1000000ULL

// Wall-clock time in microseconds, measured relative to `start`
// (pass 0 to get an absolute timestamp).
unsigned long long dtime_usec(unsigned long long start){
  timeval tv;
  gettimeofday(&tv, 0);
  return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

// Empty kernel: whatever time we measure is pure launch/execution overhead.
__global__ void tkernel(){
}

int main(){
  // Warm-up launch so the timed launch below excludes one-time CUDA
  // context/module initialisation costs.
  tkernel<<<2000, 32>>>();
  cudaDeviceSynchronize();
  unsigned long long dt = dtime_usec(0);
  unsigned long long dt1 = dt;
  tkernel<<<2000, 32>>>();
  // Kernel launches are asynchronous, so `dt` taken here (before any sync)
  // captures only the host-side launch overhead, not the kernel's run time.
  dt = dtime_usec(dt);
  // Block until the kernel finishes; `dt1` then covers launch + execution.
  cudaDeviceSynchronize();
  dt1 = dtime_usec(dt1);
  printf("kernel launch: %fs, kernel duration: %fs\n", dt/(float)USECPSEC, dt1/(float)USECPSEC);
}
14,459
//==============================================
// COURSEWORK FOR PARALLEL AND DISTRIBUTED PROGRAMMING
// Mandelbrot Set
// CUDA
// Daniela Kuinchtner, 152064
//==============================================
#include <iostream>
#include <cstdlib>

using namespace std;

#define THREADSPERBLOCK 1024

// One thread per character of the ASCII Mandelbrot picture.
__global__ void brot(char *d_A, int max_row, int max_column, int max_n, int n);

int main(int argc, char *argv[]){
    // Validate the command line before dereferencing argv[1..3];
    // the old code crashed when arguments were missing.
    if (argc < 4) {
        cerr << "usage: " << argv[0] << " <rows> <columns> <max_iterations>\n";
        return 1;
    }
    int max_row = atoi(argv[1]);
    int max_column = atoi(argv[2]);
    int max_n = atoi(argv[3]);

    int n = max_row * max_column;
    size_t size = n * sizeof(char);
    int nBlocks = (n + THREADSPERBLOCK - 1) / THREADSPERBLOCK;  // ceiling division

    char *h_A = (char *)malloc(size);
    char *d_A;
    cudaSetDevice(0);
    cudaMalloc((void**)&d_A, size);

    // No host->device copy is needed: the kernel writes every cell of d_A
    // (the old code uploaded an uninitialised buffer for nothing).
    brot <<< nBlocks, THREADSPERBLOCK >>> (d_A, max_row, max_column, max_n, n);
    // cudaMemcpy implicitly waits for the kernel to finish.
    cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);

    int i = 0;
    for(int r = 0; r < max_row; ++r){
        for(int c = 0; c < max_column; ++c){
            cout << h_A[i++];
        }
        cout << "\n";
    }

    cudaFree(d_A);
    free(h_A);  // was leaked before
    return 0;
}

// Escape-time iteration for one pixel. The view maps columns onto
// [-1.5, 0.5) and rows onto [-1, 1).
__global__ void brot(char *d_A, int max_row, int max_column, int max_n, int n){
    int k = 0;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n){
        int r = i / max_column;
        int c = i % max_column;
        float x = 0.0f, y = 0.0f, tmp = 0.0f;
        // Float-suffixed literals (4.0f, 2.0f, 1.5f, 1.0f) keep the whole
        // expression in single precision; bare 1.5/2 promoted it to double.
        while((x*x + y*y) < 4.0f && ++k < max_n) {
            tmp = x*x - y*y + ((float) c * 2.0f / max_column - 1.5f);
            y = x*y*2.0f + ((float) r * 2.0f / max_row - 1.0f);
            x = tmp;
        }
        d_A[i] = (k == max_n ? '#' : '.');
    }
}
14,460
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include<cassert>

// Swap two floats through a temporary register.
// The previous arithmetic trick (a += b; b = a - b; a -= b) is not exact in
// floating point and can corrupt values (precision loss, inf), breaking the sort.
__device__ inline void swapf(float & a, float & b)
{
    float t = a;
    a = b;
    b = t;
}

// Swap two ints through a temporary; also avoids the overflow risk of a + b.
__device__ inline void swap(int & a, int & b)
{
    int t = a;
    a = b;
    b = t;
}

// For each point i of each batch bi, computes squared distances to all n
// points, sorts them (with their indices) ascending, and emits the k nearest.
// tmpd/tmpi provide one scratch row of length n per (blockIdx.x, blockIdx.y).
// result: (b, n, k) distances; result_i: (b, n, k, 2) = (batch, neighbour idx).
__global__ void KnnKernel(int b, const int n, const int dim, const float * xyz,
                          const int k, float* tmpd, int* tmpi,
                          float * result, int * result_i)
{
    float* dist = tmpd + ( blockIdx.x + blockIdx.y*gridDim.x )*n;
    int* idx = tmpi + ( blockIdx.x + blockIdx.y*gridDim.x )*n;
    for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
    {
        for ( int i = blockIdx.y ; i < n ; i += gridDim.y )
        {
            // squared Euclidean distance from point i to every point j
            for ( int j = threadIdx.x ; j < n ; j += blockDim.x )
            {
                if( i == j ){
                    dist[j] = 0;
                    idx[j] = j;
                    continue;
                }
                dist[j] = 0.0f;
                for ( int dimi = 0 ; dimi < dim ; ++dimi )
                {
                    float dif = xyz[(bi*n+i)*dim+dimi] - xyz[(bi*n+j)*dim+dimi];
                    dist[j] += dif*dif;
                }
                idx[j] = j;
            }
            __syncthreads();

            // Bit trick replaces the old `n != pow(2, int(log2(n)))`, which
            // compared an int against a double pow() result.
            if ( (n & (n - 1)) != 0 )
            {
                // n is not a power of two: odd-even transposition sort.
                // Each cnt iteration performs one odd and one even phase,
                // so (n+1)/2 iterations cover the required n phases.
                for ( int cnt = 0 ; cnt < ( n + 1 ) / 2 ; ++cnt )
                {
                    for ( int j = 2*threadIdx.x + 1 ; j < n ; j += 2*blockDim.x )
                    {
                        if ( dist[j] < dist[ j - 1 ] )
                        {
                            swapf(dist[j], dist[j-1]);
                            swap(idx[j], idx[j-1]);
                        }
                    }
                    __syncthreads();
                    for ( int j = 2*threadIdx.x + 2 ; j < n ; j += 2*blockDim.x )
                    {
                        if ( dist[j] < dist[ j - 1 ] )
                        {
                            swapf(dist[j], dist[j-1]);
                            swap(idx[j], idx[j-1]);
                        }
                    }
                    __syncthreads();
                }
            }else{
                // n is a power of two: bitonic sort.
                for (unsigned int t = 2; t <= n ; t *= 2)
                {
                    // Bitonic merge:
                    for (unsigned int j = t / 2; j>0; j /= 2)
                    {
                        for (unsigned int tid = threadIdx.x ; tid < n ; tid += blockDim.x )
                        {
                            unsigned int ixj = tid ^ j;
                            if (ixj > tid)
                            {
                                if ((tid & t) == 0)
                                {
                                    if (dist[tid] > dist[ixj])
                                    {
                                        swapf(dist[tid], dist[ixj]);
                                        swap(idx[tid], idx[ixj]);
                                    }
                                }
                                else
                                {
                                    if (dist[tid] < dist[ixj])
                                    {
                                        swapf(dist[tid], dist[ixj]);
                                        swap(idx[tid], idx[ixj]);
                                    }
                                }
                            }
                        }
                        __syncthreads();
                    }
                }
            }
            __syncthreads();

            // copy the k nearest neighbours out
            for ( int j = threadIdx.x ; j < k ; j += blockDim.x )
            {
                result[(bi*n+i)*k+j] = dist[j];
                result_i[ ((bi*n+i)*k+j)*2+0 ] = bi;
                result_i[ ((bi*n+i)*k+j)*2+1 ] = idx[j];
            }
        }
    }
}

// Host-side launcher; grid is (b, subn), 512 threads per block.
void KnnKernelLauncher(const int b,const int subn, const int n,const int dim,const float * xyz,const int k,float* tmpd,int* tmpi,float * result,int * result_i){
    KnnKernel<<<dim3(b,subn,1),512>>>(b,n,dim,xyz,k,tmpd,tmpi,result,result_i);
}
#endif
14,461
#pragma once
#include <cuda_runtime_api.h>

// Fixed-capacity bit container usable on both host and device.
// k is the number of bits; storage is rounded up to whole 64-bit words
// so GetWord64 can always address complete words.
template<unsigned long long k>
class BitSequence
{
public:
	__host__ BitSequence()
	{
	}
	// Copies arSize bytes from a host buffer into this sequence.
	__host__ BitSequence(char array[])
	{
		cudaMemcpy(this->array, array, arSize, cudaMemcpyHostToHost);
	}
	// Returns bit `index` as 0 or 1.
	__host__ __device__ inline char GetBit(unsigned long long index) const
	{
		const unsigned long long byte = index / 8;
		const unsigned int offset = index % 8;
		return (array[byte] >> offset) & 1;
	}
	// Sets bit `index` to 1 when `value` is non-zero, otherwise to 0.
	__host__ __device__ inline void SetBit(unsigned long long index, char value)
	{
		const unsigned long long byte = index / 8;
		const unsigned int offset = index % 8;
		const char cleared = array[byte] & ~(1 << offset);
		array[byte] = cleared | ((value ? 1 : 0) << offset);
	}
	// 32-bit view of word `word_index` (4 bytes per word).
	__host__ __device__ inline unsigned int *GetWord32(unsigned long long word_index)
	{
		return (unsigned int*)(array + word_index * 4);
	}
	// 64-bit view of word `word_index` (8 bytes per word).
	__host__ __device__ inline unsigned long long *GetWord64(unsigned long long word_index)
	{
		return (unsigned long long*)(array + word_index * 8);
	}
	// Backing-store size in bytes: ceil(k / 64) full 64-bit words.
	static const unsigned long long arSize = (k/64 + (!!(k%64)))*8;
private:
	char array[arSize];
};
14,462
#include "includes.h"

// Samples a float4 3D texture at this thread's voxel and writes the Euclidean
// norm of its (x, y, z) components to the flat output volume.
// NOTE(review): no bounds check against the volume extent — assumes the launch
// grid exactly covers c_size (declared elsewhere); confirm at the call site.
__global__ void useSingleTexture(cudaTextureObject_t tex, float* pout)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int k = blockIdx.z * blockDim.z + threadIdx.z;

    // +0.5f addresses the voxel centre; float literals avoid the silent
    // promotion to double that the bare `0.5` caused.
    float4 sample = tex3D<float4>(tex, i + 0.5f, j + 0.5f, k + 0.5f);

    // Plain products instead of powf(x, 2): same value, far cheaper.
    pout[i + c_size.x * (j + k * c_size.y)] =
        sqrtf(sample.x * sample.x + sample.y * sample.y + sample.z * sample.z);
}
14,463
/***************************************************************************
  **************************************************************************

                Spherical Harmonic Transform Kit 2.7

   Copyright 1997-2003  Sean Moore, Dennis Healy,
                        Dan Rockmore, Peter Kostelec
   Copyright 2004  Peter Kostelec, Dan Rockmore

   This file is part of SpharmonicKit.

   SpharmonicKit is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   SpharmonicKit is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.

   See the accompanying LICENSE file for details.

  ************************************************************************
  ************************************************************************/

/**** OUR supermoduli arrays used in FCTs. See Sean's thesis for details.

      The interface function (defined at the bottom of this file) is

      const double *get_mods(int n)
****/

/* Precomputed supermoduli tables (T0 components), one per transform size
   n = 2 .. 256, held in CUDA constant memory for fast broadcast reads.
   The numeric values must not be edited: they are fixed constants of the
   fast cosine transform. */

/* n = 2 */
__device__ __constant__ double mod2[2] = {-0.7071067811865475, 0.7071067811865475};

/* n = 4 */
__device__ __constant__ double mod4[4] = {-0.923879532511287, 0.923879532511287,
    0.3826834323650897, -0.3826834323650898};

/* n = 8 */
__device__ __constant__ double mod8[8] = {-0.98078528040323, 0.98078528040323,
    0.1950903220161282, -0.1950903220161283, -0.5555702330196023, 0.555570233019602,
    0.831469612302545, -0.831469612302545};

/* n = 16 */
__device__ __constant__ double mod16[16] = {-0.995184726672197, 0.995184726672197,
    0.0980171403295607, -0.0980171403295608, -0.6343932841636457, 0.6343932841636453,
    0.7730104533627369, -0.773010453362737, -0.881921264348355, 0.881921264348355,
    0.4713967368259978, -0.4713967368259978, -0.2902846772544623, 0.2902846772544622,
    0.956940335732209, -0.956940335732209};

/* n = 32 */
__device__ __constant__ double mod32[32] = {-0.998795456205172, 0.998795456205172,
    0.04906767432741801, -0.04906767432741814, -0.6715589548470185, 0.6715589548470185,
    0.7409511253549589, -0.7409511253549593, -0.903989293123443, 0.903989293123443,
    0.4275550934302819, -0.4275550934302822, -0.33688985339222, 0.3368898533922199,
    0.941544065183021, -0.941544065183021, -0.970031253194544, 0.970031253194544,
    0.2429801799032639, -0.242980179903264, -0.5141027441932219, 0.5141027441932216,
    0.857728610000272, -0.857728610000272, -0.803207531480645, 0.803207531480645,
    0.5956993044924333, -0.5956993044924336, -0.1467304744553617, 0.1467304744553616,
    0.989176509964781, -0.989176509964781};

/* n = 64 */
__device__ __constant__ double mod64[64] = {-0.999698818696204, 0.999698818696204,
    0.02454122852291214, -0.02454122852291227, -0.689540544737067, 0.6895405447370669,
    0.7242470829514668, -0.7242470829514671, -0.914209755703531, 0.914209755703531,
    0.4052413140049898, -0.4052413140049899, -0.3598950365349883, 0.3598950365349882,
    0.932992798834739, -0.932992798834739, -0.975702130038529, 0.975702130038528,
    0.2191012401568697, -0.2191012401568698, -0.5349976198870974, 0.534997619887097,
    0.844853565249707, -0.844853565249707, -0.817584813151584, 0.817584813151584,
    0.5758081914178454, -0.5758081914178455, -0.1709618887603014, 0.1709618887603012,
    0.985277642388941, -0.985277642388941, -0.99247953459871, 0.99247953459871,
    0.1224106751992162, -0.1224106751992163, -0.6152315905806269, 0.6152315905806267,
    0.7883464276266061, -0.7883464276266064, -0.870086991108711, 0.870086991108711,
    0.492898192229784, -0.4928981922297842, -0.2667127574748984, 0.2667127574748983,
    0.96377606579544, -0.96377606579544, -0.949528180593037, 0.949528180593037,
    0.3136817403988914, -0.3136817403988915, -0.4496113296546067, 0.4496113296546067,
    0.893224301195515, -0.893224301195515, -0.7572088465064847, 0.7572088465064847,
    0.6531728429537766, -0.6531728429537769, -0.07356456359966746, 0.07356456359966735,
    0.99729045667869, -0.99729045667869};

/* n = 128 */
__device__ __constant__ double mod128[128] = {-0.999924701839145, 0.999924701839145,
    0.01227153828571982, -0.01227153828571994, -0.698376249408973, 0.6983762494089728,
    0.7157308252838185, -0.7157308252838188, -0.919113851690058, 0.919113851690058,
    0.393992040061048, -0.3939920400610481, -0.3713171939518376, 0.3713171939518375,
    0.928506080473215, -0.928506080473215, -0.978317370719628, 0.978317370719628,
    0.2071113761922184, -0.2071113761922186, -0.5453249884220466, 0.5453249884220462,
    0.838224705554838, -0.838224705554838, -0.824589302785025, 0.824589302785025,
    0.5657318107836132, -0.5657318107836132, -0.183039887955141, 0.1830398879551409,
    0.983105487431216, -0.983105487431216, -0.993906970002356, 0.993906970002356,
    0.1102222072938831, -0.1102222072938832, -0.6248594881423865, 0.6248594881423863,
    0.7807372285720944, -0.7807372285720945, -0.876070094195407, 0.876070094195407,
    0.4821837720791227, -0.4821837720791229, -0.278519689385053, 0.278519689385053,
    0.960430519415566, -0.960430519415566, -0.953306040354194, 0.953306040354194,
    0.3020059493192281, -0.3020059493192282, -0.4605387109582401, 0.4605387109582402,
    0.887639620402854, -0.887639620402854, -0.765167265622459, 0.765167265622459,
    0.6438315428897913, -0.6438315428897915, -0.0857973123444399, 0.0857973123444398,
    0.996312612182778, -0.996312612182778, -0.998118112900149, 0.998118112900149,
    0.06132073630220854, -0.06132073630220866, -0.6624157775901719, 0.6624157775901719,
    0.7491363945234591, -0.7491363945234594, -0.898674465693954, 0.898674465693954,
    0.4386162385385274, -0.4386162385385277, -0.325310292162263, 0.3253102921622629,
    0.945607325380521, -0.945607325380521, -0.966976471044852, 0.966976471044852,
    0.2548656596045145, -0.2548656596045146, -0.5035383837257176, 0.5035383837257175,
    0.863972856121587, -0.863972856121587, -0.7958369046088836, 0.7958369046088835,
    0.6055110414043254, -0.6055110414043256, -0.1345807085071262, 0.1345807085071261,
    0.99090263542778, -0.99090263542778, -0.987301418157858, 0.987301418157858,
    0.1588581433338613, -0.1588581433338614, -0.5857978574564389, 0.5857978574564389,
    0.810457198252595, -0.810457198252595, -0.851355193105265, 0.851355193105265,
    0.5245896826784687, -0.524589682678469, -0.2310581082806713, 0.2310581082806711,
    0.97293995220556, -0.97293995220556, -0.937339011912575, 0.937339011912575,
    0.3484186802494344, -0.3484186802494345, -0.4164295600976374, 0.416429560097637,
    0.909167983090523, -0.909167983090523, -0.7326542716724129, 0.7326542716724127,
    0.6806009977954531, -0.6806009977954531, -0.03680722294135899, 0.03680722294135887,
    0.99932238458835, -0.99932238458835};

/* n = 256 */
__device__ __constant__ double mod256[256] = {-0.999981175282601, 0.999981175282601,
    0.006135884649154394, -0.006135884649154516, -0.7027547444572254, 0.7027547444572252,
    0.7114321957452165, -0.7114321957452165, -0.921514039342042, 0.921514039342042,
    0.3883450466988262, -0.3883450466988263, -0.3770074102164183, 0.3770074102164182,
    0.926210242138311, -0.926210242138311, -0.97956976568544, 0.97956976568544,
    0.2011046348420918, -0.2011046348420919, -0.5504579729366049, 0.5504579729366048,
    0.83486287498638, -0.83486287498638, -0.828045045257756, 0.828045045257756,
    0.5606615761973359, -0.5606615761973361, -0.1890686641498063, 0.1890686641498062,
    0.981963869109555, -0.981963869109555, -0.994564570734255, 0.994564570734255,
    0.1041216338720546, -0.1041216338720547, -0.6296382389149271, 0.629638238914927,
    0.7768884656732322, -0.7768884656732325, -0.879012226428634, 0.879012226428634,
    0.476799230063322, -0.4767992300633223, -0.2844075372112718, 0.2844075372112717,
    0.958703474895872, -0.958703474895872, -0.955141168305771, 0.955141168305771,
    0.2961508882436238, -0.2961508882436239, -0.4659764957679661, 0.465976495767966,
    0.884797098430938, -0.884797098430938, -0.7691033376455798, 0.7691033376455795,
    0.6391244448637758, -0.6391244448637759, -0.0919089564971327, 0.0919089564971326,
    0.99576741446766, -0.99576741446766, -0.998475580573295, 0.998475580573295,
    0.05519524434968992, -0.05519524434969004, -0.6669999223036376, 0.6669999223036374,
    0.7450577854414659, -0.7450577854414661, -0.901348847046022, 0.901348847046022,
    0.433093818853152, -0.433093818853152, -0.3311063057598764, 0.3311063057598763,
    0.94359345816196, -0.94359345816196, -0.968522094274417, 0.968522094274417,
    0.2489276057457201, -0.2489276057457203, -0.5088301425431072, 0.5088301425431072,
    0.860866938637767, -0.860866938637767, -0.799537269107905, 0.799537269107905,
    0.6006164793838687, -0.600616479383869, -0.1406582393328492, 0.1406582393328491,
    0.990058210262297, -0.990058210262297, -0.988257567730749, 0.988257567730749,
    0.1527971852584433, -0.1527971852584434, -0.5907597018588743, 0.5907597018588741,
    0.806847553543799, -0.806847553543799, -0.8545579883654, 0.8545579883654,
    0.5193559901655897, -0.5193559901655897, -0.2370236059943673, 0.2370236059943672,
    0.971503890986252, -0.971503890986252, -0.93945922360219, 0.93945922360219,
    0.3426607173119943, -0.3426607173119944, -0.4220002707997998, 0.4220002707997998,
    0.906595704514915, -0.906595704514915, -0.7368165688773699, 0.7368165688773698,
    0.6760927035753159, -0.6760927035753161, -0.04293825693494096, 0.04293825693494084,
    0.999077727752645, -0.999077727752645, -0.999529417501093, 0.999529417501093,
    0.03067480317663646, -0.03067480317663658, -0.6850836677727005, 0.6850836677727001,
    0.7284643904482252, -0.7284643904482254, -0.91170603200543, 0.91170603200543,
    0.4108431710579038, -0.4108431710579039, -0.3541635254204905, 0.3541635254204904,
    0.935183509938948, -0.935183509938948, -0.974339382785576, 0.974339382785576,
    0.2250839113597927, -0.2250839113597927, -0.5298036246862949, 0.5298036246862946,
    0.848120344803297, -0.848120344803297, -0.814036329705948, 0.814036329705948,
    0.5808139580957644, -0.5808139580957647, -0.1649131204899701, 0.1649131204899699,
    0.986308097244599, -0.986308097244599, -0.9917097536691, 0.9917097536691,
    0.1284981107937931, -0.1284981107937932, -0.6103828062763095, 0.6103828062763095,
    0.7921065773002122, -0.7921065773002124, -0.867046245515693, 0.867046245515693,
    0.4982276669727816, -0.498227666972782, -0.2607941179152756, 0.2607941179152755,
    0.965394441697689, -0.96539444169769, -0.947585591017741, 0.947585591017741,
    0.3195020308160156, -0.3195020308160157, -0.4441221445704293, 0.4441221445704292,
    0.895966249756185, -0.895966249756185, -0.7531867990436126, 0.7531867990436125,
    0.6578066932970785, -0.6578066932970788, -0.06744391956366412, 0.067443919563664,
    0.997723066644192, -0.997723066644192, -0.996820299291166, 0.996820299291166,
    0.07968243797143002, -0.07968243797143014, -0.6485144010221126, 0.6485144010221124,
    0.7612023854842617, -0.7612023854842618, -0.890448723244758, 0.890448723244758,
    0.4550835871263438, -0.4550835871263439, -0.307849640041535, 0.3078496400415349,
    0.951435020969008, -0.951435020969008, -0.962121404269042, 0.962121404269042,
    0.2726213554499488, -0.272621355449949, -0.4875501601484361, 0.4875501601484357,
    0.87309497841829, -0.87309497841829, -0.7845565971555754, 0.7845565971555751,
    0.6200572117632892, -0.6200572117632892, -0.1163186309119049, 0.1163186309119047,
    0.993211949234795, -0.993211949234795, -0.984210092386929, 0.984210092386929,
    0.1770042204121487, -0.1770042204121489, -0.5707807458869674, 0.5707807458869672,
    0.821102514991105, -0.821102514991105, -0.841554977436898, 0.841554977436898,
    0.5401714727298929, -0.540171472729893, -0.2131103199160913, 0.2131103199160913,
    0.977028142657754, -0.977028142657754, -0.930766961078984, 0.930766961078984,
    0.3656129978047739, -0.365612997804774, -0.3996241998456468, 0.3996241998456467,
    0.916679059921043, -0.916679059921043, -0.7200025079613816, 0.7200025079613816,
    0.6939714608896538, -0.693971460889654, -0.01840672990580482, 0.01840672990580469,
    0.999830581795824, -0.999830581795823};

/*** "interface" to the above arrays ***/

/************************************************************************
   get supermoduli lists

   These are the values of the T0 components of the supermoduli
   in the proper order.

   Returns the table for transform size n (a power of two, 2..256),
   or 0 when n is unsupported.

   NOTE(review): the header comment above advertises the interface as
   `const double *get_mods(int n)`, but the function returns a non-const
   pointer into __constant__ memory; writing through it would be undefined
   behaviour. Consider making the return type const — TODO confirm no
   caller assigns the result to a plain double*.
  ************************************************************************/

__device__ double *get_mods(int n)
{
  switch(n)
    {
    case 2: return mod2;
    case 4: return mod4;
    case 8: return mod8;
    case 16: return mod16;
    case 32: return mod32;
    case 64: return mod64;
    case 128: return mod128;
    case 256: return mod256;
    default: return 0 ;
    }
}
14,464
#include "includes.h"

// Writes `value` into row `idx` of the flat bool array `x`, where each row
// holds N elements (so elements x[idx*N .. idx*N + N - 1] are filled).
// Uses a grid-stride loop, so any launch configuration covers the row.
__global__ void ConstantB(bool * x, bool value, size_t idx, size_t N)
{
    const int stride = blockDim.x * gridDim.x;
    bool * row = x + idx * N;   // start of the target row
    for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < N; pos += stride)
    {
        row[pos] = value;
    }
}
14,465
// #CSCS CUDA Training // // #Example 2.0 - sum vectors, fix number of threads per block // // #Author Ugo Varetto // // #Goal: compute the scalar product of two 1D vectors using a number of GPU threads greater than or equal to // the number of vector elements and not evenly divisible by the block size // #Rationale: shows how to implement a kernel with a computation/memory configuration that matches // the domain layout. Each threads computes at most one element of the output vector. // // #Solution: // . number of elements in the output array = E // . number of threads per block = Tb // The number of blocks is = ( E + Tb - 1 ) div Tb where 'div' is the integer division operator // Each thread on the GPU computes one(thread id < vector size) or zero(thread id >= vector size) // elements of the output vector. // // // #Code: typical flow: // 1) compute launch grid configuration // 2) allocate data on host(cpu) and device(gpu) // 3) copy data from host to device // 4) launch kernel // 5) read data back // 6) consume data (in this case print result) // 7) free memory // // #Compilation: nvcc -arch=sm_13 2_0_sum-vectors.cu -o sum-vectors-1 // // #Execution: ./sum-vectors-1 // // #Note: kernel invocations ( foo<<<...>>>(...) 
) are *always* asynchronous and a call to // cudaThreadSynchronize() is required to wait for the end of kernel execution from // a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaDeviceToHost) // kernel execution is guaranteed to be terminated before data are copied // // #Note: the code is C++ also because the default compilation mode for CUDA is C++, all functions // are named with C++ convention and the syntax is checked by default against C++ grammar rules // // #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better // // #Note: -arch=sm_13 is the lowest architecture version that supports double precision // // #Note: the example can be extended to read configuration data and array size from the command line // and could be timed to investigate how performance is dependent on single/double precision // and thread block size //#include <cuda_runtime.h> // automatically added by nvcc #include <vector> #include <iostream> #include <iomanip> #include <sstream> #include <string> typedef float real_t; // In this case the kernel assumes that the computation was started with enough threads to cover the entire domain. 
// This is the preferred solution provided there are enough threads to cover the entire domain which might not be the
// case in case of a 1D grid layout (max number of threads = 512 threads per block x 65536 blocks = 2^25 = 32 Mi threads)

// Error-check helper: CUDA calls report failures through return codes, and
// kernel launch errors only surface via cudaGetLastError(), so wrap everything.
// NOTE: expands to `return 1`, so it must be used inside a function returning int (main).
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        const cudaError_t status_ = (call);                                   \
        if ( status_ != cudaSuccess ) {                                       \
            std::cerr << "CUDA error: " << cudaGetErrorString( status_ )      \
                      << std::endl;                                           \
            return 1;                                                         \
        }                                                                     \
    } while( 0 )

// One output element per thread: out[i] = v1[i] + v2[i].
// Threads past the end of the vector do nothing, so the grid may be
// (and here is) larger than num_elements.
__global__ void sum_vectors( const real_t* v1, const real_t* v2, real_t* out, size_t num_elements ) {
    // compute current thread id; size_t avoids int overflow for very large grids
    const size_t xIndex = (size_t) blockIdx.x * blockDim.x + threadIdx.x;
    // since we assume that num threads >= num elements we need to make sure we do not write outside the
    // range of the output buffer
    if( xIndex < num_elements ) out[ xIndex ] = v1[ xIndex ] + v2[ xIndex ];
}

//------------------------------------------------------------------------------
int main( int , char** ) {
    const int VECTOR_SIZE = 0x10000 + 1; // vector size 65537, deliberately not a multiple of the block size
    const int SIZE = sizeof( real_t ) * VECTOR_SIZE; // total size in bytes
    const int THREADS_PER_BLOCK = 32; // number of gpu threads per block
    // ceil-division: one extra block when VECTOR_SIZE is not evenly divisible
    // by THREADS_PER_BLOCK, so the grid always covers the whole vector
    const int NUMBER_OF_BLOCKS = ( VECTOR_SIZE + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;

    // host allocated storage; std::vector simplifies memory management and initialization
    std::vector< real_t > v1 ( VECTOR_SIZE, 1.f ); // initialize all elements to 1
    std::vector< real_t > v2 ( VECTOR_SIZE, 2.f ); // initialize all elements to 2
    std::vector< real_t > vout( VECTOR_SIZE, 0.f ); // initialize all elements to 0

    // gpu allocated storage
    real_t* dev_in1 = 0; // vector 1
    real_t* dev_in2 = 0; // vector 2
    real_t* dev_out = 0; // result vector
    CUDA_CHECK( cudaMalloc( &dev_in1, SIZE ) );
    CUDA_CHECK( cudaMalloc( &dev_in2, SIZE ) );
    CUDA_CHECK( cudaMalloc( &dev_out, SIZE ) );

    // copy data to GPU
    CUDA_CHECK( cudaMemcpy( dev_in1, &v1[ 0 ], SIZE, cudaMemcpyHostToDevice ) );
    CUDA_CHECK( cudaMemcpy( dev_in2, &v2[ 0 ], SIZE, cudaMemcpyHostToDevice ) );

    // execute kernel with num threads >= num elements
    sum_vectors<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>( dev_in1, dev_in2, dev_out, VECTOR_SIZE );
    CUDA_CHECK( cudaGetLastError() ); // catches bad launch configurations

    // read back result; the blocking copy also synchronizes with the kernel
    CUDA_CHECK( cudaMemcpy( &vout[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost ) );

    // print first and last element of vector
    std::cout << "result: " << vout.front() << ".." << vout.back() << std::endl;

    // free memory
    CUDA_CHECK( cudaFree( dev_in1 ) );
    CUDA_CHECK( cudaFree( dev_in2 ) );
    CUDA_CHECK( cudaFree( dev_out ) );

    return 0;
}
14,466
#include "includes.h"

// One explicit finite-difference update step over a flattened grid of
// temperatures: tNew[i] = tOld[i] + k * (sum of 4 neighbours - 4*tOld[i]),
// with missing neighbours at edges/corners replaced by the cell itself
// (mirror/insulated boundary). One thread per cell; i indexes row-major
// with x columns per row.
//
// NOTE(review): the bound uses x*y*z but all neighbour arithmetic is 2-D
// (offsets of 1 and x only) — presumably z == 1 or each z-slice is meant to
// be independent; with z > 1 slice boundaries would bleed into each other.
// TODO confirm against the caller.
//
// tOrig/st: cells whose original value differs from st are treated as fixed
// heat sources and restored after the update — assumes st is the uniform
// "ambient" starting temperature; verify against the host setup.
__global__ void gpuIt(float *tNew,float *tOld,float *tOrig,int x,int y,int z,float k,float st)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // guard against threads past the end of the grid, like this:
    if(i < x*y*z){
        if(i == 0){
            // top left corner: left and top neighbours folded to self
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i] + tOld[i+x] - 4*tOld[i]);
        }
        else if(i == x-1){
            // top right corner
            tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]);
        }
        else if(i == x*y - 1){
            // bottom right corner
            tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]);
        }
        else if(i == x*y - x){
            // bottom left corner
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i] - 4*tOld[i]);
        }
        else if(i%x == 0){
            // left side (non-corner)
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
        }
        else if(i%x == x-1){
            // right side (non-corner)
            tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
        }
        else if(i - x < 0){
            // top row (non-corner); corners/sides already handled above
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]);
        }
        else if(i + x > x*y){
            // bottom row (non-corner)
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]);
        }
        else{
            // interior cell: all four neighbours exist
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
        }
        // replace heaters: pin any cell that was initialized to a non-ambient value
        if(tOrig[i] != st){
            tNew[i] = tOrig[i];
        }
    }
}
14,467
// K-mer search / accumulation kernel on the GPU.
// Each logical worker J owns a contiguous run of NB_BLOCK_MAX entries starting
// at K = J*NB_BLOCK_MAX; it folds them into s[K], then the runs are summed
// tree-style through s2.
//
// NOTE(review): __syncthreads() only synchronizes threads WITHIN one block,
// but s2 is indexed by the global thread id — this reduction appears to assume
// grid-wide synchronization, which CUDA does not provide across blocks.
// Also, `s[0] = s2[0]` is executed by every thread (same value, but redundant
// and racy with the loop above for other blocks). Verify the intended launch
// is a single block, or restructure — flagged, not changed.
__global__ void find_kmers_GPU(unsigned long* input, unsigned long* s, unsigned long* s2, unsigned long n, unsigned long NB_BLOCK_MAX)
{
    unsigned long J = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long i;
    unsigned long K = J*NB_BLOCK_MAX;
    if (K < n)
    {
        // fold this worker's run of NB_BLOCK_MAX values into s[K]
        // (takes the smaller of input/s element-wise, accumulating into s[K])
        if (input[K] < s[K]) s[K] = input[K];
        for(i=1; i<NB_BLOCK_MAX; i++)
        {
            if (input[K+i] < s[K+i])
                s[K] += input[K+i];
            else
                s[K] += s[K+i];
        }
    }
    __syncthreads();
    // gather the per-run partial sums into a dense array s2 that is simple to sum
    if (J<n/NB_BLOCK_MAX) s2[J] = s[K];
    // tree reduction of s2 (halving the active range each pass)
    for (i=n/(2*NB_BLOCK_MAX); i>0; i=i/2)
    {
        __syncthreads();
        if (J < i) s2[J] += s2[J+i];
    }
    s[0] = s2[0];
}

// Computes the numeric code of an n-character word over the alphabet
// {A,C,T,G} as a base-4 number (A=0, C=1, T=2, G=3), least-significant
// character first. Characters outside the alphabet reuse the previous
// digit value (wi is not reset each iteration).
__device__ unsigned long code_GPU(char *w, unsigned long n)
{
    int wi = 0;
    unsigned long i;
    unsigned long result = 0;
    unsigned long power_4_i = 1; // 4^i
    for(i=0; i<n; i++)
    {
        if (w[i] == 'A') wi = 0;
        if (w[i] == 'C') wi = 1;
        if (w[i] == 'T') wi = 2;
        if (w[i] == 'G') wi = 3;
        result = result + wi * power_4_i;
        power_4_i = power_4_i * 4;
    }
    return result;
}

// Builds the k-mer occurrence index of the reference sequence on the GPU:
// one thread per starting position J, incrementing the histogram bucket of
// the k-mer starting at chaine+J.
//
// NOTE(review): `index[...]++` is a read-modify-write on global memory and is
// NOT atomic — two k-mers with the same code counted by different threads can
// lose updates. atomicAdd(&index[code], 1) would be the safe form; flagged
// rather than changed in case approximate counts are acceptable here.
__global__ void creation_index_GPU(char *chaine, unsigned long* temp_code, unsigned long size, unsigned long* index, unsigned long nb_kmers, unsigned long k)
{
    int J = blockIdx.x * blockDim.x + threadIdx.x;
    // only positions with a full k-mer remaining; assumes size > k — confirm
    if (J<size-k) index[code_GPU(chaine+J, k)]++;
}
14,468
#include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Complex scaling: out[i] = in[i] * k for every i < n.
// One thread per element, guarded for the grid tail.
__global__ void multiply_const_kernel(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex k, int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    out[tid] = cuCmulf(in[tid], k);
}

// Host wrapper: launches the kernel on `stream` with the caller's configuration.
void exec_multiply_const(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex k, int n, int grid_size, int block_size, cudaStream_t stream)
{
    multiply_const_kernel<<<grid_size, block_size, 0, stream>>>(in, out, k, n);
}

// Queries the occupancy-optimal launch configuration for the kernel.
void get_block_and_grid_multiply_const(int *minGrid, int *minBlock)
{
    cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, multiply_const_kernel, 0, 0);
}
14,469
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;

/* Simple Cuda Program: 2D block version
 * - map 1D thread block to 2D data
 * - use 2D thread block
 * - effect of non-optimal block size
 *
 * This is teaching/exercise code: the numbered (*1*)/(*2*)/(*3*) comments mark
 * the modifications the exercise asks the student to try.
 */

// (*3*) set dataX to 17
#define dataX 16
#define nThreadsX (dataX*dataX)
#define BLOCK_DATA(i,j) block_data[(i)*dataX+(j)]

// Each block owns a contiguous chunk of nThreadsX doubles and treats it as a
// dataX x dataX tile; each thread repeatedly increments its own tile element.
__global__ void addOne(double *data)
{
    int b = blockIdx.x;
    // pointer to this block's data
    double *block_data = data + b*nThreadsX;
    // (*1*) Interchange the definitions of tx and ty
    // (*2*) use threadIdx.x and threadIdx.y
    int tx = threadIdx.x % dataX;
    int ty = threadIdx.x / dataX;
    // access data as 2D
    for (int i=0;i<100000; i++)
        BLOCK_DATA(ty,tx)++;
}

int main()
{
    // time variables
    time_t sTime = time(NULL);
    struct timeval tt1, tt2;
    int ms;
    double fms;

    // (*3*) set data size to 4624 (17*17 * 16)
    int n = 4096;
    double *data = (double*) malloc(n * sizeof(double));
    for (int i=0; i<n; i++) {
        data[i] = 0.0;
    }

    double *data_dev;
    cudaMalloc((void**) &data_dev, n * sizeof(double));
    cudaMemcpy(data_dev, data, n * sizeof(double), cudaMemcpyHostToDevice);

    dim3 nBlocks(n/nThreadsX,1);
    // (*2*) modify here to make a 2D block (dataX x dataX)
    dim3 nThreads(nThreadsX,1,1);

    gettimeofday( &tt1, NULL );
    addOne <<< nBlocks, nThreads >>> (data_dev);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and is required so the timing below covers the
    // asynchronous kernel execution.
    cudaDeviceSynchronize();
    gettimeofday( &tt2, NULL );

    cudaMemcpy(data, data_dev, n * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(data_dev);

    // elapsed wall-clock time in seconds
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms)/1000000.0;
    cout << "kernel run time = " << fms << endl;
    cout << "data[n-1] = " << data[n-1] << endl;

    free(data);
}
14,470
/*
This program will numerically compute the integral of

                  4/(1+x*x)

from 0 to 1.  The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.

This is the original sequential program, timed here with CUDA events
instead of the OpenMP runtime timer.

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include "cuda.h"

static long num_steps = 1000000000;
double step;

int main ()
{
    double pi, sum = 0.0;
    cudaEvent_t start_time, stop_time;
    float elapsed_time;

    step = 1.0/(double) num_steps;

    // CUDA events bracket the (host-side) computation for timing
    cudaEventCreate( &start_time );
    cudaEventCreate( &stop_time );
    cudaEventRecord( start_time, 0 );

    // midpoint-rule quadrature of 4/(1+x^2) over [0,1]
    for (int i = 1; i <= num_steps; i++) {
        double x = (i-0.5)*step;
        sum = sum + 4.0/(1.0+x*x);
    }
    pi = step * sum;

    cudaEventRecord( stop_time, 0 );
    cudaEventSynchronize( stop_time );
    cudaEventElapsedTime(&elapsed_time,start_time,stop_time);

    printf("\n pi with %ld steps is %lf in %lf millisecond\n ",num_steps,pi,elapsed_time);

    cudaEventDestroy( start_time );
    cudaEventDestroy( stop_time );
}
14,471
#include "kernels.cuh"

// Element-wise sum: answer[i] = a[i] + b[i] for all i < size.
// Grid-stride loop, so any launch configuration covers the whole range.
__global__ void add(REAL *answer, REAL *a, REAL *b, size_t size)
{
    const size_t step = get_stride();
    for (size_t i = get_tid(); i < size; i += step)
        answer[i] = a[i] + b[i];
}

// Element-wise scaling: answer[i] = mult * a[i] for all i < size.
__global__ void mul(REAL *answer, REAL *a, REAL mult, size_t size)
{
    const size_t step = get_stride();
    for (size_t i = get_tid(); i < size; i += step)
        answer[i] = mult * a[i];
}

// Fills a[0..size) with a constant value.
__global__ void fill(REAL *a, REAL value, size_t size)
{
    const size_t step = get_stride();
    for (size_t i = get_tid(); i < size; i += step)
        a[i] = value;
}
14,472
#include "includes.h"

// Doubles the B, G and R channel values of every pixel of an interleaved
// image (a brightness-scaling pass — the name matMul is historical; no
// matrix multiply happens here). One thread per pixel over a 1-D launch.
//
// chanDepth/BLUE/GREEN/RED come from includes.h — presumably chanDepth is
// the number of bytes per pixel and BLUE/GREEN/RED are channel offsets;
// verify there.
//
// NOTE(review): the channels are unsigned char, so values >= 128 wrap
// around modulo 256 instead of saturating at 255 — confirm whether that
// wraparound is intended.
__global__ void matMul(unsigned char *image,unsigned char *resImage,int rows,int cols)
{
    int tj = blockIdx.x*blockDim.x+threadIdx.x;
    // guard: launches may spawn more threads than pixels
    if(tj < rows*cols){
        int pos = tj*chanDepth; // byte offset of this pixel's first channel
        resImage[pos+BLUE] = image[pos+BLUE]*2;
        resImage[pos+GREEN] = image[pos+GREEN]*2;
        resImage[pos+RED] = image[pos+RED]*2;
    }
}
14,473
#include "includes.h"

// Backward pass of a HardTanh activation over `size` elements, using a
// grid-stride loop.
//
// inplace == true:  gradients live in in_d; the gradient is zeroed wherever
//                   the forward input fell outside [min_val, max_val]
//                   (HardTanh has zero slope there) and left as-is inside.
// inplace == false: out_d is accumulated into in_d only where the input is
//                   inside [min_val, max_val].
//
// NOTE(review): in the non-inplace branch, elements outside the range are
// left untouched in in_d — this assumes in_d was zero-initialized (or holds
// gradients to accumulate into) before the call; confirm against the caller.
// The in_d/out_d naming (which is upstream vs downstream gradient) cannot be
// verified from this file alone.
__global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = tid; i < size; i += stride)
    {
        if (inplace)
        {
            // zero slope outside the clamp range
            if (in_x[i] < min_val || in_x[i] > max_val)
                in_d[i] = 0;
        }
        else
        {
            // unit slope inside the clamp range
            if (in_x[i] >= min_val && in_x[i] <= max_val)
                in_d[i] += out_d[i];
        }
    }
}
14,474
#include <iostream>
#include <math.h>
#include <fstream>

// GPU diffusion-limited-aggregation style fractal growth: particles random-walk
// until they touch a static particle (or the circle border) and freeze in place.

// Set CIRCLE_BORDER to a negative value to activate normal mode instead
#define CIRCLE_BORDER -1
// If RANDOM_WALK is defined, the direction of a particle changes every tick
#define RANDOM_WALK

// A moving particle; `seed` drives its private PRNG stream.
typedef struct {
    float x;
    float y;
    float horizontal_speed;
    float vertical_speed;
    uint seed;
} Particle;

using ullong = unsigned long long;

const float radius = 2.0f;       // Size of a single particle
const int ceil_radius = (int)radius + ((((float)(int)radius) < radius) ? 1 : 0); // Radius rounded up
const float max_speed = 3.0f;    // Maximum speed a particle can have
const int particle_count = 4096 * 64;          // Particles simulated at once; multiple of particle_threads_per_block
const int particle_threads_per_block = 16;     // Threads per block for the tick/init kernels
const int grid_size = 1024 * 1;                // Size of the grid on which the fractal is generated
const int grid_width = grid_size;
const int grid_height = grid_size;

// The grid: -1 = empty, >= 0 = tick number at which a static particle landed there.
__device__ int grid[grid_height][grid_width];

// Bounding box that contains all static particles
__device__ int border_left;
__device__ int border_right;
__device__ int border_top;
__device__ int border_bottom;
__device__ int smallest_distance_to_center; // squared distance (circle mode)

// Work in progress: running centroid of static particles (circle mode)
__device__ ullong total_static_particles;
__device__ ullong weight_center_x;
__device__ ullong weight_center_y;

// Used in debugging
const int debug_array_size = 1024;
__device__ int debug = 0;
__device__ int debug_array[debug_array_size];

void VecAdd();
void simulate();
void tick(Particle* particles, int tick_count);
__host__ __device__ int random_int(int min, int max, uint seed);
__device__ Particle make_static(Particle particle, int tick_count, float modulo_x, float modulo_y);
__host__ __device__ float random_float(uint seed);

#define print(message) std::cout << message << std::endl

/* Entry point: main */
int main()
{
    print("starting");
    simulate();
    print("done");
}

// Reports the last CUDA error (or "success") to stdout.
void cuda_error()
{
    cudaError_t result = cudaGetLastError();
    if (result != cudaSuccess) {
        std::cout << "error: " << result << std::endl;
        std::cout << "error message: " << cudaGetErrorString(result) << std::endl;
    } else {
        std::cout << "success" << std::endl;
    }
}

// Sets all the grid values to -1 (meaning 'empty' / no static particle here).
__global__ void init_grid_negative()
{
    grid[blockIdx.y * blockDim.y + threadIdx.y][blockIdx.x * blockDim.x + threadIdx.x] = -1;
}

// Performs all the initialization that happens only once on the gpu.
__global__ void init_gpu_single()
{
    border_top = grid_height / 2;
    border_bottom = grid_height / 2;
    border_left = grid_width / 2;
    border_right = grid_width / 2;
    smallest_distance_to_center = CIRCLE_BORDER * CIRCLE_BORDER;

    if(CIRCLE_BORDER < 0) {
        // Seed the aggregate: one static particle in the center of the grid
        grid[grid_height / 2][grid_width / 2] = 0;
    } else {
        // Init weight center with a bias toward the grid center
        int center_bias = 10;
        total_static_particles = center_bias;
        weight_center_x = (grid_width / 2) * center_bias;
        weight_center_y = (grid_height / 2) * center_bias;
    }
}

// Outputs the grid (preceded by its width/height) to a binary file.
void output_grid()
{
    // Get grid from GPU memory
    size_t mem_size = sizeof(int) * grid_height * grid_width;
    int* host_grid = (int*)malloc(mem_size);
    cudaMemcpyFromSymbol(host_grid, grid, mem_size, 0, cudaMemcpyDeviceToHost);

    // Create file
    std::ofstream output_file;
    output_file.open("grid_output.bin", std::ios::binary);
    if(output_file.is_open()) {
        print("output_file is open");
    }

    // Output to file: header (width, height) then the raw grid
    const int grid_size[2] = {grid_width, grid_height};
    output_file.write((const char*) &grid_size, sizeof(int) * 2);
    output_file.write((const char*) host_grid, mem_size);

    // clean up; note: buffer came from malloc, so free() (was `delete`, which is UB here)
    output_file.close();
    free(host_grid);
}

// Returns a pseudorandom number based on the input number x.
__host__ __device__ uint hash(uint x)
{
    const uint seed = 1324567967;
    x += seed;
    x = ((x >> 16) ^ x) * seed;
    x = ((x >> 16) ^ x) * seed;
    x = (x >> 16) ^ x;
    return x;
}

// Returns an int in the range [min, max) based on seed.
__host__ __device__ int random_int(int min, int max, uint seed)
{
    uint random = hash(seed);
    random %= (uint)(max - min);
    return (int)random + min;
}

// Returns a float in the range [0, 1) based on seed.
__host__ __device__ float random_float(uint seed)
{
    const int max = 10000000;
    int base = random_int(0, max, seed);
    return (float)base / (float)max;
}

// Randomizes the speed and direction of a particle.
__device__ Particle randomize_speed(Particle particle, int direction_seed, int speed_seed)
{
    float direction = M_PI * 2.0f * random_float(direction_seed);
    float speed = random_float(speed_seed) * max_speed;
    particle.vertical_speed = cosf(direction) * speed;
    particle.horizontal_speed = sinf(direction) * speed;
    return particle;
}

// Randomizes all fields of the particle (position outside the occupied
// bounding box in normal mode; at the mirrored weight-center in circle mode).
__device__ Particle randomize_particle(Particle particle)
{
    uint seed = particle.seed;
    int center_height = border_bottom - border_top;
    if(CIRCLE_BORDER < 0) {
        // Place the particle outside the borders
        particle.x = random_int(0, grid_width, seed + 0);
        if(particle.x > border_left && particle.x < border_right) {
            // Generate a y-coordinate that does not overlap with the borders
            particle.y = random_int(0, grid_height - center_height, seed + 1);
            if(particle.y > border_top) {
                particle.y += center_height;
            }
        } else {
            particle.y = random_int(0, grid_height, seed + 1);
        }
    } else {
        particle.x = (float) (grid_width - (weight_center_x / total_static_particles));
        particle.y = (float) (grid_height - (weight_center_y / total_static_particles));
    }
#ifndef RANDOM_WALK
    // When doing a random walk, the speed is randomized at the beginning of each tick anyway
    particle = randomize_speed(particle, seed + 2, seed + 3);
#endif
    particle.seed = hash(seed);
    return particle;
}

// Initializes one particle per thread.
__global__ void init_particles(Particle* particles)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x; // particle index in the particles array
    Particle* particle = particles + i;
    particle->seed = (uint)i * 4;
    *particle = randomize_particle(*particle);
}

// Prints border_left, border_right, border_top and border_bottom to stdout.
void print_boundaries()
{
    int left, right, top, bottom;
    cudaMemcpyFromSymbol(&left, border_left, sizeof(int));
    cudaMemcpyFromSymbol(&right, border_right, sizeof(int));
    cudaMemcpyFromSymbol(&top, border_top, sizeof(int));
    cudaMemcpyFromSymbol(&bottom, border_bottom, sizeof(int));
    print(left << ", " << right << ", " << top << ", " << bottom);
}

// Creates the fractal: initializes the GPU state, then ticks until the
// aggregate grows too close to the grid margins.
void simulate()
{
    // Make sure there are no errors at the start of the simulation
    cuda_error();

    // Initialize grid
    dim3 threadsPerBlock(16, 16);
    dim3 blocks(grid_width / threadsPerBlock.x, grid_height / threadsPerBlock.y);
    init_grid_negative<<<blocks, threadsPerBlock>>>();
    init_gpu_single<<<1, 1>>>();

    // Initialize particles
    size_t mem_size = particle_count * sizeof(Particle);
    Particle* particles;
    cudaMalloc(&particles, mem_size);
    const int particle_blocks = particle_count / particle_threads_per_block;
    cuda_error();
    init_particles<<<particle_blocks, particle_threads_per_block>>>(particles);

    // Debug information; left in deliberately, it saved some confusion before
    print_boundaries();
    cuda_error();

    // Perform simulation ticks until a particle hits the margins
    int tick_count = 0;
    for(int i = 0; true; i++) {
        // Perform one tick
        tick(particles, ++tick_count);

        // Pull debug/progress information back from the device
        int left, right, top, bottom, center_distance;
        int debug_copy;
        int debug_array_copy[debug_array_size];
        ullong total_static_particles_copy; // currently only fetched, not printed
        cudaMemcpyFromSymbol(&left, border_left, sizeof(int));
        cudaMemcpyFromSymbol(&right, border_right, sizeof(int));
        cudaMemcpyFromSymbol(&top, border_top, sizeof(int));
        cudaMemcpyFromSymbol(&bottom, border_bottom, sizeof(int)); // (duplicate copy removed)
        cudaMemcpyFromSymbol(&center_distance, smallest_distance_to_center, sizeof(int));
        cudaMemcpyFromSymbol(&debug_copy, debug, sizeof(int));
        cudaMemcpyFromSymbol(&total_static_particles_copy, total_static_particles, sizeof(ullong));
        cudaMemcpyFromSymbol(&debug_array_copy, debug_array, sizeof(int) * debug_copy);

        if(i % 10000 == 0) {
            print(left << ", " << right << ", " << top << ", " << bottom << ", " << center_distance);
            print(debug_copy);
            for(int j = 0; j < debug_copy && j < 1024; j++) { // j: avoid shadowing the tick counter i
                if(j % 2 == 0) {
                    print("");
                }
                print(debug_array_copy[j]);
            }
        }

        // Check if a particle has come too close to the margins; if so, finish
        const int margin = 150;
        if(CIRCLE_BORDER > -1 && center_distance < margin * margin) {
            break;
        }
        if(left < margin || right > grid_width - margin || top < margin || bottom > grid_height - margin) {
            break;
        }
    }

    // Finish up
    cuda_error();
    output_grid();
    cudaFree(particles);
}

// Returns the SQUARED length of (a, b) — no square root is taken.
__device__ float pythagoras(float a, float b)
{
    return a * a + b * b;
}

// Returns the SQUARED distance of the particle from the grid center.
__device__ float pythagoras(Particle particle)
{
    return pythagoras(particle.x - (float)(grid_width / 2), particle.y - (float)(grid_height / 2));
}

// Advances a particle by one step and reflects it off the grid edges.
__device__ Particle move_particle(Particle particle)
{
#ifdef RANDOM_WALK
    // randomize direction and speed every step
    particle = randomize_speed(particle, particle.seed, particle.seed + 1);
    particle.seed = hash(particle.seed);
#endif
    // move particle
    particle.x += particle.horizontal_speed;
    particle.y += particle.vertical_speed;
    // bounce off the grid bounds
    if(particle.x - radius <= 0.0f) {
        particle.x = 0.01f + radius;
        particle.horizontal_speed *= -1.0f;
    } else if(particle.x + radius >= grid_width) {
        particle.x = grid_width - 0.01f - radius;
        particle.horizontal_speed *= -1.0f;
    }
    if(particle.y - radius <= 0.0f) {
        particle.y = 0.01f + radius;
        particle.vertical_speed *= -1.0f;
    } else if(particle.y + radius >= grid_height) {
        particle.y = grid_height - 0.01f - radius;
        particle.vertical_speed *= -1.0f;
    }
    return particle;
}

// Performs one tick for one particle: moves it (fast-forwarding while far
// from the aggregate) and freezes it on contact with a static particle.
__global__ void particle_step(Particle* particles, int tick_count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // particle index in the particles array
    Particle particle = particles[i];
    const int max_steps = 5;
    bool outside_border_margins = true; // move at least once
    const int border_margins = 150;

    if(CIRCLE_BORDER < 0) {
        // Take up to max_steps while still far outside the aggregate's bounding box
        for(int step = 0; step < max_steps && outside_border_margins; step++) {
            particle = move_particle(particle);
            outside_border_margins = particle.x < (border_left - border_margins)
                || particle.x > (border_right + border_margins)
                || particle.y > (border_bottom + border_margins)
                || particle.y < (border_top - border_margins);
        }
    } else {
        // set to false to avoid confusion
        outside_border_margins = false;
        particle = move_particle(particle);
    }

    // Fractional position within the current cell
    float modulo_x = fmod(particle.x, 1.0f);
    float modulo_y = fmod(particle.y, 1.0f);

    // If the particle is outside the circle border, turn it static.
    // NOTE(review): pythagoras() returns a squared distance, so adding the
    // (linear) radius to it before comparing against CIRCLE_BORDER^2 mixes
    // units; preserved as-is since it only shifts the freeze threshold.
    if(CIRCLE_BORDER > -1 && (int)(pythagoras(particle) + radius) >= CIRCLE_BORDER * CIRCLE_BORDER) {
        particles[i] = make_static(particle, tick_count, modulo_x, modulo_y);
        return;
    }

    if(!outside_border_margins) {
        // Check for collision with static particles in the surrounding cells
        bool looping = true;
        for(int dx = -ceil_radius; dx <= ceil_radius && looping; dx++) {
            for(int dy = -ceil_radius; dy <= ceil_radius && looping; dy++) {
                // Distance from the center of the particle
                float distance_x = -dx + modulo_x;
                float distance_y = -dy + modulo_y;
                if(pythagoras(distance_x, distance_y) < radius * radius) {
                    // Cell is within the particle's radius
                    if(grid[(int)(particle.y - distance_y)][(int)(particle.x - distance_x)] >= 0) {
                        // It hit another particle, so turn this one static too
                        particle = make_static(particle, tick_count, modulo_x, modulo_y);
                        looping = false;
                        break;
                    }
                }
            }
        }
    }

    // Update value in particles array
    particles[i] = particle;
}

// Stamps a static particle into the grid at this location, updates the
// aggregate statistics, and replaces the live particle by a fresh one.
__device__ Particle make_static(Particle particle, int tick_count, float modulo_x, float modulo_y)
{
    // Create new static particle: mark every grid cell within `radius`
    for(int dx = -ceil_radius; dx <= ceil_radius; dx++) {
        for(int dy = -ceil_radius; dy <= ceil_radius; dy++) {
            // Calculate distance from center of the particle
            float distance_x = -dx + modulo_x;
            float distance_y = -dy + modulo_y;
            if(distance_x * distance_x + distance_y * distance_y < radius * radius) {
                // Calculate position in grid
                int absolute_x = (int)(particle.x - distance_x);
                int absolute_y = (int)(particle.y - distance_y);
                if(absolute_x >= 0 && absolute_x < grid_width && absolute_y >= 0 && absolute_y < grid_height) {
                    // Record the tick at which this cell became static.
                    // Because the kernel reads and writes the same grid within a
                    // tick, the algorithm isn't completely deterministic; a
                    // double-buffered grid would fix that but isn't necessary.
                    grid[absolute_y][absolute_x] = tick_count;
                }
            }
        }
    }

    // Update information about the boundaries that contain all particles
    if(CIRCLE_BORDER < 0) {
        atomicMin(&border_left, (int)(particle.x - radius));
        atomicMax(&border_right, (int)(particle.x + radius));
        atomicMin(&border_top, (int)(particle.y - radius));
        atomicMax(&border_bottom, (int)(particle.y + radius));
    } else {
        atomicMin(&smallest_distance_to_center, (int)(pythagoras(particle) - radius));
        // 1ull matches the unsigned long long atomicAdd overload
        // (the previous `1l` relied on an implicit long -> ullong conversion)
        atomicAdd(&total_static_particles, 1ull);
        atomicAdd(&weight_center_x, (ullong)particle.x);
        atomicAdd(&weight_center_y, (ullong)particle.y);
    }

    // Give the particle a random new position and speed
    return randomize_particle(particle);
}

// Performs one tick over all particles.
void tick(Particle* particles, int tick_count)
{
    const int blocks = particle_count / particle_threads_per_block;
    particle_step<<<blocks, particle_threads_per_block>>>(particles, tick_count);
}
14,475
#include <cuda.h>

// Deliberately ill-formed kernel: the __syncthreads() sits inside a branch
// whose condition depends on per-thread data, so it may be reached by some
// threads of a block but not others depending on the contents of p.
// Barrier divergence like this is undefined behavior in CUDA (hang or
// corruption); this looks like a test input for a divergence/verification
// tool — do not "fix" by hoisting the barrier without confirming its purpose.
__global__ void foo(int *p) {
    if(p[threadIdx.x]) {
        // May be reached by some threads but not others depending on contents of p
        __syncthreads();
    }
}
14,476
// System headers
#include <stdlib.h>
#include <stdio.h>
// CUDA headers
#include <cuda_runtime.h>
#include "device_launch_parameters.h"

#define N 10
#define GRID_SIZE 32
#define BLOCK_SIZE 16

// Element-wise addition of two N*N matrices stored as flat row-major arrays.
// One thread per element over a 1-D launch; the bounds guard keeps any extra
// threads from writing past the N*N buffers. (The previous version launched a
// 2-D GRID_SIZE x GRID_SIZE grid of 2-D blocks but only used the x indices and
// had no guard, so threads with i >= N*N wrote out of bounds.)
__global__ void matrixAddition(float *a, float *b, float *c)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int i = bx * BLOCK_SIZE + tx;
    if (i < N * N) {
        c[i] = a[i] + b[i];
    }
}

// Fill a vector with pseudo-random values; the fixed seed makes runs reproducible.
void randomInit(float* data, unsigned int size)
{
    srand(1);
    for (unsigned int i = 0; i < size; i++) {
        data[i] = rand() / (float) 100000000;
    }
}

// Host entry point: allocate, initialize, add on the GPU, print the result.
int main(void)
{
    float *aH, *bH, *cH, *aD, *bD, *cD;
    int mem_size = N * N * sizeof(float);

    // Host buffers for A, B, C
    aH = (float*) malloc(mem_size);
    bH = (float*) malloc(mem_size);
    cH = (float*) malloc(mem_size);

    // Device buffers for A, B, C
    cudaMalloc((void**) &aD, mem_size);
    cudaMalloc((void**) &bD, mem_size);
    cudaMalloc((void**) &cD, mem_size);

    // Initialize host A and B
    randomInit(aH, N * N);
    randomInit(bH, N * N);

    // Copy A and B to the device
    cudaMemcpy(aD, aH, mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(bD, bH, mem_size, cudaMemcpyHostToDevice);

    // 1-D launch: enough BLOCK_SIZE-thread blocks to cover all N*N elements
    // (ceil-division, since N*N need not be a multiple of BLOCK_SIZE).
    int numBlocks = (N * N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    matrixAddition <<< numBlocks, BLOCK_SIZE >>> (aD, bD, cD);

    // Copy the result matrix C back to the host (blocking copy also synchronizes)
    cudaMemcpy(cH, cD, mem_size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            printf("%.2f\t", cH[i * N + j]);
        }
        printf("\n");
    }

    free(aH);
    free(bH);
    free(cH);
    cudaFree(aD);
    cudaFree(bD);
    cudaFree(cD);
    return 0;
}
14,477
#include "includes.h"

// One relaxation step of a distributed Floyd-Warshall shortest-path update:
// for cell (row, col) of an N-wide row-major matrix, relax via intermediate
// vertex k, where recv[col] holds row k of the distance matrix (received from
// the process that owns it — rank/owner are carried for the MPI context but
// unused in the kernel itself).
//
// NOTE(review): there is no guard on row < rows or col < N, so the launch
// grid must exactly tile the rows x N domain — confirm against the host
// launch. The trailing __syncthreads() orders nothing after it and only
// syncs within a block; it appears to be a no-op for correctness.
// Reads of Arr1 and writes of Arr2 use distinct buffers, so there is no
// read/write race within the step.
__global__ void ShortestPath2(float *Arr1,float *Arr2,float *recv,int N,int rows, int k,int rank,int owner)
{
    int col=blockIdx.x * blockDim.x + threadIdx.x;
    int row=blockIdx.y * blockDim.y + threadIdx.y;
    int index=row*N+col;        // flat index of (row, col)
    int index_ik = row*N+k;     // flat index of (row, k)
    // relax: dist(row,col) = min(dist(row,col), dist(row,k) + dist(k,col))
    if(Arr1[index]>(Arr1[index_ik]+recv[col])){
        Arr2[index]=Arr1[index_ik]+recv[col];
    }
    __syncthreads();
}
14,478
// Program seqCuda.cu
// Taken from CUDA: Application Design and Development by Rob Farber
#include <iostream>
using namespace std;

#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>

// Fills a device vector with 0..N-1, reduces it on the GPU, and checks the
// sum against a CPU-computed reference.
int main()
{
    const int N = 50000;

    // Device array filled with the sequence 0, 1, ..., N-1
    thrust::device_vector<int> sequence(N);
    thrust::sequence(sequence.begin(), sequence.end(), 0);

    // GPU-side sum of the sequence
    const int deviceSum = thrust::reduce(sequence.begin(), sequence.end(), 0);

    // CPU reference: sum of 0 .. N-1
    int referenceSum = 0;
    for (int value = 0; value < N; value++)
        referenceSum += value;

    // Compare the two results
    if (deviceSum != referenceSum) {
        cerr << "Test Failed!" << endl;
        return(1);
    }
    cout << "Test Succeeded!" << endl;
    return(0);
}
14,479
#include <stdio.h>
#include <assert.h>
#include <cuda.h>

// Abort with a readable message whenever a CUDA runtime call fails.
#define CHECK_CUDA(x) \
{ \
    cudaError_t err = x; \
    if (err != cudaSuccess) \
    { \
        printf("!!! CUDA ERROR: \"%s\" at file %s, line %d !!!\n", cudaGetErrorString(err), __FILE__, __LINE__);\
        exit(1); \
    } \
}

// Round-trips an N-element array host -> device -> device -> host and
// asserts the data survived unchanged. N defaults to 10000 or comes from argv[1].
int main(int argc, char* argv[])
{
    const int N = (argc==1)? 10000 : atoi(argv[1]);
    printf("N = %d\n", N);

    // host buffers
    double *h_src = new double [N];
    double *h_dst = new double [N];

    // device buffers
    double *d_first = NULL;
    double *d_second = NULL;
    CHECK_CUDA( cudaMalloc((void **) &d_first, sizeof(double)*N) );
    CHECK_CUDA( cudaMalloc((void **) &d_second, sizeof(double)*N) );

    // fill the source, clear the destination
    for (int i=0; i<N; i++)
    {
        h_src[i] = 10.f+i;
        h_dst[i] = 0.f;
    }

    // host -> device, device -> device, device -> host
    CHECK_CUDA( cudaMemcpy(d_first, h_src, sizeof(double)*N, cudaMemcpyDefault) );
    CHECK_CUDA( cudaMemcpy(d_second, d_first, sizeof(double)*N, cudaMemcpyDefault) );
    CHECK_CUDA( cudaMemcpy(h_dst, d_second, sizeof(double)*N, cudaMemcpyDefault) );

    // verify the round trip preserved every element
    for (int i=0; i<N; i++)
        assert(h_src[i] == h_dst[i]);

    // cleanup
    delete [] h_src;
    delete [] h_dst;
    CHECK_CUDA( cudaFree(d_first) );
    CHECK_CUDA( cudaFree(d_second) );

    printf("Gratulacje, program dziala poprawnie!!!\n");
}
14,480
#include <stdio.h>
#include <sys/time.h>

///////////////////////////////////////////////////////////
// Simple vector addition in CUDA with pinned memory
///////////////////////////////////////////////////////////

#define N 1024*1024 //Number of elements in the vector

// Each thread sums its own contiguous chunk of n elements.
__global__ void add(float *A, float *B, float *C, int n){
    // Start of this thread's chunk.
    int first = (blockDim.x * blockIdx.x + threadIdx.x) * n;
    for (int k = 0; k < n; k++) {
        C[first + k] = A[first + k] + B[first + k];
    }
}

int main(void)
{
    float *A, *B, *C;
    float *d_A, *d_B, *d_C;
    int size = N * sizeof(float);
    int numBlocks, numThreadsPerBlock;

    // Pinned host allocations (page-locked, faster transfers).
    cudaMallocHost(&A, size);
    cudaMallocHost(&B, size);
    cudaMallocHost(&C, size);

    // A = 1,2,3,... and B = 2*A.
    for (int i = 0; i < N; i++) {
        A[i] = i + 1;
        B[i] = (i + 1) * 2;
    }

    // Device allocations.
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Host -> device transfers.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    // Fixed launch shape; each thread then handles N/(blocks*threads) elements.
    numThreadsPerBlock = 1024;
    numBlocks = 256;
    add<<<numBlocks, numThreadsPerBlock>>>(d_A, d_B, d_C, N/(numBlocks*numThreadsPerBlock));

    // Blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    //Display results
    printf("\n#########################\n");
    printf("Calculation results\n");
    printf("#########################\n");
    printf("Vector A : [%f,%f, ...,%f] \n", A[0], A[1], A[N-1]);
    printf("Vector B : [%f,%f, ...,%f] \n", B[0], B[1], B[N-1]);
    printf("Vector C (result A+B) : [%f,%f, ...,%f] \n", C[0], C[1], C[N-1]);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    return 0;
}
14,481
#include "includes.h"

// Element-wise accumulate: dest[i] += src[i] for every i < sz.
__global__ void dense_mv_add(size_t sz, float_t* src, float_t* dest)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= sz)
        return;
    dest[i] += src[i];
}
14,482
#include "includes.h"

/*
Copyright 2014-2015 Dake Feng, Peri LLC, dakefeng@gmail.com

This file is part of TomograPeri.

TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/

// Thread-block shape presumably used by the host-side launch -- TODO confirm.
#define blockx 16
#define blocky 16

// Accumulates the weighted regularization terms for one voxel per thread:
// adds 2*beta*w[q] into dev_F[ind0] and subtracts the neighbour-coupled term
// from dev_G[ind0] for the 5 neighbours listed in indg.
// Grid mapping: x -> slice index k (0..num_slices-1), y -> in-row index n
// (offset by +1, valid range 1..num_grid-2).
// ind0 = n + k*num_grid*num_grid addresses the first row of slice k in a
// row-major num_grid x num_grid layout (the "Top" in the kernel name).
__global__ void _weightTopkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg5, float *dev_recon)
{
    uint q;
    int ind0, indg[5];
    uint k = blockIdx.x*blockDim.x + threadIdx.x;
    uint n = blockIdx.y*blockDim.y + threadIdx.y+1;
    // Range guard: n is always >= 1 by construction; the (n<1) test is
    // redundant but harmless.
    if ((k>=num_slices)||(n<1)||(n>=(num_grid-1)))
        return;
    // Flat index of this voxel.
    ind0 = n + k*num_grid*num_grid;
    // Neighbours: right, left, next row, next row right, next row left.
    indg[0] = ind0+1;
    indg[1] = ind0-1;
    indg[2] = ind0+num_grid;
    indg[3] = ind0+num_grid+1;
    indg[4] = ind0+num_grid-1;
    for (q = 0; q < 5; q++)
    {
        dev_F[ind0] += 2*beta*dev_wg5[q];
        dev_G[ind0] -= 2*beta*dev_wg5[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
    }
}
14,483
#include "includes.h"

// Copies a TILE_DIM x TILE_DIM tile of idata to odata, staging each tile
// through shared memory. Expects blockDim = (TILE_DIM, BLOCK_ROWS), so each
// thread handles TILE_DIM/BLOCK_ROWS rows of the tile.
__global__ void copySharedMem(float *odata, const float *idata)
{
    __shared__ float tile[TILE_DIM * TILE_DIM];

    const int col = blockIdx.x * TILE_DIM + threadIdx.x;
    const int row = blockIdx.y * TILE_DIM + threadIdx.y;
    const int width = gridDim.x * TILE_DIM;

    // Stage the tile into shared memory, BLOCK_ROWS rows per iteration.
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        tile[(threadIdx.y + j) * TILE_DIM + threadIdx.x] = idata[(row + j) * width + col];

    // All writes must land before any thread reads the tile back.
    __syncthreads();

    // Write the staged tile out to global memory.
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        odata[(row + j) * width + col] = tile[(threadIdx.y + j) * TILE_DIM + threadIdx.x];
}
14,484
#include <cuda_runtime.h>
#include <stdio.h>

// Demonstrates how the grid dimension grows as the block size shrinks
// for a fixed element count: prints (grid.x, block.x) for block sizes
// 1024, 512, 256 and 128.
int main()
{
    // Total data elements to cover.
    int nElem = 1024;

    // Initial configuration: one block of 1024 threads.
    dim3 block(1024);
    dim3 grid((nElem + block.x - 1) / block.x);   // ceil-div
    printf("grid.x: %d, block.x: %d \n", grid.x, block.x);

    // Halve the block size repeatedly and recompute the grid.
    const unsigned int sizes[] = { 512, 256, 128 };
    for (int s = 0; s < 3; ++s) {
        block.x = sizes[s];
        grid.x = (nElem + block.x - 1) / block.x;
        printf("grid.x: %d, block.x: %d \n", grid.x, block.x);
    }

    cudaDeviceReset();
    return 0;
}
14,485
#include <stdio.h>
#include <stdlib.h>
#include "JControlFlow.cu"
#include <time.h>

// Fills the num_arrays * array_stride array with random 0/1 values
// (roughly 50/50 split).
void initialize(int num_arrays, int array_stride, int * bool_array){
    int tot = num_arrays * array_stride;
    int r;
    for(int i = 0; i < tot; i++) {
        r = rand() % 50;
        if(r < 25)
            bool_array[i] = 0;
        else
            bool_array[i] = 1;
    }
}

// Returns -1 when the arrays match element-for-element, otherwise the
// index of the first difference.
int isEqual(int num, int * array1, int * array2){
    for(int i=0; i < num; i++) {
        if(array1[i] != array2[i])
            return i;
    }
    return -1;
}

// CPU reference for the Control_Flow kernel (JControlFlow.cu).
// Periodic BC: loop through each array; if the current value is 1 then flip
// the parity of the index to the right, else flip the parity of the index
// to the left.
void host_Control_Flow(int n_iterations, int num_arrays, int array_stride, int * boolean_array)
{
    for (int i = 0; i < n_iterations; i++) {
        for (int id = 0; id < num_arrays; id++) {
            unsigned int array_address = id * array_stride;
            int curr_val;
            int side_val;
            for (int j = 0; j < array_stride; j++) {
                curr_val = boolean_array[array_address + j];
                if(curr_val == 0) {
                    if(j > 0) {
                        side_val = boolean_array[array_address + j - 1];
                        boolean_array[array_address + j - 1] = (side_val + 1) % 2;
                    }
                    else {
                        // Wrap to the last element of this array.
                        side_val = boolean_array[array_address + array_stride - 1];
                        boolean_array[array_address + array_stride - 1] = (side_val + 1) % 2;
                    }
                }
                else {
                    if(j < array_stride - 1) {
                        side_val = boolean_array[array_address + j + 1];
                        boolean_array[array_address + j + 1] = (side_val + 1) % 2;
                    }
                    else {
                        // Wrap to the first element of this array.
                        side_val = boolean_array[array_address];
                        boolean_array[array_address] = (side_val + 1) % 2;
                    }
                }
            }
        }
    }
}

// Benchmarks the Control_Flow kernel against the CPU reference and checks
// both produce identical output. argv[1] = number of arrays.
int main(int argc, char * argv[]){
    int array_stride = 32;
    int num_arrays = atoi(argv[1]);
    int num_iterations = 10000;

    cudaDeviceReset();

    cudaEvent_t start, intermediate, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&intermediate);
    cudaEventCreate(&stop);
    float timeForCopy, timeForComputation;
    clock_t cpuStart, cpuStop;
    double CPU_time;
    int status;

    int * h_bool_array;
    int * d_bool_array;
    int * h_device_output;

    int tpb = 256;
    dim3 threads = dim3 (tpb, 1, 1);
    dim3 blocks = dim3 ((num_arrays + tpb - 1)/tpb, 1, 1);

    // Pinned host buffers plus one device buffer.
    status = cudaMallocHost(&h_bool_array, sizeof(int) * array_stride * num_arrays);
    status = cudaMalloc(&d_bool_array, sizeof(int) * array_stride * num_arrays);
    status = cudaMallocHost(&h_device_output, sizeof(int) * array_stride * num_arrays);

    initialize(num_arrays, array_stride, h_bool_array);
    cudaDeviceSynchronize();

    cudaEventRecord(start, 0);
    status = cudaMemcpy(d_bool_array, h_bool_array, sizeof(int) * array_stride * num_arrays, cudaMemcpyHostToDevice);
    cudaEventRecord(intermediate, 0); // make data point after copy
    Control_Flow <<< blocks, threads >>> (num_iterations, num_arrays, array_stride, d_bool_array);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeForCopy, start, intermediate);        // H2D copy time (ms)
    cudaEventElapsedTime(&timeForComputation, intermediate, stop);  // kernel time (ms)

    status = cudaMemcpy(h_device_output, d_bool_array, sizeof(int) * array_stride * num_arrays, cudaMemcpyDeviceToHost);

    printf("time for GPU copy %f \n", timeForCopy/1000);
    printf("time for GPU compute %f \n", timeForComputation/1000);
    printf("total GPU time %f \n", (timeForCopy + timeForComputation)/1000);

    // CPU reference run: h_bool_array still holds the original input since
    // the GPU worked on its own copy.
    cpuStart = clock();
    host_Control_Flow(num_iterations, num_arrays, array_stride, h_bool_array);
    cpuStop = clock();
    CPU_time = ((double)(cpuStop - cpuStart))/CLOCKS_PER_SEC;
    printf("time for CPU compute %f \n", CPU_time);

    int test = isEqual(num_arrays * array_stride, h_bool_array, h_device_output);
    if(test == -1) {
        printf("Both CPU and GPU produced the same output \n");
    }
    else {
        printf("CPU and GPU outputs differ, first instance at idx %d ", test);
    }

    // BUG FIX: buffers from cudaMallocHost must be released with
    // cudaFreeHost (cudaFree is for device memory); h_device_output was
    // also never released at all.
    cudaFreeHost(h_bool_array);
    cudaFree(d_bool_array);
    cudaFreeHost(h_device_output);

    cudaEventDestroy(start);
    cudaEventDestroy(intermediate);
    cudaEventDestroy(stop);
}
14,486
/* Author: Garrett Scholtes
 * Date: 2015-12-02
 *
 * tests.cu - A test bed for the root-finding
 * functions, including benchmarks.
 */

#include <stdlib.h>
#include <stdio.h>

// Mock some expensive function that is continuous and sign changing
struct expensive_functor {
    __host__ __device__
    float operator()(float value) const {
        return 1 - (value + 0.1f) * (value + 0.1f);
    }
};

// Evaluates the functor at 0.3 on the device and stores the result.
// BUG FIX: the functor is now taken BY VALUE. The previous signature took
// a const reference, which made device code dereference a host-memory
// address (the referenced object lives on the host) -- undefined behavior.
// Kernel arguments must be passed by value so they are copied to the device.
template <typename UnaryFunction>
__global__ void evalFunctorKernel(float * result, UnaryFunction f) {
    *result = f(0.3);
}

// Host wrapper: launches the kernel above and returns f(0.3) computed on
// the GPU. The host-side reference parameter is fine; the launch copies
// the functor by value.
template <typename UnaryFunction>
float evaluatesFunctorAt03(const UnaryFunction & f) {
    float *result_d = NULL;
    float returnVal = 0.0f;

    cudaMalloc(&result_d, sizeof(float));
    evalFunctorKernel<<<1, 1>>>(result_d, f);
    cudaMemcpy(&returnVal, result_d, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(result_d);
    return returnVal;
}

// Run the tests here
int main(void) {
    expensive_functor someFunctorFunction;

    float result = evaluatesFunctorAt03(someFunctorFunction);
    printf("evaluatesFunctorAt03 = %0.2f\n", result);

    return 0;
}
14,487
#include "includes.h"

// Stride-N element difference: thread idx computes
//   sub[idx*N] = a[idx*N] - b[idx*N].
// NOTE(review): for an N x N row-major matrix this only touches the first
// element of each row. If a full matrix subtraction was intended, the index
// should be plain idx with guard idx < N*N -- confirm against callers.
__global__ void subMat(float *a, float *b, float *sub, int N)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard (idx*N) < (N*N) is equivalent to idx < N for positive N.
    if((idx*N) < (N*N))
        sub[idx * N] = a[idx * N] - b[idx * N];
}
14,488
#include "includes.h"

// Element-wise vector sum: c[i] = a[i] + b[i] over `taille` elements.
__global__ void somme( int taille, float * a, float * b, float *c ){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Out-of-range threads from the last partial block do nothing.
    if (i < taille) {
        c[i] = a[i] + b[i];
    }
}
14,489
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

// Kernel: element-wise in-place addition x[i] += y[i].
__global__ void vector_add(float *x, float *y, int n) {
    // Global thread index plays the role of the sequential loop variable:
    //   threadIdx.x - thread id within the current block
    //   blockDim.x  - threads per block
    //   blockIdx.x  - id of the current block
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n) {
        x[i] = x[i] + y[i];
    }
}

int main(void) {
    const int num_elements = 1 << 16;
    const int num_bytes = num_elements * sizeof(float);

    // Host (CPU) and device (GPU) buffers.
    float *host_array_x = 0, *host_array_y = 0;
    float *device_array_x = 0, *device_array_y = 0;

    // Host allocations.
    host_array_x = (float *) malloc(num_bytes);
    host_array_y = (float *) malloc(num_bytes);

    // Device allocations.
    cudaMalloc((void **) &device_array_x, num_bytes);
    cudaMalloc((void **) &device_array_y, num_bytes);

    // Bail out if any allocation failed.
    if (host_array_x == 0 || host_array_y == 0 ||
        device_array_x == 0 || device_array_y == 0) {
        printf("[HOST] Couldn't allocate memory\n");
        return 0;
    }

    // Initialize x = 4 and y = 2 everywhere.
    for (int i = 0; i < num_elements; i++) {
        host_array_x[i] = 4;
        host_array_y[i] = 2;
    }

    // Host -> device (CPU -> GPU) transfers.
    cudaMemcpy(device_array_x, host_array_x, num_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(device_array_y, host_array_y, num_bytes, cudaMemcpyHostToDevice);

    // Launch configuration: 256 threads per block, enough blocks to cover
    // every element (one extra block when the division is not exact).
    const size_t block_size = 256;
    size_t blocks_no = num_elements / block_size;
    if (num_elements % block_size != 0) {
        ++blocks_no;
    }

    vector_add<<<blocks_no, block_size>>>(device_array_x, device_array_y, num_elements);

    // Wait for all GPU threads to finish (analogous to pthread_join)
    // before copying results back.
    cudaDeviceSynchronize();

    // Device -> host (GPU -> CPU) transfers.
    cudaMemcpy(host_array_x, device_array_x, num_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(host_array_y, device_array_y, num_bytes, cudaMemcpyDeviceToHost);

    // Show the first few sums; x now holds x + y, so x - y recovers the
    // original addend for display.
    for (int i = 0; i < 10; ++i) {
        printf("Result %d: %1.1f + %1.1f = %1.3f\n", i,
               host_array_x[i] - host_array_y[i], host_array_y[i], host_array_x[i]);
    }

    // Release host memory.
    free(host_array_x);
    free(host_array_y);

    // Release device memory.
    cudaFree(device_array_x);
    cudaFree(device_array_y);

    return 0;
}
14,490
#include "includes.h"

// ReLU activation: Y[t] = X[t] when X[t] >= 0, otherwise 0.
__global__ void relu_h(float *X, float *Y, int size_in)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t >= size_in)
        return;
    Y[t] = (X[t] >= 0.0f) ? X[t] : 0.0f;
}
14,491
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/

//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
//
// Applies the 16x16 two-qubit Pauli transfer matrix ptm_g (row-major) to the
// density matrix dm over the qubit pair (bit0, bit1) of a no_qubits register.
// Shared memory layout: ptm[0..255] caches ptm_g, followed by blockDim.x
// doubles of staging data -- so the launch presumably passes
// (256 + blockDim.x) * sizeof(double) dynamic shared memory; confirm
// against the host-side launch.
__global__ void two_qubit_ptm(double *dm, double *ptm_g, unsigned int bit0, unsigned int bit1, unsigned int no_qubits) {
    const unsigned int x = threadIdx.x;
    const unsigned int high_x = blockIdx.x * blockDim.x;

    extern __shared__ double ptm[];
    double *data = &ptm[256]; //need blockDim.x double floats

    // the lowest two bits of x are used to address bit0, the next two are used to address bit1
    // global address = <- pos =
    // aaaxxbbbbyycccc <- aaabbbbccccxxyy
    int higher_bit = max(bit0, bit1);
    int lower_bit = min(bit0, bit1);

    int high_mask = ~ ( (1 << (2*higher_bit+2)) - 1 ); //a mask (of pos)
    int mid_mask = (~ ( (1 << (2*lower_bit + 4)) - 1)) & (~high_mask); //b mask
    int low_mask = ~(high_mask | mid_mask) & (~0xf); //c mask

    int pos = high_x | x;

    // Scatter pos's four low bits to the two qubits' bit positions and shift
    // the remaining fields down to fill the resulting gaps.
    int global_from = (pos & high_mask) | ((pos & mid_mask) >> 2) | ((pos & low_mask) >> 4) | ((pos & 0x3) << (2 * bit0)) | (((pos & 0xc) >>2) << (2 * bit1));

    //fetch ptm to shared memmory
    //need to fetch several values per thread if blockDim.x is less than 256 (only for small dms...)
    for(int i=0; i < 256; i+=blockDim.x) {
        if(i+x < 256) {
            ptm[i+x] = ptm_g[i+x];
        }
    }

    // Threads past the end of the density matrix only helped load the PTM.
    // NOTE(review): they return before __syncthreads(); this is only safe if
    // such threads never occur within a partially-active block -- confirm
    // the launch always uses a block size dividing 4^no_qubits.
    if (high_x + x >= (1 << (2*no_qubits))) return;

    //fetch data block to shared memory
    data[x] = dm[global_from];
    __syncthreads();

    // Each run of 16 consecutive threads holds one 16-component vector:
    // idx selects the run, row selects the component this thread computes.
    unsigned int row = x & 0xf;
    unsigned int idx = x & ~0xf;

    double acc=0;
    for(int i=0; i<16; i++) {
        acc += ptm[16*row + i]*data[idx+i];
    }

    // Barrier before overwriting: other lanes in the run still read data[].
    __syncthreads();

    dm[global_from] = acc;
}
14,492
#include "includes.h"

// Window sum over the Kx x Ky region whose top-left corner is (startX, startY),
// computed from a summed-area table laid out row-major with colNumberM
// columns. After the decrements, the four corner lookups implement the
// standard SAT inclusion-exclusion; the ternaries drop terms that would fall
// off the top/left edge of the table.
__device__ float computeS(float *sumTable, int rowNumberN, int colNumberM, int startX, int startY, int Kx, int Ky)
{
    // Shift to the cell just outside the window (exclusive corner).
    startX--;
    startY--;
    float S = sumTable[startX + Kx + (Ky + startY) * colNumberM]
        - (startX < 0 ? 0 : sumTable[startX + (Ky + startY) * colNumberM])
        - (startY < 0 ? 0 : sumTable[startX + Kx + startY * colNumberM])
        + (startX < 0 || startY < 0 ? 0 : sumTable[startX + startY * colNumberM]);
    return S;
}

// One thread per candidate window position (startX, startY): derives the
// window's mean, variance, x-gradient and y-gradient features from the four
// summed-area tables (values, squared values, x-weighted, y-weighted) and
// writes the 4-D Euclidean distance to templateFeatures into differences.
__global__ void calculateFeatureDifference(float *templateFeatures, int colNumberM, int rowNumberN, float *l1SumTable, float *l2SumTable, float *lxSumTable, float *lySumTable, int Kx, int Ky, float *differences)
{
    // Number of valid window positions along each axis.
    int widthLimit = colNumberM - Kx + 1;
    int heightLimit = rowNumberN - Ky + 1;
    float meanVector;
    float varianceVector;
    float xGradientVector;
    float yGradientVector;
    int startX = threadIdx.x + blockIdx.x * blockDim.x;
    int startY = threadIdx.y + blockIdx.y * blockDim.y;
    // Threads from grid overshoot do nothing.
    if (startX >= widthLimit || startY >= heightLimit)
        return;
    // S1 = sum of values, S2 = sum of squared values over the window.
    float S1D = computeS(l1SumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
    float S2D = computeS(l2SumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
    meanVector = S1D / (Kx * Ky);
    // Var = E[v^2] - (E[v])^2.
    varianceVector = S2D / (Kx * Ky) - powf(meanVector, 2);
    // Gradient features from the coordinate-weighted sums, centered on the
    // window midpoint.
    float SxD = computeS(lxSumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
    xGradientVector = 4 * (SxD - (startX + Kx / 2.0) * S1D) / (Kx * Kx * Ky);
    float SyD = computeS(lySumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
    yGradientVector = 4 * (SyD - (startY + Ky / 2.0) * S1D) / (Ky * Ky * Kx);
    // norm4df = sqrt of the sum of the four squared component differences.
    differences[startX + startY * widthLimit] = norm4df(
        templateFeatures[0] - meanVector,
        templateFeatures[1] - varianceVector,
        templateFeatures[2] - xGradientVector,
        templateFeatures[3] - yGradientVector);
}
14,493
#include <cuda_runtime.h>
#include <stdio.h>

// Abort on any CUDA runtime error, reporting file/line and reason.
#define CHECK(call) \
{ const cudaError_t error = call; \
    if (error != cudaSuccess) \
    { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code:%d, reason: %s\n", error, \
                cudaGetErrorString(error)); \
        exit(1); \
    } \
}

// Prints identification, clock-rate and memory-size properties for every
// CUDA device present. Returns 0 on success (errors abort via CHECK).
int getDeviceProperty()
{
    int device;
    const float KILO = 1000.f;
    const float MEGA = 1000.f*1000.f;
    const float GIGA = 1000.f*1000.f*1000.f;

    CHECK(cudaGetDeviceCount(&device));

    for (int i = 0; i < device; i++)
    {
        /* Get device properties function call */
        cudaDeviceProp property;
        CHECK(cudaGetDeviceProperties(&property, i));
        printf("<~~~~~~~~~ Device ~~~~~~~~~>\n");
        // BUG FIX: previously printed the device COUNT (`device`) as the ID
        // of every device; the device being described is index i.
        printf("ID: %d\n", i);
        printf("Name %s\n", property.name);
        printf("Compute Capability %d.%d\n", property.major, property.minor);
        // clockRate and memoryClockRate are reported by the runtime in kHz.
        printf("Clock Rate %.2f Mhz\n", (property.clockRate)/(KILO));
        printf("Memory Clock Rate %.2f Ghz\n", (property.memoryClockRate)/(MEGA));
        printf("Total Global Memory %.2f GB\n", property.totalGlobalMem/(GIGA));
        printf("Total Const Memory %.2f KB\n", property.totalConstMem/(KILO));
        printf("Shared Memory Per Block %.2f KB\n", property.sharedMemPerBlock/(KILO));
    }
    return 0;
}
14,494
#include "includes.h"

// In-place device vector addition: ad[i] += bd[i] for i < N.
__global__ void add_vectors(float *ad, float *bd, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    ad[i] += bd[i]; // accumulate directly in GPU memory
}
14,495
#include <cuda.h>
#include <stdio.h>
#include <vector>

// Prints n floats in the form "(v0, v1, ..., )".
void printArray(const float* x, int n) {
    printf("(");
    for (int i = 0; i < n; i++) {
        printf("%f, ", x[i]);
    }
    printf(")\n");
}

// Gaussian kernel density estimate of the sample x evaluated at each of its
// own points, written to y. Also sets *run so the host can confirm the
// kernel executed. Single-threaded: one thread walks all n points.
__global__ void f_h(const int n, const float h, const float *x, float *y, bool *run) {
    *run = true;
    // 1/(n*h) times 1/sqrt(2*pi).
    float coef = 1 / (n * h) * .3989422804;
    for (int j = 0; j < n; j++) {
        float xj = x[j];
        float acc = 0;
        for (int i = 0; i < n; i++) {
            float u = (xj - x[i]) / h;
            acc = acc + exp(-(u * u) / 2);
        }
        y[j] = coef * acc;
    }
}

// Host wrapper: stages x_v into managed memory, runs f_h single-threaded,
// and prints the buffers before and after the launch. (y_v is currently
// unused; results are only printed.)
__host__ void gpuCall(int n, float h, const float *x_v, float *y_v) {
    printf("START GPU CALL\n");

    float *x, *y;
    bool *run;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    cudaMallocManaged(&run, sizeof(bool));
    *run = false;

    // Copy the input sample into managed memory.
    for (int i = 0; i < n; i++) {
        x[i] = x_v[i];
    }

    //==============================================================
    printf("X before\n");
    printArray(x, n);
    printf("\n");
    printf("Y before\n");
    printArray(y, n);
    //==============================================================

    f_h<<<1, 1>>>(n, h, x, y, run);
    cudaDeviceSynchronize();
    printf("Did it run? %d\n", *run);

    //==============================================================
    printf("\n");
    printf("Y\n");
    printArray(y, n);
    //==============================================================

    cudaFree(x);
    cudaFree(y);
    cudaFree(run);
}
14,496
#include <stdio.h>

#define n 1024
#define NUMTHREADS 256

// One thread per element: atomically bump the bucket selected by data[i].
__global__ void histogram_kernel( unsigned int *data, unsigned int *bin)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    atomicAdd( &(bin[data[i]]), 1 );
}

// Builds a 10-bucket histogram of n values (i % 10) on the GPU and checks
// that the bucket counts sum back to n.
int main (int argc, char *argv[] )
{
    int size = n * sizeof(int);
    unsigned int a[n];
    unsigned int bin[10];
    unsigned int *dA, *dBin;

    // Input: the values 0..9 repeating.
    for (int i = 0; i < n; i++) {
        a[i] = i % 10;
    }

    // Device buffers: input data plus zeroed histogram.
    cudaMalloc( (void**)&dA, size);
    cudaMalloc( (void**)&dBin, 10*sizeof(int));
    cudaMemcpy( dA, a, size, cudaMemcpyHostToDevice);
    cudaMemset( dBin, 0, 10*sizeof(int));

    // Enough blocks to cover all n elements.
    int nblocks = (n + NUMTHREADS - 1) / NUMTHREADS;
    histogram_kernel<<<nblocks, NUMTHREADS>>>(dA, dBin);

    cudaMemcpy(bin, dBin, 10*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree( dA);
    cudaFree( dBin);

    // Report each bucket and the total.
    int count = 0;
    for (int i = 0; i < 10; i++) {
        printf("Freq %d = %d\n", i, bin[i]);
        count = count + bin[i];
    }
    printf("#elements = %d\n", count);
}
14,497
 #include <stdio.h> #include <math.h> #include <malloc.h> #include <stdlib.h> #define PRECISION float #define max 999 #define maa 9 /*Block size depends on maximum threads*/ #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 /* Parameters */ int imax , jmax ; int imax2 , jmax2 ; int ima , jma ; int isd , jsd ; int ied , jed ; int numGPUs ; long ncye , nwri ; PRECISION reyn , rtau , csou , rcsu ; PRECISION pini , uini , vini , uwui , runi ; PRECISION ex[maa] , ey[maa] , we[maa] ; PRECISION fn[max][max][maa] , fe[max][max][maa] , fp[max][max][maa] ; PRECISION pn[max][max] , un[max][max] , vn[max][max] ; PRECISION xg[max][max] , yg[max][max] ; #define PTR(i, j) (imax2 * j + i) #define PTRQ(i, j, k) ((j*imax2*maa)+(i*maa)+k) __constant__ PRECISION dex[maa]; __constant__ PRECISION dey[maa]; __constant__ PRECISION dwe[maa]; struct Conditions { int imax, jmax ; int imax2, jmax2 ; int isd, ied, jsd, jed ; PRECISION rtau, rcsu, runi, uwui ; }; //ŠÖ”‚̃vƒƒgƒ^ƒCƒvéŒ¾// void iniset(void) ; void inicon(void) ; void solver(void) ; //********************************‘æˆêˆ—‹æ‰æ**********************************// // // //******************************************************************************// /*__global__ void matrix1(PRECISION *dfe, PRECISION* dfp, PRECISION* dfn, struct Conditions *d_cond) */ // original definition __global__ void matrix1(PRECISION *dfe, PRECISION* dfp, PRECISION* dfn, PRECISION *dmarker, PRECISION *dun, PRECISION *dvn, PRECISION *dpn, struct Conditions *d_cond) { const int i = blockIdx.x * blockDim.x + threadIdx.x ; const int j = blockIdx.y * blockDim.y + threadIdx.y ; //ƒOƒ[ƒoƒ‹•ϐ”‚ÍGPU“à‚ÉŠ±Â‚Å‚«‚È‚¢// const int imax2 = d_cond->imax2 ; const PRECISION rtau = d_cond->rtau ; const PRECISION runi = d_cond->runi; const int imax = d_cond->imax ; const int jmax = d_cond->jmax ; if(i==0){ } else if(i==imax){ } else if(j==0){ } else if(j==jmax){ } else{ for(int k=0;k<maa;k++){ int ii = i - int(dex[k]) ; int jj = j - int(dey[k]) ; const int pointer = 
PTRQ(ii,jj,k) ; const int pointer2 = PTR(ii,jj); //add external forcing here /* euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i, j, k)] = dwe[k] * (dpn[PTR(i, j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i, j, k)] = dfp[PTRQ(i, j, k)] ; */ //dfp[PTRQ(i, j, k)] = dfn[pointer]+ rtau * (dfe[pointer] - dfn[pointer]) + dpn[pointer]*dmarker[pointer]*(dwe[k]*( -dex[k]*dun[pointer] -dey[k]*dvn[pointer] )) ; dfp[PTRQ(i, j, k)] = dfn[pointer]+ rtau * (dfe[pointer] - dfn[pointer]) + runi*dmarker[pointer2]*(dwe[k]*( -dex[k]*dun[pointer2] -dey[k]*dvn[pointer2] )) ; /* original line */ //dfp[PTRQ(i, j, k)] = dfn[pointer]+ rtau * (dfe[pointer] - dfn[pointer]); } } __syncthreads() ; } //********************************‘æ“ñˆ—‹æ‰æ**********************************// // // //******************************************************************************// __global__ void matrix2(PRECISION *dfp, PRECISION *dpn, PRECISION *dun, PRECISION *dvn, PRECISION *ddps, PRECISION *dduv, struct Conditions *d_cond) { const int i = blockIdx.x * blockDim.x + threadIdx.x ; const int j = blockIdx.y * blockDim.y + threadIdx.y ; const int imax2 = d_cond->imax2 ; const PRECISION rcsu = d_cond->rcsu ; const int imax = d_cond->imax ; const int jmax = d_cond->jmax ; if(i==0){ } else if(i==imax){ } else if(j==0){ } else if(j==jmax){ } else{ PRECISION pss = 0.0 ; PRECISION uss = 0.0 ; PRECISION vss = 0.0 ; for(int k=0;k<maa;k++){ pss += dfp[PTRQ(i, j, k)] ; uss += dex[k] * dfp[PTRQ(i, j, k)] ; vss += dey[k] * dfp[PTRQ(i, j, k)] ; } pss = pss ; uss = rcsu * uss ; vss = rcsu * vss ; ddps[PTR(i, j)] = (pss - dpn[PTR(i, j)]) * (pss - dpn[PTR(i, j)]) ; dduv[PTR(i, j)] = (uss - dun[PTR(i, j)]) * (uss - dun[PTR(i, j)]) + (vss - dvn[PTR(i, j)]) * (vss - dvn[PTR(i, j)]) ; dpn[PTR(i, j)] = pss ; dun[PTR(i, j)] = uss ; dvn[PTR(i, j)] = vss ; } __syncthreads() ; } 
//********************************‘æŽOˆ—‹æ‰æ**********************************// // // //******************************************************************************// __global__ void matrix3(PRECISION *dfe, PRECISION *dfp, PRECISION *dfn, PRECISION *dpn, PRECISION *dun, PRECISION *dvn, struct Conditions *d_cond) { const int i = blockIdx.x * blockDim.x + threadIdx.x ; const int j = blockIdx.y * blockDim.y + threadIdx.y ; const int imax2 = d_cond->imax2 ; const PRECISION runi = d_cond->runi ; PRECISION euv, qau ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i, j, k)] = dwe[k] * (dpn[PTR(i, j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i, j, k)] = dfp[PTRQ(i, j, k)] ; } __syncthreads() ; } //******************************‘æˆê‹«ŠEˆ—‹æ‰æ********************************// // // //******************************************************************************// __global__ void CUDAboundp(PRECISION *dpn, PRECISION *dun, PRECISION *dvn, struct Conditions *d_cond) { const int i = blockIdx.x * blockDim.x + threadIdx.x ; const int j = blockIdx.y * blockDim.y + threadIdx.y ; const int isd = d_cond->isd ; const int ied = d_cond->ied ; const int jsd = d_cond->jsd ; const int jed = d_cond->jed ; const int imax = d_cond->imax ; const int jmax = d_cond->jmax ; const int imax2 = d_cond->imax2 ; const PRECISION uwui = d_cond->uwui ; if(i == 0 && j == 0){//¶‰º// int i1 = i + 1 ; int j1 = j + 1 ; int i2 = i + 2 ; int j2 = j + 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i1,j1)] - dpn[PTR(i2,j2)]) / 8.0 ; dun[PTR(i, j)] = 0.0 ; dvn[PTR(i, j)] = 0.0 ; } else if(i == imax && j == 0){//‰E‰º// int i1 = i - 1 ; int j1 = j + 1 ; int i2 = i - 2 ; int j2 = j + 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i1,j1)] - dpn[PTR(i2,j2)]) / 8.0 ; dun[PTR(i, j)] = 0.0 ; dvn[PTR(i, j)] = 0.0 ; } else if(i == 0 && j == jmax){//¶ã// int i1 = i + 1 ; int j1 = j - 1 ; int i2 = i + 2 ; int 
j2 = j - 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i1,j1)] - dpn[PTR(i2,j2)]) / 8.0 ; dun[PTR(i, j)] = 0.5 * uwui ; dvn[PTR(i, j)] = 0.0 ; } else if(i == imax && j == jmax){//‰Eã// int i1 = i - 1 ; int j1 = j - 1 ; int i2 = i - 2 ; int j2 = j - 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i1,j1)] - dpn[PTR(i2,j2)]) / 8.0 ; dun[PTR(i, j)] = 0.5 * uwui ; dvn[PTR(i, j)] = 0.0 ; } else if(j == 0 && isd<=i && i<=ied){//‰º•Ó// int j1 = j + 1 ; int j2 = j + 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i,j1)] - dpn[PTR(i,j2)]) / 8.0 ; dun[PTR(i, j)] = 0.0 ; dvn[PTR(i, j)] = 0.0 ; } else if(j == jmax && isd<=i && i<=ied){//ã•Ó// int j1 = j - 1 ; int j2 = j - 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i,j1)] - dpn[PTR(i,j2)]) / 8.0 ; dun[PTR(i, j)] = uwui ; dvn[PTR(i, j)] = 0.0 ; } else if(i == 0 && jsd<=j && j<=jed){//¶•Ó// int i1 = i + 1 ; int i2 = i + 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i1,j)] - dpn[PTR(i2,j)]) / 8.0 ; dun[PTR(i, j)] = 0.0 ; dvn[PTR(i, j)] = 0.0 ; } else if(i == imax && jsd<=j && j<=jed){//‰E•Ó// int i1 = i - 1 ; int i2 = i - 2 ; dpn[PTR(i, j)] = (9.0 * dpn[PTR(i1,j)] - dpn[PTR(i2,j)]) / 8.0 ; dun[PTR(i, j)] = 0.0 ; dvn[PTR(i, j)] = 0.0 ; } else{ //’†S‹æ‰æ// } __syncthreads() ; } //******************************‘æ“ñ‹«ŠEˆ—‹æ‰æ********************************// // // //******************************************************************************// __global__ void CUDAboundf(PRECISION *dfe, PRECISION *dfp, PRECISION *dfn, PRECISION *dpn, PRECISION *dun, PRECISION *dvn, struct Conditions *d_cond) { const int i = blockIdx.x * blockDim.x + threadIdx.x ; const int j = blockIdx.y * blockDim.y + threadIdx.y ; const int imax2 = d_cond->imax2 ; const int isd = d_cond->isd ; const int ied = d_cond->ied ; const int jsd = d_cond->jsd ; const int jed = d_cond->jed ; const int imax = d_cond->imax ; const int jmax = d_cond->jmax ; const PRECISION runi = d_cond->runi ; const PRECISION rcsu = d_cond->rcsu ; PRECISION pss , uss , vss ; PRECISION euv , qau ; if(i == 0 && j == 0){//¶‰º// pss = 0.0 ; 
uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int ii = i + 1 ; int jj = j + 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(ii,jj,k)] - dfe[PTRQ(ii,jj,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(ii,jj,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(i == imax && j == 0){//‰E‰º// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int ii = i - 1 ; int jj = j + 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(ii,jj,k)] - dfe[PTRQ(ii,jj,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(ii,jj,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(i == 0 && j == jmax){//¶ã// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int ii = i + 1 ; int jj = j - 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(ii,jj,k)] - dfe[PTRQ(ii,jj,k)]); dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(ii,jj,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * 
(dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(i == imax && j == jmax){//‰Eã// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int ii = i - 1 ; int jj = j - 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(ii,jj,k)] - dfe[PTRQ(ii,jj,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(ii,jj,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(j == 0 && isd<=i && i<=ied){//‰º•Ó// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int jj = j + 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(i,jj,k)] - dfe[PTRQ(i,jj,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(i,jj,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(j == jmax && isd<=i && i<=ied){//ã•Ó// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int jj = j - 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(i,jj,k)] - dfe[PTRQ(i,jj,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(i,jj,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * 
dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(i == 0 && jsd<=j && j<=jed){//¶•Ó// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int ii = i + 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(ii,j,k)] - dfe[PTRQ(ii,j,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(ii,j,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else if(i == imax && jsd<=j && j<=jed){//‰E•Ó// pss = 0.0 ; uss = 0.0 ; vss = 0.0 ; for(int k=0;k<maa;k++){ int ii = i - 1 ; dfp[PTRQ(i,j,k)] = dfe[PTRQ(i,j,k)] + (dfn[PTRQ(ii,j,k)] - dfe[PTRQ(ii,j,k)]) ; dfp[PTRQ(i,j,k)] = 2.0 * dfp[PTRQ(i,j,k)] - dfp[PTRQ(ii,j,k)] ; pss += dfp[PTRQ(i,j,k)] ; uss += dex[k] * dfp[PTRQ(i,j,k)] ; vss += dey[k] * dfp[PTRQ(i,j,k)] ; } dpn[PTR(i,j)] = pss ; dun[PTR(i,j)] = rcsu * uss ; dvn[PTR(i,j)] = rcsu * vss ; for(int k=0;k<maa;k++){ euv = dex[k] * dun[PTR(i,j)] + dey[k] * dvn[PTR(i,j)] ; qau = 0.5 * (dun[PTR(i,j)] * dun[PTR(i,j)] + dvn[PTR(i,j)] * dvn[PTR(i,j)]) ; dfe[PTRQ(i,j,k)] = dwe[k] * (dpn[PTR(i,j)] + runi * (euv + 1.5 * euv * euv - qau)) ; dfn[PTRQ(i,j,k)] = dfp[PTRQ(i,j,k)] ; } } // else{ //’†S‹æ‰æ// } __syncthreads() ; } 
//*****************************ƒƒCƒ“ƒvƒƒOƒ‰ƒ€*********************************// // // // //******************************************************************************// int main() { printf("This is my modification\n"); cudaGetDeviceCount(&numGPUs) ; if(numGPUs == 0){ printf("No GPU detected\n") ; return(0) ; } cudaDeviceProp dev; for (int i=0;i<numGPUs;i++){ cudaGetDeviceProperties(&dev,i) ; printf("Device Number : %d\n", i); printf("Using device : %s\n", dev.name); printf("totalGlobalMem %d\n", dev.totalGlobalMem); printf("sharedMemPerBlock %d\n", dev.sharedMemPerBlock); printf("regsPerBlock %d\n", dev.regsPerBlock); printf("warpSize %d\n", dev.warpSize); printf("memPitch %d\n", dev.memPitch); printf("maxThreadsPerBlock %d\n", dev.maxThreadsPerBlock); printf("maxThreadsDim[3] %d,%d,%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]); printf("maxGridSize[3] %d,%d,%d\n", dev.maxGridSize[0], dev.maxGridSize[1], dev.maxGridSize[2]); printf("totalConstMem %d\n", dev.totalConstMem); printf("major.minor %d.%d\n", dev.major, dev.minor); printf("clockRate %d\n", dev.clockRate); printf("textureAlignment %d\n", dev.textureAlignment); printf("deviceOverlap %d\n", dev.deviceOverlap); printf("multiProcessorCount %d\n", dev.multiProcessorCount); } iniset(); if(imax2%BLOCK_SIZE_X != 0 || jmax2%BLOCK_SIZE_Y != 0){ // printf("ŠiŽq”‚ÆBLOCK_SIZE‚Ƃ̊֌W‚ªm9(^„D^)\n") ; printf("Mesh number and BLOCK_SIZE is m9\n") ; return(0) ; } inicon(); solver(); // printf("‚±‚±‚ŏI‚í‚肾‚æ"); printf("Program end\n"); return(0); } //******************************ŒvŽZ—p‰ŠúÝ’è**********************************// // // //******************************************************************************// void iniset() { int i , j ; PRECISION rnyi , taui ; PRECISION w0 , w1 , w2 ; int xsize, ysize ; PRECISION chle ; //‘ã•\’·‚³// // xsize = 4 ; ysize = 4 ; ima = BLOCK_SIZE_X * xsize - 2 ; jma = BLOCK_SIZE_Y * ysize - 2 ; ncye = 50000 ; nwri = 1000 ; uwui = 0.1 ; reyn = 100 ; 
//ƒŒƒCƒmƒ‹ƒY”// // isd = 1 ; jsd = 1 ; ied = ima ; jed = jma ; imax = ima + 1 ; jmax = jma + 1 ; imax2 = imax + 1 ; jmax2 = jmax + 1 ; // runi = 1.0 ; uini = 0.0 ; vini = 0.0 ; chle = jmax ; rnyi = chle * uwui / reyn ; taui = 0.5 * (6.0 * rnyi + 1.0) ; rtau = 1.0 / taui ; csou = 1.0 / sqrt(3.0) ; pini = runi * csou * csou ; rcsu = 1.0 / pini ; printf("Mesh size, imax=%d, jmax=%d \n", imax, jmax); // ex[0] = 0.0 ; ex[1] = 1.0 ; ex[2] = 0.0 ; ex[3] = - 1.0 ; ex[4] = 0.0 ; ex[5] = 1.0 ; ex[6] = - 1.0 ; ex[7] = - 1.0 ; ex[8] = 1.0 ; // ey[0] = 0.0 ; ey[1] = 0.0 ; ey[2] = 1.0 ; ey[3] = 0.0 ; ey[4] = - 1.0 ; ey[5] = 1.0 ; ey[6] = 1.0 ; ey[7] = - 1.0 ; ey[8] = - 1.0 ; // w0 = 4.0 / 9.0 ; w1 = 1.0 / 9.0 ; w2 = 1.0 / 36.0 ; we[0] = w0 ; we[1] = w1 ; we[2] = w1 ; we[3] = w1 ; we[4] = w1 ; we[5] = w2 ; we[6] = w2 ; we[7] = w2 ; we[8] = w2 ; // for(j=0;j<=jmax;j++){ for(i=0;i<=imax;i++){ //xg[i][j] = ((i-0.5) / ima) ; xg[i][j] = (i / chle) ; yg[i][j] = (j / chle) ; } } } //*******************************‰ŠúðŒÝ’è***********************************// // // //******************************************************************************// void inicon() { int i , j , k ; PRECISION euv , qau ; // for(j=0;j<=jmax;j++){ for(i=0;i<=imax;i++){ pn[i][j] = pini ; un[i][j] = uini ; vn[i][j] = vini ; } } // j = jmax ; for(i=isd;i<=ied;i++){ pn[i][j] = pini ; un[i][j] = uwui ; vn[i][j] = vini ; } // for(k=0;k<maa;k++){ for(j=0;j<=jmax;j++){ for(i=0;i<=imax;i++){ euv = ex[k] * un[i][j] + ey[k] * vn[i][j] ; qau = 0.5 * ( un[i][j] * un[i][j] + vn[i][j] * vn[i][j] ) ; fe[i][j][k] = we[k] * (pn[i][j] + runi * (euv + 1.5*euv*euv - qau)) ; fn[i][j][k] = fe[i][j][k] ; fp[i][j][k] = 0 ; } } } } //*****************************ŽåŒvŽZƒvƒƒOƒ‰ƒ€*********************************// // // //******************************************************************************// void solver() { int i, j, k, nc ; struct Conditions cond; struct Conditions *d_cond; PRECISION dps, duv ; PRECISION rda, cda, rnp, 
dma ; FILE *fop ; // PRECISION matrixsize1 = sizeof(PRECISION) * maa ; PRECISION matrixsize2 = sizeof(PRECISION) * imax2 * jmax2 ; PRECISION matrixsize3 = sizeof(PRECISION) * imax2 * jmax2 * maa ; // cda = 1.0e-06 ; cda = 1.0e-05 ; rnp = 1.0 / ((imax - 1.0)*(jmax - 1.0)) ; dma = 0.0 ; cond.imax = imax ; cond.jmax = jmax ; cond.imax2 = imax2 ; cond.jmax2 = jmax2 ; cond.isd = isd ; cond.ied = ied ; cond.jsd = jsd ; cond.jed = jed ; cond.rtau = rtau ; cond.rcsu = rcsu ; cond.runi = runi ; cond.uwui = uwui ; // //ƒzƒXƒg‘¤‚̕ϐ”Ý’è// PRECISION *hex, *hey, *hwe, *hfn, *hfe, *hfp, *hpn, *hun, *hvn, *hdps, *hduv ; //Marker array PRECISION *hmarker; // //ƒzƒXƒg‘¤‚̃ƒ‚ƒŠŠm•Û// hex = (PRECISION*)malloc(matrixsize1); hey = (PRECISION*)malloc(matrixsize1); hwe = (PRECISION*)malloc(matrixsize1); hfn = (PRECISION*)malloc(matrixsize3); hfe = (PRECISION*)malloc(matrixsize3); hfp = (PRECISION*)malloc(matrixsize3); hpn = (PRECISION*)malloc(matrixsize2); hun = (PRECISION*)malloc(matrixsize2); hvn = (PRECISION*)malloc(matrixsize2); //allocating array for marker at host hmarker = (PRECISION*)malloc(matrixsize2); hdps = (PRECISION*)malloc(matrixsize2); hduv = (PRECISION*)malloc(matrixsize2); // if (hex == NULL) { printf("cannot allocate memory\n"); return ; } if (hey == NULL) { printf("cannot allocate memory\n"); return ; } if (hwe == NULL) { printf("cannot allocate memory\n"); return ; } if (hfn == NULL) { printf("cannot allocate memory\n"); return ; } if (hfe == NULL) { printf("cannot allocate memory\n"); return ; } if (hfp == NULL) { printf("cannot allocate memory\n"); return ; } if (hpn == NULL) { printf("cannot allocate memory\n"); return ; } if (hun == NULL) { printf("cannot allocate memory\n"); return ; } if (hvn == NULL) { printf("cannot allocate memory\n"); return ; } if (hdps == NULL) { printf("cannot allocate memory\n"); return ; } if (hduv == NULL) { printf("cannot allocate memory\n"); return ; } int ifront, iback, jdown, jtop; ifront = 0.25*imax; iback = 0.75*imax; jdown = 
0.25*jmax; jtop = 0.75*jmax; //Add initial value for hmarker for(i=0; i<=imax; i++){ for(j=0; j<=jmax; j++){ if(i >= ifront && i<= iback ){ if(j >= jdown && j <= jtop){ hmarker[PTR(i, j)] = 1.0; } } else hmarker[PTR(i, j)] = 0.0; } } /* FILE *fmarker; fmarker = fopen("marker.txt","w"); for(i=0; i<= imax; i++){ for(j=0; j<=jmax; j++){ fprintf(fmarker, " %d %d %f\n", i, j, hmarker[PTR(i, j)]); } } fclose(fmarker); */ //ƒzƒXƒg‚ÉŠe’l‚ð‘}“ü// for(j=0;j<=jmax;j++){ for(i=0;i<=imax;i++){ for(k=0;k<=8;k++){ hfn[PTRQ(i, j, k)] = fn[i][j][k] ; hfe[PTRQ(i, j, k)] = fe[i][j][k] ; hfp[PTRQ(i, j, k)] = fp[i][j][k] ; } } } for(j=0;j<=jmax;j++){ for(i=0;i<=imax;i++){ hpn[PTR(i, j)] = pn[i][j] ; hun[PTR(i, j)] = un[i][j] ; hvn[PTR(i, j)] = vn[i][j] ; hdps[PTR(i, j)] = 0 ; hduv[PTR(i, j)] = 0 ; } } for(k=0;k<=8;k++){ hex[k] = ex[k] ; hey[k] = ey[k] ; hwe[k] = we[k] ; } // //ƒfƒoƒCƒX‘¤‚̕ϐ”Ý’è// PRECISION *dfn, *dfe, *dfp, *dpn, *dun, *dvn, *ddps, *dduv ; PRECISION *dmarker; //@ //ƒfƒoƒCƒXƒƒ‚ƒŠŠm•Û‹y‚уRƒs[// // printf("(L¥ƒÖ¥M)\n") ; //cutilSafeCall‚͏‘‚¢‚¿‚áƒ_ƒ// printf("Yamazaki nuance, don't get it\n") ; //cutilSafeCall‚͏‘‚¢‚¿‚áƒ_ƒ// cudaMemcpyToSymbol(dwe, hwe, matrixsize1) ; cudaMemcpyToSymbol(dex, hex, matrixsize1) ; cudaMemcpyToSymbol(dey, hey, matrixsize1) ; // cudaMalloc((void**)&dfn, matrixsize3) ; cudaMalloc((void**)&dfe, matrixsize3) ; cudaMalloc((void**)&dfp, matrixsize3) ; cudaMalloc((void**)&dpn, matrixsize2) ; cudaMalloc((void**)&dun, matrixsize2) ; cudaMalloc((void**)&dvn, matrixsize2) ; //allocating device memory for marker array cudaMalloc((void**)&dmarker, matrixsize2) ; cudaMalloc((void**)&ddps, matrixsize2) ; cudaMalloc((void**)&dduv, matrixsize2) ; cudaMalloc((void **)&d_cond, sizeof(struct Conditions)) ; // cudaMemcpy(dfn, hfn, matrixsize3, cudaMemcpyHostToDevice) ; cudaMemcpy(dfe, hfe, matrixsize3, cudaMemcpyHostToDevice) ; cudaMemcpy(dfp, hfp, matrixsize3, cudaMemcpyHostToDevice) ; cudaMemcpy(dpn, hpn, matrixsize2, cudaMemcpyHostToDevice) ; 
cudaMemcpy(dun, hun, matrixsize2, cudaMemcpyHostToDevice) ; cudaMemcpy(dvn, hvn, matrixsize2, cudaMemcpyHostToDevice) ; //copying memory from host to device for marker array cudaMemcpy(dmarker, hmarker, matrixsize2, cudaMemcpyHostToDevice) ; cudaMemcpy(ddps, hdps, matrixsize2, cudaMemcpyHostToDevice) ; cudaMemcpy(dduv, hduv, matrixsize2, cudaMemcpyHostToDevice) ; cudaMemcpy(d_cond, &cond, sizeof(struct Conditions), cudaMemcpyHostToDevice) ; // //ƒuƒƒbƒNƒTƒCƒY‚ƃOƒŠƒbƒhƒTƒCƒY‚̐ݒè// dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y) ; dim3 block(imax2/BLOCK_SIZE_X, jmax2/BLOCK_SIZE_Y) ; // //unsigned int timer = 0 ; //CUT_SAFE_CALL(cutCreateTimer(&timer)) ; //CUT_SAFE_CALL(cutStartTimer(timer)) ; PRECISION elapsed_time_ms = 0.0f ; cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); // for(nc=1;nc<=ncye;nc++){ //original thread // matrix1<<<block, threads>>>(dfe, dfp, dfn, d_cond) ; matrix1<<<block, threads>>>(dfe, dfp, dfn, dmarker, dun, dvn, dpn, d_cond) ; cudaThreadSynchronize() ; //‰¼‘z—¬‘©–@ŒÄ‚яo‚µˆÊ’u// matrix2<<<block, threads>>>(dfp, dpn, dun, dvn, ddps, dduv, d_cond) ; cudaThreadSynchronize() ; cudaMemcpy(hduv, dduv, matrixsize2, cudaMemcpyDeviceToHost) ; cudaMemcpy(hdps, ddps, matrixsize2, cudaMemcpyDeviceToHost) ; dps = 0.0 ; duv = 0.0 ; for(j=0;j<=jmax;j++){ for(i=0;i<=imax;i++){ dps += hdps[PTR(i, j)] ; duv += hduv[PTR(i, j)] ; } } dps = sqrt( rnp * dps) ; duv = sqrt(0.5 * rnp * duv) ; if(duv > dma){ dma = duv ; } rda = duv / dma ; CUDAboundp<<<block, threads>>>(dpn, dun, dvn, d_cond) ; cudaThreadSynchronize() ; matrix3<<<block, threads>>>(dfe, dfp, dfn, dpn, dun, dvn, d_cond) ; cudaThreadSynchronize() ; CUDAboundf<<<block, threads>>>(dfe, dfp, dfn, dpn, dun, dvn, d_cond) ; cudaThreadSynchronize() ; if(nc%nwri == 0){ printf("Resids = %d %e %e %e\n",nc,dps,duv,rda); } if(rda < cda) break ; // original definition } // printf("Residuals at timestep= %d, L2-pressure= %e, L2-uv= %e, L2-total= %e\n",nc,dps,duv,rda) ; 
//CUT_SAFE_CALL(cutStopTimer(timer)) ; //printf("Processing time : %f [msec]\n", cutGetTimerValue(timer)) ; //CUT_SAFE_CALL(cutDeleteTimer(timer)) ; cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsed_time_ms, start, stop ); printf("Processing time : %f [msec]\n", elapsed_time_ms) ; cudaEventDestroy( start ); cudaEventDestroy( stop ); cudaMemcpy(hpn, dpn, matrixsize2, cudaMemcpyDeviceToHost) ; cudaMemcpy(hun, dun, matrixsize2, cudaMemcpyDeviceToHost) ; cudaMemcpy(hvn, dvn, matrixsize2, cudaMemcpyDeviceToHost) ; cudaThreadSynchronize() ; printf("Writing result to file\n") ; fop = fopen("result.plt", "w") ; fprintf(fop, " variables=x,y,p,u,v\n" ); fprintf(fop, "zone t = flowfield\n" ); fprintf(fop, "i = %d , j = %d , f=point \n" ,ima , jma) ; for(j=jsd;j<=jed;j++){ for(i=isd;i<=ied;i++){ fprintf (fop, "%e %e %e %e %e\n",xg[i][j], yg[i][j], hpn[PTR(i, j)], hun[PTR(i, j)], hvn[PTR(i, j)]) ; } } /* fprintf(fop, " variables=x,y,p,u,v\n " ); fprintf(fop, "zone t = marker\n" ); fprintf(fop, "i = %d , j = %d , f=point \n" ,ima , jma) ; for(j=jsd;j<=jed;j++){ for(i=isd;i<=ied;i++){ fprintf (fop, "%e %e %e %e %e\n",xg[i][j], yg[i][j], hmarker[PTR(i, j)], hun[PTR(i, j)], hvn[PTR(i, j)]) ; } } */ fclose(fop) ; // free(hex) ; free(hey) ; free(hwe) ; free(hfn) ; free(hfe) ; free(hfp) ; free(hpn) ; free(hun) ; free(hvn) ; free(hdps) ; free(hduv) ; cudaFree(dfe) ; cudaFree(dfp) ; cudaFree(dfn) ; cudaFree(dpn) ; cudaFree(dun) ; cudaFree(dvn) ; cudaFree(ddps) ; cudaFree(dduv) ; }
14,498
extern "C" {

// Thin wrappers over the CUDA built-in index/dimension registers; kept with
// these exact names because the generated code is linked against them.
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }

__global__ void lambda_10603(float*, float*);

// Element-wise copy src -> dst over a 2D launch.
// The linear index uses a fixed stride of 2048 elements per y-row:
//   idx = (global y) * 2048 + (global x).
// No bounds check is performed, so the launch configuration must match the
// buffer extents.
__global__ __launch_bounds__ (128 * 1 * 1)
void lambda_10603(float* src, float* dst)
{
    const int col = blockDim_x() * blockIdx_x() + threadIdx_x();
    const int row = blockDim_y() * blockIdx_y() + threadIdx_y();
    const int idx = 2048 * row + col;
    dst[idx] = src[idx];
}

}
14,499
//////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round 3:
////sparse linear solver
//////////////////////////////////////////////////////////////////////////
#include <fstream>
#include <iostream>
// NOTE(review): memcpy/memset (<cstring>) and pow/ceil (<cmath>) are used
// below but not included directly — this relies on transitive includes.
using namespace std;

//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and
////author names. Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name {
std::string team = "Slim_shaders";
std::string author_1 = "Matthew_Kenney";
std::string author_2 = "Andrw_Yang";
}; // namespace name

//////////////////////////////////////////////////////////////////////////
////TODO: Read the following three CPU implementations for Jacobi,
////Gauss-Seidel, and Red-Black Gauss-Seidel carefully and understand the
////steps for these numerical algorithms
//////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////
////These are the global variables that define the domain of the problem to
////solve. You will need to use these parameters or macros in your GPU
////implementations.
//////////////////////////////////////////////////////////////////////////
const int n = 128; ////grid size, we will change this value to up to 128 to test your code
const int g = 1;   ////padding size (one ring of ghost cells)
const int s = (n + 2 * g) * (n + 2 * g); ////array size
// NOTE(review): both macros expand unparenthesised; safe for the plain
// i/j arguments used in this file, but fragile for expression arguments.
#define I(i, j) (i + g) * (n + 2 * g) + (j + g) ////2D coordinate -> array index
#define B(i, j) i < 0 || i >= n || j < 0 || j >= n ////check boundary
const bool verbose = true; ////set false to turn off print for x and residual
const double tolerance = 1e-3; ////tolerance for the iterative solver
const int blockDimX = 8; ////CUDA block edge length used by the GPU solver
#define BlockI(i, j) (i + g) * (blockDimX + 2 * g) + (j + g) ////2D coordinate -> shared-memory tile index

//////////////////////////////////////////////////////////////////////////
////The following are three sample implementations for CPU iterative solvers

// Jacobi iteration with two ping-pong buffers; iterates until the squared
// residual sum drops below `tolerance` or max_num iterations elapse.
// NOTE(review): the final `x = xr;` only reassigns the local pointer — after
// an odd number of iterations the converged values live in `buf`, which is
// freed; confirm whether callers depend on x holding the last iterate.
void Jacobi_Solver(double *x, const double *b) {
  double *buf = new double[s];
  memcpy(buf, x, sizeof(double) * s);
  double *xr = x;   ////read buffer pointer
  double *xw = buf; ////write buffer pointer
  int iter_num = 0;  ////iteration number
  int max_num = 1e5; ////max iteration number
  double residual = 0.0; ////residual
  do {
    ////update x values using the Jacobi iterative scheme
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        xw[I(i, j)] = (b[I(i, j)] + xr[I(i - 1, j)] + xr[I(i + 1, j)] +
                       xr[I(i, j - 1)] + xr[I(i, j + 1)]) / 4.0;
      }
    }
    ////calculate residual
    residual = 0.0;
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        residual += pow(4.0 * xw[I(i, j)] - xw[I(i - 1, j)] - xw[I(i + 1, j)] -
                        xw[I(i, j - 1)] - xw[I(i, j + 1)] - b[I(i, j)], 2);
      }
    }
    // if (verbose)
    //   cout << "res: " << residual << endl;
    ////swap the buffers
    double *swap = xr;
    xr = xw;
    xw = swap;
    iter_num++;
  } while (residual > tolerance && iter_num < max_num);
  x = xr;
  cout << "Jacobi solver converges in " << iter_num
       << " iterations, with residual " << residual << endl;
  delete[] buf;
}

// In-place Gauss-Seidel sweep (uses already-updated neighbours within the
// same iteration), with the same residual/termination logic as Jacobi.
void Gauss_Seidel_Solver(double *x, const double *b) {
  int iter_num = 0;  ////iteration number
  int max_num = 1e5; ////max iteration number
  double residual = 0.0; ////residual
  do {
    ////update x values using the Gauss-Seidel iterative scheme
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        x[I(i, j)] = (b[I(i, j)] + x[I(i - 1, j)] + x[I(i + 1, j)] +
                      x[I(i, j - 1)] + x[I(i, j + 1)]) / 4.0;
      }
    }
    ////calculate residual
    residual = 0.0;
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        residual += pow(4.0 * x[I(i, j)] - x[I(i - 1, j)] - x[I(i + 1, j)] -
                        x[I(i, j - 1)] - x[I(i, j + 1)] - b[I(i, j)], 2);
      }
    }
    if (verbose)
      cout << "res: " << residual << endl;
    iter_num++;
  } while (residual > tolerance && iter_num < max_num);
  cout << "Gauss-Seidel solver converges in " << iter_num
       << " iterations, with residual " << residual << endl;
}

// Gauss-Seidel split into a red pass ((i+j) even) followed by a black pass
// ((i+j) odd); each pass only reads cells of the opposite colour, which is
// what makes the scheme parallelisable.
void Red_Black_Gauss_Seidel_Solver(double *x, const double *b) {
  int iter_num = 0;  ////iteration number
  int max_num = 1e5; ////max iteration number
  double residual = 0.0; ////residual
  do {
    ////red G-S
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        if ((i + j) % 2 == 0) ////Look at this line!
          x[I(i, j)] = (b[I(i, j)] + x[I(i - 1, j)] + x[I(i + 1, j)] +
                        x[I(i, j - 1)] + x[I(i, j + 1)]) / 4.0;
      }
    }
    ////black G-S
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        if ((i + j) % 2 == 1) ////And this line!
          x[I(i, j)] = (b[I(i, j)] + x[I(i - 1, j)] + x[I(i + 1, j)] +
                        x[I(i, j - 1)] + x[I(i, j + 1)]) / 4.0;
      }
    }
    ////calculate residual
    residual = 0.0;
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        residual += pow(4.0 * x[I(i, j)] - x[I(i - 1, j)] - x[I(i + 1, j)] -
                        x[I(i, j - 1)] - x[I(i, j + 1)] - b[I(i, j)], 2);
      }
    }
    if (verbose)
      cout << "res: " << residual << endl;
    iter_num++;
  } while (residual > tolerance && iter_num < max_num);
  cout << "Red-Black Gauss-Seidel solver converges in " << iter_num
       << " iterations, with residual " << residual << endl;
}

//////////////////////////////////////////////////////////////////////////
////In this function, we are solving a Poisson equation -laplace(p)=b, with
////p=x^2+y^2 and b=4. The boundary conditions are set on the one-ring ghost
////cells of the grid.
//////////////////////////////////////////////////////////////////////////
void Test_CPU_Solvers() {
  double *x = new double[s];
  memset(x, 0x0000, sizeof(double) * s);
  double *b = new double[s];
  for (int i = -1; i <= n; i++) {
    for (int j = -1; j <= n; j++) {
      b[I(i, j)] = 4.0; ////set the values for the right-hand side
    }
  }
  //////////////////////////////////////////////////////////////////////////
  ////test Jacobi
  for (int i = -1; i <= n; i++) {
    for (int j = -1; j <= n; j++) {
      if (B(i, j))
        x[I(i, j)] = (double)(i * i + j * j); ////set boundary condition for x
    }
  }
  Jacobi_Solver(x, b);
  if (verbose) {
    cout << "\n\nx for Jacobi:\n";
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        cout << x[I(i, j)] << ", ";
      }
      cout << std::endl;
    }
  }
  cout << "\n\n";
  // ////////////////////////////////////////////////////////////////////////
  // ////test Gauss-Seidel (disabled)
  // memset(x, 0x0000, sizeof(double) * s);
  // for (int i = -1; i <= n; i++) {
  //   for (int j = -1; j <= n; j++) {
  //     if (B(i, j))
  //       x[I(i, j)] = (double)(i * i + j * j); ////set boundary condition for x
  //   }
  // }
  // Gauss_Seidel_Solver(x, b);
  // if (verbose) {
  //   cout << "\n\nx for Gauss-Seidel:\n";
  //   for (int i = 0; i < n; i++) {
  //     for (int j = 0; j < n; j++) {
  //       cout << x[I(i, j)] << ", ";
  //     }
  //     cout << std::endl;
  //   }
  // }
  // cout << "\n\n";
  // ////////////////////////////////////////////////////////////////////////
  // ////test Red-Black Gauss-Seidel (disabled)
  // memset(x, 0x0000, sizeof(double) * s);
  // for (int i = -1; i <= n; i++) {
  //   for (int j = -1; j <= n; j++) {
  //     if (B(i, j))
  //       x[I(i, j)] = (double)(i * i + j * j); ////set boundary condition for x
  //   }
  // }
  // Red_Black_Gauss_Seidel_Solver(x, b);
  // if (verbose) {
  //   cout << "\n\nx for Red-Black Gauss-Seidel:\n";
  //   for (int i = 0; i < n; i++) {
  //     for (int j = 0; j < n; j++) {
  //       cout << x[I(i, j)] << ", ";
  //     }
  //     cout << std::endl;
  //   }
  // }
  // cout << "\n\n";
  //////////////////////////////////////////////////////////////////////////
  delete[] x;
  delete[] b;
}

//////////////////////////////////////////////////////////////////////////
////TODO 1: your GPU variables and functions start here

// One Jacobi sweep per launch. Each thread computes the new value of its
// cell into the dynamically-sized shared tile xw (BlockI indexing, one ghost
// ring), then writes it back to xr after a block barrier.
// NOTE(review): the halo entries of xw written below are never read — only
// xw[BlockI(i,j)] of the owning thread is copied back; and because xr is
// updated in place with only a block-level barrier, blocks that run at
// different times may read partially-updated neighbours (Jacobi/Gauss-Seidel
// hybrid behaviour rather than a pure Jacobi step) — confirm intent.
__global__ void GPU_Solver(double *xr, const double *b) {
  int global_i = blockIdx.x * blockDim.x + threadIdx.x;
  int global_j = blockIdx.y * blockDim.y + threadIdx.y;
  int i = threadIdx.x;
  int j = threadIdx.y;
  if (global_i >= n || global_j >= n)
    return;
  extern __shared__ double xw[]; // update to X matrix
  //////////////////////
  // update xw values //
  //////////////////////
  xw[BlockI(i,j)]=(b[I(global_i,global_j)]+xr[I(global_i-1,global_j)]+xr[I(global_i+1,global_j)]
                  +xr[I(global_i,global_j-1)]+xr[I(global_i,global_j+1)])/4.0;
  // Load cells that border the block
  if (i == 0) {
    if(global_i == 0)
      xw[BlockI(i-1,j)] = 0.0;
    else
      xw[BlockI(i-1,j)]=(b[I(global_i-1,global_j)]+xr[I(global_i-2,global_j)]+xr[I(global_i,global_j)]
                        +xr[I(global_i-1,global_j-1)]+xr[I(global_i-1,global_j+1)])/4.0;
  }
  else if(i == blockDim.x - 1){
    if(global_i == n - 1)
      xw[BlockI(i+1,j)] = 0.0;
    else
      xw[BlockI(i+1,j)]=(b[I(global_i+1,global_j)]+xr[I(global_i,global_j)]+xr[I(global_i+2,global_j)]
                        +xr[I(global_i+1,global_j-1)]+xr[I(global_i+1,global_j+1)])/4.0;
  }
  if (j == 0) {
    if (global_j == 0)
      xw[BlockI(i,j-1)] = 0.0;
    else
      xw[BlockI(i,j-1)]=(b[I(global_i,global_j-1)]+xr[I(global_i-1,global_j-1)]+xr[I(global_i+1,global_j-1)]
                        +xr[I(global_i,global_j-2)]+xr[I(global_i,global_j)])/4.0;
  }
  else if (j == blockDim.y - 1){
    if (global_j == n - 1)
      xw[BlockI(i,j+1)] = 0.0;
    else
      xw[BlockI(i,j+1)]=(b[I(global_i,global_j+1)]+xr[I(global_i-1,global_j+1)]+xr[I(global_i+1,global_j+1)]
                        +xr[I(global_i,global_j)]+xr[I(global_i,global_j+2)])/4.0;
  }
  __syncthreads();
  ///////////////////
  // swap buffers: //
  ///////////////////
  xr[I(global_i, global_j)] = xw[BlockI(i,j)];
}

// Accumulates the squared residual of the 5-point stencil into *residual.
// NOTE(review): thread (0,0) of *every* block resets *residual to 0 without
// any inter-block ordering, so a late-starting block can wipe contributions
// already added by other blocks — the host-side zeroing of residual each
// iteration suggests the in-kernel reset should be removed. Also, the
// dynamic shared array is sized for blockDim.x*blockDim.y floats but only
// element 0 is used (all threads atomicAdd into it).
__global__ void Calc_Residual(double *xr, const double *b, float* residual) {
  int global_i = blockIdx.x * blockDim.x + threadIdx.x;
  int global_j = blockIdx.y * blockDim.y + threadIdx.y;
  int i = threadIdx.x;
  int j = threadIdx.y;
  int local_thread_id = threadIdx.y * blockDim.x+ threadIdx.x;
  // Set global residual to zero and set all shared memory to 0:
  if (i == 0 && j == 0)
    *residual = 0.f;
  extern __shared__ float block_residual[];
  block_residual[local_thread_id] = 0.f;
  __syncthreads();
  // Calculate additions to block residual:
  float residual_add = (float) (4.0 * xr[I(global_i,global_j)] - xr[I(global_i-1,global_j)]
                       - xr[I(global_i+1,global_j)] - xr[I(global_i,global_j-1)]
                       - xr[I(global_i,global_j+1)] - b[I(global_i,global_j)]);
  residual_add *= residual_add; // ^2
  // add to block residual
  atomicAdd(&block_residual[0], residual_add);
  __syncthreads();
  // send back to global memory:
  if (i == 0 && j == 0)
    atomicAdd(residual, block_residual[0]);
}

////Your implementations end here
//////////////////////////////////////////////////////////////////////////

// Output stream for the competition result file (opened in main()).
ofstream out;

//////////////////////////////////////////////////////////////////////////
////GPU test function
// Sets up the same Poisson problem as Test_CPU_Solvers, runs the GPU Jacobi
// iteration until the device-computed residual passes `tolerance`, then
// recomputes the residual on the host and records it with the timing.
// NOTE(review): x_gpu, b_gpu and residual_gpu are never cudaFree'd.
void Test_GPU_Solver() {
  double *x = new double[s];
  memset(x, 0x0000, sizeof(double) * s);
  double *b = new double[s];
  //////////////////////////////////////////////////////////////////////////
  ////initialize x and b
  for (int i = -1; i <= n; i++) {
    for (int j = -1; j <= n; j++) {
      b[I(i, j)] = 4.0; ////set the values for the right-hand side
    }
  }
  for (int i = -1; i <= n; i++) {
    for (int j = -1; j <= n; j++) {
      if (B(i, j))
        x[I(i, j)] = (double)(i * i + j * j); ////set boundary condition for x
    }
  }
  float residual_host = 0.f;
  double* x_gpu;
  double* b_gpu;
  float* residual_gpu;
  cudaMalloc((void **)&x_gpu, s * sizeof(double));
  cudaMalloc((void **)&b_gpu, s * sizeof(double));
  cudaMalloc((void **)&residual_gpu, sizeof(float));
  cudaMemcpy(x_gpu, x, s * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(b_gpu, b, s * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(residual_gpu, &residual_host, sizeof(float), cudaMemcpyHostToDevice);
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  float gpu_time = 0.0f;
  cudaDeviceSynchronize();
  cudaEventRecord(start);
  //////////////////////////////////////////////////////////////////////////
  ////TODO 2: call your GPU functions here
  ////Requirement: You need to copy data from the CPU arrays, conduct
  ////computations on the GPU, and copy the values back from GPU to CPU. The
  ////final positions should be stored in the same place as the CPU function,
  ////i.e., the array of x. The correctness of your simulation will be
  ////evaluated by the residual (<1e-3).
  //////////////////////////////////////////////////////////////////////////
  int thread_x = blockDimX;
  int thread_y = blockDimX;
  int block_x = ceil((double)n / (double)thread_x);
  int block_y = ceil((double)n / (double)thread_y);
  int xw_size= (thread_x + 2 * g) * (thread_y + 2 * g);   // shared tile incl. ghost ring
  int residual_arr_size = thread_x * thread_y;            // shared floats for Calc_Residual
  int max_num = 1e5; ////max iteration number
  int iter_num = 0;
  do{
    residual_host = 0.f;
    GPU_Solver<<<dim3(block_x, block_y), dim3(thread_x, thread_y), xw_size * sizeof(double)>>> (x_gpu, b_gpu);
    cudaDeviceSynchronize();
    Calc_Residual<<<dim3(block_x, block_y), dim3(thread_x, thread_y), residual_arr_size * sizeof(float)>>> (x_gpu, b_gpu, residual_gpu);
    // blocking copy doubles as the synchronisation point for Calc_Residual
    cudaMemcpy(&residual_host, residual_gpu, sizeof(float), cudaMemcpyDeviceToHost);
    // cout << "res: " << residual_host << endl; // disable this print before submission
    iter_num += 1;
  } while (residual_host > tolerance && iter_num < max_num);
  // disable print before submission
  // cout << "GPU Jacobi solver converges in " << iter_num
  //      << " iterations, with residual " << residual_host << endl;
  cudaEventRecord(end);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&gpu_time, start, end);
  printf("\nGPU runtime: %.4f ms\n", gpu_time);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  //////////////////////////////////////////////////////////////////////////
  cudaMemcpy(x, x_gpu, s * sizeof(double), cudaMemcpyDeviceToHost);
  cudaMemcpy(b, b_gpu, s * sizeof(double), cudaMemcpyDeviceToHost);
  ////output x
  if (verbose) {
    cout << "\n\nx for your GPU solver:\n";
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        cout << x[I(i, j)] << ", ";
      }
      cout << std::endl;
    }
  }
  ////calculate residual (independent host-side check in double precision)
  double residual = 0.0;
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      residual += pow(4.0 * x[I(i, j)] - x[I(i - 1, j)] - x[I(i + 1, j)] -
                      x[I(i, j - 1)] - x[I(i, j + 1)] - b[I(i, j)], 2);
    }
  }
  cout << "\n\nresidual for your GPU solver: " << residual << endl;
  out << "R0: " << residual << endl;
  out << "T1: " << gpu_time << endl;
  //////////////////////////////////////////////////////////////////////////
  delete[] x;
  delete[] b;
}

// Opens the per-team result file and runs the GPU test.
int main() {
  if(name::team=="Team_X"){
    printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
    return 0;
  }
  std::string file_name=name::team+"_competition_3_linear_solver.dat";
  out.open(file_name.c_str());
  if(out.fail()){
    printf("\ncannot open file %s to record results\n",file_name.c_str());
    return 0;
  }
  // Test_CPU_Solvers(); ////You may comment out this line to run your GPU solver only
  Test_GPU_Solver(); ////Test function for your own GPU implementation
  return 0;
}
14,500
#include <iostream>
#include <stdio.h>
#include "postProcessing.cuh"

// Threshold each entry of the (relaxed) solution vector to a binary value:
// entries strictly greater than 0.5 become 1, all others become 0.
//
//   d_x      : device pointer to the solution array, modified in place
//   numNodes : number of valid entries in d_x
//
// Fix: the previous version indexed with threadIdx.x only, so any launch
// with more than one block — or with fewer threads than numNodes — silently
// left part of the array unrounded. A grid-stride loop makes the kernel
// correct for any <<<grid, block>>> configuration while producing identical
// results for the old single-block case.
template <class T>
__global__ void round_solution(T *d_x, int numNodes){
    int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < numNodes;
         idx += stride){
        d_x[idx] = (d_x[idx] > 0.5) ? 1 : 0;
    }
}

//template void export_result <float> (const char*, float*, int);
template __global__ void round_solution <float> (float*, int);
template __global__ void round_solution <double> (double*, int);