serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
4,201
#include "includes.h"

// Initialize per-node BFS/SSSP state for gSize nodes.
//   U: "unvisited" flags, F: frontier flags, d: tentative distances.
// One thread per node; launch with at least gSize total threads.
__global__ void init(int* U, int* F, int* d, int startNode, size_t gSize)
{
    int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
    if (globalThreadId < gSize) {
        U[globalThreadId] = 1;        // every node starts unvisited
        F[globalThreadId] = 0;        // frontier starts empty
        d[globalThreadId] = INT_MAX;  // "infinite" distance
    }
    // BUGFIX: the search must be seeded at `startNode`; the original
    // hard-coded node 0 and ignored the parameter entirely.
    if (globalThreadId == startNode) {
        d[globalThreadId] = 0;  // source is at distance 0
        U[globalThreadId] = 0;  // already visited
        F[globalThreadId] = 1;  // and in the initial frontier
    }
}
4,202
#define DIM 64
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <cuda_runtime_api.h>
#include <assert.h>
#include <stdlib.h>
#include <chrono>

#define TILE_DIM 32

// Naive one-thread-per-element square matrix multiply P = M*N (Width x Width).
// Not used by main(); kept as a reference implementation.
__global__ void MatrixMulKernel(double *M, double *N, double *P, int Width)
{
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    if ((Row < Width) && (Col < Width)) {
        double Pvalue = 0;
        for (int k = 0; k < Width; k++) {
            Pvalue += M[Row*Width + k] * N[k*Width + Col];
        }
        P[Row*Width + Col] = Pvalue;
    }
}

// Fill a DIM x DIM matrix with pseudo-random doubles.
void populateMatrix(double *M)
{
    srand(time(NULL));
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            M[i * DIM + j] = (double)((rand() % 10000) / (double)DIM);
        }
    }
}

// Tiled multiply C = A*B for rectangular operands (requires ACols == BRows).
// Must be launched with blockDim == (TILE_DIM, TILE_DIM). Uses static
// shared-memory tiles, so no dynamic shared memory is needed at launch.
__global__ void MatMul(double* A, double* B, double* C,
                       int ARows, int ACols, int BRows, int BCols,
                       int CRows, int CCols)
{
    double CValue = 0;
    int Row = blockIdx.y*TILE_DIM + threadIdx.y;
    int Col = blockIdx.x*TILE_DIM + threadIdx.x;

    __shared__ double As[TILE_DIM][TILE_DIM];
    __shared__ double Bs[TILE_DIM][TILE_DIM];

    for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
        // Stage one tile of each operand, zero-padding outside the matrices.
        if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
            As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;

        if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
            Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;

        __syncthreads();  // tiles fully written before use

        for (int n = 0; n < TILE_DIM; ++n)
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];

        __syncthreads();  // tiles fully consumed before the next overwrite
    }

    if (Row < CRows && Col < CCols)
        C[Row*CCols + Col] = CValue;  // Row/Col equal the index the original spelled out long-hand
}

// Compute P = M*N (both DIM x DIM) by splitting the product into four
// quadrants, each computed on its own CUDA stream:
//   P00 = Mtop*Nleft  (stream 0)   P01 = Mtop*Nright (stream 1)
//   P10 = Mbot*Nleft  (stream 2)   P11 = Mbot*Nright (stream 3)
// Prints the measured GPU time in seconds to stdout. `size` is unused and
// kept only for interface compatibility; dimensions come from DIM.
void LaunchKernel(double *M, double *N, double *P, int size)
{
    const int n_stream = 4;
    cudaStream_t stream[n_stream];
    for (int i = 0; i < n_stream; i++) {
        cudaStreamCreate(&stream[i]);
    }

    // Split N column-wise: N_3 = left half, N_4 = right half.
    double *N_3 = (double *)malloc(DIM * DIM/2 * sizeof(double));
    double *N_4 = (double *)malloc(DIM * DIM/2 * sizeof(double));
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            if (j < DIM/2)
                N_3[i*DIM/2 + j] = N[i*DIM + j];
            else
                N_4[i*DIM/2 + (j - DIM/2)] = N[i*DIM + j];
        }
    }

    // Per-stream device buffers: half of M, half of N, one quadrant of P.
    size_t slice = DIM * (DIM/2);
    double *d_M0, *d_N0, *d_P0;
    double *d_M1, *d_N1, *d_P1;
    double *d_M2, *d_N2, *d_P2;
    double *d_M3, *d_N3, *d_P3;
    cudaMalloc((void **)&d_M0, slice * sizeof(double));
    cudaMalloc((void **)&d_N0, slice * sizeof(double));
    cudaMalloc((void **)&d_P0, (DIM/2) * (DIM/2) * sizeof(double));
    cudaMalloc((void **)&d_M1, slice * sizeof(double));
    cudaMalloc((void **)&d_N1, slice * sizeof(double));
    cudaMalloc((void **)&d_P1, (DIM/2) * (DIM/2) * sizeof(double));
    cudaMalloc((void **)&d_M2, slice * sizeof(double));
    cudaMalloc((void **)&d_N2, slice * sizeof(double));
    cudaMalloc((void **)&d_P2, (DIM/2) * (DIM/2) * sizeof(double));
    cudaMalloc((void **)&d_M3, slice * sizeof(double));
    cudaMalloc((void **)&d_N3, slice * sizeof(double));
    cudaMalloc((void **)&d_P3, (DIM/2) * (DIM/2) * sizeof(double));

    // Host result quadrants.
    double *ker0 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
    double *ker1 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
    double *ker2 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
    double *ker3 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));

    dim3 block(TILE_DIM, TILE_DIM, 1);
    dim3 grid(ceil((double)DIM / block.x), ceil(((double)DIM/2) / block.y), 1);

    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();

    // Upload operands per stream: d_M* = row half of M, d_N* = column half of N.
    cudaMemcpyAsync(d_M0, M,         slice * sizeof(double), cudaMemcpyHostToDevice, stream[0]);
    cudaMemcpyAsync(d_N0, N_3,       slice * sizeof(double), cudaMemcpyHostToDevice, stream[0]);
    cudaMemcpyAsync(d_M1, M,         slice * sizeof(double), cudaMemcpyHostToDevice, stream[1]);
    cudaMemcpyAsync(d_N1, N_4,       slice * sizeof(double), cudaMemcpyHostToDevice, stream[1]);
    cudaMemcpyAsync(d_M2, M + slice, slice * sizeof(double), cudaMemcpyHostToDevice, stream[2]);
    cudaMemcpyAsync(d_N2, N_3,       slice * sizeof(double), cudaMemcpyHostToDevice, stream[2]);
    cudaMemcpyAsync(d_M3, M + slice, slice * sizeof(double), cudaMemcpyHostToDevice, stream[3]);
    cudaMemcpyAsync(d_N3, N_4,       slice * sizeof(double), cudaMemcpyHostToDevice, stream[3]);

    // MatMul uses static shared memory only; the original passed an unused
    // dynamic shared-memory byte count, dropped here.
    MatMul<<<grid, block, 0, stream[0]>>>(d_M0, d_N0, d_P0, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
    MatMul<<<grid, block, 0, stream[1]>>>(d_M1, d_N1, d_P1, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
    MatMul<<<grid, block, 0, stream[2]>>>(d_M2, d_N2, d_P2, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
    MatMul<<<grid, block, 0, stream[3]>>>(d_M3, d_N3, d_P3, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);

    cudaMemcpyAsync(ker0, d_P0, DIM/2 * DIM/2 * sizeof(double), cudaMemcpyDeviceToHost, stream[0]);
    cudaMemcpyAsync(ker1, d_P1, DIM/2 * DIM/2 * sizeof(double), cudaMemcpyDeviceToHost, stream[1]);
    cudaMemcpyAsync(ker2, d_P2, DIM/2 * DIM/2 * sizeof(double), cudaMemcpyDeviceToHost, stream[2]);
    cudaMemcpyAsync(ker3, d_P3, DIM/2 * DIM/2 * sizeof(double), cudaMemcpyDeviceToHost, stream[3]);

    // BUGFIX: the original read ker0..ker3 while the asynchronous
    // device-to-host copies could still be in flight; every stream must
    // finish before the host touches the buffers.
    for (int i = 0; i < n_stream; i++) {
        cudaStreamSynchronize(stream[i]);
    }

    std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
    double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
    printf("%lf\n", tempo);

    // Stitch the four quadrants back into P.
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            if (i < DIM/2 && j < DIM/2)
                P[i*DIM + j] = ker0[i * DIM/2 + j];
            else if (i < DIM/2 && j >= DIM/2)
                P[i*DIM + j] = ker1[i * DIM/2 + (j - DIM/2)];
            else if (i >= DIM/2 && j < DIM/2)
                P[i*DIM + j] = ker2[(i - DIM/2) * DIM/2 + j];
            else
                P[i*DIM + j] = ker3[(i - DIM/2) * DIM/2 + (j - DIM/2)];
        }
    }

    // BUGFIX: the original leaked every host-side buffer and never
    // destroyed the streams.
    free(N_3); free(N_4);
    free(ker0); free(ker1); free(ker2); free(ker3);
    cudaFree(d_M0); cudaFree(d_N0); cudaFree(d_P0);
    cudaFree(d_M1); cudaFree(d_N1); cudaFree(d_P1);
    cudaFree(d_M2); cudaFree(d_N2); cudaFree(d_P2);
    cudaFree(d_M3); cudaFree(d_N3); cudaFree(d_P3);
    for (int i = 0; i < n_stream; i++) {
        cudaStreamDestroy(stream[i]);
    }
}

// CPU reference multiply C = A*B, all DIM x DIM. Unused by main(); kept for
// result verification.
void MatrixMulHost(double *A, double *B, double *C)
{
    for (int c = 0; c < DIM; c++) {
        for (int d = 0; d < DIM; d++) {
            double Pvalue = 0;
            for (int k = 0; k < DIM; k++) {
                Pvalue += A[c * DIM + k] * B[k * DIM + d];
            }
            C[c * DIM + d] = Pvalue;
        }
    }
}

int main()
{
    double *A = (double *)malloc(DIM * DIM * sizeof(double));
    double *B = (double *)malloc(DIM * DIM * sizeof(double));
    double *C = (double *)malloc(DIM * DIM * sizeof(double));
    double *C_H = (double *)malloc(DIM * DIM * sizeof(double));

    populateMatrix(A);
    populateMatrix(B);
    LaunchKernel(A, B, C, DIM);

    // BUGFIX: all four host buffers were leaked. (The dead timing code
    // around LaunchKernel, whose result was never used, has been removed.)
    free(A); free(B); free(C); free(C_H);
    return 0;
}
4,203
#include "includes.h"

// One substep of a 2-D (radial i x azimuthal j) disk hydrodynamics update:
// builds the intermediate velocities VradInt/VthetaInt from Vrad/Vtheta by
// applying pressure-gradient, potential-gradient, centrifugal and imposed
// drift-torque source terms over a time step dt.
// 2-D launch: threadIdx.x/blockIdx.x -> azimuthal index j (0..nsec-1),
// threadIdx.y/blockIdx.y -> radial index i (0..nrad-1). Fields are stored
// row-major as field[i*nsec + j]; azimuthal neighbours wrap modulo nsec.
// NOTE(review): PI is assumed to come from includes.h; ZMPlus is currently
// unused (its branch is commented out pending SG_aniso_coeff).
__global__ void Substep1Kernel (double *Pressure, double *Dens, double *VradInt, double *invdiffRmed, double *Potential, double *Rinf, double *invRinf, double *Vrad, double *VthetaInt, double *Vtheta, double *Rmed, double dt, int nrad, int nsec, double OmegaFrame, int ZMPlus, double IMPOSEDDISKDRIFT, double SIGMASLOPE)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;  // azimuthal cell index
    int i = threadIdx.y + blockDim.y*blockIdx.y;  // radial cell index
    double gradp, gradphi, vradint, vradint2, supp_torque, dxtheta, invdxtheta;
    double vt2;

    // Radial momentum update on interfaces: i=1->nrad , j=0->nsec
    if (i > 0 && i<nrad && j<nsec){
        // Radial pressure gradient divided by the interface-averaged density.
        gradp = (Pressure[i*nsec + j] - Pressure[(i-1)*nsec + j])*2.0/(Dens[i*nsec + j] + Dens[(i-1)*nsec + j])*invdiffRmed[i];
        // Radial gradient of the gravitational potential.
        gradphi = (Potential[i*nsec + j] - Potential[(i-1)*nsec + j])*invdiffRmed[i];
        // Azimuthal velocity at the radial interface: average of the four
        // surrounding cells, shifted into the inertial frame, then squared
        // for the centrifugal term.
        vt2 = Vtheta[i*nsec + j] + Vtheta[i*nsec + (j+1)%nsec] + Vtheta[(i-1)*nsec + j] + Vtheta[(i-1)*nsec + (j+1)%nsec];
        vt2 = vt2/4.0 +OmegaFrame*Rinf[i];
        vt2 = vt2*vt2;
        vradint = -gradp - gradphi;
        vradint2 = vradint + vt2*invRinf[i];  // add centrifugal acceleration
        VradInt[i*nsec + j] = Vrad[i*nsec+j] + dt*vradint2;
    }

    // Azimuthal momentum update: i=0->nrad , j=0->nsec
    if (i<nrad && j<nsec){
        // Imposed disk-drift torque ~ Rmed^(SIGMASLOPE - 2.5).
        supp_torque = IMPOSEDDISKDRIFT*0.5*pow(Rmed[i], -2.5+SIGMASLOPE);
        dxtheta = 2.0*PI/(double)nsec*Rmed[i];  // azimuthal cell width at this radius
        invdxtheta = 1.0/dxtheta;
        // Azimuthal pressure gradient (periodic j-1 neighbour) over the
        // interface-averaged density.
        gradp = (Pressure[i*nsec + j] - Pressure[i*nsec + ((j-1)+nsec)%nsec])*2.0/(Dens[i*nsec +j] +Dens[i*nsec + ((j-1)+nsec)%nsec]) *invdxtheta;
        //if (ZMPlus) gradp *= 1;
        //gradp *= SG_aniso_coeff;  // define SG_aniso_coeff later
        gradphi = (Potential[i*nsec+ j] - Potential[i*nsec + ((j-1)+nsec)%nsec])*invdxtheta;
        VthetaInt[i*nsec + j] = Vtheta[i*nsec+j] - dt*(gradp+gradphi);
        VthetaInt[i*nsec + j] += dt*supp_torque;
    }
}
4,204
#include <stdlib.h>
#include <stdio.h>

#define SIZE 1365

// Write each element's own index into a[i]. s is the element count; threads
// beyond the array do nothing. (Indexing hard-codes the 1024-thread block
// width used by main's launch.)
__global__ void func(int* a, int s)
{
    int i = (blockIdx.x * 1024) + threadIdx.x;
    // BUGFIX: the original guard was `i > s`, which let thread i == s write
    // one element past the end of the array.
    if (i >= s)
        return;
    a[i] = i;
}

// Fill a managed array of SIZE ints on the GPU and print it.
int main(int argc, char** argv)
{
    int* a;
    int i, s, s2;

    s = SIZE;
    // Block size is capped at the 1024-thread hardware limit.
    if (s > 1024)
        s2 = 1024;
    else
        s2 = s;

    cudaMallocManaged(&a, sizeof(int) * s);
    func<<<(s / 1024) + 1, s2>>>(a, s);
    // Managed memory must not be touched by the host until the kernel is done.
    cudaDeviceSynchronize();

    for (i = 0; i < s; i++)
        printf("%d: %d\n", i, a[i]);

    // BUGFIX: the managed allocation was never released.
    cudaFree(a);
    return 0;
}
4,205
#include "../include/object.cuh"
#include "../include/math_utils.cuh"

// Base scene object: only stores the surface material.
__host__ __device__ Object::Object(const Material &mat): mat{mat} {}

// Sphere defined by its center point and radius.
__host__ __device__ Sphere::Sphere(const vec3 &center, float radius, const Material &mat): Object{mat}, center{center}, radius{radius} {}

// Ray/sphere intersection via the geometric construction.
// Assumes ray_dir is normalized — TODO confirm with callers.
// On a hit: dist = distance to the nearest intersection further than EPSILON
// in front of the origin, hit_loc = that point, hit_norm = hit_loc - center.
// NOTE(review): hit_norm is NOT normalized here, while Plane::intersect
// returns a unit normal — confirm callers normalize it before shading.
__host__ __device__ bool Sphere::intersect(const vec3 &ray_orig, const vec3 &ray_dir, float &dist, vec3 &hit_loc, vec3 &hit_norm) const
{
    float t0, t1;
    vec3 L = center - ray_orig;
    float tca = dot(L, ray_dir);  // projection of the center onto the ray
    // if (tca < 0) return false;
    float d2 = L.sqrNorm() - tca * tca;  // squared distance ray <-> center
    if (d2 > radius * radius) return false;
    float thc = sqrt(radius * radius - d2);  // half-chord length
    t0 = tca - thc;
    t1 = tca + thc;
    if (t1 < t0) swap(t0, t1);
    // Both intersections behind (or at) the origin -> miss.
    if (t1 < EPSILON) return false;
    // Prefer the nearer hit unless it is behind/too close to the origin.
    dist = t0 < EPSILON ? t1 : t0;
    hit_loc = ray_orig + (ray_dir * dist);
    hit_norm = hit_loc - center;
    return true;
}

// Plane through `center` with the given normal; `size` limits the extent to
// a disc of that radius (size == INF means an infinite plane).
// The normal is normalized on construction.
__host__ __device__ Plane::Plane(const vec3 &normal, const vec3 &center, float size, const Material &mat): Object{mat}, normal{normal}, center{center}, size{size}
{
    this->normal.normalize();
}

// Ray/plane intersection. Rejects near-parallel rays, hits behind the origin
// and (for finite planes) points farther than `size` from the center.
__host__ __device__ bool Plane::intersect(const vec3 &ray_orig, const vec3 &ray_dir, float &dist, vec3 &hit_loc, vec3 &hit_norm) const
{
    // Near-parallel ray: no usable intersection.
    if(abs(dot(ray_dir, normal)) < EPSILON) return false;
    dist = dot((center - ray_orig), normal) / dot(ray_dir, normal);
    if(dist < EPSILON) return false;  // intersection behind the origin
    hit_loc = ray_orig + (ray_dir * dist);
    vec3 to_center = center - hit_loc;
    // Finite plane: enforce the circular extent around the center.
    if(size != INF && to_center.sqrNorm() > size * size) return false;
    hit_norm = normal;
    return true;
}
4,206
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <sys/time.h>

/*
 Aim : To benchmark the GPU in terms of Read and Write Bandwidth with
 different types of block sizes.

 Description : Measures host->device ("Write") and device->host ("Read")
 cudaMemcpy bandwidth by copying the same block repeatedly. Block sizes
 tested: 1 B, 1 KB, 1 MB.

 Contributor : Vivek Pabani (A20332117)
*/
int main(void)
{
    double time_s;
    long start_time, end_time;
    struct timeval start, stop;
    char *host_memory;
    long blockSize[3] = {1, 1024, 1048576};
    long limit[3] = {1000000, 100000, 100000};   // copy repetitions per block size
    char blockTypes[3][20] = {"B", "KB", "MB"};
    char operationTypes[2][20] = {"Write", "Read"};
    int i = 0, j = 0, k = 0, numberOfBlocks = 3, numberOfOperations = 2;

    /* Operations: j == 0 -> Write (H2D), j == 1 -> Read (D2H). */
    for (j = 0; j < numberOfOperations; ++j) {
        printf("\n----Operation = %s---- \n\n", operationTypes[j]);
        printf("Block Size\tTotal Data\tTotal Time ms\tBandwidth MBPS\n\n");

        /* Block sizes: 0 -> 1 B, 1 -> 1 KB, 2 -> 1 MB. */
        for (k = 0; k < numberOfBlocks; ++k) {
            /* BUGFIX: limit[k] is long; the original printed it with %7d. */
            printf("1 %s\t\t%7ld %2s\t", blockTypes[k], limit[k], blockTypes[k]);

            char *dev_memory;
            /* Host and device buffers hold exactly one block. */
            host_memory = (char *)malloc(blockSize[k] * sizeof(char));
            cudaMalloc((void **)&dev_memory, blockSize[k] * sizeof(char));

            /* Write operation (host -> device). */
            if (j == 0) {
                memset(host_memory, 'a', blockSize[k]);
                gettimeofday(&start, NULL);
                for (i = 0; i < limit[k]; ++i) {
                    /* BUGFIX: the original copied from &host_memory[i],
                       reading far past the blockSize[k]-byte buffer; each
                       iteration must re-copy the same block. */
                    cudaMemcpy(dev_memory, host_memory, blockSize[k] * sizeof(char), cudaMemcpyHostToDevice);
                }
                gettimeofday(&stop, NULL);
            }
            /* Read operation (device -> host). */
            else if (j == 1) {
                cudaMemset(dev_memory, 'a', blockSize[k]);
                gettimeofday(&start, NULL);
                for (i = 0; i < limit[k]; ++i) {
                    /* BUGFIX: same out-of-bounds offset on the device side. */
                    cudaMemcpy(host_memory, dev_memory, blockSize[k] * sizeof(char), cudaMemcpyDeviceToHost);
                }
                gettimeofday(&stop, NULL);
            }

            start_time = start.tv_sec * 1000000 + start.tv_usec;
            end_time = stop.tv_sec * 1000000 + stop.tv_usec;
            time_s = end_time - start_time;  /* elapsed microseconds */
            printf("%8.3f\t%9.5f\n", (time_s / 1000), (blockSize[k] * limit[k] * 1000) / (time_s * 1024 * 1024));

            cudaFree(dev_memory);
            free(host_memory);  /* BUGFIX: host buffer was leaked per configuration */
        }
    }
    return 0;
}
4,207
#include "includes.h"

// Element-wise matrix addition C = A + B for matrices with row stride N
// (N is expected from includes.h). 2-D launch, one thread per element.
// NOTE(review): there is no bounds check, so the grid must exactly cover
// the matrix dimensions.
__global__ void MatrixAdd_CUDA(int *A, int *B, int *C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = row * N + col;
    C[idx] = A[idx] + B[idx];
}
4,208
#include <iostream>
#include <chrono>
#include <vector>
#include <cstdint>

// Abort-on-error helper for CUDA runtime calls.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl;
        if (abort) exit(code);
    }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

#define EPSILON 1.0e-7

template<typename real_t>
using hvector = std::vector<real_t>;

// Minimal RAII device buffer mirroring a host vector.
template<typename real_t>
struct dvector {
    real_t *data;

    // Allocates a device buffer of v.size() elements and uploads v.
    dvector(hvector<real_t> &v) {
        cudaMalloc(&data, v.size() * sizeof(real_t));
        cudaMemcpy(data, v.data(), v.size() * sizeof(real_t), cudaMemcpyHostToDevice);
    }
    ~dvector() { cudaFree(data); }

    // Downloads the device buffer into v (v must already have the size).
    void to_vector(hvector<real_t> &v) {
        // BUGFIX: the original call omitted the copy-direction argument,
        // which fails to compile as soon as this template is instantiated.
        cudaMemcpy(v.data(), data, v.size() * sizeof(real_t), cudaMemcpyDeviceToHost);
    }
};

/**
 * ORIGINAL: plain vector sum c = a + b, one thread per element.
 * No bounds guard: launch size must equal the array length.
 */
template<typename real_t>
__global__ void vector_sum(real_t *a, real_t *b, real_t *c)
{
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = b[i] + a[i];
}

/**
 * DMR mixed: compute the sum in real_t and, redundantly, in the narrower
 * half_t so the two results can be cross-checked for silent errors.
 */
template<typename real_t, typename half_t>
__global__ void vector_sum_dmr(real_t *a, real_t *b, real_t *c, half_t *c_half)
{
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    auto ai = a[i];
    auto bi = b[i];
    half_t bh = half_t(bi);
    half_t ah = half_t(ai);
    c[i] = bi + ai;
    c_half[i] = bh + ah;
}

// double vs double: absolute-difference threshold.
__device__ __forceinline__ bool diff(double lhs, double rhs)
{
    return (fabs(lhs - rhs) > EPSILON);
}

// double vs float shadow: compare the float bit patterns, allowing up to
// 2 ulps of divergence.
__device__ __forceinline__ bool diff(double lhs, float rhs)
{
    // __float_as_uint replaces the original pointer-cast type punning,
    // which violated strict aliasing.
    uint32_t ulhs = __float_as_uint(float(lhs));
    uint32_t urhs = __float_as_uint(rhs);
    uint32_t diff_val = (ulhs > urhs) ? ulhs - urhs : urhs - ulhs;
    return (diff_val > 2);
}

// Report every element where the redundant copy disagrees with the reference.
template<typename real_t, typename half_t>
__global__ void comparator(real_t *lhs, half_t *rhs)
{
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    auto lhsi = lhs[i];
    auto rhsi = rhs[i];
    if (diff(lhsi, rhsi)) {
        printf("Thread %d - lhs %.6e rhs %.6e\n", i, lhsi, rhsi);
    }
}

// Benchmark three variants: plain sum, full (duplicated) DMR, mixed-precision DMR.
int main()
{
    using std::chrono::high_resolution_clock;
    using std::chrono::duration_cast;
    using std::chrono::duration;
    using std::chrono::milliseconds;

    // Sizes: the kernels have no bounds guard, so size must equal the
    // total launch size exactly.
    constexpr int iterations = 100;
    constexpr size_t blocks = 8192;
    constexpr size_t threads = 1024;
    constexpr size_t size = blocks * threads;

    hvector<double> a_host(size);
    hvector<double> b_host(size);
    hvector<double> c_host(size, 0);
    hvector<float> c_dmr_host(size, 0);

    // NOTE(review): b_host[0] = 1.0/0.0 = inf — presumably a deliberate
    // stress value; confirm.
    for (int i = 0; i < size; i++) {
        a_host[i] = i;
        b_host[i] = 1.0 / double(i * 2);
    }

    dvector<double> a_dev(a_host);
    dvector<double> b_dev(b_host);
    dvector<double> c_dev(c_host);
    dvector<float> c_dmr_dev(c_dmr_host);
    dvector<double> c_dmr_full_dev(c_host);

    // Original, no DMR.
    auto original_time_t1 = high_resolution_clock::now();
    for (int it = 0; it < iterations; it++) {
        vector_sum<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dev.data);
        gpuErrchk(cudaDeviceSynchronize());
    }
    auto original_time_t2 = high_resolution_clock::now();

    // Full DMR: run the kernel twice and compare element-wise.
    auto full_dmr_time_t1 = high_resolution_clock::now();
    for (int it = 0; it < iterations; it++) {
        vector_sum<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dmr_full_dev.data);
        vector_sum<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dev.data);
        gpuErrchk(cudaDeviceSynchronize());
        comparator<<<blocks, threads>>>(c_dmr_full_dev.data, c_dev.data);
        gpuErrchk(cudaDeviceSynchronize());
    }
    auto full_dmr_time_t2 = high_resolution_clock::now();

    // Mixed DMR: one fused kernel with a float shadow, then compare.
    auto mixed_dmr_time_t1 = high_resolution_clock::now();
    for (int it = 0; it < iterations; it++) {
        vector_sum_dmr<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dev.data, c_dmr_dev.data);
        comparator<<<blocks, threads>>>(c_dev.data, c_dmr_dev.data);
        gpuErrchk(cudaDeviceSynchronize());
    }
    auto mixed_dmr_time_t2 = high_resolution_clock::now();

    /* Getting number of milliseconds as a double. */
    duration<double, std::milli> ms_original = original_time_t2 - original_time_t1;
    duration<double, std::milli> ms_full = full_dmr_time_t2 - full_dmr_time_t1;
    duration<double, std::milli> ms_mixed = mixed_dmr_time_t2 - mixed_dmr_time_t1;
    std::cout << "ms_original: " << ms_original.count() << "ms\n";
    std::cout << "ms_full: " << ms_full.count() << "ms\n";
    std::cout << "ms_mixed: " << ms_mixed.count() << "ms\n";
}
4,209
#include <stdio.h>

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

#define imin(a,b) (a<b?a:b)

const int N = 33 * 1024;
const int tpb = 256;                           // threads per block
const int bpg = imin(32, (N + tpb - 1) / tpb); // blocks per grid, capped at 32

// Element-wise product c[i] = a[i]*b[i] for all N elements, via a
// grid-stride loop so any launch configuration covers the array.
// (Despite the name, the final dot-product reduction is done on the host.)
__global__ void dot(float *a, float *b, float *c)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < N) {
        c[tid] = a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
}

// Closed form of sum_{i=0..x} i^2, used to verify the result.
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)

int main(int argc, char *argv[])
{
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;

    a = (float *)malloc(N * sizeof(float));
    b = (float *)malloc(N * sizeof(float));
    c = (float *)malloc(N * sizeof(float));
    gpuErrchk(cudaMalloc(&d_a, N * sizeof(float)));
    gpuErrchk(cudaMalloc(&d_b, N * sizeof(float)));
    gpuErrchk(cudaMalloc(&d_c, N * sizeof(float)));

    // a = 0,1,2,... and b = 2*a, so sum(a[i]*b[i]) = 2*sum(i^2).
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }

    gpuErrchk(cudaMemcpy(d_a, a, N * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, N * sizeof(float), cudaMemcpyHostToDevice));

    dot<<<bpg, tpb>>>(d_a, d_b, d_c);
    // BUGFIX: launch-configuration errors were silently ignored.
    gpuErrchk(cudaGetLastError());

    gpuErrchk(cudaMemcpy(c, d_c, N * sizeof(float), cudaMemcpyDeviceToHost));

    // Host-side reduction of the element-wise products.
    float temp = 0;
    for (int i = 0; i < N; i++) {
        temp += c[i];
    }

    printf("solved: %.6g\nclosed: %.6g\n", temp, 2 * sum_squares((float)(N - 1)));

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
4,210
#include "includes.h"

// In-place element-wise tanh over a rows x cols matrix, plus a per-row bias:
// inout[r][c] = tanhf(inout[r][c]) + bias[r].
// 2-D launch, one thread per element, guarded against grid overshoot.
// NOTE(review): the bias is added AFTER the tanh — confirm that is intended
// (activation layers usually apply tanh(x + bias)).
__global__ void tanh(float *inout, float *bias, int rows, int cols)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < rows && col < cols) {
        int idx = row * cols + col;
        inout[idx] = tanhf(inout[idx]) + bias[row];
    }
}
4,211
// Double every element of vals. Grid-stride loop: any launch configuration
// covers the whole array.
__global__ void kernel(int * vals, int size)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (; tid < size; tid += stride) {
        vals[tid] *= 2;
    }
}

// Fill a pinned host array with 0..size-1, double it on the GPU, and return
// the sum of the doubled values (i.e. 2 * size*(size-1)/2).
extern "C" int foo(int size)
{
    int * vals;
    int * devVals;
    cudaMallocHost((void**)&vals, size * sizeof(vals[0]));
    cudaMalloc((void**)&devVals, size * sizeof(devVals[0]));

    for (int i = 0; i < size; i++) {
        vals[i] = i;
    }

    cudaMemcpy(devVals, vals, size * sizeof(vals[0]), cudaMemcpyHostToDevice);
    // The original passed 16 bytes of dynamic shared memory as the third
    // launch parameter; the kernel uses none, so it has been dropped.
    kernel<<<1, 16>>>(devVals, size);
    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(vals, devVals, size * sizeof(devVals[0]), cudaMemcpyDeviceToHost);

    int tmp = 0;
    for (int i = 0; i < size; i++) {
        tmp += vals[i];
    }

    // BUGFIX: both allocations were leaked on every call.
    cudaFree(devVals);
    cudaFreeHost(vals);
    return tmp;
}
4,212
#include <assert.h>

// Device-side stub for exit(): device code cannot terminate the host
// process, so any device call to exit() traps through assert(0) instead,
// which surfaces at the next synchronizing call.
// NOTE(review): __THROW is a glibc exception-specification macro; this
// definition relies on it being provided by the included system headers.
extern "C" __device__ void exit(int ret) __THROW { assert(0); }
4,213
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>

using namespace std;

// Print each thread's full index/dimension context (debugging aid).
__global__ void checkIndex(void)
{
    printf( "thread idx: %d, %d, %d\n" , threadIdx.x , threadIdx.y , threadIdx.z );
    printf( "block idx: %d , %d, %d\n", blockIdx.x , blockIdx.y , blockIdx.z );
    printf ("block dim: %d , %d, %d\n" ,blockDim.x , blockDim.y , blockDim.z );
    printf( "grid dim: %d , %d, %d\n", gridDim.x , gridDim.y , gridDim.z);
    printf("-");
}

// Launch a small 1-D grid and dump every thread's coordinates.
int main()
{
    int n = 10;
    dim3 block(3);
    dim3 grid((n + block.x - 1) / block.x);  // ceil-div so all n elements are covered
    cout << " grid x: " << grid.x << " grid.y: " << grid.y << " grid.z : " << grid.z << std::endl;
    cout << " block x: " << block.x << " block.y : " << block.y << " block.z: " << block.z << std::endl;
    checkIndex<<<grid, block>>>();
    // Device printf output is only guaranteed once the kernel completes.
    cudaDeviceSynchronize();
}

// Alternative entry point (not called by main): enumerate CUDA devices and
// print driver/runtime versions and basic properties.
int main1(int argc, char** argv)
{
    std::cout << " Starting." << std::endl;
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        cout << "There is no device available." << std::endl;
    } else {
        cout << "Detected " << deviceCount << " cuda capable device." << endl;
    }

    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaSetDevice(dev);
    cudaGetDeviceProperties(&deviceProp, dev);
    // BUGFIX: "Deivce" typo fixed in the user-facing message.
    cout << "Device id: " << dev << " " << deviceProp.name << endl;

    int dversion, runtimeVersion;
    cudaDriverGetVersion(&dversion);
    cudaRuntimeGetVersion(&runtimeVersion);
    cout << "CUDA driver version : " << dversion << " runtime version: " << runtimeVersion << endl;
    // Integer division: reports whole GiB only.
    cout << "Total mem: " << deviceProp.totalGlobalMem/(1024*1024*1024) << endl;

    cudaDeviceReset();
    cout << "After device reset." << std::endl;
    //int dev2 = 2;
    //cudaSetDevice(dev2);
    //cudaGetDeviceProperties(&deviceProp, dev2);
    //cout << "Device id: " << dev2 << " " << deviceProp.name << endl;
    return 1;
}
4,214
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <complex>
#include <iostream>
#include <algorithm>
#include <stdio.h>
#include <cufft.h>
#include <fstream>
#include <vector>
#include <numeric>
#include <math.h>

#define PI 3.14159265359

using namespace std;

// Single-precision complex exponential:
// e^z = e^{Re z} * (cos(Im z) + i*sin(Im z)).
__device__ cufftComplex com_exp(cufftComplex z)
{
    cufftComplex res;
    float t = expf(z.x);
    float z_cos = cosf(z.y);
    float z_sin = sinf(z.y);
    res.x = z_cos * t;
    res.y = z_sin * t;
    return res;
}

// Complex product (a.x + i*a.y) * (b.x + i*b.y).
__device__ cufftComplex com_mul(cufftComplex a, cufftComplex b)
{
    cufftComplex result;
    result.x = (a.x*b.x) - (a.y*b.y);
    result.y = (a.x*b.y) + (a.y*b.x);
    return result;
}

// Angular-spectrum propagation in frequency space: multiply the transformed
// field p_in by the phase factor e^{i*(sqrt(km^2-kx^2-ky^2)-km)*d} into
// p_out; evanescent components (km^2 <= kx^2+ky^2) are zeroed.
//   d: propagation distance, nm: refractive index, res: sampling resolution,
//   sizex/sizey: grid dimensions.
// kx_o/ky_o reproduce FFT frequency ordering (non-negative then negative
// frequencies, odd/even lengths handled separately).
// NOTE(review): elements are addressed as p[i*sizex + j] with i < sizex and
// j < sizey — for non-square grids this stride looks inconsistent (expected
// i*sizey + j, or j*sizex + i); confirm against the caller's cuFFT layout.
__global__ void fft_propogate_cu(cufftComplex*p_in, cufftComplex*p_out, double d, float nm, float res, int sizex, int sizey)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    double km = (2 * PI*nm) / res;  // wavenumber in the medium
    double kx_o, ky_o, kx, ky;
    cufftComplex I;
    if (i < sizex && j < sizey)
    {
        // Map array index -> signed FFT frequency index along x.
        if (sizex % 2 != 0)
        {
            if (i < (sizex + 1) / 2) kx_o = i;
            else if (i >= (sizex + 1) / 2) kx_o = -(sizex - i);
        }
        else if (sizex % 2 == 0)
        {
            if (i < (sizex / 2)) kx_o = i;
            else if (i >= (sizex / 2)) kx_o = -(sizex - i);
        }
        // Same mapping along y.
        if (sizey % 2 != 0)
        {
            if (j < (sizey + 1) / 2) ky_o = j;
            else if (j >= (sizey + 1) / 2) ky_o = -(sizey - j);
        }
        else if (sizey % 2 == 0)
        {
            if (j < (sizey / 2)) ky_o = j;
            else if (j >= (sizey / 2)) ky_o = -(sizey - j);
        }
        kx = (kx_o / sizex) * 2 * PI;
        ky = (ky_o / sizey) * 2 * PI;
        double root_km = km * km - kx * kx - ky * ky;
        bool rt0 = root_km > 0;
        if (root_km > 0)
        {
            // Pure phase factor: i * (sqrt(km^2 - k^2) - km) * d.
            I.x = 0;
            I.y = (sqrt(root_km * rt0) - km)*d;
            p_out[i*sizex + j] = com_mul(p_in[i*sizex + j], com_exp(I));
        }
        else
        {
            // Evanescent component: fully suppressed.
            p_out[i*sizex + j].x = 0.0;
            p_out[i*sizex + j].y = 0.0;
        }
        //p_out[i*sizex + j].x = kx_o;
        //p_out[i*sizex + j].y = ky_o;
    }
}

// Accumulate the mean and mean-of-squares of the real parts of fulltuma
// into *tuma_mean and *tuma_coeff via atomics. The caller must zero both
// accumulators before launch.
// NOTE(review): the trailing __syncthreads() is only block-local and cannot
// make the grid-wide sums visible; the commented-out second pass would need
// a separate kernel launch to be correct.
__global__ void tuma_calculation_cu(cufftComplex* fulltuma, int sizex, int sizey, float *tuma_coeff , float *tuma_mean)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    float mean, std ;  // unused locals retained from the original
    int x, y;
    x = sizex;
    y = sizey;
    if (idx < sizex*sizey)
    {
        atomicAdd(tuma_mean, fulltuma[idx].x / (x*y));
        atomicAdd(tuma_coeff, (fulltuma[idx].x * fulltuma[idx].x) / (x*y));
    }
    __syncthreads();
    //*tuma_coeff -= (*tuma_mean * *tuma_mean);
    //if (idx < sizex*sizey)
    //{
    //  atomicAdd(tuma_coeff, ((fulltuma[idx].x - *tuma_mean)*(fulltuma[idx].x - *tuma_mean)));
    //}
    //*tuma_coeff /= (x*y );
}
4,215
#include "includes.h"

// Global configuration: number of images and image geometry.
int N=93, dimx=1920, dimy=2560, tam_imag=1920*2560;

// Reads the variance matrix for image d (defined elsewhere).
float *leerMatrizVarianza(int d);

// Running per-pixel arg-max over variances: for every pixel idx, if the new
// variance var[idx] beats the best seen so far (max[idx]), record the new
// maximum and remember k (the index of the image it came from) in top[idx].
// 1-D launch, one thread per pixel, guarded for the grid tail.
__global__ void kernel (float *max, float *var, int *top, int k)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int pixel_count = 1920 * 2560;  // matches the global tam_imag
    if (idx < pixel_count && var[idx] > max[idx]) {
        top[idx] = k;
        max[idx] = var[idx];
    }
}
4,216
// Include header files
#include <bits/stdc++.h>
#include <cuda.h>
#include <cmath>

#define ll long long int
#define THREADS 32

typedef float2 Complex;

const long long ARRAY_SIZE = 1024;
const long long ARRAY_BYTES = ARRAY_SIZE * sizeof(Complex);

// Scatter every input element to its bit-reversed position:
// d_rev[bitreverse_s(id)] = d_a[id], where s = log2(ARRAY_SIZE) is the
// number of significant index bits.
__global__ void bit_reverse_reorder (Complex *d_rev, Complex *d_a, int s)
{
    int id = (blockIdx.x * blockDim.x) + threadIdx.x;
    int rev = __brev(id) >> (32 - s);
    if (id < ARRAY_SIZE)
        d_rev[rev] = d_a[id];
}

// One group of butterflies for stage length m starting at offset j:
// combines a[j+k] and a[j+k+m/2] with the twiddle w = e^{-2*pi*i*k/m}.
__global__ void fft(Complex *a, int j, int m)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < m/2 && j + k + m/2 < ARRAY_SIZE) {
        Complex w, t, u;
        // w^k (w is the m-th root of unity); fast-math intrinsics trade
        // a few ulps for speed.
        w.x = __cosf((2*M_PI*k)/m);
        w.y = -__sinf((2*M_PI*k)/m);
        // u = a[j+k]
        u.x = a[j+k].x;
        u.y = a[j+k].y;
        // t = w * a[j+k+m/2]
        t.x = w.x*a[j+k+m/2].x - w.y*a[j+k+m/2].y;
        t.y = w.x*a[j+k+m/2].y + w.y*a[j+k+m/2].x;
        // a[j+k] = u + t
        a[j+k].x = u.x + t.x;
        a[j+k].y = u.y + t.y;
        // a[j+k+m/2] = u - t
        a[j+k+m/2].x = u.x - t.x;
        a[j+k+m/2].y = u.y - t.y;
    }
}

int main(int argc, char *argv[])
{
    // Complex arrays for the input signal and its transform.
    Complex h_a[ARRAY_SIZE], h_rev[ARRAY_SIZE];
    Complex *d_a, *d_rev;

    // Input signal of the form sin((2*pi*f*x)/N) with N = ARRAY_SIZE;
    // the imaginary part is 0 by default.
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_a[i].x = sin((10*M_PI*i)/ARRAY_SIZE);
        h_a[i].y = 0.0;
    }

    // Number of bits in the sample size, used for bit-reversal reordering.
    int s = (int)ceil(log2(ARRAY_SIZE));

    cudaMalloc((void**) &d_a, ARRAY_BYTES);
    cudaMalloc((void**) &d_rev, ARRAY_BYTES);
    cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // Reorder the samples as the first step of the FFT.
    bit_reverse_reorder<<<(ARRAY_SIZE+THREADS-1)/THREADS, THREADS>>>(d_rev, d_a, s);
    cudaDeviceSynchronize();

    // Naive fft parallelization (TODO: improve upon the efficiency).
    // BUGFIX: stages must start at m = 2 (i = 1). The original started at
    // i = 0 (m = 1), whose launch computed a zero-block grid — an invalid
    // launch configuration — and has no butterflies to perform anyway.
    for (int i = 1; i <= s; i++) {
        int m = 1 << i;
        for (int j = 0; j < ARRAY_SIZE; j += m) {
            // In-place butterflies for this stage/offset.
            fft<<<((m/2)+THREADS-1)/THREADS, THREADS>>>(d_rev, j, m);
        }
    }

    cudaMemcpy(h_rev, d_rev, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_rev);

    // TODO: use h_rev to plot magnitude sqrt(x^2 + y^2) vs frequency index.
}
4,217
extern "C"{

// Forward-reduction step of a cyclic-reduction tridiagonal solve along x.
// The grid is 3-D over an nx*ny*nz volume; each (giy, giz) pair owns one
// length-nx tridiagonal system stored contiguously in d_d. a_d/b_d/c_d hold
// per-level sub/main/super-diagonal values and k1_*/k2_* hold precomputed
// elimination coefficients, all indexed by log2(stride).
// NOTE(review): the *_first/*_last variants encode special first/last-row
// coefficients — confirm against the host-side coefficient setup.
__global__ void globalForwardReduction(const double *a_d, const double *b_d, const double *c_d, double *d_d, const double *k1_d, const double *k2_d, const double *b_first_d, const double *k1_first_d, const double *k1_last_d, const int nx, const int ny, const int nz, int stride)
{
    int gix = blockIdx.x*blockDim.x + threadIdx.x;
    int giy = blockIdx.y*blockDim.y + threadIdx.y;
    int giz = blockIdx.z*blockDim.z + threadIdx.z;
    int i;
    int m, n;
    int idx;
    int gi3d, gi3d0;
    double x_m, x_n;

    gi3d = giz*(nx*ny) + giy*nx + gix;  // flat index of this thread's element
    gi3d0 = giz*(nx*ny) + giy*nx + 0;   // start of this system's row

    // forward reduction
    if (stride == nx) {
        // Final level: only a 2x2 system remains; solve it in closed form.
        stride /= 2;
        m = log2((float)stride) - 1;
        n = log2((float)stride);
        // the last element
        x_m = (d_d[gi3d0 + stride-1]*b_d[n] - c_d[m]*d_d[gi3d0 + 2*stride-1])/ (b_first_d[m]*b_d[n] - c_d[m]*a_d[n]);
        x_n = (b_first_d[m]*d_d[gi3d0 + 2*stride-1] - d_d[gi3d0 + stride-1]*a_d[n])/ (b_first_d[m]*b_d[n] - c_d[m]*a_d[n]);
        d_d[gi3d0 + stride-1] = x_m;
        d_d[gi3d0 + 2*stride-1] = x_n;
    }
    else {
        // Eliminate the neighbours stride/2 away on either side of row i.
        i = (stride-1) + gix*stride;
        gi3d = gi3d0 + i;
        idx = log2((float)stride) - 1;  // coefficient level for this stride
        if (gix == 0) {
            // First reduced row: uses the special first-row k1 coefficient.
            d_d[gi3d] = d_d[gi3d] - d_d[gi3d - stride/2]*k1_first_d[idx] - d_d[gi3d + stride/2]*k2_d[idx];
        }
        else if (i == (nx-1)) {
            // Last row of the system: no right-hand neighbour.
            d_d[gi3d] = d_d[gi3d] - d_d[gi3d - stride/2]*k1_last_d[idx];
        }
        else {
            // Interior row: standard two-sided elimination.
            d_d[gi3d] = d_d[gi3d] - d_d[gi3d - stride/2]*k1_d[idx] - d_d[gi3d + stride/2]*k2_d[idx];
        }
    }
}

// Back-substitution step: recover the interleaved unknowns at this stride
// from the already-solved coarser level. b1/c1 and ai/bi/ci are the scalar
// level-1 coefficients for the first and interior rows respectively.
__global__ void globalBackSubstitution(const double *a_d, const double *b_d, const double *c_d, double *d_d, const double *b_first_d, const double b1, const double c1, const double ai, const double bi, const double ci, const int nx, const int ny, const int nz, const int stride)
{
    int gix = blockIdx.x*blockDim.x + threadIdx.x;
    int giy = blockIdx.y*blockDim.y + threadIdx.y;
    int giz = blockIdx.z*blockDim.z + threadIdx.z;
    int i;
    int idx;
    int gi3d, gi3d0;

    gi3d0 = giz*(nx*ny) + giy*nx + 0;  // start of this system's row
    i = (stride/2-1) + gix*stride;     // unknown recovered by this thread
    gi3d = gi3d0 + i;

    if (stride == 2) {
        // Finest level: scalar coefficients instead of per-level arrays.
        if (i == 0) {
            d_d[gi3d] = (d_d[gi3d] - c1*d_d[gi3d + 1])/b1;
        }
        else {
            d_d[gi3d] = (d_d[gi3d] - (ai)*d_d[gi3d - 1] - (ci)*d_d[gi3d + 1])/bi;
        }
    }
    else {
        // rint rounds to the nearest integer
        idx = rint(log2((double)stride)) - 2;
        if (gix == 0) {
            // First row has no left neighbour; uses the first-row diagonal.
            d_d[gi3d] = (d_d[gi3d] - c_d[idx]*d_d[gi3d + stride/2])/b_first_d[idx];
        }
        else {
            d_d[gi3d] = (d_d[gi3d] - a_d[idx]*d_d[gi3d - stride/2] - c_d[idx]*d_d[gi3d + stride/2])/b_d[idx];
        }
    }
}

}
4,218
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define LOG_INPUT if(0)
#define LOG_OUTPUT if(1)
#define LOG if(0)

/*
 * Element-wise (Hadamard) product of two M x N matrices stored as flat
 * row-major arrays: C[i] = A[i] * B[i].
 * Works with any 1-D launch; threads past M*N exit without touching memory.
 */
__global__ void hadamard(float *A, float *B, float *C, int M, int N)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < M * N)
        C[i] = A[i] * B[i];
}

/* Prints an m x n row-major matrix, two decimals per entry, one row per line. */
void print_matrix(float *A, int m, int n)
{
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
            printf("%.2f ", A[i * n + j]);
        printf("\n");
    }
}

/*
 * Reads t test cases from stdin.  Each case: m n followed by m*n interleaved
 * pairs (one element of A, one of B).  Computes the Hadamard product on the
 * GPU, verifies it against a host reference, and prints the result matrix.
 */
int main(void)
{
    cudaError_t err = cudaSuccess;

    int t;  /* number of test cases */
    scanf("%d", &t);

    while (t--) {
        int m, n;
        scanf("%d %d", &m, &n);
        size_t size = m * n * sizeof(float);
        LOG printf("[Hadamard product of two matrices ]\n");

        /* Host buffers. */
        float *h_A = (float *)malloc(size);
        float *h_B = (float *)malloc(size);
        float *h_C = (float *)malloc(size);
        if (h_A == NULL || h_B == NULL || h_C == NULL) {
            fprintf(stderr, "Failed to allocate host vectors!\n");
            exit(EXIT_FAILURE);
        }

        /* Interleaved input: A element, then the matching B element. */
        for (int i = 0; i < n * m; ++i) {
            scanf("%f", &h_A[i]);
            scanf("%f", &h_B[i]);
        }

        /* Device buffers. */
        float *d_A = NULL, *d_B = NULL, *d_C = NULL;
        cudaMalloc((void **)&d_A, size);
        cudaMalloc((void **)&d_B, size);
        cudaMalloc((void **)&d_C, size);

        /* Only the inputs travel host->device.  (The old code also copied
         * the UNINITIALIZED h_C into d_C — wasted bandwidth and a read of
         * indeterminate host memory; the kernel writes every d_C element.) */
        cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
        cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

        /* Ceil-divide so any m*n is covered. */
        int threadsPerBlock = 256;
        int blocksPerGrid = ((m * n) + threadsPerBlock - 1) / threadsPerBlock;
        hadamard<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, m, n);
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        /* Blocking copy back; also synchronizes with the kernel. */
        cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

        /* Verify every element against the host reference. */
        for (int i = 0; i < n * m; ++i) {
            if (fabs(h_A[i] * h_B[i] - h_C[i]) > 1e-5) {
                fprintf(stderr, "Result verification failed at element %d!\n", i);
                exit(EXIT_FAILURE);
            }
        }
        LOG printf("Test PASSED\n");

        /* Free device memory explicitly (the old code relied solely on
         * cudaDeviceReset for this). */
        cudaFree(d_A);
        cudaFree(d_B);
        cudaFree(d_C);

        err = cudaDeviceReset();
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        print_matrix(h_C, m, n);

        /* Fix: the old code leaked all three host buffers on every case. */
        free(h_A);
        free(h_B);
        free(h_C);

        LOG printf("Done\n");
    }
    return 0;
}
4,219
#include <cstdio>
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdlib>
#include <chrono>
using namespace std;

// Grid resolution of the [0,2]x[0,2] domain and pressure-solver iterations.
const int nx = 41;
const int ny = 41;
//const int nt = 10;
const int nit = 50;
//const int c = 1;

// Builds the source term b of the pressure Poisson equation from u and v.
// Launch layout: <<<ny, nx>>> — blockIdx.x is the row index j, threadIdx.x
// the column index i, so m = j*nx + i addresses the flattened field.
// Interior nodes use central differences; the first/last columns wrap
// periodically in x; the top/bottom rows keep b = 0.
// NOTE(review): rho is declared int here (the caller passes float 1.0, which
// truncates to 1) — TODO confirm that is intentional.
__global__ void build_up_b(float *b, int rho, float dt, float dx, float dy,
                           float *u, float *v){
  int m = blockIdx.x * blockDim.x + threadIdx.x;
  int bId = blockIdx.x;   // row j
  int tId = threadIdx.x;  // column i
  b[m] = 0;
  if(bId != 0 && bId != (ny-1) && tId != 0 && tId != (nx-1)){
    // interior node
    b[m] = (rho*(1/dt* ((u[m+1]-u[m-1])/(2*dx) + (v[m+nx] - v[m-nx])/(2*dy)) -
            pow((u[m+1] - u[m-1])/(2*dx),2) -
            2*((u[m+nx] - u[m-nx])/(2*dy)* (v[m+1]-v[m-1])/(2*dx)) -
            pow((v[m+nx] - v[m-nx])/(2*dy),2)));
  }else if(bId != 0 && bId != (ny-1) && tId == (nx-1)){
    // Periodic BC Pressure @ x = 2: +x neighbour wraps to column 0
    b[m] = (rho*(1/dt*((u[m-nx+1] - u[m-1])/(2*dx) +(v[m+nx] - v[m-nx])/(2*dy)) -
            pow((u[m-nx+1] - u[m-1])/(2*dx),2) -
            2*((u[m+nx]-u[m-nx])/(2*dy) * (v[m-nx+1] - v[m-1])/(2*dx)) -
            pow((v[m+nx] - v[m-nx])/(2*dy),2)));
  }else if(bId != 0 && bId != (ny-1) && tId == (0)){
    // Periodic BC Pressure @ x = 0: -x neighbour wraps to column nx-1
    b[m] = (rho*(1/dt*((u[m+1] - u[m+nx-1])/(2*dx) +(v[m+nx] - v[m-nx])/(2*dy)) -
            pow((u[m+1] - u[m+nx-1])/(2*dx),2) -
            2*((u[m+nx]-u[m-nx])/(2*dy) * (v[m+1] - v[m+nx-1])/(2*dx)) -
            pow((v[m+nx] - v[m-nx])/(2*dy),2)));
  }
}

// Jacobi iteration (nit sweeps) for the pressure Poisson equation with
// periodic BCs in x and dp/dy = 0 walls at y = 0, 2.  Launch: <<<ny, nx>>>.
// NOTE(review): __syncthreads() only synchronizes within one block (one row),
// but p[m+nx]/p[m-nx] are owned by neighbouring blocks — iterations can mix
// old/new row values across blocks.  Left as-is (Jacobi tolerates this and
// fixing it needs a grid-wide barrier or kernel-per-iteration restructure).
__global__ void pressure_poisson_periodic(float *p, float *pn, float *b,
                                          float dx, float dy){
  int m = blockIdx.x * blockDim.x + threadIdx.x;
  int bId = blockIdx.x;
  int tId = threadIdx.x;
  for(int q=0; q<nit; q++){
    __syncthreads();
    pn[m] = p[m];            // snapshot previous iterate
    __syncthreads();
    if(bId != 0 && bId != (ny-1) && tId != 0 && tId != (nx-1)){
      p[m]=(((pn[m+1]+pn[m-1])*pow(dy,2)+ (pn[m+nx]+pn[m-nx])*pow(dx,2))/
            (2*(pow(dx,2)+pow(dy,2)))-
            pow(dx,2)*pow(dy,2)/(2*(pow(dx,2)+pow(dy,2)))*b[m]);
    }else if(bId != 0 && bId != (ny-1) && tId == (nx-1)){
      // Periodic BC Pressure @ x = 2
      p[m]=(((pn[m-nx+1]+pn[m-1])*pow(dy,2)+ (pn[m+nx]+pn[m-nx])*pow(dx,2))/
            (2*(pow(dx,2)+pow(dy,2)))-
            pow(dx,2)*pow(dy,2)/(2*(pow(dx,2)+pow(dy,2)))*b[m]);
    }else if(bId != 0 && bId != (ny-1) && tId == (0)){
      // Periodic BC Pressure @ x = 0
      p[m]=(((pn[m+1]+pn[m+nx-1])*pow(dy,2)+ (pn[m+nx]+pn[m-nx])*pow(dx,2))/
            (2*(pow(dx,2)+pow(dy,2)))-
            pow(dx,2)*pow(dy,2)/(2*(pow(dx,2)+pow(dy,2)))*b[m]);
    }else if(bId == 0){
      p[m] = p[m+nx];        // dp/dy = 0 at y = 0
    }else if(bId == (ny-1)){
      p[m] = p[m-nx];        // dp/dy = 0 at y = 2
    }
  }
}

// Advances u and v one time step (explicit upwind convection + central
// diffusion + pressure gradient + body force F on u).  Launch: <<<ny, nx>>>.
// x is periodic; y = 0, 2 are no-slip walls.
__global__ void updated_u_v(float *u, float *v, float *un, float *vn, float *p,
                            float dx, float dy, float dt, float rho, float nu,
                            float F){
  int m = blockIdx.x * blockDim.x + threadIdx.x;
  int bId = blockIdx.x;
  int tId = threadIdx.x;
  if(bId != 0 && bId != (ny-1) && tId != 0 && tId != (nx-1)){
    // interior node
    u[m] = (un[m] - un[m] * dt/dx * (un[m] - un[m-1]) -
            vn[m] * dt/dy * (un[m] - un[m-nx]) -
            dt/(2*rho*dx) * (p[m+1] - p[m-1]) +
            nu * (dt/pow(dx,2)* (un[m+1] - 2*un[m] + un[m-1]) +
                  dt/pow(dy,2) * (un[m+nx] - 2*un[m] + un[m-nx])) + F* dt);
    v[m] = (vn[m] - un[m] * dt/dx * (vn[m] - vn[m-1]) -
            vn[m] * dt/dy * (vn[m] - vn[m-nx]) -
            dt/(2*rho*dy) * (p[m+nx] - p[m-nx]) +
            nu * (dt/pow(dx,2)* (vn[m+1] - 2*vn[m] + vn[m-1]) +
                  dt/pow(dy,2) * (vn[m+nx] - 2*vn[m] + vn[m-nx])));
  }else if(bId != 0 && bId != (ny-1) && tId == (nx-1)){
    // Periodic BC u @ x = 2
    u[m] = (un[m] - un[m] * dt/dx * (un[m] - un[m-1]) -
            vn[m] * dt/dy * (un[m] - un[m-nx]) -
            dt/(2*rho*dx) * (p[m-nx+1] - p[m-1]) +
            nu * (dt/pow(dx,2)* (un[m-nx+1] - 2*un[m] + un[m-1]) +
                  dt/pow(dy,2) * (un[m+nx] - 2*un[m] + un[m-nx])) + F* dt);
    // Periodic BC v @ x = 2
    v[m] = (vn[m] - un[m] * dt/dx * (vn[m] - vn[m-1]) -
            vn[m] * dt/dy * (vn[m] - vn[m-nx]) -
            dt/(2*rho*dy) * (p[m+nx] - p[m-nx]) +
            nu * (dt/pow(dx,2)* (vn[m-nx+1] - 2*vn[m] + vn[m-1]) +
                  dt/pow(dy,2) * (vn[m+nx] - 2*vn[m] + vn[m-nx])));
  }else if(bId !=0 && bId != (ny-1) && tId == 0){
    // Periodic BC u @ x = 0
    u[m] = (un[m] - un[m] * dt/dx * (un[m] - un[m+nx-1]) -
            vn[m] * dt/dy * (un[m] - un[m-nx]) -
            dt/(2*rho*dx) * (p[m+1] - p[m+nx-1]) +
            nu * (dt/pow(dx,2)* (un[m+1] - 2*un[m] + un[m+nx-1]) +
                  dt/pow(dy,2) * (un[m+nx] - 2*un[m] + un[m-nx])) + F* dt);
    // Periodic BC v @ x = 0
    v[m] = (vn[m] - un[m] * dt/dx * (vn[m] - vn[m+nx-1]) -
            vn[m] * dt/dy * (vn[m] - vn[m-nx]) -
            dt/(2*rho*dy) * (p[m+nx] - p[m-nx]) +
            nu * (dt/pow(dx,2)* (vn[m+1] - 2*vn[m] + vn[m+nx-1]) +
                  dt/pow(dy,2) * (vn[m+nx] - 2*vn[m] + vn[m-nx])));
  }else if(bId == 0 || bId == (ny-1)){
    // Wall BC: u,v = 0 @ y = 0,2
    u[m] = 0;
    v[m] = 0;
  }
}

// Time-steps the periodic channel flow until the relative change in total
// u-momentum drops below 1e-3, then reports step count and wall time.
int main() {
  float dx = 2/(nx - 1.0);
  float dy = 2/(ny - 1.0);
  int m;

  // Physical parameters: density, viscosity, body force, time step.
  const float rho = 1.0;
  const float nu = .1;
  const float F = 1.0;
  const float dt = .01;

  float udiff = 1.0;
  int stepcount = 0;
  float sumu = 0.0;
  float sumun = 0.0;

  float *b; float *p; float *u; float *v; float *un; float *vn; float *pn;
  cudaMallocManaged(&b, ny*nx*sizeof(float));
  cudaMallocManaged(&p, ny*nx*sizeof(float));
  cudaMallocManaged(&u, ny*nx*sizeof(float));
  cudaMallocManaged(&v, ny*nx*sizeof(float));
  cudaMallocManaged(&un, ny*nx*sizeof(float));
  cudaMallocManaged(&vn, ny*nx*sizeof(float));
  cudaMallocManaged(&pn, ny*nx*sizeof(float));

  // Initial conditions: fluid at rest, uniform unit pressure.
  for(int i=0; i<ny*nx; i++) {
    u[i]=0.0;
    v[i]=0.0;
    un[i]=0.0;
    vn[i]=0.0;
    p[i]=1.0;   // FIX: p was read by the pressure kernel (pn[m]=p[m]) before
                // ever being written — only pn was initialized, and the
                // kernel overwrites pn immediately.  Initialize p instead.
    pn[i]=1.0;
    b[i]=0.0;   // defensive: fully written by build_up_b, but start defined.
  }

  auto tic = chrono::steady_clock::now();
  while(udiff>.001){
    // Snapshot the previous velocity field.
    for(int i=0; i<nx; i++){
      for(int j=0; j<ny; j++){
        un[j*nx+i] = u[j*nx+i];
        vn[j*nx+i] = v[j*nx+i];
      }
    }
    build_up_b<<<ny,nx>>>(b,rho,dt,dx,dy,u,v);
    cudaDeviceSynchronize();
    pressure_poisson_periodic<<<ny,nx>>>(p,pn,b, dx, dy);
    cudaDeviceSynchronize();
    updated_u_v<<<ny,nx>>>(u,v,un,vn,p,dx,dy,dt,rho,nu,F);
    cudaDeviceSynchronize();

    // Relative change in total u as the convergence measure.
    sumu = 0.0;
    sumun = 0.0;
    for(int i=0;i<nx;i++){
      for(int j=0; j<ny;j++){
        m = j*nx+i;
        sumu += u[m];
        sumun += un[m];
      }
    }
    udiff = (sumu - sumun)/ sumu ;
    if(stepcount % 50 ==0){
      std::cout<<"Step: "<<stepcount<<": udiff = "<<udiff<<std::endl;
    }
    stepcount += 1;
  }
  auto toc = chrono::steady_clock::now();
  double time = chrono::duration<double>(toc - tic).count();
  std::cout<<"Step: "<<stepcount<<": udiff = "<<udiff<<std::endl;
  std::cout<<"Step Count = " << stepcount <<std::endl;
  std::cout<<"Time = " << time <<std::endl;
}
4,220
#include <cuda_fp16.h>

#define ELEMENT_SIZE 64
#define BLOCK_SIZE 64
#define WEIGHT_MAX_LENGTH 2048

extern "C"
//use constant memory for weights if needs to be faster
//
// Weighted popcount over bit-packed input: for each output (y, z) this sums
// weights[x] over all x whose bit is set in the packed input word at
// (x, y, z).  input is long-packed along x (ELEMENT_SIZE = 64 bits/word);
// weights are half precision, accumulation is float.
// Launch: blockDim.x == BLOCK_SIZE, grid.x covers ret1, grid.y == input1.
// NOTE(review): weights[tid + x_offset] is loaded without a bound check —
// assumes the weights buffer spans at least ceil(input0/BLOCK_SIZE)*BLOCK_SIZE
// entries (<= WEIGHT_MAX_LENGTH); TODO confirm against the caller.
// NOTE(review): ret0 is unused — presumably kept for a uniform signature.
__global__ void weighted_sum_kernel(__half *ret, const long *input,
                                    const __half *weights,
                                    const int ret0, const int ret1,
                                    const int input0, const int input1,
                                    const int input2 )
{
    // One tile of weights and a BLOCK_SIZE x (BLOCK_SIZE/ELEMENT_SIZE) tile
    // of packed input words, both staged in shared memory.
    __shared__ __half weight_cache[BLOCK_SIZE];
    __shared__ long cache[BLOCK_SIZE][BLOCK_SIZE / ELEMENT_SIZE];

    const int z = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    const int y = blockIdx.y;                             // output row
    const int tid = threadIdx.x;

    // ratio = packed words per BLOCK_SIZE bits (1 when BLOCK_SIZE==ELEMENT_SIZE).
    const int ratio = BLOCK_SIZE / ELEMENT_SIZE;
    const int ratio_sq = BLOCK_SIZE / ratio;
    const int x_cache = tid / ratio;              // row this thread loads
    const int z_cache = tid - x_cache * ratio;    // word-column it loads
    const int z_offset = blockIdx.x * ratio;      // first word-column of block
    const int z_element = tid / ELEMENT_SIZE;     // word holding this z's bit
    const int z_bit = tid - z_element * ELEMENT_SIZE;  // bit index within word

    float tmp = 0;
    // half precision performance crippled on Pascal?
    // __half tmp = 0;

    // March over the x dimension one BLOCK_SIZE tile at a time.
    for (int x_offset = 0; x_offset < input0; x_offset += BLOCK_SIZE) {
        // Cooperative load of the packed-input tile; out-of-range -> 0 so
        // the popcount loop below adds nothing for padding.
        for (int x = 0; x < ratio; x++) {
            const int x_block = x * ratio_sq + x_cache;
            cache[x_block][z_cache] = x_offset + x_block < input0 && z_cache + z_offset < input2 ? input[((x_offset + x_block) * input1 + y) * input2 + z_cache + z_offset] : 0;
        }
        weight_cache[tid] = weights[tid + x_offset];
        __syncthreads();
        // Accumulate the weight of every x whose bit is set for this z.
        #pragma unroll
        for (int x = 0; x < BLOCK_SIZE; x++) {
            if ((cache[x][z_element] >> z_bit) & 1) {
                tmp += (float) weight_cache[x];
                // tmp += weight_cache[x];
            }
        }
        __syncthreads();  // protect shared tiles before the next load
    }
    if (z<ret1) {
        ret[y * ret1 + z] = (__half) tmp;
    }
}
4,221
// Brute-force graph matching: min over permutations P of || A - P^T B P ||.
// Matrices are small, dense, column-major in a flat array; no sparsity used.
// Maximum graph size is MAX_N_PERM (device kernels keep permutations in
// fixed-size local arrays).
// TO-DO: allow scanning a fraction of the n! permutations so multiple GPUs
// can share the work.
#include<stdio.h>
#include <time.h>
#include<float.h>

#define MAX_N_PERM 12
typedef unsigned int lint;

#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Squared Frobenius norm of A - P^T B P for permutation perm (1-based values,
// 0-based indices into the column-major matrices).
__device__ __host__ float fro_norm_square(int n, float *A , float *B, int *perm){
    float total = 0;
    for (int i = 0; i < n; i++){           // along columns
        for (int j = 0; j < n; j++){       // along rows
            float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
            total = total + value*value;
        }
    }
    return total;
}
// Device-side pointer so the host can fetch this metric with
// cudaMemcpyFromSymbol and pass it to the kernel.
__device__ float (*d_ptr_fro_norm_square)(int , float * , float *, int *) = fro_norm_square;

// L1 norm of A - P^T B P.  For 0/1 adjacency matrices this coincides with
// the squared Frobenius norm above.
__device__ __host__ float L1_norm(int n, float *A , float *B, int *perm){
    float total = 0;
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
            total = total + abs(value);
        }
    }
    return total;
}
__device__ float (*d_ptr_L1_norm)(int , float * , float *, int *) = L1_norm;

// Sets v to the consecutive values 1..n.
__host__ __device__ inline void settoconsec(int *v, int n){
    for (int i = 0; i < n; i++){
        v[i] = i+1;
    }
}

// Copies n ints from v_source into v_dest.
inline void copyvec(int *v_dest, int * v_source, int n){
    for (int i = 0; i < n; i++){
        v_dest[i] = v_source[i];
    }
}

// Swaps v[ix1] and v[ix2].
__host__ __device__ inline void swap(int *v, int ix1, int ix2){
    int tmp = v[ix1];
    v[ix1] = v[ix2];
    v[ix2] = tmp;
}

// Prints an int vector on one line.
__host__ __device__ void printvec(int *v, int n){
    for (int i = 0; i < n ; i ++){
        printf("%d ", v[i]);
    }
    printf("\n");
}

// Prints a float vector on one line.
__host__ __device__ void printfloatvec(float *v, int n){
    for (int i = 0; i < n ; i ++){
        printf("%f ", v[i]);
    }
    printf("\n");
}

// Serial reference: exhaustively enumerates all n! permutations (mixed-radix
// odometer v, digit i in [i+1, n], turned into a permutation via swaps) and
// returns min_P metric(A, B, P); the argmin is written into bestperm.
float compute_optimal_match(int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), int * bestperm ){
    float opt_val = FLT_MAX;
    int * v = (int *) malloc(n * sizeof(int) );
    int * output = (int *) malloc(n * sizeof(int) );
    settoconsec(v, n);
    while( v[n-1] <= n ){
        // The serial odometer counts with the leftmost digit fastest
        // (1 2 3, 2 2 3, 3 2 3, 1 3 3, ...) while the GPU indexing counts
        // (1 1 1, 2 1 1, 3 1 1, 1 2 1, ...); both cover all n! swaps.
        settoconsec(output, n);
        for( int i = 0; i < n ; i++){
            swap(output, i, v[i]-1);
        }
        // output now holds a permutation; evaluate it.
        float val = (*metric)( n , A , B , output );
        if ( val < opt_val ){
            opt_val = val;
            copyvec( bestperm , output , n );
        }
        // Advance the odometer with carries.
        v[ 0 ] = v[ 0 ] + 1;
        for (int i = 0; i < n-1 ; i++){
            if( v[i] > n ){
                v[i] = i+1;
                v[ i + 1 ] = v[ i + 1 ] + 1;
            }
        }
    }
    free(output);
    free(v);
    return opt_val;
}

// Decodes permutation index r in [0, n!) into a permutation (factorial-base
// digits in scrap, applied as swaps).  scrap is caller-provided scratch.
__device__ __host__ void index_to_perm(lint r, int n, int *perm, int * scrap){
    for (int i = n ; i >=1; i--){
        scrap[n - i] = (r % i) + 1;
        r = r/i;
    }
    // scrap counts rightmost-slowest (1 1 1, 2 1 1, 3 1 1, 1 2 1, ...),
    // unlike the serial odometer above; the swap construction is the same.
    settoconsec(perm, n);
    for( int i = 0; i < n ; i++){
        swap(perm, i, i + scrap[i]-1);
    }
}

// n! (host only; overflows int for n > 12, which MAX_N_PERM also caps).
inline int fact(int n){
    if (n <=1) return 1;
    else return n*fact(n-1);
}

// Each thread scans permutation indices baseix, baseix + chunck_per_cycle,
// ... (num_perm_per_thread of them), keeping its local best value and the
// index of the best permutation; per-thread results land in obj_vals /
// obj_perms for a final host-side reduction.  A and B are staged in dynamic
// shared memory (2*n*n floats, thread 0 of each block copies).
__global__ void kernel_to_compute_optimal_match(int chunck_per_cycle, int num_perm_per_thread, lint nfact, int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), float * obj_vals, lint * obj_perms ){
    int baseix = blockIdx.x*blockDim.x + threadIdx.x;
    lint ix = baseix;

    // Split the dynamic shared memory between A and B.
    extern __shared__ float AB_shared_mem[];
    float * shared_A = AB_shared_mem;
    float * shared_B = &AB_shared_mem[n*n];
    if (threadIdx.x == 0){
        for (int i = 0; i < n*n ; i++){
            shared_A[i] = A[i];
            shared_B[i] = B[i];
        }
    }
    __syncthreads();

    float best_val = FLT_MAX;
    lint best_perm_ix = 0;   // initialized so threads with no valid ix still
                             // write a defined value below
    for (int i = 0; i < num_perm_per_thread ; i++){
        ix = baseix + chunck_per_cycle*i;
        if (ix < nfact){     // skip indices beyond n!
            int perm[MAX_N_PERM];
            int scrap[MAX_N_PERM];
            index_to_perm( ix , n, perm, scrap);
            float val = (*metric)( n, shared_A , shared_B, perm);
            if (val < best_val){
                best_val = val;
                best_perm_ix = ix;
            }
        }
    }
    obj_vals[baseix] = best_val;
    obj_perms[baseix] = best_perm_ix;
}

// Sanity exercise for index_to_perm over all n! indices.
void test_index_perm(int n ){
    int *perm = (int *) malloc(n * sizeof(int));
    int *scrap = (int *) malloc(n * sizeof(int));
    for (int r = 0; r < fact(n) ; r++){
        index_to_perm(r, n, perm, scrap);
        //printvec(perm,n);
    }
    free(perm);
    free(scrap);
}

// Reads an edge list ("i j" per line, 1-based) into a freshly allocated
// column-major adjacency matrix.  If *graphsize == -1 a first pass infers
// the size from the largest index seen.  Unlisted edges stay 0 (calloc);
// undirected graphs are symmetrized.
float * read_graph_into_adj_mat(char * filename, int *graphsize, int directed){
    if (*graphsize == -1){
        FILE * graphfile = fopen(filename , "r");
        int dim = -1;
        int edge1, edge2;
        while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) != EOF){
            if (dim < edge1){ dim = edge1; }
            if (dim < edge2){ dim = edge2; }
        }
        fclose(graphfile);
        *graphsize = dim;
    }
    float *A = (float *) calloc( (*graphsize) , (*graphsize)*sizeof(float) ) ;
    FILE * graphfile = fopen(filename , "r");
    int edge1, edge2;
    while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) != EOF){
        if (edge1 <= (*graphsize) && edge2 <= (*graphsize) && edge1 >=1 && edge2 >=1 ){
            A[(edge1-1) + (edge2-1)* (*graphsize) ] = 1;
            if (directed == 0){
                A[(edge2-1) + (edge1-1)* (*graphsize) ] = 1;
            }
        }
    }
    fclose(graphfile);
    return A;
}

// Writes the vector space-separated to output_file (no-op for n <= 1).
void save_vec_to_file(int * vec, int n , char* output_file){
    if (n > 1){
        FILE * vec_file = fopen(output_file , "w");
        for (int i = 0; i < n-1; i++){
            fprintf(vec_file,"%d ", vec[i]);
        }
        fprintf(vec_file,"%d", vec[n-1]);
        fclose(vec_file);
    }
}

// argv: filenameA filenameB outputfile L1vsL2 directed cpu_vs_gpu size
// cpu_vs_gpu == 1 runs the serial search, anything else runs the GPU search.
int main(int argc,char *argv[]){
    if (argc != 8){
        printf("The arguments must be filenameA, filenameB, outputfile, L1vsL2, directed/undirected, gpu/cpu, size\n");
        return 0;
    }
    char * filenameA = (char *) argv[1];
    char * filenameB = (char *) argv[2];
    char * fileoutput = (char *) argv[3];
    int norm_to_use = atoi( argv[4] );
    int directed = atoi( argv[5] );
    int cpu_vs_gpu = atoi( argv[6] );
    int graphsize = atoi( argv[7] );

    int sizeA = graphsize;
    int sizeB = graphsize;
    float *A = read_graph_into_adj_mat( filenameA , &sizeA , directed );
    float *B = read_graph_into_adj_mat( filenameB , &sizeB , directed );
    if ( sizeA != sizeB ){
        printf("Error, graphs of different sizes\n");
        return 0;
    }

    clock_t cpu_start, cpu_end;
    float cputime;
    int n = sizeA;
    // Guard the fixed-size device permutation buffers (and int overflow of
    // fact) before doing any work.
    if ( n > MAX_N_PERM ){
        printf("Error, graph size %d exceeds MAX_N_PERM = %d\n", n, MAX_N_PERM);
        return 0;
    }
    lint nfact = fact(n);

    if (cpu_vs_gpu == 1){
        int * bestperm = (int *) malloc(n * sizeof( int ) );  // argmin holder
        cpu_start = clock();
        float val;
        if (norm_to_use == 1){
            val = compute_optimal_match(n, A, B, &L1_norm , bestperm);
        }
        if (norm_to_use == 2){
            val = compute_optimal_match(n, A, B, &fro_norm_square , bestperm);
        }
        cpu_end = clock();
        printf("CPU Opt Val = %f\n", val);
        printvec(bestperm, n);
        cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
        printf("SERIAL: Time to compute opt = %f\n",cputime);
        fflush(stdout);
        save_vec_to_file(bestperm, n , fileoutput);
        free(bestperm);
    }else{
        cudaSetDevice( 0 );
        cudaDeviceReset();

        // Work division: chunck_per_cycle threads, each scanning
        // num_stuff_per_thread strided permutation indices.
        int numthreadsperblock = 1024;
        int numblocks = 1024;
        int chunck_per_cycle = numblocks*numthreadsperblock;
        int num_stuff_per_thread = 1 + (nfact / chunck_per_cycle );

        float * d_A;
        float * d_B;
        float * d_obj_vals;
        lint * d_obj_perms;
        float * h_obj_vals = (float *) malloc( chunck_per_cycle*sizeof(float) );
        lint * h_obj_perms = (lint *) malloc( chunck_per_cycle*sizeof(lint) );

        cudaEvent_t gpu_start, gpu_end;
        float gputime;
        cudaEventCreate(&gpu_start);
        cudaEventCreate(&gpu_end);

        cudaEventRecord(gpu_start, 0);
        cudaMalloc((void **)&d_A, n*n*sizeof(float) );
        cudaMalloc((void **)&d_B, n*n*sizeof(float) );
        cudaMalloc((void **)&d_obj_vals, chunck_per_cycle*sizeof(float) );
        cudaMalloc((void **)&d_obj_perms, chunck_per_cycle*sizeof(lint) );
        cudaMemcpy( (void*) d_A , (void*) A , n*n*sizeof(float) , cudaMemcpyHostToDevice );
        cudaMemcpy( (void*) d_B , (void*) B , n*n*sizeof(float) , cudaMemcpyHostToDevice );
        cudaEventRecord(gpu_end, 0);
        cudaEventSynchronize(gpu_end);  // event sync suffices for timing
        cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to allocate space: %f\n", gputime/1000);
        fflush(stdout);

        // Fetch the chosen device metric's function pointer for the kernel.
        float (*h_d_per_metric)(int , float *, float * , int * );
        if (norm_to_use == 1){
            cudaMemcpyFromSymbol(&h_d_per_metric, d_ptr_L1_norm, sizeof( float (*)(int , float *, float * , int * ) ));
        }
        if (norm_to_use == 2){
            cudaMemcpyFromSymbol(&h_d_per_metric, d_ptr_fro_norm_square, sizeof( float (*)(int , float *, float * , int * ) ));
        }

        cudaEventRecord(gpu_start, 0);
        // Dynamic shared memory holds both matrices (2 * n*n floats).
        kernel_to_compute_optimal_match<<<numblocks,numthreadsperblock,n*n*2*sizeof(float)>>>(chunck_per_cycle,num_stuff_per_thread , nfact, n, d_A, d_B, h_d_per_metric , d_obj_vals, d_obj_perms);
        cudaEventRecord(gpu_end, 0);
        cudaEventSynchronize(gpu_end);
        cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to run the kernel: %f\n", gputime/1000);
        fflush(stdout);

        // Copy the per-thread results back and reduce on the host.
        cudaEventRecord(gpu_start, 0);
        cudaMemcpy( (void*) h_obj_vals , (void*) d_obj_vals , chunck_per_cycle*sizeof(float) , cudaMemcpyDeviceToHost );
        // FIX: was sizeof(float); size the copy by the element type lint.
        cudaMemcpy( (void*) h_obj_perms , (void*) d_obj_perms , chunck_per_cycle*sizeof(lint) , cudaMemcpyDeviceToHost );
        cudaEventRecord(gpu_end, 0);
        cudaEventSynchronize(gpu_end);
        cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to copy stuff back to the CPU: %f\n", gputime/1000);
        fflush(stdout);

        cpu_start = clock();
        float best_gpu_val = FLT_MAX;
        lint best_ix = 0;
        for (int i = 0 ; i < chunck_per_cycle ; i++){
            float val = h_obj_vals[i];
            if (val < best_gpu_val){
                best_gpu_val = val;
                // FIX: was `best_ix = i` (the thread-slot index).  Each thread
                // scans num_stuff_per_thread strided permutation indices, so
                // the permutation that achieved h_obj_vals[i] is the one the
                // kernel recorded in h_obj_perms[i], not i itself.
                best_ix = h_obj_perms[i];
            }
        }
        int * perm = (int *) malloc(n * sizeof(int));
        int * scrap = (int *) malloc(n * sizeof(int));
        index_to_perm(best_ix, n, perm, scrap);
        printf("GPU Opt Val = %f\n", best_gpu_val);
        printvec(perm, n);
        save_vec_to_file(perm, n , fileoutput);
        cpu_end = clock();
        cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
        printf("SERIAL: Time to compute the last step = %f\n",cputime);
        fflush(stdout);

        cudaFree(d_A);
        cudaFree(d_B);
        cudaFree(d_obj_vals);
        cudaFree(d_obj_perms);
        free(h_obj_vals);
        free(h_obj_perms);
        free(perm);
        free(scrap);
        gerror( cudaPeekAtLastError() );
        cudaDeviceSynchronize();
    }

    free(A);
    free(B);
    return 0;
}
4,222
// Micro-benchmark: GPU matrix transpose (TRANSP == 1) or two element-init
// schemes, timed both with wall-clock (wtime) and CUDA events.
#include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>

#define N 128
#define NELEMS (N * N)
#define SCHEME 1
#define TRANSP 1

// Aborts with file/line on any non-success CUDA status.
#define CUDA_CHECK_RETURN(value) \
{ \
    cudaError_t _m_cudaStat = value; \
    if (_m_cudaStat != cudaSuccess) \
    { \
        fprintf(stderr, "Error %s at line %d in file %s\n", \
            cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } \
}

// Wall-clock time in seconds (microsecond resolution).
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

// Column-major init scheme 1.
// NOTE(review): indexes 2-D (i, j) but main launches this with a 1-D
// <<<blocksPerGrid, 256>>> configuration, so blockDim.y == 1 and j is always
// 0 — only one row is written.  Left as-is; confirm intended launch shape.
__global__ void init1(float *a, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i < n) && (j < n))
        a[i + j * n] = (float)(threadIdx.x + blockDim.y * blockIdx.x);
}

// Row-major init scheme 2.
// NOTE(review): same 1-D launch issue as init1; also the stored value uses
// threadIdx.y twice (`threadIdx.y + blockDim.x * threadIdx.y`) — possibly a
// typo for blockIdx.y; verify before relying on the values.
__global__ void init2(float *a, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i < n) && (j < n))
        a[j + i * n] = (float)(threadIdx.y + blockDim.x * threadIdx.y);
}

// Naive out-of-place transpose: b[j][i] = a[i][j].  Launched with a 2-D
// 32x32 grid; bounds-checked so n need not divide the block size.
__global__ void tr(const float *a, float *b, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if ((i < n) && (j < n))
        b[j * n + i] = a[i * n + j];
}

// Allocates host/device buffers, runs the selected kernel, verifies the
// transpose result, and reports wall-clock and event timings.
int main()
{
    size_t size = sizeof(float) * NELEMS;
    double tgpu = 0, tmem = 0;
    float elapsedTime = 0;
    cudaEvent_t start, stop;

    /* Allocate vectors on host */
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL)
    {
        fprintf(stderr, "Allocation error.\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < NELEMS; ++i)
    {
        h_A[i] = rand() / (float)RAND_MAX;
    }

    /* Allocate vectors on device */
    float *d_A = NULL, *d_B = NULL;
    tmem = -wtime();
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_A, size));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_B, size));
    /* Copy the host vectors to device */
    CUDA_CHECK_RETURN(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice))
    tmem += wtime();

    /* Launch the kernel */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    tgpu = -wtime();
    // 1-D launch config (used by the init schemes below).
    int threadsPerBlock = 256;
    int blocksPerGrid = (NELEMS + threadsPerBlock - 1) / threadsPerBlock;
#if TRANSP == 1
    // 2-D 32x32 launch covering the N x N matrix for the transpose.
    int threadsPerBlockDim = 32;
    dim3 blockDim(threadsPerBlockDim, threadsPerBlockDim, 1);
    int blocksPerGridDimX = ceilf(N / (float)threadsPerBlockDim);
    int blocksPerGridDimY = ceilf(N / (float)threadsPerBlockDim);
    dim3 gridDim(blocksPerGridDimX, blocksPerGridDimY, 1);
    cudaEventRecord(start, 0);
    tr<<<gridDim, blockDim>>>(d_A, d_B, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    tgpu += wtime();
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    /* Copy the device vectors to host */
    tmem -= wtime();
    CUDA_CHECK_RETURN(cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost));
    tmem += wtime();

    // Verify b[j][i] == a[i][j] for every element.
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < N; ++j)
            if (fabs(h_A[i * N + j] - h_B[j * N + i]) > 1e-5)
            {
                fprintf(stderr, "Result verification failed at element %d , %d! Ex: %f, Real: %f\n", i, j, h_A[i * N + j], h_B[j * N + i]);
                exit(EXIT_FAILURE);
            }
    }
    printf("Transponse\n");
    printf("GPU version (sec.): %.6lf\n", tgpu);
    printf("Memory ops. (sec.): %.6lf\n", tmem);
    printf("Total time (sec.): %.6lf\n", tgpu + tmem);
    printf("Events Time %.6f\n", elapsedTime);
#else
#if SCHEME == 1
    /* Launch the kernel */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    tgpu = -wtime();
    cudaEventRecord(start, 0);
    init1<<<blocksPerGrid, threadsPerBlock>>>(d_A, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    tgpu += wtime();
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Copy the device vectors to host */
    tmem -= wtime();
    CUDA_CHECK_RETURN(cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost));
    tmem += wtime();
    printf("\nInit scheme 1\n");
    printf("GPU version (sec.): %.6lf\n", tgpu);
    printf("Events Time %.6f\n", elapsedTime);
#else
    /* Launch the kernel */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    tgpu = -wtime();
    cudaEventRecord(start, 0);
    init2<<<blocksPerGrid, threadsPerBlock>>>(d_B, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    tgpu += wtime();
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Copy the device vectors to host */
    tmem -= wtime();
    CUDA_CHECK_RETURN(cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost));
    tmem += wtime();
    printf("\nInit scheme 2\n");
    printf("GPU version (sec.): %.6lf\n", tgpu);
    printf("Events Time %.6f\n", elapsedTime);
#endif
#endif
    cudaFree(d_A);
    cudaFree(d_B);
    free(h_A);
    free(h_B);
    cudaDeviceReset();
    return 0;
}
4,223
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void addArrays(int* A, int* B, int* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; C[i] = A[i] + B[i]; } int main(void) { int N = 1024; int *A, *B, *C; // Crea los buffer, con Unified Memory, para los datos de entrada y salida cudaMallocManaged(&A, N * sizeof(int)); cudaMallocManaged(&B, N * sizeof(int)); cudaMallocManaged(&C, N * sizeof(int)); // Inicializa los buffer del host con los valores de entrada for (int i = 0; i < N; i++) { A[i] = i; //0,1,2,...,1023 B[i] = N - i; //1023,1022,...,0 } // Ejecuta la kernel en la GPU (4 bloques * 256 hilos = 1024 elementos calculados) addArrays <<<4, 256>>> (A, B, C); // Espera que termine la kernel cudaDeviceSynchronize(); // Presenta el resultado for (int i = 0; i < N; i++) { printf("Resultados %d: (%d + %d = %d)\n", i, A[i], B[i], C[i]); } // Libera los recursos cudaFree(A); cudaFree(B); cudaFree(C); }
4,224
// Rectified linear unit: max(x, 0).
__device__ float relu (float x)
{
    // 0.0f keeps the whole expression in single precision (the original
    // double literal 0.0 was converted at the fmaxf call anyway, but the
    // float suffix is the idiomatic form in float device code).
    return fmaxf(x, 0.0f);
}

// Applies ReLU element-wise: destination[i] = max(source[i], 0) for
// i in [0, length).  Any 1-D launch works; out-of-range threads exit.
extern "C"
__global__ void reluKernel (int length, float *source, float *destination)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
        destination[index] = relu(source[index]);
    }
}
4,225
#include <stdio.h>

// Minimal host-side sanity hook with C linkage: prints a success marker so a
// caller (e.g. an FFI loader) can confirm the library was built and linked.
extern "C" void test()
{
    static const char *marker = "success!\n";
    printf("%s", marker);
}
4,226
#include <stdio.h>

// Commits the candidate positions r5 into r for N particles and warns (via
// device printf) about any particle lying outside the disc of radius L
// centred at the origin.  Any 1-D launch covering N works.
__global__ void UpdatePositions( int N,double L, double2* r, double2* r5){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;

    double2 pos = r5[idx];
    r[idx] = pos;

    double radius_sq = pos.x * pos.x + pos.y * pos.y;
    if (radius_sq > L * L) {
        printf("%d is outside region. %1.4f %1.4f\n", idx, pos.x, pos.y);
    }
}
4,227
// 1D convolution example using CUDA C++
// Each block loads its elements plus a halo into shared memory and computes
// a box convolution (all-ones kernel of width 2*RADIUS+1) with one thread
// per output element.
#include <iostream>

// Global parameters
#define NUMBLOCKS 8
#define BLOCKSIZE 4
#define RADIUS 1
#define NUMELEMENTS (NUMBLOCKS * BLOCKSIZE)

// Function and macro to handle CUDA errors
static void handleError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        std::cout << cudaGetErrorString(err) << " in " << file
                  << " at line " << line << std::endl;
        exit(EXIT_FAILURE);
    }
}
#define cudaCheck(err) (handleError(err, __FILE__, __LINE__))

// 1D convolution kernel.
// Expects a 1-D launch of NUMBLOCKS blocks of BLOCKSIZE threads; `in` must
// hold NUMELEMENTS + 2*RADIUS values (zero-padded), `out` holds NUMELEMENTS.
__global__ void conv1d(float *in, float *out)
{
    __shared__ float temp[BLOCKSIZE + 2 * RADIUS];
    int gindex = threadIdx.x + blockIdx.x * blockDim.x + RADIUS;
    int lindex = threadIdx.x + RADIUS;

    // Read in data corresponding to the input elements to be processed
    temp[lindex] = in[gindex];

    // The first RADIUS threads also load the halo on both sides of the tile
    if (threadIdx.x < RADIUS) {
        temp[lindex - RADIUS] = in[gindex - RADIUS];        // left halo
        temp[lindex + BLOCKSIZE] = in[gindex + BLOCKSIZE];  // right halo
    }

    // All shared-memory writes must complete before any thread reads
    __syncthreads();

    // Perform convolution (sum over the window). Use a float literal so no
    // double-precision arithmetic sneaks in.
    float result = 0.0f;
    for (int offset = -RADIUS; offset <= RADIUS; ++offset) {
        result += temp[lindex + offset];
    }

    // Store the result (the original had a dead `result = result;` here)
    out[gindex - RADIUS] = result;
}

int main(void)
{
    // Host copies of in, out; input is all-ones with RADIUS zeros of padding
    // on each side.
    float in[NUMELEMENTS + 2 * RADIUS], out[NUMELEMENTS];
    for (int i = 0; i < NUMELEMENTS + 2 * RADIUS; ++i) {
        if (i < RADIUS || i >= NUMELEMENTS + RADIUS) {
            in[i] = 0.0f;
        } else {
            in[i] = 1.0f;
        }
    }

    // Sizes in bytes
    int size_in = (NUMELEMENTS + 2 * RADIUS) * sizeof(float);
    int size_out = NUMELEMENTS * sizeof(float);

    // Device copies
    float *d_in, *d_out;
    cudaCheck(cudaMalloc((void **)&d_in, size_in));
    cudaCheck(cudaMalloc((void **)&d_out, size_out));

    // Copy input from host to device
    cudaCheck(cudaMemcpy(d_in, in, size_in, cudaMemcpyHostToDevice));

    // Launch the conv1d kernel
    conv1d<<<NUMBLOCKS, BLOCKSIZE>>>(d_in, d_out);

    // Check for kernel launch errors
    cudaCheck(cudaPeekAtLastError());

    // Copy the result back (a blocking memcpy synchronizes with the kernel)
    cudaCheck(cudaMemcpy(out, d_out, size_out, cudaMemcpyDeviceToHost));

    // Print the result
    for (int i = 0; i < NUMELEMENTS; ++i) {
        std::cout << out[i] << " ";
    }
    std::cout << std::endl;

    // Release device memory (leaked in the original)
    cudaCheck(cudaFree(d_in));
    cudaCheck(cudaFree(d_out));
    return 0;
}
4,228
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Fill the hA x wA row-major matrix A with uniform random values in [0, 1].
void init(float *A, int wA, int hA)
{
    for (int h = 0; h < hA; h++)
        for (int w = 0; w < wA; w++)
            A[w + h * wA] = (float)rand() / (float)RAND_MAX;
}

// Naive row-major matrix multiply: C (hA x wB) = A (hA x wA) * B (wA x wB).
// Caller must ensure wA == hB for the product to be well-defined.
void compute(float *A, float *B, float *C, int wA, int hA, int wB)
{
    for (int h = 0; h < hA; h++) {
        for (int w = 0; w < wB; w++) {
            float temp = 0.0f;
            for (int i = 0; i < wA; i++)
                temp += A[i + h * wA] * B[w + i * wB];
            C[w + h * wB] = temp;
        }
    }
}

int main()
{
    clock_t start, stop;
    int iter, max_iter = 10;
    int wA = 320, hA = 320, wB = 640, hB = 320;  // note: wA must equal hB

    size_t sizeA = wA * hA * sizeof(float);
    size_t sizeB = wB * hB * sizeof(float);
    size_t sizeC = hA * wB * sizeof(float);

    float *A = (float *)malloc(sizeA);
    float *B = (float *)malloc(sizeB);
    float *C = (float *)malloc(sizeC);
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "out of memory\n");
        free(A); free(B); free(C);
        return 1;
    }

    // Seed the RNG, then fill both input matrices.
    srand(time(NULL));
    init(A, wA, hA);
    init(B, wB, hB);

    // Time max_iter repetitions of the multiply and report the mean.
    start = clock();
    for (iter = 0; iter < max_iter; iter++)
        compute(A, B, C, wA, hA, wB);
    stop = clock();

    double cpu_time = (double)(stop - start) / CLOCKS_PER_SEC / max_iter;
    printf("CPU time = %lf\n", cpu_time);

    // Release the matrices (the original leaked all three).
    free(A);
    free(B);
    free(C);
    return 0;
}
4,229
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3) { for (int i=2; i<=N-3; i++) { double _t_59_ = u1[i+2][j][k+2]; double _t_135_ = u1[i+2][j][k+2]; _t_59_ -= u1[i+2][j][k-2]; double _t_154_ = u1[i+2][j][k-2]; double _t_56_ = c2 * _t_59_; double _t_60_ = u1[i+2][j][k+1]; double _t_174_ = u1[i+2][j][k+1]; _t_60_ -= u1[i+2][j][k-1]; double _t_193_ = u1[i+2][j][k-1]; _t_56_ += c1 * _t_60_; double _t_58_ = 
2.0 * mu[i+2][j][k]; _t_58_ += la[i+2][j][k]; double _t_57_ = _t_58_ * met2[i+2][j][k]; double _t_55_ = _t_57_ * met1[i+2][j][k]; double _t_54_ = _t_55_ * _t_56_; double _t_53_ = _t_54_ * strx[i]; double _t_65_ = u2[i+2][j][k+2]; double _t_140_ = u2[i+2][j][k+2]; _t_65_ -= u2[i+2][j][k-2]; double _t_159_ = u2[i+2][j][k-2]; double _t_63_ = c2 * _t_65_; double _t_66_ = u2[i+2][j][k+1]; double _t_179_ = u2[i+2][j][k+1]; _t_66_ -= u2[i+2][j][k-1]; double _t_198_ = u2[i+2][j][k-1]; _t_63_ += c1 * _t_66_; double _t_64_ = la[i+2][j][k] * met3[i+2][j][k]; double _t_62_ = _t_64_ * met1[i+2][j][k]; double _t_61_ = _t_62_ * _t_63_; _t_53_ += _t_61_ * stry[j]; double _t_69_ = la[i+2][j][k] * met4[i+2][j][k]; double _t_67_ = _t_69_ * met1[i+2][j][k]; double _t_70_ = u3[i+2][j][k+2]; double _t_146_ = u3[i+2][j][k+2]; _t_70_ -= u3[i+2][j][k-2]; double _t_165_ = u3[i+2][j][k-2]; double _t_68_ = c2 * _t_70_; double _t_71_ = u3[i+2][j][k+1]; double _t_185_ = u3[i+2][j][k+1]; _t_71_ -= u3[i+2][j][k-1]; double _t_204_ = u3[i+2][j][k-1]; _t_68_ += c1 * _t_71_; _t_53_ += _t_67_ * _t_68_; double _t_77_ = u1[i-2][j][k+2]; _t_135_ -= u1[i-2][j][k+2]; _t_77_ -= u1[i-2][j][k-2]; _t_154_ -= u1[i-2][j][k-2]; double _t_74_ = c2 * _t_77_; double _t_78_ = u1[i-2][j][k+1]; _t_174_ -= u1[i-2][j][k+1]; _t_78_ -= u1[i-2][j][k-1]; _t_193_ -= u1[i-2][j][k-1]; _t_74_ += c1 * _t_78_; double _t_76_ = 2.0 * mu[i-2][j][k]; _t_76_ += la[i-2][j][k]; double _t_75_ = _t_76_ * met2[i-2][j][k]; double _t_73_ = _t_75_ * met1[i-2][j][k]; double _t_72_ = _t_73_ * _t_74_; _t_53_ += _t_72_ * strx[i]; double _t_83_ = u2[i-2][j][k+2]; _t_140_ -= u2[i-2][j][k+2]; _t_83_ -= u2[i-2][j][k-2]; _t_159_ -= u2[i-2][j][k-2]; double _t_81_ = c2 * _t_83_; double _t_84_ = u2[i-2][j][k+1]; _t_179_ -= u2[i-2][j][k+1]; _t_84_ -= u2[i-2][j][k-1]; _t_198_ -= u2[i-2][j][k-1]; _t_81_ += c1 * _t_84_; double _t_82_ = la[i-2][j][k] * met3[i-2][j][k]; double _t_80_ = _t_82_ * met1[i-2][j][k]; double _t_79_ = _t_80_ * _t_81_; _t_53_ += _t_79_ 
* stry[j]; double _t_87_ = la[i-2][j][k] * met4[i-2][j][k]; double _t_85_ = _t_87_ * met1[i-2][j][k]; double _t_88_ = u3[i-2][j][k+2]; _t_146_ -= u3[i-2][j][k+2]; _t_88_ -= u3[i-2][j][k-2]; _t_165_ -= u3[i-2][j][k-2]; double _t_86_ = c2 * _t_88_; double _t_89_ = u3[i-2][j][k+1]; _t_185_ -= u3[i-2][j][k+1]; _t_89_ -= u3[i-2][j][k-1]; _t_204_ -= u3[i-2][j][k-1]; _t_86_ += c1 * _t_89_; _t_53_ += _t_85_ * _t_86_; double _t_52_ = c2 * _t_53_; double _t_96_ = u1[i+1][j][k+2]; double _t_136_ = u1[i+1][j][k+2]; _t_96_ -= u1[i+1][j][k-2]; double _t_155_ = u1[i+1][j][k-2]; double _t_93_ = c2 * _t_96_; double _t_97_ = u1[i+1][j][k+1]; double _t_175_ = u1[i+1][j][k+1]; _t_97_ -= u1[i+1][j][k-1]; double _t_194_ = u1[i+1][j][k-1]; _t_93_ += c1 * _t_97_; double _t_95_ = 2.0 * mu[i+1][j][k]; _t_95_ += la[i+1][j][k]; double _t_94_ = _t_95_ * met2[i+1][j][k]; double _t_92_ = _t_94_ * met1[i+1][j][k]; double _t_91_ = _t_92_ * _t_93_; double _t_90_ = _t_91_ * strx[i]; double _t_102_ = u2[i+1][j][k+2]; double _t_141_ = u2[i+1][j][k+2]; _t_102_ -= u2[i+1][j][k-2]; double _t_160_ = u2[i+1][j][k-2]; double _t_100_ = c2 * _t_102_; double _t_103_ = u2[i+1][j][k+1]; double _t_180_ = u2[i+1][j][k+1]; _t_103_ -= u2[i+1][j][k-1]; double _t_199_ = u2[i+1][j][k-1]; _t_100_ += c1 * _t_103_; double _t_101_ = la[i+1][j][k] * met3[i+1][j][k]; double _t_99_ = _t_101_ * met1[i+1][j][k]; double _t_98_ = _t_99_ * _t_100_; _t_90_ += _t_98_ * stry[j]; double _t_106_ = la[i+1][j][k] * met4[i+1][j][k]; double _t_104_ = _t_106_ * met1[i+1][j][k]; double _t_107_ = u3[i+1][j][k+2]; double _t_147_ = u3[i+1][j][k+2]; _t_107_ -= u3[i+1][j][k-2]; double _t_166_ = u3[i+1][j][k-2]; double _t_105_ = c2 * _t_107_; double _t_108_ = u3[i+1][j][k+1]; double _t_186_ = u3[i+1][j][k+1]; _t_108_ -= u3[i+1][j][k-1]; double _t_205_ = u3[i+1][j][k-1]; _t_105_ += c1 * _t_108_; _t_90_ += _t_104_ * _t_105_; double _t_114_ = u1[i-1][j][k+2]; _t_136_ -= u1[i-1][j][k+2]; _t_114_ -= u1[i-1][j][k-2]; _t_155_ -= u1[i-1][j][k-2]; double 
_t_111_ = c2 * _t_114_; double _t_115_ = u1[i-1][j][k+1]; _t_175_ -= u1[i-1][j][k+1]; _t_115_ -= u1[i-1][j][k-1]; _t_194_ -= u1[i-1][j][k-1]; _t_111_ += c1 * _t_115_; double _t_113_ = 2.0 * mu[i-1][j][k]; _t_113_ += la[i-1][j][k]; double _t_112_ = _t_113_ * met2[i-1][j][k]; double _t_110_ = _t_112_ * met1[i-1][j][k]; double _t_109_ = _t_110_ * _t_111_; _t_90_ += _t_109_ * strx[i]; double _t_120_ = u2[i-1][j][k+2]; _t_141_ -= u2[i-1][j][k+2]; _t_120_ -= u2[i-1][j][k-2]; _t_160_ -= u2[i-1][j][k-2]; double _t_118_ = c2 * _t_120_; double _t_121_ = u2[i-1][j][k+1]; _t_180_ -= u2[i-1][j][k+1]; _t_121_ -= u2[i-1][j][k-1]; _t_199_ -= u2[i-1][j][k-1]; _t_118_ += c1 * _t_121_; double _t_119_ = la[i-1][j][k] * met3[i-1][j][k]; double _t_117_ = _t_119_ * met1[i-1][j][k]; double _t_116_ = _t_117_ * _t_118_; _t_90_ += _t_116_ * stry[j]; double _t_124_ = la[i-1][j][k] * met4[i-1][j][k]; double _t_122_ = _t_124_ * met1[i-1][j][k]; double _t_125_ = u3[i-1][j][k+2]; _t_147_ -= u3[i-1][j][k+2]; _t_125_ -= u3[i-1][j][k-2]; _t_166_ -= u3[i-1][j][k-2]; double _t_123_ = c2 * _t_125_; double _t_126_ = u3[i-1][j][k+1]; _t_186_ -= u3[i-1][j][k+1]; _t_126_ -= u3[i-1][j][k-1]; _t_205_ -= u3[i-1][j][k-1]; _t_123_ += c1 * _t_126_; _t_90_ += _t_122_ * _t_123_; _t_52_ += c1 * _t_90_; double r1ic0jc0kc0 = r1[i][j][k]; r1ic0jc0kc0 += _t_52_ * stry[j]; double _t_144_ = c2 * _t_146_; double _v_64_ = c2 * _t_135_; double _v_67_ = c2 * _t_140_; double _v_73_ = c2 * _t_154_; double _v_76_ = c2 * _t_159_; double _v_79_ = c2 * _t_165_; double _v_89_ = c2 * _t_185_; double _v_83_ = c2 * _t_174_; double _v_86_ = c2 * _t_179_; double _v_92_ = c2 * _t_193_; double _v_95_ = c2 * _t_198_; double _v_98_ = c2 * _t_204_; _t_144_ += c1 * _t_147_; double _v_65_ = c1 * _t_136_; double _v_68_ = c1 * _t_141_; double _v_74_ = c1 * _t_155_; double _v_77_ = c1 * _t_160_; double _v_80_ = c1 * _t_166_; double _v_90_ = c1 * _t_186_; double _v_84_ = c1 * _t_175_; double _v_87_ = c1 * _t_180_; double _v_93_ = c1 * _t_194_; 
double _v_96_ = c1 * _t_199_; double _v_99_ = c1 * _t_205_; double _t_145_ = mu[i][j][k+2] * met4[i][j][k+2]; double _t_143_ = _t_145_ * met1[i][j][k+2]; double _t_142_ = _t_143_ * _t_144_; double _t_128_ = _t_142_ * stry[j]; double _t_132_ = _v_64_; _t_132_ += _v_65_; double _t_134_ = 2.0 * mu[i][j][k+2]; double _t_139_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_134_ += la[i][j][k+2]; double _t_133_ = _t_134_ * met2[i][j][k+2]; double _t_11_ = la[i][j][k+2] * met2[i][j][k+2]; double _t_131_ = _t_133_ * met1[i][j][k+2]; double _t_130_ = _t_131_ * _t_132_; double _t_129_ = _t_130_ * strx[i]; _t_128_ += _t_129_ * stry[j]; double _t_138_ = _v_67_; _t_138_ += _v_68_; double _t_137_ = _t_139_ * met1[i][j][k+2]; _t_128_ += _t_137_ * _t_138_; double _t_151_ = _v_73_; _t_151_ += _v_74_; double _t_153_ = 2.0 * mu[i][j][k-2]; _t_153_ += la[i][j][k-2]; double _t_152_ = _t_153_ * met2[i][j][k-2]; double _t_23_ = la[i][j][k-2] * met2[i][j][k-2]; double _t_150_ = _t_152_ * met1[i][j][k-2]; double _t_149_ = _t_150_ * _t_151_; double _t_148_ = _t_149_ * strx[i]; _t_128_ += _t_148_ * stry[j]; double _t_157_ = _v_76_; _t_157_ += _v_77_; double _t_158_ = mu[i][j][k-2] * met3[i][j][k-2]; double _t_164_ = mu[i][j][k-2] * met4[i][j][k-2]; double _t_156_ = _t_158_ * met1[i][j][k-2]; _t_128_ += _t_156_ * _t_157_; double _t_163_ = _v_79_; _t_163_ += _v_80_; double _t_162_ = _t_164_ * met1[i][j][k-2]; double _t_161_ = _t_162_ * _t_163_; _t_128_ += _t_161_ * stry[j]; double _t_127_ = c2 * _t_128_; double _t_183_ = _v_89_; _t_183_ += _v_90_; double _t_184_ = mu[i][j][k+1] * met4[i][j][k+1]; double _t_182_ = _t_184_ * met1[i][j][k+1]; double _t_181_ = _t_182_ * _t_183_; double _t_167_ = _t_181_ * stry[j]; double _t_171_ = _v_83_; _t_171_ += _v_84_; double _t_173_ = 2.0 * mu[i][j][k+1]; double _t_178_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_173_ += la[i][j][k+1]; double _t_172_ = _t_173_ * met2[i][j][k+1]; double _t_36_ = la[i][j][k+1] * met2[i][j][k+1]; double _t_170_ = _t_172_ * met1[i][j][k+1]; 
double _t_169_ = _t_170_ * _t_171_; double _t_168_ = _t_169_ * strx[i+2]; _t_167_ += _t_168_ * stry[j]; double _t_177_ = _v_86_; _t_177_ += _v_87_; double _t_176_ = _t_178_ * met1[i][j][k+1]; _t_167_ += _t_176_ * _t_177_; double _t_190_ = _v_92_; _t_190_ += _v_93_; double _t_192_ = 2.0 * mu[i][j][k-1]; _t_192_ += la[i][j][k-1]; double _t_191_ = _t_192_ * met2[i][j][k-1]; double _t_48_ = la[i][j][k-1] * met2[i][j][k-1]; double _t_189_ = _t_191_ * met1[i][j][k-1]; double _t_188_ = _t_189_ * _t_190_; double _t_187_ = _t_188_ * strx[i-2]; _t_167_ += _t_187_ * stry[j]; double _t_196_ = _v_95_; _t_196_ += _v_96_; double _t_197_ = mu[i][j][k-1] * met3[i][j][k-1]; double _t_203_ = mu[i][j][k-1] * met4[i][j][k-1]; double _t_195_ = _t_197_ * met1[i][j][k-1]; _t_167_ += _t_195_ * _t_196_; double _t_202_ = _v_98_; _t_202_ += _v_99_; double _t_201_ = _t_203_ * met1[i][j][k-1]; double _t_200_ = _t_201_ * _t_202_; _t_167_ += _t_200_ * stry[j]; _t_127_ += c1 * _t_167_; r1ic0jc0kc0 += _t_127_; double _t_6_ = _t_139_; double _t_4_ = _t_6_ * met1[i][j][k+2]; double _t_7_ = u1[i][j+2][k+2]; _t_7_ -= u1[i][j-2][k+2]; double _t_5_ = c2 * _t_7_; double _t_8_ = u1[i][j+1][k+2]; _t_8_ -= u1[i][j-1][k+2]; _t_5_ += c1 * _t_8_; double _t_3_ = _t_4_ * _t_5_; double _t_2_ = _t_3_ * stry[j+2]; double _t_1_ = _t_2_ * strx[i]; double _t_9_ = _t_11_ * met1[i][j][k+2]; double _t_12_ = u2[i][j+2][k+2]; _t_12_ -= u2[i][j-2][k+2]; double _t_10_ = c2 * _t_12_; double _t_13_ = u2[i][j+1][k+2]; _t_13_ -= u2[i][j-1][k+2]; _t_10_ += c1 * _t_13_; _t_1_ += _t_9_ * _t_10_; double _t_18_ = _t_158_; double _t_16_ = _t_18_ * met1[i][j][k-2]; double _t_19_ = u1[i][j+2][k-2]; _t_19_ -= u1[i][j-2][k-2]; double _t_17_ = c2 * _t_19_; double _t_20_ = u1[i][j+1][k-2]; _t_20_ -= u1[i][j-1][k-2]; _t_17_ += c1 * _t_20_; double _t_15_ = _t_16_ * _t_17_; double _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; double _t_21_ = _t_23_ * met1[i][j][k-2]; double _t_24_ = u2[i][j+2][k-2]; _t_24_ -= u2[i][j-2][k-2]; double 
_t_22_ = c2 * _t_24_; double _t_25_ = u2[i][j+1][k-2]; _t_25_ -= u2[i][j-1][k-2]; _t_22_ += c1 * _t_25_; _t_1_ += _t_21_ * _t_22_; double _t_0_ = c2 * _t_1_; double _t_31_ = _t_178_; double _t_29_ = _t_31_ * met1[i][j][k+1]; double _t_32_ = u1[i][j+2][k+1]; _t_32_ -= u1[i][j-2][k+1]; double _t_30_ = c2 * _t_32_; double _t_33_ = u1[i][j+1][k+1]; _t_33_ -= u1[i][j-1][k+1]; _t_30_ += c1 * _t_33_; double _t_28_ = _t_29_ * _t_30_; double _t_27_ = _t_28_ * stry[j-2]; double _t_26_ = _t_27_ * strx[i]; double _t_34_ = _t_36_ * met1[i][j][k+1]; double _t_37_ = u2[i][j+2][k+1]; _t_37_ -= u2[i][j-2][k+1]; double _t_35_ = c2 * _t_37_; double _t_38_ = u2[i][j+1][k+1]; _t_38_ -= u2[i][j-1][k+1]; _t_35_ += c1 * _t_38_; _t_26_ += _t_34_ * _t_35_; double _t_43_ = _t_197_; double _t_41_ = _t_43_ * met1[i][j][k-1]; double _t_44_ = u1[i][j+2][k-1]; _t_44_ -= u1[i][j-2][k-1]; double _t_42_ = c2 * _t_44_; double _t_45_ = u1[i][j+1][k-1]; _t_45_ -= u1[i][j-1][k-1]; _t_42_ += c1 * _t_45_; double _t_40_ = _t_41_ * _t_42_; double _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; double _t_46_ = _t_48_ * met1[i][j][k-1]; double _t_49_ = u2[i][j+2][k-1]; _t_49_ -= u2[i][j-2][k-1]; double _t_47_ = c2 * _t_49_; double _t_50_ = u2[i][j+1][k-1]; _t_50_ -= u2[i][j-1][k-1]; _t_47_ += c1 * _t_50_; _t_26_ += _t_46_ * _t_47_; _t_0_ += c1 * _t_26_; r1ic0jc0kc0 += _t_0_; r1[i][j][k] = r1ic0jc0kc0; } } } __global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = 
(int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_i= (int)(blockDim.z); int i0 = (int)(blockIdx.z)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.z); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) { double _t_56_ = u2[i+2][j+2][k]; _t_56_ -= u2[i-2][j+2][k]; double _t_78_ = u2[i+2][j+2][k]; _t_78_ -= u2[i+2][j-2][k]; double _t_83_ = u2[i-2][j+2][k]; _t_83_ -= u2[i-2][j-2][k]; double _t_61_ = u2[i+2][j-2][k]; _t_61_ -= u2[i-2][j-2][k]; double _t_54_ = c2 * _t_56_; double _t_57_ = u2[i+1][j+2][k]; _t_57_ -= u2[i-1][j+2][k]; double _t_89_ = u2[i+1][j+2][k]; _t_89_ -= u2[i+1][j-2][k]; double _t_94_ = u2[i-1][j+2][k]; _t_94_ -= u2[i-1][j-2][k]; double _t_62_ = u2[i+1][j-2][k]; _t_62_ -= u2[i-1][j-2][k]; _t_54_ += c1 * _t_57_; double _t_55_ = mu[i][j+2][k] * met1[i][j+2][k]; double _t_53_ = _t_55_ * met1[i][j+2][k]; double _t_52_ = _t_53_ * _t_54_; double _t_59_ = c2 * _t_61_; double _v_40_ = c2 * _t_78_; double _v_43_ = c2 * _t_83_; double _v_47_ = c2 * _t_89_; double _v_50_ = c2 * _t_94_; _t_59_ += c1 * _t_62_; double _t_60_ = mu[i][j-2][k] * met1[i][j-2][k]; double _t_58_ = _t_60_ * met1[i][j-2][k]; _t_52_ += _t_58_ * _t_59_; double _t_51_ = c2 * _t_52_; double _t_77_ = la[i+2][j][k] * met1[i+2][j][k]; double _t_75_ = _t_77_ * met1[i+2][j][k]; double _t_76_ = _v_40_; double _t_79_ = u2[i+2][j+1][k]; _t_79_ -= u2[i+2][j-1][k]; double _t_67_ = u2[i+2][j+1][k]; _t_67_ -= 
u2[i-2][j+1][k]; double _t_72_ = u2[i+2][j-1][k]; _t_72_ -= u2[i-2][j-1][k]; double _t_84_ = u2[i-2][j+1][k]; _t_84_ -= u2[i-2][j-1][k]; _t_76_ += c1 * _t_79_; double _t_74_ = _t_75_ * _t_76_; double _t_82_ = la[i-2][j][k] * met1[i-2][j][k]; double _t_80_ = _t_82_ * met1[i-2][j][k]; double _t_81_ = _v_43_; _t_81_ += c1 * _t_84_; _t_74_ += _t_80_ * _t_81_; _t_51_ += c2 * _t_74_; double _t_88_ = la[i+1][j][k] * met1[i+1][j][k]; double _t_86_ = _t_88_ * met1[i+1][j][k]; double _t_87_ = _v_47_; double _t_90_ = u2[i+1][j+1][k]; _t_90_ -= u2[i+1][j-1][k]; double _t_68_ = u2[i+1][j+1][k]; _t_68_ -= u2[i-1][j+1][k]; double _t_73_ = u2[i+1][j-1][k]; _t_73_ -= u2[i-1][j-1][k]; double _t_95_ = u2[i-1][j+1][k]; _t_95_ -= u2[i-1][j-1][k]; _t_87_ += c1 * _t_90_; double _t_85_ = _t_86_ * _t_87_; double _t_93_ = la[i-1][j][k] * met1[i-1][j][k]; double _t_91_ = _t_93_ * met1[i-1][j][k]; double _t_92_ = _v_50_; _t_92_ += c1 * _t_95_; double _v_34_ = c1 * _t_68_; double _v_37_ = c1 * _t_73_; _t_85_ += _t_91_ * _t_92_; _t_51_ += c1 * _t_85_; double _t_65_ = c2 * _t_67_; double _v_36_ = c2 * _t_72_; _t_65_ += _v_34_; double _t_66_ = mu[i][j+1][k] * met1[i][j+1][k]; double _t_64_ = _t_66_ * met1[i][j+1][k]; double _t_63_ = _t_64_ * _t_65_; double _t_70_ = _v_36_; _t_70_ += _v_37_; double _t_71_ = mu[i][j-1][k] * met1[i][j-1][k]; double _t_69_ = _t_71_ * met1[i][j-1][k]; _t_63_ += _t_69_ * _t_70_; _t_51_ += c1 * _t_63_; double r1ic0jc0kc0 = r1[i][j][k]; r1ic0jc0kc0 += _t_51_; double _t_7_ = u1[i][j+2][k+2]; _t_7_ -= u1[i][j+2][k-2]; double _t_5_ = c2 * _t_7_; double _t_8_ = u1[i][j+2][k+1]; _t_8_ -= u1[i][j+2][k-1]; _t_5_ += c1 * _t_8_; double _t_6_ = mu[i][j+2][k] * met3[i][j+2][k]; double _t_4_ = _t_6_ * met1[i][j+2][k]; double _t_3_ = _t_4_ * _t_5_; double _t_2_ = _t_3_ * stry[j+1]; double _t_1_ = _t_2_ * strx[i]; double _t_11_ = mu[i][j+2][k] * met2[i][j+2][k]; double _t_9_ = _t_11_ * met1[i][j+2][k]; double _t_12_ = u2[i][j+2][k+2]; _t_12_ -= u2[i][j+2][k-2]; double _t_10_ = c2 * 
_t_12_; double _t_13_ = u2[i][j+2][k+1]; _t_13_ -= u2[i][j+2][k-1]; _t_10_ += c1 * _t_13_; _t_1_ += _t_9_ * _t_10_; double _t_19_ = u1[i][j-2][k+2]; _t_19_ -= u1[i][j-2][k-2]; double _t_17_ = c2 * _t_19_; double _t_20_ = u1[i][j-2][k+1]; _t_20_ -= u1[i][j-2][k-1]; _t_17_ += c1 * _t_20_; double _t_18_ = mu[i][j-2][k] * met3[i][j-2][k]; double _t_16_ = _t_18_ * met1[i][j-2][k]; double _t_15_ = _t_16_ * _t_17_; double _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; double _t_23_ = mu[i][j-2][k] * met2[i][j-2][k]; double _t_21_ = _t_23_ * met1[i][j-2][k]; double _t_24_ = u2[i][j-2][k+2]; _t_24_ -= u2[i][j-2][k-2]; double _t_22_ = c2 * _t_24_; double _t_25_ = u2[i][j-2][k+1]; _t_25_ -= u2[i][j-2][k-1]; _t_22_ += c1 * _t_25_; _t_1_ += _t_21_ * _t_22_; double _t_0_ = c2 * _t_1_; double _t_32_ = u1[i][j+1][k+2]; _t_32_ -= u1[i][j+1][k-2]; double _t_30_ = c2 * _t_32_; double _t_33_ = u1[i][j+1][k+1]; _t_33_ -= u1[i][j+1][k-1]; _t_30_ += c1 * _t_33_; double _t_31_ = mu[i][j+1][k] * met3[i][j+1][k]; double _t_29_ = _t_31_ * met1[i][j+1][k]; double _t_28_ = _t_29_ * _t_30_; double _t_27_ = _t_28_ * stry[j-1]; double _t_26_ = _t_27_ * strx[i]; double _t_36_ = mu[i][j+1][k] * met2[i][j+1][k]; double _t_34_ = _t_36_ * met1[i][j+1][k]; double _t_37_ = u2[i][j+1][k+2]; _t_37_ -= u2[i][j+1][k-2]; double _t_35_ = c2 * _t_37_; double _t_38_ = u2[i][j+1][k+1]; _t_38_ -= u2[i][j+1][k-1]; _t_35_ += c1 * _t_38_; _t_26_ += _t_34_ * _t_35_; double _t_44_ = u1[i][j-1][k+2]; _t_44_ -= u1[i][j-1][k-2]; double _t_42_ = c2 * _t_44_; double _t_45_ = u1[i][j-1][k+1]; _t_45_ -= u1[i][j-1][k-1]; _t_42_ += c1 * _t_45_; double _t_43_ = mu[i][j-1][k] * met3[i][j-1][k]; double _t_41_ = _t_43_ * met1[i][j-1][k]; double _t_40_ = _t_41_ * _t_42_; double _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; double _t_48_ = mu[i][j-1][k] * met2[i][j-1][k]; double _t_46_ = _t_48_ * met1[i][j-1][k]; double _t_49_ = u2[i][j-1][k+2]; _t_49_ -= u2[i][j-1][k-2]; double _t_47_ = c2 * _t_49_; double _t_50_ = 
u2[i][j-1][k+1]; _t_50_ -= u2[i][j-1][k-1]; _t_47_ += c1 * _t_50_; _t_26_ += _t_46_ * _t_47_; _t_0_ += c1 * _t_26_; r1ic0jc0kc0 += _t_0_; r1[i][j][k] = r1ic0jc0kc0; } } extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) { double *r1; cudaMalloc (&r1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for r1\n"); cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u1; cudaMalloc (&u1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u1\n"); cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u2; cudaMalloc (&u2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u2\n"); cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u3; cudaMalloc (&u3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u3\n"); cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met1; cudaMalloc (&met1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met1\n"); cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met2; cudaMalloc (&met2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met2\n"); cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met3; cudaMalloc (&met3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met3\n"); cudaMemcpy 
(met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met4; cudaMalloc (&met4, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met4\n"); cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); dim3 blockconfig_1 (16, 2, 2); dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z)); curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); }
4,230
#include <stdio.h>
#include <stdlib.h>
#define N 10
#define THREADS_PER_BLOCK 10

// Abort with a descriptive message if a CUDA runtime call failed.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Element-wise sum c = a + b over n ints.
// Grid-stride loop: correct for any grid/block configuration.
__global__ void gpuSum(int *a, int *b, int *c, int n)
{
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    while (idx < n) {
        c[idx] = a[idx] + b[idx];
        idx += blockDim.x * gridDim.x;
    }
}

// Fill an N x N row-major matrix with the values 0 .. N*N-1.
void fill_matrix(int *arr)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            arr[(i * N) + j] = (i * N) + j;
        }
    }
}

// Print an N x N matrix, tab-separated, one row per line.
void print_matrix(int *arr)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%d\t", arr[(i * N) + j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * N * N;

    // Allocate memory on host (checked: the original ignored malloc failure).
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    // Allocate memory on device.
    checkCuda(cudaMalloc((void **)&d_a, size), "cudaMalloc d_a");
    checkCuda(cudaMalloc((void **)&d_b, size), "cudaMalloc d_b");
    checkCuda(cudaMalloc((void **)&d_c, size), "cudaMalloc d_c");

    // Initialize input arrays.
    fill_matrix(a);
    fill_matrix(b);

    // Copy from host to device.
    checkCuda(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice), "copy a");
    checkCuda(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice), "copy b");

    // Perform sum on device and catch launch-configuration errors.
    gpuSum<<<N * N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N * N);
    checkCuda(cudaGetLastError(), "gpuSum launch");

    // Retrieve values from device to host (blocking copy syncs with kernel).
    checkCuda(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost), "copy c");

    // Print results.
    print_matrix(a);
    print_matrix(b);
    print_matrix(c);

    // Free host variables.
    free(a);
    free(b);
    free(c);

    // Free device variables.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
4,231
#include <stdio.h>

/*
 * firstParallel runs on the GPU: every thread in the launch executes the
 * printf, so a <<<1, 5>>> launch prints the message five times.
 */
__global__ void firstParallel()
{
    printf("This should be running in parallel.\n");
}

int main()
{
    // One block of five threads; device printf output is flushed once the
    // synchronize below completes.
    firstParallel<<<1, 5>>>();
    cudaDeviceSynchronize();
    return 0;
}
4,232
#include <cuda_runtime.h>
#include <stdio.h>

// Print each thread's coordinates together with the block and grid shapes,
// as seen from inside the kernel.
__global__ void checkDimension(){
    printf("threadIdx:(%d,%d,%d), blockIdx:(%d,%d,%d),blockDim:(%d,%d,%d),gridDim:(%d,%d,%d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}

int main(){
    // Total number of elements the launch must cover.
    const int nElem = 6;

    // Three threads per block; ceil-divide to get the block count.
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x);

    // Echo the configuration from the host side first.
    printf("grid:(%d,%d,%d)\n", grid.x, grid.y, grid.z);
    printf("block:(%d,%d,%d)\n", block.x, block.y, block.z);

    checkDimension<<<grid, block>>>();

    // Reset the device before exiting.
    cudaDeviceReset();
    return 0;
}
4,233
#include <cstdio> #include <cstdlib> #include <iostream> #include <fstream> int* d; int* graph; __constant__ int cuda_bf; __constant__ int cuda_total_vertex; __constant__ int cuda_tempVertex; #define INF 1e9 #define H2D cudaMemcpyHostToDevice #define D2H cudaMemcpyDeviceToHost using namespace std; int init_device () { cudaSetDevice(0); return 0; } #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) //extern __shared__ int D[]; __global__ void floyd_warshall_1(int* dist,int k ,int kbf){ int idx,idy; idx = k ; idy = k ; int i = cuda_bf * idx + threadIdx.y; int j = cuda_bf * idy + threadIdx.x; if(i>=cuda_total_vertex||j>=cuda_total_vertex) return ; __shared__ int D[32*32]; D[threadIdx.y*cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j]; __syncthreads(); // Put to shared memory??? int x = 0; //int dij = dist[i*total_vertex + j]; //int dik = dist[i*total_vertex + k]; //int dkj = dist[k*total_vertex + j]; int dij ,dik,dkj; int a = threadIdx.y * cuda_bf + threadIdx.x; int b = threadIdx.y * cuda_bf; while( x < cuda_bf ){ dij = D[a]; dik = D[b + x]; dkj = D[x*cuda_bf + threadIdx.x]; if(dij>dik+dkj){ D[a] = dik + dkj; } __syncthreads(); x++; } dist[i*cuda_tempVertex + j] = D[threadIdx.y*cuda_bf + threadIdx.x]; return ; } __global__ void floyd_warshall_2(int* dist,int k , int kbf ){ int idx,idy; if(blockIdx.x % 2 == 0 ){ idx = (blockIdx.x/2) >= k ? (blockIdx.x/2+1):(blockIdx.x/2); idy = k; } else { idx = k; idy = (blockIdx.x/2) >= k ? 
(blockIdx.x/2+1):(blockIdx.x/2); } int i = cuda_bf * idx + threadIdx.y; int j = cuda_bf * idy + threadIdx.x; //bool flag = 0; //if(i>=cuda_total_vertex||j>=cuda_total_vertex) // return; __shared__ int D2[32*32*2]; D2[threadIdx.y * cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j]; D2[(cuda_bf*cuda_bf) + (threadIdx.y *cuda_bf ) + (threadIdx.x)] = dist[ (kbf+threadIdx.y) * cuda_tempVertex + (kbf +threadIdx.x)]; __syncthreads(); // Put to shared memory??? int x = 0; int dij ,dik,dkj; int a = (threadIdx.y * cuda_bf + threadIdx.x); int b; if(blockIdx.x%2==0){ b = cuda_bf*cuda_bf + threadIdx.x; } else{ b = cuda_bf*cuda_bf + cuda_bf*threadIdx.y; } dij = D2[a]; while(x<cuda_bf){ if(blockIdx.x%2==0){ dik = D2[cuda_bf*threadIdx.y + x]; dkj = D2[b + (x*cuda_bf)]; } else{ dik = D2[b + x]; dkj = D2[x*cuda_bf + threadIdx.x]; } if(dij>dik+dkj){ dij = dik + dkj; } __syncthreads(); x++; } dist[i*cuda_tempVertex + j] = dij; return ; } __global__ void floyd_warshall_3(int* dist, int k ,int kbf){ int idx,idy; idy = blockIdx.y >= k? blockIdx.y + 1 : blockIdx.y; idx = blockIdx.x >= k? blockIdx.x + 1 : blockIdx.x; int i = cuda_bf * idx + threadIdx.y; int j = cuda_bf * idy + threadIdx.x; //if(i>=cuda_total_vertex||j>=cuda_total_vertex) // return ; __shared__ int D3[32*32*3]; D3[threadIdx.y * cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j]; D3[(cuda_bf*cuda_bf) + (threadIdx.y*cuda_bf) + threadIdx.x] = dist[(cuda_bf*idx+threadIdx.y)*cuda_tempVertex + (kbf + threadIdx.x)]; D3[(2*cuda_bf*cuda_bf) + (threadIdx.y*cuda_bf) + threadIdx.x] = dist[(kbf+threadIdx.y)*cuda_tempVertex + (idy*cuda_bf+threadIdx.x)]; __syncthreads(); // Put to shared memory??? 
int x = 0; int dij ,dik,dkj; int a =threadIdx.y * cuda_bf + threadIdx.x; int b = cuda_bf*cuda_bf + threadIdx.y*cuda_bf; int c = 2*cuda_bf*cuda_bf + threadIdx.x; dij = D3[a]; while(x<cuda_bf){ dik = D3[b + x]; dkj = D3[x*cuda_bf + c]; if(dij>dik+dkj){ dij = dik + dkj; } x++; } dist[i*cuda_tempVertex + j] = dij; return ; } __global__ void floyd_warshall_beta_1(int* dist, int k , int kbf ){ int idx,idy; idx = k; idy = k; int i = cuda_bf * idx + (blockIdx.x%cuda_bf); int j = cuda_bf * idy + threadIdx.x; if(i>=cuda_total_vertex||j>=cuda_total_vertex) return ; // Put to shared memory??? int dij = dist[i*cuda_tempVertex + j]; int dik = dist[i*cuda_tempVertex + kbf]; int dkj = dist[kbf*cuda_tempVertex + j]; if(dij>dik+dkj){ dist[i*cuda_tempVertex+j] = dik + dkj; } return ; } __global__ void floyd_warshall_beta_2(int* dist, int k , int kbf ){ int idx,idy; int temp = blockIdx.x / cuda_bf; if( (temp) % 2 == 0 ){ idx = (temp/2) >= k ? (temp/2+1):(temp/2); idy = k; } else { idx = k; idy = (temp/2) >= k ? (temp/2+1):(temp/2); } int i = cuda_bf * idx + (blockIdx.x%cuda_bf); int j = cuda_bf * idy + threadIdx.x; if(i>=cuda_total_vertex||j>=cuda_total_vertex) return ; // Put to shared memory??? int dij = dist[i*cuda_tempVertex + j]; int dik = dist[i*cuda_tempVertex + kbf]; int dkj = dist[kbf*cuda_tempVertex + j]; if(dij>dik+dkj){ dist[i*cuda_tempVertex+j] = dik + dkj; } return ; } __global__ void floyd_warshall_beta_3(int* dist, int k , int kbf ,int grid_size ){ int idx,idy; int temp = ((blockIdx.y*gridDim.x) + blockIdx.x) / cuda_bf; idx = temp/grid_size >= k? temp/grid_size + 1 : temp/grid_size; idy = temp % grid_size >= k? temp%grid_size + 1 : temp % grid_size; int i = cuda_bf * idx + (blockIdx.x%cuda_bf); int j = cuda_bf * idy + threadIdx.x; if(i>=cuda_total_vertex||j>=cuda_total_vertex) return ; // Put to shared memory??? 
int x = kbf + cuda_bf; int dij ,dik,dkj; while(kbf<x){ dij = dist[i*cuda_tempVertex + j]; dik = dist[i*cuda_tempVertex + kbf]; dkj = dist[kbf*cuda_tempVertex + j]; if(dij>dik+dkj){ dist[i*cuda_tempVertex + j] = dik + dkj; } //__syncthreads(); kbf++; } return; } int main(int argc,char* argv[]){ cudaEvent_t total_start, total_stop; cudaEvent_t com_start, com_stop; cudaEvent_t mem_start, mem_stop; cudaEvent_t io_start, io_stop; float total_temp=0,total_total=0,io_temp =0 , io_total=0 , com_temp =0,com_total=0 , mem_temp=0 , mem_total=0; cudaEventCreate(&total_start); cudaEventCreate(&total_stop); cudaEventCreate(&com_start); cudaEventCreate(&com_stop); cudaEventCreate(&mem_start); cudaEventCreate(&mem_stop); cudaEventCreate(&io_start); cudaEventCreate(&io_stop); cudaEventRecord(total_start); init_device(); struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); //fprintf(stderr,"clock rate %lf\n",prop.clockRate); int bf = atoi(argv[3]); int total_vertex; int edge_num; ifstream input; ofstream output; input.open(argv[1]); input >> total_vertex; input >> edge_num; int tempVertex = total_vertex % bf ? 
(total_vertex + (bf - (total_vertex%bf) )): total_vertex; //fprintf(stderr,"tempVertex:%d\n",tempVertex); d = new int[tempVertex*tempVertex]; graph = new int[tempVertex*tempVertex]; for(int i=0;i<tempVertex;i++){ for(int j=0;j<tempVertex;j++){ graph[i*tempVertex+j] = INF; } graph[i*tempVertex + i ]=0; } cudaEventRecord(io_start); for(int i=0;i<edge_num;i++){ int a,b; input >> a; input >> b; input >> graph[(a-1)*tempVertex + (b-1) ]; //fprintf(stderr,"graph %d %d :%d\n",a,b,graph[a*tempVertex+b]); } cudaEventRecord(io_stop); cudaEventSynchronize(io_stop); cudaEventElapsedTime(&io_temp,io_start,io_stop); io_total += io_temp; int* cuda_graph; cudaMalloc((void**)&cuda_graph,sizeof(int)*tempVertex*tempVertex); cudaCheckErrors("malloc gpu"); cudaEventRecord(mem_start); cudaMemcpy(cuda_graph,graph,sizeof(int)*tempVertex*tempVertex,H2D); cudaCheckErrors("memcpy gpu"); cudaMemcpyToSymbol(cuda_bf,&bf,sizeof(int)); cudaMemcpyToSymbol(cuda_total_vertex,&total_vertex,sizeof(int)); cudaMemcpyToSymbol(cuda_tempVertex,&tempVertex,sizeof(int)); cudaEventRecord(mem_stop); cudaEventSynchronize(mem_stop); cudaEventElapsedTime(&mem_temp,mem_start,mem_stop); mem_total += mem_temp; //int FWblockDim = total_vertex%bf ? (total_vertex/bf + 1) : total_vertex/bf; //int remainBF = total_vertex%bf? 
total_vertex%bf : bf ; int FWblockDim = tempVertex / bf ; dim3 threadStr(bf,bf); dim3 blockStr(FWblockDim-1,FWblockDim-1); dim3 blockStr2((FWblockDim-1)*bf,FWblockDim-1); cudaEventRecord(com_start); if( bf ==20 && edge_num/total_vertex <= 6){ for(int K=0;K<FWblockDim;K++){ printf("K=%d phase1\n",K); // Phase 1 floyd_warshall_1<<< 1,threadStr>>>( cuda_graph,K,K*bf); cudaCheckErrors("phase 1"); //cudaDeviceSynchronize(); // Phase 2 //printf("K=%d phase2\n",K); if(FWblockDim>1){ floyd_warshall_2<<< (FWblockDim-1)*2 ,threadStr>>>( cuda_graph,K,K*bf); cudaCheckErrors("phase 2 col"); //cudaDeviceSynchronize(); // Phase 3 //printf("K=%d phase3\n",K); floyd_warshall_3<<<blockStr,threadStr>>>(cuda_graph,K,K*bf); cudaCheckErrors("phase 3"); //cudaDeviceSynchronize(); } } } else{ for(int K=0;K<FWblockDim;K++){ // Phase 1 //printf("K=%d phase1\n",K); for(int i=0;i<bf;i++){ floyd_warshall_beta_1<<<bf,bf>>>(cuda_graph,K,K*bf + i); cudaCheckErrors("phase 1"); } //printf("K=%d phase2\n",K); //Phase 2 if(FWblockDim>1){ for(int i=0;i<bf;i++){ floyd_warshall_beta_2<<<(FWblockDim-1)*2*bf,bf>>>(cuda_graph,K,K*bf + i ); cudaCheckErrors("phase 2 col"); } //printf("K=%d phase3\n",K); //Phase 3 //fprintf(stderr,"qqq %d\n",(FWblockDim-1)*(FWblockDim-1)*bf); floyd_warshall_beta_3<<<blockStr2,bf>>>(cuda_graph,K,K*bf,FWblockDim-1); cudaCheckErrors("phase 3"); } } } cudaDeviceSynchronize(); // 時間計算是否要擺到前面?? cudaEventRecord(com_stop); cudaEventSynchronize(com_stop); cudaEventElapsedTime(&com_temp,com_start,com_stop); com_total+=com_temp; cudaEventRecord(mem_start); cudaMemcpy(graph,cuda_graph,sizeof(int)*tempVertex*tempVertex,D2H); cudaCheckErrors("copy back error"); cudaEventRecord(mem_stop); cudaEventSynchronize(mem_stop); cudaEventElapsedTime(&mem_temp,mem_start,mem_stop); mem_total += mem_temp; cudaEventRecord(io_start); output.open(argv[2]); // 每行最後面到底要不要加SPACE!!!!!!! 
for(int i=0;i<total_vertex;i++){ for(int j=0;j<total_vertex;j++){ if(graph[i*tempVertex+j]==INF){ output<<"INF"; } else{ output<<graph[i*tempVertex+j]; } output<<" "; } output<<endl; } cudaEventRecord(io_stop); cudaEventSynchronize(io_stop); cudaEventElapsedTime(&io_temp,io_start,io_stop); io_total += io_temp; cudaEventRecord(total_stop); cudaEventSynchronize(total_stop); cudaEventElapsedTime(&total_temp, total_start, total_stop); fprintf(stderr, "\n\n"); fprintf(stderr, "TOTAL = %f\n", total_temp); fprintf(stderr, "COMPUTE = %f\n", com_total); fprintf(stderr, "MEMORY = %f\n", mem_total); fprintf(stderr, "IO = %f\n", io_total); return 0; }
4,234
#include <iostream>
#include <cstdlib>

// Element-wise vector addition on the device: c[i] = a[i] + b[i].
// n is the element count; the guard makes the kernel safe for any grid size.
// (Previously the bounds check was commented out and every write was unguarded.)
__global__ void add(int *a, int *b, int *c, int n){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

// Fill p[0..s) with pseudo-random integers.
void random_ints(int *p, int s){
    for(int i = 0; i < s; i++){
        p[i] = rand();
    }
}

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

int main(){
    int *a, *b, *c;        // host copies
    int *d_a, *d_b, *d_c;  // device copies
    size_t size = N * sizeof(int);

    // Space allocation for device copies.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Alloc space for host copies of a, b, c and set up input values.
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    // Copy inputs to device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Ceil-divided grid so any N is fully covered, not just exact multiples.
    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    add<<<blocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);

    // Kernel launches do not return errors directly; surface them explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    }

    // Copy result back to host (this blocking copy also synchronizes with the kernel).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // Cleanup.
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);

    std::cout << "Done!" << std::endl;
    return 0;
}
4,235
#include <fstream> #include <string> #include <iostream> #include <map> #include <cstdlib> #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <ctype.h> #include <math.h> #include <unistd.h> #include <time.h> #include <assert.h> #include <cuda.h> // number of amino acids plus gap symbol static int Q = 21; // device selection (copied from previous assignment) static void selectGpu(int *gpu_num, int *num_devs) { // gpu_num: (I/O): I: Default choice, // O: best device, changed only if more than one device // num_devs: (O) Number of found devices. int best = *gpu_num; cudaGetDeviceCount(num_devs); if ( *num_devs > 1 ) { int dev_num; int max_cores = 0; for (dev_num = 0; dev_num < *num_devs; dev_num++) { cudaDeviceProp dev_properties; cudaGetDeviceProperties(&dev_properties, dev_num); if (max_cores < dev_properties.multiProcessorCount) { max_cores = dev_properties.multiProcessorCount; best = dev_num; } } *gpu_num = best; } } // device test (copied from previous assignment) static void testDevice(int devID) { // Check if we can run. Maybe do something more... cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { /* Simulated device. 
*/ printf("There is no device supporting CUDA.\n"); cudaThreadExit(); } else printf("Using GPU device number %d.\n", devID); } void getAlignmentDim(char* file_name, size_t &B, size_t &N){ std::string line; std::ifstream infile(file_name); // get length of sequences: N getline(infile, line); N = line.length(); infile.close(); infile.clear(); // get number of sequences: B B = 0; infile.open(file_name); if (infile){ while (getline(infile, line)){ B++; } } } void readAlignment(char* file_name, int* aln, size_t B, size_t N){ // amino acid to number dictionary std::map<char,int> aa_dict; aa_dict['R'] = 1; aa_dict['H'] = 2; aa_dict['K'] = 3; aa_dict['D'] = 4; aa_dict['E'] = 5; aa_dict['S'] = 6; aa_dict['T'] = 7; aa_dict['N'] = 8; aa_dict['Q'] = 9; aa_dict['C'] = 10; aa_dict['G'] = 11; aa_dict['P'] = 12; aa_dict['A'] = 13; aa_dict['I'] = 14; aa_dict['L'] = 15; aa_dict['M'] = 16; aa_dict['F'] = 17; aa_dict['W'] = 18; aa_dict['Y'] = 19; aa_dict['V'] = 20; aa_dict['X'] = 21; aa_dict['-'] = 21; // fill aln matrix: BxN std::string line; std::ifstream infile(file_name); if (infile){ int b = 0; while (getline(infile, line)){ for (int i = 0; i < line.length(); i++){ aln[b*N + i] = aa_dict[line[i]]; } b++; } } } int delta(int a, int b){ return a == b; } void getFreqSingle(int* aln, float* f_single, size_t B, size_t N){ for (int i = 0; i < N; i++){ for (int k = 0; k < Q; k++) f_single[k*N + i] = 0.0; for (int b = 0; b < B; b++){ int k = aln[b*N + i] - 1; f_single[k*N + i] += 1.0; } for (int k = 0; k < Q; k++) f_single[k*N + i] /= (float)B; } } __global__ void getFreqSingleOnDevice(int* aln, float* f_single, size_t B, size_t N, int Q){ // Grid dimensions: N x Q int i = threadIdx.x; if(i < N){ for (int k = 0; k < Q; k++) f_single[k*N + i] = 0.0; for (int b = 0; b < B; b++){ int k = aln[b*N + i] - 1; f_single[k*N + i] += 1.0; } for (int k = 0; k < Q; k++) f_single[k*N + i] /= (float)B; } } void getFreqPair(int* aln, float* f_pair, size_t B, size_t N){ for (int i = 0; i < N; i++){ for 
(int j = 0; j < N; j++){ for (int k = 0; k < Q; k++) for (int l = 0; l < Q; l++) f_pair[(Q*i + k) * Q*N + (Q*j + l)] = 0.0; for (int b = 0; b < B; b++){ int k = aln[b*N + i] - 1; int l = aln[b*N + j] - 1; f_pair[(Q*i + k) * Q*N + (Q*j + l)] += 1.0; } for (int k = 0; k < Q; k++){ for (int l = 0; l < Q; l++){ f_pair[(Q*i + k) * Q*N + (Q*j + l)] /= (float)B; } } } } } __global__ void getFreqPairOnDevice(int* aln, float* f_pair, size_t B, size_t N, int Q){ // Grid dimensions: N x N int i = threadIdx.x; int j = threadIdx.y; if(i<N && j<N){ for (int k = 0; k < Q; k++) for (int l = 0; l < Q; l++) f_pair[(Q*i + k) * Q*N + (Q*j + l)] = 0.0; for (int b = 0; b < B; b++){ int k = aln[b*N + i] - 1; int l = aln[b*N + j] - 1; f_pair[(Q*i + k) * Q*N + (Q*j + l)] += 1.0; } for (int k = 0; k < Q; k++){ for (int l = 0; l < Q; l++){ f_pair[(Q*i + k) * Q*N + (Q*j + l)] /= (float)B; //f_pair[(Q*j + l) * Q*N + (Q*i + k)] = f_pair[(Q*i + k) * Q*N + (Q*j + l)]; } } } } void getCovMat(float* f_single, float* f_pair, float* cov_mat, size_t B, size_t N){ for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ for (int k = 0; k < Q; k++){ for (int l = 0; l < Q; l++){ cov_mat[(Q*i + k) * Q*N + (Q*j + l)] = f_pair[(Q*i + k) * Q*N + (Q*j + l)] - (f_single[k*N + i] * f_single[l*N + j]); } } } } } __global__ void getCovMatOnDevice(float* f_single, float* f_pair, float* cov_mat, size_t B, size_t N, int Q){ // Grid dimensions: N x N int i = threadIdx.x; int j = threadIdx.y; for (int k = 0; k < Q; k++){ for (int l = 0; l < Q; l++){ cov_mat[(Q*i + k) * Q*N + (Q*j + l)] = f_pair[(Q*i + k) * Q*N + (Q*j + l)] - (f_single[k*N + i] * f_single[l*N + j]); } } } int main(int argc, char** argv){ // Check available device. 
int devID = 0, num_devs = 1; selectGpu(&devID, &num_devs); testDevice(devID); // number of sequences in aln size_t B; // length of each sequence size_t N; // Observe B and N from input file getAlignmentDim(argv[1], B, N); // Read aln from input file // each amino acid/gap is represented by an integer std::cout << "Read alignment.." << std::endl; int* aln = (int*) std::malloc(B*N * sizeof(int)); readAlignment(argv[1], aln, B, N); // Host calculations: time_t start_h = clock(); // calculate column-wise amino acid frequencies float* f_single = (float*) std::malloc(Q*N * sizeof(float)); getFreqSingle(aln, f_single, B, N); // calculate column-wise amino acid frequencies // for each possible pair of amino acids and columns float* f_pair = (float*) std::malloc(Q*N * Q*N * sizeof(float)); getFreqPair(aln, f_pair, B, N); // calculate covariance matrix from frequencies float* cov_mat = (float*) std::malloc(Q*N * Q*N * sizeof(float)); getCovMat(f_single, f_pair, cov_mat, B, N); time_t end_h = clock(); // Device calculations: time_t start_d = clock(); // calculate column-wise amino acid frequencies int* aln_d; float* f_single_d; assert(cudaSuccess == cudaMalloc((void**) &aln_d, B*N * sizeof(int))); assert(cudaSuccess == cudaMalloc((void**) &f_single_d, Q*N * sizeof(float))); cudaMemcpy(aln_d, aln, B*N * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(f_single_d, f_single, Q*N * sizeof(float), cudaMemcpyHostToDevice); std::cout << "Calculate single frequencies.." << std::endl; getFreqSingleOnDevice <<< N, 1 >>> (aln_d, f_single_d, B, N, Q); // calculate column-wise amino acid frequencies // for each possible pair of amino acids and columns float* f_pair_d; assert(cudaSuccess == cudaMalloc((void**) &f_pair_d, Q*N * Q*N * sizeof(float))); cudaMemcpy(f_pair_d, f_pair, Q*N * Q*N * sizeof(float), cudaMemcpyHostToDevice); std::cout << "Calculate pair frequencies.." 
<< std::endl; getFreqPairOnDevice <<< N, N >>> (aln_d, f_pair_d, B, N, Q); // calculate covariance matrix from frequencies float* cov_mat_d; assert(cudaSuccess == cudaMalloc((void**) &cov_mat_d, Q*N * Q*N * sizeof(float))); cudaMemcpy(cov_mat_d, cov_mat, Q*N * Q*N * sizeof(float), cudaMemcpyHostToDevice); std::cout << "Calculate covariance matrix.." << std::endl; getCovMatOnDevice <<< N, N >>> (f_single_d, f_pair_d, cov_mat_d, B, N, Q); // copy covariance matrix back from device float* cov_mat_from_d = (float*) malloc(Q*N * Q*N * sizeof(float)); cudaMemcpy(cov_mat_from_d, cov_mat_d, Q*N * Q*N * sizeof(float), cudaMemcpyDeviceToHost); time_t end_d = clock(); float t_full = ((float)end_d - (float)start_h) / CLOCKS_PER_SEC; float t_host = ((float)end_h - (float)start_h) / CLOCKS_PER_SEC; float t_dev = ((float)end_d - (float)start_d) / CLOCKS_PER_SEC; printf("\nTiming:\nFull: %f\nHost: %f\nDevice: %f\n\n", t_full, t_host, t_dev); std::cout << B << ' ' << N << ' ' << Q << std::endl; //for (int i = 0; i < B; i++){ // for (int j = 0; j < N; j++){ // std::cout << aln[i*N +j] << ' '; // } // std::cout << std::endl; //} //for (int i = 0; i < Q; i++){ // for (int j = 0; j < N; j++){ // std::cout << f_single[i*N + j] << ' '; // } // std::cout << std::endl; //} float err = 0.0; for (int i = 0; i < N*Q; i++){ int j; //std::cout << std::endl << i << ' '; for (j = 0; j < N*Q; j++){ err += (cov_mat_from_d[i*N*Q + j] - cov_mat[i*N*Q + j]) / N*Q; //std::cout << ' ' << j << '/' << cov_mat_from_d[i*N*Q + j] << '/' << cov_mat[i*N*Q + j]; } } std::cout << std::endl << err << std::endl; return 0; }
4,236
#include "includes.h"

// Squares array[i] in place for the first n entries.
// Uses a one-element-per-block layout: blockIdx.x selects the element.
__global__ void square(int *array, int n)
{
    const int tid = blockIdx.x;
    if (tid >= n)
        return;
    const int v = array[tid];
    array[tid] = v * v;
}
4,237
#include <stdio.h>
#include <cuda.h>

const int N = 10;

// One thread per output element (flat id -> (ii, jj)).
// Accumulates result = matrix^T * matrix:
//   result[ii][jj] += sum_k matrix[k][ii] * matrix[k][jj]
// Because of the +=, result must be zero-filled before launch.
// Launch config must supply exactly size*size threads (here <<<N, N>>>).
__global__ void square(int *matrix, int *result, int size)
{
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned ii = id / size;
    unsigned jj = id % size;
    for (unsigned kk = 0; kk < size; ++kk) {
        result[ii * size + jj] += matrix[kk * size + ii] * matrix[kk * size + jj];
    }
}

// Fill row 0 with 0..size-1 and each row i >= 1 with i, i+1, ..., i+size-1.
// (The original wrote one element past row 0 with `i <= size`; that slot was
// immediately overwritten by the row loop, so `i < size` is equivalent and
// no longer touches memory outside row 0.)
void init_matrix(int *matrix, int size)
{
    for (int i = 0; i < size; i++) {
        matrix[i] = i;
    }
    for (int i = 1; i < size; i++) {
        for (int j = i * size, k = i; j < i * size + size; j++, k++) {
            matrix[j] = k;
        }
    }
    printf("matrix initialized\n");
}

// Print a size x size matrix, one row per line.
void print_matrix(int *matrix, int size)
{
    printf("printing matrix:\n");
    for (int i = 0; i < size * size; i++) {
        if (i % size == 0 && i != 0) {
            printf("\n");
        }
        printf("%d\t", matrix[i]);
    }
    printf("\n");
}

int main(int argc, char **argv)
{
    int size = N * N;                 // element count
    int space = sizeof(int) * size;   // byte count

    int *matrix = (int *)malloc(space);
    // calloc so the host result starts zeroed (the original never allocated
    // `result` at all and copied from the dangling pointer).
    int *result = (int *)calloc(size, sizeof(int));
    int *d_matrix, *d_result;

    cudaMalloc((void **)&d_matrix, space);
    cudaMalloc((void **)&d_result, space);

    // Initialize host data BEFORE copying it to the device (the original
    // copied first and initialized afterwards, uploading garbage).
    init_matrix(matrix, N);
    printf("initialized out\n");
    print_matrix(matrix, N);

    // Copy full byte counts (`space`), not the element count (`size`),
    // which the original passed as the cudaMemcpy length.
    cudaMemcpy(d_matrix, matrix, space, cudaMemcpyHostToDevice);
    // Zero the accumulator the kernel += into.
    cudaMemset(d_result, 0, space);

    // Launch with the DEVICE pointer (the original passed host `matrix`).
    square<<<N, N>>>(d_matrix, d_result, N);

    cudaMemcpy(result, d_result, space, cudaMemcpyDeviceToHost);
    print_matrix(result, N);

    cudaFree(d_matrix);
    cudaFree(d_result);
    free(matrix);
    free(result);
    return 0;
}
4,238
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
//Since this is matrix multiplication, A.width must be equal to B.height and the final matrix has height A.height and width B.width
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

// Row-major matrix: elements[row * width + col].
typedef struct {
    int width;
    int height;
    float* elements;
} Matrix;

// Thread block size
#define BLOCK_SIZE 32 //32 is the max since it means there are 32*32=1024 threads operating for each block
#define ARR_DIM (2048*2048)
#define WIDTH 2048
#define HEIGHT 2048

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(float *A, float *B, float *C);

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Allocates fresh device copies of A, B, C, launches the kernel, copies C
// back, and frees the device memory again -- all on EVERY call (main() calls
// this 500 times, so device allocation cost is paid each iteration).
// No CUDA error checking is performed.
void MatMul(Matrix *A, Matrix *B, Matrix *C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A->width;
    d_A.height = A->height;
    size_t size = A->width * A->height * sizeof(float);
    cudaMalloc((void **)&d_A.elements, size);
    cudaMemcpy(d_A.elements, A->elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B->width;
    d_B.height = B->height;
    size = B->width * B->height * sizeof(float);
    cudaMalloc((void **)&d_B.elements, size);
    cudaMemcpy(d_B.elements, B->elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C->width;
    d_C.height = C->height;
    size = C->width * C->height * sizeof(float);
    cudaMalloc((void **)&d_C.elements, size);
    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B->width / dimBlock.x, A->height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A.elements, d_B.elements, d_C.elements); //This is causing a segmentation fault
    // Read C from device memory
    cudaMemcpy(C->elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul()
// One thread per output element; NO bounds check, so the grid must cover the
// WIDTH x HEIGHT output exactly (it does here: 2048 is a multiple of 32).
__global__ void MatMulKernel(float *A, float *B, float *C)
{
    // Each thread computes one element of C
    // by accumulating results into Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < WIDTH; ++e)
        Cvalue += A[row * WIDTH + e] * B[e * WIDTH + col];
    C[row * WIDTH + col] = Cvalue;
    //printf("VAL=%f row=%d col=%d\n", Cvalue, row, col);
}

// Despite the name, writes the constant 1.654981 into every element it
// touches; no RNG involved (`val` is declared but never used).
__global__ void cudaRandomize(float *arr)
{
    float val;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    arr[row*WIDTH + col] = 1.654981;
}

// Host-side ARR_DIM random fill; defined but never called by main().
float* generateRandArray()
{
    float* a = (float *)malloc(ARR_DIM*sizeof(float));
    int i = 0;
    for(i; i < ARR_DIM; i++){
        a[i] = rand()%100 + 1;
    }
    return a;
}

// Benchmark driver: fills A and B on the device via cudaRandomize, copies
// them back to host Matrix structs, then runs the full MatMul round trip
// (device alloc + H2D + kernel + D2H + free) 500 times.
int main()
{
    Matrix *A,*B,*C;
    A = (Matrix *)malloc(sizeof(Matrix));
    B = (Matrix *)malloc(sizeof(Matrix));
    C = (Matrix *)malloc(sizeof(Matrix));
    A->width = WIDTH;
    A->height = HEIGHT;
    B->width=WIDTH;
    B->height = HEIGHT;
    C->width = WIDTH;
    C->height = HEIGHT;
    A->elements = (float *)malloc(ARR_DIM*sizeof(float));
    B->elements = (float *)malloc(ARR_DIM*sizeof(float));
    float * d_A, *d_B;
    size_t size = A->width * A->height * sizeof(float);
    cudaMalloc((void**)&d_A, size);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(A->width / dimBlock.x, A->height / dimBlock.y);
    cudaRandomize<<<dimGrid, dimBlock>>>(d_A);
    cudaMemcpy(A->elements, d_A, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    size = B->width * B->height * sizeof(float);
    cudaMalloc((void**)&d_B, size);
    dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid2(B->width / dimBlock.x, B->height / dimBlock.y);
    cudaRandomize<<<dimGrid2, dimBlock2>>>(d_B);
    cudaMemcpy(B->elements, d_B, size, cudaMemcpyDeviceToHost);
    cudaFree(d_B);
    C->elements = (float *)malloc(ARR_DIM*sizeof(float));
    for(int i = 0; i < 500; i++){
        printf("i=%d\n",i);
        MatMul(A,B,C);
    }
    // Element buffers are freed; the Matrix structs themselves leak (benign
    // at process exit).
    free(A->elements);
    free(B->elements);
    free(C->elements);
    return 0;
}
4,239
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

extern "C" {

// GEMM on row-major doubles: C = alpha * A(m x k) * B(k x n) + beta * C.
// One thread per C element; dimensions and scalars arrive as device pointers
// (C linkage keeps the symbol name stable for external launchers).
__global__ void dgemm(double *A, double *B, double *C, int *m, int *n, int *k,
                      double *alpha, double *beta) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int rows = *m;
    const int cols = *n;
    const int inner = *k;

    if (row < rows && col < cols) {
        double acc = 0;
        // alpha stays inside the loop to keep the original rounding order.
        for (int t = 0; t < inner; t++) {
            acc += (*alpha) * A[row * inner + t] * B[t * cols + col];
        }
        C[row * cols + col] = acc + (*beta) * C[row * cols + col];
    }
}

}  // extern "C"
4,240
#include "includes.h"

// SAXPY (y = a*x + y) over float4-packed data, staged through shared memory
// with software double buffering: two full buffer sets (suffix _0 and _1,
// one float per thread per float4 component) alternate so that the global
// load for one stream overlaps the compute/store of the other.
// Each thread services two interleaved element streams (idx0 and idx1),
// CTA_COUNT * COMPUTE_THREADS_PER_CTA float4s apart.
// timer_vals is accepted but never used in this kernel body.
// NOTE(review): casting x/y to float4* assumes both pointers are 16-byte
// aligned and the element count is a multiple of 4 -- confirm at call site.
__global__ void saxpy_float4s_shmem_doublebuffer ( float* y, float* x, float a, clock_t * timer_vals)
{
    // Buffer set 0: x components.
    volatile __shared__ float sdata_x0_0 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_x1_0 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_x2_0 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_x3_0 [COMPUTE_THREADS_PER_CTA];
    // Buffer set 0: y components.
    volatile __shared__ float sdata_y0_0 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_y1_0 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_y2_0 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_y3_0 [COMPUTE_THREADS_PER_CTA];
    // Buffer set 1: x components.
    volatile __shared__ float sdata_x0_1 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_x1_1 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_x2_1 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_x3_1 [COMPUTE_THREADS_PER_CTA];
    // Buffer set 1: y components.
    volatile __shared__ float sdata_y0_1 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_y1_1 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_y2_1 [COMPUTE_THREADS_PER_CTA];
    volatile __shared__ float sdata_y3_1 [COMPUTE_THREADS_PER_CTA];

    int tid = threadIdx.x ;
    unsigned int idx0, idx1;
    // Stream 0 starts at this CTA's slice; stream 1 starts one full
    // grid-width (CTA_COUNT * threads) further along.
    idx0 = blockIdx.x * COMPUTE_THREADS_PER_CTA + tid;
    idx1 = COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + tid;

    float4 * x_as_float4 = (float4 *)x;
    float4 * y_as_float4 = (float4 *)y;
    float4 result_y;

    // Each trip handles two float4 elements (one per stream), hence i += 2
    // over NUM_ITERS/4 float4 iterations.
    for (int i=0; i < NUM_ITERS/4; i+=2)
    {
        float4 tmp1_x, tmp1_y;
        __syncthreads();
        // Load the stream-0 element for this iteration.
        tmp1_x = x_as_float4[idx0];
        tmp1_y = y_as_float4[idx0];
        if (i!=0)
        {
            // Drain buffer set 1 (filled at the end of the previous trip):
            // compute a*x + y and store to the previous stream-1 slot.
            result_y.x = a * sdata_x0_1[tid] + sdata_y0_1[tid];
            result_y.y = a * sdata_x1_1[tid] + sdata_y1_1[tid];
            result_y.z = a * sdata_x2_1[tid] + sdata_y2_1[tid];
            result_y.w = a * sdata_x3_1[tid] + sdata_y3_1[tid];
            y_as_float4[idx1] = result_y;
            idx1 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ;
        }
        // Park the stream-0 element in buffer set 0.
        sdata_x0_0[tid] = tmp1_x.x;
        sdata_x1_0[tid] = tmp1_x.y;
        sdata_x2_0[tid] = tmp1_x.z;
        sdata_x3_0[tid] = tmp1_x.w;
        sdata_y0_0[tid] = tmp1_y.x;
        sdata_y1_0[tid] = tmp1_y.y;
        sdata_y2_0[tid] = tmp1_y.z;
        sdata_y3_0[tid] = tmp1_y.w;
        __syncthreads();
        // Load the stream-1 element while buffer set 0 is consumed below.
        tmp1_x = x_as_float4[idx1];
        tmp1_y = y_as_float4[idx1];
        // Drain buffer set 0: compute and store to the stream-0 slot.
        result_y.x = a * sdata_x0_0[tid] + sdata_y0_0[tid];
        result_y.y = a * sdata_x1_0[tid] + sdata_y1_0[tid];
        result_y.z = a * sdata_x2_0[tid] + sdata_y2_0[tid];
        result_y.w = a * sdata_x3_0[tid] + sdata_y3_0[tid];
        y_as_float4[idx0] = result_y;
        idx0 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ;
        // Park the stream-1 element in buffer set 1 for the next trip.
        sdata_x0_1[tid] = tmp1_x.x;
        sdata_x1_1[tid] = tmp1_x.y;
        sdata_x2_1[tid] = tmp1_x.z;
        sdata_x3_1[tid] = tmp1_x.w;
        sdata_y0_1[tid] = tmp1_y.x;
        sdata_y1_1[tid] = tmp1_y.y;
        sdata_y2_1[tid] = tmp1_y.z;
        sdata_y3_1[tid] = tmp1_y.w;
    }
    __syncthreads();
    // Epilogue: the loop exits with buffer set 1 still holding the last
    // stream-1 element; compute and store it.
    result_y.x = a * sdata_x0_1[tid] + sdata_y0_1[tid];
    result_y.y = a * sdata_x1_1[tid] + sdata_y1_1[tid];
    result_y.z = a * sdata_x2_1[tid] + sdata_y2_1[tid];
    result_y.w = a * sdata_x3_1[tid] + sdata_y3_1[tid];
    y_as_float4[idx1] = result_y;
}
4,241
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <vector>
#include <thrust/iterator/zip_iterator.h>

// Reads doubles from stdin and counts how many adjacent pairs strictly rise.

typedef thrust::tuple<double, double> D2;
typedef thrust::device_vector<double>::iterator DIter;
typedef thrust::tuple<DIter, DIter> DIter2Tuple;
typedef thrust::zip_iterator<DIter2Tuple> DIter2Iterator;

// Maps an adjacent pair (x[i], x[i+1]) to 1 when it strictly increases,
// otherwise 0.
struct up {
    __host__ __device__ int operator()(D2 pair) {
        return thrust::get<0>(pair) < thrust::get<1>(pair) ? 1 : 0;
    }
};

int main() {
    // Slurp every double from stdin into host memory.
    std::vector<double> samples;
    double value;
    while (scanf("%lf", &value) > 0)
        samples.push_back(value);

    thrust::device_vector<double> dev(samples);
    thrust::device_vector<int> rising(samples.size() - 1);

    // Zip x[i] with x[i+1] so each dereference yields one adjacent pair.
    DIter2Iterator first = thrust::make_zip_iterator(
        thrust::make_tuple(dev.begin(), dev.begin() + 1));
    DIter2Iterator last = thrust::make_zip_iterator(
        thrust::make_tuple(dev.end() - 1, dev.end()));

    // Flag the rising pairs, then sum the flags.
    thrust::transform(first, last, rising.begin(), up());
    int num_up = thrust::reduce(rising.begin(), rising.end(), 0,
                                thrust::plus<int>());
    printf("num_up: %d\n", num_up);
}
4,242
/* This is a automatically generated test. Do not modify */
/* (Review note: only comments were added below; code tokens are untouched.) */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Generated stress kernel: evaluates a fixed chain of float expressions
 * (several early assignments to `comp` are dead stores, which is expected
 * for generated code) and prints the final value with full precision. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
   comp = var_2 + var_3 - sinf((var_4 + (+1.2894E-41f / var_5)));
   comp = (var_6 * +1.8840E7f + (-1.1719E-36f * (var_7 / (-1.6936E-37f + var_8))));
   float tmp_1 = -1.1775E-43f;
   comp += tmp_1 * var_9 / (var_10 + -1.1235E17f - var_11);
   for (int i=0; i < var_1; ++i) {
     comp = (var_12 - var_13);
   }
   if (comp > sinf((+1.4881E36f / (-1.4537E-43f * var_14 * (var_15 + log10f((-1.3986E-11f / (-1.5525E-42f / -1.7609E14f + log10f(-1.4843E36f - var_16 * (+0.0f / asinf((var_17 - +1.7630E35f)))))))))))) {
     comp = (var_18 - (+1.2324E-44f * +1.7307E10f));
     float tmp_2 = (+1.0020E-35f - var_19 * -1.5871E-44f / atan2f(expf(-1.9903E-17f), -1.1727E36f));
     comp += tmp_2 / var_20 * tanhf(var_21 - (var_22 - var_23));
     comp = -1.3157E34f / +1.7804E-35f / +1.4261E-37f;
   }
   printf("%.17g\n", comp);
}

/* Allocates a 10-element float array filled with v (unused by main). */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

/* Parses 24 command-line values and launches the kernel single-threaded. */
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
  /* Block until the in-kernel printf has flushed. */
  cudaDeviceSynchronize();

  return 0;
}
4,243
#include <stdio.h>

// Barrier-divergence test case: when (dxN + 1 == 1) holds, threads with
// t < 14 jump to ERROR and bypass the __syncthreads() below, while threads
// with t >= 14 still reach it -- only part of the block arrives at the
// barrier, which is undefined behavior. The original author's comment below
// marks this as the deliberate defect under test.
__global__ void perm(int pad[])
{
    int t = threadIdx.x;
    int dxN;
    // t >= 0 is always true for threadIdx.x, so every thread loads pad[0].
    if ( t >= 0 ) {
        dxN = pad[0];
    }
    if ( t < 14 ) {
        if ( t >= 0 ) {
            // The following branch is the reason
            // comment it out to get the correct behavior
            // (the goto also guards the division below against pad[0] == -1,
            // i.e. a zero divisor)
            if (dxN + 1 == 1) goto ERROR;
            pad[2] = 5 / (dxN + 1);
        }
    }
    if ( t == 1 ) {
        pad[0] = 112211;
    }
    __syncthreads();
    if ( t == 17 ) {
        pad[1] = pad[0];
    }
ERROR:
    // Every thread that falls through or jumps here stamps pad[2].
    pad[2] = 321321321;
}

// Host driver: zero-fills a 3-int device buffer, runs perm with one block of
// 20 threads, and prints the resulting pad contents.
int main()
{
    int h_pad[3];
    int *dev_pad = 0;
    cudaMalloc(&dev_pad, sizeof(h_pad));
    cudaMemset(dev_pad, 0, sizeof(h_pad));
    perm<<< 1, 20 >>>(dev_pad);
    cudaMemcpy(h_pad, dev_pad, sizeof(h_pad), cudaMemcpyDeviceToHost);
    printf("pad[0] = %d pad[1] = %d pad[2] = %d\n", h_pad[0], h_pad[1], h_pad[2]);
    return 0;
}
4,244
#include <cuda.h>

// Writes each thread's own lane index into the matching slot of p.
// Expects a 1-D block with p sized to at least blockDim.x ints.
__global__ void foo(int *p)
{
    const int lane = threadIdx.x;
    p[lane] = lane;
}
4,245
#include<ctime>
#include <cmath>
#include<iostream>
#include <cstdlib>
using namespace std;

#define BLOCK_SIZE 1024

// One thread per day: sums that day's `seconds` consecutive per-second
// prices into sumpricesout[day]. N (total element count) is unused but kept
// so the kernel's launch signature stays unchanged.
__global__ void gpuSum(int *prices, int *sumpricesout, int days, int seconds, int N)
{
    int currentday = blockIdx.x*blockDim.x + threadIdx.x;
    if (currentday < days)
    {
        int start = currentday * seconds;
        int end = start + seconds;
        int totprice = 0;
        for (int j = start; j < end; ++j)
            totprice += prices[j];
        sumpricesout[currentday] = totprice;
    }
}

int main()
{
    int days = 1200000;
    int seconds = 1000;

    clock_t start;
    double duration;
    start = std::clock();

    // NOTE: days*seconds ints is ~4.8 GB of host RAM; `new` throws on failure.
    int N = days * seconds;
    int *prices = new int[N];
    // Value-initialize (the trailing "()") both result arrays to zero: the
    // CPU reference accumulates with +=, so the original uninitialized
    // allocations produced garbage sums and a spurious nonzero Error below.
    int *sumpricesout = new int[days]();
    int *sumpricesoutCPU = new int[days]();

    for (int i = 0; i < N; i++)
        prices[i] = rand() % 100;

    // CPU reference: sum each day's slice.
    for (int i = 0; i < N; i = i + seconds)
        for (int j = i; j < i + seconds; j++)
            sumpricesoutCPU[i/seconds] += prices[j];

    duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
    cout << "CPU: " << duration << "s" << '\n';

    long sizePrices = (long)N * sizeof(int);
    long sizeSumPrices = (long)days * sizeof(int);
    int *dPrices, *dSumPrices;

    start = std::clock();
    cudaMalloc(&dPrices, sizePrices);
    cudaMalloc(&dSumPrices, sizeSumPrices);
    cudaMemcpy(dPrices, prices, sizePrices, cudaMemcpyHostToDevice);
    // No upload of sumpricesout needed: the kernel overwrites every entry
    // (the original copied an uninitialized buffer up for nothing).
    gpuSum<<<(int)ceil(days/(float)BLOCK_SIZE), BLOCK_SIZE>>>(dPrices, dSumPrices, days, seconds, N);
    cudaMemcpy(sumpricesout, dSumPrices, sizeSumPrices, cudaMemcpyDeviceToHost);
    duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
    cout << "GPU: " << duration << "s" << '\n';

    // Sum of signed per-day differences; 0 when GPU and CPU agree.
    int error = 0;
    for (int i = 0; i < days; i++)
        error += sumpricesout[i] - sumpricesoutCPU[i];
    cout << "Error: " << error << endl;

    cudaFree(dPrices);
    cudaFree(dSumPrices);
    delete[] prices;
    delete[] sumpricesout;
    delete[] sumpricesoutCPU;
    return 0;
}
4,246
#include "includes.h"

// Block-wise partial dot product: each block writes its partial sum of a·b
// into c[blockIdx.x]; the caller must finish the reduction over blocks.
// `threadsPerBlock` and `N` come from includes.h. The shared-memory tree
// reduction below assumes blockDim.x is a power of two and equals
// threadsPerBlock — TODO confirm against the launch configuration.
__global__ void dot(float *a, float *b, float *c) {
    __shared__ float cache[threadsPerBlock];  // one partial sum per thread
    int cacheIndex = threadIdx.x;
    float temp = 0.0;
    // Grid-stride loop: each thread accumulates products at indices
    // tid, tid + gridSize, tid + 2*gridSize, ... below N.
    for (int tid = threadIdx.x + blockIdx.x*blockDim.x; tid<N; tid += blockDim.x*gridDim.x) {
        temp += a[tid]*b[tid];
    }
    cache[cacheIndex] = temp;
    __syncthreads();  // all partials written before the reduction reads them
    // reduction
    for (int i = blockDim.x/2; i>0; i /= 2) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();  // outside the divergent if, so every thread reaches it
    }
    if (threadIdx.x == 0)
        c[blockIdx.x] = cache[0];
}
4,247
/*
 * Université Pierre et Marie Curie
 * Neutron transport computation — CUDA version.
 * (Original header said "sequential version"; this file is the GPU port.)
 */
//nvcc -o exec neutron-par.cu -O3 --generate-code arch =compute_35, code=sm_35 && ./exec
//nvcc -o exec neutron-par.cu -O3 --generate-code arch=compute_35,code=sm_35 && ./exec
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include<curand_kernel.h>
#include <thrust/remove.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define THREADS_PER_BLOCK 1024
#define OUTPUT_FILE "/tmp/cuda-absorbed.dat"
char info[] = "\
Usage:\n\
    neutron-cuda H Nb C_c C_s\n\
\n\
    H  : épaisseur de la plaque\n\
    Nb : nombre d'échantillons\n\
    C_c: composante absorbante\n\
    C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
    neutron-seq 1.0 500000000 0.5 0.5\n\
";
/*
 * Host-side uniform random number generator over [0,1).
 * NOTE(review): init_uniform_random_number/uniform_random_number and the two
 * thrust predicates below are not called anywhere in this file; they look
 * like leftovers from earlier versions.
 */
struct drand48_data alea_buffer;
struct is_not_zero {
    __host__ __device__ bool operator()(float x) { return x == 0; }
};
struct is_even {
    __host__ __device__ bool operator()(const int x) { return (x % 2) == 0; }
};
void init_uniform_random_number() {
    srand48_r(0, &alea_buffer);
}
float uniform_random_number() {
    double res = 0.0;
    drand48_r(&alea_buffer, &res);
    return res;
}
/*
 * Our gettimeofday(): wall-clock time in seconds as a double.
 */
double my_gettimeofday(){
    struct timeval tmp_time;
    gettimeofday(&tmp_time, NULL);
    return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}
/*
 * Kernel: one thread simulates one neutron (thread j < n). Each neutron walks
 * through a plate of thickness h until it is reflected (x < 0 -> counter r),
 * transmitted (x >= h -> counter t) or absorbed (position recorded in
 * absorbed[] at a slot reserved by atomicAdd on counter b).
 */
__global__ void neutron_gpu(int n,int* r,int* t,int* b, float* absorbed,float c, float c_c, float c_s, float h){
    // distance travelled by the neutron before the next collision
    float L;
    // neutron direction (0 <= d <= PI)
    float d;
    // uniform random variable
    float u;
    // particle position (0 <= x <= h)
    float x;
    //(n,r,t,b,absorbed,c,c_c,c_s,L,h,d,x,u)
    int j, old;
    unsigned int seed;
    curandState state;
    j = threadIdx.x+blockIdx.x*blockDim.x;
    seed = j;  // per-thread seed = global thread id
    curand_init(seed, 0, 0, &state);
    /*if (j == 0) printf(" j=%d r=%d t=%d b=%d\n",j,*r, *t, *b);*/
    if(j<n){
        d = 0.0;
        x = 0.0;
        while (1) {
            u = curand_uniform(&state);
            L = -(1 / c) * log(u);    // exponential free-flight length
            x = x + L * cos(d);
            if (x < 0) {
                // reflected back out of the plate
                atomicAdd(r, 1);
                break;
            } else if (x >= h) {
                // transmitted through the plate
                atomicAdd(t, 1);
                break;
            } else if ((u = curand_uniform(&state)) < c_c / c) {
                // absorbed: reserve a unique slot and record the position
                old = atomicAdd(b, 1);
                absorbed[old] = x;
                /* if(absorbed[*b]==0){ printf("x=%f et *b=%d\n",x,*b); } */
                break;
            } else {
                // scattered: pick a new direction in [0, PI)
                u = curand_uniform(&state);
                d = u * M_PI;
            }
        }
    }
}
int main(int argc, char *argv[]) {
    // The mean distance between neutron/atom interactions is 1/c.
    // c_c and c_s are the absorbing and scattering components of c.
    float c, c_c, c_s;
    // plate thickness
    float h;
    // number of samples
    int n;
    // number of reflected, absorbed and transmitted neutrons
    int r, b, t;
    // timing
    double start, finish;
    //int i, j = 0; // counters
    float* absorbed;
    float* g_absorbed;
    int *gpu_r, *gpu_t, *gpu_b;
    if( argc == 1)
        fprintf( stderr, "%s\n", info);
    // default values
    h = 1.0;
    n = 500000000;//500000000
    c_c = 0.5;
    c_s = 0.5;
    // command-line overrides
    if (argc > 1) h = atof(argv[1]);
    if (argc > 2) n = atoi(argv[2]);
    if (argc > 3) c_c = atof(argv[3]);
    if (argc > 4) c_s = atof(argv[4]);
    r = b = t = 0;
    c = c_c + c_s;
    // echo the parameters for verification
    printf("Épaisseur de la plaque : %4.g\n", h);
    printf("Nombre d'échantillons : %d\n", n);
    printf("C_c : %g\n", c_c);
    printf("C_s : %g\n", c_s);
    absorbed = (float *) calloc(n, sizeof(float));
    int NB_BLOCK=(n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;  // ceil-div grid size
    // GPU ALLOCATION (timed together with transfers and the kernel)
    start = my_gettimeofday();
    cudaMalloc((void**)&g_absorbed, n*sizeof(float));
    cudaMalloc((void**)&gpu_b, sizeof(int));
    cudaMalloc((void**)&gpu_r, sizeof(int));
    cudaMalloc((void**)&gpu_t, sizeof(int));
    // COPY CPU -> GPU (zero-initialized counters and absorbed array)
    cudaMemcpy(gpu_r, &r, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_t, &t, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(g_absorbed, absorbed,n*sizeof(float), cudaMemcpyHostToDevice);
    // KERNEL LAUNCH
    neutron_gpu<<<NB_BLOCK,THREADS_PER_BLOCK>>>(n, gpu_r, gpu_t, gpu_b, g_absorbed, c, c_c, c_s, h);
    cudaDeviceSynchronize();
    // COPY GPU -> CPU; only the first b absorbed positions are meaningful
    cudaMemcpy(&b, gpu_b, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(absorbed, g_absorbed,b*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&r, gpu_r, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&t, gpu_t, sizeof(int), cudaMemcpyDeviceToHost);
    finish = my_gettimeofday();
    printf("Nombre neutrons refléchis : %d\n",r);
    printf("Nombre neutrons absorbés : %d\n",b);
    printf("Nombre neutrons transmis : %d\n",t);
    printf("\nPourcentage des neutrons refléchis : %4.2g\n", (float) r / (float) n);
    printf("Pourcentage des neutrons absorbés : %4.2g\n", (float) b / (float) n);
    printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
    printf("\nTemps total de calcul: %.8g sec\n", finish - start);
    printf("Millions de neutrons /s: %.2g\n", (double) n / ((finish - start)*1e6));
    // write positions of absorbed neutrons to a file (disabled)
    /*
    int j;
    FILE *f_handle = fopen(OUTPUT_FILE, "w");
    if (!f_handle) {
        fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
        exit(EXIT_FAILURE);
    }
    for (j = 0; j < b; j++){
        fprintf(f_handle, "%f\n", absorbed[j]);
    }
    fclose(f_handle);
    printf("Result written in " OUTPUT_FILE "\n");
    */
    cudaFree(g_absorbed);
    cudaFree(gpu_r);
    cudaFree(gpu_t);
    cudaFree(gpu_b);
    free(absorbed);
    return EXIT_SUCCESS;
}
4,248
#include <stdio.h>
#include <cuda_runtime.h>

// Each thread prints its index and the corresponding element of `test`.
// Expects a 1-D, single-block launch with one thread per element.
__global__ void print(int *test)
{
    int id = threadIdx.x;
    printf("%d: %d\n", id, test[id]);
    __syncthreads();
}

int main()
{
    int test_h[20], *test_d;
    for (int i = 0; i < 20; i++) {
        test_h[i] = i;
    }
    // FIXES vs. original:
    //  * cudaMallocPitch was used for a single row and the returned pitch was
    //    ignored — a plain cudaMalloc is the correct call for 1-D data;
    //  * `result` was assigned twice and never checked — errors were silent;
    //  * the program exited right after the launch with no synchronization,
    //    so device-side printf output was not guaranteed to be flushed.
    cudaError_t result = cudaMalloc((void**)&test_d, 20 * sizeof(int));
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(result));
        return 1;
    }
    result = cudaMemcpy(test_d, test_h, 20 * sizeof(int), cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(result));
        cudaFree(test_d);
        return 1;
    }
    print<<<1, 20>>>(test_d);
    result = cudaGetLastError();      // catches launch-configuration errors
    if (result != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(result));
    }
    cudaDeviceSynchronize();          // wait for the kernel; flushes device printf
    cudaFree(test_d);
    return 0;
}
4,249
#include <stdio.h>
#include <stdlib.h>

// Reports whether the current GPU can overlap kernel execution with
// host<->device copies (prop.deviceOverlap). User-facing messages are kept
// in French exactly as in the original program.
int main(void)
{
    cudaDeviceProp prop;
    int whichDevice;
    // FIX: the original ignored both return codes, so with no usable device
    // it would have read an uninitialized `prop` and reported nonsense.
    cudaError_t err = cudaGetDevice(&whichDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDevice: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    err = cudaGetDeviceProperties(&prop, whichDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    // NOTE(review): deviceOverlap is deprecated in the CUDA runtime in favor
    // of checking asyncEngineCount > 0; kept here to preserve behavior.
    if (! prop.deviceOverlap) {
        printf("Le GPU ne gère pas les recouvrement !\n");
        printf("Pas d'accélération possible avec les flux...\n");
    } else {
        printf("Le GPU gère les recouvrement :)\n");
        printf("Utilise les Flux, et que ca saute !\n");
    }
    return 0;
}
4,250
#include "includes.h"
// Optimized using shared memory and on chip memory
// Compile source: $- nvcc src/TokamakSimulation.cu -o nBody -lglut -lm -lGLU -lGL
// Run Executable: $- ./nBody
//To stop hit "control c" in the window you launched it from.
//Make movies https://gist.github.com/JPEGtheDev/db078e1b066543ce40580060eee9c1bf
#define NR_NEUTRONS 8
#define NR_ELECTRONS 8
#define NR_PROTONS 8
//atomic mass (u)
#define MASS_PROTON 1.007276
#define MASS_NEUTRON 1.008664
#define MASS_ELECTRON 5.485799e-4
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 6.67408E-11
#define H 1.0
#define EYE 8.5
#define FAR 80.0
#define SHAPE_CT 24
#define SHAPE_SIZE 256
#define PATH "./objects/Tokamak_256.obj" //256 vertices-shape (for array simplicity)
#define N 16*16*16

//***********************
// TODO:
// Check units velocity calculation mag
// ಠ_ಠ
//***********************

// Globals
float4 *p;
float3 *v, *f, *reactor,*r_GPU0, *r_GPU1;
float4 *p_GPU0, *p_GPU1;

// Pairwise gravitational force of body p1 on body p0 (w carries the mass).
// NOTE(review): `1/sqrt(r2)` promotes to double; rsqrtf would be faster but
// changes the numerics, so it is left untouched here.
__device__ float3 getBodyBodyForce(float4 p0, float4 p1){
    float3 f;
    float dx = p1.x - p0.x;
    float dy = p1.y - p0.y;
    float dz = p1.z - p0.z;
    float r2 = dx*dx + dy*dy + dz*dz;
    float inv_r = 1/sqrt(r2);
    float force = (G*p0.w*p1.w)/(r2);// - (H*p0.w*p1.w)/(r2*r2);
    f.x = force*dx*inv_r;
    f.y = force*dy*inv_r;
    f.z = force*dz*inv_r;
    return(f);
}

// Tiled all-pairs force accumulation: each thread owns one body
// (g_pos[id+offset]) and walks all bodies tile-by-tile through shared memory.
// `offset` selects this device's slice of bodies; `device_ct` scales the
// number of tiles so the whole N-body set is covered across devices.
__global__ void getForces(float4 *g_pos, float3 *force, int offset, int device_ct){
    int ii;
    float3 force_b2b, forceSum;
    float4 posMe;
    __shared__ float4 shPos[BLOCK];
    int id = threadIdx.x + blockDim.x*blockIdx.x;
    forceSum.x = 0.0;
    forceSum.y = 0.0;
    forceSum.z = 0.0;
    posMe.x = g_pos[id+offset].x;
    posMe.y = g_pos[id+offset].y;
    posMe.z = g_pos[id+offset].z;
    posMe.w = g_pos[id+offset].w;
    for(int j=0; j < gridDim.x*device_ct; j++)
    {
        // Stage one tile of positions in shared memory.
        shPos[threadIdx.x] = g_pos[threadIdx.x + blockDim.x*j];
        __syncthreads();
        #pragma unroll 32
        for(int i=0; i < blockDim.x; i++)
        {
            ii = i + blockDim.x*j;
            if(ii != id+offset && ii < N)
            {
                force_b2b = getBodyBodyForce(posMe, shPos[i]);
                forceSum.x += force_b2b.x;
                forceSum.y += force_b2b.y;
                forceSum.z += force_b2b.z;
            }
        }
        // BUG FIX: this barrier was missing. Without it, a fast thread could
        // start iteration j+1 and overwrite shPos[threadIdx.x] while slower
        // threads were still reading tile j — a shared-memory data race.
        __syncthreads();
    }
    if(id <N){
        force[id].x = forceSum.x;
        force[id].y = forceSum.y;
        force[id].z = forceSum.z;
    }
}
4,251
////////////////////////////////////////////////////////////
//Ho Thien Luan -> History Tracking!
// 1. multi_pat_asm_naive_cpu.cu
// 2.
//
//
//
////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#define FILENAME_MAXLEN 256

// CPU-only baseline for multi-pattern approximate (Hamming-distance) string
// matching over the DNA alphabet A/C/G/T. Fixed-length patterns are encoded
// as packed mismatch bit-vectors (4 bits per pattern), so one integer add per
// input character updates the distance counters of all patterns at once.
// Usage: prog <patternFile> <inputFile> <k>  (note argv[1] is the PATTERN
// file and argv[2] the input — reversed relative to the strcpy order below).
// NOTE(review): no argc check before reading argv[1..3]; with 4 bits per
// counter this encoding supports at most 8 patterns and per-counter
// distances up to 15 — confirm inputs respect these limits.
int main(int argc, char **argv)
{
    char inputFile[FILENAME_MAXLEN];
    char patternFile[FILENAME_MAXLEN];
    strcpy( inputFile, argv[2]) ;
    strcpy( patternFile, argv[1]) ;
    //int k_par = 4;
    int k_par;    // maximum allowed Hamming distance for a "match"
    k_par = strtol(argv[3], NULL, 10);
    ////////////////////////////////////////////////////////////////////////////////////
    //Process input patterns
    int pattern_size;
    char *h_pattern = NULL ;
    int len;
    size_t sizeOfTableEntry ;
    size_t sizeOfTableInBytes ; // numOfTableEntry * sizeOfTableEntry
    size_t sizeOfTableDecodeInBytes ; // numOfTableEntry * sizeOfTableEntry
    size_t sizeOfPatternInBytes ; // no_of_patterns * sizeOfTableEntry
    int max_pattern_length = 0;
    int no_of_patterns = 0;
    FILE* fpattern = fopen( patternFile, "rb");
    assert ( NULL != fpattern ) ;
    // obtain pattern file size
    fseek (fpattern , 0 , SEEK_END);
    pattern_size = ftell (fpattern);
    rewind (fpattern);
    // allocate a buffer to contains all patterns
    h_pattern = (char *) malloc (sizeof(char)*pattern_size);
    assert( NULL != h_pattern );
    // copy the file into the buffer
    pattern_size = fread (h_pattern, 1, pattern_size, fpattern);
    fclose(fpattern);
    //printf ("pattern size = %d\n",pattern_size);
    //printf ("pattern = %s\n",h_pattern);
    // First pass over the pattern buffer: count non-empty lines and track the
    // longest line (+1 for the newline) to size the tables.
    len = 0;
    for( int i = 0 ; i < pattern_size ; i++){
        if ( '\n' == h_pattern[i] ){
            if ( (i > 0) && ('\n' != h_pattern[i-1]) ){ // non-empty line
                no_of_patterns = no_of_patterns + 1;
                if (max_pattern_length < len+1) {max_pattern_length = len+1;}
            }
            len = 0 ;
        }else{
            len++ ;
        }
    }
    // Create pattern_table, pattern_length_table
    sizeOfTableEntry = sizeof(int) ;
    sizeOfPatternInBytes = no_of_patterns * sizeOfTableEntry; // 1-D to store size of each patterns
    sizeOfTableInBytes = no_of_patterns * max_pattern_length * sizeOfTableEntry; //2-D to store patterns
    sizeOfTableDecodeInBytes = 4 * (max_pattern_length-1) * sizeOfTableEntry; // 4 vectors (A/C/G/T) per position
    int* pattern_table = (int*) malloc( sizeOfTableInBytes ) ;
    int* pattern_length_table = (int*) malloc( sizeOfPatternInBytes ) ;
    int* h_pattern_decode = (int*) malloc( sizeOfTableDecodeInBytes ) ;
    // Second pass: fill pattern_table (row-major, one row per pattern) and
    // record each pattern's length.
    len = 0;
    int no_patterns = 0;
    for( int i = 0 ; i < pattern_size ; i++){
        if ( '\n' == h_pattern[i] ){
            if ( (i > 0) && ('\n' != h_pattern[i-1]) ){ // non-empty line
                pattern_length_table[no_patterns] = len;
                no_patterns = no_patterns + 1;
            }
            len = 0 ;
        }else{
            pattern_table[no_patterns*max_pattern_length + len] = h_pattern[i];
            len++ ;
        }
    }
    //Print to pattern_table/pattern_length_table to check
    /*
    for (int i = 0; i < no_of_patterns; i++) {
        printf("\npattern no %d has length = %d-> ",i, pattern_length_table[i]);
        for (int j = 0; j < pattern_length_table[i]; j++) {
            printf("%4d",pattern_table[i*max_pattern_length+j]);
        }
    }
    */
    //printf ("\n");
    // Preprocessing: for each pattern position i and each base X in {A,C,G,T},
    // build a packed vector whose 4-bit field for pattern j is 1 when pattern
    // j does NOT have base X at position i (i.e. a mismatch contribution).
    unsigned int vector_A = 0;
    unsigned int vector_C = 0;
    unsigned int vector_G = 0;
    unsigned int vector_T = 0;
    int b = 4;              // bits per pattern counter
    int pow_2b = 1 << b;    // counter modulus (16)
    for (int i = 0; i< (max_pattern_length-1); i++) {
        vector_A = 0;
        vector_C = 0;
        vector_G = 0;
        vector_T = 0;
        for (int j = 0; j< no_of_patterns; j++) {
            vector_A = vector_A << b;
            if (pattern_table[i + max_pattern_length*j] != 65) {vector_A = vector_A + 1;};  // 65 = 'A'
        }
        h_pattern_decode[4*i] = vector_A;
        for (int j = 0; j< no_of_patterns; j++) {
            vector_C = vector_C << b;
            if (pattern_table[i + max_pattern_length*j] != 67) {vector_C = vector_C + 1;};  // 67 = 'C'
        }
        h_pattern_decode[4*i+1] = vector_C;
        for (int j = 0; j< no_of_patterns; j++) {
            vector_G = vector_G << b;
            if (pattern_table[i + max_pattern_length*j] != 71) {vector_G = vector_G + 1;};  // 71 = 'G'
        }
        h_pattern_decode[4*i+2] = vector_G;
        for (int j = 0; j< no_of_patterns; j++) {
            vector_T = vector_T << b;
            if (pattern_table[i + max_pattern_length*j] != 84) {vector_T = vector_T + 1;};  // 84 = 'T'
        }
        h_pattern_decode[4*i+3] = vector_T;
    }
    // for (int i = 0; i < (max_pattern_length-1)*4; i++) {
    //     printf("i = %d -> h_pattern_decode = %d\n",i,h_pattern_decode[i]);
    // }
    ///////////////////////////////////////////////////////////////
    //Prepare input string
    int input_size;
    char *h_input_string = NULL ;
    int *h_matched_result = NULL ;
    //open to read file
    FILE* fpin = fopen( inputFile, "rb");
    assert ( NULL != fpin ) ;
    // sets the file position of the stream to the given offset.
    fseek (fpin , 0 , SEEK_END);
    input_size = ftell (fpin);
    rewind (fpin);
    // allocate memory to contain the whole file
    h_input_string = (char *) malloc (sizeof(char)*input_size);
    assert( NULL != h_input_string );
    size_t size_matched_result = sizeOfTableEntry * input_size * no_of_patterns;
    h_matched_result = (int *) malloc (size_matched_result); // each input has no_of_patterns results
    assert( NULL != h_matched_result );
    memset( h_matched_result, 0, size_matched_result ) ;
    // copy the file into the buffer
    input_size = fread (h_input_string, 1, input_size, fpin);
    fclose(fpin);
    //////////////////
    //printf("\ninput size -> %4d -> \n",input_size);
    // printf("%s\n",h_input_string);
    // AmSM with Naive Method in CPU: for each input offset i, sum the packed
    // mismatch vectors over the pattern window, then unpack the per-pattern
    // Hamming distances (4 bits each) into h_matched_result.
    unsigned int vector = 0;
    struct timespec t_start, t_end;
    double elapsedTime;
    clock_gettime (CLOCK_REALTIME, &t_start);
    //printf ("starttime s = %li, ns = %li\n",t_start.tv_sec, t_start.tv_nsec);
    for(int i = 0; i < input_size-max_pattern_length+1; i++) {
        vector = 0;
        for (int k = 0; k < max_pattern_length-1; k++) {
            if (h_input_string[i+k] == 65) { vector = vector + h_pattern_decode[4*k]; }
            else if (h_input_string[i+k] == 67) { vector = vector + h_pattern_decode[4*k+1]; }
            else if (h_input_string[i+k] == 71) { vector = vector + h_pattern_decode[4*k+2]; }
            else if (h_input_string[i+k] == 84) { vector = vector + h_pattern_decode[4*k+3]; }
            //printf("vector = %d, \n",vector);
        }
        // Unpack: lowest 4 bits belong to the last pattern, and so on.
        for (int j = no_of_patterns-1; j >= 0; j--) {
            h_matched_result[i*no_of_patterns+j] = vector % pow_2b;
            vector = vector >> b;
        }
    }
    clock_gettime(CLOCK_REALTIME, &t_end);
    //printf ("endtime s = %li, ns = %li\n",t_end.tv_sec, t_end.tv_nsec);
    elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000);
    // Print Result: count (offset, pattern) pairs within distance k_par.
    int total_result = 0;
    for(int i = 0; i < input_size-max_pattern_length+1; i++) {
        for (int j = 0; j < no_of_patterns; j++) {
            //printf("Input location %d with pattern %d has Hamming distance = %d\n",i, j, h_matched_result[i*no_of_patterns+j]);
            if(h_matched_result[i*no_of_patterns+j] <= k_par) {total_result++;}
        }
    }
    printf("\n\n\n");
    printf("###########################################################\n");
    printf("#--Multi Fix-Length Patterns Approximate String Matching--#\n");
    printf("#---------------------------------------------------------#\n");
    printf("#---------------Proposed PMASM Alg. in CPU----------------#\n");
    printf("###########################################################\n");
    printf("#--No of Patterns |\t\t %10d \t #\n",no_of_patterns);
    printf("#---------------------------------------------------------#\n");
    printf("#--Pattern Length |\t\t %10d \t #\n",max_pattern_length-1);
    printf("#---------------------------------------------------------#\n");
    printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size );
    printf("#---------------------------------------------------------#\n");
    printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result);
    printf("#---------------------------------------------------------#\n");
    printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime);
    printf("#---------------------------------------------------------#\n");
    printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) );
    printf("###########################################################\n");
    free(h_pattern);
    free(h_input_string);
    free(h_matched_result);
    free(pattern_table);
    free(pattern_length_table);
    free(h_pattern_decode);
    return 0;
}
4,252
// Constant memory
__constant__ int legendU[2500];          // upper legends, concatenated
__constant__ int sizesOfLegendsU[100];   // sizes of each of upper legends
__constant__ int shiftsOfLegendsU[100];  // prefix sums of sizes, e.g. where legends begins
__constant__ int legendL[2500];          // left legends, concatenated
__constant__ int sizesOfLegendsL[100];   // sizes of each of left legends
__constant__ int shiftsOfLegendsL[100];  // prefix sums of sizes, e.g. where legends begins
__constant__ int heightWidth[4];         // height, width, height*width of puzzle, population size
__constant__ int numberOfMutations = 4;  // bits flipped per new individual

// create offspring
__device__ void mutate(int * gridPopulation, int * randomCross, int index);
__device__ void cross(int * gridPopulation, int * gridChildren, int i1, int i2, int * randomCross);
// count fitness
__device__ void needlemanParallel(int * fitness, int* legend1D, int sizeOfLegend, int beginningOfLegend, int * gridSlice, int sliceSize);
/// Single thread computation of fitness
__device__ int countFitness(int * gridPopulation, int index);
__device__ int fitnessColumn(int * gridPopulation, int index, int column, int * H0, int * H1, int * gridSlice);
__device__ int fitnessRow(int * gridPopulation, int index, int row, int * H0, int * H1, int * gridSlice);
__device__ int needlemanOpt(int* legend1D, int sizeOfLegend, int beginningOfLegend, int* gridSlice, int sliceSize, int * H0, int * H1);

// creates new generation of individuals and mutates them
// gridPopulation - current population
// gridChildren - array, in which new generation will be stored
// randomCross - random permutation of 1.. size of grid, determines which bits will be copied from which parent.
//               also used for selection which bits to mutate.
// randomSelection - random permutation 1.. number of individuals, determines touples of parents
// One thread per individual (1-D launch).
extern "C" __global__ void createChildren(int * gridPopulation, int * gridChildren, int * randomCross, int * randomSelection) {
    int ind = blockDim.x * blockIdx.x + threadIdx.x ; // number of individual in population
    int p1 = ind * heightWidth[2];                    // beginning of grid
    int p2 = randomSelection[ind]* heightWidth[2];    // beginning of grid of the other parent
    cross(gridPopulation, gridChildren, p1, p2, randomCross);
    mutate(gridChildren, randomCross, p1);
}

// puts together a child from two parents; which half of the bits comes from
// which parent is determined by the permutation randomCross. The child is
// written over parent1's slot in gridChildren.
__device__ void cross(int * gridPopulation, int * gridChildren, int parent1, int parent2, int * randomCross){
    for (int i = 0; i < heightWidth[2]/2 ; i++) {
        gridChildren[parent1 + randomCross[i]] = gridPopulation[parent1 + randomCross[i]];
    }
    for (int i = heightWidth[2]/2; i < heightWidth[2] ; i++) {
        gridChildren[parent1 + randomCross[i]] = gridPopulation[parent2 + randomCross[i]];
    }
}

// changes few bits in new individual; walks the permutation as a chain
// (x = randomPerm[x]) to pick numberOfMutations cells and flips each (0<->1).
__device__ void mutate(int * gridChildren, int * randomPerm, int index){
    int x = randomPerm[0];
    for (int i = 0; i < numberOfMutations; i++) {
        x = randomPerm[x];
        gridChildren[index + x] = 1 - gridChildren[index + x];
    }
}

// creates representation of column, which is comparable to legend and runs NW function.
// One thread per column across all individuals; the run-length encoding
// (lengths of consecutive 1-blocks) is built into a device-heap buffer that
// needlemanParallel releases.
extern "C" __global__ void countFitnessOfAllColumns(int * gridPopulation, int * fitness){
    int columnIndex = blockDim.x * blockIdx.x + threadIdx.x ; // column number relative to all grids in population
    int numberOfIndividual = columnIndex / heightWidth[1];    // number in population
    int absStart = numberOfIndividual * heightWidth[2];       // start in array of population
    int column = columnIndex - numberOfIndividual * heightWidth[1]; // relative to grid
    int sliceSize = 1;
    int * columnIntRepr = new int [heightWidth[0]];
    columnIntRepr[0] = 0;
    int combo = 0;  // length of the current run of filled squares
    for (int i = 0; i < heightWidth[0]; i++) {
        if (gridPopulation[absStart + i*heightWidth[1] + column] == 1) {
            combo++;
        } else {
            if (combo != 0) {
                columnIntRepr[sliceSize++] = combo;
            }
            combo = 0;
        }
    }
    if (combo != 0) {
        columnIntRepr[sliceSize++] = combo; // for the case the last square is filled
    }
    needlemanParallel(&fitness[numberOfIndividual],legendU, sizesOfLegendsU[column], shiftsOfLegendsU[column], columnIntRepr, sliceSize);
}

// creates representation of row, which is comparable to legend and runs NW function.
// One thread per row across all individuals.
extern "C" __global__ void countFitnessOfAllRows(int * gridPopulation, int * fitness){
    int absRadek = blockDim.x * blockIdx.x + threadIdx.x ;
    int numberOfIndividual = absRadek / heightWidth[0];
    int absStart = numberOfIndividual * heightWidth[2]; // index into the population array
    int row = absRadek - numberOfIndividual * heightWidth[0];
    int sliceSize = 1;
    int combo = 0;
    int * rowIntRepr = new int [heightWidth[1]];
    rowIntRepr[0] = 0;
    for (int i = 0; i < heightWidth[1]; i++) {
        if (gridPopulation[absStart + row*heightWidth[1] + i] == 1) {
            combo++;
        } else {
            if (combo != 0) {
                rowIntRepr[sliceSize++] = combo;
            }
            combo = 0;
        }
    }
    if (combo != 0) {
        rowIntRepr[sliceSize++] = combo; // for the case the last square is filled
    }
    needlemanParallel(&fitness[numberOfIndividual], legendL, sizesOfLegendsL[row], shiftsOfLegendsL[row], rowIntRepr, sliceSize);
}

// computes Needleman-Wunsch function, which measures the difference between two integer arrays and adds it to fitness.
// here, one array is legend of one row/column and the other is actual slice of individual's grid.
// NOTE(review): sliceIntRepr was allocated with `new` by the caller and is
// released here with `free` — both use the device heap, but mixing the two
// is fragile; confirm this is intentional.
__device__ void needlemanParallel(int * fitness, int* legend, int sizeOfLegend, int shiftsOfLegends, int* sliceIntRepr, int sliceSize){
    int * H0 = new int[sliceSize];
    int * H1 = new int[sliceSize];
    int fitnessLocal = needlemanOpt(legend, sizeOfLegend, shiftsOfLegends, sliceIntRepr, sliceSize, H0, H1);
    atomicAdd(fitness, fitnessLocal);  // several rows/columns accumulate into one individual's fitness
    free(sliceIntRepr);
    free(H0);
    free(H1);
}

///
/// Single thread computation of evolution: crossover + mutation + full
/// fitness evaluation of the child, all in one thread per individual.
/// NOTE(review): differenceArray is declared but never used.
extern "C" __global__ void evolution(int * gridPopulation, int * gridChildren, int * fitness, int* fitnessChildren, int* randomCross, int * randomSelection){
    __shared__ int differenceArray[320];
    int ind = blockDim.x * blockIdx.x + threadIdx.x ; // number of individual in population
    int ind2 = randomSelection[ind];                  // the other individual
    cross(gridPopulation, gridChildren, ind * heightWidth[2], ind2 * heightWidth[2], randomCross);
    mutate(gridChildren, randomCross, ind * heightWidth[2]);
    fitnessChildren[ind] = countFitness(gridChildren, ind * heightWidth[2]);
}

// Sums the NW distance of every column and every row of one individual's grid.
__device__ int countFitness(int * gridPopulation, int index ){
    int fitness = 0;
    int biggerSize = max(heightWidth[0], heightWidth[1])/2; // take the maximal possible size
    int * H0 = new int[biggerSize];
    int * H1 = new int[biggerSize];
    int * gridSlice = new int [biggerSize];
    gridSlice[0] = 0;
    for (int column = 0; column < heightWidth[1]; column++) { // columns
        fitness += fitnessColumn(gridPopulation, index, column, H0, H1, gridSlice);
    }
    for (int row = 0; row < heightWidth[0]; row++) { // rows
        fitness += fitnessRow(gridPopulation, index, row, H0, H1, gridSlice);
    }
    free(gridSlice);
    free(H0);
    free(H1);
    return fitness;
}

// Run-length encodes one column into gridSlice and scores it against the
// upper legend for that column.
__device__ int fitnessColumn(int * gridPopulation, int index, int column, int * H0, int * H1, int * gridSlice){
    int sliceSize = 1;
    int combo = 0;
    gridSlice[0] = 0;
    for (int i = 0; i < heightWidth[0]; i++) {
        if (gridPopulation[index + i*heightWidth[1] + column] == 1) {
            combo++;
        } else {
            if (combo != 0) {
                gridSlice[sliceSize++] = combo;
            }
            combo = 0;
        }
    }
    if (combo != 0) {
        gridSlice[sliceSize++] = combo; // for the case the last square is filled
    }
    return needlemanOpt(legendU, sizesOfLegendsU[column], shiftsOfLegendsU[column], gridSlice, sliceSize, H0, H1);
}

// Run-length encodes one row into gridSlice and scores it against the left
// legend for that row. NOTE(review): unlike fitnessColumn, gridSlice[0] is
// not reset here — it relies on the caller having set it to 0.
__device__ int fitnessRow(int * gridPopulation, int index, int row, int * H0, int * H1, int * gridSlice){
    int sliceSize = 1;
    int combo = 0;
    for (int i = 0; i < heightWidth[1]; i++) {
        if (gridPopulation[index + row*heightWidth[1] + i] == 1) {
            combo++;
        } else {
            if (combo != 0) {
                gridSlice[sliceSize++] = combo;
            }
            combo = 0;
        }
    }
    if (combo != 0) {
        gridSlice[sliceSize++] = combo; // for the case the last square is filled
    }
    return needlemanOpt(legendL, sizesOfLegendsL[row], shiftsOfLegendsL[row], gridSlice, sliceSize, H0, H1);
}

// Two-row Needleman-Wunsch-style DP between one legend (starting at
// beginningOfLegend, sizeOfLegend entries) and one run-length-encoded grid
// slice. H0/H1 are caller-provided scratch rows of length >= sliceSize;
// the pointers are swapped after each legend entry.
__device__ int needlemanOpt(int* legend1D, int sizeOfLegend, int beginningOfLegend, int* gridSlice, int sliceSize, int* H0, int *H1){
    H0[0] = 0;
    H1[0] = 0;
    for (int i = 1; i < sliceSize; i++) {
        H0[i] = H0[i - 1] - gridSlice[i];
    }
    //---------------
    for (int j = 1; j < sizeOfLegend; j++) {
        int legendJ = legend1D[beginningOfLegend + j];
        H1[0] = H0[0] - legendJ;
        for (int i = 1; i < sliceSize; i++) {
            H1[i] = max(H1[i-1] - gridSlice[i], max(H0[i ] - legendJ, H0[i-1] - abs(legendJ - gridSlice[i])));
        }
        int * swap = H0;
        H0 = H1;
        H1 = swap;
    }
    return H0[sliceSize - 1]; // swapped, so H0;
}
4,253
#include <cuda.h>
#include <stdio.h>
#include <chrono>
#include <random>

// Writes a*col + row into slot (row, col) of a `dim`-wide row-major table,
// where col = threadIdx.x and row = blockIdx.x.
__global__ void calc_kernel(int a, int *dA, int dim) {
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    dA[row * dim + col] = a * col + row;
}

// Returns a pseudo-random int in [1, 100], seeded from the system clock.
int random_int() {
    // randomize the seed, create distribution
    const auto ticks = std::chrono::system_clock::now().time_since_epoch().count();
    std::mt19937 engine(ticks);
    // return random int between 1 and 100
    return std::uniform_int_distribution<int>(1, 100)(engine);
}

int main() {
    const int a = random_int();
    const int n = 16;
    const int blocks = 2;
    const int threads_per_block = 8;

    int hA[n];
    int *dA;
    cudaMalloc((void **)&dA, sizeof(int) * n);

    // Two blocks of eight threads each fill all n = 16 slots (dim = n/2 = 8).
    calc_kernel<<<blocks, threads_per_block>>>(a, dA, n / 2);
    cudaMemcpy(hA, dA, sizeof(int) * n, cudaMemcpyDeviceToHost);

    for (int value : hA) {
        printf("%d ", value);
    }
    printf("\n");

    cudaFree(dA);
}
4,254
#include <iostream>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cufft.h>

#define NX 2048

using namespace std;

// Times a 2048x2048 double-complex forward FFT on the GPU, then runs the
// inverse transform and checks that one element round-trips (after the
// 1/(NX*NX) normalization cuFFT leaves to the caller).
int main(int argc, char *argv[])
{
    struct timeval tt1, tt2;
    int ms;
    float fms;

    // create cufft plan
    cufftHandle plan;
    cufftPlan2d(&plan, NX, NX, CUFFT_Z2Z);

    // allocate cufftDoubleComplex type host memory
    cufftDoubleComplex *data;
    data = (cufftDoubleComplex*)malloc(NX*NX * sizeof(cufftDoubleComplex));

    // data initialization
    for(int j=0 ; j < NX ; j++)
        for(int k=0 ; k < NX ; k++) {
            data[k + j*NX].x = sin(double(j)+double(k));
            data[k + j*NX].y = cos(double(j)+double(k));
        }

    // check initial value of a data element
    cout << "initial value = " << data[43].x << " + " << data[43].y << "i" << endl;

    // allocate cufftDoubleComplex type device memory
    cufftDoubleComplex *devPtr;
    cudaMalloc((void**)&devPtr, sizeof(cufftDoubleComplex)*NX*NX);

    // copy data to device memory
    cudaMemcpy(devPtr, data, sizeof(cufftDoubleComplex)*NX*NX, cudaMemcpyHostToDevice);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported equivalent and is required here so the timing brackets
    // only the (asynchronous) FFT execution.
    cudaDeviceSynchronize();

    gettimeofday( &tt1, NULL );
    // run fft
    cufftExecZ2Z(plan, devPtr, devPtr, CUFFT_FORWARD);
    cudaDeviceSynchronize();
    gettimeofday( &tt2, NULL );

    // make inverse transform
    cufftExecZ2Z(plan, devPtr, devPtr, CUFFT_INVERSE);

    // transfer result back from device (blocking copy: implicit sync)
    cudaMemcpy(data, devPtr, sizeof(cufftDoubleComplex)*NX*NX, cudaMemcpyDeviceToHost);

    // destroy cufft plan
    cufftDestroy(plan);

    // free device memory
    cudaFree(devPtr);

    // check initial value of the same data element. Initial and final values should match
    // after a forward and inverse transform.
    cout << "final value = " << data[43].x/double(NX*NX) << " + " << data[43].y/double(NX*NX) << "i" << endl;

    // free host memory
    free(data);

    // timing
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms)/1000000.f;
    cout << "Computation time = " << fms << " seconds" << endl;
}
4,255
#include <stdio.h>

#define N 10000

// Element-wise vector add: c[i] = a[i] + b[i] for i < N.
// BUG FIX: the original kernel used only threadIdx.x and was launched as
// add<<<1, N>>> with N = 10000 — above the 1024 threads-per-block hardware
// limit, so the launch failed silently and c was never written. The kernel
// now uses a global index and the host launches enough blocks to cover N.
__global__ void add(int *a, int *b, int *c)
{
    int tID = blockIdx.x * blockDim.x + threadIdx.x;
    if (tID < N) {
        c[tID] = a[tID] + b[tID];
    }
}

int main()
{
    int *a, *b, *c;
    a = (int*)malloc(N * sizeof(int));
    b = (int*)malloc(N * sizeof(int));
    c = (int*)malloc(N * sizeof(int));

    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, N * sizeof(int));
    cudaMalloc((void **)&dev_b, N * sizeof(int));
    cudaMalloc((void **)&dev_c, N * sizeof(int));

    // Fill arrays on the host: a = 0..N-1, b = all ones.
    for (int i = 0; i < N; i++) {
        a[i] = i, b[i] = 1;
    }
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // Enough 256-thread blocks to cover all N elements (ceil division).
    const int threads = 256;
    const int blocks = (N + threads - 1) / threads;
    add<<<blocks, threads>>>(dev_a, dev_b, dev_c);

    // Blocking copy: implicitly waits for the kernel to finish.
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // FIX: device and host buffers were previously leaked.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
4,256
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Mean (box) filter, CPU reference implementation.
// image/filteredImage are raw BMP pixel arrays; window is the filter side
// length.  For 24bpp images the three bytes of each pixel are packed into one
// int before averaging and unpacked afterwards, matching the GPU kernel.
void meanFilterCPU(unsigned char *image, unsigned char *filteredImage, int imgWidth, int imgHeight, short bitsPerPixel, int window)
{
    int bottomBoundaryOfWindow, topBoundaryOfWindow, leftBoundaryOfWindow, rightBoundaryOfWindow;
    int halfOfWindowSize = (window - 1) / 2;
    for (int i = 0; i < imgHeight; i++)
    {
        // Clamp the window vertically to the image.
        int calculatedBottomBoundary = i - halfOfWindowSize;
        int calculatedTopBoundary = i + halfOfWindowSize;
        bottomBoundaryOfWindow = (calculatedBottomBoundary <= 0) ? 0 : calculatedBottomBoundary;
        topBoundaryOfWindow = (calculatedTopBoundary >= (imgHeight - 1)) ? (imgHeight - 1) : calculatedTopBoundary;
        for (int j = 0; j < imgWidth; j++)
        {
            // Clamp the window horizontally to the image.
            int calculatedLeftBoundary = j - halfOfWindowSize;
            int calculatedRightBoundary = j + halfOfWindowSize;
            leftBoundaryOfWindow = (calculatedLeftBoundary <= 0) ? 0 : calculatedLeftBoundary;
            rightBoundaryOfWindow = (calculatedRightBoundary >= (imgWidth - 1)) ? (imgWidth - 1) : calculatedRightBoundary;

            int sum = 0;
            for (int y = bottomBoundaryOfWindow; y <= topBoundaryOfWindow; y++)
            {
                for (int x = leftBoundaryOfWindow; x <= rightBoundaryOfWindow; x++)
                {
                    if (bitsPerPixel == 8)
                    {
                        sum += image[y * imgWidth + x];
                    }
                    else if (bitsPerPixel == 24)
                    {
                        // Pack the three channel bytes into a single int.
                        int possition = (y * imgWidth + x) * 3;
                        unsigned char firstByteOfPixel = image[possition];
                        unsigned char secondByteOfPixel = image[possition + 1];
                        unsigned char thirdByteOfPixel = image[possition + 2];
                        int grayscaleValue = ((firstByteOfPixel << 16) & 0x00ff0000) |
                                             ((secondByteOfPixel << 8) & 0x0000ff00) |
                                             (thirdByteOfPixel & 0x000000ff);
                        sum += grayscaleValue;
                    }
                }
            }
            // Average over the (possibly clamped) window area.
            int pixelsInWindow = (rightBoundaryOfWindow - leftBoundaryOfWindow + 1) *
                                 (topBoundaryOfWindow - bottomBoundaryOfWindow + 1);
            int meanValue = sum / pixelsInWindow;
            if (bitsPerPixel == 8)
            {
                int possitionInImg = i * imgWidth + j;
                filteredImage[possitionInImg] = meanValue;
            }
            else if (bitsPerPixel == 24)
            {
                // Unpack the averaged value back into three bytes.
                int possitionInImg = (i * imgWidth + j) * 3;
                unsigned char firstByteOfPixel = (meanValue >> 16) & 0Xff;
                unsigned char secondByteOfPixel = (meanValue >> 8) & 0xff;
                unsigned char thirdByteOfPixel = meanValue & 0xff;
                filteredImage[possitionInImg] = firstByteOfPixel;
                filteredImage[possitionInImg + 1] = secondByteOfPixel;
                filteredImage[possitionInImg + 2] = thirdByteOfPixel;
            }
        }
    }
}

// GPU mean filter: one thread per output pixel; the bounds check handles the
// grid overhang when the image size is not a multiple of the block size.
__global__ void meanFilterGPU(unsigned char *image, unsigned char *filteredImage, int imgWidth, int imgHeight, short bitsPerPixel, int window)
{
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (column < imgWidth && row < imgHeight)
    {
        // CONSISTENCY FIX: use (window - 1) / 2 like meanFilterCPU does.  The
        // original used window / 2 here, which disagrees with the CPU result
        // for even window sizes.
        int halfOfWindowSize = (window - 1) / 2;
        int bottomBoundaryOfWindow, topBoundaryOfWindow, leftBoundaryOfWindow, rightBoundaryOfWindow;
        int calculatedBottomBoundary = row - halfOfWindowSize;
        int calculatedTopBoundary = row + halfOfWindowSize;
        int calculatedLeftBoundary = column - halfOfWindowSize;
        int calculatedRightBoundary = column + halfOfWindowSize;
        bottomBoundaryOfWindow = (calculatedBottomBoundary < 0) ? 0 : calculatedBottomBoundary;
        topBoundaryOfWindow = (calculatedTopBoundary > (imgHeight - 1)) ? (imgHeight - 1) : calculatedTopBoundary;
        leftBoundaryOfWindow = (calculatedLeftBoundary < 0) ? 0 : calculatedLeftBoundary;
        rightBoundaryOfWindow = (calculatedRightBoundary > (imgWidth - 1)) ? (imgWidth - 1) : calculatedRightBoundary;

        int sum = 0;
        for (int y = bottomBoundaryOfWindow; y <= topBoundaryOfWindow; y++)
        {
            for (int x = leftBoundaryOfWindow; x <= rightBoundaryOfWindow; x++)
            {
                if (bitsPerPixel == 8)
                {
                    sum += image[y * imgWidth + x];
                }
                else if (bitsPerPixel == 24)
                {
                    int possition = (y * imgWidth + x) * 3;
                    unsigned char firstByteOfPixel = image[possition];
                    unsigned char secondByteOfPixel = image[possition + 1];
                    unsigned char thirdByteOfPixel = image[possition + 2];
                    int grayscaleValue = ((firstByteOfPixel << 16) & 0x00ff0000) |
                                         ((secondByteOfPixel << 8) & 0x0000ff00) |
                                         (thirdByteOfPixel & 0x000000ff);
                    sum += grayscaleValue;
                }
            }
        }
        int pixelsInWindow = (rightBoundaryOfWindow - leftBoundaryOfWindow + 1) *
                             (topBoundaryOfWindow - bottomBoundaryOfWindow + 1);
        int meanValue = sum / pixelsInWindow;
        if (bitsPerPixel == 8)
        {
            int possitionInImg = row * imgWidth + column;
            filteredImage[possitionInImg] = meanValue;
        }
        else if (bitsPerPixel == 24)
        {
            int possitionInImg = (row * imgWidth + column) * 3;
            unsigned char firstByteOfPixel = (meanValue >> 16) & 0Xff;
            unsigned char secondByteOfPixel = (meanValue >> 8) & 0xff;
            unsigned char thirdByteOfPixel = meanValue & 0xff;
            filteredImage[possitionInImg] = firstByteOfPixel;
            filteredImage[possitionInImg + 1] = secondByteOfPixel;
            filteredImage[possitionInImg + 2] = thirdByteOfPixel;
        }
    }
}

// Usage: prog <bitmap.bmp> <window>
// Reads a BMP, filters it on CPU and GPU, and prints a per-pixel comparison.
int main(int argc, char **argv)
{
    unsigned char *bitmapHeaders, *imgPixels, *cpuFilteredImg, *gpuFilteredImg_d, *gpuFilteredImg_h, *img_d;
    int imgWidth, imgHeight, offset, imgSize, window;
    short bitsPerPixel;

    bitmapHeaders = (unsigned char *)malloc(sizeof(char) * 54);
    // FILE *imgFile = fopen("512.bmp", "rb");
    // FILE *imgFile = fopen("img_640.bmp", "rb"); // window = 3;
    FILE *imgFile = fopen(argv[1], "rb");
    window = atoi(argv[2]);

    // BMP header layout: width is 4 bytes @ offset 18, height 4 bytes @ 22,
    // bits-per-pixel 2 bytes @ 28, pixel-data offset 4 bytes @ 10.
    fread(bitmapHeaders, sizeof(unsigned char), 54, imgFile);
    memcpy(&imgWidth, bitmapHeaders + 18, sizeof(int));
    memcpy(&imgHeight, bitmapHeaders + 22, sizeof(int));
    memcpy(&bitsPerPixel, bitmapHeaders + 28, sizeof(short));
    memcpy(&offset, bitmapHeaders + 10, sizeof(int));
    if (bitsPerPixel == 8)
    {
        imgSize = imgWidth * imgHeight;
    }
    else if (bitsPerPixel == 24)
    {
        imgSize = 3 * imgWidth * imgHeight;
    }
    printf("imgWidtht : %d\n", imgWidth);
    printf("imgHeight : %d\n", imgHeight);
    printf("bitsPerPixel : %d\n", bitsPerPixel);
    printf("image size : %d\n", imgSize);
    printf("offset : %d\n", offset);

    // Skip any bytes between the 54-byte header and the pixel data.
    int diffBtwnHeadersAndPixels = offset - 54;
    char *bytsBtwnHeadersAndPixels = (char *)malloc(sizeof(char) * diffBtwnHeadersAndPixels);
    fread(bytsBtwnHeadersAndPixels, sizeof(char), diffBtwnHeadersAndPixels, imgFile);

    imgPixels = (unsigned char *)malloc(sizeof(unsigned char) * imgSize);
    fread(imgPixels, sizeof(char), imgSize, imgFile);
    cpuFilteredImg = (unsigned char *)malloc(sizeof(unsigned char) * imgSize);
    gpuFilteredImg_h = (unsigned char *)malloc(sizeof(unsigned char) * imgSize);
    cudaMalloc((void **)&img_d, imgSize);
    cudaMalloc((void **)&gpuFilteredImg_d, imgSize);
    cudaMemcpy(img_d, imgPixels, imgSize, cudaMemcpyHostToDevice);

    meanFilterCPU(imgPixels, cpuFilteredImg, imgWidth, imgHeight, bitsPerPixel, window);

    dim3 dimBlock(32, 32);
    // BUG FIX: the original used imgWidth/32 x imgHeight/32, which truncates
    // and leaves the right/bottom edge pixels unfiltered whenever a dimension
    // is not a multiple of 32.  Round up; the kernel's bounds check handles
    // the overhang.
    dim3 dimGrid((imgWidth + 31) / 32, (imgHeight + 31) / 32);
    meanFilterGPU<<<dimGrid, dimBlock>>>(img_d, gpuFilteredImg_d, imgWidth, imgHeight, bitsPerPixel, window);
    cudaMemcpy(gpuFilteredImg_h, gpuFilteredImg_d, imgSize, cudaMemcpyDeviceToHost);

    if (bitsPerPixel == 8)
    {
        // int loop variable matches the %d format specifier (was size_t).
        for (int i = 0; i < imgSize; i++)
        {
            printf("%d pixelBeforeFilter:%d cpuFilteredPixel:%d gpuFilteredPixel:%d\n", i, imgPixels[i], cpuFilteredImg[i], gpuFilteredImg_h[i]);
        }
    }
    else if (bitsPerPixel == 24)
    {
        for (int i = 0; i < imgSize; i += 3)
        {
            int pixelBeforeFilter = (imgPixels[i] << 16) & 0X00ff0000 | (imgPixels[i + 1] << 8) & 0X0000ff00 | imgPixels[i + 2] & 0X000000ff;
            int cpuFilteredPixel = (cpuFilteredImg[i] << 16) & 0X00ff0000 | (cpuFilteredImg[i + 1] << 8) & 0X0000ff00 | cpuFilteredImg[i + 2] & 0X000000ff;
            int gpuFilteredPixel = (gpuFilteredImg_h[i] << 16) & 0X00ff0000 | (gpuFilteredImg_h[i + 1] << 8) & 0X0000ff00 | gpuFilteredImg_h[i + 2] & 0X000000ff;
            printf("%d pixelBeforeFilter:%d cpuFilteredPixel:%d gpuFilteredPixel:%d\n", i, pixelBeforeFilter, cpuFilteredPixel, gpuFilteredPixel);
        }
    }

    fclose(imgFile);
    free(imgPixels);
    free(bytsBtwnHeadersAndPixels);
    free(bitmapHeaders);
    free(cpuFilteredImg);
    free(gpuFilteredImg_h);
    cudaFree(img_d);
    cudaFree(gpuFilteredImg_d);
    return 0;
}
4,257
#define N 1024
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
//#include<curand_kernel.h>

using namespace std;

/*
__device__ int getRand(curandState *s, int a, int b){
    float rand_int = curand_uniform(s);
    rand_int = rand_int * (b - a) + a;
    return rand_int;
}
*/

// Element-wise sum of two N x N int matrices, one thread per element
// (2D grid / 2D block).  NOTE(review): defined but never launched by main().
__global__ void add_array(int *a, int *b, int *c){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    c[i * N + j] = a[i * N + j] + b[i * N + j];
    // debug trace: one printf per thread (very slow for a full 1024x1024 launch)
    printf("%d %d %d index: %2d, %2d block: %2d, %2d thread: %2d, %2d\n", a[i * N + j], b[i * N + j], c[i * N + j], i, j, blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y);
}

// Fill the three matrices with the constants 1, 2 and 3.  The commented-out
// curand code below is an earlier randomized version kept for reference.
__global__ void build_array(int *a, int *b, int *c){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    /*
    int k = blockIdx.x * blockDim.y + threadIdx.x;
    unsigned int seed[3];
    seed[0] = i;
    seed[1] = j;
    seed[2] = k;
    curandState s[3];
    for(int v = 0; v < 2; v++)
        curand_init(seed[v], 0, 0, &s[v]);
    a[i * N + j] = getRand(&s[0], 0, 10);
    b[i * N + j] = getRand(&s[1], 0, 10);
    c[i * N + j] = getRand(&s[2], 0, 10);
    */
    a[i * N + j] = 1;
    b[i * N + j] = 2;
    c[i * N + j] = 3;
    // debug trace: one printf per thread
    printf("%d %d %d index: %2d, %2d block: %2d, %2d thread: %2d, %2d\n", a[i * N + j], b[i * N + j], c[i * N + j], i, j, blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y);
}

// Fill a host array with values in [0, 10).  NOTE(review): currently unused.
void random_ints(int *array, int size){
    int i;
    for(i = 0; i < size; i++)
        array[i] = rand() % 10;
}

int main()
{
    int *a, *b, *c;
    int *gpu_a, *gpu_b, *gpu_c;
    a = (int *) malloc(sizeof(int) * N * N);
    b = (int *) malloc(sizeof(int) * N * N);
    c = (int *) malloc(sizeof(int) * N * N);
    cudaMalloc((void **) &gpu_a, sizeof(int) * N * N);
    cudaMalloc((void **) &gpu_b, sizeof(int) * N * N);
    cudaMalloc((void **) &gpu_c, sizeof(int) * N * N);
    // NOTE(review): a, b, c are uninitialized at this point, so these copies
    // upload garbage; build_array overwrites everything on the device anyway.
    cudaMemcpy(gpu_a, a, sizeof(int) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, sizeof(int) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_c, c, sizeof(int) * N * N, cudaMemcpyHostToDevice);
    // 16x16 threads per block; N is a multiple of 16 so the grid divides evenly.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    build_array<<<numBlocks, threadsPerBlock>>>(gpu_a, gpu_b, gpu_c);
    // Blocking copy: implicitly waits for the kernel on the default stream.
    cudaMemcpy(c, gpu_c, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
    /*
    for(int i = 0; i < N; i++) {
        for(int j = 0; j < N; j++)
            printf("%d index: %d, %d\n", c[i * N + j], i, j);
    }
    */
    free(a);
    free(b);
    free(c);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    return 0;
}
4,258
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>

// Fixed key template folded into every candidate hash.
__device__ char key[] = "$1&1234-1234-123456";

// Bitwise hash folding step: feeds the 8 bits of `byte` (LSB first) into the
// running state `n`, with `c` as the feedback constant.  Structurally similar
// to a bit-serial CRC update -- TODO confirm the exact scheme.
__device__ int f(int n, int byte, int c)
{
    for (int bitIndex = 0; bitIndex <= 7; bitIndex++) {
        int bit = (byte >> bitIndex) & 1;
        if (bit + ((n - bit) & ~1) == n) {
            n = (n - bit) >> 1;
        } else {
            n = ((c - bit) ^ n) >> 1;
        }
    }
    return n;
}

// One thread per candidate hash value; each thread writes a 16-byte
// NUL-terminated password string into its own slice of `res`.
__global__ void keygen(char* mathId, int hash_base, char* res)
{
    // Each thread owns a disjoint 16-byte output slot.
    res += 16*(blockIdx.x*blockDim.x+threadIdx.x);
    int hash = hash_base + blockIdx.x*blockDim.x + threadIdx.x;
    // Fold the key and the machine id (both processed back-to-front) into the hash.
    for(int byteIndex = 18; byteIndex >= 0; byteIndex--){
        hash = f(hash, (int)key[byteIndex], 0x105C3);
    }
    for(int byteIndex = 15; byteIndex >= 0; byteIndex--){
        hash = f(hash, (int)mathId[byteIndex], 0x105C3);
    }
    // Brute-force the 16-bit value n1 whose two bytes drive the state to 0xA5B6.
    int n1 = 0;
    while (f(f(hash, n1 & 0xFF, 0x105C3), n1 >> 8, 0x105C3) != 0xA5B6) {
        ++n1;
    }
    n1 = floor(((n1 + 0x72FA) & 0xFFFF) * 99999.0 / 0xFFFF);
    // Decimal-digit shuffle of n1, then a second folding pass with constant 0x1064B.
    int temp = n1/1000*1000 + n1%100*10 + n1%1000/100;
    temp = ceil((temp/99999.0)*0xFFFF);
    temp = f(f(0, temp & 0xFF, 0x1064B), temp >> 8, 0x1064B);
    for(int byteIndex = 18; byteIndex >= 0; byteIndex--){
        temp = f(temp, (int)key[byteIndex], 0x1064B);
    }
    for(int byteIndex = 15; byteIndex >= 0; byteIndex--){
        temp = f(temp, (int)mathId[byteIndex], 0x1064B);
    }
    // Same brute-force search for the second 16-bit value n2.
    int n2 = 0;
    while (f(f(temp, n2 & 0xFF, 0x1064B), n2 >> 8, 0x1064B) != 0xA5B6) {
        ++n2;
    }
    n2 = floor((n2 & 0xFFFF) * 99999.0 / 0xFFFF);
    // Scatter the decimal digits of n1 and n2 into the fixed password template
    // (ASCII digits: 48 == '0'); layout is "dddd-ddd-ddd::1" + NUL.
    res[10] = n1 % 10 + 48;
    res[1] = (n1/=10) % 10 + 48;
    res[6] = (n1/=10) % 10 + 48;
    res[2] = (n1/=10) % 10 + 48;
    res[3] = (n1/=10) % 10 + 48;
    res[5] = n2 % 10 + 48;
    res[0] = (n2/=10) % 10 + 48;
    res[9] = (n2/=10) % 10 + 48;
    res[11] =(n2/=10) % 10 + 48;
    res[7] = (n2/=10) % 10 + 48;
    res[4] = '-';
    res[8] = '-';
    res[12] = ':';
    res[13] = ':';
    res[14] = '1';
    res[15] = 0;
}

#if !defined Thread_Num
#define Thread_Num 1024
#endif

// Usage: prog <mathId> [<hashStart-hex> [<hashEnd-hex>]]
// Enumerates candidate hashes in [hashStart, hashEnd) on the GPU and prints
// one generated password line per hash.
int main(int argc, char** argv){
    int hashStart = 0x0;
    int hashEnd = 0x10000;
    if(argc==4){
        sscanf(argv[2],"%x",&hashStart);
        sscanf(argv[3],"%x",&hashEnd);
    }
    if(argc==3){
        sscanf(argv[2],"%x",&hashStart);
        hashEnd = hashStart + 1;
    }
    int Total_Number = hashEnd - hashStart;
    int Block_Num = (Total_Number + Thread_Num - 1)/ Thread_Num;  // ceil-div
    char* math_id = NULL;
    // NOTE(review): the kernel reads mathId[0..15]; this assumes
    // strlen(argv[1]) >= 16 -- confirm against expected input format.
    size_t math_id_size = strlen(argv[1])*sizeof(char);
    cudaMalloc((void**)&math_id, math_id_size);
    cudaMemcpy(math_id, argv[1], math_id_size, cudaMemcpyHostToDevice);
    char h_res[16*Block_Num*Thread_Num];   // VLA: one 16-byte slot per thread
    char* d_res = NULL;
    size_t res_size = 16*Block_Num*Thread_Num*sizeof(char);
    cudaMalloc((void**)&d_res, res_size);
    printf("Hash MathId Key Password\n");
    keygen<<<Block_Num,Thread_Num>>>(math_id,hashStart,d_res);
    // Blocking copy of just the slots that were actually requested.
    cudaMemcpy(h_res, d_res, 16*Total_Number*sizeof(char), cudaMemcpyDeviceToHost);
    for(int hash_del = 0; hash_del<Total_Number; hash_del++){
        printf("%04X %s 1234-1234-123456 %s\n", hashStart+hash_del, argv[1], h_res+16*hash_del);
    }
    return 0;
}
4,259
#include "includes.h"

// Element-wise sum of two float vectors: out[i] = in1[i] + in2[i] for i < len.
// One thread per element; the guard handles the partial final block.
__global__ void vecAdd(float * in1, float * in2, float * out, int len)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
4,260
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>
#include <assert.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>

// Count elements where the sequential and CUDA results differ by more than a
// loose tolerance (repeated summation makes the result order-sensitive).
int checkResults(float*res, float* cudaRes,int length)
{
    int nDiffs=0;
    const float smallVal = 0.2f; // Keeping this extra high as we have repetitive addition and sequence matters
    for(int i=0; i<length; i++)
        if(fabs(cudaRes[i]-res[i])>smallVal){
            nDiffs++;
        }
    return nDiffs;
}

// Fill arr with nElements floats read from fp, wrapping to the file start at EOF.
void initializeArray(FILE* fp,float* arr, int nElements)
{
    for( int i=0; i<nElements; i++){
        int r=fscanf(fp,"%f",&arr[i]);
        if(r == EOF){
            rewind(fp);
        }
        arr[i]-=5; // This is to make the data zero mean. Otherwise we reach large numbers and lose precision
    }
}

// Sequential inclusive prefix-sum reference implementation.
void inclusiveScan_SEQ(float *in, float *out,int length)
{
    float sum=0.f;
    for (int i =0; i < length; i++) {
        sum+=in[i];
        out[i]=sum;
    }
}

// Usage: prog N -- runs an in-place thrust::inclusive_scan on N floats read
// from problem1.inp, validates against the sequential scan, and prints timing.
int main(int argc, char* argv[])
{
    if(argc!=2){
        printf("Usage %s N\n",argv[0]);
        return 1;
    }
    int N=atoi(argv[1]);
    FILE *fp = fopen("problem1.inp","r");
    if(fp == NULL){   // robustness: fail cleanly instead of crashing in fscanf
        printf("Usage %s N\n",argv[0]);
        return 1;
    }
    int size = N * sizeof(float);

    // allocate resources
    float *h_in = (float *)malloc(size);
    float *h_out = (float *)malloc(size);
    float *cuda_out= (float *)malloc(size);
    float time = 0.f;
    initializeArray(fp,h_in, N);

    // start inclusive timing (events on the default stream)
    cudaEvent_t startIn,stopIn;
    cudaEventCreate(&startIn);
    cudaEventCreate(&stopIn);
    cudaEventRecord(startIn, 0);

    float *d_in;
    cudaMalloc(&d_in, size);
    // BUG FIX: the original wrapped this cudaMemcpy inside assert(), so the
    // copy itself was compiled out in NDEBUG builds.  Perform the call
    // unconditionally and assert only on the returned status.
    cudaError_t copyStatus = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
    assert(cudaSuccess == copyStatus);

    // set up the pointer and perform the in-place inclusive scan
    thrust::device_ptr<float> dev_ptr(d_in);
    thrust::inclusive_scan(dev_ptr,dev_ptr + N, dev_ptr);
    cudaMemcpy(cuda_out, d_in, size, cudaMemcpyDeviceToHost);

    // stop inclusive timing
    cudaEventRecord(stopIn, 0);
    cudaEventSynchronize(stopIn);
    cudaEventElapsedTime(&time, startIn, stopIn);
    cudaEventDestroy(startIn);
    cudaEventDestroy(stopIn);

    inclusiveScan_SEQ(h_in, h_out,N);
    int nDiffs = checkResults(h_out, cuda_out,N);
    if(nDiffs)printf("Test Failed\n"); // This should never print
    printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);

    // free resources (the original leaked d_in and never closed fp)
    cudaFree(d_in);
    fclose(fp);
    free(h_in);
    free(h_out);
    free(cuda_out);
    return 0;
}
4,261
#include <stdio.h>

// Fill a[0..N-1] with its own indices.
void init(int *a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] = idx;
}

// Double each element in place; one thread per element with a tail guard.
__global__ void doubleElements(int *a, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    a[idx] *= 2;
}

// Verify that every element equals twice its index.
bool checkElementsAreDoubled(int *a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        if (a[idx] != idx * 2)
            return false;
    return true;
}

int main()
{
    int N = 1000;
    size_t size = N * sizeof(int);

    // Managed allocation: the same pointer `a` is usable from host and device.
    int *a;
    cudaMallocManaged(&a, size);

    init(a, N);

    size_t threads_per_block = 256;
    size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
    doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
    cudaDeviceSynchronize();   // host must wait before reading managed memory

    bool areDoubled = checkElementsAreDoubled(a, N);
    printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");

    // Release the cudaMallocManaged allocation.
    cudaFree(a);
}
4,262
//********************************************//
// MAC0219/5742 - EP3                         //
// EP3 - Mandelbrot                           //
// Bruna Bazaluk, Felipe Serras, Ricardo Kojo //
//********************************************//
// File containing the GPU-processing functions.

#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thrust/complex.h> // Complex-number handling for both CPU and GPU
#include <png.h>

using namespace std;

#define ITERATIONS 1000

// Declarations of externally-defined helpers used by this file:
inline void setColorValue(png_byte *ptr, double val);
int printImage(string file_name, int w, int h, float *buffer_image);
float maximize(float *array, int array_size);

// GPU version of the buffer-image builder, which decides membership of each
// complex point in the Mandelbrot set.  It closely mirrors the CPU version;
// the main difference lies in how the pixels are traversed.
__global__ void mbrot_func_gpu(float c0_r, float c0_i, float c1_r, float c1_i, int w, int h, int iteractions, float *buffer_image)
{
    // The buffer image is assumed to be allocated already, since it must live
    // in GPU-accessible memory.
    float d_x = (c1_r - c0_r) / (float)w;
    float d_y = (c1_i - c0_i) / (float)h;
    // For each call, the loop's start index and step are computed from the
    // thread number and the block number executing it.  This guarantees no
    // thread repeats another thread's work.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < w * h; i += stride)
    {
        int y = i / w;
        int x = i % w;
        thrust::complex<float> current;
        current.real(0);
        current.imag(0);
        thrust::complex<float> last;
        last.real(0);
        last.imag(0);
        thrust::complex<float> c;
        c.real(c0_r + (x * d_x));
        c.imag(c0_i + (y * d_y));
        // printf("%d ",i);
        float abs = 0.0;
        bool mandel = 1;
        for (int t = 1; t < iteractions; ++t)
        {
            current = last * last + c;
            abs = thrust::abs(current);
            if (abs > 2)
            {
                mandel = 0;
                buffer_image[y * w + x] = (float)t;
                break; // color this pixel based on the iteration t at which it escaped
            }
            last = current;
        }
        if (mandel)
        {
            // Point never escaped within the iteration budget: inside the set.
            buffer_image[y * w + x] = 0.0;
        }
    }
}

// GPU version of the buffer normalization: divide every entry by the maximum.
__global__ void normalizeBuffer_gpu(float *buffer_image, int buffer_size, float buffer_max)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < buffer_size; i += stride)
    {
        buffer_image[i] = buffer_image[i] / buffer_max;
    }
}

// Main entry point for GPU processing: renders the rectangle [C0, C1] of the
// complex plane into a WIDTH x HEIGHT buffer and returns a host copy of it
// (caller owns and frees the returned array).
float *main_gpu(float C0_REAL, float C0_IMAG, float C1_REAL, float C1_IMAG, int WIDTH, int HEIGHT, int THREADS, string SAIDA)
{
    int blockSize = THREADS;
    int numBlocks = (WIDTH * HEIGHT + blockSize - 1) / blockSize;
    float *buffer_image;
    cudaMallocManaged(&buffer_image, WIDTH * HEIGHT * sizeof(float)); // allocate GPU-accessible memory for the buffer image
    if (buffer_image == NULL)
    {
        cerr << "Falha ao criar o Buffer da imagem." << endl;
        return buffer_image;
    }
    // Build the buffer image:
    mbrot_func_gpu<<<numBlocks, blockSize>>>(C0_REAL, C0_IMAG, C1_REAL, C1_IMAG, WIDTH, HEIGHT, ITERATIONS, buffer_image);
    cudaDeviceSynchronize(); // wait for the kernel before the sequential part continues
    cudaDeviceSynchronize(); // NOTE(review): redundant second barrier, kept from the original
    float *buffer_image_cpu = (float *)malloc(WIDTH * HEIGHT * sizeof(float));
    cudaMemcpy(buffer_image_cpu, buffer_image, WIDTH * HEIGHT * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(buffer_image); // release the CUDA allocation backing the buffer
    return buffer_image_cpu;
}
4,263
typedef int2 Record;

// Splits an array of Record (int2) pairs into two flat int arrays:
// d_output1 receives the .x fields and d_output2 the .y fields.
// Each thread walks the input with a stride of `delta`, starting from its
// flattened global id (2D grid / 2D block layout, x-major flattening).
__global__ void mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
    const int localId  = threadIdx.x + threadIdx.y * blockDim.x;
    const int blockId  = blockIdx.x + blockIdx.y * gridDim.x;
    const int firstPos = blockId * blockDim.x + localId;

    for (int pos = firstPos; pos < rLen; pos += delta)
    {
        Record rec = d_R[pos];
        d_output1[pos] = rec.x;
        d_output2[pos] = rec.y;
    }
}
4,264
#include <cuda.h>
#include "cuda_runtime.h"
// #include <cutil.h>
#include "texture_fetch_functions.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>

#define DATATYPE int
#define ARRAYLEN 1000000

// Report and abort on the last recorded CUDA error (used via __CUDA_ERROR).
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        std::cerr << std::endl << " CUDA error " << file << "(" << line << ")" << " : " << errorMessage << " -> " << cudaGetErrorString(err) << "(" << (int) err << ") "<< std::endl << std::endl;
        cudaDeviceReset();
        std::exit(EXIT_FAILURE);
    }
}

#define __CUDA_ERROR(msg) \
{ \
    cudaDeviceSynchronize(); \
    __getLastCudaError (msg, __FILE__, __LINE__);\
}

#define repeat128(x) for(int i=0;i<128;i++){x}

const size_t size_w = 4096;
const size_t size_h = 4096;
typedef int2 mytype;
typedef mytype arr_t[size_w];
const mytype A_val = make_int2(1,2);
const mytype B_val = make_int2(1,2);

// C = A + B element-wise over an sh x sw grid of int2 values.  The launch's
// x-dimension walks columns (idy) and y-dimension walks rows (idx).
// BUG FIX: the original ignored B entirely -- it read A[idx][idy+1], which is
// also one element out of bounds on the last column -- and performed both
// loads before the bounds check.  Guard first, then load A and B at the same
// coordinates.
__global__ void matrix_add_2D(const arr_t * __restrict__ A, const arr_t * __restrict__ B, arr_t * __restrict__ C, const size_t sw, const size_t sh)
{
    size_t idy = threadIdx.x+blockDim.x*(size_t)blockIdx.x;
    size_t idx = threadIdx.y+blockDim.y*(size_t)blockIdx.y;
    if ((idx < sh) && (idy < sw)) {
        mytype a = A[idx][idy];
        mytype b = B[idx][idy];
        C[idx][idy] = make_int2(a.x + b.x, a.y + b.y);
    }
}

int main(){
    arr_t *A,*B,*C;
    cudaSetDevice(2);
    size_t ds = size_w*size_h*sizeof(mytype);
    cudaError_t err = cudaMallocManaged(&A, ds);
    if (err != cudaSuccess) {std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; return 0;}
    cudaMallocManaged(&B, ds);
    cudaMallocManaged(&C, ds);
    for (int x = 0; x < size_h; x++)
        for (int y = 0; y < size_w; y++){
            A[x][y] = A_val;
            B[x][y] = B_val;
            C[x][y] = make_int2(1,2);
        }
    // Prefetch managed memory when the device supports concurrent access.
    // NOTE(review): the prefetch/attribute calls target device 0 while the
    // kernel runs on device 2 (cudaSetDevice(2) above) -- confirm intent.
    int attr = 0;
    cudaDeviceGetAttribute(&attr, cudaDevAttrConcurrentManagedAccess,0);
    if (attr){
        cudaMemPrefetchAsync(A, ds, 0);
        cudaMemPrefetchAsync(B, ds, 0);
        cudaMemPrefetchAsync(C, ds, 0);
    }
    dim3 threads(32,32);
    // Ceil-div grid so partial tiles at the edges are covered.
    dim3 blocks((size_w+threads.x-1)/threads.x, (size_h+threads.y-1)/threads.y);
    matrix_add_2D<<<blocks,threads>>>(A,B,C, size_w, size_h);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; return 0;}
    // (int2 has no operator!= / operator<<, so the element check stays disabled.)
    // for (int x = 0; x < size_h; x++)
    //   for (int y = 0; y < size_w; y++)
    //     if (C[x][y] != A_val+B_val) {std::cout << "mismatch at: " << x << "," << y << " was: " << C[x][y] << " should be: " << A_val+B_val << std::endl; return 0;}
    ;
    std::cout << "Success!" << std::endl;
    return 0;
}
4,265
// Accumulate B into A element-wise.  Only threadIdx.x is used for indexing,
// so the launch is expected to be a single block with one thread per element.
__global__ void two_threads(int *A, int *B)
{
    const int idx = threadIdx.x;
    A[idx] = A[idx] + B[idx];
}
4,266
#include <iostream>
#include <random>
#include <cuda_runtime_api.h>

// Allocate and fill an array of `length` doubles with uniform values in
// [0, 10), using a deterministic engine seeded with `seed`.  Caller frees.
double* InitializeArray(const int length,const int seed)
{
    double* A = (double*)malloc(length * sizeof(double));
    std::default_random_engine e;
    std::uniform_real_distribution<double> dist(0,10);
    e.seed(seed);
    for(int i=0;i<length;++i)
        A[i] = dist(e);
    return A;
}

// Print `length` elements of A on one line, labelled with `str`.
void printArray(double* A,const int length,const std::string& str)
{
    std::cout<<"Array "<< str << ":";
    for(int i=0;i<length;++i)
        std::cout<<" "<<A[i];
    std::cout<<std::endl;
}

// CPU reference: element-wise sum.  Caller owns the returned buffer.
double* ArraySum(double* A,double* B,const int length)
{
    double* C = (double*)malloc(length*sizeof(double));
    for(int i=0;i<length;++i)
        C[i] = A[i] + B[i];
    return C;
}

// One thread per element; must be launched with at least `length` threads in
// a single block, since only threadIdx.x is used.
__global__ void ArraySumKernel(double *A, double *B, double *C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    const int length = 10;
    const size_t size = length * sizeof(double);

    double *h_A,*h_B,*h_C;
    h_A = InitializeArray(length, 0);
    h_B = InitializeArray(length, 10);
    h_C = (double*)malloc(size);
    printArray(h_A,length,"A");
    printArray(h_B,length,"B");

    std::cout<<"CPU Array Sum Result:\n";
    // BUG FIX: the original passed ArraySum(...) straight into printArray and
    // leaked the malloc'd result; keep the pointer so it can be freed.
    double *cpu_C = ArraySum(h_A,h_B,length);
    printArray(cpu_C,length,"C");
    free(cpu_C);

    std::cout<<"GPU Array Sum Result:\n";
    const int dev = 0;
    cudaSetDevice(dev);   // use the named constant instead of a bare literal

    double *d_A,*d_B,*d_C;
    cudaMalloc((void **)&d_A,size);
    cudaMalloc((void **)&d_B,size);
    cudaMalloc((void **)&d_C,size);
    cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);

    ArraySumKernel<<<1, length>>>(d_A, d_B, d_C);
    // cudaMemcpy on the default stream blocks until the kernel has finished.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    printArray(h_C, length, "C");

    free(h_A); free(h_B); free(h_C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}
4,267
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>

using namespace std;

// Row-major 2D index helper.
#define IDX2C(i,j,ld) (((i)*(ld))+(j))

// im2col-style unrolling: for each output position (i, j), copy the
// filter-sized patch of `mat` into the corresponding row of `unroll`.
// Each channel writes into its own column band of the unrolled matrix.
__global__ void load(float * mat,int channel_id, int channel_count, float * unroll, int height_stride,int width_stride, int mat_height,int mat_width, int filter_height,int filter_width, int res_height,int res_width)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i < res_height && j < res_width){
        for(int x = 0;x < filter_height;x++){
            for(int y = 0;y < filter_width;y++){
                unroll[IDX2C(IDX2C(i,j,res_width),IDX2C(x,y,filter_width) + channel_id * filter_height * filter_width ,channel_count * filter_height * filter_width)] = mat[IDX2C(i * height_stride + x,j * width_stride + y,mat_width)];
            }
        }
    }
    __syncthreads(); // NOTE(review): no shared memory is used here, so this barrier has no effect
}

// Naive matrix multiply C = A (m x n) * B (n x k), one thread per C element.
__global__ void MatMul(float * A, float * B ,float * C,int m,int n,int k)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    float sum = 0;
    if (i < m && j < k){
        for(int x = 0;x < n;x++){
            sum += A[i * n + x] * B[x * k + j];
        }
        C[i * k + j] = sum;
    }
}

// Interactive driver: 3-channel convolution with a 3x3 filter implemented as
// im2col unrolling (`load`) followed by a matrix-vector multiply (`MatMul`).
int main()
{
    timeval t1, t2;
    int x,y;
    cout << "Input threadsPerBlock.x:";
    cin >> x;
    cout << "Input threadsPerBlock.y:";
    cin >> y;
    dim3 threadsPerBlock(x,y);
    int height, width;
    cout << "Input problem size:";
    cin >> height;
    width = height;
    int filter_height = 3;
    int filter_width = 3;
    int channel = 3;
    int stride;
    cout << "Input stride:";
    cin >> stride;
    // Padding chosen so strided filter placement covers the input evenly.
    int padding = ((((height - filter_height) / stride + 1) * stride - (height - filter_height)) % stride ) / 2;
    float* Mat[channel];
    float* filter[channel];
    int Mat_size = sizeof(float) * (height + 2 * padding) * (width + 2 * padding);
    int res_size = sizeof(float) * ((height - filter_height + 2 * padding) / stride + 1) * ((width - filter_width + 2 * padding) / stride + 1);
    int filter_size = sizeof(float) * filter_height * filter_width;
    for(int k = 0;k < channel;k++){
        Mat[k] = (float*) malloc(Mat_size);
        // NOTE(review): sizeof(Mat[k]) is the size of a pointer (4/8 bytes),
        // not the buffer size, so the padded border stays uninitialized --
        // the intent was most likely memset(Mat[k], 0, Mat_size).
        memset(Mat[k],0,sizeof(Mat[k]));
        // Fill the interior (non-padding) region with its own index value.
        for(int i = padding;i < height + padding;i++){
            for(int j = padding;j < width + padding;j++){
                Mat[k][IDX2C(i,j,width + 2 * padding)] = IDX2C(i,j,width + 2 * padding);
            }
        }
    }
    // Each channel's filter holds the values 1..9.
    for(int i = 0;i < channel;i++){
        filter[i] = (float*)malloc(filter_size);
        for(int j = 0;j < filter_height * filter_width;j++){
            filter[i][j] = j + 1;
        }
    }
    float * res;
    res = (float*)malloc(res_size);
    gettimeofday(&t1, NULL);
    float * d_Mat[channel], *d_filter[channel], *d_res;
    for(int i = 0;i < channel;i++){
        cudaMalloc(&d_Mat[i], Mat_size);
        cudaMemcpy(d_Mat[i],Mat[i],Mat_size,cudaMemcpyHostToDevice);
    }
    for(int i = 0;i < channel;i++){
        cudaMalloc(&d_filter[i],filter_size);
        cudaMemcpy(d_filter[i],filter[i],filter_size,cudaMemcpyHostToDevice);
    }
    cudaMalloc(&d_res, res_size);
    float * unroll;
    cudaMalloc(&unroll,res_size * filter_height * filter_width * channel );
    int res_height = (height - filter_height + 2 * padding) / stride + 1;
    int res_width = (width - filter_width + 2 * padding) / stride + 1;
    // Ceil-division grid so partial tiles are covered.
    dim3 numBlocks((res_height % threadsPerBlock.x) ? res_height / threadsPerBlock.x + 1 : res_height / threadsPerBlock.x ,(res_width % threadsPerBlock.y) ? res_width / threadsPerBlock.y + 1 : res_width / threadsPerBlock.y);
    for(int i = 0;i < channel;i++){
        load<<<numBlocks, threadsPerBlock>>>(d_Mat[i],i,channel, unroll,stride,stride,height + 2 * padding,width + 2 * padding,filter_height,filter_width,res_height,res_width);
    }
    // Stack the per-channel filters into one weight vector W.
    float * W;
    cudaMalloc(&W,filter_size * channel);
    for(int i = 0;i < channel;i++){
        cudaMemcpy(W + i * (filter_height * filter_width),filter[i],filter_size,cudaMemcpyHostToDevice);
    }
    dim3 numBlocks1((res_height * res_width % threadsPerBlock.x) ? res_height * res_width / threadsPerBlock.x + 1 :res_height * res_width / threadsPerBlock.x ,(filter_height * filter_width % threadsPerBlock.y) ? filter_height * filter_width / threadsPerBlock.y + 1 : filter_height * filter_width / threadsPerBlock.y);
    // (res_h*res_w x ch*fh*fw) * (ch*fh*fw x 1): one output value per position.
    MatMul<<<numBlocks1,threadsPerBlock>>>(unroll,W,d_res,res_height*res_width,filter_height * filter_width * channel, 1);
    // NOTE(review): kernel launches are asynchronous; without a
    // cudaDeviceSynchronize() before this gettimeofday, the reported time
    // covers the launches only, not the actual computation.
    gettimeofday(&t2, NULL);
    printf("convolution time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
    cudaMemcpy(res, d_res, res_size, cudaMemcpyDeviceToHost);
    /*
    for(int x = 0;x < channel;x++){
        cout << "Input channel " << x << " after padding:" << endl;
        for(int i = 0;i < height + 2 * padding;i++){
            for(int j = 0;j < width + 2 * padding;j++){
                cout << Mat[x][i * (width + 2 * padding) + j] << " ";
            }
            cout << endl;
        }
    }
    cout << endl;
    for(int x = 0;x < channel;x++){
        cout << "Filter channel " << x << ":" << endl;
        for(int i = 0;i < filter_height;i++){
            for(int j = 0;j < filter_width;j++){
                cout << filter[x][i * filter_width + j] << ' ';
            }
            cout << endl;
        }
    }
    cout << endl;
    cout << "Res:" << endl;
    for(int i = 0;i < ((height - filter_height + 2 * padding) / stride + 1);i++){
        for(int j = 0;j < ((width - filter_width + 2 * padding) / stride + 1);j++){
            cout << res[i * ((width - filter_width + 2 * padding) / stride + 1) + j] << ' ';
        }
        cout << endl;
    }
    */
    for(int i = 0;i < channel;i++){
        cudaFree(d_Mat[i]);
    }
    for(int i = 0;i < channel;i++){
        cudaFree(d_filter[i]);
    }
    cudaFree(d_res);
    for(int k = 0;k < channel;k++){
        free(Mat[k]);
    }
    for(int i = 0;i < channel;i++){
        free(filter[i]);
    }
    free(res);
}
4,268
#include <stdio.h>

// Reverse a 64-element int array in place via shared memory: each thread
// stages its element into s[], then writes back the mirrored element.
// Assumes a single block of exactly 64 threads (s[] is sized to match).
__global__ void kernel(int *d, int n){
    __shared__ int s[64];
    int tid = threadIdx.x;
    int tr = n - tid - 1;
    s[tid] = d[tid];
    __syncthreads();   // all stores to s[] must land before any mirrored read
    d[tid] = s[tr];
}

int main(int argc, char* argv[]){
    //initialization code
    int size;
    float total_time;
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    size = 64;
    int totalSize = size * sizeof(int);
    int *a,*r,*d,*dev_d;
    cudaMalloc((void**)&dev_d,totalSize);
    a = (int*) malloc(totalSize);
    r = (int*) malloc(totalSize);
    d = (int*) malloc(totalSize);
    //end mallocs

    // a = input, r = expected (reversed) result, d = output staging
    int idx;
    for(idx=0;idx<size;idx++){
        a[idx] = idx;
        r[idx] = size-idx-1;
        d[idx] = 0;
    }

    //copy to dev
    cudaMemcpy(dev_d,a,totalSize,cudaMemcpyHostToDevice);

    // BUG FIX: the original ran the loop once but divided the accumulated time
    // by 10.0, reporting an average 10x too small.  Drive the loop bound and
    // the divisor from one constant so they cannot drift apart.
    const int iterations = 1;
    int iteration = 0;
    float avg_time = 0.0;
    for(iteration=0;iteration<iterations;iteration++){
        //call kernel and measure times
        cudaEventRecord(start,0);
        kernel<<<1,64>>>(dev_d,size);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&total_time,start,stop);
        printf("\n time for %i threads : %f \n",size,total_time);
        avg_time+=total_time;
    }
    avg_time/=(float)iterations;
    // BUG FIX: the message said "vector mult" but this program reverses a vector.
    printf("average time for %i size vector reverse is %f ",size,avg_time);

    //copy back and verify against the precomputed reversal
    cudaMemcpy(d,dev_d,totalSize,cudaMemcpyDeviceToHost);
    for(idx = 0;idx < size; idx++)
        if(d[idx] != r[idx])
            printf("Verificar- Hay un error");

    //free
    free(a);
    free(r);
    free(d);
    cudaFree(dev_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
4,269
#include <iostream>
#include <cstdlib>
#include <math.h>
#include <chrono>

// Naive matrix multiply on the GPU: one thread per output element.
// Matrices are n x n, stored column-major (element (i,j) at i + j*n).
// Launch with a 2D grid/block that covers at least n x n threads.
__global__ void dgem_gpu(int n, float *A, float *B, float *C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard: the grid may overhang the matrix edges.
    if (i >= n || j >= n) return;
    // Bug fix: the kernel previously just copied B into C ("demo filler");
    // it now computes the real product C(i,j) += sum_k A(i,k) * B(k,j).
    float cij = C[i + j * n];
    for (int k = 0; k < n; k++)
        cij += A[i + k * n] * B[k + j * n];
    C[i + j * n] = cij;
}

// CPU reference implementation (column-major, C += A * B).
void square_dgemm_naive (int n, float* A, float* B, float* C)
{
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j) {
            float cij = C[i + j * n];
            for (int k = 0; k < n; k++)
                cij += A[i + k * n] * B[k + j * n];
            C[i + j * n] = cij;
        }
}

// Element-wise comparison of two n x n matrices with a fixed tolerance.
// Returns 1 when every element agrees within 0.0003, 0 otherwise.
int check(int n, float *A, float *B)
{
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j) {
            double diff = std::abs(A[i + j * n] - B[i + j * n]);
            if (diff > 0.0003) {
                printf("diff is %f\n", diff);
                return 0;
            }
        }
    return 1;
}

int main(void)
{
    int N = 1000;
    int size = N * N; // square matrix

    float *A, *B, *C, *verify;
    // Unified memory: accessible from both host and device.
    cudaMallocManaged(&A, size * sizeof(float));
    cudaMallocManaged(&B, size * sizeof(float));
    cudaMallocManaged(&C, size * sizeof(float));
    cudaMallocManaged(&verify, size * sizeof(float));

    // Initialize inputs on the host; outputs start at zero.
    for (int i = 0; i < size; i++) {
        A[i] = i * 0.000000001;
        B[i] = i * 0.000000001;
        C[i] = 0.0f;
        verify[i] = 0.0f;
    }

    // CPU reference answer.
    auto serialStart = std::chrono::system_clock::now();
    square_dgemm_naive(N, A, B, verify);
    auto serialEnd = std::chrono::system_clock::now();
    std::chrono::duration<double> serialElapsed = serialEnd - serialStart;
    std::cout << serialElapsed.count() << "s\n";

    // GPU run. Bug fix: the previous code launched <<<N, N>>> (a 1D
    // configuration, so the kernel's j index was always 0) and passed
    // (A, C, verify), which only overwrote the reference matrix with
    // zeros. Use a 2D launch covering N x N and the real operands.
    dim3 threads(16, 16);
    dim3 blocks((N + threads.x - 1) / threads.x,
                (N + threads.y - 1) / threads.y);
    auto gpuStart = std::chrono::system_clock::now();
    dgem_gpu<<<blocks, threads>>>(N, A, B, C);
    cudaDeviceSynchronize(); // include kernel completion in the timing
    auto gpuEnd = std::chrono::system_clock::now();
    std::chrono::duration<double> gpuElapsed = gpuEnd - gpuStart;
    std::cout << gpuElapsed.count() << "s\n";

    int correct = check(N, C, verify);

    // Free memory
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(verify);

    if (correct == 0) {
        printf("INVALID OUTPUT\n");
        exit(1);
    }
    printf("Correct output!\n");
    return 0;
}
4,270
#include <iostream>
#include <chrono>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include <algorithm>

// Number of threads per block; also the tile size each block scans.
#define BLOCKSIZE 128

// Combining operator for the scan. MUST BE ASSOCIATIVE.
__device__ inline int f(int a, int b){
    return a + b;
}

/**
 * Implements an inclusive prefix-scan algorithm ON EACH BLOCK using a recursive pattern modeled
 * after https://en.wikipedia.org/wiki/Prefix_sum#/media/File:Prefix_sum_16.svg.
 *
 * Arguments: x points to an array of size n, while out points to an array of size ceil(n / BLOCKSIZE).
 * The scanned block is written back into x in place, and each block's total is written to out.
 *
 * NOTE(review): x[i] is loaded without a bounds check, so every block must be
 * full — the caller asserts n % BLOCKSIZE == 0. The n parameter itself is
 * otherwise unused. Launch with blockDim.x == BLOCKSIZE.
 *
 * Description of stages:
 *
 * - First there's a downsweep step. In the first iteration, every second thread sums with
 *   its predecessor. In the second iteration, every fourth thread sums with its second predecessor.
 *   This continues until the last thread contains the sum of the whole block.
 *   For example, element #1 stores sum of #0 and #1. Element #7 stores sum of #0 through #7.
 *   Element #2 stores only itself, as it is an odd-ordered element (0-indexing!).
 *   For an 8-element array with all values = 1, the array will look like 1 2 1 4 1 2 1 8.
 *
 * - Then there's an upsweep step. This propagates changes from lower elements to upper elements.
 *   It is difficult to explain this algorithm without sounding like a robot, so I'd recommend
 *   the wikipedia link, as it shows visually what's happening for n=16.
 *   I'll just posit that the element #6 in the array 1 2 1 4 1 2 1 8 will get summed as elements #3 + #5 + #6.
 **/
__global__ void scan(const int n, int *x, int *out){
    __shared__ int scan_v[BLOCKSIZE];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // We use shared memory to reduce latency.
    scan_v[tid] = x[i];
    __syncthreads();
    // Downsweep step: i is the participation mask (1, 11, 111, ... binary),
    // j is the distance to the partner element. Barriers are outside the
    // divergent if, so all threads reach them.
    for(int i = 1, j = 1; j < BLOCKSIZE; i = (i << 1) + 1, j *= 2){
        if((tid & i) == i){
            scan_v[tid] = f(scan_v[tid], scan_v[tid - j]);
        }
        __syncthreads();
    }
    // Upsweep step: propagate partial sums down the tree at halving strides.
    for(int i = BLOCKSIZE / 2; i >= 1; i /= 2){
        int oth = tid - i;
        if(((tid + 1) & (2 * i - 1)) == i and 0 <= oth){
            scan_v[tid] = f(scan_v[tid], scan_v[oth]);
        }
        __syncthreads();
    }
    // Write the in-place scanned block back; the last thread also exports
    // the block total for the next recursion level.
    x[i] = scan_v[tid];
    if(tid == blockDim.x - 1){
        out[blockIdx.x] = scan_v[tid];
    }
}

/**
 * Given an array in of size n, and an array out of size n * BLOCKSIZE,
 * add the value in[i - 1] to out[BLOCKSIZE * i + k], for all k from [0, BLOCKSIZE).
 * Block 0 is skipped: it has no preceding block total to fold in.
 **/
__global__ void propagate(const int n, int *in, int *out){
    int bid = blockIdx.x;
    int tcount = blockDim.x;
    int tid = threadIdx.x;
    int i = bid * tcount + tid;
    if(bid == 0){
        return;
    }
    out[i] = f(out[i], in[bid - 1]);
}

// Since this algorithm recursively calculates prefix scans on blocks, and blocks of blocks etc,
// we get the number of recursive calls we'll have to do, as well as the size of the array at each
// recursive step.
// Computes the sizes of the recursive scan levels: level 0 is the input
// size n, and each subsequent level holds one partial-sum slot per block
// of the previous level, terminating at a single element.
std::vector<int> get_levels(const int n, int block_size){
    std::vector<int> res;
    int x = n;
    while(x > 1){
        res.push_back(x);
        x = (x + block_size - 1) / block_size;
    }
    res.push_back(1);
    return res;
}

int main(){
    const int n = (1 << 28);
    const int block_size = BLOCKSIZE;
    assert(n % block_size == 0); // scan() reads full blocks only

    std::vector<int> levels = get_levels(n, block_size);
    for(int i : levels){
        std::cout << i << ' ';
    }
    std::cout << std::endl;

    int *x = (int *) malloc(n * sizeof(int));
    assert(x != NULL);
    for(int i = 0; i < n; i++){
        x[i] = 1;
    }

    // One device buffer per level. std::vector replaces the previous
    // non-standard variable-length array of pointers.
    std::vector<int*> d_arrays(levels.size(), nullptr);
    for(size_t i = 0; i < levels.size(); i++){
        // Check the API return code: cudaMalloc does not promise to null
        // the pointer on failure, so asserting d_arrays[i] != NULL (as the
        // old code did) was unreliable.
        cudaError_t err = cudaMalloc(&d_arrays[i], levels[i] * sizeof(int));
        assert(err == cudaSuccess);
    }

    cudaMemcpy(d_arrays[0], x, (size_t)levels[0] * sizeof(int), cudaMemcpyHostToDevice);

    // Downward pass: scan each level in place, emitting per-block totals
    // into the next (smaller) level.
    for(size_t i = 1; i < levels.size(); i++){
        int block_count = levels[i];
        scan<<<block_count, block_size>>>(levels[i - 1], d_arrays[i - 1], d_arrays[i]);
    }
    // Upward pass: fold the higher-level prefix sums back down.
    for(int i = (int)levels.size() - 1; i >= 1; i--){
        int block_count = levels[i];
        propagate<<<block_count, block_size>>>(levels[i - 1], d_arrays[i], d_arrays[i - 1]);
    }

    int *result = (int *) malloc(n * sizeof(int));
    assert(result != NULL);
    cudaMemcpy(result, d_arrays[0], (size_t)n * sizeof(int), cudaMemcpyDeviceToHost);

    // With all-ones input, the inclusive scan must be 1, 2, 3, ...
    int status = 0;
    for(int i = 0; i < n; i++){
        if(result[i] != i + 1){
            std::cerr << i << ' ' << i + 1 << ' ' << result[i] << '\n';
            status = -1;
            break;
        }
    }
    if(status == 0){
        std::cout << "memory usage: " << n * sizeof(int) << " bytes" << std::endl;
    }

    // Release all host and device memory (previously leaked).
    for(int* p : d_arrays){
        cudaFree(p);
    }
    free(result);
    free(x);
    return status;
}
4,271
// Derivative of the cross-entropy loss with respect to the network output:
// d_entropy[i] = -targets[i] / sigma_o[i], element-wise over the
// n_train x n_classes matrix (flattened row-major).
//
// NOTE(review): the loop starts at blockIdx.x * n_classes but strides by
// blockDim.x only — this reproduces the original indexing and presumably
// assumes blockDim.x == n_classes at launch; verify against the caller.
__global__ void deriv_entropy(int n_train, int n_classes, float* targets, float* sigma_o, float* d_entropy)
{
    const int total = n_train * n_classes;
    const int step = blockDim.x;
    int pos = blockIdx.x * n_classes + threadIdx.x;
    while (pos < total) {
        d_entropy[pos] = -targets[pos] / sigma_o[pos];
        pos += step;
    }
}
4,272
// Minimal translation unit: its sole purpose is to verify that the CUDA
// headers below can be located by the build; the program itself does nothing.
#include <cuda.h>
#include <cuda_runtime_api.h>

int main(int argc, char** argv)
{
    return 0;
}
4,273
#include "includes.h"

// One Lax-Wendroff time step for a 1D system of three conserved variables
// u1, u2, u3 on nbrOfGrids grid points. The *Temp arrays are scratch
// buffers for the half/full steps, f1..f3 hold the flux, and tau/h are the
// time step and grid spacing (passed as device pointers).
//
// NOTE(review): every helper indexes with blockIdx.x * blockDim.x +
// threadIdx.x but strides by blockDim.x only, and laxWendroffStep() has no
// synchronization between its phases — this appears to assume a
// single-block launch; confirm the launch configuration.

// Copies the scratch state back into the live state (interior points only;
// the two boundary points are handled by d_boundaryCondition).
__device__ void updateU(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3, const double *d_u1Temp, const double *d_u2Temp, const double *d_u3Temp)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < nbrOfGrids; i += stride) {
        if ((i > 0) && (i < nbrOfGrids - 1)) {
            d_u1[i] = d_u1Temp[i];
            d_u2[i] = d_u2Temp[i];
            d_u3[i] = d_u3Temp[i];
        }
    }
}

// Full step: uTemp[i] = u[i] - (tau/h) * (f[i] - f[i-1]), interior points only.
__device__ void step(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, double *d_u1Temp, double *d_u2Temp, double *d_u3Temp, const double *d_f1, const double *d_f2, const double *d_f3, const double *d_tau, const double *d_h)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < nbrOfGrids; i += stride) {
        if ((i > 0) && (i < nbrOfGrids - 1)) {
            d_u1Temp[i] = d_u1[i] - *d_tau / *d_h * (d_f1[i] - d_f1[i - 1]);
            d_u2Temp[i] = d_u2[i] - *d_tau / *d_h * (d_f2[i] - d_f2[i - 1]);
            d_u3Temp[i] = d_u3[i] - *d_tau / *d_h * (d_f3[i] - d_f3[i - 1]);
        }
    }
}

// Half step: uTemp[i] = (u[i+1] + u[i])/2 - (tau / 2h) * (f[i+1] - f[i]),
// i.e. the state at the cell interface at the half time level.
__device__ void halfStep(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, double *d_u1Temp, double *d_u2Temp, double *d_u3Temp, const double *d_f1, const double *d_f2, const double *d_f3, const double *d_tau, const double *d_h)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < nbrOfGrids; i += stride) {
        if ((i > 0) && (i < nbrOfGrids - 1)) {
            d_u1Temp[i] = (d_u1[i + 1] + d_u1[i]) / 2 - *d_tau / 2 / *d_h * (d_f1[i + 1] - d_f1[i]);
            d_u2Temp[i] = (d_u2[i + 1] + d_u2[i]) / 2 - *d_tau / 2 / *d_h * (d_f2[i + 1] - d_f2[i]);
            d_u3Temp[i] = (d_u3[i + 1] + d_u3[i]) / 2 - *d_tau / 2 / *d_h * (d_f3[i + 1] - d_f3[i]);
        }
    }
}

// Recomputes the flux from the conserved variables: with rho = u1 (density),
// m = u2 (momentum), e = u3 (energy) and pressure p = (gama-1)*(e - m^2/(2 rho)),
// the flux is f = (m, m^2/rho + p, (m/rho)*(e + p)) — the 1D Euler flux.
__device__ void updateFlux(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, double *d_f1, double *d_f2, double *d_f3, const double *d_gama)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x;
    double rho, m, e, p;
    for (int i = index; i < nbrOfGrids; i += stride) {
        rho = d_u1[i];
        m = d_u2[i];
        e = d_u3[i];
        p = (*d_gama - 1) * (e - m * m / rho / 2);
        d_f1[i] = m;
        d_f2[i] = m * m / rho + p;
        d_f3[i] = m / rho * (e + p);
    }
}

// Reflecting boundaries: copy the first/last interior cell into the ghost
// cell, negating the momentum component (u2). Every thread performs the same
// idempotent writes, so no thread selection is needed.
__device__ void d_boundaryCondition(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3)
{
    d_u1[0] = d_u1[1];
    d_u2[0] = -d_u2[1];
    d_u3[0] = d_u3[1];
    d_u1[nbrOfGrids - 1] = d_u1[nbrOfGrids - 2];
    d_u2[nbrOfGrids - 1] = -d_u2[nbrOfGrids - 2];
    d_u3[nbrOfGrids - 1] = d_u3[nbrOfGrids - 2];
}

// Full two-stage Lax-Wendroff update: flux -> half step -> boundary ->
// flux at half level -> full step -> commit -> boundary.
// NOTE(review): there is no __syncthreads() between these phases even though
// halfStep/step read neighbor values written by updateFlux — correct only if
// the launch guarantees the required ordering (e.g. one warp/one block);
// confirm with the host-side launch code.
__global__ void laxWendroffStep(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3, double *d_u1Temp, double *d_u2Temp, double *d_u3Temp, double *d_f1, double *d_f2, double *d_f3, const double *d_tau, const double *d_h, const double *d_gama)
{
    updateFlux(nbrOfGrids, d_u1, d_u2, d_u3, d_f1, d_f2, d_f3, d_gama);
    halfStep(nbrOfGrids, d_u1, d_u2, d_u3, d_u1Temp, d_u2Temp, d_u3Temp, d_f1, d_f2, d_f3, d_tau, d_h);
    d_boundaryCondition(nbrOfGrids, d_u1Temp, d_u2Temp, d_u3Temp);
    updateFlux(nbrOfGrids, d_u1Temp, d_u2Temp, d_u3Temp, d_f1, d_f2, d_f3, d_gama);
    step(nbrOfGrids, d_u1, d_u2, d_u3, d_u1Temp, d_u2Temp, d_u3Temp, d_f1, d_f2, d_f3, d_tau, d_h);
    updateU(nbrOfGrids, d_u1, d_u2, d_u3, d_u1Temp, d_u2Temp, d_u3Temp);
    d_boundaryCondition(nbrOfGrids, d_u1, d_u2, d_u3);
}
4,274
#include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>

#define N 1000000

// Scale factor applied by vectorAdd; lives in constant memory.
// NOTE(review): it is initialized to 0 and never updated (no
// cudaMemcpyToSymbol in this file), so vectorAdd would write all zeros.
// Neither kernel is launched by main() below.
__constant__ int factor = 0;

// c[i] = factor * (a[i] + b[i]); 1D launch, one element per thread.
// No bounds guard: the grid must exactly cover the arrays.
__global__ void vectorAdd(int *a, int *b, int *c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    c[i] = factor*(a[i] + b[i]);
}

// c[i][j] = a[i][j] + b[i][j]; expects a 2D launch and pointer-to-pointer
// matrices (both rows array and row data must live in device memory).
__global__ void matrixAdd(int **a,int **b, int**c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    c[i][j] = a[i][j] + b[i][j];
}

// Prints "<expr> = <value>" using the stringized expression.
#define PRINT(x) \
std::cout << #x " = " << x << std::endl

// Unused helper (kept as-is).
void func(const char* ptr)
{
    std::cout << "ptr = " << ptr << std::endl;
}

// Enumerates all CUDA devices and dumps their cudaDeviceProp fields,
// timing the whole query with std::chrono.
int main(int argc, char** argv)
{
    // start time
    auto startTime = std::chrono::high_resolution_clock::now();
    printf("Hello World\n");
    // get the number of devices
    int numDevices;
    cudaGetDeviceCount(&numDevices);
    PRINT(numDevices);
    cudaDeviceProp prop;
    for (auto i=0 ; i<numDevices; i++)
    {
        cudaGetDeviceProperties(&prop, i);
        PRINT(prop.name);
        PRINT(prop.totalGlobalMem);
        PRINT(prop.sharedMemPerBlock);
        PRINT(prop.regsPerBlock);
        PRINT(prop.warpSize);
        PRINT(prop.memPitch);
        PRINT(prop.maxThreadsPerBlock);
        PRINT(prop.maxThreadsDim[0]);
        PRINT(prop.maxThreadsDim[1]);
        PRINT(prop.maxThreadsDim[2]);
        PRINT(prop.maxGridSize[0]);
        PRINT(prop.maxGridSize[1]);
        PRINT(prop.maxGridSize[2]);
        PRINT(prop.totalConstMem);
        PRINT(prop.major);
        PRINT(prop.minor);
        PRINT(prop.clockRate);
        PRINT(prop.textureAlignment);
        PRINT(prop.deviceOverlap);
        PRINT(prop.multiProcessorCount);
        PRINT(prop.kernelExecTimeoutEnabled);
        PRINT(prop.integrated);
        PRINT(prop.canMapHostMemory);
        PRINT(prop.computeMode);
        PRINT(prop.maxTexture1D);
        PRINT(prop.maxTexture2D[0]);
        PRINT(prop.maxTexture2D[1]);
        PRINT(prop.maxTexture3D[0]);
        PRINT(prop.maxTexture3D[1]);
        PRINT(prop.maxTexture3D[2]);
        // PRINT(prop.maxTexture2DArray[0]);
        // PRINT(prop.maxTexture2DArray[1]);
        // PRINT(prop.maxTexture2DArray[2]);
        PRINT(prop.concurrentKernels);
    }
    // stop time
    auto stopTime = std::chrono::high_resolution_clock::now();
    PRINT((stopTime - startTime).count());
    printf("Goodbye World\n");
}
4,275
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Fills the three length-N arrays: c with zeros, a with 1+i, b with 1-i.
// (The former per-iteration "if (i < N)" test was always true and was dropped.)
void initialize (int N, float *a, float *b, float *c){
    for (int i = 0; i < N; i++){
        c[i] = 0;
        a[i] = 1 + i;
        b[i] = 1 - i;
    }
}

// Element-wise sum: c[i] = a[i] + b[i] for i in [0, N).
void addVectors (int N, float *a, float *b, float *c){
    for (int i = 0; i < N; i++){
        c[i] = a[i] + b[i];
    }
}

int main (int argc, char **argv){
    if (argc != 2) {
        // Previously exited silently; tell the user what went wrong.
        fprintf(stderr, "usage: %s N\n", argv[0]);
        exit (1);
    }
    int N = atoi(argv[1]);
    if (N < 0) N = 0; // atoi gives no error signal; clamp negative sizes

    float *a, *b, *c;
    a = (float *) malloc(N*sizeof(float));
    b = (float *) malloc(N*sizeof(float));
    c = (float *) malloc(N*sizeof(float));
    if (N > 0 && (a == NULL || b == NULL || c == NULL)) {
        fprintf(stderr, "out of memory\n");
        exit (1);
    }

    initialize(N,a,b,c);
    addVectors(N,a,b,c);

    // Bug fix: the old loop always printed c[0..4], reading out of bounds
    // whenever N < 5.
    for (int i = 0; i < N && i < 5; i++) {
        printf("%f\n", c[i]);
    }

    free(a);
    free(b);
    free(c);
    return 0;
}
4,276
#include <vector>
#include <iostream>
#include "stdio.h"
#include <cufft.h>

#define cuda_safe_call(err) __cuda_safe_call(err, __FILE__, __LINE__)

// Prints a diagnostic (file:line and error string) when a CUDA runtime
// call fails; does not abort.
inline void __cuda_safe_call(cudaError err, const char *file, const int line)
{
    if (cudaSuccess != err)
        printf("cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
}

// Translates a cufftResult into a printed diagnostic.
// Returns 0 on success, 1 on any error.
inline int check_cufft(cufftResult err)
{
    if (err == CUFFT_SUCCESS)
    {
        //std::cout << "cufft success!" << std::endl;
        return 0;
    }
    else
    {
        if (err == CUFFT_INVALID_PLAN)
            printf("cuFFT plan error: INVALID PLAN\n");
        else if (err == CUFFT_ALLOC_FAILED)
            printf("cuFFT plan error: ALLOC FAILED\n");
        else if (err == CUFFT_INVALID_TYPE)
            printf("cuFFT plan error: INVALID TYPE\n");
        else if (err == CUFFT_INVALID_VALUE)
            printf("cuFFT plan error: INVALID VALUE\n");
        else if (err == CUFFT_INTERNAL_ERROR)
            printf("cuFFT plan error: INTERNAL ERROR\n");
        else if (err == CUFFT_EXEC_FAILED)
            printf("cuFFT plan error: EXEC FAILED\n");
        else if (err == CUFFT_SETUP_FAILED)
            printf("cuFFT plan error: SETUP FAILED\n");
        else if (err == CUFFT_INVALID_SIZE)
            printf("cuFFT plan error: INVALID SIZE\n");
        else if (err == CUFFT_UNALIGNED_DATA)
            printf("cuFFT plan error: UNALIGNED DATA\n");
        else
            printf("cuFFT plan error: OTHER\n");
        return 1;
    }
}

// Benchmarks nloops batched 1D real-to-complex double-precision FFTs over
// the rows of a 512^3 field.
int main()
{
    // Grid et al.
    // ------------------------
    const int nloops = 1000;
    const int itot = 512;
    const int jtot = 512;
    const int ktot = 512;
    const int ncells = itot*jtot*ktot;
    // D2Z output: itot/2+1 complex values per row of itot reals.
    const int ncells_c = (itot/2+1)*jtot*ktot;

    // Field at host
    // ------------------------
    std::vector<double> field(ncells);

    // Create device field & tmp
    // ------------------------
    double* field_g;
    double* tmp_g;
    cuda_safe_call(cudaMalloc((void**)&field_g, ncells*sizeof(double)));
    // Bug fix: tmp_g was sized ncells doubles, but the D2Z output needs
    // (itot/2+1) complex values per row -- more than itot doubles -- so
    // cufftExecD2Z overflowed the buffer.
    cuda_safe_call(cudaMalloc((void**)&tmp_g, ncells_c*sizeof(cufftDoubleComplex)));
    // Bug fix: the copy size was "ncells" (bytes), i.e. only 1/8 of the
    // field; cudaMemcpy takes a byte count.
    cuda_safe_call(cudaMemcpy(field_g, field.data(), ncells*sizeof(double), cudaMemcpyHostToDevice));

    // Create FFT plan
    // ------------------------
    cufftHandle iplanf;
    const int rank = 1;

    // Double input
    int i_ni[] = {itot};
    int i_istride = 1;
    int i_idist = itot;

    // Double-complex output
    int o_ni[] = {itot/2+1};
    int o_istride = 1;
    int o_idist = itot/2+1;

    check_cufft( cufftPlanMany(&iplanf, rank, i_ni, i_ni, i_istride, i_idist, o_ni, o_istride, o_idist, CUFFT_D2Z, jtot*ktot) );

    // Calculate FFTs
    // ------------------------
    for (int i=0; i<nloops; ++i)
    {
        check_cufft( cufftExecD2Z(iplanf, (cufftDoubleReal*)field_g, (cufftDoubleComplex*)tmp_g) );
        // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the supported equivalent.
        cudaDeviceSynchronize();
    }

    // Release resources (previously leaked).
    cufftDestroy(iplanf);
    cuda_safe_call(cudaFree(field_g));
    cuda_safe_call(cudaFree(tmp_g));

    return 0;
}
4,277
// Horizontal/vertical BMP flip kernels presented as a series of successive
// optimizations: recomputed vs. precomputed parameters, 1D vs. 2D grids,
// multi-pixel threads, shared-memory staging, and 32-bit word accesses.
// Pixels are 24bpp (3 bytes), rows padded to a 4-byte boundary.
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include <cuda.h>

#define CEIL(a,b) ((a+b-1)/b)
// Note: SWAP expands to three statements; callers wrap it safely here.
#define SWAP(a,b,t) t=b; b=a; a=t;
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))

typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;

uch *TheImg, *CopyImg;                 // Where images are stored in CPU
uch *GPUImg, *GPUCopyImg, *GPUResult;  // Where images are stored in GPU

// BMP image properties read from the 54-byte header.
struct ImgProp{
    int Hpixels;       // horizontal resolution (pixels)
    int Vpixels;       // vertical resolution (pixels)
    uch HeaderInfo[54];// raw BMP header
    ul Hbytes;         // bytes per padded row
} ip;

#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)

// Kernel that flips the given image horizontally
// each thread only flips a single pixel (R,G,B)
// Launch: 1D grid of BlkPerRow*Vpixels blocks; row/col derived from gtid.
__global__ void Hflip(uch *ImgDst, uch *ImgSrc, ui Hpixels)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    ui RowBytes = (Hpixels * 3 + 3) & (~3); // row length padded to 4 bytes
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorcol = Hpixels - 1 - MYcol;
    ui MYoffset = MYrow * RowBytes;
    ui MYsrcIndex = MYoffset + 3 * MYcol;
    ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
    // swap pixels RGB   @MYcol , @MYmirrorcol
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}

// Improved Hflip() kernel that flips the given image horizontally
// BlkPerRow, RowBytes variables are passed, rather than calculated
__global__ void Hflip2(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui BlkPerRow, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorcol = Hpixels - 1 - MYcol;
    ui MYoffset = MYrow * RowBytes;
    ui MYsrcIndex = MYoffset + 3 * MYcol;
    ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
    // swap pixels RGB   @MYcol , @MYmirrorcol
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}

// Improved Hflip2() kernel that flips the given image horizontally
// Grid is launched using 2D block numbers (blockIdx.y selects the row)
__global__ void Hflip3(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorcol = Hpixels - 1 - MYcol;
    ui MYoffset = MYrow * RowBytes;
    ui MYsrcIndex = MYoffset + 3 * MYcol;
    ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
    // swap pixels RGB   @MYcol , @MYmirrorcol
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}

// Improved Hflip3() kernel that flips the given image horizontally
// Each kernel takes care of 2 consecutive pixels; half as many blocks are launched
__global__ void Hflip4(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol2 = (MYbid*ThrPerBlk + MYtid)*2;
    if (MYcol2 >= Hpixels) return;          // col (and col+1) are out of range
    ui MYmirrorcol = Hpixels - 1 - MYcol2;
    ui MYoffset = MYrow * RowBytes;
    ui MYsrcIndex = MYoffset + 3 * MYcol2;
    ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
    // swap pixels RGB   @MYcol , @MYmirrorcol
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
    if ((MYcol2 + 1) >= Hpixels) return;    // only col+1 is out of range
    // second pixel mirrors to the slot just BEFORE the first (hence -3..-1)
    ImgDst[MYdstIndex - 3] = ImgSrc[MYsrcIndex + 3];
    ImgDst[MYdstIndex - 2] = ImgSrc[MYsrcIndex + 4];
    ImgDst[MYdstIndex - 1] = ImgSrc[MYsrcIndex + 5];
}

// Improved Hflip3() kernel that flips the given image horizontally
// Each kernel takes care of 4 consecutive pixels; 1/4 as many blocks are launched
__global__ void Hflip5(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol4 = (MYbid*ThrPerBlk + MYtid) * 4;
    if (MYcol4 >= Hpixels) return;          // col (and col+1) are out of range
    ui MYmirrorcol = Hpixels - 1 - MYcol4;
    ui MYoffset = MYrow * RowBytes;
    ui MYsrcIndex = MYoffset + 3 * MYcol4;
    ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
    // swap pixels RGB   @MYcol , @MYmirrorcol
    for (ui a = 0; a<4; a++){
        ImgDst[MYdstIndex - a * 3] = ImgSrc[MYsrcIndex + a * 3];
        ImgDst[MYdstIndex - a * 3 + 1] = ImgSrc[MYsrcIndex + a * 3 + 1];
        ImgDst[MYdstIndex - a * 3 + 2] = ImgSrc[MYsrcIndex + a * 3 + 2];
        if ((MYcol4 + a + 1) >= Hpixels) return;  // next pixel is out of range
    }
}

// Improved Hflip3() kernel that flips the given image horizontally
// Each kernel: copies a pixel from GlobalMem into shared memory (PixBuffer[])
// and writes back into the flipped Global Memory location
__global__ void Hflip6(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui RowBytes)
{
    __shared__ uch PixBuffer[3072]; // holds 3*1024 Bytes (1024 pixels).
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYtid3 = MYtid * 3;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorcol = Hpixels - 1 - MYcol;
    ui MYoffset = MYrow * RowBytes;
    ui MYsrcIndex = MYoffset + 3 * MYcol;
    ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
    // swap pixels RGB   @MYcol , @MYmirrorcol
    PixBuffer[MYtid3] = ImgSrc[MYsrcIndex];
    PixBuffer[MYtid3 + 1] = ImgSrc[MYsrcIndex + 1];
    PixBuffer[MYtid3 + 2] = ImgSrc[MYsrcIndex + 2];
    __syncthreads();
    ImgDst[MYdstIndex] = PixBuffer[MYtid3];
    ImgDst[MYdstIndex + 1] = PixBuffer[MYtid3 + 1];
    ImgDst[MYdstIndex + 2] = PixBuffer[MYtid3 + 2];
}

// Improved Hflip6() kernel that flips the given image horizontally
// Each kernel: uses Shared Memory (PixBuffer[]) to read in 12 Bytes
// (4 pixels). 12Bytes are flipped inside Shared Memory
// After that, they are written into Global Mem as 3 int's
// Horizontal resolution MUST BE A POWER OF 4.
__global__ void Hflip7(ui *ImgDst32, ui *ImgSrc32, ui RowInts)
{
    __shared__ ui PixBuffer[3072]; // holds 3*1024*4 Bytes (1024*4 pixels).
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYtid3 = MYtid * 3;
    ui MYrow = blockIdx.y;
    ui MYcolIndex = (MYbid*ThrPerBlk + MYtid)*3;
    if (MYcolIndex >= RowInts) return;      // index is out of range
    ui MYmirrorcol = RowInts - 1 - MYcolIndex;
    ui MYoffset = MYrow * RowInts;
    ui MYsrcIndex = MYoffset + MYcolIndex;
    ui MYdstIndex = MYoffset + MYmirrorcol - 2; // -2 is to copy 3 Bytes at a time
    uch SwapB;
    uch *SwapPtr;
    // read 4 pixel blocks (12B = 3 int's) into Shared Memory
    // PixBuffer:   [B0 G0 R0 B1] [G1 R1 B2 G2] [R2 B3 G3 R3]
    // Our Target:  [B3 G3 R3 B2] [G2 R2 B1 G1] [R1 B0 G0 R0]
    PixBuffer[MYtid3] = ImgSrc32[MYsrcIndex];
    PixBuffer[MYtid3+1] = ImgSrc32[MYsrcIndex+1];
    PixBuffer[MYtid3+2] = ImgSrc32[MYsrcIndex+2];
    __syncthreads();
    // swap these 4 pixels inside Shared Memory
    SwapPtr = (uch *)(&PixBuffer[MYtid3]); // [B0 G0 R0 B1] [G1 R1 B2 G2] [R2 B3 G3 R3]
    SWAP(SwapPtr[0], SwapPtr[9], SwapB)    // [B3 G0 R0 B1] [G1 R1 B2 G2] [R2 B0 G3 R3]
    SWAP(SwapPtr[1], SwapPtr[10], SwapB)   // [B3 G3 R0 B1] [G1 R1 B2 G2] [R2 B0 G0 R3]
    SWAP(SwapPtr[2], SwapPtr[11], SwapB)   // [B3 G3 R3 B1] [G1 R1 B2 G2] [R2 B0 G0 R0]
    SWAP(SwapPtr[3], SwapPtr[6], SwapB)    // [B3 G3 R3 B2] [G1 R1 B1 G2] [R2 B0 G0 R0]
    SWAP(SwapPtr[4], SwapPtr[7], SwapB)    // [B3 G3 R3 B2] [G2 R1 B1 G1] [R2 B0 G0 R0]
    SWAP(SwapPtr[5], SwapPtr[8], SwapB)    // [B3 G3 R3 B2] [G2 R2 B1 G1] [R1 B0 G0 R0]
    __syncthreads();
    //write the 4 pixels (3 int's) from Shared Memory into Global Memory
    ImgDst32[MYdstIndex] = PixBuffer[MYtid3];
    ImgDst32[MYdstIndex+1] = PixBuffer[MYtid3+1];
    ImgDst32[MYdstIndex+2] = PixBuffer[MYtid3+2];
}

// Improved Hflip7() that swaps 12Bytes (4 pixels) using registers
__global__ void Hflip8(ui *ImgDst32, ui *ImgSrc32, ui RowInts)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYrow = blockIdx.y;
    ui MYcolIndex = (MYbid*ThrPerBlk + MYtid) * 3;
    if (MYcolIndex >= RowInts) return;      // index is out of range
    ui MYmirrorcol = RowInts - 1 - MYcolIndex;
    ui MYoffset = MYrow * RowInts;
    ui MYsrcIndex = MYoffset + MYcolIndex;
    ui MYdstIndex = MYoffset + MYmirrorcol - 2; // -2 is to copy 3 Bytes at a time
    ui A, B, C, D, E, F;
    // read 4 pixel blocks (12B = 3 int's) into 3 long registers
    A = ImgSrc32[MYsrcIndex];
    B = ImgSrc32[MYsrcIndex + 1];
    C = ImgSrc32[MYsrcIndex + 2];
    // Do the shuffling using these registers
    //NOW:         A=[B1,R0,G0,B0]  B=[G2,B2,R1,G1]  C=[R3,G3,B3,R2]
    //OUR TARGET:  D=[B2,R3,G3,B3]  E=[G1,B1,R2,G2]  F=[R0,G0,B1,R1]
    D = (C >> 8) | ((B << 8) & 0xFF000000);                                              // D=[B2,R3,G3,B3]
    E = (B << 24) | (B >> 24) | ((A >> 8) & 0x00FF0000) | ((C << 8) & 0x0000FF00);       // E=[G1,B1,R2,G2]
    F = ((A << 8) & 0xFFFF0000) | ((A >> 16) & 0x0000FF00) | ((B >> 8) & 0x000000FF);    // F=[R0,G0,B1,R1]
    //write the 4 pixels (3 int's) from Shared Memory into Global Memory
    ImgDst32[MYdstIndex] = D;
    ImgDst32[MYdstIndex + 1] = E;
    ImgDst32[MYdstIndex + 2] = F;
}

// Kernel that flips the given image vertically
// each thread only flips a single pixel (R,G,B)
__global__ void Vflip(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    ui RowBytes = (Hpixels * 3 + 3) & (~3);
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol;
    ui MYdstIndex = MYdstOffset + 3 * MYcol;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}

// Improved Vflip() kernel that flips the given image vertically
// BlkPerRow, RowBytes variables are passed, rather than calculated
__global__ void Vflip2(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, ui BlkPerRow, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol;
    ui MYdstIndex = MYdstOffset + 3 * MYcol;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}

// Improved Vflip2() kernel that flips the given image vertically
// Grid is launched using 2D block numbers (blockIdx.y selects the row)
__global__ void Vflip3(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;           // col out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol;
    ui MYdstIndex = MYdstOffset + 3 * MYcol;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}

// Improved Vflip3() kernel that flips the given image vertically
// Each kernel takes care of 2 consecutive pixels; half as many blocks are launched
__global__ void Vflip4(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol2 = (MYbid*ThrPerBlk + MYtid)*2;
    if (MYcol2 >= Hpixels) return;          // col is out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol2;
    ui MYdstIndex = MYdstOffset + 3 * MYcol2;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
    if ((MYcol2+1) >= Hpixels) return;      // only col+1 is out of range
    ImgDst[MYdstIndex + 3] = ImgSrc[MYsrcIndex + 3];
    ImgDst[MYdstIndex + 4] = ImgSrc[MYsrcIndex + 4];
    ImgDst[MYdstIndex + 5] = ImgSrc[MYsrcIndex + 5];
}

// Improved Vflip3() kernel that flips the given image vertically
// Each kernel takes care of 4 consecutive pixels; 1/4 as many blocks are launched
__global__ void Vflip5(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, ui RowBytes)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol4 = (MYbid*ThrPerBlk + MYtid)*4;
    if (MYcol4 >= Hpixels) return;          // col is out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol4;
    ui MYdstIndex = MYdstOffset + 3 * MYcol4;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    for (ui a=0; a<4; a++){
        ImgDst[MYdstIndex + a * 3] = ImgSrc[MYsrcIndex + a * 3];
        ImgDst[MYdstIndex + a * 3 + 1] = ImgSrc[MYsrcIndex + a * 3 + 1];
        ImgDst[MYdstIndex + a * 3 + 2] = ImgSrc[MYsrcIndex + a * 3 + 2];
        if ((MYcol4 + a + 1) >= Hpixels) return;  // next pixel is out of range
    }
}

// Improved Vflip3() kernel that flips the given image vertically
// Each kernel: copies a pixel from GlobalMem into shared memory (PixBuffer[])
// and writes back into the flipped Global Memory location
__global__ void Vflip6(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, ui RowBytes)
{
    __shared__ uch PixBuffer[3072]; // holds 3*1024 Bytes (1024 pixels).
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYtid3 = MYtid*3;
    //ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    //ui BlkPerRow = CEIL(Hpixels,ThrPerBlk);
    //ui RowBytes = (Hpixels * 3 + 3) & (~3);
    //ui MYrow = MYbid / BlkPerRow;
    //ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;           // col is out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol;
    ui MYdstIndex = MYdstOffset + 3 * MYcol;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    PixBuffer[MYtid3] = ImgSrc[MYsrcIndex];
    PixBuffer[MYtid3 + 1] = ImgSrc[MYsrcIndex + 1];
    PixBuffer[MYtid3 + 2] = ImgSrc[MYsrcIndex + 2];
    __syncthreads();
    ImgDst[MYdstIndex] = PixBuffer[MYtid3];
    ImgDst[MYdstIndex + 1] = PixBuffer[MYtid3 + 1];
    ImgDst[MYdstIndex + 2] = PixBuffer[MYtid3 + 2];
}

// Improved Vflip6() kernel that uses shared memory to copy 4 Bytes at a time (int).
// It no longer worries about the pixel RGB boundaries
__global__ void Vflip7(ui *ImgDst32, ui *ImgSrc32, ui Vpixels, ui RowInts)
{
    __shared__ ui PixBuffer[1024]; // holds 1024 int = 4096B
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYrow = blockIdx.y;
    ui MYcolIndex = MYbid*ThrPerBlk + MYtid;
    if (MYcolIndex >= RowInts) return;      // index is out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowInts;
    ui MYdstOffset = MYmirrorrow * RowInts;
    ui MYsrcIndex = MYsrcOffset + MYcolIndex;
    ui MYdstIndex = MYdstOffset + MYcolIndex;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    PixBuffer[MYtid] = ImgSrc32[MYsrcIndex];
    __syncthreads();
    ImgDst32[MYdstIndex] = PixBuffer[MYtid];
}

// Improved Vflip7() kernel that uses shared memory to copy 8 Bytes at a time (2 int).
__global__ void Vflip8(ui *ImgDst32, ui *ImgSrc32, ui Vpixels, ui RowInts)
{
    __shared__ ui PixBuffer[2048]; // holds 2048 int = 8192B
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYtid2 = MYtid * 2;
    ui MYrow = blockIdx.y;
    ui MYcolIndex = (MYbid*ThrPerBlk + MYtid) * 2;
    if (MYcolIndex >= RowInts) return;      // index is out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;
    ui MYsrcOffset = MYrow * RowInts;
    ui MYdstOffset = MYmirrorrow * RowInts;
    ui MYsrcIndex = MYsrcOffset + MYcolIndex;
    ui MYdstIndex = MYdstOffset + MYcolIndex;
    // swap pixels RGB   @MYrow , @MYmirrorrow
    PixBuffer[MYtid2] = ImgSrc32[MYsrcIndex];
    if ((MYcolIndex+1) < RowInts) PixBuffer[MYtid2+1] = ImgSrc32[MYsrcIndex+1];
    __syncthreads();
    ImgDst32[MYdstIndex] = PixBuffer[MYtid2];
    if ((MYcolIndex + 1) < RowInts) ImgDst32[MYdstIndex+1] = PixBuffer[MYtid2+1];
}

// Modified Vflip8() kernel that uses Global Memory only
// to copy 8 Bytes at a time (2 int).
It does NOT use shared memory __global__ void Vflip9(ui *ImgDst32, ui *ImgSrc32, ui Vpixels, ui RowInts) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYrow = blockIdx.y; ui MYcolIndex = (MYbid*ThrPerBlk + MYtid) * 2; if (MYcolIndex >= RowInts) return; // index is out of range ui MYmirrorrow = Vpixels - 1 - MYrow; ui MYsrcOffset = MYrow * RowInts; ui MYdstOffset = MYmirrorrow * RowInts; ui MYsrcIndex = MYsrcOffset + MYcolIndex; ui MYdstIndex = MYdstOffset + MYcolIndex; // swap pixels RGB @MYrow , @MYmirrorrow ImgDst32[MYdstIndex] = ImgSrc32[MYsrcIndex]; if ((MYcolIndex + 1) < RowInts) ImgDst32[MYdstIndex + 1] = ImgSrc32[MYsrcIndex+1]; } // Kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) (one Byte at a time) __global__ void PixCopy(uch *ImgDst, uch *ImgSrc, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if (MYgtid > FS) return; // outside the allocated memory ImgDst[MYgtid] = ImgSrc[MYgtid]; } // Improved PixCopy() that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) // Each thread copies 2 consecutive Bytes __global__ void PixCopy2(uch *ImgDst, uch *ImgSrc, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; ui MYaddr = MYgtid * 2; if (MYaddr > FS) return; // outside the allocated memory ImgDst[MYaddr] = ImgSrc[MYaddr]; // copy pixel if ((MYaddr + 1) > FS) return; // outside the allocated memory ImgDst[MYaddr + 1] = ImgSrc[MYaddr + 1]; // copy consecutive pixel } // Improved PixCopy() that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) // Each thread copies 4 consecutive Bytes __global__ void PixCopy3(uch *ImgDst, uch *ImgSrc, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; ui MYaddr = MYgtid * 4; for (ui a=0; 
a<4; a++){ if ((MYaddr+a) > FS) return; ImgDst[MYaddr+a] = ImgSrc[MYaddr+a]; } } // Improved kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) // Uses shared memory as a temporary local buffer. // copies one byte at a time __global__ void PixCopy4(uch *ImgDst, uch *ImgSrc, ui FS) { __shared__ uch PixBuffer[1024]; // Shared Memory: holds 1024 Bytes. ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui MYaddr = MYgtid * 4; if (MYgtid > FS) return; // outside the allocated memory PixBuffer[MYtid] = ImgSrc[MYgtid]; __syncthreads(); ImgDst[MYgtid] = PixBuffer[MYtid]; } // Improved kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) {which must both be passed as integer pointers} // Uses shared memory as a temporary local buffer. // copies 4 bytes (32 bits) at a time __global__ void PixCopy5(ui *ImgDst32, ui *ImgSrc32, ui FS) { __shared__ ui PixBuffer[1024]; // Shared Mem: holds 1024 int (4096 Bytes) ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui MYaddr = MYgtid * 4; if ((MYgtid*4) > FS) return; // outside the allocated memory PixBuffer[MYtid] = ImgSrc32[MYgtid]; __syncthreads(); ImgDst32[MYgtid] = PixBuffer[MYtid]; } // Improved kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) {which must both be passed as integer pointers} // This kernel does not use shared memory, only 32-bit pointers ... 
// copies 4 bytes (32 bits) at a time __global__ void PixCopy6(ui *ImgDst32, ui *ImgSrc32, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; //ui MYaddr = MYgtid * 4; if ((MYgtid * 4) > FS) return; // outside the allocated memory ImgDst32[MYgtid] = ImgSrc32[MYgtid]; } // Improved kernel that copies an image from one part of the // GPU memory (ImgSrc) to another (ImgDst) {which must both be passed as integer pointers} // This kernel does not use shared memory, only 32-bit pointers ... // copies 8 bytes (2x32 bits) at a time __global__ void PixCopy7(ui *ImgDst32, ui *ImgSrc32, ui FS) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; if ((MYgtid * 4) > FS) return; // outside the allocated memory ImgDst32[MYgtid] = ImgSrc32[MYgtid]; MYgtid++; if ((MYgtid * 4) > FS) return; // next 32 bits ImgDst32[MYgtid] = ImgSrc32[MYgtid]; } /* // helper function that wraps CUDA API calls, reports any error and exits void chkCUDAErr(cudaError_t error_id) { if (error_id != CUDA_SUCCESS) { printf("CUDA ERROR :::%\n", cudaGetErrorString(error_id)); exit(EXIT_FAILURE); } } */ // Read a 24-bit/pixel BMP file into a 1D linear array. // Allocate memory to store the 1D image and return its pointer. 
uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); printf("\n Input File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { char Flip = 'H'; float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime; // GPU code run times cudaError_t cudaStatus, cudaStatus2; cudaEvent_t time1, time2, time3, time4; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow, BlkPerRowInt, BlkPerRowInt2; ui ThrPerBlk = 256, NumBlocks, NB2, NB4, NB8, GPUDataTransfer; ui RowBytes, RowInts; cudaDeviceProp GPUprop; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; ui *GPUCopyImg32, *GPUImg32; char SupportedBlocks[100]; int KernelNum=1; char KernelName[255]; strcpy(ProgName, "imflipG"); switch (argc){ case 6: KernelNum = atoi(argv[5]); case 5: ThrPerBlk=atoi(argv[4]); case 4: Flip = toupper(argv[3][0]); 
case 3: strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); break; default: printf("\n\nUsage: %s InputFilename OutputFilename [V/H/C/T] [ThrPerBlk] [Kernel=1-9]", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp H", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp V 128",ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp V 128 2", ProgName); printf("\n\nH=horizontal flip, V=vertical flip, T=Transpose, C=copy image\n\n"); exit(EXIT_FAILURE); } if ((Flip != 'V') && (Flip != 'H') && (Flip != 'C') && (Flip != 'T')) { printf("Invalid flip option '%c'. Must be 'V','H', 'T', or 'C' ... \n", Flip); exit(EXIT_FAILURE); } if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) { printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk); exit(EXIT_FAILURE); } if ((KernelNum < 1) || (KernelNum > 9)) { printf("Invalid kernel number %d ... \n", KernelNum); printf("\n\nNothing executed ... Exiting ...\n\n"); exit(EXIT_FAILURE); } // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ free(TheImg); printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. int NumGPUs = 0; cudaGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ printf("\nNo CUDA Device is available\n"); exit(EXIT_FAILURE); } cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); exit(EXIT_FAILURE); } cudaGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; cudaEventCreate(&time1); cudaEventCreate(&time2); cudaEventCreate(&time3); cudaEventCreate(&time4); cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer // Allocate GPU buffer for the input and output images cudaStatus = cudaMalloc((void**)&GPUImg, IMAGESIZE); cudaStatus2 = cudaMalloc((void**)&GPUCopyImg, IMAGESIZE); if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)){ fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory"); exit(EXIT_FAILURE); } // These are the same pointers as GPUCopyImg and GPUImg, however, casted to an integer pointer GPUCopyImg32 = (ui *)GPUCopyImg; GPUImg32 = (ui *)GPUImg; // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy CPU to GPU failed!"); exit(EXIT_FAILURE); } cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done RowBytes = (IPH * 3 + 3) & (~3); RowInts = RowBytes / 4; BlkPerRow = CEIL(IPH,ThrPerBlk); BlkPerRowInt = CEIL(RowInts, ThrPerBlk); BlkPerRowInt2 = CEIL(CEIL(RowInts,2), ThrPerBlk); NumBlocks = IPV*BlkPerRow; dim3 dimGrid2D(BlkPerRow, ip.Vpixels); dim3 dimGrid2D2(CEIL(BlkPerRow,2), ip.Vpixels); dim3 dimGrid2D4(CEIL(BlkPerRow,4), ip.Vpixels); dim3 dimGrid2Dint(BlkPerRowInt, ip.Vpixels); dim3 dimGrid2Dint2(BlkPerRowInt2, ip.Vpixels); switch (Flip){ case 'H': switch (KernelNum){ case 1: Hflip <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH); strcpy(KernelName, "Hflip : Each thread copies 1 pixel. Computes everything."); break; case 2: Hflip2 <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, BlkPerRow, RowBytes); strcpy(KernelName, "Hflip2 : Each thread copies 1 pixel. 
Uses pre-computed values."); break; case 3: Hflip3 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); strcpy(KernelName, "Hflip3 : Each therad copies 1 pixel (using a 2D grid)"); break; case 4: Hflip4 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); strcpy(KernelName, "Hflip4 : Each therad copies 2 consecutive pixels"); break; case 5: Hflip5 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); strcpy(KernelName, "Hflip5 : Each therad copies 4 consecutive pixels"); break; case 6: Hflip6 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); strcpy(KernelName, "Hflip6 : Uses Shared Memory to copy one pixel at a time"); break; case 7: Hflip7 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, RowInts); strcpy(KernelName, "Hflip7 : Flips 4 pixels (12B) at a time inside Shared Mem"); break; case 8: Hflip8 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, RowInts); strcpy(KernelName, "Hflip8 : Flips 4 pixels (12B) using only registers"); break; default:printf("...... Kernel Number=%d ... NOT IMPLEMENTED .... \n", KernelNum); strcpy(KernelName, "*** NOT IMPLEMENTED ***"); break; } GPUResult = GPUCopyImg; GPUDataTransfer = 2*IMAGESIZE; break; case 'V': switch (KernelNum){ case 1: Vflip <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV); strcpy(KernelName, "Vflip : Each thread copies 1 pixel. Computes everything."); break; case 2: Vflip2 <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, BlkPerRow, RowBytes); strcpy(KernelName, "Vflip2 : Each thread copies 1 pixel. 
Uses pre-computed values."); break; case 3: Vflip3 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Vflip3 : Each therad copies 1 pixel (using a 2D grid)"); break; case 4: Vflip4 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Vflip4 : Each therad copies 2 consecutive pixels"); break; case 5: Vflip5 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Vflip5 : Each therad copies 4 consecutive pixels"); break; case 6: Vflip6 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Vflip6 : Uses Shared Memory to copy one pixel at a time"); break; case 7: Vflip7 <<< dimGrid2Dint, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, IPV, RowInts); strcpy(KernelName, "Vflip7 : Uses Shared Memory to copy 1 int at a time"); break; case 8: Vflip8 <<< dimGrid2Dint2, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, IPV, RowInts); strcpy(KernelName, "Vflip8 : Uses Shared Memory to copy 2 int at a time"); break; case 9: Vflip9 <<< dimGrid2Dint2, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, IPV, RowInts); strcpy(KernelName, "Vflip9 : Uses only Global Memory to copy 2 int at a time"); break; default:printf("...... Kernel Number=%d ... NOT IMPLEMENTED .... 
\n", KernelNum); strcpy(KernelName, "*** NOT IMPLEMENTED ***"); break; } GPUResult = GPUCopyImg; GPUDataTransfer = 2 * IMAGESIZE; break; case 'T': switch (KernelNum){ case 1: Hflip <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH); Vflip <<< NumBlocks, ThrPerBlk >>> (GPUImg, GPUCopyImg, IPH, IPV); strcpy(KernelName, "Hflip<<< >>>() , Vflip<<< >>>()"); break; case 2: Hflip2 <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, BlkPerRow, RowBytes); Vflip2 <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, BlkPerRow, RowBytes); strcpy(KernelName, "Hflip2<<< >>>() , Vflip2<<< >>>()"); break; case 3: Hflip3 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); Vflip3 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Hflip3<<< >>>() , Vflip3<<< >>>()"); break; case 4: Hflip4 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); Vflip4 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Hflip4<<< >>>() , Vflip4<<< >>>()"); break; case 5: Hflip5 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); Vflip5 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Hflip5<<< >>>() , Vflip5<<< >>>()"); break; case 6: Hflip6 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, RowBytes); Vflip6 <<< dimGrid2D, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV, RowBytes); strcpy(KernelName, "Hflip6<<< >>>() , Vflip6<<< >>>()"); break; default:printf("...... Kernel Number=%d ... NOT IMPLEMENTED .... 
\n", KernelNum); strcpy(KernelName, "*** NOT IMPLEMENTED ***"); break; } GPUResult = GPUImg; GPUDataTransfer = 4 * IMAGESIZE; break; case 'C': NumBlocks = CEIL(IMAGESIZE,ThrPerBlk); NB2 = CEIL(NumBlocks,2); NB4 = CEIL(NumBlocks,4); NB8 = CEIL(NumBlocks,8); switch (KernelNum){ case 1: PixCopy <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IMAGESIZE); strcpy(KernelName, "PixCopy : Each kernel copies one Byte at a time"); break; case 2: PixCopy2 <<< NB2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IMAGESIZE); strcpy(KernelName, "PixCopy2 : Each kernel copies 2 consecutive Bytes at a time"); break; case 3: PixCopy3 <<< NB4, ThrPerBlk >>> (GPUCopyImg, GPUImg, IMAGESIZE); strcpy(KernelName, "PixCopy3 : Each kernel copies 4 consecutive Bytes at a time"); break; case 4: PixCopy4 <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IMAGESIZE); strcpy(KernelName, "PixCopy4 : Uses Shared Memory to copy one Byte at a time"); break; case 5: PixCopy5 <<< NB4, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, IMAGESIZE); strcpy(KernelName, "PixCopy5 : Uses Shared Memory to copy one int (32b) at a time"); break; case 6: PixCopy6 <<< NB4, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, IMAGESIZE); strcpy(KernelName, "PixCopy6 : Uses only Global Memory to copy one int (32b) at a time"); break; case 7: PixCopy7 <<< NB8, ThrPerBlk >>> (GPUCopyImg32, GPUImg32, IMAGESIZE); strcpy(KernelName, "PixCopy7 : Uses only Global Memory to copy two int (8 Bytes) at a time"); break; default:printf("...... Kernel Number=%d ... NOT IMPLEMENTED .... \n",KernelNum); strcpy(KernelName, "*** NOT IMPLEMENTED ***"); break; } GPUResult = GPUCopyImg; GPUDataTransfer = 2 * IMAGESIZE; break; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } cudaEventRecord(time3, 0); // Copy output (results) from GPU buffer to host (CPU) memory. cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy GPU to CPU failed!"); exit(EXIT_FAILURE); } cudaEventRecord(time4, 0); cudaEventSynchronize(time1); cudaEventSynchronize(time2); cudaEventSynchronize(time3); cudaEventSynchronize(time4); cudaEventElapsedTime(&totalTime, time1, time4); cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2); cudaEventElapsedTime(&kernelExecutionTime, time2, time3); cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4); cudaStatus = cudaDeviceSynchronize(); //checkError(cudaGetLastError()); // screen for errors in kernel launches if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk printf("\n--------------------------------------------------------------------------\n"); printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n", GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk); printf("--------------------------------------------------------------------------\n"); printf("%s %s %s %c %u %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, Flip, ThrPerBlk, KernelNum, NumBlocks, BlkPerRow); printf("--------------------------------------------------------------------------\n"); printf("%s\n",KernelName); printf("--------------------------------------------------------------------------\n"); printf("CPU->GPU Transfer =%7.2f ms ... %4d MB ... 
%6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU)); printf("Kernel Execution =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecutionTime, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, kernelExecutionTime)); printf("GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU)); printf("--------------------------------------------------------------------------\n"); printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((2*IMAGESIZE+GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime)); printf("--------------------------------------------------------------------------\n\n"); // Deallocate CPU, GPU memory and destroy events. cudaFree(GPUImg); cudaFree(GPUCopyImg); cudaEventDestroy(time1); cudaEventDestroy(time2); cudaEventDestroy(time3); cudaEventDestroy(time4); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); }
4,278
#include <iostream>
#include <ctime>
#include <cstdlib>
using namespace std;

#define MTX_DIM 100
#define BLOCK_SIZE 10

// Matrices in unified (managed) memory, reachable from both host and device.
__device__ __managed__ float *A, *B, *C;

// Dense matrix multiply C = A * B for n x n row-major matrices.
// Expects a 2D launch: blockDim = (BLOCK_SIZE, BLOCK_SIZE), grid covering
// n columns in x and n rows in y.
__global__ void calcGravity(const size_t n){
    // BUG FIX: the original derived both row and col from the x dimension
    // (row = threadIdx.x + blockDim.x*blockIdx.x, col = blockIdx.x*BLOCK_SIZE
    // + threadIdx.x), so row == col always held: only diagonal elements were
    // computed, and every thread with threadIdx.y > 0 raced on the same
    // output element. Rows must come from the y dimension.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n && col < n) {
        float result = 0;
        // Inner product of row of A with column of B.
        for (int j = 0; j < n; j++) {
            result += A[row * n + j] * B[j * n + col];
        }
        C[row * n + col] = result;
    }
}

int main(int argc, char* argv[]){
    // Allocate the three matrices in unified memory.
    cudaMallocManaged(&A, (size_t) MTX_DIM * MTX_DIM * sizeof(float));
    cudaMallocManaged(&B, (size_t) MTX_DIM * MTX_DIM * sizeof(float));
    cudaMallocManaged(&C, (size_t) MTX_DIM * MTX_DIM * sizeof(float));

    struct timespec start, finish;
    double elapsed;

    // Fill the inputs with pseudo-random values (rand() is deliberately
    // unseeded, so runs are reproducible).
    for (int i = 0; i < MTX_DIM; i++) {
        for (int j = 0; j < MTX_DIM; j++) {
            A[i * MTX_DIM + j] = rand();
            B[i * MTX_DIM + j] = rand();
        }
    }

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((MTX_DIM + dimBlock.x - 1) / dimBlock.x,
                 (MTX_DIM + dimBlock.y - 1) / dimBlock.y);

    cout << "Sending to GPU" << endl;

    // Launch the kernel and time it; the device must finish (synchronize)
    // before the clock is stopped for the measurement to be meaningful.
    clock_gettime(CLOCK_MONOTONIC, &start);
    calcGravity<<<dimGrid, dimBlock>>>(MTX_DIM);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);

    elapsed = (finish.tv_sec - start.tv_sec);
    elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0;
    cout << "Time elapsed: " << elapsed << " seconds" << endl;

    // BUG FIX: the original leaked all three managed allocations.
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    return 0;
}
4,279
#include "includes.h"
// Optimized using shared memory and on chip memory
// Compile source: $- nvcc src/TokamakSimulation.cu -o nBody -lglut -lm -lGLU -lGL
// Run Executable: $- ./nBody
// To stop hit "control c" in the window you launched it from.
// Make movies https://gist.github.com/JPEGtheDev/db078e1b066543ce40580060eee9c1bf

#define NR_NEUTRONS 8
#define NR_ELECTRONS 8
#define NR_PROTONS 8
// atomic mass (u)
#define MASS_PROTON 1.007276
#define MASS_NEUTRON 1.008664
#define MASS_ELECTRON 5.485799e-4
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 6.67408E-11
#define H 1.0
#define EYE 8.5
#define FAR 80.0
#define SHAPE_CT 24
#define SHAPE_SIZE 256
#define PATH "./objects/Tokamak_256.obj" //256 vertices-shape (for array simplicity)
#define N 16*16*16

//***********************
// TODO:
// Check units velocity calculation mag
// ಠ_ಠ
//***********************

// Globals
float4 *p;
float3 *v, *f, *reactor, *r_GPU0, *r_GPU1;
float4 *p_GPU0, *p_GPU1;

// Biot-Savart-style field contribution of one wire segment (dl_tail -> dl_head)
// carrying current I, evaluated at particle position p0.
// NOTE(review): v0 is accepted but unused here; the velocity cross product is
// applied by the caller. gamma is set to I directly — the comment below says
// mu0*I/4Pi "simplifies to Ie-7", but no 1e-7 factor is applied; confirm the
// intended unit system before changing it.
__device__ float3 getMagForce(float4 p0, float3 v0, float3 dl_tail, float3 dl_head, float I){
    //dl is the section of wire
    float3 dB, dl;
    dl.x = dl_head.x - dl_tail.x;
    dl.y = dl_head.y - dl_tail.y;
    dl.z = dl_head.z - dl_tail.z;
    // r is the vector from the segment tail to the particle.
    float rx = p0.x - dl_tail.x;
    float ry = p0.y - dl_tail.y;
    float rz = p0.z - dl_tail.z;
    float r2 = rx*rx + ry*ry + rz*rz;
    float inv_r2 = 1/r2;
    float inv_r = 1/sqrtf(r2);
    float3 rhat = {rx*inv_r, ry*inv_r, rz*inv_r};
    //(dl cross rhat)/r2 = force
    //gamma is mu0*I/4Pi which simplifies to Ie-7
    float gamma = I;
    dB.x = gamma*(dl.y*rhat.z - dl.z*rhat.y)*inv_r2;
    dB.y = gamma*(dl.z*rhat.x - dl.x*rhat.z)*inv_r2;
    dB.z = gamma*(dl.x*rhat.y - dl.y*rhat.x)*inv_r2;
    return (dB);
}

// Accumulates the magnetic (v x B) force on each particle from all SHAPE_CT
// coil shapes, SHAPE_SIZE vertices each, staged through shared memory.
// Assumes blockDim.x == SHAPE_SIZE == BLOCK (256) so one shape fits exactly
// into shared_r per outer iteration.
__global__ void getForcesMag(float4 *g_pos, float3 *vel, float3 *force, int offset, float3 *g_reactor){
    int id = threadIdx.x + blockDim.x*blockIdx.x;
    float3 total_force, B, dB, dl_tail, dl_head, velMe;
    float4 posMe;
    __shared__ float3 shared_r[BLOCK];
    total_force.x = B.x = 0.0;
    total_force.y = B.y = 0.0;
    total_force.z = B.z = 0.0;
    posMe.x = g_pos[id+offset].x;
    posMe.y = g_pos[id+offset].y;
    posMe.z = g_pos[id+offset].z;
    posMe.w = g_pos[id+offset].w;
    velMe.x = vel[id].x;
    velMe.y = vel[id].y;
    velMe.z = vel[id].z;
    for(int k = 0; k < SHAPE_CT; k++){
        // Stage shape k's vertices into shared memory (one vertex per thread).
        shared_r[threadIdx.x] = g_reactor[threadIdx.x + blockDim.x*k];
        __syncthreads();
        // Walk the closed loop of segments; j%SHAPE_SIZE wraps the last
        // vertex back to the first.
        for(int j = 1; j <= SHAPE_SIZE; j++){
            dl_tail = shared_r[(j-1)];
            dl_head = shared_r[(j%SHAPE_SIZE)];
            dB = getMagForce(posMe, velMe, dl_tail, dl_head, 1.0); //current[i] =1
            B.x += dB.x;
            B.y += dB.y;
            B.z += dB.z;
        }
        // BUG FIX: barrier before the next iteration overwrites shared_r.
        // Without it, a fast thread stores shape k+1's vertices while slower
        // threads are still reading shape k's (write-after-read race).
        __syncthreads();
    }
    // Lorentz force component: F = v x B (charge/mass factors handled elsewhere).
    total_force.x = (velMe.y*B.z - velMe.z*B.y);
    total_force.y = (velMe.z*B.x - velMe.x*B.z);
    total_force.z = (velMe.x*B.y - velMe.y*B.x);
    if(id < N){
        force[id].x += total_force.x;
        force[id].y += total_force.y;
        force[id].z += total_force.z;
    }
}
4,280
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
//Code written by Alan Fleming

//CONSTANTS
#define MATRIXSIZE 131072
#define BLOCKSIZE 1024

// Reference sum reduction on the CPU; used to validate the GPU result.
int SumReductionCPU(int* x, int N){
    int sum = 0;
    for(int i = 0; i < N; i++){
        sum += x[i];
    }
    return sum;
}

// Each block sums 2*blockDim.x consecutive elements of input and writes its
// partial sum to output[blockIdx.x]. The input must hold at least
// 2*blockDim.x*gridDim.x elements.
__global__ void sumReductionKernal(int* input, int* output) {
    //initialize Partial Result for thread: each thread loads two elements
    __shared__ int partialResult[2 * BLOCKSIZE];
    unsigned int start = 2*blockIdx.x * blockDim.x;
    partialResult[threadIdx.x] = input[start + threadIdx.x];
    partialResult[blockDim.x + threadIdx.x] = input[start + blockDim.x + threadIdx.x];

    //Perform tree-based sum reduction. The barrier at the top of each
    //iteration also covers the initial shared-memory stores above.
    for(unsigned int stride = blockDim.x; stride > 0; stride /= 2) {
        __syncthreads();
        if (threadIdx.x < stride){
            partialResult[threadIdx.x] += partialResult[threadIdx.x + stride];
        }
    }
    __syncthreads();
    if(threadIdx.x == 0){
        //write block sum to global memory
        output[blockIdx.x] = partialResult[0];
    }
}

int main() {
    int *a = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array
    int *b = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array

    //initialize arrays: a gets pseudo-random data; b is zeroed so that the
    //unused tail of the partial-sum buffer contributes nothing in pass two
    int init = 1325;
    for(int i = 0; i < MATRIXSIZE; i++){
        init = 3125 * init % 6553;
        a[i] = (init - 1000) % 97;
        b[i] = 0;
    }

    //Test CPU reduction
    clock_t t1 = clock();
    int cpuResult = SumReductionCPU(a, MATRIXSIZE);
    clock_t t2 = clock();
    //Calculate runtime in milliseconds
    float cpuTime = (float(t2 - t1) / CLOCKS_PER_SEC * 1000);

    //Allocate memory on GPU. dev_b stores the results of the first reduction pass
    int *dev_a, *dev_b;
    cudaMalloc((void **)(&dev_a), MATRIXSIZE * sizeof(int));
    cudaMalloc((void **)(&dev_b), MATRIXSIZE * sizeof(int));

    //copy memory to gpu (dev_b's zeros matter for the second pass)
    cudaMemcpy(dev_a, a, MATRIXSIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, MATRIXSIZE * sizeof(int), cudaMemcpyHostToDevice);

    //calculate dimensions for gpu: each block consumes 2*BLOCKSIZE elements
    dim3 dimBlock(BLOCKSIZE);
    dim3 dimGrid(ceil(double(MATRIXSIZE) / dimBlock.x / 2));

    //Set up cuda events for recording runtime
    cudaEvent_t start, stop;
    float gpuTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    //First pass: one partial sum per block, written to dev_b
    sumReductionKernal<<<dimGrid, dimBlock>>>(dev_a, dev_b);
    //Second pass: the dimGrid.x partial sums all fit within the 2*BLOCKSIZE
    //elements read by a single block (the rest of dev_b is zero), so one
    //block suffices. BUG FIX: the original relaunched the full grid here,
    //doing gridDim.x times the necessary work for the same dev_a[0].
    sumReductionKernal<<<1, dimBlock>>>(dev_b, dev_a);

    //calculate runtime
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime, start, stop);

    //destroy cuda events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //copy sum from gpu (only the first element holds the final result)
    cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);

    //print speedup
    printf("CPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)gpuTime, double(cpuTime / gpuTime));
    //print reduction results
    printf("CPU Result: %d\nGPU Result: %d\n", cpuResult, a[0]);
    //verify results
    if(cpuResult == a[0]) {
        printf("TEST PASSED\n");
    } else {
        printf("TEST FAILED\n");
    }

    //free memory. BUG FIX: the original leaked b.
    free(a);
    free(b);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return 0;
}
4,281
// Note that in this model we do not check
// the error codes and status of kernel call.
#include <cstdio>
#include <cmath>

// Write each element's own index into A for all indices below N.
__global__ void set(int *A, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    A[i] = i;
}

int main(void)
{
    const int N = 128;

    // Unified-memory buffer, accessible from host and device alike.
    int *A = NULL;
    cudaMallocManaged((void**)&A, N * sizeof(int));

    // Two blocks of 64 threads cover all 128 elements exactly.
    set<<<2, 64>>>(A, N);
    cudaDeviceSynchronize();

    // Print the filled buffer from the host.
    for (int i = 0; i < N; ++i)
        printf("%i ", A[i]);
    printf("\n");

    cudaFree((void*)A);
    return 0;
}
4,282
#include <stdio.h>
#include <cuda_runtime.h>

#define N 64

// Element-wise vector add: c[i] = a[i] + b[i].
// Launched with one thread per block, so blockIdx.x indexes the element.
__global__ void add(int *a, int *b, int *c) {
    int idx = blockIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}

int main() {
    int *a, *b, *c;             // host copies
    int *dev_a, *dev_b, *dev_c; // device copies

    // Allocate memory space for host a, b, and c
    a = (int *)malloc(N * sizeof(int));
    b = (int *)malloc(N * sizeof(int));
    c = (int *)malloc(N * sizeof(int));

    // Allocate memory space for device copies of a, b, and c
    cudaMalloc((void **) &dev_a, N * sizeof(int));
    cudaMalloc((void **) &dev_b, N * sizeof(int));
    cudaMalloc((void **) &dev_c, N * sizeof(int));

    // Fill host arrays
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = 1;
    }

    // Copy inputs from host to device
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // Launch the add() kernel on GPU: N blocks of 1 thread
    add<<<N, 1>>>(dev_a, dev_b, dev_c);

    // Copy result back to host (blocking cudaMemcpy also waits for the kernel)
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    // Print the result
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // BUG FIX: the original leaked the three host arrays.
    free(a);
    free(b);
    free(c);
    return 0;
}
4,283
#include <stdio.h>

#define N 24
#define THREADS 8

// Per-block max reduction: each block writes the maximum of its THREADS
// elements of A to results[blockIdx.x]. Requires blockDim.x to be a power
// of two and the grid to cover A exactly (N divisible by THREADS here).
__global__ void reduce(float *A, float *results)
{
    __shared__ float sdata[THREADS];
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    sdata[threadIdx.x] = A[i];
    // BUG FIX: barrier after the shared-memory load. The original read
    // sdata[threadIdx.x + s] in the first reduction step before the owning
    // threads were guaranteed to have stored their elements (a data race).
    __syncthreads();
    for (unsigned s = blockDim.x/2; s > 0; s >>= 1) {
        // Keep the larger of the two halves in the lower half.
        if (threadIdx.x < s && sdata[threadIdx.x] < sdata[threadIdx.x + s])
            sdata[threadIdx.x] = sdata[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        results[blockIdx.x] = sdata[0];
}

int main()
{
    float A[N], *A_d, *results, *results_d, result;
    int i;
    dim3 dimBlock(THREADS);
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x);

    cudaSetDevice(0);

    // Descending data with a planted global max (2N) and min (-N).
    for (i = 0; i < N; i++) A[i] = N - i;
    A[3] = 2*N;
    A[N-3] = -N;

    cudaMalloc((void **) &A_d, sizeof(float)*N);
    cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &results_d, dimGrid.x*sizeof(float));

    reduce<<<dimGrid, dimBlock>>>(A_d, results_d);

    results = (float*)malloc(dimGrid.x*sizeof(float));
    cudaMemcpy(results, results_d, dimGrid.x*sizeof(float), cudaMemcpyDeviceToHost);

    // Finish the reduction over the per-block maxima on the host.
    result = results[0];
    for (i = 1; i < dimGrid.x; i++)
        if (result < results[i]) result = results[i];
    printf("%f\n", result);

    // BUG FIX: the original leaked the host results buffer and fell off
    // main without returning a value.
    free(results);
    cudaFree(A_d);
    cudaFree(results_d);
    return 0;
}
4,284
#include "stdio.h"
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <sys/time.h>

#define N 1024

// Kernel: thread i seeds its own curand state from the host-supplied launch
// time and draws one uniform double into C[i].
__global__ void random_gpu(double *C, long *time, curandState *state)
{
    long i = threadIdx.x;
    // The launch time is the only entropy we have, so derive a distinct seed
    // per thread by scaling it with the thread index.
    long seed = (*time) * (i + 1);
    // Sequences are fully independent, so offset 0 keeps init cheap.
    int offset = 0;
    curand_init(seed, i, offset, &state[i]);  // initialize the i-th sequence
    C[i] = curand_uniform_double(&state[i]);  // draw from the i-th sequence
}

// Wall-clock time in microseconds.
long getCurrentTime()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000 + tv.tv_usec;
}

// Copies the current time into a freshly allocated device long.
// The caller owns the returned device pointer and must cudaFree it.
long *getCurrentTimeForDev()
{
    long *time;
    cudaMalloc(&time, sizeof(long));
    // Fix: the original heap-allocated the staging value and leaked it;
    // a stack variable is sufficient for a host-to-device copy.
    long timenow = getCurrentTime();
    cudaMemcpy(time, &timenow, sizeof(long), cudaMemcpyHostToDevice);
    return time;
}

int main()
{
    size_t size = N * sizeof(double);
    double *C = new double[N];
    long st = getCurrentTime();
    curandState *state;
    double *d_C;
    cudaMalloc(&state, sizeof(curandState) * N);  // per-thread RNG states
    cudaMalloc(&d_C, size);
    // Fix: keep the returned device pointer so it can be freed
    // (the original passed it inline and leaked it).
    long *d_time = getCurrentTimeForDev();
    random_gpu<<<1, N>>>(d_C, d_time, state);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    long ed = getCurrentTime();
    printf("gpu running time:%ld\n", ed - st);
    cudaFree(d_C);
    cudaFree(d_time);  // fix: was leaked
    cudaFree(state);   // fix: was leaked
    for (int i = 0; i < 10; i++) {
        printf("%f ", C[i]);
    }
    delete[] C;
    printf("\n");
}
4,285
#include "includes.h"

// Down-sweep phase of a Blelloch exclusive scan over a 512-element shared
// buffer. Assumes the top element was set to 0 before the call. Must be
// called by the whole block (contains __syncthreads).
__device__ void down_sweep_512(uint* data_block)
{
    for (uint i = 512; i >= 2; i >>= 1) {
        // Strided so any blockDim.x covers all active elements of this level.
        for (uint j = 0; j < (511 + blockDim.x) / i; ++j) {
            const auto element = 511 - (j * blockDim.x + threadIdx.x) * i;
            // element is unsigned: negative results wrap and fail this test.
            if (element < 512) {
                const auto other_element = element - (i >> 1);
                // Classic down-sweep swap-and-add of the pair.
                const auto value = data_block[other_element];
                data_block[other_element] = data_block[element];
                data_block[element] += value;
            }
        }
        __syncthreads();  // level barrier: next level reads these writes
    }
}

// Up-sweep (reduce) phase of the Blelloch scan: after it, data_block[511]
// holds the total sum. Must be called by the whole block.
__device__ void up_sweep_512(uint* data_block)
{
    uint starting_elem = 1;
    for (uint i = 2; i <= 512; i <<= 1) {
        for (uint j = 0; j < (511 + blockDim.x) / i; ++j) {
            const uint element = starting_elem + (j * blockDim.x + threadIdx.x) * i;
            if (element < 512) {
                data_block[element] += data_block[element - (i >> 1)];
            }
        }
        starting_elem += i;
        __syncthreads();  // level barrier
    }
}

// Exclusive prefix sum over dev_main_array in place, tile of 512 per block
// (each thread loads two elements, so blockDim.x is presumably 256 — TODO
// confirm at the launch site). Each block's total is written to
// dev_auxiliary_array[blockIdx.x] for a later higher-level scan.
__global__ void prefix_sum_reduce(uint* dev_main_array, uint* dev_auxiliary_array, const uint array_size)
{
    // Use a data block size of 512
    __shared__ uint data_block[512];

    // Let's do it in blocks of 512 (2^9)
    const uint last_block = array_size >> 9;

    if (blockIdx.x < last_block) {
        // Full tile: no bounds checks needed.
        const uint first_elem = blockIdx.x << 9;

        // Load elements into shared memory, add prev_last_elem
        data_block[threadIdx.x] = dev_main_array[first_elem + threadIdx.x];
        data_block[threadIdx.x + blockDim.x] = dev_main_array[first_elem + threadIdx.x + blockDim.x];

        __syncthreads();

        up_sweep_512((uint*) &data_block[0]);

        if (threadIdx.x == 0) {
            // Publish the tile total, then zero the top for the down-sweep.
            dev_auxiliary_array[blockIdx.x] = data_block[511];
            data_block[511] = 0;
        }

        __syncthreads();

        down_sweep_512((uint*) &data_block[0]);

        // Store back elements
        // assert( first_elem + threadIdx.x + blockDim.x < number_of_events * VeloTracking::n_modules + 2);
        dev_main_array[first_elem + threadIdx.x] = data_block[threadIdx.x];
        dev_main_array[first_elem + threadIdx.x + blockDim.x] = data_block[threadIdx.x + blockDim.x];

        __syncthreads();
    }
    // Last block is special because
    // it may contain an unspecified number of elements
    else {
        const auto elements_remaining = array_size & 0x1FF; // % 512
        if (elements_remaining > 0) {
            const auto first_elem = array_size - elements_remaining;

            // Initialize all elements to zero
            data_block[threadIdx.x] = 0;
            data_block[threadIdx.x + blockDim.x] = 0;

            // Load elements
            const auto elem_index = first_elem + threadIdx.x;
            if (elem_index < array_size) {
                data_block[threadIdx.x] = dev_main_array[elem_index];
            }
            if ((elem_index + blockDim.x) < array_size) {
                data_block[threadIdx.x + blockDim.x] = dev_main_array[elem_index + blockDim.x];
            }

            __syncthreads();

            up_sweep_512((uint*) &data_block[0]);

            // Store sum of all elements
            if (threadIdx.x == 0) {
                dev_auxiliary_array[blockIdx.x] = data_block[511];
                data_block[511] = 0;
            }

            __syncthreads();

            down_sweep_512((uint*) &data_block[0]);

            // Store back elements
            if (elem_index < array_size) {
                dev_main_array[elem_index] = data_block[threadIdx.x];
            }
            if ((elem_index + blockDim.x) < array_size) {
                dev_main_array[elem_index + blockDim.x] = data_block[threadIdx.x + blockDim.x];
            }
        }
    }
}
4,286
#include "includes.h"

// Knapsack genetic-algorithm constants.
#define N 50              // size of the surviving population
#define NewN 100          // size of the candidate (offspring) population
#define LifeN 500         // presumably the number of generations — TODO confirm at call site
#define numofthreads 512  // presumably the launch block size — TODO confirm at call site

int numofeles = 0, capacity;

// One candidate solution: accumulated weight/value and a selection bitmap
// over up to 100003 items.
struct chromosome {
    long long weight = 0, value = 0;
    bool chromo[100003];
};

// Host/device populations and scratch. chromoele holds the current best N,
// newpopulation the NewN candidates; cudaChromo/cudaNewpopulation are their
// device mirrors.
chromosome chromoele[N], *cudaChromo, *cudaNewpopulation, newpopulation[NewN], res, x[2];
int weight[100001], value[100001], *devValue, *devWeight, *devnumeles;

// Selection step: thread idx scans candidates idx, idx+N, idx+2N, ... and
// replaces its survivor cudaChromo[idx] with any feasible candidate
// (weight <= capacity) of strictly higher value.
__global__ void gan(chromosome *cudaChromo, chromosome* cudaNewpopulation, const int capacity)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N) {
        // Stride N so each candidate is examined by exactly one thread.
        for (int i = idx; i < NewN; i += N)
            if (cudaNewpopulation[i].weight <= capacity && cudaNewpopulation[i].value > cudaChromo[idx].value)
                cudaChromo[idx] = cudaNewpopulation[i];  // struct copy, includes the full bitmap
    }
}
4,287
/***************************************************
 * Module that adds a new row at the top of the matrix with all ones
 * Author: Alonso Vidales <alonso.vidales@tras2.es>
 *
 * To be compiled with nvcc -ptx matrix_add_bias_top.cu
 * Debug: nvcc -arch=sm_20 -ptx matrix_add_bias_top.cu
 * **************************************************/
//#include <stdio.h>

#ifdef __cplusplus
extern "C" {
#endif

// CUDA Kernel: C is A with a bias row of ones prepended. Row 0 of C is all
// ones; row y of C (y > 0) is row y-1 of A. Each block covers a resW x resH
// region of the output.
__global__ void matrixAddBiasTop(double* C, double* A, int width, int resW, int resH, int resultSize)
{
    const int col = threadIdx.x + (blockIdx.x * resW);
    const int row = threadIdx.y + (blockIdx.y * resH);
    const int outIdx = row * width + col;

    // Skip threads that fall outside the output buffer or past the row end.
    if (outIdx >= resultSize || col >= width)
        return;

    // First row is the bias; every other row shifts A down by one row.
    C[outIdx] = (row == 0) ? 1 : A[outIdx - width];
}

#ifdef __cplusplus
}
#endif
4,288
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#define size 4
using namespace std;

// Element-wise vector addition: z[tid] = x[tid] + y[tid] for tid < size.
__global__ void add(int *x, int *y, int *z)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < size) {
        z[tid] = x[tid] + y[tid];
    }
}

// Matrix (size x size, row-major) times vector: r[tid] += sum_i p[tid][i]*q[i].
// Note r must be zero-initialized by the caller (it accumulates with +=).
__global__ void multiplyVectorAndMatrix(int *p, int *q, int *r)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < size) {
        for (int i = 0; i < size; i++) {
            r[tid] += p[(tid * size) + i] * q[i];
        }
    }
}

// Naive matrix multiplication ii = g * h, one thread per output element.
__global__ void matrixMultiplication(int *g, int *h, int *ii)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if ((row < size) && (col < size)) {
        for (int i = 0; i < size; i++) {
            sum += g[(row * size) + i] * h[(i * size) + col];
        }
        // Fix: the original had a __syncthreads() here — inside a divergent
        // branch (UB when the grid is padded) and pointless since no shared
        // memory is used. Removed.
        ii[(row * size) + col] = sum;
    }
}

int main()
{
    // ---- ADDITION OF TWO VECTORS ----
    int x[size], y[size], z[size];
    for (int i = 0; i < size; i++) {
        x[i] = rand() % 100 + 1;
        y[i] = rand() % 50 + 1;
        z[i] = 0;
    }
    cout << "1st Vector: ";
    for (int i = 0; i < size; i++) {
        cout << x[i] << " ";
    }
    cout << endl << "2nd Vector: ";
    for (int i = 0; i < size; i++) {
        cout << y[i] << " ";
    }
    cout << endl;

    int byte_size = size * sizeof(int);

    cout << "Addition using CPU: ";
    for (int i = 0; i < size; i++) {
        cout << x[i] + y[i] << " ";
    }
    cout << endl;

    cout << "Addition using GPU: ";
    int *a, *b, *c;
    cudaMalloc(&a, byte_size);
    cudaMemcpy(a, x, byte_size, cudaMemcpyHostToDevice);
    cudaMalloc(&b, byte_size);
    cudaMemcpy(b, y, byte_size, cudaMemcpyHostToDevice);
    cudaMalloc(&c, byte_size);
    cudaMemcpy(c, z, byte_size, cudaMemcpyHostToDevice);
    add<<<2, size / 2>>>(a, b, c);
    cudaMemcpy(&z, c, byte_size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++) {
        cout << z[i] << " ";
    }
    cout << endl;

    // ---- MULTIPLICATION: MATRIX AND VECTOR ----
    int m[size][size], n[size], o[size];
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            m[i][j] = rand() % 10 + 1;
        }
        n[i] = rand() % 10 + 1;
        o[i] = 0;
    }
    cout << endl;
    cout << "Matrix:" << endl;
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            cout << m[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl << "Vector: ";
    for (int i = 0; i < size; i++) {
        cout << n[i] << " ";
    }
    cout << endl << endl;

    size_t matrix_size = size * size * sizeof(int);
    size_t vector_size = size * sizeof(int);

    cout << "Multiplication using CPU: ";
    for (int i = 0; i < size; i++) {
        o[i] = 0;
        for (int j = 0; j < size; j++) {
            o[i] += m[i][j] * n[j];
        }
    }
    for (int i = 0; i < size; i++) {
        cout << o[i] << " ";
        o[i] = 0;  // reset so the device copy below starts from zero
    }
    cout << endl;

    cout << "Multiplication using GPU: ";
    int *p, *q, *r;
    cudaMalloc(&p, matrix_size);
    cudaMemcpy(p, m, matrix_size, cudaMemcpyHostToDevice);
    cudaMalloc(&q, vector_size);
    cudaMemcpy(q, n, vector_size, cudaMemcpyHostToDevice);
    cudaMalloc(&r, vector_size);
    cudaMemcpy(r, o, vector_size, cudaMemcpyHostToDevice);  // zero accumulator
    multiplyVectorAndMatrix<<<2, size / 2>>>(p, q, r);
    cudaMemcpy(&o, r, vector_size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++) {
        cout << o[i] << " ";
    }
    cout << endl;

    // ---- MATRIX MULTIPLICATION ----
    int d[size][size], e[size][size], f[size][size];
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            d[i][j] = rand() % 10 + 1;
            e[i][j] = rand() % 10 + 1;
        }
    }
    cout << endl;
    cout << "Matrix:" << endl;
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            cout << d[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl;
    cout << "Matrix:" << endl;
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            cout << e[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl;

    cout << "Multiplication using CPU:" << endl;
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            f[i][j] = 0;
            for (int k = 0; k < size; k++) {
                f[i][j] += d[i][k] * e[k][j];
            }
        }
    }
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            cout << f[i][j] << " ";
            f[i][j] = 0;
        }
        cout << endl;
    }
    cout << endl;

    cout << "Multiplication using GPU:" << endl;
    int *g, *h, *ii;
    cudaMalloc(&g, matrix_size);
    cudaMemcpy(g, d, matrix_size, cudaMemcpyHostToDevice);
    cudaMalloc(&h, matrix_size);
    cudaMemcpy(h, e, matrix_size, cudaMemcpyHostToDevice);
    cudaMalloc(&ii, matrix_size);
    cudaMemcpy(ii, f, matrix_size, cudaMemcpyHostToDevice);

    dim3 threadsPerblock(size, size);
    dim3 blocksPerGrid(1, 1);
    if (size * size > 512) {
        // Fix: the original set a 512x512 block (262144 threads — invalid,
        // the hardware maximum is 1024 threads per block). Use 16x16 tiles
        // and enough blocks to cover the matrix.
        threadsPerblock.x = 16;
        threadsPerblock.y = 16;
        blocksPerGrid.x = (size + threadsPerblock.x - 1) / threadsPerblock.x;
        blocksPerGrid.y = (size + threadsPerblock.y - 1) / threadsPerblock.y;
    }
    matrixMultiplication<<<blocksPerGrid, threadsPerblock>>>(g, h, ii);
    cudaMemcpy(&f, ii, matrix_size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            cout << f[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl;

    // Fix: the original leaked every device buffer.
    cudaFree(a); cudaFree(b); cudaFree(c);
    cudaFree(p); cudaFree(q); cudaFree(r);
    cudaFree(g); cudaFree(h); cudaFree(ii);
    return 0;
}
4,289
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Element-wise vector addition: c[id] = a[id] + b[id] for id < n.
__global__ void vecAdd(double * a, double * b, double * c, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        c[id] = a[id] + b[id];
}

int main(int argc, char * argv[])
{
    const int n = 100;
    const size_t bytes = n * sizeof(double);

    // Host buffers.
    double *h_a = (double *)malloc(bytes);
    double *h_b = (double *)malloc(bytes);
    double *h_c = (double *)malloc(bytes);

    // Device buffers.
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    // Both inputs hold 0, 1, 2, ..., n-1.
    for (int i = 0; i < n; ++i) {
        h_a[i] = i;
        h_b[i] = i;
    }

    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // 10 threads per block, ceil(n / 10) blocks.
    const int blockSize = 10;
    const int gridSize = (int)ceil((float)n / blockSize);
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);

    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; ++i)
        printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(h_a); free(h_b); free(h_c);
    return 0;
}
4,290
#include "includes.h"

// 2x2 box downsample of two chroma channels (presumably 4:4:4 -> 4:2:0 —
// TODO confirm against the caller). The left half of the thread grid
// processes ch1 into downCh1, the right half processes ch2 into downCh2.
// Each thread averages a 2x2 pixel block with rounding via `bias`.
// NOTE(review): the output stride is `width` (the full-resolution width),
// not width/2 — verify this matches the allocation of downCh1/downCh2.
__global__ void downSanple420_gpu(cudaTextureObject_t ch1, cudaTextureObject_t ch2, int16_t *downCh1, int16_t *downCh2, size_t width, size_t height)
{
    int2 threadCoord = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
    // Each thread covers two source rows, hence the y << 1 bound.
    if (threadCoord.x < width && (threadCoord.y << 1) < height) {
        int2 pixelCoord;
        cudaTextureObject_t *ch;
        int16_t *downCh;
        // Route threads to channel 1 or 2 by x position.
        // Remember thread divergence happens at the wrap level only, that will parallelize well
        if (threadCoord.x < (width >> 1)) {
            pixelCoord = make_int2(threadCoord.x << 1, threadCoord.y << 1);
            ch = &ch1;
            downCh = downCh1;
        }
        else {
            pixelCoord = make_int2((threadCoord.x - (width >> 1)) << 1, threadCoord.y << 1);
            ch = &ch2;
            downCh = downCh2;
        }
        // Rounding bias before the >>2 divide; alternates 1/2 with x parity
        // (presumably deliberate dithering — TODO confirm intent).
        int16_t bias = (pixelCoord.x & 1) + 1;
        // Average the 2x2 neighbourhood fetched through the texture unit.
        int16_t pixel = (tex2D<int16_t>(*ch, pixelCoord.x, pixelCoord.y) + tex2D<int16_t>(*ch, pixelCoord.x + 1, pixelCoord.y) + tex2D<int16_t>(*ch, pixelCoord.x, pixelCoord.y + 1) + tex2D<int16_t>(*ch, pixelCoord.x + 1, pixelCoord.y + 1) + bias) >> 2;
        downCh[(pixelCoord.y >> 1) * width + (pixelCoord.x >> 1)] = pixel;
    }
}
4,291
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <curand_kernel.h>
#include <curand.h>

#define SEED 921
#define NUM_ITER 25600000
#define TRIALS_PER_THREAD 100000

// Wall-clock time in seconds.
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

// Monte-Carlo pi: each thread throws TRIALS_PER_THREAD darts at the unit
// square and writes its in-circle count to g_count[id].
__global__ void pi_seq(int *g_count, curandState *states)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;

    // Initialize this thread's random state (deterministic per thread id).
    int seed = id;
    curand_init(seed, id, 0, &states[id]);

    // Fix: the original did g_count[id]++ on memory that was never zeroed
    // (cudaMallocManaged does not guarantee zero-fill), so the result
    // depended on the buffer's initial contents. Accumulate in a register
    // and store once — also avoids a global-memory RMW per trial.
    int hits = 0;
    double x, y, z;
    for (int i = 0; i < TRIALS_PER_THREAD; i++) {
        x = curand_uniform(&states[id]);
        y = curand_uniform(&states[id]);
        z = sqrt((x * x) + (y * y));
        // Check if point is in unit circle
        if (z <= 1.0)
            hits++;
    }
    g_count[id] = hits;
}

int main(int argc, char *argv[])
{
    int count = 0;
    double x, y, z, pi;

    srand(SEED); // Important: Multiply SEED by "rank" when you introduce MPI!

    double cpu_start = cpuSecond();
    // Calculate PI following a Monte Carlo method (CPU reference).
    for (int iter = 0; iter < NUM_ITER; iter++) {
        // Generate random (X,Y) points
        x = (double)random() / (double)RAND_MAX;
        y = (double)random() / (double)RAND_MAX;
        z = sqrt((x * x) + (y * y));
        // Check if point is in unit circle
        if (z <= 1.0)
            count++;
    }
    // Estimate Pi and display the result
    pi = ((double)count / (double)NUM_ITER) * 4.0;
    double cpu_end = cpuSecond();
    printf("The result estimated by CPU is %lf in %lfs\n", pi, cpu_end - cpu_start);

    int num_block = 1, num_threads = 256;

    // Allocate a curandState for every CUDA thread.
    curandState *dev_random;
    cudaMalloc(&dev_random, num_block * num_threads * sizeof(curandState));

    int *g_count;
    double gpu_start = cpuSecond();
    cudaMallocManaged(&g_count, sizeof(int) * num_threads);
    // Defensive zero-init of the managed counters (the kernel overwrites
    // them, but this keeps the sum well-defined even if a launch fails).
    for (int i = 0; i < num_threads; i++)
        g_count[i] = 0;

    pi_seq<<<num_block, num_threads>>>(g_count, dev_random);
    cudaDeviceSynchronize();  // required before reading managed memory on host

    int g_sum = 0;
    for (int i = 0; i < num_threads; i++)
        g_sum += g_count[i];
    double g_pi = ((double)g_sum / ((double)TRIALS_PER_THREAD * num_threads)) * 4.0;
    double gpu_end = cpuSecond();
    printf("The result estimated by GPU is %lf in %lfs\n", g_pi, gpu_end - gpu_start);

    // Fix: the original leaked both device allocations.
    cudaFree(dev_random);
    cudaFree(g_count);
    return 0;
}
4,292
#include <cuda_runtime.h>
#include <iostream>

// Grid has one block, block has 1024 threads:
//   dim3 BlocksperGrid(1); dim3 ThreadsperBlock(1024);
__global__ void OneDimAdd(float *d_A, float *d_B, float *d_C, int numElements)
{
    int i = threadIdx.x;
    if (i < numElements) {
        d_C[i] = d_A[i] + d_B[i];
    }
}

// Grid has one block, block has (4,256) threads:
//   dim3 BlocksperGrid(1); dim3 ThreadsperBlock(4,256);
__global__ void TwoDimAdd(float *d_A, float *d_B, float *d_C, int numElements)
{
    int i = threadIdx.y * blockDim.x + threadIdx.x;
    if (i < numElements) {
        d_C[i] = d_A[i] + d_B[i];
    }
}

// Grid has (2,2) blocks, each block has (2,128) threads
// (fix: the original comment said (2,3), contradicting its own config):
//   dim3 BlocksperGrid(2,2); dim3 ThreadsperBlock(2,128);
__global__ void TwoandTwoDimAdd(float *d_A, float *d_B, float *d_C, int numElements)
{
    int blockindex = gridDim.x * blockIdx.y + blockIdx.x;
    int i = blockDim.x * blockDim.y * blockindex + threadIdx.y * blockDim.x + threadIdx.x;
    if (i < numElements) {
        d_C[i] = d_A[i] + d_B[i];
    }
}

// OneDimAdd, TwoDimAdd and TwoandTwoDimAdd all compute one element per
// thread. ThreadsDimAdd instead lets each of 256 threads add four
// consecutive elements:
//   dim3 BlocksperGrid(1); dim3 ThreadsperBlock(256);
__global__ void ThreadsDimAdd(float *d_A, float *d_B, float *d_C, int numElements)
{
    int i = threadIdx.x;
    if (i < 256) {
        for (int j = 0; j < 4; j++) {
            d_C[i * 4 + j] = d_A[i * 4 + j] + d_B[i * 4 + j];
        }
    }
}

int main()
{
    // Print basic properties of every visible CUDA device.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int i = 0; i < deviceCount; i++) {
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        std::cout << "使用GPU device " << i << ": " << devProp.name << std::endl;
        std::cout << "设备全局内存总量: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
        std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl;
        std::cout << "每个线程块的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
        std::cout << "每个线程块的最大线程数:" << devProp.maxThreadsPerBlock << std::endl;
        std::cout << "设备上一个线程块(Block)种可用的32位寄存器数量: " << devProp.regsPerBlock << std::endl;
        std::cout << "每个EM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl;
        std::cout << "每个EM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
        std::cout << "设备上多处理器的数量: " << devProp.multiProcessorCount << std::endl;
        std::cout << "======================================================" << std::endl;
    }

    int numElements = 1024;
    float *A = new float[numElements];
    float *B = new float[numElements];
    float *C = new float[numElements];
    for (int i = 0; i < numElements; i++) {
        A[i] = i * 1.0;
        B[i] = i * 1.0;
        C[i] = 0.0;
    }

    int size = numElements * sizeof(float);
    float *d_A = NULL;
    float *d_B = NULL;
    float *d_C = NULL;
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    // One block of 256 threads; each thread handles 4 elements.
    dim3 BlocksperGrid(1);
    dim3 ThreadsperBlock(256);
    ThreadsDimAdd<<<BlocksperGrid, ThreadsperBlock>>>(d_A, d_B, d_C, numElements);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    // Verify against the host sum.
    // Fix: the original compared the signed difference against 10e-6, so a
    // C[i] that was too LARGE was never flagged. Compare the magnitude.
    for (int i = 0; i < numElements; i++) {
        float diff = A[i] + B[i] - C[i];
        if (diff > 10e-6 || diff < -10e-6) {
            std::cout << "err\n";
        }
    }
    std::cout << "\n";

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    delete[] A;
    delete[] B;
    delete[] C;
    return 0;
}
4,293
#include <stdio.h>
#include <inttypes.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#ifndef CONFIG_DEFINED
#define CONFIG_DEFINED
// Number of real digits per Digit stored.
static const int PRECISION = 4;
// 10 ^ PRECISION: Used in many calcs. Each uint16_t slot is one base-10000 digit.
static const int MAGNITUDE = 10000;
// Maximum number of Digits that will be stored per integer or decimal.
static const int DIGITS = 25;
static const int DMO = DIGITS - 1; // DIGITS MINUS ONE. Used in loops and indexing.
static const int HUGE_DIGITS = DIGITS * 2; // DIGITS TIMES TWO
static const int HDMO = HUGE_DIGITS - 1; // HUGE DIGITS MINUS ONE. Used in loops and indexing.
#endif

#ifndef TYPES_DEFINED
#define TYPES_DEFINED
// An arbitrary precision integer. sign==true means non-negative.
// digits[0] is the most significant base-10000 digit.
struct ArbInt { bool sign; uint16_t digits[DIGITS]; };
// An arbitrary int used for multiplication calcs (double width for overflow).
struct ArbHugeInt { bool sign; uint16_t digits[DIGITS * 2]; };
// An arbitrary precision decimal. decpos is the count of digits before the
// decimal point.
struct ArbDec { bool sign = true; uint16_t digits[DIGITS]; int decpos = 0; };
#endif

// Debug-print an ArbDec in human-readable decimal form.
__device__ void PrintArb(ArbDec *num)
{
    if (num->sign == false) printf("-");
    bool started = false;
    bool first = true;
    // Whole part: skip leading zero slots, print the first slot unpadded.
    for (int ii = 0; ii < num->decpos; ii++) {
        if (started == false && num->digits[ii] != 0) started = true;
        if (started) {
            if (first) { first = false; printf("%d", num->digits[ii]); }
            else printf("%04d", num->digits[ii]);
        }
    }
    // Find the last nonzero fractional slot so trailing zeros are dropped.
    int endat = num->decpos;
    for (int ii = DMO; ii >= num->decpos; ii--) {
        if (num->digits[ii] != 0) {
            endat = ii;
            if (endat < 0) endat = 0;
            break;
        }
    }
    if (endat > num->decpos - 1) {
        printf(".");
        for (int ii = num->decpos; ii <= endat; ii++) {
            printf("%04d", num->digits[ii]);
        }
    }
}

/**************** Zero-related ****************************************************/
/* Zero out all elements in an Arb number. */
__device__ void ZeroOut(ArbInt *arb) { for (int kk = 0; kk < DIGITS; kk++) arb->digits[kk] = 0; }
__device__ void ZeroOut(ArbDec *arb) { for (int kk = 0; kk < DIGITS; kk++) arb->digits[kk] = 0; }
__device__ void ZeroOut(ArbHugeInt *arb) { for (int kk = 0; kk < HUGE_DIGITS; kk++) arb->digits[kk] = 0; }

/* Retrieve a zero value. */
__device__ ArbInt GetZeroInt()
{
    struct ArbInt arb;
    arb.sign = true;
    ZeroOut(&arb);
    return arb;
}

__device__ ArbDec GetZeroDec()
{
    struct ArbDec arb;
    arb.sign = true;
    arb.decpos = 0;
    ZeroOut(&arb);
    return arb;
}

/******************* Convert from regular number types or between arb types ******************************/

// Build an ArbInt from a machine int, least significant base-10000 digit last.
__device__ ArbInt ArbFromInteger(int num)
{
    struct ArbInt arb = GetZeroInt();
    // Determine the sign.
    arb.sign = num >= 0;
    // Get rid of the sign.
    if (num < 0) num = -num;
    for (int ii = DIGITS - 1; ii >= 0; ii--) {
        int this_digit = num % MAGNITUDE;
        arb.digits[ii] = this_digit;
        num = num / MAGNITUDE;
    }
    return arb;
}

// Build an ArbDec from a double: whole part via ArbFromInteger, then 8
// base-10000 fractional digits extracted by repeated multiplication.
__device__ ArbDec ArbFromDouble(double num)
{
    // Start with an int.
    int as_int = (int)num;
    ArbInt i = ArbFromInteger(as_int);
    int zero = -1;
    double rest = num - (double)as_int;
    struct ArbDec arb = GetZeroDec();
    arb.sign = num >= 0;
    // zero ends as the highest index holding a nonzero whole-part digit
    // (NOTE(review): i.e. the least significant nonzero slot of the ArbInt).
    for (int kk = 0; kk < DIGITS; kk++) {
        arb.digits[kk] = 0;
        if (i.digits[kk] != 0) zero = kk;
    }
    if (zero == -1) {
        // This number has no whole part.
        arb.decpos = 0;
        zero = DIGITS;
    }
    // Copy the int digits over
    int numofnonzero = DIGITS - zero;
    for (int kk = 0; kk < numofnonzero; kk++) {
        arb.digits[kk] = i.digits[kk + zero];
    }
    arb.decpos = numofnonzero;
    // We go 8 * magnitude deep.
    // Get rid of the sign
    if (rest < 0) rest = -rest;
    for (int ii = 0; ii < 8; ii++) {
        rest = rest * MAGNITUDE;
        int part = (int)rest;
        arb.digits[ii + numofnonzero] = part;
        rest = rest - (double)part;
    }
    return arb;
}

// Assemble an ArbDec from unpacked fields (used to rebuild kernel arguments).
__device__ ArbDec Construct(bool sign, int decpos, uint16_t *copyFromDigits)
{
    struct ArbDec arb;
    arb.sign = sign;
    arb.decpos = decpos;
    for (int kk = 0; kk < DIGITS; kk++) arb.digits[kk] = copyFromDigits[kk];
    return arb;
}

/************************ Comparison of Arb Number ******************************/

// Magnitude comparison: compares decpos first, then digits lexicographically.
// NOTE(review): sign is ignored — callers treat this as |a| > |b|.
__device__ bool GreaterThan(ArbDec *a, ArbDec *b)
{
    if (a->decpos == b->decpos) {
        for (int ii = 0; ii < DIGITS; ii++)
            if (a->digits[ii] != b->digits[ii]) return a->digits[ii] > b->digits[ii];
    }
    else {
        return a->decpos > b->decpos;
    }
    return false;
}

__device__ bool GreaterThan(ArbInt *a, ArbInt *b)
{
    for (int ii = 0; ii < DIGITS; ii++)
        if (a->digits[ii] != b->digits[ii]) return a->digits[ii] > b->digits[ii];
    return false;
}

__device__ bool GreaterThan(ArbHugeInt *a, ArbHugeInt *b)
{
    for (int ii = 0; ii < HUGE_DIGITS; ii++)
        if (a->digits[ii] != b->digits[ii]) return a->digits[ii] > b->digits[ii];
    return false;
}

/************************************ Mathematical Operations on Arb numbers **************************************************************/

// Schoolbook addition of two non-negative huge ints, digit by digit with carry.
__device__ void AddHugePositiveIntegers(ArbHugeInt *a, ArbHugeInt *b, ArbHugeInt *result)
{
    uint16_t carry = 0;
    result->sign = true;
    bool all_zeros = true;
    for (int ii = 0; ii < HUGE_DIGITS; ii++) {
        result->digits[ii] = 0;
        if (all_zeros == true && (a->digits[ii] != 0 || b->digits[ii] != 0)) all_zeros = false;
    }
    if (all_zeros) { result->sign = true; return; }
    for (int ii = HUGE_DIGITS - 1; ii >= 0; ii--) {
        int sum = a->digits[ii] + b->digits[ii] + carry;
        if (sum >= MAGNITUDE) { sum = sum - MAGNITUDE; carry = 1; }
        else carry = 0;
        result->digits[ii] = sum;
    }
}

// Multiply a and b.
// Schoolbook long multiplication; each_line/sum_result are caller-provided
// scratch so no large structs live on the device stack.
__device__ void MultiplyHugePositiveIntegers(ArbInt *a, ArbInt *b, ArbHugeInt *result, ArbHugeInt *each_line, ArbHugeInt *sum_result)
{
    uint16_t carry = 0;
    bool a_on_top = GreaterThan(a, b);
    // struct ArbHugeInt result;
    for (int kk = 0; kk < HUGE_DIGITS; kk++) {
        result->digits[kk] = 0;
        sum_result->digits[kk] = 0;
    }
    // Locate the most significant nonzero digit of each operand.
    int a_size = -1;
    for (int kk = 0; kk < DIGITS; kk++) { if (a->digits[kk] != 0) { a_size = kk; break; } }
    int b_size = -1;
    for (int kk = 0; kk < DIGITS; kk++) { if (b->digits[kk] != 0) { b_size = kk; break; } }
    if (a_size == -1 || b_size == -1) { result->sign = true; return; }  // either operand is zero
    a_size -= 1;
    b_size -= 1;
    int top_size = (a_on_top ? a_size : b_size);
    int bot_size = (a_on_top ? b_size : a_size);
    // One partial-product line per digit of the bottom operand.
    for (int ii = DIGITS - 1; ii > bot_size; ii--) {
        int idx_move = DIGITS - ii - 1;
        //struct ArbHugeInt thisline;
        for (int kk = 0; kk < HUGE_DIGITS; kk++) each_line->digits[kk] = 0;
        each_line->sign = true;
        for (int jj = DIGITS - 1; jj > top_size; jj--) {
            int prod = (a_on_top ? (a->digits[jj] * b->digits[ii]) : (a->digits[ii] * b->digits[jj])) + carry;
            if (prod >= MAGNITUDE) {
                carry = prod / MAGNITUDE;
                prod = prod - (carry * MAGNITUDE);
            }
            else { carry = 0; }
            int idx = jj - idx_move + DIGITS;  // shift into the huge (double-width) layout
            each_line->digits[idx] = prod;
        }
        if (carry > 0) {
            int idx = top_size - idx_move + DIGITS;
            each_line->digits[idx] = carry;
        }
        // Accumulate this line into the running total.
        AddHugePositiveIntegers(sum_result, each_line, result);
        // Copy result back onto sum_result.
        for (int kk = 0; kk < HUGE_DIGITS; kk++) {
            sum_result->digits[kk] = result->digits[kk];
        }
    }
    // Sign of the product: negative iff exactly one operand is negative.
    bool a_sign = a->sign;
    bool b_sign = b->sign;
    if (a_sign == false && b_sign == false) result->sign = true;
    else if (a_sign == false || b_sign == false) result->sign = false;
}

// |a| + |b| with decimal-point alignment; result sign is always positive.
__device__ void AddTwoPositiveArbitrary(ArbDec *a, ArbDec *b, ArbDec *result)
{
    uint16_t carry = 0;
    //struct ArbDec result;
    for (int ii = 0; ii < DIGITS; ii++) result->digits[ii] = 0;
    result->sign = true;
    result->decpos = a->decpos > b->decpos ? a->decpos : b->decpos;
    int pos_diff = a->decpos - b->decpos;
    bool a_on_top = GreaterThan(a, b);  // larger magnitude drives alignment
    if (pos_diff < 0) pos_diff = -pos_diff;
    for (int ii = DMO; ii >= 0; ii--) {
        int bot_idx = ii - pos_diff;  // smaller number is shifted right to align points
        int sum = carry + (a_on_top ? a->digits[ii] : b->digits[ii]) + (bot_idx < 0 ? 0 : (a_on_top ? b->digits[bot_idx] : a->digits[bot_idx]));
        if (sum >= MAGNITUDE) { carry = 1; sum = sum - MAGNITUDE; }
        else carry = 0;
        result->digits[ii] = sum;
    }
    if (carry > 0) {
        // Shift the array right to make room for the carry.
        for (int jj = DMO - 1; jj > 0; jj--) {
            result->digits[jj] = result->digits[jj - 1];
        }
        result->digits[0] = carry;
        result->decpos = result->decpos + 1;
    }
}

// |larger| - |smaller| with decimal-point alignment; result sign records
// whether a >= b in magnitude.
__device__ void SubtractTwoPositiveArbitrary(ArbDec *a, ArbDec *b, ArbDec *result)
{
    uint16_t take = 0;
    //struct ArbDec result;
    bool all_zeros = true;
    for (int ii = 0; ii < DIGITS; ii++) {
        result->digits[ii] = 0;
        if (all_zeros == true && (a->digits[ii] != 0 || b->digits[ii] != 0)) all_zeros = false;
    }
    if (all_zeros) {
        result->sign = true;
        result->decpos = 0;
        return;
    }
    result->sign = true;
    result->decpos = a->decpos > b->decpos ? a->decpos : b->decpos;
    int pos_diff = a->decpos - b->decpos;
    bool a_on_top = GreaterThan(a, b);
    if (a_on_top == false) result->sign = false;  // b was bigger: result is negative
    if (pos_diff < 0) pos_diff = -pos_diff;
    for (int ii = DMO; ii >= 0; ii--) {
        int bot_idx = ii - pos_diff;
        int sum = (a_on_top ? a->digits[ii] : b->digits[ii]) - (bot_idx < 0 ? 0 : (a_on_top ? b->digits[bot_idx] : a->digits[bot_idx])) - take;
        if (sum < 0) { take = 1; sum += MAGNITUDE; }  // borrow from the next digit
        else take = 0;
        result->digits[ii] = sum;
    }
    if (result->digits[0] == 0 && result->decpos > 0) {
        // Shift the array left to cinch up the zero.
        for (int jj = 0; jj < DMO - 1; jj++) {
            result->digits[jj] = result->digits[jj + 1];
        }
        result->digits[DMO] = 0;
        result->decpos = result->decpos - 1;
    }
}

// a * b for ArbDec: normalize both into right-aligned ArbInts, do huge
// integer multiplication, then re-place the decimal point and trim.
// All scratch buffers are caller-provided to keep stack usage down.
__device__ void MultiplyArbitrary(ArbDec *a, ArbDec *b, ArbDec *result, ArbInt *a_int, ArbInt *b_int, ArbHugeInt *huge_scratch, ArbHugeInt *each_line, ArbHugeInt *sum_result)
{
    // OPTIMIZE: If a or b is zero, return zero.
    // OPTIMIZE: If a or b is one, return the other one.
    //struct ArbDec result;
    result->sign = true;
    for (int kk = 0; kk < DIGITS; kk++) result->digits[kk] = 0;
    bool a_on_top = GreaterThan(a, b);
    //struct ArbInt a_int;
    a_int->sign = a->sign;
    //struct ArbInt b_int;
    b_int->sign = b->sign;
    // Shift everything RIGHT size-number of digits so we have overflow.
    // This is the precision penalty paid when digits approach the max digits.
    int a_zeroes = -1;
    for (int jj = DMO; jj >= 0; jj--) { if (a->digits[jj] != 0) { a_zeroes = DMO - jj; break; } }
    int b_zeroes = -1;
    for (int jj = DMO; jj >= 0; jj--) { if (b->digits[jj] != 0) { b_zeroes = DMO - jj; break; } }
    // OPTIMIZED: IF EITHER a and b are zero, just return zero.
    if (a_zeroes == -1 || b_zeroes == -1) { return; }
    // Shift everything right to make it useful for integer math.
    for (int jj = DMO; jj >= 0; jj--) {
        if (jj >= a_zeroes) { a_int->digits[jj] = a->digits[jj - a_zeroes]; }
        else { a_int->digits[jj] = 0; }
        if (jj >= b_zeroes) { b_int->digits[jj] = b->digits[jj - b_zeroes]; }
        else { b_int->digits[jj] = 0; }
    }
    if (a_on_top) MultiplyHugePositiveIntegers(a_int, b_int, huge_scratch, each_line, sum_result);
    else MultiplyHugePositiveIntegers(b_int, a_int, huge_scratch, each_line, sum_result);
    // Extract the DIGITS-wide window of the double-width product.
    int size = (DMO - a_zeroes) + (DMO - b_zeroes);
    result->sign = huge_scratch->sign;
    for (int jj = 0; jj <= DMO; jj++) {
        int idx_huge = HDMO - size + jj - 1;
        if (idx_huge <= HDMO) result->digits[jj] = huge_scratch->digits[idx_huge];
        else result->digits[jj] = 0;
    }
    result->decpos = a->decpos + b->decpos;
    // Now trim off leading zeros by shifting left.
    int result_non_zero_idx = -1;
    for (int jj = 0; jj < DMO; jj++) { if (result->digits[jj] != 0) { result_non_zero_idx = jj; break; } }
    if (result_non_zero_idx == -1) {
        result->decpos = 0;
        result->sign = true;
    }
    else {
        if (result_non_zero_idx > result->decpos) result_non_zero_idx = result->decpos;
        if (result_non_zero_idx > 0) {
            int over = DMO - result_non_zero_idx;
            for (int jj = 0; jj < DMO; jj++) {
                result->digits[jj] = jj > over ? 0 : result->digits[jj + result_non_zero_idx];
            }
            result->decpos = result->decpos - result_non_zero_idx;
            if (result->decpos < 0) result->decpos = 0;
        }
    }
    // Product sign: negative iff exactly one factor is negative.
    bool a_sign = a->sign;
    bool b_sign = b->sign;
    if (a_sign == false && b_sign == false) result->sign = true;
    else if (a_sign == false || b_sign == false) result->sign = false;
}

// Signed addition, dispatching on operand signs to the magnitude routines.
__device__ void Add(ArbDec *a, ArbDec *b, ArbDec *result)
{
    // -a + b = b - a
    if (a->sign == false && b->sign == true) { SubtractTwoPositiveArbitrary(b, a, result); }
    // a + -b = a - b
    else if (a->sign == true && b->sign == false) { SubtractTwoPositiveArbitrary(a, b, result); }
    // a + b = a + b
    else if (a->sign == true && b->sign == true) { AddTwoPositiveArbitrary(a, b, result); }
    // -a + -b = -(a+b)
    else if (a->sign == false && b->sign == false) {
        AddTwoPositiveArbitrary(a, b, result);
        result->sign = false;
        result;  // NOTE(review): stray no-op expression in the original; kept verbatim
    }
}

// Signed subtraction, dispatching on operand signs to the magnitude routines.
__device__ void Subtract(ArbDec *a, ArbDec *b, ArbDec *result)
{
    // -a - b = -(a+b)
    if (a->sign == false && b->sign == true) {
        AddTwoPositiveArbitrary(a, b, result);
        result->sign = false;
    }
    // a - -b = a + b
    else if (a->sign == true && b->sign == false) { AddTwoPositiveArbitrary(a, b, result); }
    // a - b = a - b
    else if (a->sign == true && b->sign == true) { SubtractTwoPositiveArbitrary(a, b, result); }
    // -a - -b = -a + b = b - a
    else if (a->sign == false && b->sign == false) { SubtractTwoPositiveArbitrary(b, a, result); }
}

// Mandelbrot escape test: true while x^2 + y^2 < 4.
__device__ bool KeepGoing(ArbDec *x, ArbDec *y, ArbDec *x2, ArbDec *y2, ArbDec *sum, ArbInt *a_int, ArbInt *b_int, ArbHugeInt *huge_scratch, ArbHugeInt *each_line, ArbHugeInt *sum_result)
{
    // x ^ 2
    MultiplyArbitrary(x, x, x2, a_int, b_int, huge_scratch, each_line, sum_result);
    // y ^ 2
    MultiplyArbitrary(y, y, y2, a_int, b_int, huge_scratch, each_line, sum_result);
    // sum the squares
    Add(x2, y2, sum);
    // Keep going if the sum is less than four.
    bool result = sum->decpos == 0 ? true : sum->digits[sum->decpos - 1] < 4;
    return result;
}

// result = a * 2, using the caller-supplied ArbDec constant `two`.
__device__ void TimesTwo(ArbDec *a, ArbDec *result, ArbDec *two, ArbInt *a_int, ArbInt *b_int, ArbHugeInt *huge_scratch, ArbHugeInt *each_line, ArbHugeInt *sum_result)
{
    MultiplyArbitrary(a, two, result, a_int, b_int, huge_scratch, each_line, sum_result);
}

// Per-pixel Mandelbrot iteration count at arbitrary precision.
// One thread per pixel on a 2D grid; c[idx] receives the escape iteration
// (or `max` if the point never escaped). The base/scale ArbDec operands
// arrive unpacked as (sign, decpos, digits[]) triples because __global__
// arguments cannot easily carry these structs from the host.
__global__ void kernel(uint16_t *c, int xsize, int y_scale_sign, int y_scale_decpos, uint16_t *y_scale_digits, int y_base_sign, int y_base_decpos, uint16_t *y_base_digits, int x_scale_sign, int x_scale_decpos, uint16_t *x_scale_digits, int x_base_sign, int x_base_decpos, uint16_t *x_base_digits, int max)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
    int idx = xsize * idx_y + idx_x;
    // The number two!
    ArbDec two = GetZeroDec();
    two.sign = true;
    two.decpos = 1;
    two.digits[0] = 2;
    // Indexes.. where are we?
    ArbDec idx_x_arb = ArbFromDouble((double)idx_x);
    ArbDec idx_y_arb = ArbFromDouble((double)idx_y);
    // Contract the bases and scales.
    ArbDec x_base = Construct(x_base_sign, x_base_decpos, x_base_digits);
    ArbDec x_scale = Construct(x_scale_sign, x_scale_decpos, x_scale_digits);
    ArbDec y_base = Construct(y_base_sign, y_base_decpos, y_base_digits);
    ArbDec y_scale = Construct(y_scale_sign, y_scale_decpos, y_scale_digits);
    // Scratchpad vars for small memory footprint.
    // DONT use GetZeroDec here because i want to do a single loop.
    ArbDec x0; ArbDec x_mult; ArbDec y0; ArbDec y_mult; ArbDec x; ArbDec y;
    ArbInt a_int; ArbInt b_int;
    ArbHugeInt huge_scratch; ArbHugeInt each_line; ArbHugeInt sum_result;
    ArbDec x2; ArbDec y2; ArbDec sum; ArbDec diff; ArbDec temp;
    ArbDec x_times_2; ArbDec x_times_2_times_y;
    // Single pass zeroes all digit slots (sign/decpos assignments are
    // redundant after the first iteration but harmless).
    // NOTE(review): only DIGITS slots of the ArbHugeInt scratch are zeroed
    // here; the multiply routines re-zero them fully before use.
    for (int kk = 0; kk < DIGITS; kk++) {
        x0.sign = true; y0.sign = true; x_mult.sign = true; y_mult.sign = true;
        x.sign = true; y.sign = true; a_int.sign = true; b_int.sign = true;
        huge_scratch.sign = true; each_line.sign = true; sum_result.sign = true;
        x2.sign = true; y2.sign = true; sum.sign = true; diff.sign = true;
        temp.sign = true; x_times_2.sign = true; x_times_2_times_y.sign = true;
        x0.digits[kk] = 0; y0.digits[kk] = 0; x_mult.digits[kk] = 0; y_mult.digits[kk] = 0;
        x.digits[kk] = 0; y.digits[kk] = 0; a_int.digits[kk] = 0; b_int.digits[kk] = 0;
        huge_scratch.digits[kk] = 0; each_line.digits[kk] = 0; sum_result.digits[kk] = 0;
        x2.digits[kk] = 0; y2.digits[kk] = 0; sum.digits[kk] = 0; diff.digits[kk] = 0;
        temp.digits[kk] = 0; x_times_2.digits[kk] = 0; x_times_2_times_y.digits[kk] = 0;
    }
    // Calculate x0 from base and scale
    MultiplyArbitrary(&idx_x_arb, &x_scale, &x_mult, &a_int, &b_int, &huge_scratch, &each_line, &sum_result);
    Add(&x_base, &x_mult, &x0);
    // Calculate y0 from base and scale.
    MultiplyArbitrary(&idx_y_arb, &y_scale, &y_mult, &a_int, &b_int, &huge_scratch, &each_line, &sum_result);
    Add(&y_base, &y_mult, &y0);
    uint16_t ii = 0;
    // Standard escape-time loop: x' = x^2 - y^2 + x0, y' = 2xy + y0.
    while (KeepGoing(&x, &y, &x2, &y2, &sum, &a_int, &b_int, &huge_scratch, &each_line, &sum_result) && ii < max) {
        // Calculate x
        MultiplyArbitrary(&x, &x, &x2, &a_int, &b_int, &huge_scratch, &each_line, &sum_result);
        MultiplyArbitrary(&y, &y, &y2, &a_int, &b_int, &huge_scratch, &each_line, &sum_result);
        Subtract(&x2, &y2, &diff);
        Add(&diff, &x0, &temp);
        // Calculate y
        TimesTwo(&x, &x_times_2, &two, &a_int, &b_int, &huge_scratch, &each_line, &sum_result);
        MultiplyArbitrary(&x_times_2, &y, &x_times_2_times_y, &a_int, &b_int, &huge_scratch, &each_line, &sum_result);
        Add(&x_times_2_times_y, &y0, &y);
        // Move temp into x.
        for (int kk = 0; kk < DIGITS; kk++) x.digits[kk] = temp.digits[kk];
        x.sign = temp.sign;
        x.decpos = temp.decpos;
        // Increase the iterator!!
        ii++;
    }
    //printf("%d took %d iterations\n", idx, ii);
    c[idx] = ii;
}

// Host entry point is a stub; the kernel is presumably launched from a
// separate host file — TODO confirm.
int main()
{
    uint32_t j = 0;
    return 0;
}
4,294
#include <iostream>
#include <cmath>        // fabs — previously relied on a transitive include
#include <sys/times.h>
#include <unistd.h>

// Fail-fast wrapper: every CUDA runtime call returns a cudaError_t, and
// kernel launches only report failures via cudaGetLastError() or the next
// synchronizing call, so both must be checked explicitly.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            std::cerr << "CUDA error " << __FILE__ << ":" << __LINE__ << ": " \
                      << cudaGetErrorString(err_) << std::endl;               \
            std::exit(EXIT_FAILURE);                                          \
        }                                                                     \
    } while (0)

// Midpoint-rectangle quadrature of 4/(1+x^2) over [0,1] (whose integral is
// pi). Each thread integrates its own contiguous sub-range of the cntSteps
// rectangles and stores its UNSCALED partial sum in data[threadId]; the host
// multiplies by `step` and reduces.
//
// Launch expectation: 1-D grid with cntThreads == gridDim.x * blockDim.x,
// and `data` holding cntThreads doubles.
__global__ void calcInterval(double *data, const long cntSteps,
                             const long cntThreads, const double step)
{
    const int idThread = blockDim.x * blockIdx.x + threadIdx.x;
    const long cntStepsPerThread = cntSteps / cntThreads;

    // The last thread also absorbs the remainder rectangles.
    long localmax = (idThread + 1) * cntStepsPerThread;
    if (idThread == cntThreads - 1)
        localmax = cntSteps;

    double sum = 0.0;
    for (long i = idThread * cntStepsPerThread; i < localmax; i++) {
        const double x = (i + .5) * step;  // midpoint of rectangle i
        sum = sum + 4.0 / (1. + x * x);
    }
    data[idThread] = sum;
}

int main(int argc, char **argv)
{
    const unsigned long cntSteps = 500000000; /* default # of rectangles */
    double step = 1. / static_cast<double>(cntSteps);
    const double PI25DT = 3.141592653589793238462643; // 25-digit reference
    double pi = 0.;

    const int cntThreads = 256;
    const int cntBlocks = 256;
    const long cntThreadsTotal = cntThreads * cntBlocks;

    std::cout << "\ncomputing on GPU (" << cntThreadsTotal << ") threads "
              << std::endl;

    // Wall-clock timing around the whole allocate/compute/copy/reduce path.
    clock_t clockStart, clockStop;
    tms tmsStart, tmsStop;
    clockStart = times(&tmsStart);

    double *gpuValues = NULL;
    CUDA_CHECK(cudaMalloc((void **) &gpuValues,
                          cntThreadsTotal * sizeof(double)));

    calcInterval<<<cntBlocks, cntThreads>>>(gpuValues, cntSteps,
                                            cntThreadsTotal, step);
    CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors

    // A blocking cudaMemcpy also synchronizes with the kernel and surfaces
    // any asynchronous execution error.
    double *cpuValues = new double[cntThreadsTotal];
    CUDA_CHECK(cudaMemcpy(cpuValues, gpuValues,
                          cntThreadsTotal * sizeof(double),
                          cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(gpuValues));

    // Host-side reduction of the per-thread partial sums; the rectangle
    // width `step` is applied here, not in the kernel.
    for (long i = 0; i < cntThreadsTotal; i++) {
        pi += cpuValues[i] * step;
    }
    delete[] cpuValues;

    clockStop = times(&tmsStop);
    std::cout << "The value of PI is " << pi << " Error is "
              << fabs(pi - PI25DT) << std::endl;
    std::cout << "The time to calculate PI was ";
    double secs = (clockStop - clockStart) /
                  static_cast<double>(sysconf(_SC_CLK_TCK));
    std::cout << secs << " seconds\n" << std::endl;
    return 0;
}
4,295
#include "cuda.h"
#include <cstdio>   // fprintf in CUDA_CHECK
#include <cstdlib>  // abort in CUDA_CHECK

// Fail-fast wrapper: every CUDA runtime call returns a cudaError_t, and
// kernel launches only report failures via cudaGetLastError() or the next
// synchronizing call — both must be checked explicitly.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
            abort();                                                         \
        }                                                                    \
    } while (0)

// Adds 1 to each of the numElements floats in `in`, writing results to
// `out`. Grid-stride loop: correct for any launch configuration, including
// grids with fewer threads than elements.
__global__ void addOneKernel(float* out, const float* in, int numElements)
{
    int stride = blockDim.x * gridDim.x;
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    for (; tidx < numElements; tidx += stride) {
        out[tidx] = in[tidx] + 1;
    }
}

// Kernel Wrapper: copies in_h to the device, runs addOneKernel, and copies
// the result back into out_h. Both host buffers must hold numElements
// floats. Aborts on any CUDA runtime error.
void addOne(float* out_h, const float* in_h, int numElements)
{
    if (numElements <= 0) return; // nothing to do; avoids zero-byte allocations

    float *in_d, *out_d;
    size_t size = sizeof(float) * numElements;
    CUDA_CHECK(cudaMalloc(&in_d, size));
    CUDA_CHECK(cudaMalloc(&out_d, size));
    CUDA_CHECK(cudaMemcpy(in_d, in_h, size, cudaMemcpyHostToDevice));

    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    addOneKernel<<<blocksPerGrid, threadsPerBlock>>>(out_d, in_d, numElements);
    CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors

    // The blocking copy also synchronizes with the kernel and surfaces any
    // asynchronous execution error.
    CUDA_CHECK(cudaMemcpy(out_h, out_d, size, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(in_d));
    CUDA_CHECK(cudaFree(out_d));
}
4,296
#include "includes.h"

// Element-wise vector addition: C[i] = A[i] + B[i] for every i in [0, n).
// One thread per element; threads whose global index falls past the end of
// the arrays (the grid tail) do nothing.
__global__ void vecAddKernel(float *A, float *B, float *C, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return; // guard the grid tail
    C[idx] = A[idx] + B[idx];
}
4,297
#include "includes.h"

// Tabulates one of four single-precision math functions over a uniform grid.
// Thread i handles the sample point x = _frange_start + i * _dx and writes
// the pair (x, f(x)) into out[2*i] and out[2*i + 1].
// fnCode selects f: 0 -> sinf, 1 -> cosf, 2 -> tanf, 3 -> log10f.
// NOTE(review): for any other fnCode the y slot is left unwritten —
// presumably callers only ever pass 0..3; confirm at the call site.
__global__ void simple_sinf(float* out, const size_t _data_size, int fnCode, const float _dx, const float _frange_start)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= _data_size)
        return; // grid tail: no sample assigned to this thread

    const float x = _frange_start + tid * _dx;
    const int slot = 2 * tid;
    out[slot] = x; // the abscissa is always stored

    if (fnCode == 0)
        out[slot + 1] = sinf(x);
    else if (fnCode == 1)
        out[slot + 1] = cosf(x);
    else if (fnCode == 2)
        out[slot + 1] = tanf(x);
    else if (fnCode == 3)
        out[slot + 1] = log10f(x);
}
4,298
#include <stdbool.h>
#include <stdio.h>

typedef unsigned char uchar;

#define N_THREADS 32
#define N_BLOCKS 48
#define TOTAL_IDX (blockIdx.x * blockDim.x + threadIdx.x)
#define PLAN_LEN_MAX 255

/* A move direction of the blank tile; dir_reverse maps UP<->DOWN and
 * LEFT<->RIGHT (3 - d works because of the encoding below). */
typedef uchar Direction;
#define dir_reverse(dir) ((Direction)(3 - (dir)))
#define DIR_N 4
#define DIR_FIRST 0
#define DIR_UP 0
#define DIR_RIGHT 1
#define DIR_LEFT 2
#define DIR_DOWN 3

/* stack implementation: per-thread DFS path of moves, one stack per thread
 * of a block, held in shared memory (assumes blockDim.x == N_THREADS). */
__device__ __shared__ static struct dir_stack_tag
{
    uchar i;                 /* number of entries currently on the stack */
    uchar buf[PLAN_LEN_MAX];
} stack[N_THREADS];

#define STACK_I (stack[threadIdx.x].i)
#define stack_get(i) (stack[threadIdx.x].buf[i])
#define stack_set(i, val) (stack[threadIdx.x].buf[i] = (val))

__device__ static inline void stack_init(void) { STACK_I = 0; }
__device__ static inline void stack_put(Direction dir)
{
    stack_set(STACK_I, dir);
    ++STACK_I;
}
__device__ static inline bool stack_is_empty(void) { return STACK_I == 0; }
__device__ static inline Direction stack_pop(void)
{
    --STACK_I;
    return stack_get(STACK_I);
}
__device__ static inline Direction stack_peak(void)
{
    return stack_get(STACK_I - 1);
}

/* state implementation */
#define STATE_WIDTH 4
#define STATE_N (STATE_WIDTH*STATE_WIDTH)
/* POS_X/POS_Y below hard-code width 4 (mask 3, shift 2). */
static char assert_state_width_is_four[STATE_WIDTH==4 ? 1 : -1];
#define POS_X(pos) ((pos) & 3)
#define POS_Y(pos) ((pos) >> 2)

/*
 * Per-thread puzzle state.
 * goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] (0 = the blank).
 */
__device__ __shared__ static struct state_tag
{
    uchar tile[STATE_N];
    uchar empty;   /* board position of the blank */
    uchar h_value; /* Manhattan-distance heuristic; ub of h_value is 6*16 */
} state[N_THREADS];

#define STATE_TILE(i) (state[threadIdx.x].tile[(i)])
#define STATE_EMPTY (state[threadIdx.x].empty)
#define STATE_HVALUE (state[threadIdx.x].h_value)

__device__ static uchar inline distance(uchar i, uchar j)
{
    return i > j ? i - j : j - i;
}

/* h_diff_table[opponent][empty][dir]: change (+1/-1) in the Manhattan
 * heuristic when the blank at `empty` moves in `dir`, so that tile
 * `opponent` slides the opposite way. */
#define H_DIFF(opponent, empty, empty_dir)                                   \
    h_diff_table[opponent][empty][empty_dir]
__device__ __shared__ static int h_diff_table[STATE_N][STATE_N][DIR_N];

/* Fill h_diff_table. Shared memory is uninitialized, so this must run
 * before any search. */
__device__ static void init_mdist(void)
{
    for (int opponent = 0; opponent < STATE_N; ++opponent)
    {
        int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);

        for (int i = 0; i < STATE_N; ++i)
        {
            int from_x = POS_X(i), from_y = POS_Y(i);
            for (uchar dir = 0; dir < DIR_N; ++dir)
            {
                if (dir == DIR_LEFT)
                    H_DIFF(opponent, i, dir) = goal_x > from_x ? -1 : 1;
                if (dir == DIR_RIGHT)
                    H_DIFF(opponent, i, dir) = goal_x < from_x ? -1 : 1;
                if (dir == DIR_UP)
                    H_DIFF(opponent, i, dir) = goal_y > from_y ? -1 : 1;
                if (dir == DIR_DOWN)
                    H_DIFF(opponent, i, dir) = goal_y < from_y ? -1 : 1;
            }
        }
    }
}

/* Compute the Manhattan-distance heuristic of the current tiles. */
__device__ static inline void state_init_hvalue(void)
{
    uchar from_x[STATE_N], from_y[STATE_N];

    /* BUG FIX: `state` lives in shared memory, which is NOT zero-initialized,
     * so the original accumulated into a garbage starting value. */
    STATE_HVALUE = 0;

    for (int i = 0; i < STATE_N; ++i)
    {
        from_x[STATE_TILE(i)] = POS_X(i);
        from_y[STATE_TILE(i)] = POS_Y(i);
    }
    for (int i = 1; i < STATE_N; ++i) /* tile 0 (the blank) is not counted */
    {
        state[threadIdx.x].h_value += distance(from_x[i], POS_X(i));
        state[threadIdx.x].h_value += distance(from_y[i], POS_Y(i));
    }
}

/* Load a board (16 tile values, 0 marks the blank) into this thread's state. */
__device__ static void state_tile_fill(const uchar v_list[STATE_WIDTH * STATE_WIDTH])
{
    for (int i = 0; i < STATE_N; ++i)
    {
        if (v_list[i] == 0)
            STATE_EMPTY = i;
        STATE_TILE(i) = v_list[i];
    }
}

/* The heuristic is 0 exactly at the goal configuration. */
__device__ static inline bool state_is_goal(void)
{
    return state[threadIdx.x].h_value == 0;
}

static char assert_direction2
    [DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 1 : -1];

/* movable_table[pos][dir]: can the blank at `pos` move in `dir`? */
__device__ __shared__ static bool movable_table[STATE_N][DIR_N];

__device__ static void init_movable_table(void)
{
    for (int i = 0; i < STATE_N; ++i)
        for (unsigned int d = 0; d < DIR_N; ++d)
        {
            if (d == DIR_RIGHT)
                movable_table[i][d] = (POS_X(i) < STATE_WIDTH - 1);
            else if (d == DIR_LEFT)
                movable_table[i][d] = (POS_X(i) > 0);
            else if (d == DIR_DOWN)
                movable_table[i][d] = (POS_Y(i) < STATE_WIDTH - 1);
            else if (d == DIR_UP)
                movable_table[i][d] = (POS_Y(i) > 0);
        }
}

__device__ static inline bool state_movable(Direction dir)
{
    return movable_table[STATE_EMPTY][dir];
}

static char assert_direction
    [DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 1 : -1];

/* Index delta of the blank for each direction (row-major 4x4 board). */
__device__ __constant__ const static int pos_diff_table[DIR_N] = {
    -STATE_WIDTH, 1, -1, +STATE_WIDTH};

/* Try to move the blank in `dir`; the move is rejected if the resulting
 * f = g + h (stack depth + heuristic) would exceed f_limit.
 * Returns whether the move was applied. */
__device__ static inline bool state_move_with_limit(Direction dir,
                                                    unsigned int f_limit)
{
    int new_empty = STATE_EMPTY + pos_diff_table[dir];
    int opponent = STATE_TILE(new_empty);
    int new_h_value = STATE_HVALUE + H_DIFF(opponent, new_empty, dir);

    if (STACK_I + 1 + new_h_value > f_limit)
        return false;

    STATE_HVALUE = new_h_value;
    STATE_TILE(STATE_EMPTY) = opponent;
    STATE_EMPTY = new_empty;

    return true;
}

/* Unconditionally move the blank in `dir` (used when backtracking). */
__device__ static inline void state_move(Direction dir)
{
    int new_empty = STATE_EMPTY + pos_diff_table[dir];
    int opponent = STATE_TILE(new_empty);

    STATE_HVALUE += H_DIFF(opponent, new_empty, dir);
    STATE_TILE(STATE_EMPTY) = opponent;
    STATE_EMPTY = new_empty;
}

/*
 * solver implementation
 */

/* One bounded depth-first IDA* probe. Returns true (solution path left on
 * the stack) or false (bounded tree exhausted). *ret_nodes_expanded receives
 * the number of node expansions attempted. */
__device__ static bool idas_internal(int f_limit, int *ret_nodes_expanded)
{
    uchar dir = 0;
    int nodes_expanded = 0;

    for (;;)
    {
        if (state_is_goal())
        {
            *ret_nodes_expanded = nodes_expanded;
            return true;
        }

        /* Never immediately undo the move that led here (stack_peak). */
        if ((stack_is_empty() || stack_peak() != dir_reverse(dir)) &&
            state_movable(dir))
        {
            ++nodes_expanded;

            if (state_move_with_limit(dir, f_limit))
            {
                stack_put(dir);
                dir = 0;
                continue;
            }
        }

        /* Advance to the next direction; when all four are exhausted,
         * backtrack by undoing the move on top of the stack. */
        while (++dir == DIR_N)
        {
            if (stack_is_empty())
            {
                *ret_nodes_expanded = nodes_expanded;
                return false;
            }

            dir = stack_pop();
            state_move(dir_reverse(dir));
        }
    }
}

/* Iterative-deepening A* over the single input board; writes the plan as
 * plan[0] = length, plan[1..length] = move directions. */
__global__ void idas_kernel(uchar *input, uchar *plan)
{
    int nodes_expanded = 0, nodes_expanded_first = 0;
    int f_limit;
    bool found;
    int id = threadIdx.x + blockIdx.x * blockDim.x;

    /* BUG FIX: the host allocates and copies exactly ONE state (STATE_N
     * bytes) and one plan buffer, but every launched thread used to read
     * input + id * STATE_N (out of bounds for id > 0) and race on plan[].
     * Only thread 0 does the work until the host provides per-thread data. */
    if (id != 0)
        return;

    init_mdist();
    init_movable_table();

    stack_init();
    state_tile_fill(input + id * STATE_N);
    state_init_hvalue();

    /* First probe at the heuristic lower bound. */
    {
        f_limit = STATE_HVALUE;
        nodes_expanded_first = 0;
        found = idas_internal(f_limit, &nodes_expanded);
    }

    if (!found)
    {
        /* NOTE(review): nodes_expanded_first is always 0 here, so the
         * comparison below effectively asks "did the f_limit+1 search expand
         * nothing" — presumably a solution-length parity probe. Confirm the
         * intended IDA* bound schedule; kept as-is. */
        ++f_limit;
        nodes_expanded = 0;
        found = idas_internal(f_limit, &nodes_expanded);
        f_limit += nodes_expanded == nodes_expanded_first ? 1 : 2;

        for (;; f_limit += 2)
        {
            nodes_expanded = 0;
            found = idas_internal(f_limit, &nodes_expanded);
            if (found)
                break;
        }
    }

    plan[0] = (int) STACK_I; /* len of plan */
    for (uchar i = 0; i < STACK_I; ++i)
        plan[i + 1] = stack_get(i);
}

/* host implementation */

#include <errno.h>
#include <limits.h> /* INT_MAX/INT_MIN — missing in the original */
#include <stdio.h>
#include <stdlib.h>

#define exit_failure(...)                                                    \
    do                                                                       \
    {                                                                        \
        printf(__VA_ARGS__);                                                 \
        exit(EXIT_FAILURE);                                                  \
    } while (0)

/* Parse one integer from str, advancing *end_ptr past it; exits the process
 * on any conversion failure. */
static int pop_int_from_str(const char *str, char **end_ptr)
{
    long int rv;

    /* BUG FIX: errno must be cleared BEFORE strtol; the original reset it
     * afterwards, which made the error check dead code. */
    errno = 0;
    rv = strtol(str, end_ptr, 0);

    if (errno != 0)
        exit_failure("%s: %s cannot be converted into long\n", __func__, str);
    else if (end_ptr && str == *end_ptr)
        exit_failure("%s: reach end of string", __func__);

    if (rv > INT_MAX || rv < INT_MIN)
        exit_failure("%s: too big number, %ld\n", __func__, rv);

    return (int) rv;
}

#define MAX_LINE_LEN 100

/* Read STATE_N whitespace-separated tile values from the first line of
 * fname into s; exits the process on any I/O or parse error. */
static void load_state_from_file(const char *fname, uchar *s)
{
    FILE *fp;
    char str[MAX_LINE_LEN];
    char *str_ptr = str, *end_ptr;

    fp = fopen(fname, "r");
    if (!fp)
        exit_failure("%s: %s cannot be opened\n", __func__, fname);

    if (!fgets(str, MAX_LINE_LEN, fp))
        exit_failure("%s: fgets failed\n", __func__);

    for (int i = 0; i < STATE_N; ++i)
    {
        s[i] = pop_int_from_str(str_ptr, &end_ptr);
        str_ptr = end_ptr;
    }

    fclose(fp);
}
#undef MAX_LINE_LEN

/* Exit-on-error wrapper for CUDA runtime calls. */
#define CUDA_CHECK(call)                                                     \
    do                                                                       \
    {                                                                        \
        const cudaError_t e = call;                                          \
        if (e != cudaSuccess)                                                \
            exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__,     \
                         __LINE__, e, cudaGetErrorString(e));                \
    } while (0)

/* Touch the compile-time assertion arrays so -Wunused stays quiet. */
static void avoid_unused_static_assertions(void)
{
    (void) assert_direction[0];
    (void) assert_direction2[0];
    (void) assert_state_width_is_four[0];
}

int main(int argc, char *argv[])
{
    uchar s_list[STATE_N];
    uchar *s_list_device;
    uchar plan[PLAN_LEN_MAX];
    uchar *plan_device;
    int insize = sizeof(uchar) * STATE_N;
    int outsize = sizeof(uchar) * PLAN_LEN_MAX;

    if (argc < 2)
    {
        printf("usage: bin/cumain <ifname>\n");
        exit(EXIT_FAILURE);
    }

    load_state_from_file(argv[1], s_list);

    CUDA_CHECK(cudaMalloc((void **) &s_list_device, insize));
    CUDA_CHECK(cudaMalloc((void **) &plan_device, outsize));
    CUDA_CHECK(cudaMemcpy(s_list_device, s_list, insize,
                          cudaMemcpyHostToDevice));

    idas_kernel<<<N_BLOCKS, N_THREADS>>>(s_list_device, plan_device);

    /* The blocking copy also synchronizes with (and surfaces errors from)
     * the kernel. */
    CUDA_CHECK(cudaMemcpy(plan, plan_device, outsize,
                          cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(s_list_device));
    CUDA_CHECK(cudaFree(plan_device));
    CUDA_CHECK(cudaDeviceReset());

    printf("len=%d: ", (int) plan[0]);
    for (int i = 0; i < plan[0]; ++i)
        printf("%d ", (int) plan[i + 1]);
    putchar('\n');

    avoid_unused_static_assertions();

    return 0;
}
4,299
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Smoke-test kernel: every launched thread prints the same greeting via
// device-side printf (serialized and slow — debugging/demo use only).
__global__ void helloCuda()
{
    printf("Hello Aman and Sharach ..........\n");
}

// Example launcher, kept disabled for reference: 8 blocks x 4 threads = 32
// prints. NOTE(review): if re-enabled, add a cudaDeviceSynchronize() after
// the launch — otherwise the process may exit before the device printf
// buffer is flushed.
/* int main() { dim3 block(4); // 4 threads per block; dim3 grid(8); // 8x4 = 32 threads; 1 grid = 8 blocks; helloCuda << <grid, block >> > (); } */
4,300
#include "includes.h"

// Maximum problem dimensions shared by the kernels in this file.
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;

//////////////////////////////////////////////////////////////////////////////
// For each (filter, rank) pair, projects the accumulated waveform stack dWU
// onto the temporal component W over the nt0 samples:
//   U[chan + Nchan*filt + Nchan*Nfilt*rank] =
//       sum_t W[t + nt0*filt + nt0*Nfilt*rank] * dWU[t + chan*nt0 + filt*Nchan*nt0]
// Dimensions are read from Params: nt0 = Params[4], Nchan = Params[9],
// Nfilt = Params[1].
// NOTE(review): indexing implies one block per filter (blockIdx.x),
// threadIdx.x selecting the rank, and threadIdx.y striding over channels —
// confirm against the host launch configuration.
//////////////////////////////////////////////////////////////////////////////
__global__ void getU(const double *Params, const double *dWU, double *W, double *U){
    const int nt0   = (int) Params[4];  // samples per waveform
    const int Nchan = (int) Params[9];  // number of channels
    const int Nfilt = (int) Params[1];  // number of filters/templates
    const int rank  = threadIdx.x;
    const int filt  = blockIdx.x;

    // Each y-thread handles every blockDim.y-th channel.
    for (int chan = threadIdx.y; chan < Nchan; chan += blockDim.y) {
        double acc = 0.0;
        for (int t = 0; t < nt0; t++)
            acc += W[t + nt0 * filt + nt0 * Nfilt * rank]
                 * dWU[t + chan * nt0 + filt * Nchan * nt0];
        U[chan + Nchan * filt + Nchan * Nfilt * rank] = acc;
    }
}