serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
18,901
// 1-D convolution of A with a 5-tap mask M (offsets k = -2..2), accumulated
// into B. Launch with one thread per element (single block; threadIdx.x
// indexes A/B). Out-of-range taps at the edges are skipped (zero padding).
__global__ void convolution1d(float *A, float *B, float *M, int numElements) {
    int i = threadIdx.x;
    if (i < numElements) {
        // j indexes the mask, k the input offset. The original wrote
        // "j < 4, k <= 2": the comma operator discards the left operand,
        // so the loop was governed solely by k <= 2 (5 taps, M[0..4]).
        // The misleading dead condition is removed.
        for (int j = 0, k = -2; k <= 2; ++j, ++k) {
            if ((i + k) >= 0 && (i + k) < numElements) {
                B[i] += M[j] * A[i + k];
            }
        }
    }
}

// 2-D convolution of an numElements x numElements image A with a 3x3 mask M
// (row-major, M[k*3+m]), accumulated into B. Launch with one block per row
// (blockIdx.x) and one thread per column (threadIdx.x). Out-of-range taps
// are skipped (zero padding).
__global__ void convolution2d(float *A, float *B, float *M, int numElements) {
    int i = blockIdx.x;
    int j = threadIdx.x;
    if ((i < numElements) && (j < numElements)) {
        // Same comma-operator bug as above: the effective conditions were
        // l <= 1 and n <= 1; written explicitly here.
        for (int k = 0, l = -1; l <= 1; ++k, ++l) {
            for (int m = 0, n = -1; n <= 1; ++m, ++n) {
                if ((i + l) >= 0 && (i + l) < numElements &&
                    (j + n) >= 0 && (j + n) < numElements) {
                    B[i * numElements + j] += M[k * 3 + m] * A[(i + l) * numElements + (j + n)];
                }
            }
        }
    }
    // The original called __syncthreads() inside the divergent guard above,
    // which is undefined behavior when some threads of the block return
    // early. No shared memory is used, so no barrier is needed at all.
}
18,902
#include "includes.h"

// In-place element-wise matrix addition: a[r][c] += b[r][c] for an m x n
// matrix stored row-major. Grid-stride in both dimensions, so any 2-D
// launch configuration covers the whole matrix.
__global__ void kernelSumaMatrices(float *a, float *b, int m, int n) {
    const int strideRows = blockDim.x * gridDim.x;
    const int strideCols = blockDim.y * gridDim.y;
    for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < m; row += strideRows) {
        for (int col = threadIdx.y + blockIdx.y * blockDim.y; col < n; col += strideCols) {
            a[row * n + col] += b[row * n + col];
        }
    }
}
18,903
#include "includes.h"

// Total number of threads in the 1-D grid (the grid-stride step).
__device__ int DeviceDefaultStep() {
    return gridDim.x * blockDim.x;
}

// Flat global index of the calling thread within the 1-D grid.
__device__ int DeviceDefaultIndex() {
    return blockIdx.x * blockDim.x + threadIdx.x;
}

// Fill p[0..len) with `value` using a grid-stride loop, so the kernel is
// correct for any launch configuration.
__global__ void KernelMemset(bool *p, int len, bool value) {
    for (int i = DeviceDefaultIndex(); i < len; i += DeviceDefaultStep()) {
        p[i] = value;
    }
}
18,904
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;

// Each of the 256 threads prints a greeting with its thread index.
__global__ void printHello() {
    printf("Hello World! My threadId is %d\n", threadIdx.x);
}

int main() {
    printHello<<<1, 256>>>();
    // Kernel launches are asynchronous and device-side printf is buffered:
    // the original exited immediately (with this sync commented out), so the
    // kernel could be torn down before producing any output. Synchronize
    // before terminating the process.
    cudaDeviceSynchronize();
    return 0;
}
18,905
#include <stdio.h>

// One Jacobi relaxation sweep on an (N+2) x (N+2) grid (row pitch M = N+2):
// each interior point of unew is set to the average of uold's four
// neighbours plus the lambda2-scaled source term f. Boundary rows/columns
// (i or j equal to 0 or N+1) are never written.
// NOTE(review): every thread executes the full serial double loop — this
// kernel only makes sense launched <<<1,1>>>; with more threads they all
// redundantly compute the same values. Confirm the intended launch config.
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
    int M = N+2;
    for (int i = 1; i < N+1; i++){
        for (int j = 1; j < N+1; j++){
            // 5-point stencil; lambda2 is presumably h^2 so that
            // lambda2*f is the discretized source — TODO confirm.
            unew[i*M+j] = ( 0.25*(uold[(i-1)*M+j]+uold[(i+1)*M+j]+ uold[i*M+j-1]+uold[i*M+j+1]+lambda2*f[i*M+j]) );
        }
    }
}
18,906
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <string.h>

__device__ double generateRandom(curandState *state);
__device__ void generateRandomInit(curandState *state,int i );

// Error-handling macro: print file/line and the CUDA reason, then abort.
#define CHECK(call) \
{\
 const cudaError_t error = call;\
 if (error != cudaSuccess)\
 {\
 printf("Error:%s:%d, ", __FILE__, __LINE__);\
 printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\
 exit(1);\
 }\
}

// Per-GPU working set: the device buffers plus the stream and the timing
// events used to drive that GPU asynchronously.
typedef struct
{
    double *d_pmt, *d_hit, *d_result;
    // double *h_result_s;
    cudaStream_t stream;
    cudaEvent_t start, stop;
}GPU_data;

// Inverse-CDF sampling. For each element id:
//   1. draw the hit count n from the discrete CDF in pmt row id
//      (max_n bins per row);
//   2. for each of the n hits, draw a hit time from the CDF in
//      hittime row id (max_time bins) and store the sampled bin
//      index into result[id*max_n + item].
// result shares pmt's row layout, so n <= max_n must hold (it does by
// construction of the first loop).
__global__ void CDF_Sampling(double *pmt, double *hittime, double *result, int numElements,int max_n,int max_time,int gpu_id)
{
    //compute one-dimensional data index
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    curandState state;
    generateRandomInit(&state,id);
    if (id < numElements)
    {
        double prob;
        prob = generateRandom(&state);
        double sum = 0;
        int n = 0;
        // Walk the cumulative sum until it exceeds the uniform draw.
        for (int item = 0; item < max_n; item++)
        {
            sum += pmt[id*max_n+item];
            if (prob <= sum)
            {
                n = item;
                // printf("[gpu: %d] thread %d: hit times:%d\n",gpu_id, id, n);
                break;
            }
        }
        // One additional draw per hit to sample its time bin.
        for (int item = 0;item < n;item++)
        {
            // double prob2;
            prob = generateRandom(&state);
            sum = 0;
            for (int j = 0; j < max_time;j++)
            {
                sum += hittime[id*max_time+j];
                if (prob <= sum)
                {
                    result[id*max_n+item] = (double)j;
                    // printf("[gpu: %d] thread %d: %dth hit time %d\n",gpu_id, id, item+1,j);
                    break;
                }
            }
        }
    }
}

// Uniform double from this thread's RNG state.
// NOTE(review): curand_uniform_double returns a value in (0, 1], so the
// abs() wrapper looks redundant — confirm before removing.
__device__ double generateRandom(curandState *state)
{
    // int id = blockIdx.x*blockDim.x+threadIdx.x;
    double result = abs(curand_uniform_double(state));
    // printf("thread:%d random double: %f \n",id,result);
    return result;
}

// Seed this thread's curand state with its global id as the seed
// (deterministic across runs; each thread gets an independent stream).
__device__ void generateRandomInit(curandState *state,int id)
{
    // long seed = (unsigned long long)clock();
    curand_init(id, 0, 0, state);
}

// __host__ void
// initData(double *h_pmt, double *h_hit, int Size)
// {
// }

extern "C"
{
// Host wrapper: splits the workload evenly across all visible GPUs, copies
// the per-GPU input slices asynchronously on per-GPU streams, launches
// CDF_Sampling on each, gathers the results into h_result, and returns the
// elapsed time in ms measured with GPU 0's events (or -1 if no GPU found).
// NOTE(review): assumes total_num and nBytes are divisible by the GPU
// count and that nBytes == total_num * max_n * sizeof(double) — confirm
// with the caller.
float CDF_Sampling_wrapper(double *h_pmt,double *h_hit,double *h_result, int total_num, int nBytes,int max_n,int max_time)
{
    // GPU timing: start/stop events (superseded by the per-GPU events below)
    // cudaEvent_t start, stop;
    // cudaEventCreate(&start);
    // cudaEventCreate(&stop);
    // Get the number of GPUs
    int GPU_num;
    CHECK(cudaGetDeviceCount(&GPU_num));
    // printf("GPU number:%d\n",GPU_num);
    if(GPU_num<1)
    {
        printf("no CUDA capable devices were detected\n");
        return -1;
    }
    // NOTE(review): variable-length array — a compiler extension in C++,
    // accepted by nvcc/gcc but not standard.
    GPU_data data[GPU_num];
    int single_size = total_num/GPU_num;
    int single_bytes = nBytes/GPU_num;
    // cudaEventRecord(start,0);
    // Allocate GPU memory and queue the async host-to-device copies,
    // one stream per GPU. Host offsets are in doubles, hence /8.
    // double *d_pmt, *d_hit,*d_result;
    for(int gpu_id =0; gpu_id < GPU_num; gpu_id++)
    {
        cudaSetDevice(gpu_id);
        cudaStreamCreate(&data[gpu_id].stream);
        cudaEventCreate(&data[gpu_id].start);
        cudaEventCreate(&data[gpu_id].stop);
        cudaEventRecord(data[gpu_id].start,data[gpu_id].stream);
        CHECK(cudaMalloc((double**)&(data[gpu_id].d_pmt),single_bytes));
        CHECK(cudaMalloc((double**)&(data[gpu_id].d_hit), single_bytes));
        CHECK(cudaMalloc((double**)&(data[gpu_id].d_result), single_bytes));
        // data[gpu_id].h_result_s = (double*)malloc(single_bytes);
        CHECK(cudaMemcpyAsync(data[gpu_id].d_pmt, (double*)(h_pmt+gpu_id*single_bytes/8), single_bytes, cudaMemcpyHostToDevice, data[gpu_id].stream));
        CHECK(cudaMemcpyAsync(data[gpu_id].d_hit, (double*)(h_hit+gpu_id*single_bytes/8), single_bytes, cudaMemcpyHostToDevice, data[gpu_id].stream));
    }
    // Copy host memory to the GPU (superseded by the per-stream copies above)
    // CHECK(cudaMemcpy(d_pmt, h_pmt, nBytes, cudaMemcpyHostToDevice));
    // CHECK(cudaMemcpy(d_hit, h_hit, nBytes, cudaMemcpyHostToDevice));
    // Use GPU number 0
    // CHECK(cudaSetDevice(0));
    // Choose the launch configuration: small slices get one 128-thread
    // block; larger slices scale the block count, switching to 1024-thread
    // blocks past 1024 elements.
    int threadPerBlock,blocksPerGrid;
    if (single_size<128)
    {
        threadPerBlock = 128;
        blocksPerGrid =1;
    }
    else if(single_size<1024)
    {
        threadPerBlock = 128;
        blocksPerGrid =int(ceil(single_size/(double)threadPerBlock));
    }
    else
    {
        threadPerBlock = 1024;
        blocksPerGrid =int(ceil(single_size/(double)threadPerBlock));
    }
    dim3 block(threadPerBlock);
    // block count
    dim3 grid(blocksPerGrid);//blocksPerGrid
    // cudaEventRecord(gpu_start);
    // Launch the kernel on every GPU and queue the async result copy back.
    for(int gpu_id = 0; gpu_id < GPU_num; gpu_id++)
    {
        cudaSetDevice(gpu_id);
        // Third launch parameter is 0: no dynamic shared memory per block.
        CDF_Sampling <<<grid, block, 0,data[gpu_id].stream >>>(data[gpu_id].d_pmt, data[gpu_id].d_hit, data[gpu_id].d_result, single_size,max_n,max_time,gpu_id);
        // CHECK(cudaMemcpyAsync(data[gpu_id].h_result_s, data[gpu_id].d_result, single_bytes, cudaMemcpyDeviceToHost,data[gpu_id].stream));
        CHECK(cudaMemcpyAsync((double*)(h_result+gpu_id*single_bytes/8),data[gpu_id].d_result,single_bytes,cudaMemcpyDeviceToHost,data[gpu_id].stream));
    }
    // cudaEventRecord(gpu_stop);
    // cudaEventSynchronize(gpu_stop); // synchronize: force the CPU to wait until the GPU event is set
    CHECK(cudaDeviceSynchronize());
    // Record and wait for each GPU's stop event so timings are complete.
    // NOTE(review): cudaSetDevice is not called in this loop, so the
    // records are issued while a different device is current — verify.
    for(int gpu_id = 0; gpu_id < GPU_num; gpu_id++)
    {
        cudaEventRecord(data[gpu_id].stop,data[gpu_id].stream);
        cudaEventSynchronize(data[gpu_id].stop);
    }
    // CHECK(cudaMemcpy(h_result, d_result, nBytes, cudaMemcpyDeviceToHost));
    // Wait for the streams to finish
    // for(int gpu_id = 0; gpu_id < GPU_num; gpu_id++)
    // {
    // CHECK(cudaStreamSynchronize(data[gpu_id].stream));
    // cudaEventRecord(data[gpu_id].stop,data[gpu_id].stream);
    // cudaEventSynchronize(data[gpu_id].stop);
    // }
    // for(int gpu_id = 0; gpu_id < GPU_num; gpu_id++)
    // {
    // memcpy(h_result+gpu_id*single_bytes,data[gpu_id].h_result_s,single_bytes);
    // }
    // cudaEventRecord(stop,0);
    // cudaEventSynchronize(stop);
    float total_time;
    // Elapsed time, ~0.5 us resolution; only GPU 0's events are used.
    cudaEventElapsedTime(&total_time, data[0].start, data[0].stop);
    // cudaEventDestroy(start);
    // cudaEventDestroy(stop);
    // printf("threadPerBlock:%d\n",threadPerBlock);
    // printf("blocksPerGrid;%d\n",blocksPerGrid);
    // printf("total use time %f ms\n", total_time);
    // cudaEventElapsedTime(&time, gpu_start, gpu_stop);
    // cudaEventDestroy(gpu_start);
    // cudaEventDestroy(gpu_stop);
    // printf("gpu use time %f ms\n", time);
    // printf("memory used: %d B\n", nBytes);
    // printf("memory used: %d kB\n", nBytes / 1024);
    // Free GPU memory and per-GPU stream/event resources.
    for(int gpu_id = 0; gpu_id < GPU_num; gpu_id++)
    {
        CHECK(cudaFree(data[gpu_id].d_pmt));
        CHECK(cudaFree(data[gpu_id].d_hit));
        CHECK(cudaFree(data[gpu_id].d_result));
        CHECK(cudaStreamDestroy(data[gpu_id].stream));
        CHECK(cudaEventDestroy(data[gpu_id].start));
        CHECK(cudaEventDestroy(data[gpu_id].stop));
    }
    CHECK(cudaDeviceReset());
    return total_time;
}
}
18,907
#include <iostream>
#include <cuda_runtime.h>
#include <unistd.h>

// Allocate a 4096*4096-double buffer on the device, report any allocation
// failure, hold the allocation for three seconds, then exit (the driver
// reclaims device memory at process teardown).
int main(int argc, char const *argv[])
{
    int *device = nullptr;
    const cudaError_t status =
        cudaMalloc((void **)&device, sizeof(double) * 4096 * 4096);
    if (status != cudaSuccess) {
        std::cout << "cudaMalloc returned error "
                  << cudaGetErrorString(status) << "\n";
    }
    sleep(3);
    std::cout << "time is over" << std::endl;
    return 0;
}
18,908
#include "includes.h"

// Initialize a 1-D sine wave sin(2*pi*x) sampled on tpoints grid points and
// advance it nsteps explicit time steps entirely in registers, writing only
// the final displacement to values_d[idx].
// Relies on BLOCK_SIZE and PI from includes.h — assumed to equal the
// launch's blockDim.x and 3.14159... respectively; TODO confirm.
// Threads with idx <= 1 or idx >= tpoints return without writing, so
// values_d[0] and values_d[1] are left untouched (presumably fixed
// boundary/ghost points — verify against the caller).
__global__ void init_and_update (float *values_d, int tpoints, int nsteps){
    int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    if(idx <= 1 || idx >= tpoints) return;
    float old_v, v, new_v;
    float x, tmp;
    tmp = tpoints - 1;
    // Normalized position of this point, using (idx - 1) as the 0-based
    // interior index.
    x = (float)(idx - 1) / tmp;
    v = sin(2.0f * PI * x);
    old_v = v;
    // Leapfrog-style update: new = 2v - old + 0.09*(-2v). No neighbour
    // coupling appears here, so each point evolves independently.
    for (int i = 1; i <= nsteps; i++){
        new_v = (2.0f * v) - old_v + (0.09f * (-2.0f * v));
        old_v = v;
        v = new_v;
    }
    values_d[idx] = v;
}
18,909
#include "includes.h"

// Copy every non-zero cell of the constant grid cptr into iptr.
// Expects a 2-D launch; each thread handles exactly one pixel of a grid
// whose row pitch is blockDim.x * gridDim.x.
__global__ void copy_const_kernel( float *iptr, const float *cptr ) {
    // map from threadIdx/BlockIdx to pixel position
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int offset = col + row * blockDim.x * gridDim.x;

    const float value = cptr[offset];
    if (value != 0) {
        iptr[offset] = value;
    }
}
18,910
#include <stdio.h>

// Print the block/thread coordinates from every thread of the launch.
__global__ void hello()
{
    printf("Hello world from block (%d,%d), thread (%d,%d,%d).\n",
           blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, threadIdx.z);
    __syncthreads();
}

int main(void)
{
    int devID;
    cudaDeviceProp p;
    cudaGetDevice(&devID);
    cudaGetDeviceProperties(&p, devID);
    printf("Running on device %d \"%s\" with capability %d.%d.\n",
           devID, p.name, p.major, p.minor);

    // Device-side printf requires compute capability 2.0 or newer.
    if (p.major < 2) {
        printf("Program incompatible with existing architecture; terminating.\n");
        return 1;
    }

    const dim3 dimGrid(2, 2);        // 4 blocks
    const dim3 dimBlock(2, 2, 2);    // 8 threads per block
    hello<<<dimGrid, dimBlock>>>();
    cudaDeviceSynchronize();         // flush device printf before exiting
    return 0;
}
18,911
//xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --warp-sync=16 --no-inline
//It should show only the values from B[0] to B[31], but it exceeds.
// NOTE(review): this is a GPU-verifier regression test (the xfail header
// above marks it as an expected failure) — the out-of-bounds accesses
// below are intentional and must not be "fixed".
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>

#define N 2//32//1024

// Rotate each 32-element warp slice of A left by one position.
// With N == 2 only two threads run, yet B[(tid + 1) % 32] can index up to
// A[31] — the deliberate out-of-bounds read the verifier should detect.
__global__ void shuffle (int* A)
{
    int tid = threadIdx.x;
    int warp = tid / 32;
    int* B = A + (warp*32);
    A[tid] = B[(tid + 1)%32];
}

int main() {
    int *a;
    int *dev_a;
    int size = N*sizeof(int);
    cudaMalloc((void**)&dev_a, size);
    // N*size over-allocates the host buffer (N*N ints); only N entries are
    // actually used — presumably slack left while the test was shrunk.
    a = (int*)malloc(N*size);
    for (int i = 0; i < N; i++)
        a[i] = i;
    cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
    printf("a: ");
    for (int i = 0; i < N; i++)
        printf("%d ", a[i]);
    shuffle<<<1,N>>>(dev_a);
    //ESBMC_verify_kernel(shuffle, 1, N, dev_a);
    cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
    printf("\nFunction Results:\n ");
    for (int i = 0; i < N; i++)
        printf("%d ", a[i]);
    free(a);
    cudaFree(dev_a);
    return 0;
}
18,912
// Offset of "row" k in the flattened enumeration of all n*(n-1)/2 index
// pairs (i, j) with i < j: f_gpu(k, n) = sum_{i=0}^{k-1} (n - i - 1).
__device__ int f_gpu(int k, int n) {
    int i, M;
    M = 0;
    for (i=0; i<k; i++) M = M + (n-i-1);
    return M;
}

// Incremental objective change (delta) of swapping positions i and j in
// permutation p, for a quadratic assignment problem with matrices a and b
// (both n x n, row-major). Evaluates the standard O(n) QAP swap-delta
// formula instead of recomputing the full objective.
__device__ int compute_delta_gpu(int* a, int* b, int* p, int i, int j, int n) {
    int d;
    int k;
    // Diagonal and (i, j) cross terms.
    d = ( a[i*n+i] - a[j*n+j] ) * ( b[ p[j]*n + p[j] ] - b[ p[i]*n + p[i] ] ) + ( a[i*n+j] - a[j*n+i] ) * ( b[ p[j]*n + p[i] ] - b[ p[i]*n + p[j] ] );
    // Interaction of the swapped pair with every other position k.
    for (k=0; k<n; k++)
        if (k != i && k != j)
            d = d +( a[k*n+i] - a[k*n+j] ) * ( b[ p[k]*n + p[j] ] - b[ p[k]*n + p[i] ] ) + ( a[i*n+k] - a[j*n+k] ) * ( b[ p[j]*n + p[k] ] - b[ p[i]*n + p[k] ] );
    return d;
}

// One thread per neighbour of the current solution: thread id enumerates
// the swap pair (i, j), i < j, and stores the swap's delta into
// voisin_device[id].
__global__ void main_gpu(int *voisin_device, int *a_device, int *b_device, int *solution_device, int n) {
    int id = blockIdx.x * blockDim.x + threadIdx.x; // I = id
    int k;
    int i,j;
    if (id < n*(n-1)/2) {
        // Recover (i, j) from the flat pair index I: i is the largest k
        // with f_gpu(k, n) <= id. (Linear search, and f_gpu is recomputed
        // from scratch each call — O(n^2) per thread; presumably acceptable
        // for the problem sizes used.)
        k = 0;
        while ( id >= f_gpu(k,n) ) k++;
        k--;
        i = k;
        j = id - f_gpu(k,n) + k + 1;
        // Compute this neighbour's delta and store it in voisin_device.
        voisin_device[id] = compute_delta_gpu(a_device, b_device, solution_device, i, j, n);
    }
}
18,913
#include "includes.h"

// Matrix transpose (nx x ny ints, row-major) through padded shared memory
// with 2-way unrolling: each thread moves two elements, BDIMX columns
// apart. Requires blockDim == (BDIMX, BDIMY); the +IPAD padding on the
// shared tile's row stride is there to avoid shared-memory bank conflicts
// on the column-major reads.
__global__ void transpose_smem_pad_unrolling(int * in, int* out, int nx, int ny)
{
    __shared__ int tile[BDIMY * (2 * BDIMX + IPAD)];

    //input index
    int ix, iy, in_index;

    //output index
    int i_row, i_col, _1d_index, out_ix, out_iy, out_index;

    //ix and iy calculation for input index
    // (each block covers a slab 2*blockDim.x columns wide)
    ix = 2 * blockDim.x * blockIdx.x + threadIdx.x;
    iy = blockDim.y * blockIdx.y + threadIdx.y;

    //input index
    in_index = iy * nx + ix;

    //1D index calculation for shared memory
    _1d_index = threadIdx.y * blockDim.x + threadIdx.x;

    //column-major row and col index calculation
    i_row = _1d_index / blockDim.y;
    i_col = _1d_index % blockDim.y;

    //coordinate for transpose matrix
    out_ix = blockIdx.y * blockDim.y + i_col;
    out_iy = 2 * blockIdx.x * blockDim.x + i_row;

    //output array access in row major format
    out_index = out_iy * ny + out_ix;

    if (ix < nx && iy < ny)
    {
        int row_idx = threadIdx.y * (2 * blockDim.x + IPAD) + threadIdx.x;

        //load from in array in row major and store to shared memory in row major
        // NOTE(review): the second element is read at in_index + BDIMX with
        // no (ix + BDIMX) < nx check — out of bounds unless nx is a
        // multiple of 2*BDIMX. Confirm the caller guarantees this.
        tile[row_idx] = in[in_index];
        tile[row_idx+ BDIMX] = in[in_index + BDIMX];

        //wait until all the threads load the values
        // NOTE(review): this barrier sits inside the divergent guard; it is
        // only well-defined if every thread of the block takes the same
        // branch (i.e. nx and ny are multiples of the tile dimensions).
        __syncthreads();

        int col_idx = i_col * (2 * blockDim.x + IPAD) + i_row;

        out[out_index] = tile[col_idx];
        out[out_index + ny* BDIMX] = tile[col_idx + BDIMX];
    }
}
18,914
// Modified from global reduction code in
// https://sodocumentation.net/cuda/topic/6566/parallel-reduction--e-g--how-to-sum-an-array-
// System includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>

#define BILLION 1000000000L

// Integrand for pi: the integral of 4/(1+x^2) over [0,1] equals pi.
// Note this cannot be executed on the CPU - just on the GPU
__device__ double f( double a )
{
    return (4.0 / (1.0 + a*a));
}

// Midpoint-rule estimate of pi using a single block: thread idx sums every
// blockSize-th slice of [0,1], partial sums are combined with a
// shared-memory tree reduction, and thread 0 writes the result to *piest.
// Assumes blockDim.x is a power of two (true for the 1024 used below) and
// at most 1024 (size of the shared array p).
__global__ void PiEstSingleBlock(long N, double *piest)
{
    int idx = threadIdx.x;
    int blockSize = blockDim.x; // We are exploiting the fact that there is just one thread group
    double h;
    double sum = 0.0;
    h = 1.0/(double)N;
    // Do the parallel partial sums for pi
    for (long i = idx+1; i <= N; i += blockSize)
        sum += f(h * ((double)i - 0.5));
    __shared__ double p[1024]; // The maximum number of threads is 1024
    // We could make this storage dynamic in size to parameterise
    // over the number of threads used.
    // Now add the partial sums together
    p[idx] = h*sum;
    __syncthreads();
    // Tree reduction: halve the active range each pass; the barrier is
    // outside the divergent if, so all threads reach it.
    for (int size = blockSize/2; size>0; size/=2) { //uniform
        if (idx<size)
            p[idx] += p[idx+size];
        __syncthreads();
    }
    if (idx == 0) *piest = p[0];
}

// Driver: run and time the single-block GPU estimate, then repeat the same
// midpoint quadrature serially on the CPU for comparison. *mypi is unified
// memory so the host can read it after the synchronize.
int main(void)
{
    struct timespec start , stop ; // variables for timing
    double accum ; // elapsed time variable
    const unsigned int blockSize=1024;
    dim3 numThreads;
    double pi25DT=3.141592653589793238462643;
    double x;
    double *mypi;
    long N = 1000000;
    double sum, h;
    h = 1.0/(double)N; // For CPU version of loop
    numThreads.x = blockSize;
    cudaMallocManaged(&mypi, sizeof(double));

    clock_gettime ( CLOCK_REALTIME ,&start );
    PiEstSingleBlock<<<1,numThreads>>>(N, mypi);
    cudaDeviceSynchronize(); // Cannot get sensible timing without synchronising host and device
    clock_gettime ( CLOCK_REALTIME ,&stop );
    accum =( stop.tv_sec - start.tv_sec )+ ( stop.tv_nsec - start.tv_nsec )/(double)BILLION ;

    printf("Pi estimate %.16f error is %.16f", mypi[0],pi25DT-mypi[0]);
    printf("\n");
    printf("Time to compute mypi is %lf sec.\n",accum);

    // CPU reference: identical midpoint rule, serial loop.
    clock_gettime ( CLOCK_REALTIME ,&start );
    sum = 0.0;
    for (long i=1; i <= N; i++){
        x = h * ((double)i - 0.5);
        sum += 4.0/(1.0+x*x);
    }
    clock_gettime ( CLOCK_REALTIME ,&stop );
    accum =( stop.tv_sec - start.tv_sec )+ ( stop.tv_nsec - start.tv_nsec )/(double)BILLION ;
    printf("CPU pi is %.16f error is %.16f\n",h*sum,pi25DT-h*sum);
    printf("Time to compute CPU pi is %lf sec.\n",accum);
}
18,915
#include <iostream>
#include <cuda.h>
using namespace std;

// Kernel: add 10 to each of the N elements of A. One thread per element
// with a bounds guard for the ragged final block.
__global__ void add(int *A, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i<N)
    {
        A[i] = A[i] + 10;
    }
}

int main(int argc,char **argv)
{
    // Validate the command line before touching argv[1] (the original
    // dereferenced it unconditionally and crashed when run without args).
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <n>\n";
        return 1;
    }
    int n = atoi(argv[1]);
    int nBytes = n*sizeof(int);
    cout<<"The size of data: "<< nBytes << "\n";

    int *a, *b;
    a = (int *)malloc(nBytes);
    b = (int *)malloc(nBytes);
    int *a_d;

    int block_size = 256;
    // Cover all n elements: the original hard-coded grid_size = 1024,
    // which (with the kernel's i < N guard) silently left every element
    // past 1024*256 = 262144 unprocessed for larger n.
    int grid_size = (n + block_size - 1) / block_size;

    for(int i=0;i<n;i++)
    {
        a[i]=i;
        b[i]=0;
    }

    cudaMalloc((void **)&a_d,n*sizeof(int));

    cout<<"Copying to device..\n";
    clock_t start_h2d=clock();
    cudaMemcpy(a_d,a,n*sizeof(int),cudaMemcpyHostToDevice);
    clock_t end_h2d=clock();

    cout<<"Starting kernel\n";
    clock_t start_d=clock();
    add<<<grid_size,block_size>>>(a_d,n);
    cudaDeviceSynchronize();
    clock_t end_d = clock();

    cout<<"Copying to host..\n";
    clock_t start_d2h=clock();
    cudaMemcpy(b,a_d,n*sizeof(int),cudaMemcpyDeviceToHost);
    clock_t end_d2h=clock();

    cudaError_t err = cudaGetLastError();
    if ( err != cudaSuccess )
        printf("CUDA Error: %s\n", cudaGetErrorString(err));

    double time_a = (double)(end_h2d-start_h2d)/CLOCKS_PER_SEC;
    double time_b = (double)(end_d-start_d)/CLOCKS_PER_SEC;
    double time_c = (double)(end_d2h-start_d2h)/CLOCKS_PER_SEC;
    cout<<"\nH2D time: "<<time_a<<" Kernel time: "<<time_b<<" D2H time: "<<time_c<< endl;

    cudaFree(a_d);
    free(a);
    free(b);
    return 0;
}
18,916
#include <stdio.h>
#include <stdlib.h>   /* added: rand() was used without a declaration */
#include <math.h>
#include <assert.h>

/* Fill E[0..N) with pseudo-random doubles in [1, 10]. Uses rand(), so the
 * sequence is controlled by srand() in the caller. */
void fillRandom(double *E, int N){
    int i;
    for(i=0; i<N; i++)
        E[i] = rand() % 10 + 1;
}

/* Sum of absolute element-wise differences between A and B (L1 error).
 * Fixed: the original called abs(), whose C version takes int — every
 * fractional difference would be truncated (and in C++ the resolved
 * overload depends on which headers leak into the global namespace).
 * fabs() is the unambiguous double absolute value. */
double eqseq(double *A, double *B, int N){
    int i;
    double err=0;
    for(i=0;i<N;i++)
        err += fabs(A[i]-B[i]);
    return err;
}

/* Product of the n entries of a (e.g. total element count of a shape). */
int prod(int a[], int n){
    int res = 1;
    int i;
    for(i=0;i<n;i++)
        res *= a[i];
    return res;
}

/* Row-major strides for an n-dimensional shape:
 * strides[i] = product of dims[i+1..n-1], strides[n-1] = 1.
 * Equivalent to the old dims2strides5d macro, generalized to any rank. */
void dims2strides(int dims[], int n, int strides[]){
    int i,j,z;
    for(i=0;i<n-1;i++){
        z=1;
        for(j=i+1;j<n;j++){
            z*=dims[j];
        }
        strides[i] = z;
    }
    strides[n-1]=1;
}

/* Output shape of an Nd pooling forward pass.
 * xDims:      input shape, [N, C, spatial...] (pdims spatial dims)
 * poolDims:   pooling window per spatial dim (must be >= the stride)
 * poolPad:    zero padding per spatial dim
 * poolStride: stride per spatial dim
 * yDims:      output shape (written); batch and channel dims pass through,
 *             each spatial dim becomes 1 + ceil((x + 2*pad - window)/stride). */
void getPoolingNdForwardOutputDim( int xDims[], int pdims, int poolDims[], int poolPad[], int poolStride[], int yDims[] ){
    int i;
    /* A window smaller than the stride would skip input elements. */
    for(i=0;i<pdims;i++)
        assert(poolDims[i]>=poolStride[i]);
    yDims[0] = xDims[0];
    yDims[1] = xDims[1]; // N K (C)
    for(i=0;i<pdims;i++){
        yDims[i+2] = 1+ceil((xDims[i+2]+2*poolPad[i]-poolDims[i])/(double)poolStride[i]);
    }
}
18,917
#include <iostream>
#include <cuda.h>

constexpr int N = 10;

// Tutorial kernel: element-wise c = a + b with one block per element and a
// single thread per block, so blockIdx.x alone selects the element and no
// explicit index needs to be passed in.
__global__ void add_vectors( double* a, double* b, double* c)
{
    const int thread_id = blockIdx.x;
    c[thread_id] = a[thread_id] + b[thread_id];
}

// Tutorial driver: build host arrays (a = 3, b = 5, c = 0), mirror them on
// the device, launch add_vectors with N blocks of one thread, copy the
// result back, print it, and release both host and device memory.
int main(int, char**)
{
    std::cout << "Setting a=3, b=5, c=0\n";
    auto a = new double[N];
    auto b = new double[N];
    auto c = new double[N];
    for (int i=0; i<N; i++) {
        a[i] = 3.0;
        b[i] = 5.0;
        c[i] = 0.0;
    }

    // Device-side mirrors of a, b and c.
    double* device_a;
    double* device_b;
    double* device_c;
    cudaMalloc(&device_a, N * sizeof(double));
    cudaMalloc(&device_b, N * sizeof(double));
    cudaMalloc(&device_c, N * sizeof(double));

    // Device allocations start uninitialized — copy the host values over.
    cudaMemcpy(device_a, a, N * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, N * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_c, c, N * sizeof(double), cudaMemcpyHostToDevice);

    // <<<blocks-per-grid, threads-per-block>>>: N blocks, 1 thread each,
    // matching the kernel's one-element-per-block indexing above.
    add_vectors<<<N, 1>>>(device_a, device_b, device_c);

    // Blocking copy back to the host; an ordinary cudaMemcpy on the default
    // stream also waits for the preceding kernel to finish.
    cudaMemcpy(c, device_c, N * sizeof(double), cudaMemcpyDeviceToHost);

    for (int i=0; i<N; i++) {
        std::cout << "c["<<i<<"] = " << c[i] << "\n";
    }

    delete[] a;
    delete[] b;
    delete[] c;

    // Memory was allocated in two places, so it is freed in two places:
    // delete[] for the host, cudaFree for the device.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);

    return 0;
}
18,918
// Include packages and also CUDA packages
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Result from last compute of world.
extern unsigned char *g_resultData;

// Current state of world.
extern unsigned char *g_data;

// ----- SAVE RECEIVING ROWS FROM OTHER GPUS ----- //
// Ghost row received from the rank above this one.
extern unsigned char *g_aboveRow;
// Ghost row received from the rank below this one.
extern unsigned char *g_belowRow;
// Result-side counterparts of the ghost rows (declared, unused here).
extern unsigned char *g_resultAboveRow;
extern unsigned char *g_resultBelowRow;

// ----- DECLARE KERNEL ----- //
// NOTE(review): this 2-argument declaration does not match the 6-argument
// HL_kernel defined below — in C++ it declares a distinct overload that is
// never defined. Confirm nothing references it before removing.
__global__ void HL_kernel(unsigned int worldWidth, unsigned int worldHeight);

// Define number of Processors
int cudaDeviceCount;
cudaError_t cE;

// Allocate the managed world buffers (current + result) and both ghost
// rows, all zeroed. myrank/cudaDeviceCount are unused; the parameters keep
// the signature parallel with the other initializers.
static inline void HL_initAllZeros(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
}

// Allocate the buffers, then set every world cell and both ghost rows to 1.
static inline void HL_initAllOnes(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    int i;
    // set all rows of world to true
    for( i = 0; i < total_world_size; i++)
    {
        g_data[i] = 1;
        // Set above and below rows
        if (i < worldWidth){
            g_aboveRow[i] = 1;
            g_belowRow[i] = 1;
        }
    }
}

// Allocate the buffers, then set 11 cells (columns 128..138) to 1.
// NOTE(review): despite the name, the cells are placed in the LAST row
// (offset worldWidth*(worldHeight-1)), and worldWidth >= 139 is assumed —
// confirm both against the original assignment spec.
static inline void HL_initOnesInMiddle(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    int i;
    for(i = worldWidth * (worldHeight - 1) + 128; i < worldWidth * (worldHeight - 1) + 139; i++){
        g_data[i] = 1;
    }
}

// Allocate the buffers, then set the four global corners: rank 0 owns the
// top corners (mirrored into its above ghost row), the last rank owns the
// bottom corners (mirrored into its below ghost row).
static inline void HL_initOnesAtCorners(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    if(myrank == 0){
        g_data[0] = 1; // upper left
        g_data[worldWidth-1]=1; // upper right
        g_aboveRow[0] = 1; // upper left
        g_aboveRow[worldWidth-1]=1; // upper right
    }
    if(myrank == cudaDeviceCount - 1){
        // NOTE(review): worldHeight*(worldWidth-1) is only the start of the
        // last row when the world is square; for non-square worlds the
        // intended offset is presumably worldWidth*(worldHeight-1) —
        // confirm before relying on non-square sizes.
        g_data[(worldHeight * (worldWidth-1))]=1; // lower left
        g_data[(worldHeight * (worldWidth-1)) + worldWidth-1]=1; // lower right
        g_belowRow[0] = 1;
        g_belowRow[worldWidth - 1] = 1;
    }
}

// Allocate the buffers, then place the three-cell "spinner" seed in the
// top-left/top-right corner region of rank 0 (mirrored into its above
// ghost row).
static inline void HL_initSpinnerAtCorner(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    if( myrank == 0 ){
        g_data[0] = 1; // upper left
        g_data[1] = 1; // upper left +1
        g_data[worldWidth-1]=1; // upper right
        g_aboveRow[0] = 1; // upper left
        g_aboveRow[1] = 1; // upper left +1
        g_aboveRow[worldWidth-1]=1; // upper right
    }
}

// Allocate the buffers, then place the HighLife replicator seed (an
// L-shaped arrangement of six cells) around the center of the world.
// NOTE(review): unlike the other patterns, this one is written by every
// rank regardless of myrank — confirm that is intended.
static inline void HL_initReplicator(size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    size_t total_world_size = worldWidth * worldHeight;
    // Initialize the data
    cudaMallocManaged(&g_data, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the resulting data
    cudaMallocManaged(&g_resultData, (total_world_size * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (total_world_size * sizeof(unsigned char)));
    // Initialize the above row
    cudaMallocManaged(&g_aboveRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_aboveRow, 0, (worldWidth * sizeof(unsigned char)));
    // Initialize the below row
    cudaMallocManaged(&g_belowRow, (worldWidth * sizeof(unsigned char)));
    cudaMemset(g_belowRow, 0, (worldWidth * sizeof(unsigned char)));
    size_t x, y;
    x = worldWidth/2;
    y = worldHeight/2;
    g_data[x + y*worldWidth + 1] = 1;
    g_data[x + y*worldWidth + 2] = 1;
    g_data[x + y*worldWidth + 3] = 1;
    g_data[x + (y+1)*worldWidth] = 1;
    g_data[x + (y+2)*worldWidth] = 1;
    g_data[x + (y+3)*worldWidth] = 1;
}

// ---------- EXPORT TO APPROPRIATE COMPILER ---------- //
// Bind this MPI rank to a CUDA device (round-robin over the device count)
// and dispatch to the requested initial pattern.
// NOTE(review): the cudaDeviceCount parameter shadows the file-scope
// global of the same name; cudaGetDeviceCount overwrites only the
// parameter, leaving the global untouched — confirm that is intended.
extern "C" void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int cudaDeviceCount )
{
    // INITIALIZE THE CUDA WORLD
    if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
    {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
        exit(-1);
    }
    if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess )
    {
        printf(" Unable to have myrank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }
    // INITIALIZE THE PATTERN
    switch(pattern)
    {
    case 0:
        HL_initAllZeros( worldWidth, worldHeight, myrank, cudaDeviceCount );
        break;
    case 1:
        HL_initAllOnes( worldWidth, worldHeight, myrank, cudaDeviceCount );
        break;
    case 2:
        HL_initOnesInMiddle( worldWidth, worldHeight, myrank, cudaDeviceCount );
        break;
    case 3:
        HL_initOnesAtCorners( worldWidth, worldHeight, myrank, cudaDeviceCount );
        break;
    case 4:
        HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, cudaDeviceCount );
        break;
    case 5:
        HL_initReplicator( worldWidth, worldHeight, myrank, cudaDeviceCount );
        break;
    default:
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}

// MAIN KERNEL FUNCTION THAT DOES ALL OF THE WORK
// One HighLife step (survive on 2-3 neighbours, born on 3 or 6): each
// grid-stride `index` processes one full ROW of this rank's slab, with
// columns wrapping horizontally. The first/last world rows take their
// missing vertical neighbours from the above/below ghost rows.
// NOTE(review): the loop bound is worldWidth*worldHeight although `index`
// is used as a row index (y1 = index * worldWidth); any index >=
// worldHeight addresses memory far past the world. The bound should
// almost certainly be worldHeight — confirm against the launch config.
__global__ void HL_kernel( unsigned char* d_data, unsigned char* d_resultData, unsigned char* d_aboveRow, unsigned char* d_belowRow, unsigned int worldWidth, unsigned int worldHeight){
    // Store index value
    size_t index;
    // Loop over the threads
    for(index = blockIdx.x * blockDim.x + threadIdx.x; index < worldWidth*worldHeight; index += blockDim.x * gridDim.x){
        // Row offsets of the previous, current, and next rows, wrapping
        // vertically within this rank's slab.
        int y0 = ((index + worldHeight - 1) % worldHeight) * worldWidth;
        int y1 = index * worldWidth;
        int y2 = ((index + 1) % worldHeight) * worldWidth;
        // Get the current block and thread
        int x;
        // Loop over corresponding COLUMNS
        for (x = 0; x < worldWidth; ++x){
            // Set current column, left column, and right column (wrapping)
            int x1 = x;
            int x0 = (x1 + worldWidth - 1) % worldWidth;
            int x2 = (x1 + 1) % worldWidth;
            // Get the status of the current cell to determine logic of life span
            int is_alive = d_data[x1+y1];
            // Count the number of alive neighbors
            int num_alive = 0;
            // Check above and below row cases
            if (x1+y1 < worldWidth) {
                // First world row: top neighbours come from the above ghost row.
                num_alive = d_aboveRow[x0] + d_aboveRow[x1] + d_aboveRow[x2] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
            }
            else if (x1+y1 > worldWidth*worldHeight - worldWidth - 1) {
                // Last world row: bottom neighbours come from the below ghost row.
                num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_belowRow[x0] + d_belowRow[x1] + d_belowRow[x2];
            }
            else {
                // Interior row: all eight neighbours come from d_data.
                num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0] + d_data[x0+y1] + d_data[x2+y1] + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
            }
            // Logic for updating values
            if (is_alive == 1){
                // Cell is alive!
                if (num_alive < 2){
                    // Underpopulated
                    d_resultData[x1+y1] = 0;
                }
                else if (num_alive == 2 || num_alive == 3){
                    // Just the right amount of neighbors
                    d_resultData[x1+y1] = 1;
                }
                else {
                    // Overpopulated
                    d_resultData[x1+y1] = 0;
                }
            }
            else {
                // Cell is dead :(
                if (num_alive == 3 || num_alive == 6) {
                    // HighLife birth rule: 3 or 6 neighbours resurrect a cell.
                    d_resultData[x1+y1] = 1;
                }
                else {
                    // We stay dead
                    d_resultData[x1+y1] = 0;
                }
            }// End logic for staying dead
        } // End x loop
    } // End loop over each thread

    // ----- SWAP DATA IN ABOVE ROWS AND BELOW ROWS ----- //
    // NOTE(review): every thread of every block runs this copy, redundantly
    // rewriting the ghost rows and racing with the result writes above (no
    // grid-wide barrier separates them). A single-thread guard plus a
    // grid-level synchronization would be needed for this to be
    // well-defined — confirm against how the host consumes these rows.
    int j;
    for(j = 0; j < worldWidth; j++){
        d_aboveRow[j] = d_resultData[j];
        d_belowRow[j] = d_resultData[j + worldWidth*(worldHeight - 1)];
    }
    // Block-local barrier only; it does not order work across blocks.
    __syncthreads();
}

// LAUNCH KERNEL FUNCTION
// Run one HighLife step and block until the device has finished.
extern "C" void HL_kernelLaunch( unsigned char** d_data, unsigned char** d_resultData, unsigned char** d_aboveRow, unsigned char** d_belowRow, int block_count, int thread_count, unsigned int worldWidth, unsigned int worldHeight, int myrank){
    // Call the kernel
    HL_kernel<<<block_count,thread_count>>>(*d_data, *d_resultData, *d_aboveRow, *d_belowRow, worldWidth, worldHeight);
    // Synchronize the CUDA devices
    cudaDeviceSynchronize();
}

// Free memory allocated by the HL_init* functions (myrank is unused).
extern "C" void freeCudaArrays(int myrank){
    cudaFree(g_data);
    cudaFree(g_resultData);
    cudaFree(g_aboveRow);
    cudaFree(g_belowRow);
}
18,919
#include "includes.h"

// 256-bin histogram of an 8-bit image using a per-block shared-memory
// histogram that is flushed to the global result with one atomicAdd per bin.
// Expects a 2D launch covering the height x width image.
//
// Fix: in the original, the final __syncthreads() and the shared->global
// flush were inside the in-bounds branch. For blocks straddling the image
// edge, out-of-bounds threads skipped the barrier (undefined behavior) and
// in-bounds low-index threads could be the ones skipped, losing their bins.
// Barriers and the flush must be reached by ALL threads of the block.
__global__ void gpu_histo_kernel_shared(u_char* Source, int *res, unsigned height, unsigned width){
    __shared__ int hist[256];
    int j = blockIdx.x*blockDim.x + threadIdx.x;  // column
    int i = blockIdx.y*blockDim.y + threadIdx.y;  // row
    // Flat thread id within the block, used to address the 256 bins.
    // NOTE(review): threadIdx.x * BLOCKDIM_X + threadIdx.y is only a bijection
    // over the block if BLOCKDIM_X == blockDim.y — confirm against includes.h.
    int index = threadIdx.x * BLOCKDIM_X + threadIdx.y;

    // Zero the shared histogram (first 256 threads of the block).
    if( index < 256) {
        hist[index] = 0;
    }
    __syncthreads();

    // Accumulate this thread's pixel, if it falls inside the image.
    if ( !((i<0)||(i>=height) || (j<0) || (j>=width)) ) {
        atomicAdd(&hist[Source[i*width+j]], 1);
    }

    // All threads (in-bounds or not) must reach the barrier and the flush.
    __syncthreads();
    if( index < 256)
        atomicAdd(&res[index], hist[index]);
}
18,920
#include "includes.h"

// Element-wise in-place addition: in[i] += scalar for i in [0, n).
// Grid-stride loop, so any launch configuration covers the whole array.
__global__ void _cuda_add_scalar(int *in, int scalar, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        in[idx] += scalar;
    }
}
18,921
#include <cuda.h> #include <cuda_runtime.h> #include <assert.h> #include <iostream> #include <vector> #include <math.h> void initKernels() { int gpu_count; cudaGetDeviceCount(&gpu_count); assert(gpu_count>0); assert(cudaSetDevice(0) == cudaSuccess); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); std::cout << ">>> Cuda limits: threads(" << prop.maxThreadsPerBlock << ") " << "threads dim(" << prop.maxThreadsDim[0] << "," << prop.maxThreadsDim[1] << "," << prop.maxThreadsDim[2] << ") " << std::endl; } void destroyKernels() { } void* allocDevMemory(size_t size) { void *mem; if(!size) return nullptr; assert(cudaMalloc(&mem,size) == cudaSuccess); return mem; } void freeDevMemory(void *mem) { if(!mem) return; assert(cudaStreamSynchronize(0) == cudaSuccess); assert(cudaFree(mem) == cudaSuccess); } void copyToDevMemory(void *dst, void *src, size_t size) { assert(cudaMemcpy(dst,src,size,cudaMemcpyHostToDevice) == cudaSuccess); assert(cudaStreamSynchronize(0) == cudaSuccess); } void synchronize() { assert(cudaStreamSynchronize(0) == cudaSuccess); } void copyToHostMemory(void *dst, void *src, size_t size) { assert(cudaStreamSynchronize(0) == cudaSuccess); assert(cudaMemcpy(dst,src,size,cudaMemcpyDeviceToHost) == cudaSuccess); } static __inline__ __device__ double central_difference( double phi1, double phi2, double dx ) { return ( (phi2 - phi1) / ( 2.0 * dx ) ); } static __inline__ __device__ double boundary_difference( double phi1, double phi2, double dx ) { return ( (phi2 - phi1) / dx ); } static __inline__ __device__ int ceil_index( int ny, int nz, int x, int y, int z ) { return x*ny*nz+y*nz+z; } static __global__ void kernel_field_solver_eval_fields_from_potential( int spat_mesh_x_n_nodes, int spat_mesh_y_n_nodes, int spat_mesh_z_n_nodes, double spat_mesh_x_cell_size, double spat_mesh_y_cell_size, double spat_mesh_z_cell_size, const double *spat_mesh_potential, double *spat_mesh_electric_field ) { int nx = spat_mesh_x_n_nodes; int ny = spat_mesh_y_n_nodes; int nz 
= spat_mesh_z_n_nodes; double dx = spat_mesh_x_cell_size; double dy = spat_mesh_y_cell_size; double dz = spat_mesh_z_cell_size; const double *phi = spat_mesh_potential; double ex, ey, ez; int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; if(i>=nx || j>=ny) return; for ( int k = 0; k < nz; k++ ) { if ( i == 0 ) { ex = - boundary_difference( phi[ceil_index(ny,nz,i,j,k)], phi[ceil_index(ny,nz,i+1,j,k)], dx ); } else if ( i == nx-1 ) { ex = - boundary_difference( phi[ceil_index(ny,nz,i-1,j,k)], phi[ceil_index(ny,nz,i,j,k)], dx ); } else { ex = - central_difference( phi[ceil_index(ny,nz,i-1,j,k)], phi[ceil_index(ny,nz,i+1,j,k)], dx ); } if ( j == 0 ) { ey = - boundary_difference( phi[ceil_index(ny,nz,i,j,k)], phi[ceil_index(ny,nz,i,j+1,k)], dy ); } else if ( j == ny-1 ) { ey = - boundary_difference( phi[ceil_index(ny,nz,i,j-1,k)], phi[ceil_index(ny,nz,i,j,k)], dy ); } else { ey = - central_difference( phi[ceil_index(ny,nz,i,j-1,k)], phi[ceil_index(ny,nz,i,j+1,k)], dy ); } if ( k == 0 ) { ez = - boundary_difference( phi[ceil_index(ny,nz,i,j,k)], phi[ceil_index(ny,nz,i,j,k+1)], dz ); } else if ( k == nz-1 ) { ez = - boundary_difference( phi[ceil_index(ny,nz,i,j,k-1)], phi[ceil_index(ny,nz,i,j,k)], dz ); } else { ez = - central_difference( phi[ceil_index(ny,nz,i,j,k-1)], phi[ceil_index(ny,nz,i,j,k+1)], dz ); } spat_mesh_electric_field[ceil_index(ny,nz,i,j,k)*3+0] = ex; spat_mesh_electric_field[ceil_index(ny,nz,i,j,k)*3+1] = ey; spat_mesh_electric_field[ceil_index(ny,nz,i,j,k)*3+2] = ez; } } void run_kernel_field_solver_eval_fields_from_potential( int spat_mesh_x_n_nodes, int spat_mesh_y_n_nodes, int spat_mesh_z_n_nodes, double spat_mesh_x_cell_size, double spat_mesh_y_cell_size, double spat_mesh_z_cell_size, const double *spat_mesh_potential, double *spat_mesh_electric_field ) { dim3 block(16,16); dim3 grid((spat_mesh_x_n_nodes+15)/16,(spat_mesh_y_n_nodes+15)/16); kernel_field_solver_eval_fields_from_potential 
<<<grid,block>>>(spat_mesh_x_n_nodes, spat_mesh_y_n_nodes, spat_mesh_z_n_nodes,
                 spat_mesh_x_cell_size, spat_mesh_y_cell_size, spat_mesh_z_cell_size,
                 spat_mesh_potential, spat_mesh_electric_field);
    assert(cudaGetLastError() == cudaSuccess);
}

// One Jacobi half-step: phi_next = weighted sum of the six neighbours of
// phi_current plus the charge term, at INNER nodes only. Launched over an
// (x, y) grid; each thread walks the z axis.
// Fix: the old guard (i>=nx || j>=ny) let i==0, i==nx-1, j==0, j==ny-1
// through, so the i-1 / i+1 / j-1 / j+1 neighbour reads went out of bounds.
// Boundary nodes are written separately by set_phi_next_at_boundaries.
static __global__ void kernel_field_solver_compute_phi_next_at_inner_points(
    int nx, int ny, int nz,
    double dxdxdydy, double dxdxdzdz, double dydydzdz, double dxdxdydydzdz,
    double denom, double m_pi,
    const double *spat_mesh_charge_density,
    const double *phi_current,
    double *phi_next )
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int j = blockIdx.y*blockDim.y+threadIdx.y;
    if( i < 1 || i >= nx - 1 || j < 1 || j >= ny - 1 ) return;
    for ( int k = 1; k < nz - 1; k++ ) {
        // Accumulate in a register; one global store per node.
        double v = ( phi_current[ceil_index(ny,nz,i-1,j,k)]
                   + phi_current[ceil_index(ny,nz,i+1,j,k)] ) * dydydzdz;
        v += ( phi_current[ceil_index(ny,nz,i,j-1,k)]
             + phi_current[ceil_index(ny,nz,i,j+1,k)] ) * dxdxdzdz;
        v += ( phi_current[ceil_index(ny,nz,i,j,k-1)]
             + phi_current[ceil_index(ny,nz,i,j,k+1)] ) * dxdxdydy;
        v += 4.0 * m_pi * spat_mesh_charge_density[ceil_index(ny,nz,i,j,k)] * dxdxdydydzdz;
        phi_next[ceil_index(ny,nz,i,j,k)] = v / denom;
    }
}

// Host wrapper: precomputes the Jacobi weights and launches the inner-point
// update over a 16x16 2D grid covering the (x, y) node planes.
void run_kernel_field_solver_compute_phi_next_at_inner_points(
    int spat_mesh_x_n_nodes, int spat_mesh_y_n_nodes, int spat_mesh_z_n_nodes,
    double dx, double dy, double dz,
    const double *spat_mesh_charge_density,
    const double *phi_current,
    double *phi_next )
{
    double dxdxdydy = dx * dx * dy * dy;
    double dxdxdzdz = dx * dx * dz * dz;
    double dydydzdz = dy * dy * dz * dz;
    double dxdxdydydzdz = dx * dx * dy * dy * dz * dz;
    double denom = 2 * ( dxdxdydy + dxdxdzdz + dydydzdz );
    dim3 block(16,16);
    dim3 grid((spat_mesh_x_n_nodes+15)/16,(spat_mesh_y_n_nodes+15)/16);
    kernel_field_solver_compute_phi_next_at_inner_points
        <<<grid,block>>>(spat_mesh_x_n_nodes, spat_mesh_y_n_nodes, spat_mesh_z_n_nodes,
                         dxdxdydy, dxdxdzdz, dydydzdz, dxdxdydydzdz, denom,
                         3.14159265358979323846,
                         spat_mesh_charge_density, phi_current, phi_next);
    assert(cudaGetLastError() == cudaSuccess);
}

// Copy the fixed x-boundary planes (i == 0 and i == nx-1) from phi_current.
// Fix: the guard was `k>nz`, letting k == nz through and writing out of bounds.
static __global__ void kernel_field_solver_set_phi_next_at_boundaries_ny_nz(
    int nx, int ny, int nz, const double *phi_current, double *phi_next)
{
    int j = blockIdx.x*blockDim.x+threadIdx.x;
    int k = blockIdx.y*blockDim.y+threadIdx.y;
    if(j>=ny || k>=nz) return;
    phi_next[ceil_index(ny,nz,0,j,k)] = phi_current[ceil_index(ny,nz,0,j,k)];
    phi_next[ceil_index(ny,nz,nx-1,j,k)] = phi_current[ceil_index(ny,nz,nx-1,j,k)];
}

// Copy the fixed y-boundary planes (j == 0 and j == ny-1) from phi_current.
// Fix: same `k>nz` off-by-one as above.
static __global__ void kernel_field_solver_set_phi_next_at_boundaries_nx_nz(
    int nx, int ny, int nz, const double *phi_current, double *phi_next)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int k = blockIdx.y*blockDim.y+threadIdx.y;
    if(i>=nx || k>=nz) return;
    phi_next[ceil_index(ny,nz,i,0,k)] = phi_current[ceil_index(ny,nz,i,0,k)];
    phi_next[ceil_index(ny,nz,i,ny-1,k)] = phi_current[ceil_index(ny,nz,i,ny-1,k)];
}

// Copy the fixed z-boundary planes (k == 0 and k == nz-1) from phi_current.
static __global__ void kernel_field_solver_set_phi_next_at_boundaries_nx_ny(
    int nx, int ny, int nz, const double *phi_current, double *phi_next)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int j = blockIdx.y*blockDim.y+threadIdx.y;
    if(i>=nx || j>=ny) return;
    phi_next[ceil_index(ny,nz,i,j,0)] = phi_current[ceil_index(ny,nz,i,j,0)];
    phi_next[ceil_index(ny,nz,i,j,nz-1)] = phi_current[ceil_index(ny,nz,i,j,nz-1)];
}

// Host wrapper: re-applies all six Dirichlet boundary planes to phi_next.
void run_kernel_field_solver_set_phi_next_at_boundaries(int nx, int ny,
    int nz, const double *phi_current, double *phi_next)
{
    dim3 block(16,16);
    dim3 grid((ny+15)/16,(nz+15)/16);
    kernel_field_solver_set_phi_next_at_boundaries_ny_nz<<<grid,block>>>(nx,ny,nz,phi_current,phi_next);
    assert(cudaGetLastError() == cudaSuccess);
    grid = dim3((nx+15)/16,(nz+15)/16);
    kernel_field_solver_set_phi_next_at_boundaries_nx_nz<<<grid,block>>>(nx,ny,nz,phi_current,phi_next);
    assert(cudaGetLastError() == cudaSuccess);
    grid = dim3((nx+15)/16,(ny+15)/16);
    kernel_field_solver_set_phi_next_at_boundaries_nx_ny<<<grid,block>>>(nx,ny,nz,phi_current,phi_next);
    assert(cudaGetLastError() == cudaSuccess);
}

// Pin the potential of each inner-region node (one thread per node).
// nodes is packed as (i, j, k) triples; potential[i] is the value to impose.
static __global__ void kernel_field_solver_set_phi_next_at_inner_regions(
    const int *nodes, double *phi_next, int reg_count, int nx, int ny, int nz,
    const double *potential)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(reg_count <= i) return;
    // todo: mark nodes at edge during construction
    // if (!node.at_domain_edge( nx, ny, nz )) {
    phi_next[ceil_index(ny,nz,nodes[i*3],nodes[i*3+1],nodes[i*3+2])] = potential[i];
    // }
}

// Host wrapper for the inner-region pinning kernel (256 threads per block).
void run_kernel_field_solver_set_phi_next_at_inner_regions(
    const int *nodes, double *phi_next, int reg_count, int nx, int ny, int nz,
    const double *potential)
{
    dim3 block(256);
    dim3 grid((reg_count+255)/256);
    kernel_field_solver_set_phi_next_at_inner_regions<<<grid,block>>>(nodes,phi_next,reg_count,nx,ny,nz,potential);
    assert(cudaGetLastError() == cudaSuccess);
}

// Per-z-slice maxima of |phi_next - phi_current| (absolute and relative).
// NOTE(review): many (i,j) threads read-modify-write diff[k] / rel_diff[k]
// without atomics, and the `!k` term makes every thread unconditionally
// overwrite slot 0 — the reported maxima are nondeterministic and need a
// proper reduction (double atomicMax via CAS, or CUB). Also `rd` divides by
// |phi_current| which may be zero. Left as-is pending a real reduction.
static __global__ void kernel_feld_solver_iterative_Jacobi_solutions_converged(
    const double *phi_current, const double *phi_next, double *diff, double *rel_diff,
    int nx, int ny, int nz)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int j = blockIdx.y*blockDim.y+threadIdx.y;
    if(i>=nx || j>=ny) return;
    for ( int k = 0; k < nz; k++ ) {
        double d = abs( phi_next[ceil_index(ny,nz,i,j,k)] - phi_current[ceil_index(ny,nz,i,j,k)] );
        double rd = d / abs( phi_current[ceil_index(ny,nz,i,j,k)] );
        if(!k || d>diff[k]) diff[k] = d;
        if(!k || rd>rel_diff[k]) rel_diff[k] = rd;
    }
}

bool
run_field_solver_iterative_Jacobi_solutions_converged(
    const double *phi_current, const double *phi_next,
    double *diff, double *rel_diff, int nx, int ny, int nz)
{
    // todo: bind tol to config parameters
    // abs_tolerance = std::max( dx * dx, std::max( dy * dy, dz * dz ) ) / 5;
    double abs_tolerance = 1.0e-5;
    double rel_tolerance = 1.0e-12;
    dim3 block(16,16);
    dim3 grid((nx+15)/16,(ny+15)/16);
    kernel_feld_solver_iterative_Jacobi_solutions_converged
        <<<grid,block>>> (phi_current, phi_next, diff, rel_diff, nx, ny, nz);
    assert(cudaGetLastError() == cudaSuccess);
    // Pull the per-z-slice maxima back to the host and test every slice
    // against both tolerances; any violation means "not converged yet".
    std::vector<double> maximum_diff, maximum_rel_diff;
    maximum_diff.resize(nz);
    maximum_rel_diff.resize(nz);
    copyToHostMemory(maximum_diff.data(), diff, sizeof(double)*nz);
    copyToHostMemory(maximum_rel_diff.data(), rel_diff, sizeof(double)*nz);
    for(int i=0;i<nz;i++) {
        if ( maximum_diff[i] > abs_tolerance || maximum_rel_diff[i] > rel_tolerance ){
            return false;
        }
    }
    return true;
}

// Eval charge density on grid
// Scatter each particle's charge to the 8 surrounding mesh nodes using
// cloud-in-cell (trilinear) weighting. sources[i] is packed as 4 doubles:
// { charge, x, y, z }. One thread per particle.
// Fix: the 8 density updates were plain `+=`; different particles mapping to
// the same node raced and lost charge. Use atomicAdd — note that atomicAdd
// on double requires SM60+ (Pascal or newer).
static __global__ void kernel_particle_to_mesh_map_weight_particles_charge_to_mesh(
    int spat_mesh_x_n_nodes, int spat_mesh_y_n_nodes, int spat_mesh_z_n_nodes,
    double spat_mesh_x_cell_size, double spat_mesh_y_cell_size, double spat_mesh_z_cell_size,
    double *spat_mesh_charge_density,
    const double *sources, int source_size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if( i >= source_size ) return;

    int ny = spat_mesh_y_n_nodes;
    int nz = spat_mesh_z_n_nodes;
    double dx = spat_mesh_x_cell_size;
    double dy = spat_mesh_y_cell_size;
    double dz = spat_mesh_z_cell_size;
    double volume_around_node = dx * dy * dz;
    // Charge contribution per unit weight, hoisted out of the 8 updates.
    double q_over_v = sources[i*4] / volume_around_node;

    // 'tlf' = 'top_left_far' node of the cell containing the particle.
    double x_in_grid_units = sources[i*4+1] / dx;
    int tlf_i = ceil( x_in_grid_units );
    double tlf_x_weight = 1.0 - ( tlf_i - x_in_grid_units );
    double y_in_grid_units = sources[i*4+2] / dy;
    int tlf_j = ceil( y_in_grid_units );
    double tlf_y_weight = 1.0 - ( tlf_j - y_in_grid_units );
    double z_in_grid_units = sources[i*4+3] / dz;
    int tlf_k = ceil( z_in_grid_units );
    double tlf_z_weight = 1.0 - ( tlf_k - z_in_grid_units );

    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i,  tlf_j,  tlf_k  )],
               tlf_x_weight * tlf_y_weight * tlf_z_weight * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i-1,tlf_j,  tlf_k  )],
               ( 1.0 - tlf_x_weight ) * tlf_y_weight * tlf_z_weight * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i,  tlf_j-1,tlf_k  )],
               tlf_x_weight * ( 1.0 - tlf_y_weight ) * tlf_z_weight * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i-1,tlf_j-1,tlf_k  )],
               ( 1.0 - tlf_x_weight ) * ( 1.0 - tlf_y_weight ) * tlf_z_weight * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i,  tlf_j,  tlf_k-1)],
               tlf_x_weight * tlf_y_weight * ( 1.0 - tlf_z_weight ) * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i-1,tlf_j,  tlf_k-1)],
               ( 1.0 - tlf_x_weight ) * tlf_y_weight * ( 1.0 - tlf_z_weight ) * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i,  tlf_j-1,tlf_k-1)],
               tlf_x_weight * ( 1.0 - tlf_y_weight ) * ( 1.0 - tlf_z_weight ) * q_over_v );
    atomicAdd( &spat_mesh_charge_density[ceil_index(ny,nz,tlf_i-1,tlf_j-1,tlf_k-1)],
               ( 1.0 - tlf_x_weight ) * ( 1.0 - tlf_y_weight ) * ( 1.0 - tlf_z_weight ) * q_over_v );
}

// Host wrapper: one 256-thread block per 256 particles. Always returns true;
// launch-configuration errors are caught by the assert.
bool run_kernel_particle_to_mesh_map_weight_particles_charge_to_mesh(
    int spat_mesh_x_n_nodes, int spat_mesh_y_n_nodes, int spat_mesh_z_n_nodes,
    double spat_mesh_x_cell_size, double spat_mesh_y_cell_size, double spat_mesh_z_cell_size,
    double *spat_mesh_charge_density,
    const double *sources, int source_size)
{
    dim3 block(256);
    dim3 grid((source_size+255)/256);
    kernel_particle_to_mesh_map_weight_particles_charge_to_mesh<<<grid,block>>>(
        spat_mesh_x_n_nodes, spat_mesh_y_n_nodes, spat_mesh_z_n_nodes,
        spat_mesh_x_cell_size, spat_mesh_y_cell_size, spat_mesh_z_cell_size,
        spat_mesh_charge_density, sources, source_size);
    assert(cudaGetLastError() == cudaSuccess);
    return true;
}
18,922
/* CUDA TSP solver Tuomas Rintamäki 2016 tuomas.rintamaki@aalto.fi */ /* License for the helper code for reading the TSPLIB files: Copyright (c) 2014, Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted for academic, research, experimental, or personal use provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <limits.h> #include <sys/time.h> #include <cuda.h> #include <curand_kernel.h> #define dist(a, b) __float2int_rn(sqrtf((px[a] - px[b]) * (px[a] - px[b]) + (py[a] - py[b]) * (py[a] - py[b]))) #define swap(a, b) {int tmp = a; a = b; b = tmp;} static __device__ volatile int best_d; static __device__ volatile int sol_d; __global__ void Init() { sol_d = 0; best_d = INT_MAX; } __global__ void TwoOpt(int cities, float *posx, float *posy, float *px, float *py, int *tour, int *len) { int a,b,c,d, Dab; int i,j,ii,jj,mini,minj,from,to,cost,offset; int minchange, change; offset = blockIdx.x*cities; // copy the city coordinates and set the initial tour for each block for (i = 0; i < cities; i++) { px[offset+i] = posx[i]; py[offset+i] = posy[i]; tour[offset+i] = i; } // do serial permutation of the city coordinates so that initial tour is randomized curandState rndstate; curand_init(blockIdx.x, 0, 0, &rndstate); for (i = 0; i < cities; i++) { j = curand(&rndstate) % (cities); swap(tour[offset+i], tour[offset+j]); } // search for 2-opt moves do { minchange = 0; i = 0; b = tour[offset+cities-1]; while (i < cities-3) { a = b; i = i+1; b = tour[offset+i]; Dab = dist(a,b); j = i+1; d = tour[offset+j]; while (j < cities-1) { c = d; j = j+1; d = tour[offset+j]; change = dist(a,c) - dist(c,d) + dist(b,d) - Dab; if (change < minchange) { minchange = change; mini = i; minj = j; } } } // apply the best move if (minchange < 0) { i = mini; j = minj-1; while (i < j) { swap(tour[offset+j], tour[offset+i]); i++; j--; } } } while (minchange < 0); // we have a local minimum so compute the cost of the tour cost = 0; for (i = 0;i < cities - 1;i++) { from = tour[offset+i]; to = tour[offset+i+1]; cost += dist(from,to); } // check if the current local minimum is the best solution so far and save it if necessary atomicMin((int *)&best_d, cost); if (best_d == cost) { sol_d = blockIdx.x; } } 
/******************************************************************************/ /*** helper code **************************************************************/ /******************************************************************************/ static void CudaTest(char *msg) { cudaError_t e; cudaThreadSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", cudaGetErrorString(e)); exit(-1); } } #define mallocOnGPU(addr, size) if (cudaSuccess != cudaMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory"); #define copyToGPU(to, from, size) if (cudaSuccess != cudaMemcpy(to, from, size, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed"); #define copyFromGPU(to, from, size) if (cudaSuccess != cudaMemcpy(to, from, size, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of data from device failed\n"); CudaTest("data copy from device failed"); #define copyFromGPUSymbol(to, from, size) if (cudaSuccess != cudaMemcpyFromSymbol(to, from, size)) fprintf(stderr, "copying of symbol from device failed\n"); CudaTest("symbol copy from device failed"); #define copyToGPUSymbol(to, from, size) if (cudaSuccess != cudaMemcpyToSymbol(to, from, size)) fprintf(stderr, "copying of symbol to device failed\n"); CudaTest("symbol copy to device failed"); /******************************************************************************/ /*** read TSPLIB input ********************************************************/ /******************************************************************************/ static int readInput(char *fname, float **posx_d, float **posy_d) // ATT and CEIL_2D edge weight types are not supported { int ch, cnt, in1, cities, i, j; float in2, in3; FILE *f; float *posx, *posy; char str[256]; // potential for buffer overrun f = fopen(fname, "rt"); if (f == NULL) {fprintf(stderr, "could 
not open file %s\n", fname); exit(-1);} ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f); fscanf(f, "%s\n", str); cities = atoi(str); if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);} posx = (float *)malloc(sizeof(float) * cities); if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);} posy = (float *)malloc(sizeof(float) * cities); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);} ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); fscanf(f, "%s\n", str); if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);} cnt = 0; while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3)) { posx[cnt] = in2; posy[cnt] = in3; cnt++; if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);} if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);} } if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);} fscanf(f, "%s", str); if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);} mallocOnGPU(*posx_d, sizeof(float) * cities); mallocOnGPU(*posy_d, sizeof(float) * cities); copyToGPU(*posx_d, posx, sizeof(float) * cities); copyToGPU(*posy_d, posy, sizeof(float) * cities); fclose(f); free(posx); free(posy); return cities; } int main(int argc, char *argv[]) { printf("2-opt TSP CUDA GPU code v0.001 \n"); int cities, restarts, climbs, best, sol; int *tour; int *tour_d, *len_d; float *posx_d, *posy_d, *px_d, *py_d; double runtime; struct timeval starttime, endtime; if (argc != 3) {fprintf(stderr, "\narguments: input_file restart_count\n"); exit(-1);} cities = readInput(argv[1], &posx_d, &posy_d); restarts = atoi(argv[2]); if (restarts < 1) 
{fprintf(stderr, "restart_count is too small: %d\n", restarts); exit(-1);} printf("configuration: %d cities, %d restarts, %s input\n", cities, restarts, argv[1]); cudaFuncSetCacheConfig(TwoOpt, cudaFuncCachePreferEqual); // allocate memory for saving blockwise x and y positions on the device as well as the tour orders mallocOnGPU(px_d, restarts*cities*sizeof(float)); mallocOnGPU(py_d, restarts*cities*sizeof(float)); mallocOnGPU(tour_d, restarts*cities*sizeof(int)); // also, allocate memory for saving the blockwise tour lengths and the final solution mallocOnGPU(len_d, restarts*sizeof(int)); mallocOnGPU(sol_d, cities*sizeof(int)); gettimeofday(&starttime, NULL); Init<<<1, 1>>>(); TwoOpt<<<restarts, 1>>>(cities, posx_d, posy_d, px_d, py_d, tour_d, len_d); CudaTest("kernel launch failed"); gettimeofday(&endtime, NULL); runtime = endtime.tv_sec + endtime.tv_usec / 1000000.0 - starttime.tv_sec - starttime.tv_usec / 1000000.0; // read results copyFromGPUSymbol(&best, best_d, sizeof(int)); copyFromGPUSymbol(&sol, sol_d, sizeof(int)); tour = (int *)malloc(sizeof(int)*cities); if (tour == NULL) {fprintf(stderr, "cannot allocate tour\n"); exit(-1);} copyFromGPU(tour, &tour_d[sol*cities], sizeof(int)*cities); // output results printf("best instance = %d \n", sol); printf("best found tour length = %d\n", best); for (int i = 0; i < cities; i++) { printf("node %d \n", tour[i]); } fflush(stdout); cudaFree(posx_d); cudaFree(posy_d); return 0; }
18,923
#include "cuda_runtime.h"
#include <stdio.h>
#include <time.h>

const int N = 1024;

// Thread-organization models (translated from the original Chinese notes):
// 1. N blocks of 1 thread each (column vector): thread_id = blockIdx.x
// 2. 1 block of N threads (row vector):         thread_id = threadIdx.x
// 3. M blocks of N threads (2D, M rows x N cols): thread_id = blockIdx.x * blockDim.x + threadIdx.x
// 4. M x N blocks of 1 thread each:             thread_id = blockIdx.y * gridDim.x + blockIdx.x
// 5. 1 block of M x N threads:                  thread_id = threadIdx.y * blockDim.x + threadIdx.x
// 6. M x N blocks of P x Q threads, 2D index (most common):
//      thread_x_id = blockIdx.x * blockDim.x + threadIdx.x
//      thread_y_id = blockIdx.y * blockDim.y + threadIdx.y

// Element-wise vector addition: c[i] = a[i] + b[i].
// Assumes the launch provides exactly one thread per element (no bounds check).
__global__ void VectorAdd(float* a, float* b, float* c)
{
    // 1D grid of 1D blocks; global index = block offset + thread offset.
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    c[thread_id] = a[thread_id] + b[thread_id];
}

int main()
{
    int start = clock();
    float a[N] = { 0.0 };
    float b[N] = { 0.0 };
    float c[N] = { 0.0 };
    float* device_a = NULL;
    float* device_b = NULL;
    float* device_c = NULL;
    // Allocate device memory for the three vectors.
    cudaMalloc((void**)&device_a, sizeof(float) * N);
    cudaMalloc((void**)&device_b, sizeof(float) * N);
    cudaMalloc((void**)&device_c, sizeof(float) * N);
    for (int i = 0; i < N; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }
    // Copy input arrays a and b from host memory to device memory.
    cudaMemcpy(device_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // A kernel executes on one grid of the GPU.
    // Launch: N/64 blocks of 64 threads each — exactly one thread per element.
    dim3 dim_grid(N / 64);
    dim3 dim_block(64);
    VectorAdd<<<dim_grid, dim_block>>>(device_a, device_b, device_c);
    // Copy the result back to the host once the kernel has finished
    // (the blocking cudaMemcpy implicitly synchronizes with the kernel).
    cudaMemcpy(c, device_c, sizeof(float) * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    {
        printf("%.0f + %.0f = %.0f\t", a[i], b[i], c[i]);
        if ((i + 1) % 5 == 0) printf("\n");
    }
    int end = clock();
    // NOTE(review): dividing clock ticks by 1000 assumes CLOCKS_PER_SEC == 1000
    // (true on Windows/MSVC, not on POSIX) — confirm the target platform.
    printf("\n程序耗时:%ds\n", (end - start) / 1000);
    // Release device memory.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
18,924
#include <stdio.h>

//
// kernel: every thread reports which block and thread it is
//
__global__ void my_first_kernel()
{
    printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}

//
// host entry point
//
int main(int argc, char **argv)
{
    // launch configuration: 4 blocks x 8 threads
    int nblocks = 4;
    int nthreads = 8;

    my_first_kernel<<<nblocks,nthreads>>>();

    // cudaDeviceReset flushes the device-side printf buffer before exit
    cudaDeviceReset();
    return 0;
}
18,925
// Element-wise copy of d_in[0..size) to d_out via a grid-stride loop, so any
// launch configuration covers the whole array.
// Fix: the original pre-offset both pointers by the thread id but still ran
// the loop as `for (i = 0; i < size; i += stride)`, so every thread executed
// ceil(size/stride) iterations regardless of its id — reading/writing past
// the end whenever size is not a multiple of the total thread count.
template<typename T>
__global__ void copyKernel(const T* __restrict__ d_in, int size, T* __restrict__ d_out)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
        d_out[i] = d_in[i];
    }
}

int main(int argc, char* argv[])
{
    int size = 1024;
    int *d_in, *d_out;
    // Fix: allocate size ELEMENTS, not size bytes (the original allocated
    // 1024 bytes = 256 ints while the kernel touched 1024 ints).
    cudaMalloc(&d_in, size * sizeof(int));
    cudaMalloc(&d_out, size * sizeof(int));
    copyKernel <<< 1, 128 >>> (d_in, size, d_out);
    // Wait for the kernel before tearing the buffers down.
    cudaDeviceSynchronize();
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
18,926
#include <stdio.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/system_error.h>

// Fill the device buffer d_buffer with num uniform random floats in (0, 1],
// using cuRAND's default pseudo-random generator with a fixed seed (1234)
// for reproducibility. Exits the process on any cuRAND failure.
extern "C" void fill_rand(float *d_buffer, int num)
{
    curandGenerator_t gen;
    // OR-combining the statuses only detects "any call failed" — it relies on
    // CURAND_STATUS_SUCCESS being 0; the combined value is not a valid status.
    int status;

    // Create generator
    status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);

    // Set seed
    status |= curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);

    // Generate num random numbers
    status |= curandGenerateUniform(gen, d_buffer, num);

    // Cleanup generator
    status |= curandDestroyGenerator(gen);

    if (status != CURAND_STATUS_SUCCESS) {
        printf ("curand failure!\n");
        exit (EXIT_FAILURE);
    }
}

// Sort the device arrays d_keys/d_values (each of length num) in place,
// in ascending key order, keeping each value paired with its key.
// Exits the process if Thrust reports an error.
// NOTE(review): std::cerr is used below but <iostream> is not included
// directly — it currently arrives via a transitive include; confirm.
extern "C" void sort(int *d_keys, int *d_values, int num)
{
    try {
        // Create THRUST usable device pointers
        thrust::device_ptr<int> keys(d_keys);
        thrust::device_ptr<int> values(d_values);

        // Sort keys AND values array by key
        thrust::sort_by_key(keys, keys + num, values);
    }
    catch(thrust::system_error &e) {
        std::cerr << "Error sorting with Thrust: " << e.what() << std::endl;
        exit (EXIT_FAILURE);
    }
}
18,927
#include "includes.h"

// Forward-difference gradient of a 2D field u (nx x ny, row-major) into g,
// stored interleaved as (d/dx, d/dy) pairs: g[2*idx] and g[2*idx+1].
// The last column/row get a zero derivative in that direction.
__global__ void gradient(float *u, float *g, int nx, int ny)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;  // column
    const int py = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (px >= nx || py >= ny)
        return;                       // thread falls outside the image

    const int idx = px + py * nx;
    // x-derivative: forward difference, zero on the last column.
    g[2 * idx + 0] = (px < nx - 1) ? u[idx + 1] - u[idx] : 0.0f;
    // y-derivative: forward difference, zero on the last row.
    g[2 * idx + 1] = (py < ny - 1) ? u[idx + nx] - u[idx] : 0.0f;
}
18,928
#include <stdio.h>
#include "cuda_runtime.h"

// Error-checking wrapper for CUDA runtime calls.
// Fix: the original called the nonexistent cudaGetStringError() and passed
// `err` to a printf with a single %s — use cudaGetErrorString() with
// matching arguments.
#define CHECK(call) {\
    const cudaError_t err = call;\
    if (err != cudaSuccess) {\
        printf("Error №%d in %s:%d\n", err, __FILE__, __LINE__);\
        printf("Reason: %s \n", cudaGetErrorString(err));\
        exit(1);\
    }\
}

// Kernel: each thread prints its thread/block indices, its (ix, iy) matrix
// coordinate and the matrix value it maps to. Expects a 2D launch over an
// nx x ny row-major matrix.
// Fix: added a bounds guard — the grid is rounded up, so edge threads used
// to read matr[] out of bounds.
__global__ void printThreadIndex(int *matr, int nx, int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= nx || iy >= ny) return;
    unsigned idx = iy * nx + ix;
    printf("ThreadIdx (%d, %d)\nBlockIdx (%d, %d)\nCoordinate (%d, %d)\nGlobal index %d, value is %2d\n====================================================\n",
           threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, matr[idx]);
}

// Fill ip[0..sz) with 0, 1, 2, ...
void initialInt(int * ip, unsigned sz){
    for (unsigned i = 0; i < sz; i++) ip[i] = i;
}

// Print the matrix matr (stored as a flat 1D array) column by column.
void printMatrix(int *matr, unsigned nx, unsigned ny){
    printf("Matrix %d x %d\n", nx, ny);
    for (unsigned i = 0; i < nx; i++) {
        for (unsigned j = 0; j < ny; j++){
            int ind = j * nx + i;
            printf("%3d", matr[ind]);
        }
        printf("\n");
    }
    printf("\n");
}

// Select and report the CUDA device to run on (device 0 by default).
void SetDevice(int dev = 0)
{
    cudaDeviceProp devProp;
    CHECK(cudaGetDeviceProperties(&devProp, dev));
    printf ("Using device %d: %s\n", dev, devProp.name);
    CHECK(cudaSetDevice(dev));
}

int main()
{
    SetDevice();

    // Matrix dimensions.
    int nx = 6;                       // number of rows printed per column
    int ny = 8;                       // number of columns
    unsigned nxy = nx * ny;           // total element count
    // Fix: the buffer holds ints, so size it with sizeof(int)
    // (same byte count as sizeof(float), but now it states the intent).
    int nBytes = nxy * sizeof(int);

    // Allocate and initialize the host matrix.
    int * h_A = (int *) malloc(nBytes);
    initialInt(h_A, nxy);
    printMatrix(h_A, nx, ny);

    // Allocate device memory and copy the matrix over
    // (destination, source, byte count, direction).
    int * dev_A;
    CHECK(cudaMalloc((void **) &dev_A, nBytes));
    CHECK(cudaMemcpy(dev_A, h_A, nBytes, cudaMemcpyHostToDevice));

    // Launch configuration: 4x2 threads per block, grid rounded up to cover
    // the whole matrix.
    dim3 block (4, 2);
    dim3 grid ((nx + block.x -1)/block.x, (ny + block.y -1)/block.y);
    // Fix: the original passed <<<block, grid>>> — the first launch argument
    // is the GRID dimension, the second is the BLOCK dimension.
    printThreadIndex <<<grid, block>>> (dev_A, nx, ny);
    CHECK(cudaDeviceSynchronize());

    // Free device and host memory.
    CHECK(cudaFree(dev_A));
    free(h_A);
    CHECK(cudaDeviceReset());
    printf("Success!\n");
    return 0;
}
18,929
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <ctime>
#include <stdlib.h>
#include <iostream>

// Exclusive upper bound of the search range, in constant memory so every
// thread can read it via broadcast.
__constant__ unsigned long long globalN[1];

__global__ void kernel_isPerfectNumber(bool *arr);
__device__ bool isPerfectNumber(unsigned long long number);
bool isPerfectNumber_(unsigned long long number);
void host_isPerfectNumber(bool *arr, unsigned long long size);
cudaError_t CUDA_get_perfect_numbers(bool *arr, unsigned long long threads, unsigned long long blocks, unsigned long long N);

int main()
{
    unsigned long long threads = 512;
    unsigned long long blocks = 4;
    unsigned long long size = threads * blocks;
    unsigned long long N = size;

    bool *array_cpu = (bool *)malloc(size * sizeof(bool));
    bool *array_gpu = (bool *)malloc(size * sizeof(bool));

    unsigned int start_time;
    unsigned int end_time;
    unsigned int search_time;

    // Time the CPU search.
    start_time = clock();
    host_isPerfectNumber(array_cpu, N);
    end_time = clock();
    search_time = end_time - start_time;
    std::cout << search_time / 1000.0 << std::endl;

    // Time the GPU search (includes allocation and transfers).
    start_time = clock();
    CUDA_get_perfect_numbers(array_gpu, threads, blocks, N);
    end_time = clock();
    search_time = end_time - start_time;
    std::cout << search_time / 1000.0 << std::endl;

    for (unsigned long long i = 0; i < N; i++)
    {
        if (array_gpu[i])
            std::cout << i << std::endl;
    }

    // FIX: the original leaked both host buffers.
    free(array_cpu);
    free(array_gpu);
    return 0;
}

//CPU
// Marks arr[i] = true for every perfect number i in [0, size).
void host_isPerfectNumber(bool *arr, unsigned long long size)
{
    for (unsigned long long i = 0; i < size; i++)
        arr[i] = isPerfectNumber_(i);
}

// True iff `number` equals the sum of its proper divisors.
// FIX: the original classified 0 as perfect (empty divisor sum == 0 == number).
bool isPerfectNumber_(unsigned long long number)
{
    if (number == 0) return false;
    unsigned long long sum = 0;
    // No proper divisor exceeds number/2, so halve the original i < number loop.
    for (unsigned long long i = 1; i <= number / 2; i++)
    {
        if (number % i == 0) sum += i;
    }
    return sum == number;
}

//GPU
// Launches the perfect-number kernel over blocks*threads indices and copies
// the boolean result array back to `arr`. Returns the first CUDA error
// encountered (cudaSuccess on a clean run); dev_arr is freed on all paths.
cudaError_t CUDA_get_perfect_numbers(bool *arr, unsigned long long threads, unsigned long long blocks, unsigned long long N)
{
    unsigned long long size = threads * blocks;
    bool *dev_arr = nullptr;

    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_arr, size * sizeof(bool));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Publish the search bound to constant memory.
    cudaStatus = cudaMemcpyToSymbol(globalN, &N, sizeof(unsigned long long));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Memcpy failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    kernel_isPerfectNumber <<<blocks, threads >>> (dev_arr);

    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    cudaStatus = cudaMemcpy(arr, dev_arr, size * sizeof(bool), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_arr);
    return cudaStatus;
}

// One thread per candidate number; arr has blocks*threads slots, so writing
// arr[i] is always in range even when i >= globalN[0].
__global__ void kernel_isPerfectNumber(bool *arr)
{
    unsigned long long i = threadIdx.x + blockDim.x*blockIdx.x;
    if (i >= globalN[0])
    {
        arr[i] = false;
        return;
    }
    arr[i] = isPerfectNumber(i);
}

// Device twin of isPerfectNumber_().
// FIX: same zero-case correction as the host version.
__device__ bool isPerfectNumber(unsigned long long number)
{
    if (number == 0) return false;
    unsigned long long sum = 0;
    for (unsigned long long i = 1; i <= number / 2; i++)
    {
        if (number % i == 0) sum += i;
    }
    return sum == number;
}
18,930
#include<stdio.h> int main(void) { // int* a = (int *)malloc(sizeof(int)); // *a = 4; // printf("%p\n",a); // printf("%d\n",*a); int a = 5; int* d_a = NULL; d_a = &a; printf("variable a = %d\n", a); printf("contenido variable d_a = %d\n", *d_a); return 0; }
18,931
// RUN: %clang_cc1 -emit-llvm -o - -fcuda-is-device -fms-extensions -x hip %s \ // RUN: -fno-autolink -triple amdgcn-amd-amdhsa \ // RUN: | FileCheck -check-prefix=DEV %s // RUN: %clang_cc1 -emit-llvm -o - -fms-extensions -x hip %s -triple \ // RUN: x86_64-pc-windows-msvc | FileCheck -check-prefix=HOST %s // RUN: %clang_cc1 -emit-llvm -o - -fcuda-is-device -fms-extensions %s \ // RUN: -fno-autolink -triple amdgcn-amd-amdhsa \ // RUN: | FileCheck -check-prefix=DEV %s // RUN: %clang_cc1 -emit-llvm -o - -fms-extensions %s -triple \ // RUN: x86_64-pc-windows-msvc | FileCheck -check-prefix=HOST %s // DEV-NOT: llvm.linker.options // DEV-NOT: llvm.dependent-libraries // HOST: lvm.linker.options // HOST: "/DEFAULTLIB:libcpmt.lib" // HOST: "/FAILIFMISMATCH:\22myLib_version=9\22" #pragma comment(lib, "libcpmt") #pragma detect_mismatch("myLib_version", "9")
18,932
/* * Solves the Panfilov model using an explicit numerical scheme. * Based on code orginally provided by Xing Cai, Simula Research Laboratory * and reimplementation by Scott B. Baden, UCSD * * Modified and restructured by Didem Unat, Koc University * */ #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <iostream> #include <iomanip> #include <string.h> #include <math.h> #include <sys/time.h> using namespace std; void checkCUDAError(const char *msg); // Utilities // // Timer // Make successive calls and take a difference to get the elapsed time. static const double kMicro = 1.0e-6; static const int BLOCKSIZE = 16; double getTime() { struct timeval TV; struct timezone TZ; const int RC = gettimeofday(&TV, &TZ); if(RC == -1) { cerr << "ERROR: Bad call to gettimeofday" << endl; return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } // end getTime() // Allocate a 2D array double **alloc2D(int m,int n){ double **E; int nx=n, ny=m; E = (double**)malloc(sizeof(double*)*ny + sizeof(double)*nx*ny); assert(E); int j; for(j=0;j<ny;j++) E[j] = (double*)(E+ny) + j*nx; return(E); } double *flatten(double **array, int m, int n) { double *a; a = (double*)malloc(sizeof(double)*(m+2)*(n+2)); int i, j; for(j=0;j<=m + 1; j++){ for (i = 0; i <= n + 1; i++) { a[(j * (n+2)) + i] = array[j][i]; } } return a; } // Reports statistics about the computation // These values should not vary (except to within roundoff) // when we use different numbers of processes to solve the problem double stats(double *E, int m, int n, double *_mx){ double mx = -1; double l2norm = 0; int i, j; for (j=1; j<=m; j++) for (i=1; i<=n; i++) { l2norm += E[(j * (n+2)) + i]*E[(j * (n+2)) + i]; if (E[(j * (n+2)) + i] > mx) mx = E[(j * (n+2)) + i]; } *_mx = mx; l2norm /= (double) ((m)*(n)); l2norm = sqrt(l2norm); return l2norm; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, 
cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } // External functions extern "C" { void splot(double **E, double T, int niter, int m, int n); } void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads); __global__ void vecODEKernel(double* R, double* E, double epsilon, double M1, double M2, double dt, double kk, double a, double b, int n) { int row = blockIdx.y*blockDim.y+threadIdx.y+1; int col = blockIdx.x*blockDim.x+threadIdx.x+1; if((row < n) && (col < n)) { row = row * (n+2); E[row + col] = E[row + col] -dt*(kk* E[row + col]*(E[row + col] - a)*(E[row + col]-1)+ E[row + col] *R[row + col]); R[row + col] = R[row + col] + dt*(epsilon+M1* R[row + col]/( E[row + col]+M2))*(-R[row + col]-kk* E[row + col]*(E[row + col]-b-1)); } } __global__ void boundaryKernel(double *E_prev, int m, int n) { int row = blockIdx.y*blockDim.y+threadIdx.y+1; int col = blockIdx.x*blockDim.x+threadIdx.x+1; row = row * (n+2); E_prev[row] = E_prev[row + 2]; E_prev[row + n + 1] = E_prev[row + (n-1)]; E_prev[col] = E_prev[col+2]; E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col]; } __global__ void matAllKernel(double alpha, double* E, double* E_prev, double* R, int n, int m, double epsilon, double M1, double M2, double dt, double kk, double a, double b) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; int row_m = row * (n+2); // Mirror boundary setup if(col == 0 || col == (n+1)) { E_prev[row_m] = E_prev[row_m + 2]; E_prev[row_m + n + 1] = E_prev[row_m + (n-1)]; } if(row == 0 || row == (n+1)) { E_prev[col] = E_prev[col+2]; E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col]; } __syncthreads(); row = row + 1; col = col + 1; if((row < n) && (col < n)) { row = row * (n+2); //PDE E[row + col] = E_prev[row + col]+alpha*(E_prev[row + col + 1]+E_prev[row + col -1]-4*E_prev[row + col]+E_prev[row + col + (n+2)]+E_prev[row + col - (n+2)]); //ODE E[row + col] = E[row + col] -dt*(kk* E[row + 
col]*(E[row + col] - a)*(E[row + col]-1)+ E[row + col] *R[row + col]); R[row + col] = R[row + col] + dt*(epsilon+M1* R[row + col]/( E[row + col]+M2))*(-R[row + col]-kk* E[row + col]*(E[row + col]-b-1)); } } void simulate (double* E, double* E_prev,double* R, const double alpha, const int n, const int m, const double kk, const double dt, const double a, const double epsilon, const double M1,const double M2, const double b) { dim3 DimBlock(BLOCKSIZE,BLOCKSIZE,1); dim3 DimGrid(ceil((double)n/DimBlock.x), ceil((double)n/DimBlock.y)); matAllKernel<<<DimGrid, DimBlock>>>(alpha, E, E_prev, R, n, m, epsilon, M1, M2, dt, kk, a, b); } // Main program int main (int argc, char** argv) { /* * Solution arrays * E is the "Excitation" variable, a voltage * R is the "Recovery" variable * E_prev is the Excitation variable for the previous timestep, * and is used in time integration */ double **E, **R, **E_prev; // Various constants - these definitions shouldn't change const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5; double T=1000.0; int m=200,n=200; int plot_freq = 0; int px = 1, py = 1; int no_comm = 0; int num_threads=1; cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads); m = n; // Allocate contiguous memory for solution arrays // The computational box is defined on [1:m+1,1:n+1] // We pad the arrays in order to facilitate differencing on the // boundaries of the computation box E = alloc2D(m+2,n+2); E_prev = alloc2D(m+2,n+2); R = alloc2D(m+2,n+2); int i,j; // Initialization for (j=1; j<=m; j++) for (i=1; i<=n; i++) E_prev[j][i] = R[j][i] = 0; for (j=1; j<=m; j++) for (i=n/2+1; i<=n; i++) E_prev[j][i] = 1.0; for (j=m/2+1; j<=m; j++) for (i=1; i<=n; i++) R[j][i] = 1.0; double *Ef, *Rf, *E_prevf; Ef = flatten(E, m, n); Rf = flatten(R, m, n); E_prevf = flatten(E_prev, m, n); double dx = 1.0/n; // For time integration, these values shouldn't change double rp= kk*(b+1)*(b+1)/4; double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk)); double 
dtr=1/(epsilon+((M1/M2)*rp)); double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr; double alpha = d*dt/(dx*dx); cout << "Grid Size : " << n << endl; cout << "Duration of Sim : " << T << endl; cout << "Time step dt : " << dt << endl; cout << "Process geometry: " << px << " x " << py << endl; if (no_comm) cout << "Communication : DISABLED" << endl; cout << endl; // Integer timestep number int niter=0; int size = ((n+2)*(m+2) * sizeof(double)); double *d_E, *d_E_prev, *d_R; // allocate memory for the devices cudaMalloc((void **) &d_E, size); cudaMalloc((void **) &d_E_prev, size); cudaMalloc((void **) &d_R, size); checkCUDAError("Error allocating device memory arrays"); // copy all arrays to device cudaMemcpy(d_R, Rf, size, cudaMemcpyHostToDevice); checkCUDAError("Unable to copy to device, R"); cudaMemcpy(d_E_prev, E_prevf, size, cudaMemcpyHostToDevice); checkCUDAError("Unable to copy to device, E_prev"); cudaMemcpy(d_E, Ef, size, cudaMemcpyHostToDevice); checkCUDAError("Unable to copy to device, E"); // Simulated time is different from the integer timestep number // Simulated time double t = 0.0; // Start the timer double t0 = getTime(); while (t<T) { t += dt; niter++; simulate(d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b); //swap current E with previous E double *tmp = d_E; d_E = d_E_prev; d_E_prev = tmp; if (plot_freq){ int k = (int)(t/plot_freq); if ((t - k * plot_freq) < dt){ splot(E,t,niter,m+2,n+2); } } }//end of while loop double time_elapsed = getTime() - t0; // copy back all arrays cudaMemcpy(E_prevf, d_E_prev, size, cudaMemcpyDeviceToHost); checkCUDAError("Unable to retrieve result from device, E_prev"); cudaMemcpy(Rf, d_R, size, cudaMemcpyDeviceToHost); checkCUDAError("Unable to retrieve result from device, R"); cudaMemcpy(Ef, d_E, size, cudaMemcpyDeviceToHost); checkCUDAError("Unable to retrieve result from device, E"); // free memory cudaFree(d_R); cudaFree(d_E); cudaFree(d_E_prev); double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / 
time_elapsed ; double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed; cout << "Number of Iterations : " << niter << endl; cout << "Elapsed Time (sec) : " << time_elapsed << endl; cout << "Sustained Gflops Rate : " << Gflops << endl; cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl; double mx; double l2norm = stats(E_prevf,m,n,&mx); cout << "Max: " << mx << " L2norm: "<< l2norm << endl; if (plot_freq){ cout << "\n\nEnter any input to close the program and the plot..." << endl; getchar(); } free (E); free (E_prev); free (R); return 0; }
18,933
#include "includes.h" __global__ void histogramm(float* hist, unsigned char* input, int width, int height, int stride) { int index = blockIdx.x * blockDim.x * stride + threadIdx.x; int size = width * height; if (index > size - 1) return; __shared__ unsigned int histo_private[256]; #pragma unroll for (int i = 0; i < 8; i++) { histo_private[threadIdx.x * 8 + i] = 0; } __syncthreads(); int i = 0; while (i < stride && index < size) { int pixel = input[index]; atomicAdd(&(histo_private[pixel]), 1); index += blockDim.x; i++; } __syncthreads(); #pragma unroll for (int i = 0; i < 8; i++) { int x_off = threadIdx.x * 8 + i; hist[x_off * 3 + 0] = (x_off - 128.f) / 256.f * (float)width; float factor = .48f; float scaledValue = ((float)(histo_private[x_off]) / (float)size) - (factor / gridDim.x); atomicAdd(&(hist[x_off * 3 + 1]), scaledValue * (float)height); } }
18,934
#ifndef GPU_MESH_H #define GPU_MESH_H #include "gpuVector3D.cu" class gpuMesh { public: /* * Constructor using list of primitives */ __host__ gpuMesh(gpuVector3D *pos, gpuVector3D *norm) : positions(pos), normals(norm) {} gpuVector3D *positions; gpuVector3D *normals; }; #endif
18,935
#include <stdio.h>

// Abort-on-error wrapper for CUDA runtime calls: prints the error string
// plus the file and line of the failing call, then exits.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

// Allocates the device buffers for an X*Y*Z domain:
//   cells_d  — 2 copies of 19 floats per cell (ping-pong buffers; the 19
//              values per cell suggest a D3Q19 lattice — TODO confirm)
//   flags_d  — 1 float per cell
//   vel_d    — 3 floats per cell ("vel" = velocity components, per the name)
//   rho_d    — 1 float per cell ("rho" = density, per the name)
//   fuerza_d — 3 floats per cell ("fuerza" = force components, per the name)
// All pointer parameters are out-parameters; any allocation failure aborts
// the process via gpuErrchk.
void alloc_memory_GPU(int X, int Y, int Z, float **cells_d, float **flags_d, float **vel_d, float **rho_d, float **fuerza_d)
{
    gpuErrchk( cudaMalloc(cells_d, 2*X*Y*Z*19*sizeof(float)) );
    gpuErrchk( cudaMalloc(flags_d, X*Y*Z*sizeof(float)) );
    gpuErrchk( cudaMalloc(vel_d, X*Y*Z*3*sizeof(float)) );
    gpuErrchk( cudaMalloc(rho_d, X*Y*Z*sizeof(float)) );
    gpuErrchk( cudaMalloc(fuerza_d, X*Y*Z*3*sizeof(float)) );
}

// Releases every buffer created by alloc_memory_GPU.
void free_memory_GPU(float *cells_d, float *flags_d, float *vel_d, float *rho_d, float *fuerza_d)
{
    gpuErrchk( cudaFree(cells_d) );
    gpuErrchk( cudaFree(flags_d) );
    gpuErrchk( cudaFree(vel_d) );
    gpuErrchk( cudaFree(rho_d) );
    gpuErrchk( cudaFree(fuerza_d) );
}

// Copies all five host arrays into their device counterparts.
// Host buffers must match the sizes laid out in alloc_memory_GPU.
void send_data_to_GPU(int X, int Y, int Z, float *cells, float *cells_d, float *flags, float *flags_d, float *vel, float *vel_d, float *rho, float *rho_d, float *fuerza, float *fuerza_d)
{
    gpuErrchk( cudaMemcpy(cells_d, cells, 2*X*Y*Z*19*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(flags_d, flags, X*Y*Z*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(vel_d, vel, X*Y*Z*3*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(rho_d, rho, X*Y*Z*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(fuerza_d, fuerza, X*Y*Z*3*sizeof(float), cudaMemcpyHostToDevice) );
}

// Copies all five device arrays back into the host buffers
// (mirror of send_data_to_GPU).
void retrieve_data_from_GPU(int X, int Y, int Z, float *cells, float *cells_d, float *flags, float *flags_d, float *vel, float *vel_d, float *rho, float *rho_d, float *fuerza, float *fuerza_d)
{
    gpuErrchk( cudaMemcpy(cells, cells_d, 2*X*Y*Z*19*sizeof(float), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(flags, flags_d, X*Y*Z*sizeof(float), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(vel, vel_d, X*Y*Z*3*sizeof(float), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(rho, rho_d, X*Y*Z*sizeof(float), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(fuerza, fuerza_d, X*Y*Z*3*sizeof(float), cudaMemcpyDeviceToHost) );
}
18,936
#include "includes.h" __device__ void Temp( float *R, float *G, float *B, float Temp) { float r, g, b; if (Temp <= 66.0f){ r = 255.0f; } else { r = Temp - 60.0f; r = 329.698727446 * powf(r, -0.1332047592); if(r < 0.0f){r = 0.0f;} if(r > 255.0f){r = 255.0f;} } if (Temp <= 66.0f){ g = Temp; g = 99.4708025861 * log(g) - 161.1195681661; if(g < 0.0f){g = 0.0f;} if(g > 255.0f){g = 255.0f;} } else { g = Temp - 60.0f; g = 288.1221695283 * powf(g, -0.0755148492); if(g < 0.0f){g = 0.0f;} if(g > 255.0f){g = 255.0f;} } if(Temp >= 66.0f){ b = 255.0f; } else { if(Temp <= 19.0f){ b = 0.0f; } else { b = Temp - 10.0f; b = 138.5177312231 * log(b) - 305.0447927307; if(b < 0.0f){b = 0.0f;} if(b > 255.0f){b = 255.0f;} } } *R = r / 255.0f; *G = g / 255.0f; *B = b / 255.0f; } __global__ void TempReturn(float* p_Input, float* p_Temp, int p_Width, int p_Height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < p_Width) && (y < p_Height)) { const int index = (y * p_Width + x) * 4; p_Input[index + 2] = p_Temp[y * p_Width + x]; }}
18,937
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences // This is supplement to the paper: // L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs". // e-mail: barash @ itp.ac.ru (remove space) #include<stdio.h> #define lfsr113_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0) #define lfsr113_BLOCKS 512 #define lfsr113_THREADS 128 #define lfsr113_ARRAY_SECTIONS (lfsr113_BLOCKS*lfsr113_THREADS/128) typedef unsigned long long lt; typedef struct{ unsigned z1,z2,z3,z4; } lfsr113_state; typedef struct{ unsigned z[4] __attribute__ ((aligned(16))); } lfsr113_sse_state; unsigned lfsr113_a[4] __attribute__ ((aligned(16)))={4294967294U,4294967288U,4294967280U,4294967168U}; int lfsr113_b[4] __attribute__ ((aligned(16)))={262144,4,128,8192}; int lfsr113_c[4] __attribute__ ((aligned(16)))={64,4,8192,8}; unsigned lfsr113__Consts[8][4] = {{6,2,13,3},{13,27,21,12},{4294967294U,4294967288U,4294967280U,4294967168U},{18,2,7,13}, {31,6,18,0},{29,2,2,0},{28,13,7,0},{25,3,13,0}}; __constant__ unsigned lfsr113_Consts[8][4] = {{6,2,13,3},{13,27,21,12},{4294967294U,4294967288U,4294967280U,4294967168U},{18,2,7,13}, {31,6,18,0},{29,2,2,0},{28,13,7,0},{25,3,13,0}}; extern "C" __host__ unsigned int lfsr113_sse_generate_(lfsr113_sse_state* state){ // here SSE4 instruction pblendw is used unsigned output; asm volatile("movaps (%1),%%xmm1\n" \ "movaps (%2),%%xmm2\n" \ "movaps (%4),%%xmm0\n" \ "pand %%xmm1,%%xmm2\n" \ "pmulld (%3),%%xmm2\n" \ "pmulld %%xmm1,%%xmm0\n" \ "pxor %%xmm0,%%xmm1\n" \ "psrld $12,%%xmm1\n" \ "pblendw $192,%%xmm1,%%xmm3\n" \ "psrld $1,%%xmm1\n" \ "pblendw $3,%%xmm1,%%xmm3\n" \ "psrld $8,%%xmm1\n" \ "pblendw $48,%%xmm1,%%xmm3\n" \ "psrld $6,%%xmm1\n" \ "pblendw $12,%%xmm1,%%xmm3\n" \ "pxor %%xmm2,%%xmm3\n" \ 
"movaps %%xmm3,(%1)\n" \ "pshufd $255,%%xmm3,%%xmm0\n" \ "pshufd $170,%%xmm3,%%xmm1\n" \ "pshufd $85,%%xmm3,%%xmm2\n" \ "pxor %%xmm0,%%xmm3\n" \ "pxor %%xmm1,%%xmm2\n" \ "pxor %%xmm2,%%xmm3\n" \ "pextrd $0,%%xmm3,%0\n" \ "":"=&r"(output):"r"(state->z),"r"(lfsr113_a),"r"(lfsr113_b),"r"(lfsr113_c)); return output; } extern "C" __device__ __host__ void lfsr113_get_sse_state_(lfsr113_state* state,lfsr113_sse_state* sse_state){ sse_state->z[0]=state->z1; sse_state->z[1]=state->z2; sse_state->z[2]=state->z3; sse_state->z[3]=state->z4; } extern "C" __device__ __host__ unsigned lfsr113_SkipAheadRoundSingleBit(unsigned state,unsigned bit,unsigned p,unsigned q,unsigned s,int n){ char arr0[64]; char arr[128][32]; // this function skips ahead 2^n*s bits unsigned e,i,j,k1=p,k2,l1,l2,ki,TwoInE; for(j=0;j<p; j++) arr0[j] = (state>>(31-j)) & 1; for(j=p;j<64;j++) arr0[j] = arr0[j-p]^arr0[j-p+q]; if(n<5&&s<32>>n){ j=(1<<n)*s; return (arr0[j+bit]<<(31-bit)); } else{ i=0; ki=s; while(ki<p){ ki*=2; i++;} TwoInE=1; for(e=0;e<=(n-i);e++){ for(j=0;j<p;j++){ if(j==0) l1=128; else {k1=j; l1=0; while(k1<p) {k1*=2; l1++;}} k2=j+q; l2=0; while(k2<p) {k2*=2; l2++;} arr[e][j]=(e<l1 ? arr0[j*TwoInE+bit] : arr[e-l1][k1-p])^( e<l2 ? 
arr0[(j+q)*TwoInE+bit] : arr[e-l2][k2-p]); } TwoInE*=2; } return (arr[n-i][ki-p]<<(31-bit)); } } extern "C" __device__ __host__ unsigned lfsr113_SkipAheadRoundSingle(unsigned state,unsigned p,unsigned q,unsigned s,int n){ char arr0[64]; char arr[128][32]; // this function skips ahead 2^n*s bits unsigned bit,e,i,j,k1=p,k2,l1,l2,ki,TwoInE; unsigned output=0; for(j=0;j<p; j++) arr0[j] = (state>>(31-j)) & 1; for(j=p;j<64;j++) arr0[j] = arr0[j-p]^arr0[j-p+q]; if(n<5&&s<32>>n){ j=(1<<n)*s; for(i=0;i<32;i++) output+= (arr0[j+i]<<(31-i)); return output; } else{ i=0; ki=s; while(ki<p){ ki*=2; i++;} for(bit=0;bit<32;bit++){ TwoInE=1; for(e=0;e<=(n-i);e++){ for(j=0;j<p;j++){ if(j==0) l1=128; else {k1=j; l1=0; while(k1<p) {k1*=2; l1++;}} k2=j+q; l2=0; while(k2<p) {k2*=2; l2++;} arr[e][j]=(e<l1 ? arr0[j*TwoInE+bit] : arr[e-l1][k1-p])^( e<l2 ? arr0[(j+q)*TwoInE+bit] : arr[e-l2][k2-p]); } TwoInE*=2; } output+=(arr[n-i][ki-p]<<(31-bit)); } return output; } } extern "C" __device__ __host__ void lfsr113_SkipAheadRound(lfsr113_state* state,int n){ // Skips Ahead 2^n state->z1=lfsr113_SkipAheadRoundSingle(state->z1,31,6,18,n); state->z2=lfsr113_SkipAheadRoundSingle(state->z2,29,2,2,n); state->z3=lfsr113_SkipAheadRoundSingle(state->z3,28,13,7,n); state->z4=lfsr113_SkipAheadRoundSingle(state->z4,25,3,13,n); } extern "C" __device__ __host__ void lfsr113_skipahead_(lfsr113_state* state,unsigned long long offset64,unsigned long long offset0){ unsigned long long i=offset0; int shift=0; while(i>0){ if(i%2==1) lfsr113_SkipAheadRound(state,shift); i/=2; shift++; } i=offset64; shift=64; while(i>0){ if(i%2==1) lfsr113_SkipAheadRound(state,shift); i/=2; shift++; } } extern "C" __device__ __host__ void lfsr113_init_(lfsr113_state* state){ state->z1=state->z2=state->z3=state->z4=12345; } extern "C" __device__ __host__ void lfsr113_init_sequence_(lfsr113_state* state,lt SequenceNumber){ lt n1,n2; // 0 <= SequenceNumber < 3.8*10^18, length of each sequence < 10^10 lfsr113_init_(state); 
n1=SequenceNumber/892447987; n2=SequenceNumber%892447987; lfsr113_skipahead_(state,n1,n1*4193950067); // 20669825409*892447987 = 2^64 + 4193950067 lfsr113_skipahead_(state,0,n2*20669825409); // thus we are skipping ahead (SequenceNumber*20669825409) numbers } extern "C" __device__ __host__ void lfsr113_init_long_sequence_(lfsr113_state* state,lt SequenceNumber){ lfsr113_init_(state); // 0 <= SequenceNumber < 4*10^9. length of each sequence < 10^24 lfsr113_skipahead_(state,100000*SequenceNumber,2699204111*SequenceNumber); } extern "C" __device__ __host__ unsigned int lfsr113_generate_(lfsr113_state* state){ unsigned b; b = ((state->z1 << 6) ^ state->z1) >> 13; state->z1 = ((state->z1 & 4294967294U) << 18) ^ b; b = ((state->z2 << 2) ^ state->z2) >> 27; state->z2 = ((state->z2 & 4294967288U) << 2) ^ b; b = ((state->z3 << 13) ^ state->z3) >> 21; state->z3 = ((state->z3 & 4294967280U) << 7) ^ b; b = ((state->z4 << 3) ^ state->z4) >> 12; state->z4 = ((state->z4 & 4294967168U) << 13) ^ b; return (state->z1 ^ state->z2 ^ state->z3 ^ state->z4); } extern "C" __device__ __host__ float lfsr113_generate_uniform_float_(lfsr113_state* state){ unsigned b; b = ((state->z1 << 6) ^ state->z1) >> 13; state->z1 = ((state->z1 & 4294967294U) << 18) ^ b; b = ((state->z2 << 2) ^ state->z2) >> 27; state->z2 = ((state->z2 & 4294967288U) << 2) ^ b; b = ((state->z3 << 13) ^ state->z3) >> 21; state->z3 = ((state->z3 & 4294967280U) << 7) ^ b; b = ((state->z4 << 3) ^ state->z4) >> 12; state->z4 = ((state->z4 & 4294967168U) << 13) ^ b; return (state->z1 ^ state->z2 ^ state->z3 ^ state->z4) * 2.3283064365386963e-10; } extern "C" __host__ void lfsr113_print_state_(lfsr113_state* state){ printf("Generator State: z1=%u, z2=%u, z3=%u, z4=%u\n",state->z1,state->z2,state->z3,state->z4); } __global__ void lfsr113_kernel_generate_array(lfsr113_state* state, unsigned* out, long* length) { unsigned b,idx,threadIdx0,rngnum,seqNum,sum,bit,myz; // use 128 threads per block long offset,i; lfsr113_state 
mystate=*state; int j,shift=0; __shared__ unsigned z[128]; // one generator per 128 threads idx = threadIdx.x; rngnum = (idx >> 5); bit = idx % 32; threadIdx0 = threadIdx.x-bit; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>7; // RNG_sequence index offset = i = seqNum*(*length); // start of the section in the output array if(bit==0) z[threadIdx0]=(rngnum==0?mystate.z1:(rngnum==1?mystate.z2:(rngnum==2?mystate.z3:mystate.z4))); while(i>0){ if(i%2==1){ __syncthreads(); myz=lfsr113_SkipAheadRoundSingleBit(z[threadIdx0],bit,lfsr113_Consts[rngnum+4][0],lfsr113_Consts[rngnum+4][1],lfsr113_Consts[rngnum+4][2],shift); if(bit>0) z[threadIdx.x]=myz; __syncthreads(); if(bit==0){ sum=myz; for(j=1;j<32;j++) sum+=z[threadIdx0+j]; z[threadIdx0]=sum;} } i/=2; shift++; } if(bit==0) myz=z[threadIdx0]; for(i=0;i<(*length);i++){ if(bit==0){ b = ((myz<<lfsr113_Consts[0][rngnum])^myz)>>lfsr113_Consts[1][rngnum]; z[threadIdx0] = myz = ((myz&lfsr113_Consts[2][rngnum])<<lfsr113_Consts[3][rngnum])^b; } __syncthreads(); // each 4 threads result in "length" values in the output array if(idx==0) out[offset+i] = z[threadIdx0]^z[threadIdx0+32]^z[threadIdx0+64]^z[threadIdx0+96]; __syncthreads(); } } extern "C" __host__ void lfsr113_generate_gpu_array_(lfsr113_state* state, unsigned int* dev_out, unsigned int* length){ long mylength = (*length)/lfsr113_ARRAY_SECTIONS; lfsr113_state* dev_state; long* dev_length; if((mylength*lfsr113_ARRAY_SECTIONS)<(*length)) mylength++; lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(lfsr113_state))); lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); lfsr113_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(lfsr113_state),cudaMemcpyHostToDevice)); lfsr113_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); lfsr113_kernel_generate_array<<<lfsr113_BLOCKS,lfsr113_THREADS>>>(dev_state,dev_out,dev_length); lfsr113_CUDA_CALL(cudaGetLastError()); lfsr113_CUDA_CALL(cudaFree(dev_state)); 
lfsr113_CUDA_CALL(cudaFree(dev_length)); } __global__ void lfsr113_kernel_generate_array_float(lfsr113_state* state, float* out, long* length) { unsigned b,idx,threadIdx0,rngnum,seqNum,sum,bit,myz; // use 128 threads per block long offset,i; lfsr113_state mystate=*state; int j,shift=0; __shared__ unsigned z[128]; // one generator per 128 threads idx = threadIdx.x; rngnum = (idx >> 5); bit = idx % 32; threadIdx0 = threadIdx.x-bit; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>7; // RNG_sequence index offset = i = seqNum*(*length); // start of the section in the output array if(bit==0) z[threadIdx0]=(rngnum==0?mystate.z1:(rngnum==1?mystate.z2:(rngnum==2?mystate.z3:mystate.z4))); while(i>0){ if(i%2==1){ __syncthreads(); myz=lfsr113_SkipAheadRoundSingleBit(z[threadIdx0],bit,lfsr113_Consts[rngnum+4][0],lfsr113_Consts[rngnum+4][1],lfsr113_Consts[rngnum+4][2],shift); if(bit>0) z[threadIdx.x]=myz; __syncthreads(); if(bit==0){ sum=myz; for(j=1;j<32;j++) sum+=z[threadIdx0+j]; z[threadIdx0]=sum;} } i/=2; shift++; } if(bit==0) myz=z[threadIdx0]; for(i=0;i<(*length);i++){ if(bit==0){ b = ((myz<<lfsr113_Consts[0][rngnum])^myz)>>lfsr113_Consts[1][rngnum]; z[threadIdx0] = myz = ((myz&lfsr113_Consts[2][rngnum])<<lfsr113_Consts[3][rngnum])^b; } __syncthreads(); // each 4 threads result in "length" values in the output array if(idx==0) out[offset+i] = ((float)(z[threadIdx0]^z[threadIdx0+32]^z[threadIdx0+64]^z[threadIdx0+96])) * 2.3283064365386963e-10; __syncthreads(); } } extern "C" __host__ void lfsr113_generate_gpu_array_float_(lfsr113_state* state, float* dev_out, unsigned int* length){ long mylength = (*length)/lfsr113_ARRAY_SECTIONS; lfsr113_state* dev_state; long* dev_length; if((mylength*lfsr113_ARRAY_SECTIONS)<(*length)) mylength++; lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(lfsr113_state))); lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); lfsr113_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(lfsr113_state),cudaMemcpyHostToDevice)); 
lfsr113_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); lfsr113_kernel_generate_array_float<<<lfsr113_BLOCKS,lfsr113_THREADS>>>(dev_state,dev_out,dev_length); lfsr113_CUDA_CALL(cudaGetLastError()); lfsr113_CUDA_CALL(cudaFree(dev_state)); lfsr113_CUDA_CALL(cudaFree(dev_length)); } __global__ void lfsr113_kernel_generate_array_double(lfsr113_state* state, double* out, long* length) { unsigned b,idx,threadIdx0,rngnum,seqNum,sum,bit,myz; // use 128 threads per block long offset,i; lfsr113_state mystate=*state; int j,shift=0; __shared__ unsigned z[128]; // one generator per 128 threads idx = threadIdx.x; rngnum = (idx >> 5); bit = idx % 32; threadIdx0 = threadIdx.x-bit; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>7; // RNG_sequence index offset = i = seqNum*(*length); // start of the section in the output array if(bit==0) z[threadIdx0]=(rngnum==0?mystate.z1:(rngnum==1?mystate.z2:(rngnum==2?mystate.z3:mystate.z4))); while(i>0){ if(i%2==1){ __syncthreads(); myz=lfsr113_SkipAheadRoundSingleBit(z[threadIdx0],bit,lfsr113_Consts[rngnum+4][0],lfsr113_Consts[rngnum+4][1],lfsr113_Consts[rngnum+4][2],shift); if(bit>0) z[threadIdx.x]=myz; __syncthreads(); if(bit==0){ sum=myz; for(j=1;j<32;j++) sum+=z[threadIdx0+j]; z[threadIdx0]=sum;} } i/=2; shift++; } if(bit==0) myz=z[threadIdx0]; for(i=0;i<(*length);i++){ if(bit==0){ b = ((myz<<lfsr113_Consts[0][rngnum])^myz)>>lfsr113_Consts[1][rngnum]; z[threadIdx0] = myz = ((myz&lfsr113_Consts[2][rngnum])<<lfsr113_Consts[3][rngnum])^b; } __syncthreads(); // each 4 threads result in "length" values in the output array if(idx==0) out[offset+i] = ((double)(z[threadIdx0]^z[threadIdx0+32]^z[threadIdx0+64]^z[threadIdx0+96])) * 2.3283064365386963e-10; __syncthreads(); } } extern "C" __host__ void lfsr113_generate_gpu_array_double_(lfsr113_state* state, double* dev_out, unsigned int* length){ long mylength = (*length)/lfsr113_ARRAY_SECTIONS; lfsr113_state* dev_state; long* dev_length; 
if((mylength*lfsr113_ARRAY_SECTIONS)<(*length)) mylength++; lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(lfsr113_state))); lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); lfsr113_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(lfsr113_state),cudaMemcpyHostToDevice)); lfsr113_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); lfsr113_kernel_generate_array_double<<<lfsr113_BLOCKS,lfsr113_THREADS>>>(dev_state,dev_out,dev_length); lfsr113_CUDA_CALL(cudaGetLastError()); lfsr113_CUDA_CALL(cudaFree(dev_state)); lfsr113_CUDA_CALL(cudaFree(dev_length)); } extern "C" __host__ void lfsr113_generate_array_(lfsr113_state* state, unsigned int* out, unsigned int* length){ long mylength = (*length)/lfsr113_ARRAY_SECTIONS; lfsr113_state* dev_state; unsigned int* dev_out; long* dev_length; if((mylength*lfsr113_ARRAY_SECTIONS)<(*length)) mylength++; lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(lfsr113_state))); lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_out,mylength*lfsr113_ARRAY_SECTIONS*sizeof(unsigned int))); lfsr113_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); lfsr113_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(lfsr113_state),cudaMemcpyHostToDevice)); lfsr113_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); lfsr113_kernel_generate_array<<<lfsr113_BLOCKS,lfsr113_THREADS>>>(dev_state,dev_out,dev_length); lfsr113_CUDA_CALL(cudaGetLastError()); lfsr113_CUDA_CALL(cudaMemcpy(out,dev_out,(*length)*sizeof(unsigned int),cudaMemcpyDeviceToHost)); lfsr113_CUDA_CALL(cudaFree(dev_state)); lfsr113_CUDA_CALL(cudaFree(dev_out)); lfsr113_CUDA_CALL(cudaFree(dev_length)); }
18,938
/* * Author: Yair Schiff * Project: Fall 2018 CSCI.GA 3033-004: GPUs * Instructor: Prof. Zahran * * Project Description: This project explores the efficient implementation of the Floyd-Warshall (FW) algorithm, a * solution for the All-Pairs-Shortest-Path (APSP) and Transitive Closure problems. This project will compare sequential * (CPU) and parallel (GPU) versions of the algorithm. */ #include <ctype.h> #include <cuda.h> //#include <cuda_profiler_api.h> #include <getopt.h> #include <limits.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> /***************************************************************** * Macros *****************************************************************/ #define MAX_GRAPH 397020 // max graph size #define MAX_BUF 1000 // integer size of buffer for file reading #define index(i, j, N) ((i)*(N)) + (j) // To index element (i,j) of a 2D array stored as 1D // Macro for error checking cuda API calls #define CUDA_ERROR_CHECK(err) {\ if (err != cudaSuccess) {\ fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);\ exit(1);\ }\ } /***************************************************************** * Forward declarations *****************************************************************/ char* concat(const char *s1, const char *s2); unsigned int convert(char *st); void read_input(const char *fn, int *adj_matrix, unsigned int N); void preprocess_graph(int *adj_matrix, int *go_to, unsigned int N); void preprocess_graph_parallel(int *adj_matrix, int *go_to, unsigned int N); void print_adj(int *adj_matrix, unsigned int N); void save_path(const char *fn, int *adj_matrix, int *go_to, unsigned int N); void save_path_recursive(FILE * f, int *go_to, unsigned int i, unsigned int j, unsigned int N); void FW_sequential(int *adj_matrix, int *go_to, unsigned int N); void FW_parallel(int *adj_matrix, int *go_to, unsigned int N); 
/*****************************************************************/
/*****************************************************************
 * main method
 *
 * Parses <input> <N> <CPU/GPU> <verbose>, loads the adjacency matrix, runs
 * Floyd-Warshall either sequentially or on the GPU, times the run with
 * clock(), and writes the reconstructed paths to ../outputs/.
 *****************************************************************/
int main(int argc, char *argv[]) {
    // Check that correct number of command line arguments given
    if (argc != 5) {
        fprintf(stderr, "usage: FW_seq <input> <N> <CPU/GPU> <verbose>\n");
        fprintf(stderr, "input = file containing adjacency matrix for the graph\n");
        fprintf(stderr, "N = number for vertices from input graph to use\n");
        fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n");
        fprintf(stderr, "verbose = false: if flag is set (i.e. 1 is passed) then original adjacency matrix and APSP "
                        "solution will be printed.\n");
        exit(1);
    }
    // Parse command line arguments
    const char *input_file_name = argv[1]; // input file
    unsigned int N; // Number of vertices to use
    N = convert(argv[2]); // returns 0 if argv[2] is not purely digits
    if (N > MAX_GRAPH) {
        fprintf(stderr, "Max graph size allowed %u x %u. Defaulting to this size.", MAX_GRAPH, MAX_GRAPH);
        N = MAX_GRAPH;
    }
    int type_of_device = 0; // CPU or GPU
    type_of_device = atoi(argv[3]);
    int verbose = 0;
    verbose = atoi(argv[4]);
    // Allocate memory for NxN adjacency matrix (calloc: non-edges start as 0)
    int *adj_matrix;
    adj_matrix = (int *) calloc( N * N, sizeof(int));
    if (adj_matrix == NULL) {
        fprintf(stderr, "malloc for adjacency matrix of size %u x %u failed.", N, N);
        exit(1);
    }
    // Allocate memory for NxN go_to matrix (next/intermediate vertex on path):
    int *go_to;
    go_to = (int *) malloc(sizeof(int) * N * N);
    if (go_to == NULL) {
        fprintf(stderr, "malloc for go_to matrix of size %u x %u failed.", N, N);
        exit(1);
    }
    // Read input and populate edges
    printf("Reading in graph input .txt file...\n");
    read_input(input_file_name, adj_matrix, N);
    // Pre-process adjacency matrix and next index matrix
    printf("Pre-processing adjacency and next index matrices...\n");
    //preprocess_graph_parallel(adj_matrix, go_to, N);
    preprocess_graph(adj_matrix, go_to, N);
    if (verbose) print_adj(adj_matrix, N);
    // Declare variables for tracking time
    double time_taken;
    clock_t clock_start, clock_end;
    // Dispatch FW to either sequential or parallel version based on flag passed in
    if (!type_of_device) { // The CPU sequential version
        printf("Running FW algorithm on graph (sequentially)...\n");
        clock_start = clock();
        FW_sequential(adj_matrix, go_to, N);
        clock_end = clock();
        time_taken = ((double) clock_end - clock_start) / CLOCKS_PER_SEC;
        printf("Time taken to run FW algorithm sequentially: %lf seconds\n", time_taken);
    }
    else { // The GPU version
        printf("Running FW algorithm on graph (in parallel)...\n");
        clock_start = clock();
        //cudaProfilerStart();
        FW_parallel(adj_matrix, go_to, N);
        //cudaProfilerStop();
        clock_end = clock();
        time_taken = ((double) clock_end - clock_start) / CLOCKS_PER_SEC;
        printf("Time taken to run FW algorithm in parallel: %lf seconds\n", time_taken);
    }
    // Save solution path between every pair of vertices to file solution_path_<N>.txt
    // NOTE(review): both concat() results are leaked; harmless at program exit.
    const char *outfile_name = concat(concat("../outputs/solution_path_", argv[2]), ".txt");
    printf("Saving solution path to file %s...\n", outfile_name);
    save_path(outfile_name, adj_matrix, go_to, N);
    free(adj_matrix);
    free(go_to);
    return 0;
}

/*******************************************************************************************************************
 * Floyd-Warshall algorithm to solve APSP problem sequentially.
 * Relaxes every pair (i,j) through every intermediate vertex k in place;
 * go_to[i][j] records the last intermediate k that improved the path.
 *******************************************************************************************************************/
void FW_sequential(int *adj_matrix, int *go_to, unsigned int N) {
    unsigned int i, j, k;
    //#pragma parallel
    for (k = 0; k < N; k++) {
        // #pragma loop --> first try w/no gangs and workers then add
        for (i = 0; i < N; i++) {
            // #pragma loop --> first try w/no gangs and workers then add
            for (j = 0; j < N; j++) {
                if (adj_matrix[index(i, j, N)] > (adj_matrix[index(i, k, N)] + adj_matrix[index(k, j, N)])) {
                    adj_matrix[index(i, j, N)] = adj_matrix[index(i, k, N)] + adj_matrix[index(k, j, N)];
                    go_to[index(i, j, N)] = (int) k;
                }
            }
        }
    }
}

/*******************************************************************************************************************
 * 'Plain' kernel for running inner double for-loops of FW in parallel.
 * One thread per (i,j) cell; launched once per intermediate vertex k.
 *******************************************************************************************************************/
__global__ void FW_kernel_plain(int *adj_matrix, int *go_to, unsigned int N, int k) {
    unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N && j < N) { // Boundary check
        if (adj_matrix[index(i, j, N)] > (adj_matrix[index(i, k, N)] + adj_matrix[index(k, j, N)])) {
            adj_matrix[index(i, j, N)] = adj_matrix[index(i, k, N)] + adj_matrix[index(k, j, N)];
            go_to[index(i, j, N)] = k;
        }
    }
}

/*******************************************************************************************************************
 * Floyd-Warshall algorithm to solve APSP problem on GPU.
 * Copies both matrices to the device, launches FW_kernel_plain once per k
 * (kernel launch order provides the required serialization between k steps),
 * then copies the results back.
 *******************************************************************************************************************/
void FW_parallel(int *adj_matrix, int *go_to, unsigned int N) {
    // Allocate memory on GPU for NxN adjacency and next index matrices
    size_t num_bytes = sizeof(int) * N * N; // number of bytes for N x N matrix
    int *adj_matrix_d;
    int *go_to_d;
    cudaError_t err = cudaMalloc((void **) &adj_matrix_d, num_bytes);
    CUDA_ERROR_CHECK(err);
    err = cudaMemcpy(adj_matrix_d, adj_matrix, num_bytes, cudaMemcpyHostToDevice);
    CUDA_ERROR_CHECK(err);
    err = cudaMalloc((void **) &go_to_d, num_bytes);
    CUDA_ERROR_CHECK(err);
    err = cudaMemcpy(go_to_d, go_to, num_bytes, cudaMemcpyHostToDevice);
    CUDA_ERROR_CHECK(err);
    // Get warp size from device properties and set it as block size
    cudaDeviceProp dev_prop;
    err = cudaGetDeviceProperties(&dev_prop, 0);
    CUDA_ERROR_CHECK(err);
    int warp_size = dev_prop.warpSize;
    int dim_helper = ceil(N/((double) warp_size)); // ceil-div so the grid covers all N
    dim3 dimGrid(dim_helper, dim_helper);
    dim3 dimBlock(warp_size, warp_size);
    // Run FW triple-loop by launching a new kernel for each k
    unsigned int k;
    for (k = 0; k < N; k++) {
        FW_kernel_plain<<<dimGrid, dimBlock>>>(adj_matrix_d, go_to_d, N, (int) k);
        err = cudaGetLastError();
        CUDA_ERROR_CHECK(err);
    }
    // Copy solution back to host (cudaMemcpy synchronizes with the last kernel)
    err = cudaMemcpy(adj_matrix, adj_matrix_d, num_bytes, cudaMemcpyDeviceToHost);
    CUDA_ERROR_CHECK(err);
    err = cudaFree(adj_matrix_d);
    CUDA_ERROR_CHECK(err);
    err = cudaMemcpy(go_to, go_to_d, num_bytes, cudaMemcpyDeviceToHost);
    CUDA_ERROR_CHECK(err);
    err = cudaFree(go_to_d);
    CUDA_ERROR_CHECK(err);
}

/*******************************************************************************************************************
 * Concatenate two strings into a freshly malloc'd buffer (caller owns it).
 *******************************************************************************************************************/
char* concat(const char *s1, const char *s2) {
    void *result = malloc(strlen(s1) + strlen(s2) + 1); // +1 for the null-terminator
    if (!result) {
        fprintf(stderr, " Cannot allocate the concatenated string\n");
        exit(1);
    }
    strcpy((char *) result, s1);
    strcat((char *) result, s2);
    return (char *) result;
}

/*******************************************************************************************************************
 * Convert command line input to integer; returns 0 for any non-digit input.
 * Code taken from https://stackoverflow.com/questions/34206446/how-to-convert-string-into-unsigned-int-c
 *******************************************************************************************************************/
unsigned int convert(char *st) {
    char *x;
    for (x = st ; *x ; x++) {
        if (!isdigit(*x)) return 0L;
    }
    return (strtoul(st, 0L, 10));
}

/*******************************************************************************************************************
 * Read input graph file and populate adjacency matrix.
 * Expected line format: <v1>|<v2>|<edge>; vertices are 1-based in the file.
 * NOTE(review): the fgets() return value is unchecked (EOF only detected via
 * feof after processing), and `i <= N` compares signed i against unsigned N —
 * a line with vertex 0 or a negative vertex would index out of bounds.
 * Confirm input files always use vertices >= 1.
 *******************************************************************************************************************/
void read_input(const char *fn, int *adj_matrix, unsigned int N) {
    FILE *input = fopen(fn, "r");
    if (input == NULL) {
        fprintf(stderr, "Error while opening the file.\n");
        exit(1);
    }
    char buffer[MAX_BUF];
    // Read file
    int line = 0;
    while (1) {
        line++;
        fgets(buffer, MAX_BUF, input); // get next line
        // Skip lines starting with '#' and empty lines
        if (buffer[0] == '#' || buffer[0] == '\n' || buffer[0] == ' ') continue;
        int i; // row
        int j; // column
        int rel; // relationship (take absolute value, below)
        int rc = sscanf(buffer, "%d|%d|%d",&i, &j, &rel);
        if (rc != 3) {
            fprintf(stderr, "Input file not well formatted (Line %d). "
                            "Expected format of graph lines: <v1>|<v2>|<edge>.\n", line);
            exit(1);
        }
        if (i <= N && j <= N) adj_matrix[index(i-1, j-1, N)] = abs(rel);
        if (feof(input)) break;
    }
    // Close file
    fclose(input);
}

/*******************************************************************************************************************
 * Pre-process adjacency matrix and next index matrix:
 * Fill non-edges with int_max/2 in adjacency matrix and -1 in next index on path matrix
 * (INT_MAX/2 keeps additions of two "infinities" from overflowing).
 * This will use GPU kernel as task is highly parallel (even though kernel will experience a lot of branch divergence)
 *******************************************************************************************************************/
__global__ void preprocess_kernel(int *adj_matrix, int *go_to, unsigned int N) {
    unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N && j < N) { // Boundary check
        if (adj_matrix[index(i, j, N)] >= 1) {
            go_to[index(i, j, N)] = j; // direct edge: next hop is j itself
        }
        else {
            adj_matrix[index(i, j, N)] = INT_MAX / 2;
            go_to[index(i, j, N)] = -1; // no path known yet
        }
    }
}

// Host wrapper for preprocess_kernel: copies matrices to the device, runs the
// kernel with warp-size square blocks, and copies the results back.
void preprocess_graph_parallel(int *adj_matrix, int *go_to, unsigned int N) {
    int num_bytes = sizeof(int) * N * N;
    int *adj_matrix_d;
    int *go_to_d;
    cudaError_t err = cudaMalloc((void **) &adj_matrix_d, num_bytes);
    CUDA_ERROR_CHECK(err);
    err = cudaMemcpy(adj_matrix_d, adj_matrix, num_bytes, cudaMemcpyHostToDevice);
    CUDA_ERROR_CHECK(err);
    err = cudaMalloc((void **) &go_to_d, num_bytes);
    CUDA_ERROR_CHECK(err);
    err = cudaMemcpy(go_to_d, go_to, num_bytes, cudaMemcpyHostToDevice);
    CUDA_ERROR_CHECK(err);
    // Get warp size from device properties and set it as block size
    cudaDeviceProp dev_prop;
    err = cudaGetDeviceProperties(&dev_prop, 0);
    CUDA_ERROR_CHECK(err);
    int warp_size = dev_prop.warpSize;
    int dim_helper = ceil(N/((double) warp_size));
    dim3 dimGrid(dim_helper, dim_helper);
    dim3 dimBlock(warp_size, warp_size);
    // Run pre-processing kernel
    preprocess_kernel<<<dimGrid, dimBlock>>>(adj_matrix_d, go_to_d, N);
    err = cudaGetLastError();
    CUDA_ERROR_CHECK(err);
    // Copy solution back to host
    err = cudaMemcpy(adj_matrix, adj_matrix_d, num_bytes, cudaMemcpyDeviceToHost);
    CUDA_ERROR_CHECK(err);
    err = cudaFree(adj_matrix_d);
    CUDA_ERROR_CHECK(err);
    err = cudaMemcpy(go_to, go_to_d, num_bytes, cudaMemcpyDeviceToHost);
    CUDA_ERROR_CHECK(err);
    err = cudaFree(go_to_d);
    CUDA_ERROR_CHECK(err);
}

// Sequential preprocessing: same contract as the parallel version above.
void preprocess_graph(int *adj_matrix, int *go_to, unsigned int N) {
    unsigned int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            if (adj_matrix[index(i, j, N)] >= 1) {
                go_to[index(i, j, N)] = j;
            }
            else {
                adj_matrix[index(i, j, N)] = INT_MAX / 2;
                go_to[index(i, j, N)] = -1;
            }
        }
    }
}

/*******************************************************************************************************************
 * Print adjacency matrix read in from file (as a bordered table; '-' marks
 * a non-edge, i.e. the INT_MAX/2 sentinel).
 *******************************************************************************************************************/
void print_adj(int *adj_matrix, unsigned int N) {
    unsigned int i, j;
    printf("Original adjacency matrix:\n");
    printf(" |");
    for (i = 0; i < N; i++) printf(" %2d |", i+1);
    printf("\n");
    for (i = 0; i <= N; i++) printf("----|");
    printf("\n");
    for (i = 0; i < N; i++) {
        printf(" %2d |", i+1);
        for (j = 0; j < N; j++) {
            if (adj_matrix[index(i, j, N)] != INT_MAX/2) printf(" %2d |", adj_matrix[index(i, j, N)]);
            else printf(" - |");
        }
        printf("\n");
        for (j = 0; j <= N; j++) printf("----|");
        printf("\n");
    }
}
/******************************************************************************************************************* * Print path between all vertex pairs i,j *******************************************************************************************************************/ void save_path(const char *fn, int *adj_matrix, int *go_to, unsigned int N) { FILE *output = fopen(fn, "w"); if (output == NULL) { fprintf(stderr, "Error while opening the file.\n"); exit(1); } unsigned int i, j; fprintf(output, "APSP solution:\n"); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (go_to[index(i, j, N)] == -1) { fprintf(output, "No path exists between %u and %u.\n", i+1, j+1); } else { fprintf(output, "Path from %u to %u (length: %d): %u", i+1, j+1, adj_matrix[index(i, j, N)], i+1); save_path_recursive(output, go_to, i, j, N); fprintf(output, "\n"); } } } // Close file fclose(output); } /******************************************************************************************************************* * Recursive method for printing path *******************************************************************************************************************/ void save_path_recursive(FILE *f, int *go_to, unsigned int i, unsigned int j, unsigned int N) { unsigned int next = go_to[index(i, j, N)]; if (next == j) { fprintf(f, "->%u", next+1); return; } else { save_path_recursive(f, go_to, i, next, N); save_path_recursive(f, go_to, next, j, N); } }
18,939
#include "includes.h" __device__ int position; //index of the largest value __device__ int largest; //value of the largest value int lenString = 593; int maxNumStrings = 1000000; int threshold = 2; __global__ void search(int *d_b, int *d_c, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if((d_c[my_id] == 0) && (d_b[my_id] == largest) && (my_id < size)) { position = my_id; } }
18,940
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #include <math_constants.h> extern "C" { __global__ void rtruncnorm_kernel(float *x, int n, float *mu, float *sigma, float *a, float *b, int len_mu, int len_sigma, int len_a, int len_b, int maxRejections, int rng_c) { int rng_a=1; // These can easily be made as argument, but I'm lazy. int rng_b=2; // Usual block/thread indexing... int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; // Determine indexes for vectors if using recycling. // Note: A good programmer would avoid the code repitions. int ind_mu = ((len_mu<2) ? 0 : (idx % len_mu)); int ind_sigma = ((len_sigma<2) ? 0 : (idx % len_sigma)); int ind_a = ((len_a<2) ? 0 : (idx % len_a)); int ind_b = ((len_b<2) ? 0 : (idx % len_b)); if (idx < n){ // Set up RNG curandState rng; curand_init(rng_a+idx*rng_b, rng_c, 0, &rng); // Sample truncated normal, doing rejection-sampling for(int i=0; i<maxRejections; i++) { float samp=mu[ind_mu]+sigma[ind_sigma]*curand_normal(&rng); if(a[ind_a]<=samp && samp<=b[ind_b]) { x[idx]=samp; return; } } // Could not sample using rejection-sampling. // Simply sample from Uniform(a,b). x[idx]=curand_uniform(&rng)*(a[ind_a]-b[ind_b])+a[ind_a]; } return; } } // END extern "C"
18,941
#include "includes.h" __global__ void FindClosestPoint(float3 *points, int *closestPoint, const int numberPoints) { // used to identify the thread that is currently running int idx = blockIdx.x * blockDim.x + threadIdx.x; // now find the closest point to each point // 'i' represents the current point that we are finding the closest point to! int distanceBetweenPoints = 9999999, tempDistance = 0; for (int j = 0; j < numberPoints; j++) if (idx != j) // dont check the distance between the point and itself { tempDistance = pow((points[idx].x - points[j].x), 2) + pow((points[idx].y - points[j].y), 2); if (tempDistance < distanceBetweenPoints) { distanceBetweenPoints = tempDistance; closestPoint[idx] = j; } } }
18,942
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define BLOCK_SIZE 16 __global__ void matrixMult(const double *A, const double *B, double *C, int K, int N) { int i0 = blockDim.y * blockIdx.y + threadIdx.y; int j0 = blockDim.x * blockIdx.x + threadIdx.x; double sum = 0; for (int k = 0; k < K; k++) sum += A[i0 * K + k] * B[k * N + j0]; C[N * i0 + j0] = sum; } void init_matrix_rnd(double* &matrix, int number_row, int number_col) { for (size_t i = 0; i < number_row * number_col; i++) matrix[i] = double(rand()) / double(1000); } int main() { //start, stop - for Kernel time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // количество строк и столбцов матриц A[MxK] и B[KxN] int M = 32, K = 48, N = 32; // Размеры матриц A и B должны нацело делиться на размер блока. size_t Asize = M * K * sizeof(double); size_t Bsize = K * N * sizeof(double); size_t Csize = M * N * sizeof(double); double *h_A = (double *)malloc(Asize); double *h_B = (double *)malloc(Bsize); double *h_C = (double *)malloc(Csize); init_matrix_rnd(h_A, M, K); init_matrix_rnd(h_B, K, N); double *d_A = NULL; cudaMalloc((void **)&d_A, Asize); double *d_B = NULL; cudaMalloc((void **)&d_B, Bsize); double * d_C = NULL; cudaMalloc((void **)&d_C, Csize); cudaMemcpy(d_A, h_A, Asize, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, Bsize, cudaMemcpyHostToDevice); dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE); dim3 blocksPerGrid = dim3(N / BLOCK_SIZE, M / BLOCK_SIZE); cudaEventRecord(start, 0); matrixMult<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, K, N); cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); float KernelTime; cudaEventElapsedTime( &KernelTime, start, stop); printf("KernelTime: %.2f milliseconds\n", KernelTime); cudaMemcpy(h_C, d_C, Csize, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); cudaEventDestroy( start ); cudaEventDestroy( stop ); return 0; }
18,943
#include "includes.h" __global__ void matrixMultiplyTiled(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int col = blockIdx.x * TILE_WIDTH + tx; unsigned int row = blockIdx.y * TILE_WIDTH + ty; float acc = 0; for (int t = 0; t < (numAColumns-1)/TILE_WIDTH + 1; ++t) { unsigned int ATilePitch = t * TILE_WIDTH + tx; unsigned int BTilePitch = t * TILE_WIDTH + ty; if (row < numARows && ATilePitch < numAColumns) ds_A[ty][tx] = A[row * numAColumns + ATilePitch]; else ds_A[ty][tx] = 0; if (col < numBColumns && BTilePitch < numBRows) ds_B[ty][tx] = B[BTilePitch * numBColumns + col]; else ds_B[ty][tx] = 0; __syncthreads(); #pragma unroll for (int k = 0; k < TILE_WIDTH; ++k) acc += ds_A[ty][k] * ds_B[k][tx]; __syncthreads(); } if (row < numCRows && col < numCColumns) C[row * numCColumns + col] = acc; }
18,944
//##########################################################//
//  Name: Kirtan Mali                                       //
//  Roll no: 18AG10016                                      //
//  Question 1: 2D Convolution Matrix                       //
//##########################################################//

#include <stdio.h>
#include <stdlib.h>

// Cuda Libraries
#include <cuda.h>
#include <cuda_runtime.h>

// Macro for error checking and debugging
#define CHECK(call) { \
    const cudaError_t error = call; \
    if (error != cudaSuccess) { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
        exit(1); \
    } \
}

typedef long long int lli;

#define MAX_VAL 100

// Important parameters
#define KERNEL_SIZE 3
#define KERNEL_HALF (KERNEL_SIZE >> 1)
#define BLOCK_SIZE 32
#define TILE_SIZE (BLOCK_SIZE - KERNEL_SIZE + 1)

// Function prototypes
void printMat(float *matrix, lli n);
void convolution_2D_HOST(float *matrix, float *output, int n, int kernelH, int kernelW);
float *createMat(lli n, int isempty, int seed);

// Convolution Kernel: box-average (all kernel weights = 1/9) over an n x n
// matrix with zero padding at the borders. Each BLOCK_SIZE x BLOCK_SIZE block
// stages a halo tile in shared memory and produces a TILE_SIZE x TILE_SIZE
// patch of output.
__global__ void convolution_2D_DEVICE(float *matrix, float *output, int n)
{
    __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE];

    // get thread indices
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // get the output indices
    int row_o = ty + blockIdx.y * TILE_SIZE;
    int col_o = tx + blockIdx.x * TILE_SIZE;

    // shift to obtain input indices (halo offset)
    int row_i = row_o - KERNEL_HALF;
    int col_i = col_o - KERNEL_HALF;

    // Load tile elements; out-of-image cells become the zero padding.
    if(row_i >= 0 && row_i < n && col_i >= 0 && col_i < n)
        tile[ty][tx] = matrix[row_i*n + col_i];
    else
        tile[ty][tx] = 0.0f;
    __syncthreads();

    // Only the inner TILE_SIZE x TILE_SIZE threads produce output.
    if(tx < TILE_SIZE && ty < TILE_SIZE){
        float pValue = 0.0f;
        for(int y=0; y<KERNEL_SIZE; y++)
            for(int x=0; x<KERNEL_SIZE; x++)
                // Fixed: divide by (float)(KERNEL_SIZE*KERNEL_SIZE) instead of
                // the double literal 9.0 -- keeps the arithmetic in single
                // precision and tracks the KERNEL_SIZE macro.
                pValue += tile[y+ty][x+tx] / (float)(KERNEL_SIZE * KERNEL_SIZE);
        if(row_o < n && col_o < n) {
            output[row_o*n + col_o] = pValue;
        }
    }
}

int main(int argc, char **argv)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    int isprint = 1;
    if (argc > 1) {
        printf("\n\nDisabling Printing ...\n\n");
        isprint = 0;
    }

    lli t;
    scanf("%lld", &t); // number of test cases
    while (t--) {
        srand(t);
        lli n;
        scanf("%lld", &n); // matrix dimension for this test case
        size_t size = sizeof(float) * n * n;

        float *h_matrix = createMat(n, 0, t);
        float *h_output = createMat(n, 1, t);

        float *d_matrix = NULL;
        float *d_output = NULL;
        CHECK(cudaMalloc((void **)&d_matrix, size));
        CHECK(cudaMalloc((void **)&d_output, size));
        CHECK(cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice));

        dim3 blockSize, gridSize;
        blockSize.x = BLOCK_SIZE, blockSize.y = BLOCK_SIZE, blockSize.z = 1;
        // grid covers the image in TILE_SIZE (not BLOCK_SIZE) steps because
        // each block only produces a TILE_SIZE x TILE_SIZE output patch
        gridSize.x = ceil((float)n/TILE_SIZE), gridSize.y = ceil((float)n/TILE_SIZE), gridSize.z = 1;

        convolution_2D_DEVICE<<<gridSize, blockSize>>>(d_matrix, d_output, n);
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to launch convolution_2D_DEVICE kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // convolution_2D_HOST(h_matrix, h_output, n, 3, 3);

        CHECK(cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost));

        if (isprint == 1) {
            printf("\n\n***** Original Matrix *****\n\n");
            printMat(h_matrix, n);
            printf("\n\n***** Convolved Matrix Output *****\n\n");
            printMat(h_output, n);
        }

        // Fixed: the original leaked every buffer on each iteration of the
        // test-case loop; release device and host memory before the next case.
        CHECK(cudaFree(d_matrix));
        CHECK(cudaFree(d_output));
        free(h_matrix);
        free(h_output);
    }
    return 0;
}

// Utility Functions

// Allocate an n x n matrix; zero-filled when isempty, otherwise filled with
// pseudo-random floats in [0, MAX_VAL] seeded from `seed`.
float *createMat(lli n, int isempty, int seed)
{
    srand(seed+1);
    size_t size = sizeof(float) * n * n;
    float *matrix = (float *)malloc(size);
    for (int i=0; i<n*n; i++) {
        if (isempty == 1) matrix[i] = 0.0f;
        else matrix[i] = (float)rand()/((float)RAND_MAX/MAX_VAL);
    }
    return matrix;
}

// Print a flat n x n matrix, one row per line, two decimals per element.
void printMat(float *matrix, lli n)
{
    for (lli i=0; i<n*n; i++) {
        printf("%0.2f ", matrix[i]);
        if (i % n == n-1) printf("\n");
    }
}

// CPU reference implementation of the same zero-padded box average, kept for
// verifying the kernel output.
void convolution_2D_HOST(float *matrix, float *output, int n, int kernelH, int kernelW)
{
    for (lli i=0; i<n; i++) {
        for (lli j=0; j<n; j++) {
            lli startx = i - (kernelH/2);
            lli starty = j - (kernelW/2);
            float newval = 0.0;
            for (lli a=0; a<kernelH; a++) {
                for (lli b=0; b<kernelW; b++) {
                    if (startx + a >= 0 && startx + a < n && starty + b >= 0 && starty + b < n) {
                        newval += matrix[(startx+a)*n + (starty+b)] / (float)(kernelH*kernelW);
                    }
                }
            }
            output[i*n + j] = newval;
        }
    }
}
18,945
#include <stdlib.h> #include <cuda_profiler_api.h> #define N 1000000 __global__ void vector_add(float *out, float *a, float *b, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < n; i += stride){ out[i] = a[i] + b[i]; } } int main(){ cudaProfilerStart(); float *a, *b, *out; // Allocate memory cudaMallocManaged(&a, sizeof(float) * N); cudaMallocManaged(&b, sizeof(float) * N); cudaMallocManaged(&out, sizeof(float) * N); // Initialize array for(int i = 0; i < N; i++){ a[i] = 1.0f; b[i] = 2.0f; } // Main function int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; vector_add<<<numBlocks, blockSize>>>(out, a, b, N); cudaDeviceSynchronize(); cudaFree(a); cudaFree(b); cudaFree(out); cudaDeviceReset(); cudaProfilerStop(); }
18,946
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <cstring>
#include <math.h>

using namespace std;

// Elapsed wall-clock time between two gettimeofday() samples, in seconds.
// Fixed: the original MULTIPLIED the seconds delta by the microseconds delta
// ((sec_b-sec_a) * 1e-6 * (usec_b-usec_a)), which returns 0 whenever the
// seconds match and garbage otherwise; the two components must be ADDED.
double time_diff(timeval a, timeval b){
    return (b.tv_sec - a.tv_sec) + pow(10,-6) * (b.tv_usec - a.tv_usec);
}

// Fill ary[0..n) with pseudo-random floats in [0, 1].
void init_array(float* ary, int n){
    for(int i = 0; i<n; i++){
        ary[i] = (float)rand()/(float(RAND_MAX));
    }
}

// Reference CPU SAXPY: y := a*x + y.
void saxpy_cpu(float* x, float* y, float a, int n){
    for(int i=0; i<n; i++){
        y[i] = a * x[i] + y[i];
    }
}

// GPU SAXPY: one thread per element, bounds-guarded.
__global__ void saxpy_gpu(float* x, float* y, float a, int n){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i<n ) {
        y[i] = a * x[i] + y[i];
    }
}

int main (int argc, char **argv){
    long n;             //problem size
    float a = 2.0;      //alpha factor in equation, hardcoded
    bool pinned_mem;    //use pinned memory or not
    long factor = 1;    //for command line parsing
    char *pos = NULL;

    if (argc != 4) {
        // Fixed: the original had its usage message commented out and exited
        // with status 0 (success); report the problem and signal failure.
        fprintf (stderr, "Usage: %s <problem size{k,M,G}> <block size> <pinned{0|1}>\n", argv[0]);
        exit (1);
    }
    // Accept k/M/G suffixes on the problem size.
    pos = strrchr (argv[1], 'k');
    if (pos != NULL) {
        factor = 1024;
        *pos = '\0'; //terminate input string here
    }
    pos = strrchr (argv[1], 'M');
    if (pos != NULL) {
        factor = 1024*1024;
        *pos = '\0'; //terminate input string here
    }
    pos = strrchr (argv[1], 'G');
    if (pos != NULL) {
        factor = 1024*1024*1024;
        *pos = '\0'; //terminate input string here
    }
    n = atol (argv[1]);
    n *= factor;
    pinned_mem = atoi(argv[3]);

    long numThreadsPerBlock;
    int selectedDevice = 0;
    numThreadsPerBlock = atol (argv[2]);
    int numBlocks = (n+numThreadsPerBlock-1) / numThreadsPerBlock;
    if (numThreadsPerBlock > 1024) {
        cout << "Err: numThready <= 1024 " << endl;
        return 0;
    }
    if (numBlocks >= 65536) {
        cout << "Err: numBlocks < 65536" << endl;
        return 0;
    }

    //for timing
    timeval start, stop;
    double t_init;
    double t_copy;
    double t_cpu;
    double t_gpu;
    double t_back;

    /////////////////////////////////////
    // (1) initialisations:
    //     - perform basic sanity checks
    //     - set device
    /////////////////////////////////////
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        // Fixed: restored the (previously commented-out) diagnostic so the
        // failure is not silent; also corrected the "fount" typo.
        fprintf(stderr, "Sorry, no CUDA device found\n");
        return 1;
    }
    if (selectedDevice >= deviceCount) {
        fprintf(stderr, "Choose device ID between 0 and %d\n", deviceCount-1);
        return 1;
    }
    cudaSetDevice(selectedDevice);
    cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated

    //allocate mem
    float* x;
    float* y;
    float* d_x;
    float* d_y;
    //where to alloc host vars
    if (pinned_mem){
        cudaMallocHost((void**) &x, n*sizeof(float));
        cudaMallocHost((void**) &y, n*sizeof(float));
    }
    else {
        x = (float*) malloc(n*sizeof(float));
        y = (float*) malloc(n*sizeof(float));
    }
    //allocate device vars on GPU
    cudaMalloc((void**)&d_x, n*sizeof(float));
    cudaMalloc((void**)&d_y, n*sizeof(float));

    //init arrays on CPU
    gettimeofday(&start, NULL);
    init_array(x, n);
    init_array(y, n);
    gettimeofday(&stop, NULL);
    t_init = time_diff(start,stop);

    // copy to GPU
    gettimeofday(&start, NULL);
    cudaMemcpy(d_x, x, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, n*sizeof(float), cudaMemcpyHostToDevice);
    gettimeofday(&stop, NULL);
    t_copy = time_diff(start, stop);

    // do operation on CPU
    gettimeofday(&start, NULL);
    saxpy_cpu(x, y, a, n);
    gettimeofday(&stop, NULL);
    t_cpu = time_diff(start, stop);

    // do operation on GPU (synchronize so the timing covers the kernel)
    gettimeofday(&start, NULL);
    saxpy_gpu<<<numBlocks, numThreadsPerBlock>>>(d_x, d_y, a, n);
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    t_gpu = time_diff(start, stop);

    //write back and compare
    float* tmp_y;
    if (pinned_mem){
        cudaMallocHost((void**) &tmp_y, n*sizeof(float));
    }
    else {
        tmp_y = (float*) malloc(n*sizeof(float));
    }
    gettimeofday(&start, NULL);
    cudaMemcpy(tmp_y, d_y, n*sizeof(float), cudaMemcpyDeviceToHost);
    gettimeofday(&stop, NULL);
    t_back = time_diff(start, stop);

    int err_count = 0;
    for(int i = 0; i<n; i++){
        // Fixed: use fabs -- plain abs may resolve to the integer overload,
        // truncating sub-1.0 differences to 0 and hiding real mismatches.
        if(fabs(tmp_y[i]-y[i]) > 1e-6){
            cout << "Error on comparison on index: " << i << endl;
            err_count++;
        }
    }
    cout << "Error count: " << err_count << endl;

    //cleanup
    if(pinned_mem){
        cudaFreeHost(x);
        cudaFreeHost(y);
        cudaFreeHost(tmp_y);
    }
    else{
        free(x);
        free(y);
        free(tmp_y);
    }
    cudaFree(d_x);
    cudaFree(d_y);

    //report timing
    cout << "Initialization: " << t_init << " s\n";
    cout << "Copy to GPU:    " << t_copy << " s\n";
    cout << "Sequential CPU: " << t_cpu << " s\n";
    cout << "Parallel GPU:   " << t_gpu << " s\n";
    cout << "Writeback:      " << t_back << " s\n";
    return 0;
}
18,947
#include <iostream>

#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)

using namespace std;

// Abort with a readable message when a CUDA runtime call fails.
#define CUDA_CHECK_RETURN(value) {\
	cudaError_t _m_cudaStat = value;\
	if (_m_cudaStat != cudaSuccess) {\
		fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
		exit(1);\
	}}

// Smooths a and b over a small neighbourhood (indices wrapped at 256) and
// averages the two smoothed values into c.  Operates on one N-element chunk
// of device memory per launch.
__global__ void kernel(int *a, int *b, int *c)
{
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx < N) {
		int idx1 = (idx + 1) % 256;
		int idx2 = (idx + 2) % 256;
		float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
		float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
		c[idx] = (as + bs) / 2;
	}
}

int main()
{
	cudaDeviceProp prop;
	int whichDevice;
	CUDA_CHECK_RETURN(cudaGetDevice(&whichDevice));
	CUDA_CHECK_RETURN(cudaGetDeviceProperties(&prop, whichDevice));
	if (!prop.deviceOverlap) {
		cout << "Device does not support overlapping" << endl;
		return 0;
	}

	int *host_a, *host_b, *host_c;
	int *dev_a, *dev_b, *dev_c;

	// Fixed: the device buffers were previously allocated with cudaHostAlloc
	// (pinned *host* memory) and sized for the full data set, yet the async
	// copies and the kernel treat them as *device* chunks of N elements.
	CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, N * sizeof(int)));
	CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_b, N * sizeof(int)));
	CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_c, N * sizeof(int)));

	// Pinned host buffers: required for truly asynchronous cudaMemcpyAsync.
	CUDA_CHECK_RETURN(cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
	CUDA_CHECK_RETURN(cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
	CUDA_CHECK_RETURN(cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));

	// Fixed: the inputs were previously used uninitialized.
	for (int i = 0; i < FULL_DATA_SIZE; i++) {
		host_a[i] = i;
		host_b[i] = 2 * i;
	}

	cudaStream_t stream;
	CUDA_CHECK_RETURN(cudaStreamCreate(&stream));

	float elapsedTime;
	cudaEvent_t start, stop;
	CUDA_CHECK_RETURN(cudaEventCreate(&start));
	CUDA_CHECK_RETURN(cudaEventCreate(&stop));
	CUDA_CHECK_RETURN(cudaEventRecord(start, 0));

	// Process the data in N-sized chunks: copy in, compute, copy out, all
	// queued on one stream so successive chunks pipeline on the device.
	for (int i = 0; i < FULL_DATA_SIZE; i += N) {
		cudaMemcpyAsync(dev_a, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream);
		cudaMemcpyAsync(dev_b, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream);
		kernel <<< N / 256, 256, 0, stream >>> (dev_a, dev_b, dev_c);
		cudaMemcpyAsync(host_c + i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream);
	}
	CUDA_CHECK_RETURN(cudaStreamSynchronize(stream));

	CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
	CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
	CUDA_CHECK_RETURN(cudaDeviceSynchronize());
	CUDA_CHECK_RETURN(cudaGetLastError());
	CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
	cout << "time: " << elapsedTime << " ms" << endl;

	// Fixed: release events, stream and all memory (previously leaked).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaStreamDestroy(stream);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	cudaFreeHost(host_a);
	cudaFreeHost(host_b);
	cudaFreeHost(host_c);
	return 0;
}
18,948
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) __global__ void setup(curandState *state) { int id = threadIdx.x + blockIdx.x * 64; curand_init(1234, id, 0, &state[id]); } __global__ void generate(curandState *state, int n, unsigned int* result) { int id = threadIdx.x + blockIdx.x * 64; float x; curandState localState = state[id]; for(int i = 0; i < n; i++) { x = curand_uniform(&localState); result[i * 100 + (int)(x*100)]++; } state[id] = localState; } int main(int argc, char *argv[]) { int i, j; curandState *devStates; unsigned int *devResults, *hostResults; int samples = 10000; unsigned int r[100] = {0}; hostResults = (unsigned int *)calloc(64 * 64 * 100, sizeof(int)); CUDA_CALL(cudaMalloc((void **)&devResults, 100 * 64 * 64 * sizeof(unsigned int))); CUDA_CALL(cudaMemset(devResults, 0, 100 * 64 * 64 * sizeof(unsigned int))); CUDA_CALL(cudaMalloc((void **)&devStates, 64 * 64 * sizeof(curandState))); setup<<<64, 64>>>(devStates); generate<<<64, 64>>>(devStates, samples, devResults); CUDA_CALL(cudaMemcpy(hostResults, devResults, 100 * 64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (i = 0; i < 64 * 64; i++) { for (j = 0; j < 100; j++) { r[j] += hostResults[i * 100 + j]; } } printf("x,y\n"); for (i = 0; i < 100; i++) { printf("%d,%d\n", i, r[i]); } }
18,949
#include "includes.h" __global__ void add(const int *a, const int *b, int *dest, const size_t length) { int tid = blockIdx.x; if (tid < length) { dest[tid] = a[tid] - b[tid]; } }
18,950
#include <time.h>
#include <stdio.h>

#define THREADS 512

/* Device buffers for the brute-force QUBO search.
 *
 * Fixed: these were previously declared as `__device__` pointer symbols
 * while being allocated/copied from host code via cudaMalloc((void**)&cQ,..)
 * and cudaMemcpy(cQ, ..).  Taking the host address of a __device__ symbol is
 * invalid, and kernels reading those symbols would see uninitialized
 * pointers.  They are now ordinary host pointers to device memory and are
 * passed to the kernels as arguments. */
static float* cQ = NULL;      /* n x n quadratic coefficient matrix (device) */
static float* cb_orig = NULL; /* pristine linear coefficients (device) */
static float* cb = NULL;      /* working linear terms, updated per bit flip (device) */
static int* val = NULL;       /* current 0/1 assignment (device) */
static int* best_val = NULL;  /* best assignment found so far (device) */

/* Objective accumulators: read and written only from device code, so they
 * legitimately remain device symbols. */
__device__ float sol;       /* objective value of the current assignment */
__device__ float best_sol;  /* best (lowest) objective value seen so far */

/* Copy an n-element vector s -> t.  Launch as <<<1, n>>>. */
static __global__ void cuda_copy(float* t, float* s)
{
    int i = threadIdx.x;
    t[i] = s[i];
}

/* Zero an n-element int vector.  Launch as <<<1, n>>>. */
static __global__ void cuda_zero_int(int* t)
{
    int i = threadIdx.x;
    t[i] = 0;
}

/* Flip bit `idx` of the current assignment and propagate its effect:
 * thread 0 updates the running objective and chooses the flip direction,
 * every thread i then updates its linear term b[i], and when the new
 * objective beats the best seen so far the whole assignment is snapshotted
 * into bv.  Launch as <<<1, n>>>. */
static __global__ void cuda_changed(int idx, int n, float* q, float* b,
                                    int* v, int* bv)
{
    __shared__ int update;  /* 1 when the new objective is a new best */
    __shared__ float mult;  /* +1 / -1: direction of the flip */
    int va;
    int i = threadIdx.x;
    if (i == 0) {
        va = v[idx];
        if (va == 0) {
            sol -= b[idx];
            v[idx] = 1;
            mult = -1;
        } else {
            sol += b[idx];
            v[idx] = 0;
            mult = 1;
        }
    }
    __syncthreads();  /* mult and v[idx] must be visible to all threads */
    b[i] += mult*q[idx*n+i];
    if (i == 0) {
        update = 0;
        if (sol < best_sol) {
            best_sol = sol;
            update = 1;
        }
    }
    __syncthreads();  /* update must be visible before the snapshot */
    if (update == 1)
        bv[i] = v[i];
}

/* Upload the problem data (Q and b) to the device. */
static void cuda_update(float* Q, float* b, int n)
{
    cudaMemcpy(cQ, Q, n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(cb_orig, b, n*sizeof(float), cudaMemcpyHostToDevice);
}

/* Allocate all device-side storage for an n-variable problem. */
void cuda_initialize(int n)
{
    cudaMalloc((void**)&cQ, n*n*sizeof(float));
    cudaMalloc((void**)&cb, n*sizeof(float));
    cudaMalloc((void**)&cb_orig, n*sizeof(float));
    cudaMalloc((void**)&val, n*sizeof(int));
    cudaMalloc((void**)&best_val, n*sizeof(int));
}

/* Release everything allocated by cuda_initialize(). */
void cuda_finalize()
{
    cudaFree(cQ);
    cudaFree(cb);
    cudaFree(cb_orig);
    cudaFree(val);
    cudaFree(best_val);
}

/* Reset the objective accumulators on the device. */
static __global__ void cuda_zero_sol()
{
    sol = 0.;
    best_sol = 0.;
}

/* Reset working state for a fresh search: b <- b_orig, current and best
 * assignments <- all zeros, objectives <- 0. */
static void cuda_prepare_brute_force(int n)
{
    cuda_copy<<< 1, n >>>(cb, cb_orig);
    cuda_zero_int<<< 1, n >>>(val);
    cuda_zero_int<<< 1, n >>>(best_val);
    cuda_zero_sol<<< 1, 1 >>>();
}

/* Exhaustively enumerate all 2^n assignments in Gray-code order, so that
 * exactly one bit changes between consecutive states and each step is a
 * single cuda_changed launch.  The best assignment found is copied to
 * `ans` (n ints). */
void cuda_brute_force(float* Q, float* b, int* ans, int n)
{
    printf("Chamado com n = %d\n", n);
    clock_t before = clock();
    int lasti = 0;
    cuda_update(Q, b, n);
    cuda_prepare_brute_force(n);
    for (int _i = 1; _i < (1 << n); _i++) {
        int i, _changed, changed;
        i = _i ^ (_i >> 1);    /* Gray code of _i */
        _changed = lasti ^ i;  /* exactly one bit set */
        lasti = i;
        for (changed = -1; _changed; _changed >>= 1)
            changed++;         /* index of the flipped bit */
        if (_i % 1000 == 0)
            printf("Antes de chamar o cuda_changed %d!!\n", _i);
        cuda_changed<<<1, n>>>(changed, n, cQ, cb, val, best_val);
        // printf("Depois de chamar o cuda_changed!!\n");
    }
    cudaMemcpy(ans, best_val, n*sizeof(int), cudaMemcpyDeviceToHost);
    clock_t after = clock();
    printf("Cuda: Brute-force for %d vars in %lf secs\n", n, (1.*(after-before))/CLOCKS_PER_SEC);
}
18,951
#include "wave.cuh"

/* ------------------------------------------------------------------------- */
/* Partial ("half") selection sort: ascending-sorts only the first size/2
 * positions of `array`.  Each pass selects the minimum of the whole
 * remaining suffix, so indices [0, size/2) end up holding the size/2
 * smallest values in order; positions from size/2 on are left unsorted. */
__device__ void __half_selection_sort(short *array, int size)
{
    int i, j;
    for (i = 0; i < size / 2; i++) {
        int min = i;  /* index of the smallest remaining element */
        for (j = i; j < size; j++) {
            if (array[j] < array[min])
                min = j;
        }
        /* swap the selected minimum into position i */
        int tmp = array[i];
        array[i] = array[min];
        array[min] = tmp;
    }
}

/* ------------------------------------------------------------------------- */
/* Integer absolute value (returns -0 == 0 for x == 0). */
__device__ inline int myabs(int x)
{
    return (x > 0) ? x : -x;
}

/* ------------------------------------------------------------------------- */
/* Despite its name, this kernel simply zeroes every sample of `data`.
 * NOTE(review): confirm whether an actual cipher was intended here. */
__global__ void wave_encrypt(short *data, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
        data[idx] = 0;
}

/* ------------------------------------------------------------------------- */
/* Rank-order filter, grid-stride over samples.  The window is the 21
 * samples at offsets -20..+20 in steps of 2 from position i (the stride of
 * 2 presumably keeps the window on one channel of interleaved data — TODO
 * confirm).  Out-of-range neighbours contribute 0, biasing edge results.
 * NOTE(review): the output is sorted[range / 2] == sorted[5], i.e. the 6th
 * smallest of 21 values (~25th percentile), not the median (sorted[10]).
 * This is consistent with __half_selection_sort only guaranteeing the
 * first 10 positions sorted, but confirm the intended statistic. */
__global__ void wave_filter_median(short *data, int size, short *buffer)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    int range = 10;
    for (; i < size; i += step) {
        short sorted[21];
        for(int j = -range; j <= range; j++) {
            sorted[j + range] = 0;  /* out-of-range samples count as 0 */
            if(((i + (j * 2)) < 0) || ((i + (j * 2)) >= size))
                continue;
            sorted[j + range] = buffer[i + (j * 2)];
        }
        __half_selection_sort(sorted, ((range * 2) + 1));
        data[i] = sorted[range / 2];
    }
}

/* ------------------------------------------------------------------------- */
/* Mean filter over the same stride-2, 21-sample window.
 * NOTE(review): the sum is divided by `range` (10) although up to
 * 2*range+1 = 21 samples are accumulated — the output is roughly doubled;
 * confirm whether this gain is intentional. */
__global__ void wave_filter_mean(short *data, int size, short *buffer)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    int range = 10;
    for (; i < size; i += step) {
        int sum = 0;
        for (int j = -range; j <= range; j++) {
            if (((i + (j * 2)) < 0) || ((i + (j * 2)) >= size))
                continue;  /* skip samples outside the buffer */
            sum += buffer[i + (j * 2)];
        }
        data[i] = (short) (sum / range);
    }
}

/* ------------------------------------------------------------------------- */
/* Gaussian-weighted smoothing over the same stride-2, 21-sample window.
 * Weights are indexed by |j| (symmetric kernel).  Each weighted term is
 * truncated to short before accumulation, and the final int sum is
 * implicitly narrowed to short on store — as written in the original. */
__global__ void wave_filter_gaussian(short *data, int size, short *buffer)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    int range = 10;
    float gaussian[] = { 0.065756, 0.065088, 0.063126, 0.059986, 0.055851,
                         0.050950, 0.045541, 0.039883, 0.034223, 0.028772,
                         0.023702 };
    for (; i < size; i += step) {
        int sum = 0;
        for (int j = -range; j <= range; j++) {
            if (((i + (j * 2)) < 0) || ((i + (j * 2)) >= size))
                continue;
            sum += (short) (gaussian[myabs(j)] * (float) buffer[i + (j * 2)]);
        }
        data[i] = sum;
    }
}
18,952
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>

#define TILE_WIDTH 32
#define IPAD 1

// CPU reference implementation: C += A * B for size x size row-major
// matrices (C must be zero-initialized by the caller).
void matricMul(int *A, int *B, int *C, int size)
{
    for (int col = 0; col < size; col++) {
        for (int row = 0; row < size; row++) {
            int outidx = col * size + row;
            for (int idx = 0; idx < size; idx++)
                C[outidx] += A[col*size+idx] * B[idx*size+row];
        }
    }
}

// Compare `size` elements of the CPU reference against the GPU result and
// report the first mismatch, if any.
void matrixMulCheck(int *C_test, int *C_cuda, int size)
{
    bool ResultFlag = true;
    for (int i = 0; i < size; i++) {
        if (C_test[i] != C_cuda[i]) {
            ResultFlag = false;
            printf("Error: C_test[%d] = %d; C_cuda[%d] = %d;\n", i, C_test[i], i, C_cuda[i]);
            break;
        }
    }
    if (ResultFlag == true)
        printf("Matrix Multiplication OK!\n");
    else
        printf("Matrix Multiplication Error!\n");
}

// Tiled matrix multiply using static shared memory padded by IPAD columns
// to avoid shared-memory bank conflicts.
// Requires blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void matrixMulGSmemPadd (int *A, int *B, int *C, int size)
{
    __shared__ int ds_A[TILE_WIDTH][TILE_WIDTH+IPAD];
    __shared__ int ds_B[TILE_WIDTH][TILE_WIDTH+IPAD];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = blockIdx.y * TILE_WIDTH + ty;
    int col = blockIdx.x * TILE_WIDTH + tx;
    int Cval = 0;
    // Fixed: ceil-divide so a partial last tile is processed when `size`
    // is not a multiple of TILE_WIDTH (the guarded loads already handle it).
    for (int i = 0; i < (size + TILE_WIDTH - 1) / TILE_WIDTH; i++) {
        if ((row < size) && (i*TILE_WIDTH+tx < size))
            ds_A[ty][tx] = A[row*size+i*TILE_WIDTH+tx];
        else
            ds_A[ty][tx] = 0;  // zero-pad out-of-range tile entries
        if ((col < size) && (i*TILE_WIDTH+ty < size))
            ds_B[ty][tx] = B[col+size*(i*TILE_WIDTH+ty)];
        else
            ds_B[ty][tx] = 0;
        __syncthreads();  // tiles fully loaded before use
        for (int j = 0; j < TILE_WIDTH; j++)
            Cval += ds_A[ty][j] * ds_B[j][tx];
        __syncthreads();  // tiles fully consumed before reload
    }
    if (row < size && col < size)
        C[row * size + col] = Cval;
}

int main()
{
    int nx = 1600;
    int ny = 1600;
    // Fixed: the kernel loads one TILE_WIDTH x TILE_WIDTH tile per block,
    // so the block must be TILE_WIDTH x TILE_WIDTH.  The previous 32x16
    // block left half of each shared tile unloaded and half of the output
    // rows uncomputed.
    dim3 block(TILE_WIDTH, TILE_WIDTH);
    dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);

    int MatrixSize = nx * ny;
    int BufferSize = MatrixSize * sizeof(int);

    int *h_A;
    int *h_B;
    int *h_C;
    int *C_test;

    // Host memory allocation
    h_A = (int*)malloc(BufferSize);
    h_B = (int*)malloc(BufferSize);
    h_C = (int*)malloc(BufferSize);
    C_test = (int*)malloc(BufferSize);

    // Fixed: initialize every element; the previous loop only covered the
    // first nx entries, leaving most of the inputs uninitialized.
    for (int i = 0; i < MatrixSize; i++) {
        h_A[i] = i % 100;
        h_B[i] = i % 100;
        h_C[i] = 0;
        C_test[i] = 0;
    }

    int *d_A;
    int *d_B;
    int *d_C;

    // Device memory allocation
    cudaMalloc((void**)&d_A, BufferSize);
    cudaMalloc((void**)&d_B, BufferSize);
    cudaMalloc((void**)&d_C, BufferSize);

    // Copy data from Host to Device
    cudaMemcpy(d_A, h_A, BufferSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, BufferSize, cudaMemcpyHostToDevice);

    // Matrix Multiplication.  The kernel uses *static* shared memory, so no
    // dynamic shared-memory size is needed at launch (the previous third
    // launch argument was unused).
    matrixMulGSmemPadd<<<grid, block>>>(d_A, d_B, d_C, nx);

    // Copy result from Device to Host
    cudaMemcpy(h_C, d_C, BufferSize, cudaMemcpyDeviceToHost);

    // Check result.
    matricMul(h_A, h_B, C_test, nx);
    // Fixed: compare the whole matrix; previously only the first nx of
    // nx*ny elements were checked.
    matrixMulCheck(C_test, h_C, MatrixSize);

    // Free memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    free(C_test);
    return 0;
}
18,953
#include "includes.h" // Include files // Parameters #define N_ATOMS 343 #define MASS_ATOM 1.0f #define time_step 0.01f #define L 10.5f #define T 0.728f #define NUM_STEPS 10000 const int BLOCK_SIZE = 1024; //const int L = ; const int scheme = 1; // 0 for explicit, 1 for implicit /*************************************************************************************************************/ /************* INITIALIZATION CODE **********/ /*************************************************************************************************************/ __global__ void forcered_simple(float * force, float * forcered){ int index = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; int findex; __shared__ float forcered_sh[3 * N_ATOMS]; //if (index == 0){ printf("In force reduction kernel! \n"); } if (index < 3 * N_ATOMS){ forcered_sh[index] = 0.0f; } __syncthreads(); if (index < 3 * N_ATOMS){ findex = int(index / N_ATOMS)*N_ATOMS*N_ATOMS + index % N_ATOMS; for (i = 0; i < N_ATOMS; i++){ forcered_sh[index] += force[findex + i*N_ATOMS]; } } __syncthreads(); if (index < 3 * N_ATOMS){ forcered[index] = forcered_sh[index]; } /*if (index == 0){ printf("forcered [0]= %f \n", forcered[0]); printf("forcered [2]= %f \n", forcered[2]); printf("forcered [4]= %f \n \n", forcered[4]); }*/ }
18,954
#include<stdio.h> __global__ void deviceKernel(){ printf("Hello World"); } int main(){ deviceKernel<<<1,32>>>(); cudaDeviceSynchronize(); return 0; }
18,955
/* ================================================================
 *
 * PyCA Project
 *
 * Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
 * rights reserved. See Copyright.txt or for details.
 *
 * This software is distributed WITHOUT ANY WARRANTY; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the above copyright notice for more information.
 *
 * ================================================================ */

#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU

/* Fixed grid shape used by every kernel here: M blocks of N threads.
 * A level-1 (L1) kernel reduces an arbitrary-length input to M per-block
 * partials; a level-2 (L2) kernel folds those M partials into one value.
 * Each operator class must provide op::identity(), op::op(a,b) and
 * op::iop(acc, v) (in-place fold). */
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif

#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8

#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)

namespace PyCA {

/* L1 reduction with a single operator: each thread folds its strided slice
 * of d_i two elements per iteration, then thread 0 of each block folds the
 * block's N partials and writes one value into res[blockIdx.x]. */
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size)
{
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    /* Consume elements j and j+M*N per iteration; the possible leftover
     * element for this thread is folded in on the line below. */
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
    shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}

/* Same as reduce_L1_kernel, but each input element is first mapped through
 * the unary operator op1 (e.g. absolute value, square) before reduction. */
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size)
{
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
    shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}

/* L1 reduction of the element-wise combination op1(d_i0[k], d_i1[k]) —
 * e.g. op1 = multiply, op = add yields a dot product. */
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size)
{
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]),
                          op1::op(d_i0[j + M*N], d_i1[j + M*N])));
    shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}

////////////////////////////////////////////////////////////////////////////////
/* L2 reduction: fold the M per-block partials d_i[0..M) with one block of
 * N threads.  When `accumulate` is true, the result is combined with the
 * previous contents of d_o instead of overwriting it. */
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i)
{
    __shared__ T shm[N];
    uint idx=threadIdx.x;
    T s=op::identity();
#if M%(2*N)==0
    for(uint j=idx; j<M; j+=N*2)
        op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
    for(uint j=idx; j<M; j+=N)
        op::iop(s, d_i[j]);
#endif
    shm[threadIdx.x]=s;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        for(uint i=0; i<N; i++)
            op::iop(s, shm[i]);
        d_o=(accumulate) ? op::op(d_o, s) : s;
    }
}

/* In-place variant: the final value overwrites d_i[0]. */
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i)
{
    reduce_L2_dev<T, op, false>(d_i[0], d_i);
}

template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i)
{
    reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}

////////////////////////////////////////////////////////////////////////////////
/* L1 pass computing two reductions (op and op1) of the same input at once,
 * e.g. max and min in a single read of the data. */
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size)
{
    __shared__ T shm[N];
    __shared__ T shm1[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2) {
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
        op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
    }
    shm[threadIdx.x]  = (j < size) ? op::op(s, d_i[j]) : s;
    shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
    __syncthreads();
#if 1
    if(threadIdx.x==0) {
        T s =op::identity();
        T s1=op1::identity();
        for(int i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
        }
        res  = s;
        res1 = s1;
    }
#else
    /* Alternative: two different threads fold the two partial arrays. */
    if(threadIdx.x==0) {
        T s =op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res = s;
    }
    if(threadIdx.x==1) {
        T s1 =op1::identity();
        for(int i=0; i<N; i++)
            op1::iop(s1, shm1[i]);
        res1 = s1;
    }
#endif
}

template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size)
{
    int blockId = blockIdx.x;
    bireduce_L1_dev<T, op, op1>
        (res[blockId], res1[blockId], d_i, size);
}

/* L2 folding of two partial arrays (d_i with op, d_i1 with op1) at once. */
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1)
{
    __shared__ T shm[N];
    __shared__ T shm1[N];
    uint idx=threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
#if M%(2*N)==0
    for(uint j=idx; j<M; j+=N*2){
        op::iop(s, op::op(d_i[j], d_i[j + N]));
        op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
    }
#else
    for(uint j=idx; j<M; j+=N){
        op::iop(s, d_i[j]);
        op1::iop(s1, d_i1[j]);
    }
#endif
    shm[threadIdx.x]=s;
    shm1[threadIdx.x]=s1;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        T s1=op1::identity();
        for(uint i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
        }
        d_o  = accumulate ? op::op(d_o, s)    : s;
        d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
    }
}

template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1)
{
    reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}

/* Variant writing both results into d_o[0] and d_o[1]. */
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1)
{
    reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}

////////////////////////////////////////////////////////////////////////////////
/* L1 pass computing three reductions (op, op1, op2) of the same input at
 * once, e.g. max, min and sum in a single read of the data. */
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size)
{
    __shared__ T shm[N];
    __shared__ T shm1[N];
    __shared__ T shm2[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    T s2=op2::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2) {
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
        op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
        op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
    }
    shm[threadIdx.x]  = (j < size) ? op::op(s, d_i[j]) : s;
    shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
    shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
    __syncthreads();
#if 1
    if(threadIdx.x==0) {
        T s =op::identity();
        T s1=op1::identity();
        T s2=op2::identity();
        for(int i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
            op2::iop(s2, shm2[i]);
        }
        res  = s;
        res1 = s1;
        res2 = s2;
    }
#else
    if(threadIdx.x==0) {
        T s =op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res = s;
    }
    if(threadIdx.x==1) {
        T s1 =op1::identity();
        for(int i=0; i<N; i++)
            op1::iop(s1, shm1[i]);
        res1 = s1;
    }
    if(threadIdx.x==2) {
        T s2 =op2::identity();
        for(int i=0; i<N; i++)
            op2::iop(s2, shm2[i]);
        res2 = s2;
    }
#endif
}

template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2, const T* d_i, int size)
{
    int blockId = blockIdx.x;
    /* Fixed: the call previously forwarded `accumulate` as a fifth template
     * argument, but trireduce_L1_dev takes only four — any instantiation of
     * this kernel failed to compile.  `accumulate` is kept in this kernel's
     * template signature for source compatibility but is unused. */
    trireduce_L1_dev<T, op, op1, op2>
        (res[blockId], res1[blockId], res2[blockId], d_i, size);
}

/* L2 folding of three partial arrays with three operators. */
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2, const T* d_i, const T* d_i1, const T* d_i2)
{
    __shared__ T shm[N];
    __shared__ T shm1[N];
    __shared__ T shm2[N];
    uint idx=threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    T s2=op2::identity();
#if M%(2*N)==0
    for(uint j=idx; j<M; j+=N*2){
        op::iop(s, op::op(d_i[j], d_i[j + N]));
        op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
        op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
    }
#else
    for(uint j=idx; j<M; j+=N){
        op::iop(s, d_i[j]);
        op1::iop(s1, d_i1[j]);
        op2::iop(s2, d_i2[j]);
    }
#endif
    shm[threadIdx.x]=s;
    shm1[threadIdx.x]=s1;
    shm2[threadIdx.x]=s2;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        T s1=op1::identity();
        T s2=op2::identity();
        for(uint i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
            op2::iop(s2, shm2[i]);
        }
        d_o  = accumulate ? op::op(d_o, s)    : s;
        d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
        d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
    }
}

/* Fixed: these two kernels previously forwarded <T, op, op1, accumulate>
 * to reduce_ip3_op3_L2_dev, whose fourth template parameter is the op2
 * class — no instantiation could compile.  They now take op2 explicitly,
 * mirroring the ip2 kernels above; confirm call sites pass op2. */
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2, const T* d_i, const T* d_i1, const T* d_i2)
{
    reduce_ip3_op3_L2_dev<T, op, op1, op2, accumulate>(d_o[0], d_o1[0], d_o2[0], d_i, d_i1, d_i2);
}

template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2)
{
    reduce_ip3_op3_L2_dev<T, op, op1, op2, accumulate>(d_o[0], d_o[1], d_o[2], d_i, d_i1, d_i2);
}

} // end namespace PyCA

/* Fixed: the include-guard #endif previously sat *before* the closing
 * namespace brace, leaving a stray '}' on any second inclusion. */
#endif // __REDUCE_STREAM_KERNEL_CU
18,956
/*
The CUDA programming model assumes a system composed of a host and a device,
each with their own separate memory. Kernels operate out of device memory, so
the runtime provides functions to allocate, deallocate, and copy device
memory, as well as transfer data between host memory and device memory.

Example adapted from the nVIDIA CUDA 9.1 samples
*/
#include <iostream>
#include <algorithm>
#include <memory>
// Fixed: these headers were used without being included (the code relied on
// transitive includes for std::numeric_limits, std::logic_error and exit).
#include <limits>
#include <stdexcept>
#include <cstdlib>

// Device Code
// One thread per element: C[i] = A[i] + B[i], guarded for the grid tail.
__global__ void vectorAdd(const float *A, const float *B, float *C, int vector_length){
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < vector_length){
        C[index] = A[index] + B[index];
    }
}

// Host Code
int main(){
    size_t vector_length;
    std::cout << "Enter with the size of the vector" << '\n';
    std::cin >> vector_length;

    // The kernel receives the length as an int, so reject larger sizes.
    if (vector_length > std::numeric_limits<int>::max()) {
        throw std::logic_error("This program only accepts lengths which fit in an int-type variable");
    }

    // Host Vector using C++14 smart pointers (value-initialized to zero).
    auto h_a = std::make_unique<float[]>(vector_length);
    auto h_b = std::make_unique<float[]>(vector_length);
    auto h_c = std::make_unique<float[]>(vector_length);

    // Fill a and b with 1, 2, 3, ...  (std::generate takes the generator by
    // value, so each call gets its own copy of the counter: both arrays hold
    // the same sequence.)
    auto generate_number = [n = 0]() mutable {return ++n;};
    std::generate(h_a.get(), h_a.get() + vector_length, generate_number);
    std::generate(h_b.get(), h_b.get() + vector_length, generate_number);

    // Device Vectors using C pointers
    float *d_a, *d_b, *d_c;

    // Allocating memory on device
    cudaMalloc(&d_a, vector_length * sizeof(float));
    cudaMalloc(&d_b, vector_length * sizeof(float));
    cudaMalloc(&d_c, vector_length * sizeof(float));

    // Copy from Host to Device
    cudaMemcpy(d_a, h_a.get(), vector_length * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.get(), vector_length * sizeof(float), cudaMemcpyHostToDevice);

    // Launching kernel
    size_t threads_per_block = 32;
    size_t blocks_per_grid = (vector_length + (threads_per_block - 1))/ threads_per_block;
    std::cout << "\nLaunching CUDA kernel vectorAdd<<<" << blocks_per_grid << ", " << threads_per_block << ">>>" << '\n';
    vectorAdd<<<blocks_per_grid, threads_per_block>>>(d_a, d_b, d_c, vector_length);

    // Fixed: surface launch-configuration errors — a kernel launch does not
    // report them directly.
    cudaError_t launch_status = cudaGetLastError();
    if (launch_status != cudaSuccess) {
        std::cerr << "Kernel launch failed: " << cudaGetErrorString(launch_status) << '\n';
        exit(EXIT_FAILURE);
    }

    // Copy from Device to Host (blocking, so it also synchronizes).
    cudaMemcpy(h_c.get(), d_c, vector_length * sizeof(float), cudaMemcpyDeviceToHost);

    // Deallocating memory on device
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Check Results
    for(size_t i = 0; i != vector_length; ++i){
        if(h_a[i] + h_b[i] != h_c[i]){
            std::cerr << "Mismatch found in position " << i << ": Expected = "<< h_a[i] + h_b[i] << " Obtained = " << h_c[i] << '\n';
            exit(EXIT_FAILURE);
        }
    }
    std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
    return 0;
}
18,957
#include <cstdio>
#include <cstdlib>   // atoi, exit (fixed: previously relied on transitive includes)
#include <assert.h>
#include "refCounter_kernel.cu"

/* Driver for the reference-counter kernel: parses the launch shape from the
 * command line, allocates managed memory for two counter arrays and two
 * deletion-flag arrays, runs the kernel, and sanity-checks the results
 * (all counters back to zero, at least one deletion flag set per array). */
int main(int argc, char ** argv)
{
  // local variables
  unsigned int * h_counters0 = NULL;
  unsigned int * h_counters1 = NULL;
  unsigned int * h_del0 = NULL;
  unsigned int * h_del1 = NULL;
  unsigned int numTBs = 0, tbSize = 0, numRepeats = 0, numSharersPerGroup = 0;
  const int numRuns = 1;

  if (argc != 5) {
    fprintf(stderr, "./refCounter <numSharersPerGroup> <numTBs> <tbSize> <numRepeats>\n");
    fprintf(stderr, "where:\n");
    fprintf(stderr, "\t<numSharersPerGroup>: number of thread blocks sharing a counter\n");
    fprintf(stderr, "\t<numTBs>: number of thread blocks to launch\n");
    fprintf(stderr, "\t<tbSize>: number of threads in a thread block\n");
    fprintf(stderr, "\t<numRepeats>: how many times to have the 'main' TB read its data before setting stop\n");
    exit(-1);
  }

  // parse input args
  numSharersPerGroup = atoi(argv[1]);
  numTBs = atoi(argv[2]);
  assert(numSharersPerGroup <= numTBs);
  tbSize = atoi(argv[3]);
  assert(tbSize <= 256); // so scratchpad allocations don't throttle max TBs/SM too much
  numRepeats = atoi(argv[4]);
  const unsigned int numThrs = (tbSize * numTBs);

  /*
    numSharersPerGroup TBs share a counter entry and a del entry.  Within
    each TB, each thread has a separate counter that is shared with the same
    thread # in the sharing TBs.
  */
  unsigned int numCounters = (numThrs / numSharersPerGroup);
  unsigned int numSharingGroups = (numTBs / numSharersPerGroup);
  unsigned int numCounters_perSharingGroup = (numCounters / numSharingGroups);
  fprintf(stdout, "# Thr: %d, # TB: %d, # Counters: %d, # Sharers: %d, # Groups: %d, # Counters/Group: %d\n",
          numThrs, numTBs, numCounters, numSharersPerGroup, numSharingGroups, numCounters_perSharingGroup);

  fprintf(stdout, "Initializing data...\n");
  fprintf(stdout, "...allocating memory.\n");
  // Unified (managed) memory: visible to both host and device.
  cudaMallocManaged(&h_counters0, numCounters*sizeof(int));
  cudaMallocManaged(&h_counters1, numCounters*sizeof(int));
  cudaMallocManaged(&h_del0, numThrs*sizeof(int));
  cudaMallocManaged(&h_del1, numThrs*sizeof(int));

  // initialize arrays
  fprintf(stdout, "...initializing memory.\n");
  for (int i = 0; i < numCounters; ++i) {
    h_counters0[i] = 0;
    h_counters1[i] = 0;
  }
  for (int i = 0; i < numThrs; ++i) {
    h_del0[i] = 0;
    h_del1[i] = 0;
  }

  fprintf(stdout, "Launching kernel - %d runs with %d TBs and %d threads/TB\n", numRuns, numTBs, tbSize);
  for (int iter = 0; iter < numRuns; ++iter) {
    refCounter_kernel<<<numTBs, tbSize>>>(h_counters0, h_counters1,
                                          h_del0, h_del1,
                                          numRepeats, numSharersPerGroup,
                                          numCounters, numSharingGroups,
                                          numCounters_perSharingGroup);
    // Managed memory must not be touched by the host while the kernel runs.
    cudaDeviceSynchronize();
  }

  /*
    Instead of printing all the values, do some simple checks to decide if
    the output is reasonable or not (there is no one right output because
    the interleavings of the threads determine what will happen).  So just
    make sure that all of the counters are 0 and that at least one location
    in the del arrays is true.
  */
  bool passFail = true;
  for (int i = 0; i < numCounters; ++i) {
    if (h_counters0[i] != 0) {
      fprintf(stderr, "ERROR: h_counters0[%d]: %d\n", i, h_counters0[i]);
      passFail = false;
    }
  }
  for (int i = 0; i < numCounters; ++i) {
    if (h_counters1[i] != 0) {
      fprintf(stderr, "ERROR: h_counters1[%d]: %d\n", i, h_counters1[i]);
      passFail = false;
    }
  }
  bool atLeastOneSet0 = false, atLeastOneSet1 = false;
  for (int i = 0; i < numThrs; ++i) {
    if (h_del0[i] != 0) {
      atLeastOneSet0 = true;
      break;
    }
  }
  if (!atLeastOneSet0) {
    fprintf(stderr, "ERROR: none of d_del0 array locations set\n");
    passFail = false;
  }
  for (int i = 0; i < numThrs; ++i) {
    if (h_del1[i] != 0) {
      atLeastOneSet1 = true;
      break;
    }
  }
  if (!atLeastOneSet1) {
    fprintf(stderr, "ERROR: none of d_del1 array locations set\n");
    passFail = false;
  }
  if (!passFail) {
    fprintf(stderr, "TEST FAILED!\n");
  } else {
    fprintf(stderr, "TEST PASSED!\n");
  }

#ifdef DEBUG
  fprintf(stdout, "Counter 0 Values\n");
  for (int i = 0; i < numCounters; ++i) {
    fprintf(stdout, "\t[%d]: %d\n", i, h_counters0[i]);
  }
  fprintf(stdout, "Counter 1 Values\n");
  for (int i = 0; i < numCounters; ++i) {
    fprintf(stdout, "\t[%d]: %d\n", i, h_counters1[i]);
  }
  fprintf(stdout, "Deletion 0 Values\n");
  for (int i = 0; i < numThrs; ++i) {
    fprintf(stdout, "\t[%d]: %s\n", i, ((h_del0[i]) ? "true" : "false"));
  }
  fprintf(stdout, "Deletion 1 Values\n");
  for (int i = 0; i < numThrs; ++i) {
    fprintf(stdout, "\t[%d]: %s\n", i, ((h_del1[i]) ? "true" : "false"));
  }
#endif // #ifdef DEBUG

  // Fixed: this memory was allocated with cudaMallocManaged, so it must be
  // released with cudaFree; cudaFreeHost is only valid for pinned host
  // allocations (cudaMallocHost/cudaHostAlloc).
  cudaFree(h_counters0);
  cudaFree(h_del0);
  cudaFree(h_counters1);
  cudaFree(h_del1);
  return 0;
}
18,958
#include <stdio.h> void __global__ kernel(int *x, int size){ int id = blockIdx.x*blockDim.x + threadIdx.x; for(int i = id; i < size; i += blockDim.x*gridDim.x){ x[i] = 1; } } void __global__ print(int * x, int size){ int id = blockDim.x*blockIdx.x + threadIdx.x; for(;id < size; id += blockDim.x*gridDim.x){ if(x[id] != 1) printf("%d\n", x[id]); } } int main(){ int size = 100000000; int *x; cudaMalloc((void**)&x, size*sizeof(int)); cudaStream_t s[2]; cudaStreamCreate(s+0); cudaStreamCreate(s+1); cudaMemsetAsync(x, 0xFF, size*sizeof(int), s[0]); kernel<<<100000, 32, 0, s[1]>>>(x, size); print<<<10000 , 32>>>(x, size); cudaStreamDestroy(s[0]); cudaStreamDestroy(s[1]); cudaFree(x); if(cudaSuccess != cudaDeviceReset()){ printf("Error\n"); } return 0; }
18,959
__global__ void add(float* out, float* X, float* Y) { long idx = threadIdx.x; out[idx] = X[idx] +Y[idx]; }
18,960
#include "includes.h" __global__ void to_pbo_kernel1(unsigned char* g_in, int stride_in, uchar4* g_out, int stride_out, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x<width && y<height) { unsigned char value = g_in[y*stride_in+x]; g_out[y*stride_out+x] = make_uchar4(value, value, value, 1); } }
18,961
#include "includes.h" __device__ float tanh_(float x) { // e**2x - 1 // --------- // e**2x + 1 float exp2x = exp(2.0*x); return (exp2x - 1.0)/(exp2x + 1.0); } __global__ void LSTM1(float* layer1, float* lstm1, const float* gate1i, const float* gate1o, const int offset) { int i = blockDim.x*blockIdx.x + threadIdx.x; //256 float g_i = gate1i[256*offset + i]; float g_f = 1.0 - g_i; float g_o = gate1o[256*offset + i]; float i_t = tanh_(layer1[256*offset + i]) * g_i; float i_p = 0.0; if (offset > 0) i_p = g_f * lstm1[256*(offset-1) + i]; float sum = i_p + i_t; lstm1[256*offset + i] = sum; layer1[256*offset + i] = tanh_(sum) * g_o; }
18,962
#include "includes.h" __global__ void yuv2rgb_kernel(int img_size, unsigned char *img_r, unsigned char *img_g, unsigned char *img_b, unsigned char *img_y, unsigned char *img_u, unsigned char *img_v){ int i = threadIdx.x + blockDim.x*blockIdx.x; unsigned char y, cb, cr; if(i < img_size){ y = img_y[i]; cb = img_u[i] - 128; cr = img_v[i] - 128; img_r[i] = ( y + 1.402 * cr); img_g[i] = ( y - 0.344 * cb - 0.714 * cr); img_b[i] = ( y + 1.772 * cb); } }
18,963
__device__ int tryToInsert(int* mutex, int id, int value) { int result = atomicCAS((int*) (mutex + id), -1, value); if (result == -1 || result == value) return 1; else return 0; } __global__ void hashKernel( int* indices, int hashTableSize, int* hashTable, int* counts, int* mapping) { extern __shared__ int instanceCounts[]; int indexInstance = blockIdx.x; int indexColumn = threadIdx.x; int maximumNumberColumns = blockDim.x; int indexColumnWithinBatch = indexInstance * maximumNumberColumns + indexColumn; int stepInstanceCount = hashTableSize / maximumNumberColumns; int startInstanceCount = threadIdx.x * stepInstanceCount; int endInstanceCount = startInstanceCount + stepInstanceCount; for(int indexInstanceCount = startInstanceCount; indexInstanceCount < endInstanceCount; indexInstanceCount++) { instanceCounts[indexInstanceCount] = 0; } mapping[indexColumnWithinBatch] = -1; __syncthreads(); int parameterIndex = indices[indexColumnWithinBatch]; if(parameterIndex != -1) { unsigned candidate = parameterIndex % hashTableSize; while(true) { int insertionResult = tryToInsert(hashTable, candidate, parameterIndex); if(insertionResult == 1) { atomicExch(&instanceCounts[candidate], 1); mapping[indexColumnWithinBatch] = candidate; break; } else { candidate = (candidate + 1) % hashTableSize; } } } __syncthreads(); for(int indexInstanceCount = startInstanceCount; indexInstanceCount < endInstanceCount; indexInstanceCount++) { atomicAdd(&counts[indexInstanceCount], instanceCounts[indexInstanceCount]); } }
18,964
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <vector>
#include <ctime>      // clock(), CLOCKS_PER_SEC
#include <cstdlib>    // srand(), rand()
using namespace std;

/* Host-side check: return 1 when data[0..n) is non-decreasing.
 * On the first violation, print its index and return 0. */
bool isIncreased(float *data, int n){
    int is_true = 1;
    for(int i = 0; i < n - 1; i++){
        if(data[i] > data[i + 1]){
            cout << i << endl;
            return 0;
        }
    }
    return is_true;
}

/* Fill data[0..n) with deterministic pseudo-random values in [0.0, 100.0).
 * srand(0) makes every run produce the same sequence. */
void get_random(float* data, int n){
    srand(0);
    for (int i = 0; i < n; i++) {
        data[i] = (rand() % 1000) / 10.0;
    }
}

/* ---------------- Version 1: naive ----------------
 * One bitonic compare-exchange step (sequence length i, stride j) per
 * launch, entirely in global memory.  Assumes data_size is a power of two
 * and the grid exactly covers it (no bounds check — the drivers below
 * guarantee this). */
__global__ void naive_bitonic_sort(float *data, int i, int j){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int partner = tid ^ j;              // bitonic partner of this element
    if(partner > tid){                  // the lower-indexed thread performs the swap
        if(((tid / i) % 2) == 0){       // ascending half of the sequence
            if(data[tid] > data[partner]){
                float temp = data[tid];
                data[tid] = data[partner];
                data[partner] = temp;
            }
        } else {                        // descending half
            if(data[tid] < data[partner]){
                float temp = data[tid];
                data[tid] = data[partner];
                data[partner] = temp;
            }
        }
    }
}

/* Driver for version 1: one kernel launch per (i, j) network step. */
__host__ void naive_call(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    for(int i = 2; i <= data_size; i *= 2)        // bitonic sequence length
        for(int j = i / 2; j > 0; j /= 2)         // compare-exchange stride
            naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
}

/* ---------------- Version 2: merge1 ----------------
 * Run every step whose stride fits inside one block in a single kernel,
 * synchronising with __syncthreads() between steps.  Valid because for
 * j < blockDim.x (a power of two) tid ^ j stays inside the same block. */
__global__ void merge_bitonic_sort(float *data, int data_size){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int end = min(data_size, blockDim.x);   // longest sequence sortable per block
    for(int i = 2; i <= end; i *= 2){
        for(int j = i / 2; j > 0; j /= 2){
            int partner = tid ^ j;
            if(partner > tid){
                if(((tid / i) % 2) == 0){
                    if(data[tid] > data[partner]){
                        float temp = data[tid];
                        data[tid] = data[partner];
                        data[partner] = temp;
                    }
                } else {
                    if(data[tid] < data[partner]){
                        float temp = data[tid];
                        data[tid] = data[partner];
                        data[partner] = temp;
                    }
                }
            }
            __syncthreads();   // all partners are in this block (j < blockDim.x)
        }
    }
}

/* Driver for version 2: block-local pre-sort, then naive launches for the
 * remaining cross-block sequence lengths. */
__host__ void less_call(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    merge_bitonic_sort<<<blocks, block_size>>>(cuda_data, data_size);
    if(block_size < data_size){
        int blocks1024 = (data_size % 1024 == 0) ? data_size / 1024
                                                 : data_size / 1024 + 1;
        for(int i = block_size * 2; i <= data_size; i *= 2)
            for(int j = i / 2; j > 0; j /= 2)
                naive_bitonic_sort<<<blocks1024, 1024>>>(cuda_data, i, j);
    }
}

/* ---------------- Version 3: merge2 ----------------
 * For a fixed sequence length i, run all tail strides (j < block_size) of
 * the merge phase inside one kernel. */
__global__ void merge2bitonic_sort(float *data, int data_size, int i, int block_size){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    for(int j = block_size / 2; j > 0; j /= 2){
        int partner = tid ^ j;
        if(partner > tid){
            if(((tid / i) % 2) == 0){
                if(data[tid] > data[partner]){
                    float temp = data[tid];
                    data[tid] = data[partner];
                    data[partner] = temp;
                }
            } else {
                if(data[tid] < data[partner]){
                    float temp = data[tid];
                    data[tid] = data[partner];
                    data[partner] = temp;
                }
            }
        }
        __syncthreads();
    }
}

/* Driver for version 3: naive launches only down to stride block_size,
 * then one merge2 kernel finishes the tail strides. */
__host__ void less_call2(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    merge_bitonic_sort<<<blocks, block_size>>>(cuda_data, data_size);
    if(block_size < data_size){
        for(int i = block_size * 2; i <= data_size; i *= 2){
            for(int j = i / 2; j >= block_size; j /= 2)
                naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
            merge2bitonic_sort<<<blocks, block_size>>>(cuda_data, data_size, i, block_size);
        }
    }
}

/* ---------------- Version 4: shared-memory block sort ----------------
 * Same as merge_bitonic_sort, staged through shared memory.
 * Dynamic shared memory: blockDim.x * sizeof(float). */
__global__ void shared_sort(float *data, int data_size){
    extern __shared__ float smem[];
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    smem[threadIdx.x] = data[tid];
    __syncthreads();   // FIX: the original read neighbours before the staging
                       // writes were visible — a shared-memory race
    int end = min(data_size, blockDim.x);
    for(int i = 2; i <= end; i *= 2){
        for(int j = i / 2; j > 0; j /= 2){
            int partner = threadIdx.x ^ j;
            if(partner > threadIdx.x){
                if(((tid / i) % 2) == 0){   // direction from the GLOBAL index
                    if(smem[threadIdx.x] > smem[partner]){
                        float temp = smem[threadIdx.x];
                        smem[threadIdx.x] = smem[partner];
                        smem[partner] = temp;
                    }
                } else {
                    if(smem[threadIdx.x] < smem[partner]){
                        float temp = smem[threadIdx.x];
                        smem[threadIdx.x] = smem[partner];
                        smem[partner] = temp;
                    }
                }
            }
            __syncthreads();
        }
    }
    data[tid] = smem[threadIdx.x];
}

/* Driver for version 4: shared block sort, then naive cross-block steps. */
__host__ void shared_less_call(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    shared_sort<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size);
    if(block_size < data_size){
        for(int i = block_size * 2; i <= data_size; i *= 2)
            for(int j = i / 2; j > 0; j /= 2)
                naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
    }
}

/* Driver for version 5: shared block sort + merge2 tail kernel. */
__host__ void shared_less_call2(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    shared_sort<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size);
    if(block_size < data_size){
        for(int i = block_size * 2; i <= data_size; i *= 2){
            for(int j = i / 2; j >= block_size; j /= 2)
                naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
            merge2bitonic_sort<<<blocks, block_size>>>(cuda_data, data_size, i, block_size);
        }
    }
}

/* ---------------- Version 6: merge2 staged in shared memory ---------------- */
__global__ void shared_merge2bitonic_sort(float *data, int data_size, int i, int block_size){
    extern __shared__ float smem[];
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    smem[threadIdx.x] = data[tid];
    __syncthreads();
    for(int j = block_size / 2; j > 0; j /= 2){
        int partner = threadIdx.x ^ j;
        if(partner > threadIdx.x){
            if(((tid / i) % 2) == 0){
                if(smem[threadIdx.x] > smem[partner]){
                    float temp = smem[threadIdx.x];
                    smem[threadIdx.x] = smem[partner];
                    smem[partner] = temp;
                }
            } else {
                if(smem[threadIdx.x] < smem[partner]){
                    float temp = smem[threadIdx.x];
                    smem[threadIdx.x] = smem[partner];
                    smem[partner] = temp;
                }
            }
        }
        __syncthreads();
    }
    data[tid] = smem[threadIdx.x];
}

/* Driver for version 6. */
__host__ void shared_less_call3(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    shared_sort<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size);
    if(block_size < data_size){
        for(int i = block_size * 2; i <= data_size; i *= 2){
            for(int j = i / 2; j >= block_size; j /= 2)
                naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
            shared_merge2bitonic_sort<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size, i, block_size);
        }
    }
}

/* ---------------- Version 7: hoist the direction test ----------------
 * (tid / i) % 2 is uniform across a block here because every caller uses
 * i >= 2 * blockDim.x, so the __syncthreads() inside each branch is still
 * reached by the whole block. */
__global__ void shared_merge2bitonic_sort2(float *data, int data_size, int i, int block_size){
    extern __shared__ float smem[];
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    smem[threadIdx.x] = data[tid];
    __syncthreads();
    if(((tid / i) % 2) == 0){
        for(int j = block_size / 2; j > 0; j /= 2){
            int partner = threadIdx.x ^ j;
            if(partner > threadIdx.x && smem[threadIdx.x] > smem[partner]){
                float temp = smem[threadIdx.x];
                smem[threadIdx.x] = smem[partner];
                smem[partner] = temp;
            }
            __syncthreads();
        }
    } else {
        for(int j = block_size / 2; j > 0; j /= 2){
            int partner = threadIdx.x ^ j;
            if(partner > threadIdx.x && smem[threadIdx.x] < smem[partner]){
                float temp = smem[threadIdx.x];
                smem[threadIdx.x] = smem[partner];
                smem[partner] = temp;
            }
            __syncthreads();
        }
    }
    data[tid] = smem[threadIdx.x];
}

/* Driver for version 7. */
__host__ void shared_less_call4(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    shared_sort<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size);
    if(block_size < data_size){
        for(int i = block_size * 2; i <= data_size; i *= 2){
            for(int j = i / 2; j >= block_size; j /= 2)
                naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
            shared_merge2bitonic_sort2<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size, i, block_size);
        }
    }
}

/* Ascending compare-exchange in shared memory between lane and lane ^ j;
 * the lower lane performs the swap. */
__device__ __forceinline__ void smemSwapAsc(float *smem, int lane, int j){
    int partner = lane ^ j;
    if(partner > lane && smem[lane] > smem[partner]){
        float temp = smem[lane];
        smem[lane] = smem[partner];
        smem[partner] = temp;
    }
}

/* Descending counterpart of smemSwapAsc. */
__device__ __forceinline__ void smemSwapDesc(float *smem, int lane, int j){
    int partner = lane ^ j;
    if(partner > lane && smem[lane] < smem[partner]){
        float temp = smem[lane];
        smem[lane] = smem[partner];
        smem[partner] = temp;
    }
}

/* ---------------- Version 8: version 7 with the fixed tail unrolled ----
 * The strides 256..1 form a constant-trip loop under #pragma unroll, so
 * the compiler fully unrolls them; requires blockDim.x == 512 (the first
 * unrolled stride, 256, must equal blockDim.x / 2).
 * FIX: the original descending branch ran the whole variable-stride loop
 * AND the unrolled tail, performing every compare-exchange twice
 * (idempotent, but pure wasted work). */
__global__ void unroll_shared_merge2bitonic_sort2(float *data, int data_size, int i, int block_size){
    extern __shared__ float smem[];
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int lane = threadIdx.x;
    smem[lane] = data[tid];
    __syncthreads();
    if(((tid / i) % 2) == 0){   // uniform per block: callers use i >= 2*blockDim.x
        for(int j = block_size / 2; j > 256; j >>= 1){
            smemSwapAsc(smem, lane, j);
            __syncthreads();
        }
#pragma unroll
        for(int j = 256; j > 0; j >>= 1){
            smemSwapAsc(smem, lane, j);
            __syncthreads();
        }
    } else {
        for(int j = block_size / 2; j > 256; j >>= 1){
            smemSwapDesc(smem, lane, j);
            __syncthreads();
        }
#pragma unroll
        for(int j = 256; j > 0; j >>= 1){
            smemSwapDesc(smem, lane, j);
            __syncthreads();
        }
    }
    data[tid] = smem[lane];
}

/* Driver for version 8. */
__host__ void unroll_shared_less_call4(int data_size, float *cuda_data, int block_size){
    int blocks = (data_size % block_size == 0) ? data_size / block_size
                                               : data_size / block_size + 1;
    shared_sort<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size);
    if(block_size < data_size){
        for(int i = block_size * 2; i <= data_size; i *= 2){
            for(int j = i / 2; j >= block_size; j /= 2)
                naive_bitonic_sort<<<blocks, block_size>>>(cuda_data, i, j);
            unroll_shared_merge2bitonic_sort2<<<blocks, block_size, block_size * sizeof(float)>>>(cuda_data, data_size, i, block_size);
        }
    }
}

/* Benchmark one variant on a fresh copy of the input and verify the result.
 * FIX: the device buffer is zero-filled first so the padding region
 * [whole_size, data_size) holds 0.0f, which sorts in front of the real data
 * (all inputs are >= 0); the original sorted uninitialised padding bytes. */
static void run_variant(void (*sorter)(int, float *, int), int sorter_block,
                        const char *label, float *dev, const float *host,
                        float *result, int whole_size, int data_size){
    cudaMemset(dev, 0, data_size * sizeof(float));
    cudaMemcpy(dev, host, whole_size * sizeof(float), cudaMemcpyHostToDevice);

    clock_t start = clock();
    sorter(data_size, dev, sorter_block);
    cudaDeviceSynchronize();
    clock_t end = clock();
    cout << "data_size : " << data_size << " GPU " << label
         << " bitonic sort time :" << float(end - start) * 1000.0 / CLOCKS_PER_SEC
         << " ms" << endl;

    // the zero padding lands at the front; read back only the real data
    cudaMemcpy(result, dev + data_size - whole_size, whole_size * sizeof(float),
               cudaMemcpyDeviceToHost);
    cout << "Right or false? " << isIncreased(result, whole_size) << endl;
    for(int i = whole_size - 10; i < whole_size; i++)
        printf("%.1f ", result[i]);
    cout << endl;
}

int main(){
    cudaSetDevice(1);

    const int whole_size = 1000000;
    float *data_cpu = new float[whole_size];
    get_random(data_cpu, whole_size);

    // pad the problem size up to the next power of two (bitonic requirement)
    int data_size = 1;
    while(data_size < whole_size)
        data_size *= 2;

    // one reusable device buffer (the original allocated eight and freed one)
    float *cuda_data = NULL;
    cudaMalloc((void **) &cuda_data, data_size * sizeof(float));
    float *result_data = new float[whole_size];

    run_variant(naive_call,               1024, "naive",                               cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(less_call,                 256, "merge1",                              cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(less_call2,                256, "merge2",                              cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(shared_less_call,          256, "shared + merge1",                     cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(shared_less_call2,         256, "shared + merge2",                     cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(shared_less_call3,         256, "shared2 + merge2",                    cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(shared_less_call4,         256, "change order shared2 + merge2",       cuda_data, data_cpu, result_data, whole_size, data_size);
    run_variant(unroll_shared_less_call4,  512, "unroll change order shared2 + merge2", cuda_data, data_cpu, result_data, whole_size, data_size);

    // FIX: the original leaked every host array and 7 of 8 device buffers
    delete[] data_cpu;
    delete[] result_data;
    cudaFree(cuda_data);
    return 0;
}
18,965
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>

#define N 64

// Element-wise binary functor: returns the product of its two arguments.
struct User_Multiple
{
    __host__ __device__ int operator()(const int &x, const int &y)
    {
        return x * y;
    }
};

// Demo: compute array_out[i] = 10 * i on the device and print the result.
int main()
{
    thrust::device_vector<int> array_X(N);
    thrust::device_vector<int> array_Y(N);
    thrust::device_vector<int> array_out(N);

    // X = {10, 10, ...}, Y = {0, 1, 2, ...}
    thrust::fill(array_X.begin(), array_X.end(), 10);
    thrust::sequence(array_Y.begin(), array_Y.end());

    // out = X * Y, element-wise
    thrust::transform(array_X.begin(), array_X.end(),
                      array_Y.begin(), array_out.begin(),
                      User_Multiple());

    // print "array_out[N] = v0, v1, ..., vN-1"
    std::cout << "array_out[" << N << "] = ";
    for (int i = 0; i < N; i++) {
        std::cout << array_out[i];
        if (i < N - 1)
            std::cout << ", ";
        else
            std::cout << std::endl;
    }
}
18,966
/* Per-pixel squared difference between a lighting-candidate's synthesised
 * radiance and the user's target radiance, plus the candidate's total
 * energy cost.
 * Launch layout: threadIdx.x = pixel column i (W = blockDim.x),
 * threadIdx.y + divide*blockIdx.x = pixel row j, blockIdx.y = candidate.
 * conf encodes each candidate as L lights of VEC_SIZE ints:
 * (brightness index b, position x, position y). */
extern "C" __global__ void pixelDiff_kernel (float** diff0, float* obj_func, const float* user_radiance, const int* conf, const int L, const int X_Y, const int Y, const int VEC_SIZE, const float** radianceMap, const float* energy, const float* brightness_ratio, const unsigned int divide)
{
    // pixel area (i,j)
    unsigned int i = threadIdx.x;
    // unsigned int j = blockIdx.x;
    unsigned int j = divide*blockIdx.x + threadIdx.y;
    unsigned int W = blockDim.x;
    // unsigned int H = gridDim.x;
    // candidate id
    unsigned int candID = blockIdx.y;

    // synth in radiance area & calculate energy dissipation
    float synthRadiance = 0.0;
    float energyTerm = 0.0;
    for(int n=0;n<L;n++){
        // decode light n of this candidate: brightness index and position
        unsigned int b = conf[ candID*(L*VEC_SIZE) + (n*VEC_SIZE+0) ];
        unsigned int x = conf[ candID*(L*VEC_SIZE) + (n*VEC_SIZE+1) ];
        unsigned int y = conf[ candID*(L*VEC_SIZE) + (n*VEC_SIZE+2) ];
        unsigned int id = n*(X_Y)+x*Y+y;   // pre-rendered radiance map for this light/position
        synthRadiance += radianceMap[id][i+j*W]*brightness_ratio[b];
        energyTerm += energy[b];
    }

    // difference
    float diff = synthRadiance - user_radiance[i+j*W];
    // radiance-based
    // float designTerm = fabs(diff*diff); // L1-norm
    float designTerm = diff*diff ; // L2-norm

    // write the result
    diff0[candID][i+j*W] = designTerm;
    // one thread per candidate stashes the energy term in obj_func;
    // transfer_kernel later combines it with the reduced design term
    if(i==0 && j==0) obj_func[candID] = energyTerm;
}

/* Sum-reduce g_idata[blockIdx.y][0..n) into g_odata[blockIdx.y][blockIdx.x]
 * (classic two-loads-per-thread tree reduction; dynamic shared memory:
 * blockDim.x * sizeof(float)).
 * NOTE(review): the tid < 32 tail relies on implicit warp-synchronous
 * execution over a volatile pointer — a pre-Volta idiom; on Volta+ this
 * needs __syncwarp() between steps.  Confirm the target architecture. */
extern "C" __global__ void reduceSum_kernel (float** g_idata, float** g_odata, unsigned int n)
{
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockDim.x*2*blockIdx.x + tid;   // each thread starts on two elements
    unsigned int gridSize = blockDim.x*2*gridDim.x;
    int id = blockIdx.y;                              // which candidate's buffer to reduce
    float mySum = 0.0;

    // serial accumulation over the grid-stride range
    while(i<n){
        mySum += g_idata[id][i];
        if( i + blockDim.x < n) mySum += g_idata[id][i+blockDim.x];
        i+=gridSize;
    }
    sdata[tid] = mySum;
    __syncthreads();

    // tree reduction in shared memory, halving active threads each step
    if(blockDim.x >= 512){ if(tid<256) { sdata[tid] = mySum = mySum + sdata[tid+256]; } __syncthreads(); }
    if(blockDim.x >= 256){ if(tid<128) { sdata[tid] = mySum = mySum + sdata[tid+128]; } __syncthreads(); }
    if(blockDim.x >= 128){ if(tid< 64) { sdata[tid] = mySum = mySum + sdata[tid+ 64]; } __syncthreads(); }

    // final warp: no barriers; volatile forces a reload at every step
    if(tid<32) {
        volatile float* smem = sdata;
        if( blockDim.x >= 64 ){ smem[tid] = mySum = mySum + smem[tid+32]; }
        if( blockDim.x >= 32 ){ smem[tid] = mySum = mySum + smem[tid+16]; }
        if( blockDim.x >= 16 ){ smem[tid] = mySum = mySum + smem[tid+ 8]; }
        if( blockDim.x >= 8 ){ smem[tid] = mySum = mySum + smem[tid+ 4]; }
        if( blockDim.x >= 4 ){ smem[tid] = mySum = mySum + smem[tid+ 2]; }
        if( blockDim.x >= 2 ){ smem[tid] = mySum = mySum + smem[tid+ 1]; }
    }

    // per-block partial sum
    if(tid==0) g_odata[id][blockIdx.x] = sdata[0];
}

/* Combine the fully reduced design term (diff1[candID][0]) with the energy
 * term pixelDiff_kernel stashed in obj_func:
 * obj = mean squared pixel difference + alpha * energy. */
extern "C" __global__ void transfer_kernel (float* obj_func, float** diff1, int W, int H, float alpha)
{
    unsigned int candID = blockIdx.y;
    float designTerm = diff1[candID][0];   // sum of squared pixel differences
    float energyTerm = obj_func[candID];   // stored earlier by pixelDiff_kernel
    obj_func[candID] = designTerm/((float)W*H) + alpha * energyTerm;
}
18,967
#include <stdio.h>

/*
 * Kernel replacing a serial for-loop: each launched thread handles exactly
 * one "iteration", identified by its threadIdx.x. Note the absence of the
 * previously expected argument `N`.
 */
__global__ void loop()
{
    printf("This is iteration number %d\n", threadIdx.x);
}

int main()
{
    // The execution configuration decides how many "iterations" run:
    // a single block of 10 threads.
    const int iterations = 10;
    loop<<<1, iterations>>>();
    cudaDeviceSynchronize();   // wait for the device printf output to flush
}
18,968
#include "includes.h" __global__ void updatePosition_Kernel(int numElements, float4* bodyPos, float3* bodySpeed) { int elementId = blockIdx.x * blockDim.x + threadIdx.x; float4 elementPosMass; float3 elementSpeed; if (elementId < numElements) { elementPosMass = bodyPos[elementId]; elementSpeed = bodySpeed[elementId]; elementPosMass.x += elementSpeed.x * TIMESTEP; elementPosMass.y += elementSpeed.y * TIMESTEP; elementPosMass.z += elementSpeed.z * TIMESTEP; bodyPos[elementId] = elementPosMass; } }
18,969
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-26 * $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-01 float16 added */ #include "LogSoftmax.h" #include "LogSoftmax.cuh" #include "Loss.cuh" #include "../core/arithmetic/MultiplyDim.h" #include "../core/reduce/ReduceSum.cuh" #include "../core/reduce/ReduceMax.cuh" #include "../core/shape/IsSameShaped.h" #include "../XDevice.h" #include <device_launch_parameters.h> namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version) >> x - input vector >> y - result >> leadDim - leading dimension (along which we perform reduction) */ void _CudaLogSoftmax(const XTensor * x, XTensor * y, int leadDim) { ShowNTErrors("You should call LogSoftmax instead!"); } /* log softmax forward computation (Cuda kernel) for each column j, let y_{i,j} and x_{i,j} are the output and state value for the i-th element of column j. 
We have y_{i,j} = log(e^x_{i,j} / \sum_{i} e^{x_{i,j}) >> x - input tensor (in matrix) >> max - the max value for each column j >> sum - \sum_{i} e^{x_{i,j}) for each column j >> y - output tensor (in matrix) >> rowNum - row number of the matrix >> colNum - column number of the matrix */ template <class T ,TENSOR_DATA_TYPE dataType> __global__ void KernelLogSoftmaxComputeByRow(T * x, T * max, T * sum, T * y, int rowNum, int colNum) { __shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; /* we keep the sum and max number in the shared memory for each column */ if (threadIdx.y == 0) { inputSum[threadIdx.x] = sum[j]; inputMax[threadIdx.x] = max[j]; } /* synchronize to make sure the values of max and sum are loaded */ __syncthreads(); /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */ if (i < rowNum && j < colNum) { int key = i * colNum + j; if (dataType == DEFAULT_DTYPE) { DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.x])) / (DTYPE)inputSum[threadIdx.x]); if (isnan(r)) r = LOGPROB_MIN; if (isinf(r)) r = LOGPROB_MIN; y[key] = MAX(r, LOGPROB_MIN); } else if (dataType == X_FLOAT16) { #if __CUDA_ARCH__ >= 600 half r = hlog(hexp((half)x[key] - (half)inputMax[threadIdx.y]) / (half)inputSum[threadIdx.y]); y[key] = r; #endif } } } /* log softmax forward computation (Cuda kernel) for each row i, let y_{i,j} and x_{i,j} are the output and state value for the j-th element of row i. 
We have y_{i,j} = log(e^x_{i,j} / \sum_{j} e^{x_{i,j})

>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}) for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T ,TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByCol(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
    __shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row;
       per-row statistics are indexed by threadIdx.y */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        if (dataType == DEFAULT_DTYPE) {
            DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.y])) / (DTYPE)inputSum[threadIdx.y]);
            /*if (r < LOGPROB_MIN)
            {
                printf("min %e %e, %e %e, %e %e\n", r, x[key] - inputMax[threadIdx.y], x[key], inputMax[threadIdx.y],
                       exp(x[key] - inputMax[threadIdx.y]), inputSum[threadIdx.y]);
            }*/

            if (isnan(r))
                r = LOGPROB_MIN;
            if (isinf(r))
                r = LOGPROB_MIN;

            y[key] = MAX(r, LOGPROB_MIN);
        }
        else if (dataType == X_FLOAT16) {
#if __CUDA_ARCH__ >= 600
            half r = hlog(hexp((half)x[key] - (half)inputMax[threadIdx.y]) / (half)inputSum[threadIdx.y]);
            y[key] = r;
#endif
        }
    }
}

/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
Dispatches to the row- or column-wise kernel depending on leadDim
(leadDim == 0 reduces over rows, i.e. per-column statistics).
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
>> sum - \sum_{i} e^{x_i}
>> max - \max_{i} e^{x_i}
*/
void _CudaLogSoftmaxSumMax(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max)
{
    CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs.");
    CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU.");
    CheckNTErrors((x->order == y->order), "Input tensors must be of the same size.");
    CheckNTErrors((x->order == 2), "Input tensors must be of order 2.");

    int devIDBackup;
    ProtectCudaDev(x->devID, devIDBackup);

    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        int gridSize[3], blockSize[3];

        int n = x->dimSize[0];
        int m = x->dimSize[1];

        /* allocate the buffer */
        DTYPE * maxData = (DTYPE*)max->data;
        DTYPE * sumData = (DTYPE*)sum->data;

        if (leadDim == 0) {
            GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);

            /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
            KernelLogSoftmaxComputeByRow<DTYPE, DEFAULT_DTYPE> <<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
                                        ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
        }
        else {
            GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);

            /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
            KernelLogSoftmaxComputeByCol<DTYPE, DEFAULT_DTYPE> <<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
                                        ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
        }
    }
    else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
        int gridSize[3], blockSize[3];

        int n = x->dimSize[0];
        int m = x->dimSize[1];

        /* allocate the buffer */
        __half * maxData = (half*)max->data;
        __half * sumData = (half*)sum->data;

        if (leadDim == 0) {
            GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);

            /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
            KernelLogSoftmaxComputeByRow<half, X_FLOAT16> <<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
                                        ((half*)x->data, maxData, sumData, (half *)y->data, n, m);
        }
        else {
            GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);

            /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
            KernelLogSoftmaxComputeByCol<half, X_FLOAT16> <<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
                                        ((half*)x->data, maxData, sumData, (half*)y->data, n, m);
        }
#else
        ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
    }
    else {
        ShowNTErrors("TODO!");
    }

    BacktoCudaDev(x->devID, devIDBackup);
}

/*
set dE/dx = exp(y)
>> dedy - dE/dy
>> dedx - dE/dx
>> y - output of the function
>> size - size of output
>> lossName - name of the loss function
*/
__global__
void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < size) {
        /* dE/dx_j = exp(y_j) */
        if (lossName == CROSSENTROPY)
            dedx[i] = exp(y[i]);
        /* dE/dx_j = exp(y_j) */
        else if (lossName == SQUAREDERROR)
            dedx[i] = exp(y[i]);
        else if (lossName == ONEHOTERROR)
            dedx[i] = 0;
        else
            dedx[i] = 0;
    }
}

/*
backward computation for log softmax
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> size - size of input/output
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x,
                                  int size, LOSS_FUNCTION_NAME lossName)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < size) {
        DTYPE r = 0;

        /* dE/ds_j = exp(y_j) */
        if (lossName == CROSSENTROPY)
            r = -gold[i] + exp(y[i]);
        /* dE/ds_j = exp(y_j) */
        else if (lossName == SQUAREDERROR)
            r = -gold[i] + exp(y[i]);
        else if (lossName == ONEHOTERROR) {
            if (gold[i] == 1.0F)
                r = -gold[i] + exp(y[i]);
            else
                r = 0;
        }
        else {
            r = dedy[i];
        }

        if (isnan(r))
            r = 0;
        if (isinf(r))
            r = 0;

        dedx[i] = r;
    }
}

/*
backward computation for log softmax (sparse matrices)
for each column
dE/dx_j += -gold_j (for dE/dx = dE/dy * dy/dx)
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> rowNum -
row number of the matrix
>> colNum - column number of the matrix
>> gNonZeroNum - number of non-zero (key, value) tuples in gold
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void * gold, DTYPE * y, DTYPE * x,
                                             int rowNum, int colNum, int gNonZeroNum, LOSS_FUNCTION_NAME lossName)
{
    /* gold is a packed array of (int key, DTYPE value) tuples */
    int tupleSize = sizeof(int) + sizeof(DTYPE);
    int k = blockDim.x * blockIdx.x + threadIdx.x;

    if (k < gNonZeroNum) {
        /* load the sub-block of the sparse matrix b */
        int key = *(int*)((char*)gold + tupleSize * k);
        int ni = key / colNum;
        int mi = key % colNum;
        /* BUGFIX: the tuple payload is a DTYPE; the original declared
           `int value`, silently truncating fractional gold probabilities
           before they were subtracted from dedx. */
        DTYPE value = *(DTYPE*)((char*)gold + tupleSize * k + sizeof(int));

        if (lossName == CROSSENTROPY)
            dedx[colNum * ni + mi] += -value;
        else if (lossName == SQUAREDERROR)
            dedx[colNum * ni + mi] += -value;
        else if (lossName == ONEHOTERROR) {
            int offset = colNum * ni + mi;
            if (value == 1.0F)
                dedx[offset] += (-value + exp(y[offset]));
            //dedx[offset] += -value * 0.005;
        }
    }
}

/*
backward computation for dense matrices with default data type

dE/dx = dE/dy * dy/dx

log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})

dy_i/dx_j = d{log(e^{x_i} / \sum_{k} e^{x_k})}/dx_j
          = d{log(e^{x_i})}/dx_j - d{log(\sum_{k} e^{x_k})}/dx_j
          = \delta(i,j) - e^{x_j}/\sum_{k} e^{x_k}
          = \delta(i,j) - exp(y_j)

where \delta(i,j) = 1 if i = j, and \delta(i,j) = 0 otherwise.

if loss E is defined as cross entropy, i.e., E = -\sum_{k} (gold_k * y_k),
we have dE/dy_i = -gold_i (where {gold_k} is the gold standard distribution),
then

dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j}
        = \sum_{i} {-gold_i * (\delta(i,j) - exp(y_j))}
        = \sum_{i} {-gold_i * \delta(i,j)} + \sum_{i} {gold_i * exp(y_j)}
        = -gold_j + exp(y_j)

Note: gold_i is a distribution, i.e., \sum_{i} gold_i = 1.
If gold has a one-hot representation (gold_i = 1 for only one dimension),
we can reformulate it as dE/dx_j = -\delta(i,j) + exp(y_j).

There are two ways to implement this process.
Method 1: compute dE/dy and dy/dx respectively, and then reach dE/dx by
dE/dx = dE/dy * dy/dx (i.e., dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j}).
Method 2: compute dE/dx in a single step via dE/dx_j = -gold_j + exp(y_j).

Here we choose Method 2, i.e., we straightforwardly compute
dE/dx_j = -gold_j + exp(y_j)
(or dE/dx_j = -\delta(i,j) + exp(y_j) for a Maximum A Posteriori Estimation).
Method 1 is also fine but is more time consuming due to the summation over
dimensions.

Note that this method is not good for the standard-version softmax when
working with the cross entropy loss, because it is numerically unstable:
with the usual definition softmax y_i = e^{x_i} / \sum_{k} e^{x_k}, we have
dy_i/dx_j = y_i * \delta(i,j) - y_i * y_j. As y_i and y_j could be small
numbers, y_i * y_j would result in a much smaller one with a risk of losing
precision, which is even worse once dy_i/dx_j is multiplied with dE/dy_i.
So it is in general better to use log softmax for numerical stability.

>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> dedx - dE/dx
>> padding - when non-NULL, multiplied into dE/dx along the lead dimension
             (presumably a batch mask — verify against callers)
>> leadDim - leading dimension (along which we perform reduction)
>> lossName - type of loss function, e.g., cross entropy
*/
void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
                             XTensor * dedy, XTensor * dedx, XTensor * padding,
                             int leadDim, LOSS_FUNCTION_NAME lossName)
{
    /* negative leadDim means "last dimension" */
    leadDim = leadDim < 0 ? y->order - 1 : leadDim;

    CheckNTErrors((x->devID >= 0), "Backward computation of log softmax must be run on GPUs.");
    CheckNTErrors((x->devID == y->devID && gold->devID == y->devID),
                  "Tensors used in log softmax are not on the same GPU.");
    CheckNTErrors((gold != NULL), "No x gold standard is found!");

    int dimensionSize = y->dimSize[leadDim];
    int stride = 1;
    int blockSize = 1;
    int blockNum = 1;

    for (int i = leadDim + 1; i < y->order; i++)
        stride *= y->dimSize[i];
    blockSize = stride * dimensionSize;
    blockNum = y->unitNum / blockSize;

    int devIDBackup;
    ProtectCudaDev(x->devID, devIDBackup);

    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS),
                      "Unknown loss function.");

        int cudaGridSize[3], cudaBlockSize[3];

        if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) {
            if (gold->isSparse) {
                CheckNTErrors((gold->order == 2), "TODO!");
                CheckNTErrors((leadDim == 0), "TODO!");

                GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize);

                /* dE/ds_j = exp(y_j) */
                KernelExpLoss <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
                              (NULL, (DTYPE*)dedx->data, (DTYPE*)y->data, dimensionSize * stride, lossName);

                GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize);

                /* dE/ds_j += -gold_j */
                KernelLogSoftmaxBackwardDEDSSparseByRow <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
                              (NULL, (DTYPE*)dedx->data, (char*)gold->data + sizeof(int),
                               (DTYPE*)y->data, (DTYPE*)x->data,
                               dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
            }
            else {
                CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");

                for (int k = 0; k < blockNum; k++) {
                    GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);

                    /* dE/ds_j = -gold_j + exp(y_j) */
                    KernelLogSoftmaxBackwardDEDS <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
                                 (NULL, (DTYPE*)dedx->data + k * blockSize, (DTYPE*)gold->data + k * blockSize,
                                  (DTYPE*)y->data + k * blockSize, (DTYPE*)x->data + k * blockSize,
                                  dimensionSize * stride, lossName);
                }
            }

            /* mask out padded positions: temporarily flatten both tensors,
               multiply dedx row-wise by padding, then restore the shapes */
            if(padding != NULL) {
                int n = leadDim;
                int paddingOrder = padding->order;
                int * paddingDims = new int[paddingOrder];
                memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int));
                padding->Reshape(padding->unitNum);

                int order = dedx->order;
                int * dims = new int[order];
                memcpy(dims, dedx->dimSize, dedx->order * sizeof(int));
                dedx->Reshape(dedx->unitNum/dedx->GetDim(n), dedx->GetDim(n));
                _MultiplyDimMe(dedx, padding, 0);

                padding->Reshape(paddingOrder, paddingDims);
                dedx->Reshape(order, dims);

                delete[] paddingDims;
                delete[] dims;
            }
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    else{
        ShowNTErrors("TODO!");
    }

    BacktoCudaDev(x->devID, devIDBackup);
}

#endif

} // namespace nts(NiuTrans.Tensor)
18,970
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>

#define Inf 9999
#define inf 9999
#define INF 9999

float *dist;      /* device buffer, reused by all three kernel variants */
float *kernel1;   /* host copies of each kernel's result, used by Check() */
float *kernel2;
float *kernel3;

struct timeval startwtime, endwtime;
double seq_time;

__global__ void floydWarshellKernel2 (float* dist, int k, int n);
__global__ void floydWarshellKernel1(float *dist, int k, int n);
__global__ void floydWarshellKernel3(float *dist, int k, int n);
void floydWarshellSerial (float* graph, float* result, int n);
void printSolution(float* dist, int n);
void Check(float* array1, float* array2, int n);

int main(int argc, char*argv[]){
    int n;
    /*Check Arguments*/
    if (argc!=3){
        printf("Error, two arguments are needed. arg1 = full path of the input txt file"
               " which contains the matrix, arg2 = n, where n X n is matrix dimension \n");
        exit(1);
    }
    /*Open file*/
    FILE *inputMatrix;
    inputMatrix=fopen(argv[1], "r+");
    /*Check if success*/
    if (inputMatrix==NULL){
        printf("Error opening file. Check file permissions\n");
        exit(1);
    }
    n= 1<<atoi(argv[2]);

    /* read the n x n distance matrix (row major) */
    float *graph;
    graph=(float*)malloc(n*n*sizeof(float));
    for (int i=0; i<n; i++){
        for (int j=0; j<n; j++){
            fscanf(inputMatrix, "%f", &graph[j+i*n]);
        }
    }
    printf("\n");
    fclose(inputMatrix);

    kernel1=(float*)malloc(n*n*sizeof(float));
    kernel2=(float*)malloc(n*n*sizeof(float));
    kernel3=(float*)malloc(n*n*sizeof(float));

    //////////////////Serial algorithm/////////////////////
    gettimeofday (&startwtime, NULL);
    float *result;
    result=(float*)malloc(n*n*sizeof(float));
    floydWarshellSerial(graph, result, n);
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
    printf("Serial time: %f\n", seq_time);

    ///////////////Cuda kernel 1 algorithm///////////////////
    int blocksize= 4;
    dim3 dimBlock( blocksize, blocksize );
    dim3 dimGrid( n/dimBlock.x, n/dimBlock.y );
    gettimeofday (&startwtime, NULL);
    cudaMalloc((void**)&dist, n*n*sizeof(float));
    cudaMemcpy(dist, graph, n*n*sizeof(float), cudaMemcpyHostToDevice);
    for (int k=0; k<n; k++)
        floydWarshellKernel1<<<dimGrid, dimBlock>>>(dist, k, n);
    cudaMemcpy(kernel1, dist, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
    Check(kernel1, result, n);
    printf("Cuda time, kernel 1: %f \n", seq_time);

    ///////////////Cuda kernel 2 algorithm///////////////////
    blocksize= 4;
    dim3 dimBlock2( blocksize, blocksize );
    dim3 dimGrid2(n, (n+blocksize-1)/blocksize);
    gettimeofday (&startwtime, NULL);
    cudaMemcpy(dist, graph, n*n*sizeof(float), cudaMemcpyHostToDevice);
    for (int k=0; k<n; k++) {
        floydWarshellKernel2<<<dimGrid2, dimBlock2>>>(dist, k, n);
    }
    cudaMemcpy(kernel2, dist, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
    Check(kernel2, result, n);
    printf("Cuda time, kernel 2: %f\n", seq_time);

    ///////////////Cuda kernel 3 algorithm///////////////////
    blocksize= 128;
    int gridsize=n/blocksize;
    gettimeofday (&startwtime, NULL);
    /* BUGFIX: the original called cudaMalloc on `dist` again here,
       leaking the buffer allocated for kernel 1; it is simply reused. */
    cudaMemcpy(dist, graph, n*n*sizeof(float), cudaMemcpyHostToDevice);
    for (int k=0; k<n; k++) {
        floydWarshellKernel3<<<gridsize, blocksize>>>(dist, k, n);
    }
    cudaMemcpy(kernel3, dist, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
    Check(kernel3, result, n);
    printf("Cuda time, kernel 3: %f\n", seq_time);

    /* release device and host buffers (previously leaked) */
    cudaFree(dist);
    free(graph);
    free(result);
    free(kernel1);
    free(kernel2);
    free(kernel3);
    return 0;
}

/* one block per row (blockIdx.x); thread column j relaxes dist[i][j]
   through vertex k, with dist[i][k] broadcast via shared memory */
__global__ void floydWarshellKernel2(float *dist, int k, int n)
{
    int j=blockIdx.y*blockDim.y + threadIdx.y;
    int idx=n*blockIdx.x+j;

    __shared__ float best;
    if(threadIdx.y==0)
        best=dist[n*blockIdx.x+k];
    /* BUGFIX: the barrier must be reached by every thread of the block;
       the original returned early for j >= n BEFORE __syncthreads(),
       which is undefined behavior (divergent barrier). */
    __syncthreads();

    if(j>=n)
        return;

    if(dist[k*n+j]+best<dist[idx]){
        dist[idx]=dist[k*n+j]+best;
    }
}

/* one thread per matrix cell (i,j); relax through vertex k */
__global__ void floydWarshellKernel1(float *dist, int k, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = i*n + j;

    if (i<n && j<n){
        if (dist[k+i*n] + dist[j+k*n] < dist[index]){
            dist[index] = dist[k+i*n]+dist[j+k*n];
        }
    }
    /* note: the original ended with a pointless __syncthreads(); a barrier
       at the very end of a kernel has no effect and was removed */
}

/* reference CPU implementation of Floyd-Warshall */
void floydWarshellSerial(float *graph, float *result, int n)
{
    for (int i = 0; i<n; i++){
        for (int j = 0; j<n; j++){
            result[j+i*n] = graph[j+i*n];
        }
    }

    for (int k=0; k<n; k++)
    {
        // Pick all vertices as source one by one
        for (int i=0; i<n; i++)
        {
            // Pick all vertices as destination for the
            // above picked source
            for (int j=0; j<n; j++)
            {
                // If vertex k is on the shortest path from
                // i to j, then update the value of Distance-Matrix[i][j]
                if (result[k+i*n] + result[j+k*n] < result[j+i*n])
                    result[j+i*n] = result[k+i*n]+result[j+k*n];
            }
        }
    }
}

/* each thread owns column j and walks all rows i, with dist[i][k]
   broadcast through shared memory once per row */
__global__ void floydWarshellKernel3(float *dist, int k, int n)
{
    int j= blockDim.x*blockIdx.x+threadIdx.x;

    __shared__ float best;
    for (int i=0; i<n; i++){
        if (threadIdx.x==0)
            best=dist[n*i+k];
        __syncthreads();

        /* BUGFIX: the original returned early for j >= n before the loop,
           skipping the barriers (divergent __syncthreads, UB); the bound
           check now guards only the update */
        if (j<n && best+dist[k*n+j]<dist[n*i+j]){
            dist[n*i+j]=best+dist[k*n+j];
        }
        /* BUGFIX: barrier before thread 0 overwrites `best` for the next
           row; otherwise lagging threads could read the new value (race) */
        __syncthreads();
    }
}

/* print the full distance matrix (debug helper) */
void printSolution(float* dist, int n){
    printf ("Following matrix shows the shortest distances"
            " between every pair of vertices \n");
    printf("\n");
    for (int i=0; i<n; i++)
    {
        for (int j=0; j<n; j++)
        {
            printf ("%f ", dist[j+i*n]);
        }
        printf("\n");
    }
}

/* compare a GPU result against the serial reference; exits on mismatch */
void Check(float* array1, float* array2, int n){
    for (int i=0; i<n*n; i++){
        if (array1[i]!=array2[i]){
            printf("Incorrect Solution\n");
            exit(1);
        }
    }
    printf("Correct Solution\n");
}
18,971
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> const int iTileSize = 2; __global__ void matrixMultiplyKernel(float *A, float *B, float *C, int m, int n, int k) { int Row = threadIdx.y + blockDim.y * blockIdx.y; int Col = threadIdx.x + blockDim.x * blockIdx.x; if(Row <m && Col <k){ float CValue = 0.0; for(int i=0; i<n; i++) CValue += A[Row*n+i]*B[i*k+Col]; C[Row*k+Col] = CValue; } } void matrixMultiplication(float *h_A, float *h_B, float *h_C, int m, int n, int k) { dim3 dimGrid((k-1)/iTileSize+1,(m-1)/iTileSize+1,1); dim3 dimBlock(iTileSize,iTileSize); float *d_A, *d_B, *d_C; cudaMalloc((void**)&d_A, m*n*sizeof(float)); cudaMemcpy(d_A,h_A,m*n*sizeof(float),cudaMemcpyHostToDevice); cudaMalloc((void**)&d_B, n*k*sizeof(float)); cudaMemcpy(d_B,h_B,n*k*sizeof(float),cudaMemcpyHostToDevice); cudaMalloc((void**)&d_C, m*k*sizeof(float)); matrixMultiplyKernel<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, m, n, k); cudaMemcpy(h_C, d_C, k*m*sizeof(float),cudaMemcpyHostToDevice); cudaFree(h_A); cudaFree(h_B); cudaFree(h_C); } int main() { float h_A[16], h_B[16], h_C[16]; for(int y=0; y<16; y++){ h_A[y] = float(y); h_B[y] = 1.0; } matrixMultiplication(h_A,h_B,h_C,4,4,4); printf("The result is:\n"); for(int j=0; j<4*4; j++){ if((j+1)%4 !=0){ printf("%3.1f\t",h_C[j]); } else{ printf("%3.1f\n",h_C[j]); } } return 0; }
18,972
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <math.h> __global__ void add(int n, float* x, float* y) { //int index = threadIdx.x; //int stride = blockDim.x; //need to take into account new grid of thread blocks //index of thread in block int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] * y[i] + x[i]; } } int main(void) { int N = 1 << 20; //to make these accessible to GPU need to put in Unified memory using cudaMallocManaged() //float* x = new float[N]; //float* y = new float[N]; float *x, *y; cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } /* use <<<# blocks, # threads>>> to launch add on GPU, <<<1,1>>> launches one thread to run function on GPU will get error in VS on these, but program will still run */ //calc number of blocks to get N threads int blockSize = 1024; int numBlocks = (N + blockSize - 1) / blockSize; add<<<numBlocks,256>>>(N, x, y); //use cudaDeviceSynchronize() to make sure kernel is done before CPU access results cudaDeviceSynchronize(); float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = fmax(maxError, fabs(y[i] - 3.0f)); } std::cout << "Max error: " << maxError << std::endl; //to free data in Unified memory need to use cudaFree() //delete[] x; //delete[] y; cudaFree(x); cudaFree(y); return 0; }
18,973
//
// Created by ivan on 11.01.2020.
//
#include "operations.cuh"

/* element-wise sum of two equally shaped matrices; returns a new Matrix */
__host__ __device__ Matrix* sum(Matrix *a, Matrix *b) {
    const int n = a->n();
    const int m = a->m();
    const int nm = n * m;
    // int startIdx = blockIdx.x * blockDim.x + threadIdx.x;
    // int offset = gridDim.x * blockDim.x;
    Matrix* res = new Matrix(n, m);
    for (int ij = 0; ij < nm; ij++) {
        int i = ij / m, j = ij % m;
        double aij = a->get(i, j);
        double bij = b->get(i, j);
        res->set(i, j, aij + bij);
    }
    return res;
}

/* print an n x m matrix, one row per line */
__host__ __device__ void show(Matrix* mtr, int n, int m) {
    // int startIdx = blockIdx.x * blockDim.x + threadIdx.x;
    // int offset = gridDim.x * blockDim.x;
    printf("-----MATRIX(%d, %d)-------\n", n, m);
    // printf("StartIdx(%d), offset(%d)", startIdx, offset);
    for (int ij = 0; ij < n * m; ij++) {
        int i = ij / m, j = ij % m;
        if (j == 0) {
            printf("\n");
        }
        printf("%f ", mtr->get(i, j));
    }
    printf("\n-----------\n");
}

/* print every element of an n x m x k tensor */
__host__ void show(Matrix* mtr, int n, int m, int k) {
    printf("-----TENSOR(%d, %d, %d)-------\n", n, m, k);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            for (int q = 0; q < k; q++) {
                /* BUGFIX: the third index is the loop variable q; the
                   original passed the bound k and printed the same slice
                   for every q. */
                printf("(%d, %d, %d): %f\n", i, j, q, mtr->get(i, j, q));
            }
        }
    }
}

/* naive O(n*m*k) matrix product; returns a new a->n() x b->m() Matrix */
__host__ __device__ Matrix* multiply(Matrix *a, Matrix *b) {
    Matrix* res = new Matrix(a->n(), b->m());
    // TODO
    // Added better implementation
    for (int i = 0; i < a->n(); i++) {
        for (int j = 0; j < b->m(); j++) {
            for (int k = 0; k < a->m(); k++) {
                double aik = a->get(i, k);
                double bkj = b->get(k, j);
                double prev_resij = res->get(i, j);
                res->set(i, j, prev_resij + aik * bkj);
            }
        }
    }
    return res;
}

/* scalar multiple of a matrix; returns a new Matrix */
__host__ __device__ Matrix* multiply(Matrix *a, double b) {
    const int n = a->n(), m = a->m();
    Matrix* res = new Matrix(n, m);
    for (int ij = 0; ij < n * m; ij++) {
        int i = ij / m;
        int j = ij % m;
        double aij = a->get(i, j);
        res->set(i, j, aij * b);
    }
    return res;
}

/* device kernel: c = a * b, each thread computing a grid-strided subset
   of c's elements */
__global__ void multiply(Matrix* a, Matrix* b, Matrix* c) {
    int startIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = gridDim.x * blockDim.x;
    int n = a->n();
    int m = a->m();
    int k = b->m();
    for (int i = startIdx; i < n * k; i += offset) {
        double sum = 0;
        int row = i / k;
        int col = i % k;
        for(int q = 0; q < m; q++) {
            // sum += a->matrix[row * m + q] * b->matrix[q * m + col];
            sum += a->get(row, q) * b->get(q, col);
        }
        // c->matrix[row * k + col] = sum;
        c->set(row, col, sum);
    }
}

/* true iff every element of a and b agrees within eps */
__host__ __device__ bool equals(Matrix *a, Matrix *b, double eps) {
    bool res = true;
    const int n = a->n(), m = a->m();
    for (int ij = 0; ij < n * m; ij++) {
        double aij = a->get(ij / m,ij % m);
        double bij = b->get(ij / m,ij % m);
        res = res && (abs(aij - bij) <= eps);
    }
    return res;
}

/* max-abs (Chebyshev) distance between two matrices */
__host__ __device__ double diff(Matrix *a, Matrix *b) {
    Matrix* tmp = multiply(b, -1);
    Matrix* res = sum(a, tmp);
    double diff = 0;
    const int n = a->n(), m = a->m();
    for (int ij = 0; ij < n * m; ij++) {
        double rij = res->get(ij / m, ij % m);
        /* BUGFIX: take the maximum of |a_ij - b_ij|. The original computed
           max(abs(diff), rij), which ignores negative differences (and
           applied abs to the accumulator instead of the element). */
        diff = max(diff, abs(rij));
    }
    delete tmp;
    delete res;
    return diff;
}

/* n x m matrix with i.i.d. standard-normal entries (time-seeded) */
__host__ Matrix* randomMatrix(int n, int m) {
    auto* res = new Matrix(n, m);
    auto seed = std::chrono::system_clock::now().time_since_epoch().count();
    std::mt19937 generator(seed);
    std::normal_distribution<double> distribution(0, 1);
    for (int ij = 0; ij < n * m; ij++) {
        res->set(ij / m, ij % m, distribution(generator));
    }
    return res;
}

/* Euclidean length of a column vector (first column of a) */
__host__ __device__ double vectorColLength(Matrix *a) {
    double sum = 0;
    for (int i = 0; i < a->n(); i++) {
        double ai0 = a->get(i, 0);
        sum += ai0 * ai0;
    }
    return sqrt(sum);
}

/* Frobenius norm computed over the logical n x m view */
__host__ __device__ double matrixNorm(Matrix *a) {
    const int n = a->n(), m = a->m();
    double sum = 0;
    for (int ij = 0; ij < n * m; ij++) {
        double aij = a->get(ij / m, ij % m);
        sum += aij * aij;
    }
    return sqrt(sum);
}

/* Frobenius norm computed over the raw backing storage (any shape) */
__host__ __device__ double frobeniousNorm(Matrix* a) {
    int total = 1;
    for (int i = 0; i < a->shape_length; i++) {
        total *= a->real_shape[i];
    }
    double sum = 0;
    for (int i = 0; i < total; i++) {
        double value = a->matrix[i];
        sum += value * value;
    }
    return sqrt(sum);
}

/* scale a column vector to unit Euclidean length; returns a new Matrix */
__host__ __device__ Matrix* vectorColNormalize(Matrix *a) {
    double sum = 1 / vectorColLength(a);
    return
multiply(a, sum);
}

/* transpose of a; returns a new m x n Matrix */
__host__ __device__ Matrix* transpose(Matrix *a) {
    const int n = a->n(), m = a->m();
    auto* res = new Matrix(m, n);
    for (int ij = 0; ij < n * m; ij++) {
        double aij = a->get(ij / m, ij % m);
        res->set(ij % m, ij / m, aij);
    }
    return res;
}

/* copy of the sub-block a[rowStart:rowEnd, colStart:colEnd) (end exclusive) */
__host__ __device__ Matrix* subMatrix(Matrix *a, int rowStart, int rowEnd, int colStart, int colEnd) {
    int n = rowEnd - rowStart;
    int m = colEnd - colStart;
    auto* res = new Matrix(rowEnd - rowStart, colEnd - colStart);
    // TODO:
    // Add CUDA parallel option of loop
    for (int ij = 0; ij < n * m; ij++) {
        int i = ij / m;
        int j = ij % m;
        double aij = a->get(rowStart + i, colStart + j);
        res->set(i, j, aij);
    }
    return res;
}

/* n x m Hilbert matrix: res[i][j] = 1 / (i + j + 1) */
__host__ __device__ Matrix* hilbert(int n, int m) {
    auto* res = new Matrix(n, m);
    int i, j;
    // TODO:
    // Add CUDA parallel option of loop
    for (int ij = 0; ij < n * m; ij++) {
        i = ij / m;
        j = ij % m;
        res->set(ij / m, ij % m, 1. / (i + j + 1));
    }
    return res;
}

/* order-3 Hilbert tensor, column-major style layout: index = i + n*j + n*m*q */
__host__ Matrix* hilbert(int n, int m, int k) {
    int * shapes = new int[3];
    shapes[0] = n;
    shapes[1] = m;
    shapes[2] = k;
    auto* res = new Matrix(n * m, k);
    res->reshape(shapes, 3);
    delete[] shapes;
    // TODO:
    // Add CUDA parallel option of loop
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            for (int q = 0; q < k; q++) {
                double val = 1./(i + j + q + 1);
                // printf("Set(%d, %d, %d) %f\n", i, j, q, val);
                int index = i + n * j + n * m * q;
                res->matrix[index] = val;
            }
        }
    }
    return res;
}

/* order-5 Hilbert tensor: value 1/(sum of indices + 1), same layout scheme */
Matrix* hilbert(int i1, int i2, int i3, int i4, int i5) {
    int* shapes = new int[5]{i1, i2, i3, i4, i5};
    auto* res = new Matrix(i1, i2 * i3 * i4 * i5);
    res->reshape(shapes, 5);
    for (int i = 0; i < i1; i++) {
        for (int j = 0; j < i2; j++) {
            for (int k = 0; k < i3; k++) {
                for (int l = 0; l < i4; l++) {
                    for (int w = 0; w < i5; w++) {
                        int index = i + i1 * j + i1 * i2 * k + i1 * i2 * i3 * l + i1 * i2 * i3 * i4 * w;
                        double val = 1. / (i + j + k + l + w + 1);
                        res->matrix[index] = val;
                    }
                }
            }
        }
    }
    return res;
}

/* order-7 Hilbert tensor */
Matrix* hilbert(int i1, int i2, int i3, int i4, int i5, int i6, int i7) {
    int* shapes = new int[7]{i1, i2, i3, i4, i5, i6, i7};
    auto* res = new Matrix(i1, i2 * i3 * i4 * i5 * i6 * i7);
    res->reshape(shapes, 7);
    for (int i = 0; i < i1; i++) {
        for (int j = 0; j < i2; j++) {
            for (int k = 0; k < i3; k++) {
                for (int l = 0; l < i4; l++) {
                    for (int w = 0; w < i5; w++) {
                        for (int e = 0; e < i6; e++) {
                            for (int r = 0; r < i7; r++) {
                                int index = i + i1 * j + i1 * i2 * k + i1 * i2 * i3 * l + i1 * i2 * i3 * i4 * w
                                          + i1 * i2 * i3 * i4 * i5 * e + i1 * i2 * i3 * i4 * i5 * i6 * r;
                                double val = 1. / (i + j + k + l + w + e + r + 1);
                                res->matrix[index] = val;
                            }
                        }
                    }
                }
            }
        }
    }
    return res;
}

/* 7-dimensional r^7 tensor of sin(step * (i1 + ... + i7)) */
Matrix *sinCube(int r, double step) {
    int d = 7;
    vector<int> shape;
    int total = 1;
    for (int i = 0; i < d; i++) {
        shape.push_back(r);
        total *= r;
    }
    total /= r;
    auto *res = new Matrix(r, total);
    int* next_shape = new int[shape.size()];
    for ( int i = 0; i < shape.size(); i++) {
        next_shape[i] = shape[i];
    }
    res->reshape(next_shape, shape.size());
    for (int i1 = 0; i1 < r; i1++) {
        for (int i2 = 0; i2 < r; i2++) {
            for (int i3 = 0; i3 < r; i3++) {
                for (int i4 = 0; i4 < r; i4++) {
                    for (int i5 = 0; i5 < r; i5++) {
                        for (int i6 = 0; i6 < r; i6++) {
                            for (int i7 = 0; i7 < r; i7++) {
                                double sum = i1 * step + i2 * step + i3 * step + i4 * step + i5 * step + i6 * step + i7 * step ;
                                double sn = sin(sum);
                                int index = i1 + r * i2 + r * r * i3 + r * r * r * i4 + r * r * r * r * i5
                                          + r * r * r * r * r * i6 + r * r * r * r * r * r * i7;
                                res->matrix[index] = sn;
                            }
                        }
                    }
                }
            }
        }
    }
    return res;
}

/* contract a tensor-train `tt` with the rank-one vectors `u`:
   v = (u0^T * tt0) * G1 * ... * Gk * (tt_last * u_last), where each Gk is
   tt[k] contracted with u[k] along its middle (mode) dimension.
   NOTE(review): every multiply/transpose here allocates a new Matrix that
   is never deleted — this routine leaks its intermediates; verify whether
   that matters to the callers. */
double convolution(vector<Matrix*> tt, vector<Matrix *> u) {
    auto v1 = multiply(transpose(u[0]), tt[0]);
    auto vn = multiply(tt.back(), u.back());
    vector<Matrix*> gks;
    for (int i = 1; i < tt.size() - 1; i++) {
        int r1 = tt[i]->real_shape[0];
        int nk = tt[i]->real_shape[1];
        int r2 = tt[i]->real_shape[2];
        auto* gk_cur = new Matrix(r1, r2);
        for (int i1 = 0; i1 < r1; i1++)
        {
            for (int i2 = 0; i2 < r2; i2++) {
                for (int q = 0; q < nk; q++) {
                    double val = gk_cur->get(i1, i2);
                    double sum = tt[i]->matrix[i1 + r1 * q + r1 * nk * i2] * u[i]->get(q, 0);
                    gk_cur->set(i1, i2, val + sum);
                }
            }
        }
        gks.push_back(gk_cur);
    }
    Matrix* v = v1;
    for (int i = 0; i < gks.size(); i++) {
        v = multiply(v, gks[i]);
    }
    v = multiply(v, vn);
    return v->get(0, 0);
}
18,974
#define _NTHREAD 512
#define _NBLOCK 65535

__global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int ,int ,int ,int );

#define MIN(a,b) (((a)<(b))?(a):(b))

#include<cuda.h>
#include<stdio.h>
#include<stdlib.h>

// Driver for an auto-generated tiled affine loop-nest transformation:
// updates XP1[i][j] from XP1[i+3][j+4] + XS3[i] over a skewed iteration space.
int main()
{
    int XP1[20][20], XS3[20], i, j, k;

    // Initialize host data.
    for (i = 0; i < 20; i++)
        for (j = 0; j < 20; j++) {
            XP1[i][j] = i + j;
            XS3[i] = 2 * i;
        }

    int _SZ_XS3_1 = 20;
    int _SZ_XP1_2 = 20;
    int _SZ_XP1_1 = 20;

    // Device copies of XS3 and XP1.
    int *_DEV_XS3;
    cudaMalloc((void**) &_DEV_XS3, sizeof(int) * _SZ_XS3_1);
    cudaMemcpy(_DEV_XS3, XS3, sizeof(int) * _SZ_XS3_1, cudaMemcpyHostToDevice);

    int *_DEV_XP1;
    cudaMalloc((void**) &_DEV_XP1, sizeof(int) * _SZ_XP1_2 * _SZ_XP1_1);
    cudaMemcpy(_DEV_XP1, XP1, sizeof(int) * _SZ_XP1_2 * _SZ_XP1_1, cudaMemcpyHostToDevice);

    // Choose a launch configuration for 400 logical threads; fall back to a
    // 20x20 block when that fits under the per-block thread limit.
    float _NUM_THREADS = 400, _NUM_BLOCKS = 1;
    int _NUM_TILE = 1;
    dim3 _THREADS(512);
    dim3 _BLOCKS(1);
    if (_NUM_THREADS < _NTHREAD) {
        _THREADS.x = 20;
        _THREADS.y = 20;
    } else {
        _NUM_BLOCKS = _NUM_THREADS / 256;
        _BLOCKS.x = _BLOCKS.y = ceil(sqrt(_NUM_BLOCKS));
        _THREADS.x = _THREADS.y = ceil(sqrt(400.0 / (_BLOCKS.x * _BLOCKS.y)));
        int temp = _NUM_BLOCKS;
        if (_NUM_BLOCKS > _NBLOCK)
            _NUM_TILE = (temp % _NBLOCK == 0) ? (_NUM_BLOCKS / _NBLOCK)
                                              : ((_NUM_BLOCKS / _NBLOCK) + 1);
    }

    int ID_1, ID_2, START[2];
    int _CUDA_TILE;
    int Phi[2] = {3, 4};                   // dependence distances per dimension
    int loopUpperLimits[2] = {20, 20};

    // Wavefront over tiles: each outer step advances START by Phi.
    for (ID_1 = 1; ID_1 <= MIN(20/3, 20/4) + 1; ID_1++) {
        for (ID_2 = 0; ID_2 < 2; ID_2++) {
            if (Phi[ID_2] >= 0)
                START[ID_2] = (ID_1 - 1) * Phi[ID_2];
            else
                START[ID_2] = loopUpperLimits[ID_2] + (ID_1 - 1) * Phi[ID_2];
        }

        // First slab: rows [START[0], START[0]+3], all columns from START[1].
        for (_CUDA_TILE = 0; _CUDA_TILE < _NUM_TILE; _CUDA_TILE++) {
            _AFFINE_KERNEL<<<_BLOCKS, _THREADS>>>(_DEV_XS3, _SZ_XS3_1,
                _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1,
                START[0], MIN(START[0] + 3, 20), START[1], 20, _CUDA_TILE);
            cudaDeviceSynchronize();
        }
        // Second slab: remaining rows, columns [START[1], START[1]+4].
        for (_CUDA_TILE = 0; _CUDA_TILE < _NUM_TILE; _CUDA_TILE++) {
            _AFFINE_KERNEL<<<_BLOCKS, _THREADS>>>(_DEV_XS3, _SZ_XS3_1,
                _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1,
                START[0] + 3, 20, START[1], MIN(START[1] + 4, 20), _CUDA_TILE);
            cudaDeviceSynchronize();
        }
    }

    cudaMemcpy(XP1, _DEV_XP1, sizeof(int) * _SZ_XP1_2 * _SZ_XP1_1, cudaMemcpyDeviceToHost);
    return 0;
}

// One thread per (i, j) iteration inside the inclusive bounds
// [CUDA_L_i, CUDA_U_i] x [CUDA_L_j, CUDA_U_j]; _CUDA_TILE offsets the grid
// when the logical grid exceeds the hardware block limit.
// NOTE(review): the read XP1[(i+3)*_SZ_XP1_1 + j+4] can index past the 20x20
// array when i or j is near the upper bound (e.g. i == 20 in the second slab
// reads row 23) — confirm the generator's intended iteration space.
__global__ void _AFFINE_KERNEL(int* XS3, int _SZ_XS3_1,
                               int* XP1, int _SZ_XP1_2, int _SZ_XP1_1,
                               int CUDA_L_i, int CUDA_U_i,
                               int CUDA_L_j, int CUDA_U_j,
                               int _CUDA_TILE)
{
    int i = gridDim.x * blockDim.x * _CUDA_TILE + blockDim.x * blockIdx.x + threadIdx.x;
    int j = gridDim.y * blockDim.y * _CUDA_TILE + blockDim.y * blockIdx.y + threadIdx.y;
    if ((CUDA_L_i <= i) && (i <= CUDA_U_i)) {
        if ((CUDA_L_j <= j) && (j <= CUDA_U_j)) {
            XP1[i * _SZ_XP1_1 + j] = XP1[(i + 3) * _SZ_XP1_1 + j + 4] + XS3[i];
        }
    }
}
18,975
#include <stdio.h>
#include <stdlib.h>

const int INF = 10000000;

int *host_D;   // n x n distance matrix on the host
int *dev_D;    // device copy
int n, m;      // vertex count, edge count

// Read "n m" then m directed edges "a b v" (1-based) into host_D.
// Missing edges are INF, the diagonal is 0.
void Input(char *inFileName)
{
    FILE *infile = fopen(inFileName, "r");
    setvbuf(infile, new char[1 << 20], _IOFBF, 1 << 20);
    fscanf(infile, "%d %d", &n, &m);
    host_D = (int*)malloc(n * n * sizeof(int));
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            if (i == j) host_D[i * n + j] = 0;
            else        host_D[i * n + j] = INF;
        }
    }
    while (--m >= 0) {
        int a, b, v;
        fscanf(infile, "%d %d %d", &a, &b, &v);
        host_D[(a - 1) * n + (b - 1)] = v;
    }
    fclose(infile);
}

// Write the distance matrix; unreachable pairs print as "INF".
void Output(char *outFileName)
{
    FILE *outfile = fopen(outFileName, "w");
    setvbuf(outfile, new char[1 << 20], _IOFBF, 1 << 20);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            if (host_D[i * n + j] >= INF) fprintf(outfile, "INF ");
            else fprintf(outfile, "%d ", host_D[i * n + j]);
        }
        fprintf(outfile, "\n");
    }
    fclose(outfile);
}

// Phase 1 of blocked Floyd-Warshall: relax the pivot block (k, k) in shared
// memory. Launch: 1 block of B*B threads, B*B*sizeof(int) dynamic shared.
__global__ void func1(int n, int B, int k, int* arr)
{
    extern __shared__ int shared_memory[];
    int* dBlock = shared_memory;
    int i = threadIdx.x / B;
    int j = threadIdx.x % B;
    int x = i + k * B;
    int y = j + k * B;
    dBlock[threadIdx.x] = (x < n && y < n) ? arr[x * n + y] : INF;
    for (int l = 0; l < B; l++) {
        __syncthreads();
        int temp = dBlock[(i * B) + l] + dBlock[(l * B) + j];
        if (dBlock[threadIdx.x] > temp) {
            dBlock[threadIdx.x] = temp;
        }
    }
    if (x < n && y < n) arr[x * n + y] = dBlock[threadIdx.x];
}

// Phase 2: relax the pivot row (blockIdx.y == 0) and pivot column
// (blockIdx.y != 0) against the already-final pivot block.
// Launch: grid (round, 2), B*B threads, 2*B*B*sizeof(int) dynamic shared.
__global__ void func2(int n, int B, int k, int* arr)
{
    if (blockIdx.x == k) return;   // pivot block itself was done in phase 1
    extern __shared__ int shared_memory[];
    int* dBlock = shared_memory;            // pivot block
    int* cBlock = &shared_memory[B * B];    // this row/column block
    int i = threadIdx.x / B;
    int j = threadIdx.x % B;
    int x = i + k * B;
    int y = j + k * B;
    dBlock[threadIdx.x] = (x < n && y < n) ? arr[x * n + y] : INF;
    if (blockIdx.y != 0) x = i + blockIdx.x * B;
    if (blockIdx.y == 0) y = j + blockIdx.x * B;
    cBlock[threadIdx.x] = (x < n && y < n) ? arr[x * n + y] : INF;
    for (int l = 0; l < B; l++) {
        __syncthreads();
        int temp = (blockIdx.y == 0) ?
            dBlock[i * B + l] + cBlock[l * B + j] :
            cBlock[i * B + l] + dBlock[l * B + j];
        if (cBlock[threadIdx.x] > temp) {
            cBlock[threadIdx.x] = temp;
        }
    }
    if (x < n && y < n) arr[x * n + y] = cBlock[threadIdx.x];
}

// Phase 3: relax every remaining block against its pivot-row and pivot-column
// blocks. Launch: grid (round, round), B*B threads, 2*B*B*sizeof(int) shared.
__global__ void func3(int n, int B, int k, int* arr)
{
    if (blockIdx.x == k || blockIdx.y == k) return;  // handled in phases 1-2
    extern __shared__ int shared_memory[];
    int* dyBlock = shared_memory;           // pivot-column block (row blockIdx.x)
    int* dxBlock = &shared_memory[B * B];   // pivot-row block (col blockIdx.y)
    int i = threadIdx.x / B;
    int j = threadIdx.x % B;
    int x = i + k * B;
    int y = j + blockIdx.y * B;
    dxBlock[threadIdx.x] = (x < n && y < n) ? arr[x * n + y] : INF;
    x = i + blockIdx.x * B;
    y = j + k * B;
    dyBlock[threadIdx.x] = (x < n && y < n) ? arr[x * n + y] : INF;
    x = i + blockIdx.x * B;
    y = j + blockIdx.y * B;
    int dist = (x < n && y < n) ? arr[x * n + y] : INF;
    __syncthreads();
    for (int l = 0; l < B; l++) {
        int temp = dyBlock[i * B + l] + dxBlock[l * B + j];
        if (dist > temp) {
            dist = temp;
        }
    }
    if (x < n && y < n) arr[x * n + y] = dist;
}

// Run blocked Floyd-Warshall with block size B on the device.
void Block(int B)
{
    cudaMalloc(&dev_D, n * n * sizeof(int));
    cudaMemcpy(dev_D, host_D, n * n * sizeof(int), cudaMemcpyHostToDevice);
    int round = (n + B - 1) / B;
    dim3 bk1(1, 1);
    dim3 bk2(round, 2);
    dim3 bk3(round, round);
    int gputhreads = B * B;
    for (int k = 0; k < round; k++) {
        func1<<<bk1, gputhreads, gputhreads * sizeof(int)>>>(n, B, k, dev_D);
        func2<<<bk2, gputhreads, 2 * gputhreads * sizeof(int)>>>(n, B, k, dev_D);
        func3<<<bk3, gputhreads, 2 * gputhreads * sizeof(int)>>>(n, B, k, dev_D);
    }
    // BUG FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    cudaMemcpy(host_D, dev_D, n * n * sizeof(int), cudaMemcpyDeviceToHost);
    // BUG FIX: the device buffer was never released.
    cudaFree(dev_D);
}

// Usage: ./prog <input> <output> <block-size>
int main(int argc, char **argv)
{
    Input(argv[1]);
    int B = atoi(argv[3]);
    Block(B);
    Output(argv[2]);
    return 0;
}
18,976
#include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> int getMaxThreads( const int max_regs_per_thread, int cuda_device) { cudaDeviceProp d; cudaGetDeviceProperties(&d, cuda_device); return d.regsPerBlock / max_regs_per_thread; }
18,977
#include<stdio.h> #include<math.h> __global__ void sub(float *a,float *b,int count,float mean) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id<count) { b[id] = (a[id]-mean)*(a[id]-mean); } } int main(void) { float h_a[10],h_b[10],mean,std_dev=0,var=0; int i,count=10,sum=0; float *d_a,*d_b; for(i=0;i<count;i++) { h_a[i] = i; h_b[i] = 0.0; } printf("\n\tPrinting Array: "); for(i=0;i<count;i++) { printf("\n\t %f ",h_a[i]); } printf("\n\n\tAddition of Array = "); for(i=0;i<count;i++) { sum+=h_a[i]; } printf(" %d",sum); mean = (float)sum/count; printf("\n\tMean = %f",mean); cudaMalloc(&d_a,sizeof(int)*count); cudaMemcpy(d_a,h_a,sizeof(int)*count,cudaMemcpyHostToDevice); cudaMalloc(&d_b,sizeof(int)*count); cudaMemcpy(d_b,h_b,sizeof(int)*count,cudaMemcpyHostToDevice); sub<<<1,10>>>(d_a,d_b,count,mean); cudaMemcpy(h_b,d_b,sizeof(int)*count,cudaMemcpyDeviceToHost); for(i=0;i<count;i++) { var+=h_b[i]; } var = var/(count); printf("\n\tVariance = %f",var); std_dev = sqrt(var); printf("\n\tStandard Deviation = %f",std_dev); cudaFree(d_a); cudaFree(d_b); return 0; }
18,978
#include "includes.h" __global__ void transpose_read_column_write_row(int * mat, int * transpose, int nx, int ny) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix < nx && iy < ny) { transpose[iy * nx + ix] = mat[ix * ny + iy]; } }
18,979
extern "C" { __global__ void binaryentropyXsigmoidY_32(const int lengthX, const float *x, const float *y, float *z) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<lengthX) { z[i]=x[i]*log(x[i])+(1.0-x[i])*log(1.0-x[i])-x[i]*y[i]+log(1.0+exp(y[i])); } } }
18,980
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 16 #define GRID_SIZE 1 __global__ void YourKernel(int d_A[BLOCK_SIZE][BLOCK_SIZE], int d_B[BLOCK_SIZE][BLOCK_SIZE]) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //if (row >= h || col >= w) return; /* whatever you wanna do with d_A[][] and d_B[][] */ } int main() { int d_A[BLOCK_SIZE][BLOCK_SIZE]; int d_B[BLOCK_SIZE][BLOCK_SIZE]; /* d_A initialization */ dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // so your threads are BLOCK_SIZE*BLOCK_SIZE, 256 in this case dim3 dimGrid(GRID_SIZE, GRID_SIZE); // 1*1 blocks in a grid YourKernel <<<dimGrid, dimBlock >>>(d_A, d_B); //Kernel invocation }
18,981
#include "includes.h" __global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] >= val; }
18,982
#include "includes.h" __global__ void relu_gpu_backward(float *ingrad, float *outgrad, float *indata, int64_t N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) ingrad[tid] = indata[tid] > 0 ? 1 * outgrad[tid] : 0; }
18,983
extern "C" { __global__ void applyFilter(const unsigned char* inputChannel, unsigned char* outputChannel, const unsigned int width, const unsigned int height, const unsigned int filterWidth) { unsigned int y; unsigned int x; unsigned int blur; int filterHalf; unsigned int row = threadIdx.y + blockIdx.y * blockDim.y; unsigned int col = threadIdx.x + blockIdx.x * blockDim.x; if (row < height && col < width) { // Unwrapped/optimized versions for small filters if (filterWidth == 1) { // 1x1 filter (Do nothing) outputChannel[col + row * width] = inputChannel[col + row * width]; return; } else if (filterWidth == 3 && row > 0 && row < height - 1 && col > 0 && col < width - 1) { // 3x3 filter blur = 0; blur += inputChannel[col + 1 + row * width + width]; blur += inputChannel[col + 1 + row * width]; blur += inputChannel[col + 1 + row * width - width]; blur += inputChannel[col + row * width + width]; blur += inputChannel[col + row * width]; blur += inputChannel[col + row * width - width]; blur += inputChannel[col - 1 + row * width + width]; blur += inputChannel[col - 1 + row * width]; blur += inputChannel[col - 1 + row * width - width]; outputChannel[col + row * width] = blur / 9; } filterHalf = filterWidth / 2; blur = 0; for (int i = -filterHalf; i <= filterHalf; i++) { for (int j = -filterHalf; j <= filterHalf; j++) { y = max(0, min(height - 1, row + i)); x = max(0, min(width - 1, col + j)); blur += inputChannel[x + y * width]; } } outputChannel[col + row * width] = blur / (filterWidth * filterWidth); } } }
18,984
/*
 * usage:  nvcc ./stream_test_v2.cu -o ./stream_v2_legacy
 *         nvvp ./stream_v2_legacy   ( or as root:
 *         nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v2_legacy )
 *          ... versus ...
 *         nvcc --default-stream per-thread ./stream_test_v2.cu -o ./stream_v2_per-thread
 *         nvvp ./stream_v2_per-thread   ( or as root:
 *         nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v2_per-thread )
 *
 * purpose: just test whether substitution of cudaMalloc() with cudaMallocManaged()
 *          will work
 * result:  yes it does, however, one needs to be careful with synchronizing
 *          individual streams before accessing managed memory, hence the
 *          cudaStreamSynchronize() call below is crucial — without it only
 *          default stream 0 printed correct results and all other streams 0.
 */
#include <stdio.h>

const int N = 1 << 20;

// Fill x[0..n) with sqrt(pi^i); each thread strides by the block size.
__global__ void kernel(float *x, int n)
{
    int tid = threadIdx.x;
    for (int i = tid; i < n; i += blockDim.x) {
        x[i] = sqrt(pow(3.14159, i));
    }
}

int main()
{
    const int num_streams = 8;

    cudaStream_t streams[num_streams];
    float *data[num_streams];

    // BUG FIX: the original kept a `float localx[N]` (4 MB) on the stack for
    // an out-commented cudaMemcpy check; it was never read and risked stack
    // overflow, so it is removed. Managed memory is read directly instead.
    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&streams[i]);

        cudaMallocManaged(&data[i], N * sizeof(float));

        // launch one worker kernel per stream
        kernel<<<1, 64, 0, streams[i]>>>(data[i], N);

        // launch a dummy kernel on the default stream
        // (n == 0, so the null pointer is never dereferenced)
        kernel<<<1, 1>>>(0, 0);
    }

    // and a quick check of results because individual streams
    // should have done identical calculations !
    for (int i = 0; i < num_streams; i++) {
        // each stream must finish before the host touches its managed buffer
        cudaStreamSynchronize(streams[i]);
        printf("*** %d %12.6lf%12.6lf%12.6lf\n", i, data[i][0], data[i][1], data[i][2]);
    }

    cudaDeviceReset();
    return 0;
}
18,985
// CUDA programming // Exercise n. 01 #include <errno.h> #include <cuda.h> #include <stdio.h> // Prototype __host__ void print_dev_prop(cudaDeviceProp dev_prop); int main(void) { // Number of CUDA-capable devices attached to this system int dev_cnt; cudaGetDeviceCount(&dev_cnt); // Calculate the theoretical peak bandwidth for each device for(int i = 0; i < dev_cnt; i++) { cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, i); printf("Device Number: %d\n", i); print_dev_prop(dev_prop); } } // Print device properties __host__ void print_dev_prop(cudaDeviceProp dev_prop) { printf(" Major revision number: %d\n", dev_prop.major); printf(" Minor revision number: %d\n", dev_prop.minor); printf(" Name: %s\n", dev_prop.name); printf(" Total global memory: %zu\n", dev_prop.totalGlobalMem); printf(" Total shared memory per block: %zu\n", dev_prop.sharedMemPerBlock); printf(" Total registers per block: %d\n", dev_prop.regsPerBlock); printf(" Warp size: %d\n", dev_prop.warpSize); printf(" Maximum memory pitch: %zu\n", dev_prop.memPitch); printf(" Maximum threads per block: %d\n", dev_prop.maxThreadsPerBlock); for(int i = 0; i < 3; ++i) printf(" Maximum block dimension #%02d: %d\n", i, dev_prop.maxThreadsDim[i]); for(int i = 0; i < 3; ++i) printf(" Maximum grid dimension #%02d: %d\n", i, dev_prop.maxGridSize[i]); printf(" Clock rate: %d\n", dev_prop.clockRate); printf(" Memory Bus Width (bits): %d\n", dev_prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * dev_prop.memoryClockRate * (dev_prop.memoryBusWidth / 8) / 1.0e6); printf(" Total constant memory: %zu\n", dev_prop.totalConstMem); printf(" Texture alignment: %zu\n", dev_prop.textureAlignment); printf(" Concurrent copy and execution: %s\n", (dev_prop.deviceOverlap ? "Yes" : "No")); printf(" Number of multiprocessors: %d\n", dev_prop.multiProcessorCount); printf(" Kernel execution timeout: %s\n", (dev_prop.kernelExecTimeoutEnabled ? "Yes" : "No")); return; }
18,986
#include "includes.h" // Gaurav Sheni // CSC 391 // September 16, 2015 // Project 1 //declaring kernel call __global__ void decrement(char* line, char* answer); __global__ void decrement(char *current, char* answer){ int i = threadIdx.x; answer[i] = (char)( (int) current[i] - 1 ); }
18,987
#include <iostream>
#include <vector>
#include "operations.cuh"

#define BLOCK_SIZE 512

// NOTE: the elementwise kernels below now take an explicit element count so
// the rounded-up grids launched by the host wrappers cannot read or write
// past the buffers (the originals had no tail guard).

// c[i] = a[i] + b[i] for i < n.
__global__ void addKernel(const float* a, const float* b, float* c, const int n)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

// output[i] = firstInput[i] * secondInput[i] for i < n.
__global__ void multiplyKernel(const float* firstInput, const float* secondInput, float* output, const int n)
{
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < n) output[index] = firstInput[index] + 0 * secondInput[index], output[index] = firstInput[index] * secondInput[index];
}

// output[i] = firstInput[i] + secondInput[i] * mask[i] for i < n.
__global__ void maskedAddKernel(const float* firstInput, const float* secondInput, const int* mask, float* output, const int n)
{
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < n) output[index] = firstInput[index] + secondInput[index] * mask[index];
}

// output[i] = log(1 + exp(input[i])) for i < n.
__global__ void softplusKernel(const float* input, float* output, const int n)
{
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < n) output[index] = logf(1 + expf(input[index]));
}

// Block-wise dot product: each block multiplies its slice in shared memory,
// thread 0 sums the block and folds it into *output with one atomicAdd.
// *output must be zeroed before launch.
__global__ void dotProductCuda(const float* firstInput, const float* secondInput, float* output, const int n)
{
    __shared__ float sharedMemory[BLOCK_SIZE];
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the original called the nonexistent __cudaGet_blockDim() and
    // let tail threads read out of bounds; out-of-range lanes now contribute 0.
    sharedMemory[threadIdx.x] = (idx < n) ? firstInput[idx] * secondInput[idx] : 0.0f;

    // All of the threads must be done with the multiplication
    __syncthreads();

    // Add the values in the block
    if (threadIdx.x == 0) {
        float blockProduct = 0;
        for (int i = 0; i < blockDim.x; ++i) {
            blockProduct += sharedMemory[i];
        }
        atomicAdd(output, blockProduct);
    }
}

// Windowed dot product: output[w] += sum over window w of
// firstInput[i] * secondInput[i % N], where `total` = N * outputSize.
__global__ void dotProductWindowCuda(const float* firstInput, const float* secondInput, const int N, const int total, float* output)
{
    __shared__ float sharedMemory[BLOCK_SIZE];
    const int multiplyX = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard tail threads (grid is rounded up) — they contribute nothing.
    sharedMemory[threadIdx.x] = (multiplyX < total) ? firstInput[multiplyX] * secondInput[multiplyX % N] : 0.0f;

    // All of the threads must be done with the multiplication
    __syncthreads();

    // Add the values in the block
    const int offset = blockDim.x * blockIdx.x;
    if (threadIdx.x == 0) {
        for (int i = 0; i < BLOCK_SIZE && offset + i < total; ++i)
            output[(offset + i) / N] += sharedMemory[i];
    }
}

namespace CudaFunctions {
    // output[w] += dot(firstInput[w*N .. w*N+N), secondInput[0..N)) for each of
    // outputSize windows.
    void dotProductWindow(float* firstInput, float* secondInput, int N, float* output, int outputSize)
    {
        int inputSize = N * outputSize;

        // Allocate memory for the CUDA operations
        float* dFirstInput;
        float* dSecondInput;
        float* dOutput;
        cudaMalloc((void**)& dFirstInput, inputSize * sizeof(float));
        cudaMalloc((void**)& dSecondInput, N * sizeof(float));
        cudaMalloc((void**)& dOutput, outputSize * sizeof(float));

        // Copy the values to GPU (dOutput is accumulated into, so seed it)
        cudaMemcpy(dFirstInput, firstInput, inputSize * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dSecondInput, secondInput, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dOutput, output, outputSize * sizeof(float), cudaMemcpyHostToDevice);

        // Set the blocks to use
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid((inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE);

        // Run the kernel
        dotProductWindowCuda<<<dimGrid, dimBlock>>>(dFirstInput, dSecondInput, N, inputSize, dOutput);
        cudaDeviceSynchronize();

        // Copy the values to host
        cudaMemcpy(output, dOutput, outputSize * sizeof(float), cudaMemcpyDeviceToHost);

        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR CUDA: %s \n", cudaGetErrorString(error));
            return;
        }

        // Clean up the values
        cudaFree(dFirstInput);
        cudaFree(dSecondInput);
        cudaFree(dOutput);
    }

    // Returns dot(firstInput[0..N), secondInput[0..N)); 1 on CUDA error.
    float dotProduct(float* firstInput, float* secondInput, int N)
    {
        // Allocate memory for the CUDA operations
        float output;
        float* dFirstInput;
        float* dSecondInput;
        float* dOutput;
        cudaMalloc((void**)&dFirstInput, N * sizeof(float));
        cudaMalloc((void**)&dSecondInput, N * sizeof(float));
        cudaMalloc((void**)&dOutput, sizeof(float));

        // Copy the values to GPU
        cudaMemcpy(dFirstInput, firstInput, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dSecondInput, secondInput, N * sizeof(float), cudaMemcpyHostToDevice);
        // BUG FIX: the accumulator was never zeroed, so atomicAdd summed into
        // whatever garbage cudaMalloc returned.
        cudaMemset(dOutput, 0, sizeof(float));

        // Set the blocks to use
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(N / BLOCK_SIZE + 1);

        // Run the kernel
        dotProductCuda << <dimGrid, dimBlock >> > (dFirstInput, dSecondInput, dOutput, N);
        cudaDeviceSynchronize();

        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
            return 1;
        }

        // Copy the values to host
        cudaMemcpy(&output, dOutput, sizeof(float), cudaMemcpyDeviceToHost);

        // Clean up the values
        cudaFree(dFirstInput);
        cudaFree(dSecondInput);
        cudaFree(dOutput);
        return output;
    }

    // output[i] = firstInput[i] + secondInput[i], elementwise over N floats.
    void add(float* firstInput, float* secondInput, int N, float* output)
    {
        // Allocate memory for the CUDA operations
        float* dFirstInput;
        float* dSecondInput;
        float* dOutput;
        cudaMalloc((void**)& dFirstInput, N * sizeof(float));
        cudaMalloc((void**)& dSecondInput, N * sizeof(float));
        cudaMalloc((void**)& dOutput, N * sizeof(float));

        // Copy the values to GPU
        cudaMemcpy(dFirstInput, firstInput, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dSecondInput, secondInput, N * sizeof(float), cudaMemcpyHostToDevice);

        // BUG FIX: the original launched with dimBlock(N), which is illegal
        // for N > 1024 and made every block past the first index out of
        // bounds; use the fixed block size plus the kernel's tail guard.
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(N / BLOCK_SIZE + 1);

        // Run the kernel
        addKernel <<<dimGrid, dimBlock >>> (dFirstInput, dSecondInput, dOutput, N);
        cudaDeviceSynchronize();

        // Copy the values to host
        cudaMemcpy(output, dOutput, N * sizeof(float), cudaMemcpyDeviceToHost);

        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
            return;
        }

        // Clean up the values
        cudaFree(dFirstInput);
        cudaFree(dSecondInput);
        cudaFree(dOutput);
    }

    // output[i] = firstInput[i] * secondInput[i], elementwise over N floats.
    void multiply(float* firstInput, float* secondInput, int N, float* output)
    {
        // Allocate memory for the CUDA operations
        float* dFirstInput;
        float* dSecondInput;
        float* dOutput;
        cudaMalloc((void**)& dFirstInput, N * sizeof(float));
        cudaMalloc((void**)& dSecondInput, N * sizeof(float));
        cudaMalloc((void**)& dOutput, N * sizeof(float));

        // Copy the values to GPU
        cudaMemcpy(dFirstInput, firstInput, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dSecondInput, secondInput, N * sizeof(float), cudaMemcpyHostToDevice);

        // Set the blocks to use
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(N / BLOCK_SIZE + 1);

        // Run the kernel
        multiplyKernel <<<dimGrid, dimBlock >>> (dFirstInput, dSecondInput, dOutput, N);
        cudaDeviceSynchronize();

        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
            return;
        }

        // Copy the values to host
        cudaMemcpy(output, dOutput, N * sizeof(float), cudaMemcpyDeviceToHost);

        // Clean up the values
        cudaFree(dFirstInput);
        cudaFree(dSecondInput);
        cudaFree(dOutput);
    }

    // output[i] = firstInput[i] + secondInput[i] * mask[i] over N elements.
    void maskedAdd(float* firstInput, float* secondInput, int* mask, int N, float* output)
    {
        // Allocate memory for the CUDA operations
        float* dFirstInput;
        float* dSecondInput;
        float* dOutput;
        int* dMask;
        cudaMalloc((void**)& dFirstInput, N * sizeof(float));
        cudaMalloc((void**)& dSecondInput, N * sizeof(float));
        cudaMalloc((void**)& dMask, N * sizeof(int));
        cudaMalloc((void**)& dOutput, N * sizeof(float));

        // Copy the values to GPU
        cudaMemcpy(dFirstInput, firstInput, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dSecondInput, secondInput, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dMask, mask, N * sizeof(int), cudaMemcpyHostToDevice);

        // Set the blocks to use
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(N / BLOCK_SIZE + 1);

        // Run the kernel
        maskedAddKernel<<<dimGrid, dimBlock >>> (dFirstInput, dSecondInput, dMask, dOutput, N);
        cudaDeviceSynchronize();

        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
            return;
        }

        // Copy the values to host
        cudaMemcpy(output, dOutput, N * sizeof(float), cudaMemcpyDeviceToHost);

        // Clean up the values (BUG FIX: dMask was leaked)
        cudaFree(dFirstInput);
        cudaFree(dSecondInput);
        cudaFree(dMask);
        cudaFree(dOutput);
    }

    // output[i] = log(1 + exp(input[i])), elementwise over N floats.
    void softplus(float* input, int N, float* output)
    {
        // Allocate memory for the CUDA operations
        float* dInput;
        float* dOutput;
        cudaMalloc((void**)& dInput, N * sizeof(float));
        cudaMalloc((void**)& dOutput, N * sizeof(float));

        // Copy the values to GPU
        cudaMemcpy(dInput, input, N * sizeof(float), cudaMemcpyHostToDevice);

        // Set the blocks to use
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(N / BLOCK_SIZE + 1);

        // Run the kernel
        softplusKernel<<<dimGrid, dimBlock>>> (dInput, dOutput, N);
        cudaDeviceSynchronize();

        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
            return;
        }

        // Copy the values to host
        cudaMemcpy(output, dOutput, N * sizeof(float), cudaMemcpyDeviceToHost);

        // Clean up the values
        cudaFree(dInput);
        cudaFree(dOutput);
    }
}
18,988
#include "includes.h" __global__ void poly_div7(float* poli, const int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { float x = poli[idx]; poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0f/x; } }
18,989
#include<stdio.h>

// Modular addition; any 2 <= mod <= 2^31 should work.
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod)
{
    unsigned c = a + b;
    return c >= mod ? c - mod : c;
}

// Count Hamiltonian cycles modulo `mod` by inclusion-exclusion over vertex
// subsets s of the first n-1 vertices: each logical block counts closed walks
// of length n restricted to s, with alternating sign by |s| parity.
// Block x-dimension must be n - 1; blockDim.y logical blocks share one real
// block (each gets its own slice of the qc[] scratch array).
__global__ void my_hamilton(int n, int *adj, int *ret, unsigned int mod)
{
    __shared__ unsigned qc[512];
    __shared__ unsigned a_n_1[32];   // adj[n-1][i], the last vertex's row

    int tid = threadIdx.x;                                // logical thread index
    int bid = blockIdx.x * blockDim.y + threadIdx.y;      // logical block index
    int sha = threadIdx.y * (n-1);   // scratch base: logical blocks share the real block
    int gridSize = blockDim.y * gridDim.x;                // logical grid size

    // prefetch: pack this thread's adjacency row into a bitmask
    unsigned a_i = 0;
    for (int i = 0; i < n; i++) {
        a_i = a_i | adj[tid*n + i]<<i;
    }
    if (threadIdx.y == 0) a_n_1[tid] = adj[(n-1)*n + tid];

    unsigned total = 0;
    unsigned times = ((1U<<(n-1))-1) / gridSize + 1;
    if ((times-1) * gridSize + blockIdx.y * blockDim.x >= (1U<<(n-1))) times -= 1;

    // each logical block tests one subset s per iteration
    for (unsigned _t = 0; _t < times; _t++) {
        unsigned s = _t * gridSize + bid;

        // active: this thread's vertex is selected in subset s
        unsigned active = s>>tid & 1;

        // first transition: walks of length 1 ending at vertex n-1's neighbours
        qc[tid + sha] = active * (a_i>>(n-1) & 1);
        unsigned row = active * a_i;
        __syncthreads();

        // middle transitions: propagate walk counts, exploiting SIMD execution
        for (int t = 1; t < n-1; t++) {
            unsigned sum = 0;
            for (int i = 0; i < n-1; i++) {
                sum = mod_sum(sum, qc[i + sha] * (row>>i & 1), mod);
            }
            __syncthreads();
            qc[tid + sha] = sum;
            __syncthreads();
        }

        // last transition: close the walk back through vertex n-1
        unsigned count = 0;
        for (int i = 0; i < n-1; i++) {
            count = mod_sum(count, qc[i + sha] * a_n_1[i], mod);
        }
        count *= s < (1U<<(n-1));   // zero out subsets past the valid range

        // adjust sign for inclusion-exclusion principle
        int sign = (n - __popc(s)) & 1;
        unsigned count_with_sign = sign ? count : (count ? mod-count : 0);
        total = mod_sum(total, count_with_sign, mod);
        //if(tid==0) printf("%ds=%d total=%d\n",active,s, count);
        __syncthreads();
    }

    if (tid == 0) {
        // output total for this logical block
        ret[bid] = total;
    }
}

// Reduce data[0..n) modulo `mod` into sum[blockIdx.x].
// Thread count must be >= 64 and a power of 2.
__global__ void sum_all(int n, int *data, int *sum, unsigned mod)
{
    __shared__ int tmp_sum[1024];
    int blockSize = blockDim.x;
    int stride = gridDim.x * blockSize;
    int id = threadIdx.x;
    int i = id + blockSize * blockIdx.x;

    // grid-stride accumulation of this thread's share
    tmp_sum[id] = 0;
    while (i < n) {
        tmp_sum[id] = mod_sum(tmp_sum[id], data[i], mod);
        i += stride;
    }
    __syncthreads();

    // tree reduction across the block
    if (blockSize >= 1024) { if (id < 512) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 512], mod); __syncthreads(); }
    if (blockSize >= 512)  { if (id < 256) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 256], mod); __syncthreads(); }
    if (blockSize >= 256)  { if (id < 128) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 128], mod); __syncthreads(); }
    if (blockSize >= 128)  { if (id < 64)  tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 64], mod);  __syncthreads(); }

    if (id < 32) {
        // now, only 1 warp is active
        volatile int *tmp = tmp_sum;
        tmp[id] = mod_sum(tmp[id], tmp[id + 32], mod);
        tmp[id] = mod_sum(tmp[id], tmp[id + 16], mod);
        tmp[id] = mod_sum(tmp[id], tmp[id + 8], mod);
        tmp[id] = mod_sum(tmp[id], tmp[id + 4], mod);
        tmp[id] = mod_sum(tmp[id], tmp[id + 2], mod);
        tmp[id] = mod_sum(tmp[id], tmp[id + 1], mod);
    }

    // write back to global memory
    if (id == 0) {
        sum[blockIdx.x] = tmp_sum[0];
    }
}

#define showCudaError(errorcode) showError(errorcode, __FILE__, __LINE__)

void showError(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "<error> at %s:%d: %s\n", file, line, cudaGetErrorString(code));
    }
}

int n, a[32*32], sum[1<<7];

// Read n and an n x n adjacency matrix from stdin, then print the cycle
// count modulo each modulus given on the command line.
int main(int argc, char *argv[])
{
    if (scanf("%d", &n) != 1) return 1;
    if (n < 3 || n > 32) return 1;
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            int aij = 1;
            scanf("%d", &aij);
            if (i == j) a[i*n+j] = 0;
            else a[i*n+j] = aij;
        }
    }

    // decide block size and grid size
    unsigned works = 1U<<(n-1);
    int grid_size = 1, block_size_y = 1;
    int sum_size = 128;
    block_size_y = 512 / (n-1);
    if (block_size_y > works) block_size_y = works;
    grid_size = (works-1) / block_size_y + 1;
    if (grid_size > 65536) grid_size = 65536;
    int works_per_thread = (works-1) / (grid_size * block_size_y) + 1;
    grid_size = (works-1) / (works_per_thread * block_size_y) + 1;
    int logical_grid_size = grid_size * block_size_y;
    works_per_thread = (works-1) / logical_grid_size + 1;

    // allocate memory
    int *gpu_a, *gpu_ans, *gpu_sum;
    showCudaError(cudaMalloc(&gpu_a, sizeof a));
    showCudaError(cudaMalloc(&gpu_ans, sizeof(int) * logical_grid_size)); // only resides in GPU!
    showCudaError(cudaMalloc(&gpu_sum, sizeof(int) * sum_size));
    showCudaError(cudaMemcpy(gpu_a, a, sizeof a, cudaMemcpyHostToDevice));

    // show work info
    printf("grid size: %d\n", grid_size);
    printf("block size: %d,%d\n", n-1, block_size_y);
    printf("logical grid size: %d\n", logical_grid_size);
    printf("works per thread: %d\n", works_per_thread);

    // run once per requested modulus
    for (int i = 1; i < argc; i++) {
        unsigned mod = 0;
        sscanf(argv[i], "%u", &mod);
        my_hamilton<<<grid_size, dim3(n-1, block_size_y)>>>(n, gpu_a, gpu_ans, mod);
        showCudaError(cudaDeviceSynchronize());
        sum_all<<<sum_size, 256>>>(logical_grid_size, gpu_ans, gpu_sum, mod);
        showCudaError(cudaDeviceSynchronize());
        showCudaError(cudaMemcpy(sum, gpu_sum, sizeof(int) * sum_size, cudaMemcpyDeviceToHost));
        unsigned ans = 0;
        for (int j = 0; j < sum_size; j++) ans = mod_sum(ans, sum[j], mod);
        printf("%u\n", ans);
    }
}
18,990
#include <iostream>
#include <ctime>
#include <cassert>
#include <stdio.h>

// Event-based GPU timer macros. `id` is pasted into the event variable names,
// so each macro pair must use a unique id within one scope.
#define start_timer(id) \
  cudaEvent_t start##id, stop##id; \
  cudaEventCreate(&start##id); \
  cudaEventCreate(&stop##id); \
  cudaEventRecord(start##id, 0)
#define stop_timer(id, elapsedTime) \
  cudaEventRecord(stop##id, 0); \
  cudaEventSynchronize(stop##id); \
  cudaEventElapsedTime(&elapsedTime, start##id, stop##id); \
  cudaEventDestroy(start##id); \
  cudaEventDestroy(stop##id)

using namespace std;
// NOTE(review): memset is used below but <cstring> is not included directly;
// it presumably arrives transitively via <iostream> — confirm on the target toolchain.

// Host-side baseline: zero an n-int buffer m times (CPU memory bandwidth probe).
static inline void cpu_memory_bandwidth(int *X, int n, int m = 1) {
  for (int i = 0; i < m; ++i)
    memset(X, 0, n*sizeof(int));
}

// Host -> device copy of n ints, repeated m times (synchronous cudaMemcpy).
static inline void cpu_gpu_memory(int *X_d, int *X, int n, int m = 1) {
  int size = sizeof(int)*n;
  for (int i = 0; i < m; ++i)
    cudaMemcpy(X_d, X, size, cudaMemcpyHostToDevice);
}

// Device -> host copy of n ints, repeated m times.
static inline void gpu_cpu_memory(int *X, int *X_d, int n, int m = 1) {
  int size = sizeof(int)*n;
  for (int i = 0; i < m; ++i)
    cudaMemcpy(X, X_d, size, cudaMemcpyDeviceToHost);
}

// Device -> device copy of n ints, repeated m times.
void gpu_gpu_memory(int *Y_d, int *X_d, int n, int m = 1) {
  int size = sizeof(int)*n;
  for (int i = 0; i < m; ++i)
    cudaMemcpy(Y_d, X_d, size, cudaMemcpyDeviceToDevice);
}

// Threads per block = 2^E_THREAD = 256.
#define E_THREAD 8
#define N_THREAD (1<<E_THREAD)

// Kernel write-bandwidth probe: each thread zeroes its own element m times.
// The 2D grid is flattened assuming gridDim.x <= 2^15 (see gpu_kernel below).
__global__ void reset(int *X, int m) {
  int bid = (blockIdx.y << 15) + blockIdx.x;
  int tid = (bid << E_THREAD) + threadIdx.x;
  for (int i = 0; i < m; ++i) {
    X[tid] = 0;
  }
}

// Launch `reset` over n elements (n assumed to be a multiple of N_THREAD).
// Splits the grid into a 2D shape when more than 2^15 blocks are needed.
void gpu_kernel(int *X_d, int n, int m) {
  int nb = (n >> E_THREAD);
  dim3 nBlock(nb);
  if (nb > (1 << 15)) {
    nBlock.x = (1 << 15);
    nBlock.y = (nb >> 15);
  }
  reset<<<nBlock, N_THREAD>>>(X_d, m);
}

// Driver: times CPU memset, H2D, D2H (wall-clock via clock()) and
// D2D / kernel fill (CUDA events), printing bandwidth for each.
// argv[1], if given, is the exponent e with n = 2^e elements.
int main(int argc, char** argv) {
  int m = 1, e = 28, n = (1L << e);
  if (argc > 1) {
    e = atoi(argv[1]);
    n = (1L << e);
    // NOTE(review): n is int, so e >= 31 overflows; the assert catches the negative result.
    assert(n > 0);
  }
  int *X = new int[n];
  int *X_d;
  int *Y_d;
  cudaMalloc((void **)&X_d, n*sizeof(int));
  cudaMalloc((void **)&Y_d, n*sizeof(int));
  clock_t t1, t2;
  double dt;
  // CPU - Main Memory
  t1 = clock();
  cpu_memory_bandwidth(X, n, m);
  t2 = clock();
  dt = (t2 - t1) / (double)CLOCKS_PER_SEC;
  printf("CPU -> CPU :: time %.4lf\t", dt);
  // (n >> 20) converts element count to Mi-elements before scaling by sizeof(int).
  printf("bandwidth %.1lf MB/s\n", sizeof(int)*(n >> 20) / dt);
  // CPU -> GPU
  t1 = clock();
  cpu_gpu_memory(X_d, X, n, m);
  t2 = clock();
  dt = (t2 - t1) / (double)CLOCKS_PER_SEC;
  printf("CPU -> GPU :: time %.4lf\t", dt);
  printf("bandwidth %.1lf MB/s\n", sizeof(int)*(n >> 20) / dt);
  // GPU -> CPU
  t1 = clock();
  gpu_cpu_memory(X, X_d, n, m);
  t2 = clock();
  dt = (t2 - t1) / (double)CLOCKS_PER_SEC;
  printf("GPU -> CPU :: time %.4lf\t", dt);
  printf("bandwidth %.1lf MB/s\n", sizeof(int)*(n >> 20) / dt);
  // GPU -> GPU (device-to-device copies are async w.r.t. host, so CUDA events are used)
  float elapsedTime;
  start_timer(0);
  for (int i = 0; i < m; ++i)
    cudaMemcpy(Y_d, X_d, n*sizeof(int), cudaMemcpyDeviceToDevice);
  stop_timer(0, elapsedTime);
  printf("GPU -> GPU :: time %.4lf\t", elapsedTime / 1000);
  printf("bandwidth %.1lf GB/s\n", sizeof(int)*((double)n / (1 << 30)) / (elapsedTime / 1000));
  // GPU kernel (write bandwidth of the fill kernel)
  start_timer(1);
  gpu_kernel(X_d, n, m);
  stop_timer(1, elapsedTime);
  printf("GPU kernel :: time %.4lf\t", elapsedTime / 1000);
  printf("bandwidth %.1lf GB/s\n", sizeof(int)*((double)n / (1 << 30)) / (elapsedTime / 1000));
  delete [] X;
  cudaFree(X_d);
  cudaFree(Y_d);
  return 0;
}
18,991
/**
 * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>

#include "../../common/polybenchUtilFuncts.h"

//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 10.05

#define GPU_DEVICE 0

/* Problem size */
#define tmax 500
#define NX 2048
#define NY 2048

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

// Fill the source term and the three field arrays with deterministic values.
// NOTE(review): ex is written with stride NY here but accessed with stride NY+1
// in the compute routines (inherited PolyBench quirk); the halo column is left
// uninitialized in both CPU and GPU paths, so the comparison is still fair.
void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz)
{
	int i, j;

	for (i = 0; i < tmax; i++)
	{
		_fict_[i] = (DATA_TYPE) i;
	}

	for (i = 0; i < NX; i++)
	{
		for (j = 0; j < NY; j++)
		{
			ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX;
			ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX;
			hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX;
		}
	}
}

// Reference CPU implementation: tmax FDTD time steps updating ey, ex, hz in turn.
void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz)
{
	int t, i, j;

	for (t=0; t < tmax; t++)
	{
		// boundary row of ey is driven by the source term
		for (j=0; j < NY; j++)
		{
			ey[0*NY + j] = _fict_[t];
		}

		for (i = 1; i < NX; i++)
		{
			for (j = 0; j < NY; j++)
			{
				ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]);
			}
		}

		for (i = 0; i < NX; i++)
		{
			for (j = 1; j < NY; j++)
			{
				ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]);
			}
		}

		for (i = 0; i < NX; i++)
		{
			for (j = 0; j < NY; j++)
			{
				hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]);
			}
		}
	}
}

// Count and report hz elements whose CPU/GPU percent difference exceeds the threshold.
void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2)
{
	int i, j, fail;
	fail = 0;

	for (i=0; i < NX; i++)
	{
		for (j=0; j < NY; j++)
		{
			if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
			{
				fail++;
			}
		}
	}

	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

// Select and announce the CUDA device used for the benchmark.
void GPU_argv_init()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
	printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	cudaSetDevice( GPU_DEVICE );
}

// Step 1: update ey; row 0 is driven by the source _fict_[t].
// Expects a 2D grid covering NX x NY (j along x, i along y).
__global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;

	if ((i < NX) && (j < NY))
	{
		if (i == 0)
		{
			ey[i * NY + j] = _fict_[t];
		}
		else
		{
			ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]);
		}
	}
}

// Step 2: update ex (skipping column 0), pitched by NY+1.
__global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;

	if ((i < NX) && (j < NY) && (j > 0))
	{
		ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]);
	}
}

// Step 3: update hz from the freshly-updated ex and ey.
__global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;

	if ((i < NX) && (j < NY))
	{
		hz[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]);
	}
}

// GPU driver: copy inputs to the device, run tmax sequential (step1,step2,step3)
// iterations, time them, and copy hz back into hz_outputFromGpu.
void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu)
{
	double t_start, t_end;

	DATA_TYPE *_fict_gpu;
	DATA_TYPE *ex_gpu;
	DATA_TYPE *ey_gpu;
	DATA_TYPE *hz_gpu;

	cudaMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax);
	cudaMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1));
	cudaMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY);
	cudaMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY);

	cudaMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax, cudaMemcpyHostToDevice);
	cudaMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), cudaMemcpyHostToDevice);
	cudaMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, cudaMemcpyHostToDevice);
	cudaMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid( (size_t)ceil(((float)NY) / ((float)block.x)), (size_t)ceil(((float)NX) / ((float)block.y)));

	t_start = rtclock();

	for(int t = 0; t< tmax; t++)
	{
		fdtd_step1_kernel<<<grid,block>>>(_fict_gpu, ex_gpu, ey_gpu, hz_gpu, t);
		// FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
		// is the supported equivalent and also surfaces async kernel errors.
		cudaDeviceSynchronize();
		fdtd_step2_kernel<<<grid,block>>>(ex_gpu, ey_gpu, hz_gpu, t);
		cudaDeviceSynchronize();
		fdtd_step3_kernel<<<grid,block>>>(ex_gpu, ey_gpu, hz_gpu, t);
		cudaDeviceSynchronize();
	}

	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

	cudaMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyDeviceToHost);

	cudaFree(_fict_gpu);
	cudaFree(ex_gpu);
	cudaFree(ey_gpu);
	cudaFree(hz_gpu);
}

// Run GPU then CPU versions and compare the resulting hz fields.
int main()
{
	double t_start, t_end;

	DATA_TYPE* _fict_;
	DATA_TYPE* ex;
	DATA_TYPE* ey;
	DATA_TYPE* hz;
	DATA_TYPE* hz_outputFromGpu;

	_fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE));
	ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE));
	ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE));
	hz = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
	hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));

	init_arrays(_fict_, ex, ey, hz);

	GPU_argv_init();
	fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu);

	t_start = rtclock();
	runFdtd(_fict_, ex, ey, hz);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

	compareResults(hz, hz_outputFromGpu);

	free(_fict_);
	free(ex);
	free(ey);
	free(hz);
	free(hz_outputFromGpu);

	return 0;
}
18,992
#include<stdio.h>

#define TILE_DIM 32

// Tiled out-of-place matrix transpose: odata (height x width input -> width x height output).
// Launch with a 2D grid of TILE_DIM x TILE_DIM blocks covering the input.
// The shared tile is padded by one column so the transposed read pattern
// does not cause shared-memory bank conflicts.
__global__ void transpose(float *odata, const float *idata, int width, int height)
{
  __shared__ float tile[TILE_DIM][TILE_DIM+1];

  // Coordinates of this thread's element in the input matrix.
  int inCol = blockIdx.x * TILE_DIM + threadIdx.x;
  int inRow = blockIdx.y * TILE_DIM + threadIdx.y;

  // Stage the input tile in shared memory (coalesced global read).
  if (inCol < width && inRow < height)
    tile[threadIdx.y][threadIdx.x] = idata[inRow * width + inCol];

  // All writes to the tile must land before any thread reads it transposed.
  __syncthreads();

  // Coordinates in the output: block indices are swapped, thread indices are not,
  // so the global write stays coalesced.
  int outCol = blockIdx.y * TILE_DIM + threadIdx.x;
  int outRow = blockIdx.x * TILE_DIM + threadIdx.y;

  if (outRow < width && outCol < height)
    odata[outRow * height + outCol] = tile[threadIdx.x][threadIdx.y];
}
18,993
#include <iostream>
#include <cuda.h>
using namespace std;

// Tree reduction computing the sum of 2*blockDim.x elements into a[0].
// FIX: the original had no barrier between reduction steps, which is a data
// race on architectures with independent thread scheduling (Volta+). The loop
// condition is uniform across the block, so __syncthreads() here is legal.
__global__ void add(int *a)
{
    int tid = threadIdx.x;
    int no_of_threads = blockDim.x;
    int step = 1;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] += a[second];
        }
        __syncthreads();   // all partial sums of this level must be visible before the next
        step <<= 1;
        no_of_threads >>= 1;
    }
}

// Tree reduction computing the maximum of 2*blockDim.x elements into a[0].
__global__ void max(int *a)
{
    int tid = threadIdx.x;
    int step = 1;
    int no_of_threads = blockDim.x;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] = a[first] > a[second] ? a[first] : a[second];
        }
        __syncthreads();   // FIX: barrier between levels (was missing)
        step <<= 1;
        no_of_threads >>= 1;
    }
}

// Tree reduction computing the minimum of 2*blockDim.x elements into a[0].
__global__ void min(int *a)
{
    int tid = threadIdx.x;
    int step = 1;
    int no_of_threads = blockDim.x;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] = a[first] < a[second] ? a[first] : a[second];
        }
        __syncthreads();   // FIX: barrier between levels (was missing)
        step <<= 1;
        no_of_threads >>= 1;
    }
}

// Replace each element with its squared deviation from the mean
// (one thread per element; integer arithmetic throughout).
__global__ void stdDev(int *a, int mean)
{
    a[threadIdx.x] -= mean;
    a[threadIdx.x] *= a[threadIdx.x];
}

// Host driver: computes sum/mean, max, min, and (integer) standard deviation
// of a fixed 8-element array using the kernels above.
int main()
{
    int host_arr[] = {1, 2, 3, 4, 5, 6, 7, 8};
    int *dev_arr;
    int SIZE = 8;
    cudaMalloc((void**)&dev_arr, SIZE * sizeof(int));

    // SUM AND AVERAGE (SIZE/2 threads: each handles one pair at level 0)
    cudaMemcpy(dev_arr, host_arr, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    add<<<1, SIZE / 2>>>(dev_arr);
    int sum;
    cudaMemcpy(&sum, dev_arr, sizeof(int), cudaMemcpyDeviceToHost);
    int mean = sum / SIZE;   // integer mean, as in the original
    cout << "Sum is : " << sum;
    cout << "Average is : " << mean;

    // MAX
    cudaMemcpy(dev_arr, host_arr, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    max<<<1, SIZE / 2>>>(dev_arr);
    int max;
    cudaMemcpy(&max, dev_arr, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Max is : " << max;

    // MIN
    cudaMemcpy(dev_arr, host_arr, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    min<<<1, SIZE / 2>>>(dev_arr);
    int min;
    cudaMemcpy(&min, dev_arr, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Min is : " << min;
    cout << "\n\n";

    // STDDV: square deviations on device, print them, then sum and take sqrt on host
    cudaMemcpy(dev_arr, host_arr, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    stdDev<<<1, SIZE>>>(dev_arr, mean);
    cudaMemcpy(host_arr, dev_arr, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    cout << host_arr[0];
    cout << host_arr[1];
    cout << host_arr[2];
    cout << host_arr[3];
    cout << host_arr[4];
    cout << host_arr[5];
    cout << host_arr[6];
    cout << host_arr[7];
    cout << "\n\n";
    cudaMemcpy(dev_arr, host_arr, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    add<<<1, SIZE / 2>>>(dev_arr);
    int stdDeviation;
    cudaMemcpy(&stdDeviation, dev_arr, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "STDDEV:" << sqrt(stdDeviation / SIZE);

    cudaFree(dev_arr);   // FIX: device buffer was leaked
}
18,994
// SDSC Summer Institute 2018
// Andreas Goetz (agoetz@sdsc.edu)

// CUDA program to add two vectors in parallel on the GPU
// version 2:
// launch a fixed number of blocks and threads
//
#include<stdio.h>

// define vector length, number of blocks NBL and threads per block TPB
#define N (255*4096)
#define NBL 256
#define TPB 128

//
// CUDA device function that adds two integer vectors
// Grid-stride loop: correct for any launch configuration, since each
// thread walks the array in steps of the total thread count.
//
__global__ void add(int *a, int *b, int *c, int n){

  int start  = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = gridDim.x * blockDim.x;

  for (int idx = start; idx < n; idx += stride)
    c[idx] = a[idx] + b[idx];

}

//
// main program
//
int main(void){

  int size = N * sizeof(int);

  // allocate host memory
  int *h_a = (int *) malloc(size);
  int *h_b = (int *) malloc(size);
  int *h_c = (int *) malloc(size);

  // allocate device memory
  int *d_a, *d_b, *d_c;
  cudaMalloc((void **)&d_a, size);
  cudaMalloc((void **)&d_b, size);
  cudaMalloc((void **)&d_c, size);

  // initialize vectors
  for (int i = 0; i < N; i++){
    h_a[i] = i+1;
    h_b[i] = i+1;
  }

  // copy input data to device
  cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

  // add vectors by launching a sufficient number of blocks of the add() kernel
  printf("\nLaunching vector addition kernel...\n");
  printf("Vector length = %d\n",N);
  printf("Blocks = %d\n",NBL);
  printf("Threads per block = %d\n",TPB);
  printf("Kernel copies = %d\n",NBL*TPB);
  add<<<NBL,TPB>>>(d_a, d_b, d_c, N);

  // copy results back to host (implicit sync with the kernel)
  cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);

  // deallocate device memory
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  // check results: each element should equal 2*(i+1)
  int err = 0;
  for (int i = 0; i < N; i++)
    if (h_c[i] != 2*(i+1))
      err += 1;

  if (err != 0){
    printf("\n Error, %d elements do not match!\n\n", err);
  } else {
    printf("\n Success! All elements match.\n\n");
  }

  // deallocate host memory
  free(h_a);
  free(h_b);
  free(h_c);

  return err;

}
18,995
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Per-block shared-memory RGB histogram, merged into the global histogram
// with atomics. Expects a 16x16 block; the first 256 threads initialize and
// flush the 256-bin-per-channel shared histogram.
__global__ void histogram_kernel(
        unsigned char *img,
        int height, int width,
        unsigned long long *histogram) {
    // FIX: was 255 * 3 — pixel value 255 indexes up to 255*3+2 = 767,
    // which overflowed the 765-entry shared array. 256 bins per channel.
    const int histogram_size = 256 * 3;
    __shared__ unsigned long long shared_histogram[histogram_size];

    // Zero the shared histogram (one bin triple per thread of the 16x16 block).
    if (threadIdx.x < 16 && threadIdx.y < 16) {
        int thread_index = threadIdx.x * 16 + threadIdx.y;
        shared_histogram[thread_index * 3] = 0;
        shared_histogram[thread_index * 3 + 1] = 0;
        shared_histogram[thread_index * 3 + 2] = 0;
    }
    __syncthreads();

    // Accumulate this thread's pixel (all three channels) into shared memory.
    int row_ind = threadIdx.x + blockIdx.x * blockDim.x;
    int col_ind = threadIdx.y + blockIdx.y * blockDim.y;
    if (row_ind < height && col_ind < width) {
        for (int ch = 0; ch < 3; ++ch) {
            unsigned int value = img[(row_ind * width + col_ind) * 3 + ch];
            atomicAdd(&shared_histogram[value * 3 + ch], 1);
        }
    }
    __syncthreads();

    // Merge the block's histogram into the global one.
    if (threadIdx.x < 16 && threadIdx.y < 16) {
        int thread_index = threadIdx.x * 16 + threadIdx.y;
        atomicAdd(
            &histogram[thread_index * 3],
            shared_histogram[thread_index * 3]);
        atomicAdd(
            &histogram[thread_index * 3 + 1],
            shared_histogram[thread_index * 3 + 1]);
        atomicAdd(
            &histogram[thread_index * 3 + 2],
            shared_histogram[thread_index * 3 + 2]);
    }
}

// Reads a 24-bit BMP (138-byte header assumed), computes the RGB histogram on
// the GPU and prints 256 counts per channel.
int main(int argc, char *argv[]) {
    if (argc < 2) {
        printf(
            "Usage: %s input_img.bmp\n",
            argv[0]);
        return 1;
    }

    FILE *input_img = fopen(argv[1], "rb");

    int width, height;
    unsigned short int bpp;
    unsigned char header[138];

    fseek(input_img, 18, 0);
    fread(&width, sizeof(int), 1, input_img);
    fseek(input_img, 22, 0);
    fread(&height, sizeof(int), 1, input_img);
    fseek(input_img, 28, 0);
    // FIX: bpp is a 2-byte field; reading sizeof(unsigned char) left the high
    // byte uninitialized, corrupting the padding computation below.
    fread(&bpp, sizeof(unsigned short), 1, input_img);
    fseek(input_img, 0, 0);
    fread(&header, sizeof(unsigned char), 138, input_img);

    int img_sizeof = height * width * 3 * sizeof(unsigned char);
    unsigned char *h_img = (unsigned char *) malloc(img_sizeof);

    // BMP rows are padded to 4-byte boundaries; bpp is bits per pixel.
    unsigned int padding_size = (int)((width * bpp + 31) / 32) * 4 - width * 3;
    unsigned char *h_padding = (unsigned char *) malloc(padding_size);

    for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j) {
            unsigned char b, g, r;
            fread(&b, sizeof(unsigned char), 1, input_img);
            fread(&g, sizeof(unsigned char), 1, input_img);
            fread(&r, sizeof(unsigned char), 1, input_img);
            h_img[(i * width + j) * 3] = r;
            h_img[(i * width + j) * 3 + 1] = g;
            h_img[(i * width + j) * 3 + 2] = b;
        }
        if (padding_size) {
            // FIX: was fread(&h_padding, ...), which overwrote the pointer
            // variable itself instead of filling the scratch buffer.
            fread(h_padding, padding_size, 1, input_img);
        }
    }
    fclose(input_img);

    int histogram_sizeof = 256 * 3 * sizeof(unsigned long long);
    unsigned long long *h_histogram = (
        (unsigned long long *) malloc(histogram_sizeof));

    unsigned char *d_img;
    unsigned long long *d_histogram;

    cudaSetDevice(0);
    cudaMalloc((void **) &d_img, img_sizeof);
    cudaMalloc((void **) &d_histogram, histogram_sizeof);
    cudaMemcpy(
        d_img, h_img,
        img_sizeof, cudaMemcpyHostToDevice);
    // FIX: the global histogram is accumulated with atomicAdd, so it must
    // start at zero; cudaMalloc does not zero memory.
    cudaMemset(d_histogram, 0, histogram_sizeof);

    dim3 gridSize((int)(height / 16) + 1, int(width / 16) + 1);
    dim3 blockSize(16, 16);
    histogram_kernel<<< gridSize, blockSize >>>(
        d_img, height, width, d_histogram);
    cudaDeviceSynchronize();

    cudaMemcpy(
        h_histogram, d_histogram,
        histogram_sizeof, cudaMemcpyDeviceToHost);

    for (int ch = 0; ch < 3; ++ch) {
        for (int i = 0; i < 256; ++i) {
            printf("%llu ", h_histogram[i * 3 + ch]);
        }
        printf("\n");
    }

    free(h_padding);
    free(h_histogram);
    free(h_img);
    cudaFree(d_img);
    cudaFree(d_histogram);
    return 0;
}
18,996
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Game-of-Life world state, shared with the C/MPI side of the program.
// The buffers carry one ghost row above and below the worldWidth*worldHeight
// data region (hence the 2*worldWidth extra cells at allocation).
extern "C" unsigned int *g_resultData;   // next-generation buffer
extern "C" unsigned int *g_data;         // current state of world
extern "C" size_t g_worldWidth;          // current width of world
extern "C" size_t g_worldHeight;         // current height of world
extern "C" size_t g_dataLength;          // product of width and height

// Allocate managed buffers and zero the data region (ghost rows untouched).
static inline void gol_initAllZeros( size_t worldWidth, size_t worldHeight )
{
    cudaMallocManaged(&g_data, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));
    cudaMallocManaged(&g_resultData, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));

    //set inits to 0
    for(size_t i = 0; i < g_dataLength; i++)
    {
        g_data[i+g_worldWidth] = 0;
        g_resultData[i+g_worldWidth] = 0;
    }
}

// Allocate managed buffers and set every cell of the data region alive.
static inline void gol_initAllOnes( size_t worldWidth, size_t worldHeight )
{
    int i;
    cudaMallocManaged(&g_data, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));
    cudaMallocManaged(&g_resultData, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));

    // set all rows of world to true
    for( i = 0; i < g_dataLength; i++)
    {
        g_data[i+g_worldWidth] = 1;
        g_resultData[i+g_worldWidth]=0;
    }
}

// Allocate managed buffers and place a 10-cell run of live cells.
// NOTE(review): the offset g_dataLength+127 lands past the ghost-offset data
// region for small worlds — presumably intended as "middle row"; confirm
// against the world sizes used by the caller.
static inline void gol_initOnesInMiddle( size_t worldWidth, size_t worldHeight )
{
    int i;
    cudaMallocManaged(&g_data, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));
    cudaMallocManaged(&g_resultData, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));

    for(i =0; i<10;i++)
    {
        g_data[g_dataLength+127+i]=1;
    }
}

// Allocate managed buffers and light the four global corners: rank 0 owns the
// top two, the last rank owns the bottom two (world is row-striped over ranks).
static inline void gol_initOnesAtCorners( size_t worldWidth, size_t worldHeight, int my_rank, int max_rank)
{
    cudaMallocManaged(&g_data, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));
    cudaMallocManaged(&g_resultData, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));

    if(my_rank==0){
        g_data[worldWidth] = 1;          // upper left
        g_data[(worldWidth*2)-1]=1;      // upper right
    }
    if(my_rank == max_rank){
        g_data[g_dataLength]=1;                  // lower left
        g_data[g_dataLength + worldWidth-1]=1;   // lower right
    }
}

// Allocate managed buffers and place a 3-cell "spinner" in rank 0's top row.
static inline void gol_initSpinnerAtCorner( size_t worldWidth, size_t worldHeight, int my_rank)
{
    cudaMallocManaged(&g_data, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));
    cudaMallocManaged(&g_resultData, (g_dataLength+(2*worldWidth))*sizeof(unsigned int));

    if(my_rank==0){
        g_data[worldWidth] = 1;          // upper left
        g_data[1+worldWidth] = 1;        // upper left +1
        g_data[(worldWidth*2)-1]=1;      // upper right
    }
}

// Record world dimensions, bind this rank to a CUDA device (round-robin over
// the visible devices), and dispatch to the selected init pattern.
static inline void gol_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int size)
{
    g_worldWidth = worldWidth;
    //g_worldHeight = worldHeight*size;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;

    int cE, cudaDeviceCount;
    if((cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess ){
        printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount );
        exit(-1);
    }
    if((cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess ){
        printf(" Unable to have rank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }

    switch(pattern)
    {
    case 0:
        gol_initAllZeros( worldWidth, worldHeight );
        break;
    case 1:
        gol_initAllOnes( worldWidth, worldHeight );
        break;
    case 2:
        gol_initOnesInMiddle( worldWidth, worldHeight );
        break;
    case 3:
        gol_initOnesAtCorners( worldWidth, worldHeight, myrank, size-1);
        break;
    case 4:
        gol_initSpinnerAtCorner( worldWidth, worldHeight, myrank);
        break;
    default:
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}

// C-linkage wrapper so the MPI driver (compiled as C) can call the init code.
extern "C" void initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int size, int my_rank){
    gol_initMaster(pattern, worldWidth, worldHeight, myrank, size);
}

// Return the next state (0/1) of cell (x1,y1) given its 8 neighbors.
// x0/x2 are the wrapped left/right columns, y0/y2 the wrapped rows above/below
// (all pre-multiplied row offsets except the x's).
__device__ unsigned int gol_countAliveCells(const unsigned int* data,
                                            size_t x0, size_t x1, size_t x2,
                                            size_t y0, size_t y1, size_t y2)
{
    int counter = 0;

    // Sum the 8 neighbors (each is 0 or 1).
    counter+= (data[y0 + x0]+data[y0 + x1]+data[y0 + x2]);
    counter+= (data[y1 + x0]+data[y1 + x2]);
    counter+= (data[y2 + x0]+data[y2 + x1]+data[y2 + x2]);

    // Standard Conway rules: live cell survives with 2-3 neighbors,
    // dead cell is born with exactly 3.
    if(data[x1+y1]==1){
        if(counter >=2 && counter <=3)
            return 1;
    }
    else if(counter==3)
        return 1;
    return 0;
}

// One Game-of-Life generation over a toroidal worldWidth x worldHeight grid,
// one cell per grid-stride iteration, writing into d_resultData.
__global__ void gol_kernel(const unsigned int * d_data, unsigned int worldWidth,
                           unsigned int worldHeight, unsigned int * d_resultData,
                           int my_rank)
{
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;
    size_t dataLength = worldWidth * worldHeight;
    size_t y0,y1,y2;
    size_t x0,x1,x2;

    for(unsigned int i = index; i<worldHeight*worldWidth; i+=stride){
        // FIX: these were computed from `index`, so every grid-stride
        // iteration re-processed the thread's first cell and left the
        // rest of the world untouched. They must derive from `i`.
        x1 = i % worldWidth;            // column
        y1 = i - x1;                    // row offset (row * worldWidth)
        x0 = (x1 + worldWidth - 1) % worldWidth;     // wrapped left column
        x2 = (x1 + 1) % worldWidth;                  // wrapped right column
        y0 = (y1 + dataLength - worldWidth) % dataLength;  // wrapped row above
        y2 = (y1 + worldWidth) % dataLength;               // wrapped row below

        unsigned int tmp = gol_countAliveCells(d_data, x0, x1, x2, y0, y1, y2);
        d_resultData[y1+x1] = tmp;
    }
}

// C-linkage launch wrapper used by the MPI driver.
extern "C" void kernelCall(int numBlocks, ushort threadsCount, const unsigned int *d_data,
                           size_t worldWidth, size_t worldHeight,
                           unsigned int** d_resultData, int my_rank){
    gol_kernel<<<numBlocks, threadsCount>>>(d_data, worldWidth, worldHeight, *d_resultData, my_rank);
    //cudaDeviceSynchronize();
}
18,997
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <cuda.h>
#include <math.h>

// One blur pass: each output pixel in (R,G,B) is the integer average of the
// 4/3/2 in-bounds von-Neumann neighbors read from (h_R,h_G,h_B).
// Nine cases: interior, four edges, four corners. Launch with a 2D grid
// covering colsize x rowsize; out-of-range threads do nothing.
__global__ void doBlur(int *R,int *h_R,int *G,int *h_G,int *B,int *h_B,int colsize,int rowsize){
	int row = blockIdx.y*blockDim.y+threadIdx.y;
	int col = blockIdx.x*blockDim.x+threadIdx.x;
	if(col<colsize && row<rowsize){
		// interior: 4 neighbors
		if (row != 0 && row != (rowsize-1) && col != 0 && col != (colsize-1)){
			R[row * colsize + col] = (h_R[(row+1) * colsize + col]+h_R[(row-1) * colsize + col]+h_R[row * colsize + (col+1)]+h_R[row * colsize + (col-1)])/4;
			G[row * colsize + col] = (h_G[(row+1) * colsize + col]+h_G[(row-1) * colsize + col]+h_G[row * colsize + (col+1)]+h_G[row * colsize + (col-1)])/4;
			B[row * colsize + col] = (h_B[(row+1) * colsize + col]+h_B[(row-1) * colsize + col]+h_B[row * colsize + (col+1)]+h_B[row * colsize + (col-1)])/4;
		}
		// top edge: 3 neighbors
		else if (row == 0 && col != 0 && col != (colsize-1)){
			R[row * colsize + col] = (h_R[(row+1) * colsize + col]+h_R[row * colsize + (col+1)]+h_R[row * colsize + (col-1)])/3;
			G[row * colsize + col] = (h_G[(row+1) * colsize + col]+h_G[row * colsize + (col+1)]+h_G[row * colsize + (col-1)])/3;
			B[row * colsize + col] = (h_B[(row+1) * colsize + col]+h_B[row * colsize + (col+1)]+h_B[row * colsize + (col-1)])/3;
		}
		// bottom edge: 3 neighbors
		else if (row == (rowsize-1) && col != 0 && col != (colsize-1)){
			R[row * colsize + col] = (h_R[(row-1) * colsize + col]+h_R[row * colsize + (col+1)]+h_R[row * colsize + (col-1)])/3;
			G[row * colsize + col] = (h_G[(row-1) * colsize + col]+h_G[row * colsize + (col+1)]+h_G[row * colsize + (col-1)])/3;
			B[row * colsize + col] = (h_B[(row-1) * colsize + col]+h_B[row * colsize + (col+1)]+h_B[row * colsize + (col-1)])/3;
		}
		// left edge: 3 neighbors
		else if (col == 0 && row != 0 && row != (rowsize-1)){
			R[row * colsize + col] = (h_R[(row+1) * colsize + col]+h_R[(row-1) * colsize + col]+h_R[row * colsize + (col+1)])/3;
			G[row * colsize + col] = (h_G[(row+1) * colsize + col]+h_G[(row-1) * colsize + col]+h_G[row * colsize + (col+1)])/3;
			B[row * colsize + col] = (h_B[(row+1) * colsize + col]+h_B[(row-1) * colsize + col]+h_B[row * colsize + (col+1)])/3;
		}
		// right edge: 3 neighbors
		else if (col == (colsize-1) && row != 0 && row != (rowsize-1)){
			R[row * colsize + col] = (h_R[(row+1) * colsize + col]+h_R[(row-1) * colsize + col]+h_R[row * colsize + (col-1)])/3;
			G[row * colsize + col] = (h_G[(row+1) * colsize + col]+h_G[(row-1) * colsize + col]+h_G[row * colsize + (col-1)])/3;
			B[row * colsize + col] = (h_B[(row+1) * colsize + col]+h_B[(row-1) * colsize + col]+h_B[row * colsize + (col-1)])/3;
		}
		// corners: 2 neighbors each
		else if (row==0 &&col==0){
			R[row * colsize + col] = (h_R[row * colsize + (col+1)]+h_R[(row+1) * colsize + col])/2;
			G[row * colsize + col] = (h_G[row * colsize + (col+1)]+h_G[(row+1) * colsize + col])/2;
			B[row * colsize + col] = (h_B[row * colsize + (col+1)]+h_B[(row+1) * colsize + col])/2;
		}
		else if (row==0 &&col==(colsize-1)){
			R[row * colsize + col] = (h_R[row * colsize + (col-1)]+h_R[(row+1) * colsize + col])/2;
			G[row * colsize + col] = (h_G[row * colsize + (col-1)]+h_G[(row+1) * colsize + col])/2;
			B[row * colsize + col] = (h_B[row * colsize + (col-1)]+h_B[(row+1) * colsize + col])/2;
		}
		else if (row==(rowsize-1) &&col==0){
			R[row * colsize + col] = (h_R[row * colsize + (col+1)]+h_R[(row-1) * colsize + col])/2;
			G[row * colsize + col] = (h_G[row * colsize + (col+1)]+h_G[(row-1) * colsize + col])/2;
			B[row * colsize + col] = (h_B[row * colsize + (col+1)]+h_B[(row-1) * colsize + col])/2;
		}
		else if (row==(rowsize-1) &&col==(colsize-1)){
			R[row * colsize + col] = (h_R[row * colsize + (col-1)]+h_R[(row-1) * colsize + col])/2;
			G[row * colsize + col] = (h_G[row * colsize + (col-1)]+h_G[(row-1) * colsize + col])/2;
			B[row * colsize + col] = (h_B[row * colsize + (col-1)]+h_B[(row-1) * colsize + col])/2;
		}
	}
}

// Copy the blurred frame back into the "current" buffers so the next doBlur
// pass reads the result of this one (double-buffer swap by copy).
__global__ void doCopy(int *R,int *h_R,int *G,int *h_G,int *B,int *h_B,int colsize,int rowsize){
	int row = blockIdx.y*blockDim.y+threadIdx.y;
	int col = blockIdx.x*blockDim.x+threadIdx.x;
	if(col<colsize && row<rowsize){
		h_R[row * colsize + col] = R[row * colsize + col];
		h_G[row * colsize + col] = G[row * colsize + col];
		h_B[row * colsize + col] = B[row * colsize + col];
	}
}

// Reads hex-encoded RGB data from David.ps (after 5 header lines), runs
// nblurs blur iterations on the GPU, times them, and writes DavidBlur.ps.
int main (int argc, const char * argv[]) {
	static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
	char str[maxlen], lines[5][maxlen];
	FILE *fp, *fout;
	int nlines = 0;
	unsigned int h1, h2, h3;
	char *sptr;
	int row = 0, col = 0, nblurs, lineno=0, k;
	struct timeval tim;
	int *R, *B, *G;
	int sizei;
	sizei = sizeof(int)*colsize*rowsize;
	R = (int*)malloc(sizei);
	G = (int*)malloc(sizei);
	B = (int*)malloc(sizei);

	// Parse the image: first 5 lines are kept verbatim for the output header,
	// the rest is a stream of RRGGBB hex triplets, colsize per row.
	fp = fopen("David.ps", "r");
	while(! feof(fp))
	{
		fscanf(fp, "\n%[^\n]", str);
		if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
		else{
			for (sptr=&str[0];*sptr != '\0';sptr+=6){
				sscanf(sptr,"%2x",&h1);
				sscanf(sptr+2,"%2x",&h2);
				sscanf(sptr+4,"%2x",&h3);
				if (col==colsize){
					col = 0;
					row++;
				}
				if (row < rowsize && col < colsize) {
					R[row * colsize + col] = h1;
					G[row * colsize + col] = h2;
					B[row * colsize + col] = h3;
				}
				col++;
			}
		}
	}
	fclose(fp);

	nblurs = 160;
	gettimeofday(&tim, NULL);
	double t1=tim.tv_sec+(tim.tv_usec/1000000.0);

	int *Rnew, *Bnew, *Gnew;
	// Despite the h_ prefix, these are DEVICE buffers holding the current frame.
	// FIX: the original malloc'ed h_R/h_G/h_B here and then immediately
	// overwrote the pointers with cudaMalloc, leaking three host buffers.
	int *h_R, *h_G, *h_B;
	Rnew = (int*)malloc(sizei);
	Gnew = (int*)malloc(sizei);
	Bnew = (int*)malloc(sizei);

	int *d_R, *d_G, *d_B;
	cudaMalloc((void **)&h_R,sizei);
	cudaMalloc((void **)&h_G,sizei);
	cudaMalloc((void **)&h_B,sizei);
	cudaMalloc((void **)&d_R,sizei);
	cudaMalloc((void **)&d_G,sizei);
	cudaMalloc((void **)&d_B,sizei);
	cudaMemcpy(h_R,R,sizei,cudaMemcpyHostToDevice);
	cudaMemcpy(h_G,G,sizei,cudaMemcpyHostToDevice);
	cudaMemcpy(h_B,B,sizei,cudaMemcpyHostToDevice);

	dim3 dimGrid(ceil(colsize/(float)32),ceil(rowsize/(float)32),1);
	dim3 dimBlock(32,32,1);
	for(k=0;k<nblurs;k++){
		doBlur<<<dimGrid,dimBlock>>>(d_R,h_R,d_G,h_G,d_B,h_B,colsize,rowsize);
		doCopy<<<dimGrid,dimBlock>>>(d_R,h_R,d_G,h_G,d_B,h_B,colsize,rowsize);
	}

	cudaMemcpy(Rnew,h_R,sizei,cudaMemcpyDeviceToHost);
	cudaMemcpy(Gnew,h_G,sizei,cudaMemcpyDeviceToHost);
	cudaMemcpy(Bnew,h_B,sizei,cudaMemcpyDeviceToHost);
	cudaFree(h_R);
	cudaFree(h_G);
	cudaFree(h_B);
	cudaFree(d_R);
	cudaFree(d_G);
	cudaFree(d_B);

	gettimeofday(&tim, NULL);
	double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
	printf("%.6lf seconds elapsed\n", t2-t1);

	// Emit the output image: preserved header lines then hex triplets,
	// wrapped every linelen pixels.
	fout= fopen("DavidBlur.ps", "w");
	for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
	fprintf(fout,"\n");
	for(row=0;row<rowsize;row++){
		for (col=0;col<colsize;col++){
			fprintf(fout,"%02x%02x%02x",Rnew[row*colsize+col],Gnew[row*colsize+col],Bnew[row*colsize+col]);
			lineno++;
			if (lineno==linelen){
				fprintf(fout,"\n");
				lineno = 0;
			}
		}
	}
	fclose(fout);

	// FIX: all six host buffers were leaked in the original.
	free(R);
	free(G);
	free(B);
	free(Rnew);
	free(Gnew);
	free(Bnew);
	return 0;
}
18,998
#include<stdio.h>
#include<cuda.h>

// Enumerate all CUDA devices and print their properties.
// FIX: several cudaDeviceProp fields (totalGlobalMem, sharedMemPerBlock,
// memPitch, totalConstMem, textureAlignment) are size_t; printing them with
// %d is undefined behavior on LP64 platforms and truncates values over 2 GiB.
// They are printed with %zu below.
int main()
{
	int devCount;
	cudaGetDeviceCount(&devCount);
	printf("CUDA Device Query...\n");
	printf("There are %d CUDA devices.\n", devCount);

	for (int i = 0; i < devCount; ++i)
	{
		// Get device properties
		printf("\nCUDA Device #%d\n", i);
		cudaDeviceProp devProp;
		cudaGetDeviceProperties(&devProp, i);
		printf("Device Name: %s\n", devProp.name);
		printf("Total Global Memory: %zu\n", devProp.totalGlobalMem);
		printf("Maximum Threads per Block: %d\n", devProp.maxThreadsPerBlock);
		printf("Maximum Threads Dimension in X-axis: %d\n", devProp.maxThreadsDim[0]);
		printf("Maximum Threads Dimension in Y-axis: %d\n", devProp.maxThreadsDim[1]);
		printf("Maximum Threads Dimension in Z-axis: %d\n", devProp.maxThreadsDim[2]);
		printf("Maximum Grid Size in X-axis: %d\n", devProp.maxGridSize[0]);
		printf("Maximum Grid Size in Y-axis: %d\n", devProp.maxGridSize[1]);
		printf("Maximum Grid Size in Z-axis: %d\n", devProp.maxGridSize[2]);
		printf("Warp Size: %d\n", devProp.warpSize);
		printf("Clock Rate: %d\n", devProp.clockRate);
		printf("Shared Memory Per Block: %zu\n", devProp.sharedMemPerBlock);
		printf("Registers Per Block: %d\n", devProp.regsPerBlock);
		printf("Maximum pitch in bytes allowed by memory copies: %zu\n", devProp.memPitch);
		printf("Total Constant Memory: %zu\n", devProp.totalConstMem);
		printf("Major compute capability: %d\n", devProp.major);
		printf("Minor compute capability: %d\n", devProp.minor);
		printf("Alignment required for textures: %zu\n", devProp.textureAlignment);
		printf("Device can concurrently copy memory and execute a kernel: %d\n", devProp.deviceOverlap);
		printf("Number of multiprocessors on device: %d\n", devProp.multiProcessorCount);
		printf("Specified whether there is a run time limit on kernels: %d\n", devProp.kernelExecTimeoutEnabled);
		printf("Device is integrated as opposed to discrete: %d\n", devProp.integrated);
		printf("Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer: %d\n", devProp.canMapHostMemory);
		printf("Compute mode: %d\n", devProp.computeMode);
		printf("Device can possibly execute multiple kernels concurrently: %d\n", devProp.concurrentKernels);
		printf("Device has ECC support enabled: %d\n", devProp.ECCEnabled);
		printf("PCI bus ID of the device: %d\n", devProp.pciBusID);
		printf("PCI device ID of the device: %d\n", devProp.pciDeviceID);
		printf("1 if device is a Tesla device using TCC driver, 0 otherwise: %d\n", devProp.tccDriver);
	}

	return 0;
}
18,999
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include <time.h>

// Number of (A_i, B_i) sorted-array pairs merged in one batch.
#define N 10000
// Threads per block.
#define NTPB 1024

/**
 * Batched "merge path" kernel: merges N independent pairs of sorted int
 * arrays (A_i of size sizeA[i], B_i of size sizeB[i], with
 * sizeA[i] + sizeB[i] == d) into m. Each pair occupies one d-element slot
 * of a, b and m; each thread computes exactly one output element of its
 * pair by binary-searching its merge-path diagonal.
 *
 * Launch expectations (from main below): 1-D grid, NTPB threads/block,
 * d divides blockDim.x, shared tiles of 1024 ints each — so
 * (blockDim.x / d) * d must be <= 1024.
 *
 * Params:
 *   a, b       = packed input arrays, d ints per pair, each pair sorted.
 *   m          = packed output, d ints per pair.
 *   sizeA/B    = per-pair sizes of the A / B halves.
 *   d          = slot size (total merged length per pair).
 */
__global__ void mergeSmallBatch_k(int *a, int *b, int *m, int *sizeA, int *sizeB, const int d){
    const int tidx = threadIdx.x % d;                      // diagonal index = position of this thread's output within its pair
    const int Qt = (threadIdx.x - tidx) / d;               // index of the pair within this block's shared tiles
    const int gbx = Qt + blockIdx.x * (blockDim.x / d);    // global index of the pair
    //printf("blockId.x = %d | threadIdx. x = %d | tidx = %d | Qt = %d | gbx = %d\n", blockIdx.x, threadIdx.x, tidx, Qt, gbx);

    // Sizes of the pair currently being processed.
    const int sizeAi = sizeA[gbx];
    const int sizeBi = sizeB[gbx];

    // Tiles shared by the threads of one block.
    __shared__ int sA[1024];
    __shared__ int sB[1024];

    // Stage this block's input pairs into shared memory.
    sA[Qt * d + tidx] = a[gbx * d + tidx];
    sB[Qt * d + tidx] = b[gbx * d + tidx];
    __syncthreads();

    // NOTE(review): this bounds guard runs AFTER the global reads of
    // sizeA/sizeB/a/b above, so any thread with gbx*d+tidx >= N*d has
    // already read out of bounds before returning. With the launch in
    // main (N*d a multiple of NTPB) no such thread exists, but the guard
    // should move before the loads if the launch config ever changes —
    // TODO confirm and restructure (keep __syncthreads() non-divergent).
    if (gbx * d + tidx >= N * d){
        return;
    }

    // Merge-path binary search along diagonal `tidx`:
    // K = low end of the search window, P = high end, Q = midpoint.
    // Index [0] walks B, index [1] walks A.
    int K[2];
    int P[2];
    int Q[2];
    if (tidx > sizeAi) {
        // Diagonal starts past the end of A: clamp onto valid region.
        K[0] = tidx - sizeAi;
        K[1] = sizeAi;
        P[0] = sizeAi;
        P[1] = tidx - sizeAi;
    }
    else{
        K[0] = 0;
        K[1] = tidx;
        P[0] = tidx;
        P[1] = 0;
    }
    while(1){
        int offset = (abs(K[1]-P[1]))/2;
        Q[0] = K[0] + offset;
        Q[1] = K[1] - offset;
        // Test whether (Q[0], Q[1]) is the crossing point of the merge path.
        if(Q[1] >= 0 && Q[0] <= sizeBi && (Q[1] == sizeAi || Q[0] == 0 || sA[ Qt*d + Q[1] ] > sB[ Qt*d + Q[0]-1 ]) ){
            if(Q[0] == sizeBi || Q[1] == 0 || sA[Qt*d + Q[1]-1 ] <= sB[ Qt*d + Q[0] ]){
                // Crossing point found: emit the smaller head element.
                if(Q[1] < sizeAi && (Q[0] == sizeBi || sA[Qt*d + Q[1] ] <= sB[ Qt*d + Q[0] ])){
                    m[gbx * d + tidx] = sA[Qt*d + Q[1]] ;
                }
                else{
                    m[gbx * d + tidx] = sB[Qt*d + Q[0]];
                }
                break;
            }
            else{
                // Path crosses below the midpoint: search the lower half.
                K[0] = Q[0] + 1;
                K[1] = Q[1] - 1;
            }
        }
        else{
            // Path crosses above the midpoint: search the upper half.
            P[0] = Q[0] - 1;
            P[1] = Q[1] + 1;
        }
    }
}

/**
 * Driver: builds N sorted pairs (A_i = even values, B_i = odd values,
 * random split point), merges them on the GPU with mergeSmallBatch_k,
 * times the kernel with CUDA events and prints one randomly chosen
 * merged slot for visual inspection.
 */
int main(void){
    int *A;
    int *B;
    int *M;
    const int d = 128;
    int nb_block = (N*d + NTPB-1)/NTPB;
    //printf("%d\n",nb_block);
    float time= 0.;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    A = (int*) malloc(N*d* sizeof(int));
    B = (int*) malloc(N*d* sizeof(int));
    M = (int*) calloc(sizeof(int),N*d);
    int * sizeAi = (int*) malloc(N * sizeof(int));
    int * sizeBi = (int*) malloc(N * sizeof(int));
    srand(0);
    for (int i = 0; i < N; i++){
        //int x;
        //printf("entrer la taille de A_%d\n",i);
        //scanf("%d",&x);
        // Random size for A_i; B_i gets the remaining d - x slots.
        int x = rand() % d;
        sizeAi[i] = x;
        sizeBi[i] = d - x;
        // Fill with increasing values: inputs must already be sorted.
        for (int j = 0; j < sizeAi[i]; j ++){
            A[i*d+j] = 2*j;
        }
        for (int j = 0; j < sizeBi[i]; j ++){
            B[i*d+j] = 2*j + 1;
        }
    }
    int *A_gpu;
    int *B_gpu;
    int *M_gpu;
    int *sizeAi_GPU;
    int *sizeBi_GPU;
    cudaMalloc(&A_gpu, N*d* sizeof(int));
    cudaMalloc(&B_gpu, N*d* sizeof(int));
    cudaMalloc(&M_gpu, N*d* sizeof(int));
    cudaMalloc(&sizeAi_GPU, N * sizeof(int));
    cudaMalloc(&sizeBi_GPU, N * sizeof(int));
    cudaMemcpy(A_gpu, A, N*d* sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B, N*d* sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(sizeAi_GPU, sizeAi, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(sizeBi_GPU, sizeBi, N * sizeof(int), cudaMemcpyHostToDevice);
    // Time only the kernel, not the transfers.
    cudaEventRecord(start);
    mergeSmallBatch_k<<<nb_block,NTPB>>>(A_gpu, B_gpu, M_gpu, sizeAi_GPU, sizeBi_GPU, d);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("mergeSmallBatch_k: temps écoulé = %f secs\n", time/1000);
    cudaMemcpy(M, M_gpu, N*d *sizeof(int), cudaMemcpyDeviceToHost);
    //for (int i = 0; i < N+Nb; i++)
    //    printf("M[%d] = %d\n", i, M[i]);
    // NOTE(review): rand()%N+1 yields a value in [1, N]; i == N indexes
    // one past the last pair — should be rand()%N. TODO confirm and fix.
    int i = rand()%N+1;
    printf("Tableau M _%d\n", i);
    if (sizeAi[i] != 0)
        printf("A_%d de size : %d | nb PAIRS allant 0 à %d\n",i, sizeAi[i], A[d*i+sizeAi[i]-1]);
    if (sizeBi[i] != 0)
        printf("B_%d de size : %d | nb IMPAIRS allant 1 à %d\n",i, sizeBi[i], B[d*i+sizeBi[i]-1]);
    for (int j = 0; j < d; j++){
        printf("M[%d][%d] = %d\n", i, j, M[i*d+j]);
    }
    free(A);
    free(B);
    free(M);
    free(sizeAi);
    free(sizeBi);
    cudaFree(A_gpu);
    cudaFree(B_gpu);
    cudaFree(M_gpu);
    cudaFree(sizeAi_GPU);
    cudaFree(sizeBi_GPU);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
19,000
/**
 * Copyright © 2018 - 2019 Sergei Iurevich Filippov, All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file contains implementations and interfaces for cuda kernel activation functions. All cuda kernels then are
 * compiled to a standalone dynamic library to be linked with D code later.
 *
 * Authors: Sergei Iurevich Filippov. $(COPYRIGHT)
 */

// Threads per block for every activation launch; grids are sized with an
// exact ceiling division by this value. (The previous code divided by
// 1023, which launched more blocks than needed for large counts.)
#define ACTIVATION_BLOCK 1024

/**
 * Calculate the hyperbolic tangent of each element of an array x on a GPU in place.
 *
 * <math><mrow>
 * <mi mathvariant="italic">tanh</mi><mfenced><mi>x</mi></mfenced>
 * <mo>=</mo>
 * <mfrac>
 * <mrow><msup><mi>e</mi><mrow><mn>2</mn><mi>x</mi></mrow></msup><mo>-</mo><mn>1</mn></mrow>
 * <mrow><msup><mi>e</mi><mrow><mn>2</mn><mi>x</mi></mrow></msup><mo>+</mo><mn>1</mn></mrow>
 * </mfrac>
 * </mrow></math>
 *
 * Params:
 *     x = A pointer to an array to calculate.
 *     count = Size of the array.
 */
__global__ void kernel_tanh(float *x, const size_t count)
{
	// size_t index: an int index would overflow / sign-compare against
	// the size_t count for very large arrays.
	size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
	if (i < count)
		x[i] = tanhf(x[i]);
}

/// ditto
__host__ void cuda_tanh(float *x, const size_t count)
{
	kernel_tanh<<<(count + ACTIVATION_BLOCK - 1) / ACTIVATION_BLOCK, ACTIVATION_BLOCK>>>(x, count);
}

/**
 * Calculate the rectified linear unit of each element of an array x on a GPU in place.
 *
 * <math><mrow>
 * <mi mathvariant="italic">ReLU</mi><mfenced><mi>x</mi></mfenced>
 * <mo>=</mo>
 * <mo>{</mo>
 * <mtable>
 * <mtr>
 * <mtd><mn>0</mn><mi>x</mi></mtd><mtd><mtext>for&nbsp;</mtext><mi>x</mi><mo>&lt;</mo><mn>0</mn></mtd>
 * </mtr>
 * <mtr>
 * <mtd><mi>x</mi></mtd><mtd><mtext>for&nbsp;</mtext><mi>x</mi><mo>&ge;</mo><mn>0</mn></mtd>
 * </mtr>
 * </mtable>
 * </mrow></math>
 *
 * Params:
 *     x = A pointer to an array to calculate.
 *     count = Size of the array.
 */
__global__ void kernel_ReLU(float *x, const size_t count)
{
	size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
	if (i < count)
		x[i] = fmaxf(0.0f, x[i]);
}

/// ditto
__host__ void cuda_ReLU(float *x, const size_t count)
{
	kernel_ReLU<<<(count + ACTIVATION_BLOCK - 1) / ACTIVATION_BLOCK, ACTIVATION_BLOCK>>>(x, count);
}

/**
 * Calculate the leaky rectified linear unit of each element of an array x on a GPU in place.
 *
 * <math><mrow>
 * <mi mathvariant="italic">Leaky ReLU</mi><mfenced><mi>x</mi></mfenced>
 * <mo>=</mo>
 * <mo>{</mo>
 * <mtable>
 * <mtr>
 * <mtd><mn>0.01</mn><mi>x</mi></mtd><mtd><mtext>for&nbsp;</mtext><mi>x</mi><mo>&lt;</mo><mn>0</mn></mtd>
 * </mtr>
 * <mtr>
 * <mtd><mi>x</mi></mtd><mtd><mtext>for&nbsp;</mtext><mi>x</mi><mo>&ge;</mo><mn>0</mn></mtd>
 * </mtr>
 * </mtable>
 * </mrow></math>
 *
 * Params:
 *     x = A pointer to an array to calculate.
 *     count = Size of the array.
 */
__global__ void kernel_LeakyReLU(float *x, const size_t count)
{
	size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
	// Negative inputs are damped by 0.01; non-negative pass through.
	if (i < count && x[i] < 0.0f)
		x[i] *= 0.01f;
}

/// ditto
__host__ void cuda_LeakyReLU(float *x, const size_t count)
{
	kernel_LeakyReLU<<<(count + ACTIVATION_BLOCK - 1) / ACTIVATION_BLOCK, ACTIVATION_BLOCK>>>(x, count);
}