serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
13,501
#include <stdio.h>
#include <iostream>
#include <vector>

// Simple row-major matrix descriptor; `elements` points at the payload,
// which may live on the host or the device depending on context.
typedef struct {
    int width;
    int height;
    float* elements;
} Matrix;

// One block per matrix: print the first 5 elements, then tag two slots with a
// per-matrix marker value (42 + matrix index).
// FIX: only thread 0 of each block does the work — the original let all 16
// threads of the block print and write the same values redundantly.
__global__ void doThings(Matrix* matrices) {
    if (threadIdx.x != 0) return;
    printf("?");
    int i = blockIdx.x;
    for (int j = 0; j < 5; j++)
        printf("-%f-", matrices[i].elements[j]);
    printf("\n");
    matrices[i].elements[0] = 42 + i;
    matrices[i].elements[3] = 42 + i;
}

int main(void) {
    int rows = 5, cols = 1, numMat = 16; // These are actually determined at run-time

    // Host matrices with host element buffers, initialized to 0..4.
    Matrix* data = (Matrix*)malloc(numMat * sizeof(Matrix));
    for (int i = 0; i < numMat; i++) {
        data[i].elements = (float*)malloc(sizeof(float) * 5);
        for (int j = 0; j < 5; j++) data[i].elements[j] = j;
    }

    Matrix* d_data;  // device array of Matrix structs
    // h_data mirrors d_data on the host, but its `elements` members hold
    // DEVICE pointers — it is the bookkeeping needed to free/copy them later.
    Matrix* h_data = (Matrix*)malloc(numMat * sizeof(Matrix));

    cudaMalloc(&d_data, numMat * sizeof(Matrix));
    cudaMemcpy(d_data, data, numMat * sizeof(Matrix), cudaMemcpyHostToDevice);
    for (int i = 0; i < numMat; i++) {
        cudaMalloc(&(h_data[i].elements), 5 * sizeof(float));
        cudaMemcpy(h_data[i].elements, data[i].elements, 5 * sizeof(float),
                   cudaMemcpyHostToDevice);
        // Patch the device-side struct so its `elements` points at the device buffer.
        cudaMemcpy(&(d_data[i].elements), &(h_data[i].elements), sizeof(float*),
                   cudaMemcpyHostToDevice);
    }  // matrix data is now on the gpu

    doThings<<<numMat, 16, 0>>>(d_data);

    // Pull the (device-pointer-bearing) structs back, then each element buffer.
    cudaMemcpy(h_data, d_data, numMat * sizeof(Matrix), cudaMemcpyDeviceToHost);
    for (int i = 0; i < numMat; i++) {
        cudaMemcpy(data[i].elements, h_data[i].elements, rows * cols * sizeof(float),
                   cudaMemcpyDeviceToHost);
    }

    for (int i = 0; i < numMat; i++) {
        for (int j = 0; j < 5; j++) printf("%f ", data[i].elements[j]);
        printf("\n");
    }

    // FIX: release all device and host allocations (the original leaked everything).
    for (int i = 0; i < numMat; i++) {
        cudaFree(h_data[i].elements);
        free(data[i].elements);
    }
    cudaFree(d_data);
    free(h_data);
    free(data);
}
13,502
#include "includes.h"

// Per-pixel linear rescale, in place: v' = (v - *min) * (*constant).
// One thread per pixel; `min` and `constant` are single device scalars.
// LENA_SIZE (from includes.h) is the total pixel count — presumably of the
// classic Lena test image; the guard handles the grid tail.
__global__ void calcPixelVal(float *g_idata, float* constant, float* min)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < LENA_SIZE)
        g_idata[i] = (g_idata[i] - (*min)) * (*constant);
}
13,503
#include <cstdio>
#include <cuda.h>

// Abort the process with a diagnostic when a CUDA runtime call failed.
// `file` and `line` identify the call site (typically __FILE__/__LINE__,
// supplied by a wrapping macro).
void __cudaSafeCall(cudaError err, const char *file, const int line)
{
    if (err == cudaSuccess)
        return;  // nothing to report

    fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
            file, line, cudaGetErrorString(err));
    exit(-1);
}
13,504
#include<cuda.h>
#include<iostream>
#include <unistd.h>
#include <math.h>
#include <stdlib.h>
using namespace std;

// Defined after main(): single-thread kernel that snapshots one device value.
__global__ void monitorKernel(double * write_2_ptr, double * read_in_ptr);

// Element-wise add Pelems = Melems + Nelems. The for loop runs exactly once,
// so each thread handles the single element at its global id.
__device__ void MatrixAddKernel(double* Melems, double* Nelems, double* Pelems)
{
    int thid = threadIdx.x+blockIdx.x*blockDim.x;
    int i = 0;
    int shift = thid;
    for(i = 0; i < 1; ++i)
        Pelems[shift+i] = Melems[shift+i]+Nelems[shift+i];
}

// Shared-memory tiled matrix multiply C = A * B for nRows x nRows matrices.
// The dynamic shared buffer `ptr` must hold two blockDim.x^2 tiles (As, Bs).
__device__ void Muldev(double* A, double* B, double* C, int nRows)
{
    extern __shared__ double ptr[];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int aBegin = nRows*blockDim.x*by;   // first tile of A for this block row
    int aEnd = aBegin + nRows-1;        // last tile of A
    int aStep = blockDim.x;
    int bBegin = blockDim.x*bx;         // first tile of B for this block column
    int bStep = blockDim.x*nRows;
    double Csub = 0;                    // this thread's C element accumulator
    double* As = &ptr[0];
    double* Bs = &ptr[blockDim.x*blockDim.x];
    int a;
    int b;
    int k;
    for(a = aBegin, b = bBegin; a<=aEnd; a+=aStep, b+=bStep){
        // Stage one tile of each operand into shared memory, then barrier
        // before any thread reads the freshly written tiles.
        As[ty*blockDim.x+tx] = A[a+nRows*ty+tx];
        Bs[ty*blockDim.x+tx] = B[b+nRows*ty+tx];
        __syncthreads();
        __threadfence_block();
        for(k = 0; k < blockDim.x; ++k){
            Csub+=As[ty*blockDim.x+k]*Bs[k*blockDim.x+tx];}
        //Csub+=As[tx*blockDim.x+k]*Bs[k*blockDim.x+ty];}
        __syncthreads();
    }
    int c = nRows*blockDim.x*by+blockDim.x*bx;
    C[c+nRows*ty+tx] = Csub;
}

// Long-running iteration on a 2x2 state: per step computes temp1 = data*data,
// temp2 = B*data, temp3 = A*sin(data), then data = temp1+temp2+temp3+.01,
// and busy-waits ~5000 clocks so a concurrent monitor can observe progress.
__global__ void dataKernel(double* data, double* A, double* B, int nsteps,
                           double *temp1, double *temp2, double* temp3){
    //this adds a value to a variable stored in global memory
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int thid = tx + blockDim.x*ty;
    clock_t start = clock64();   // NOTE(review): shadowed by the inner `start` below
    clock_t now;
    int i = 0;
    while(i < nsteps) {
        temp3[thid] = sin(data[thid]);
        __syncthreads();
        Muldev(data, data, temp1, 2);
        __syncthreads();
        Muldev(B, data, temp2, 2);
        __syncthreads();
        Muldev(A, temp3, temp3, 2);   // NOTE(review): temp3 is both input and output here
        __syncthreads();
        data[thid] = temp1[thid]+temp2[thid]+temp3[thid]+.01;
        __syncthreads();
        i = i+1;
        clock_t start = clock64();
        // Busy-wait to throttle iterations; mixes clock64() start with clock()
        // reads — NOTE(review): confirm both counters are comparable here.
        for(;;){
            now = clock();
            clock_t cycles = now-start;
            if(cycles > 5000) break;
        }
    }
}

int main(int argc, char** argv)
{
    double hA[4] = {.6, -.1, .6, 1.95};
    // NOTE(review): 1/150 and -1/150 are INTEGER division and evaluate to 0
    // (.1/150 is fine) — 1./150 and -1./150 were presumably intended.
    double hB[4] = {1/150, .1/150, -.1/150, -1/150};
    double hC[4] = {.3, .3, -.5, -.25};
    double* dA;
    double* dB;
    double* dC;
    double *temp1, *temp2, *temp3;
    int nRows = 2;
    int TileSize = 2;
    int size = 4*sizeof(double);
    int nSteps = atoi(argv[1]);   // NOTE(review): no argc check before argv[1]
    cudaMalloc((void**)&dA, size);
    cudaMalloc((void**)&dB, size);
    cudaMalloc((void**)&dC, size);
    cudaMalloc((void**)&temp1, size);
    cudaMalloc((void**)&temp2, size);
    cudaMalloc((void**)&temp3, size);
    cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dC, hC, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(TileSize, TileSize);
    dim3 dimGrid(nRows/TileSize, nRows/TileSize);
    double *monitor_data;   // device scalar written by monitorKernel
    double *h_data;         // pinned host mirror of monitor_data
    cudaMalloc((void**)&monitor_data, sizeof(double));
    cudaMallocHost((void**)&h_data, sizeof(double));
    cudaStream_t stream1;
    cudaStreamCreate(&stream1);
    cudaStream_t stream0;
    cudaStreamCreate(&stream0);
    // Long-running kernel on stream0; monitoring runs concurrently on stream1.
    dataKernel<<<dimGrid, dimBlock,
                 sizeof(double)*TileSize*TileSize*TileSize*TileSize,
                 stream0>>>(dC, dA, dB, nSteps, temp1, temp2, temp3);
    //sleep(.0001);
    cout <<"Launching Monitor Kernel" << endl;
    monitorKernel<<<1, 1,0, stream1>>>(monitor_data, &dC[1]);
    cout <<"Launching Async Mem Cpy" << endl;
    cudaMemcpyAsync(h_data, monitor_data, sizeof(double), cudaMemcpyDeviceToHost, stream1);
    cudaStreamSynchronize(stream1);
    cout << "Value monitored over: " << *h_data*100 << endl;
    sleep(.0001);   // NOTE(review): sleep() takes unsigned seconds; .0001 truncates to 0
    cout <<"Launching Monitor Kernel" << endl;
    monitorKernel<<<1, 1,0, stream1>>>(monitor_data, &dC[1]);
    cout <<"Launching Async Mem Cpy" << endl;
    cudaMemcpyAsync(h_data, monitor_data, sizeof(double), cudaMemcpyDeviceToHost, stream1);
    cudaStreamSynchronize(stream1);
    cout << "Value monitored over: " << *h_data*100 << endl;
    sleep(1);
    cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
    int i = 0;
    for(i = 0; i < 4; i++)
        cout << "hC: " << hC[i]*100 << endl;
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
/* Dead experimental code condensed during review: an earlier driver used an
   int counter plus a __device__ stop_kernel flag (cudaGetSymbolAddress,
   repeated monitorKernel / cudaMemcpyAsync / sleep rounds, then a host->device
   copy of the stop flag). Retained here only as a historical note. */

// Single-thread kernel: publish *read_in_ptr into *write_2_ptr so the host
// can asynchronously copy a snapshot while other kernels keep running.
__global__ void monitorKernel(double * write_2_ptr, double * read_in_ptr){
    *write_2_ptr = *read_in_ptr;
}
13,505
/*
 * A very simple cuda implementation of map
 */
#include <stdio.h>
#include <stdlib.h>

// The map kernel and the clock helpers are defined in another translation unit.
__global__ void map(float* out, float* in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);

// Driver: allocates `size` floats on host and device, times the host->device
// copy, one kernel launch, and the device->host copy, then prints each pair.
int main(int argc, char** argv)
{
    if (argc < 2) {
        printf("Usage: %s #-of-floats\n",argv[0]);
        exit(1);
    }
    int size = atoi(argv[1]);
    printf("size = %d\n",size);

    // Reject sizes that cannot be covered by a 1-D grid of max-size blocks.
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props,0);
    printf("maxThreadsPerBlock = %d\n",props.maxThreadsPerBlock);
    printf("maxGridSize[0] = %d\n",props.maxGridSize[0]);
    if (size > (long)props.maxThreadsPerBlock*props.maxGridSize[0]) {
        fprintf(stderr,"Max size for the large model is %d\n",
                props.maxThreadsPerBlock*props.maxGridSize[0]);
        exit(1);
    }

    void *d_in;   // device data
    void *d_out;
    float *h_in;  // host data
    float *h_out;
    cudaMalloc(&d_in,size*sizeof(float));
    cudaMalloc(&d_out,size*sizeof(float));
    h_in = (float*) malloc(size*sizeof(float));
    h_out =(float*) malloc(size*sizeof(float));
    for (int i = 0; i < size; i++) {
        h_in[i] = i;
    }

    startClock("copy data to device");
    cudaMemcpy(d_in,h_in,size*sizeof(float),cudaMemcpyHostToDevice);
    stopClock("copy data to device");

    startClock("compute");
    // use max threads/block and the required # of blocks
    int numBlocks = ceil(1.0*size/props.maxThreadsPerBlock);
    map<<<numBlocks,props.maxThreadsPerBlock>>>((float*) d_out,(float*) d_in,size);
    cudaThreadSynchronize();   // NOTE(review): deprecated; cudaDeviceSynchronize is the modern call
    stopClock("compute");

    startClock("copy data to host");
    cudaMemcpy(h_out,d_out,size*sizeof(float),cudaMemcpyDeviceToHost);
    stopClock("copy data to host");

    for (int i = 0; i < size; i++) {
        printf("%f -> %f\n",h_in[i],h_out[i]);
    }
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    printClock("copy data to device");
    printClock("compute");
    printClock("copy data to host");
}
13,506
#ifndef SOLVER_H
#define SOLVER_H

#ifndef CUDA_CALLABLE_MEMBER_OVERWRITE
#ifdef __CUDACC__
#define CUDA_CALLABLE_MEMBER __host__ __device__
#else
#define CUDA_CALLABLE_MEMBER
#endif
#endif

#include <iostream>
using namespace std;

/* (commented-out interface sketch kept from original: an abstract
   TemplateBaseClass_t requiring =, +, -, *, / and compare(), and a
   TemplateCallableClass_t requiring operator()(BaseClass_t) const) */

// Result bundle for a solver run: last error estimate, number of rejected
// steps, and the final step size.
template<typename float_tt>
struct err_n_dt_t
{
    CUDA_CALLABLE_MEMBER void operator=(const err_n_dt_t<float_tt>& _end)
    {
        err = _end.err;
        nbs = _end.nbs;
        dt  = _end.dt;
    }
    CUDA_CALLABLE_MEMBER void set(float_tt _err, unsigned int _nbs, float_tt _dt)
    {
        err = _err;
        nbs = _nbs;
        dt  = _dt;
    }
    float_tt err, dt;
    unsigned int nbs;
};

// Runge-Kutta-Fehlberg 4(5) Butcher tableau coefficients:
// a** stage weights, b* 5th-order weights, bb* embedded 4th-order weights.
struct RKF_t
{
    RKF_t(){}
    double a21 = 1./4,
           a31 = 3./32,      a32 = 9./32,
           a41 = 1932./2197, a42 = -7200./2197, a43 = 7296./2197,
           a51 = 439./216,   a52 = -8.,         a53 = 3680./513,   a54 = -845./4104,
           a61 = -8./27,     a62 = 2.,          a63 = -3544./2565, a64 = 1859./4104, a65 = -11./40,
           b1 = 16./135,  b2 = 0., b3 = 6656./12825, b4 = 28561./56430, b5 = -9./50, b6 = 2./55,
           bb1 = 25./216, bb2 = 0., bb3 = 1408./2565, bb4 = 2197./4104,  bb5 = -1./5, bb6 = 0.;
};

// Adaptive RKF45 integrator. CallableClass_t supplies the derivative via
// operator(); BaseClass_t must support +, scalar *, = and compare()
// (compare returns the error metric between two states).
template <typename CallableClass_t, typename BaseClass_t, typename float_tt>
class RK45Solver_t
{
private :
    float_tt dt_min;             // smallest step size allowed before giving up
    unsigned int nb_steps_max;   // max number of rejected-step retries
    const RKF_t RKF;
    float_tt dt, err, tol;
    err_n_dt_t<float_tt> err_n_dt;
    BaseClass_t BaseClass_out_deg,
                K1, K2, K3, K4, K5, K6,
                K1int, K2int, K3int, K4int, K5int, K6int, dX;
    CallableClass_t Callable;
    // One embedded RKF45 step from _BaseClass_in into _BaseClass_out;
    // returns the 4th/5th-order error estimate.
    CUDA_CALLABLE_MEMBER float_tt one_step(const BaseClass_t& _BaseClass_in,
                                           BaseClass_t& _BaseClass_out);
public:
    CUDA_CALLABLE_MEMBER RK45Solver_t(CallableClass_t& _Callable, float_tt _dt_min,
                                      unsigned int _nb_steps_max);
    CUDA_CALLABLE_MEMBER CallableClass_t& get_CallableClass() const;
    // Integrate over an interval of length _dt with tolerance _tol;
    // returns false when the step control gave up (see get_err_n_dt()).
    CUDA_CALLABLE_MEMBER bool operator()(float_tt _dt, float_tt _tol,
                                         const BaseClass_t& _BaseClass_in,
                                         BaseClass_t& _BaseClass_out);
    CUDA_CALLABLE_MEMBER const err_n_dt_t<float_tt>& get_err_n_dt() const;
};

template <typename CallableClass_t, typename BaseClass_t, typename float_tt>
CUDA_CALLABLE_MEMBER RK45Solver_t <CallableClass_t, BaseClass_t, float_tt>::RK45Solver_t(
    CallableClass_t& _Callable, float_tt _dt_min, unsigned int _nb_steps_max)
    : Callable(_Callable), dt_min (_dt_min), nb_steps_max (_nb_steps_max)
{}

template <typename CallableClass_t, typename BaseClass_t, typename float_tt>
CUDA_CALLABLE_MEMBER float_tt RK45Solver_t<CallableClass_t, BaseClass_t, float_tt>::one_step(
    const BaseClass_t& _BaseClass_in, BaseClass_t& _BaseClass_out)
{
    const BaseClass_t &X = _BaseClass_in;
    BaseClass_t &Y = _BaseClass_out, &Ydeg = BaseClass_out_deg;
    K1 = Callable(X);
    K1int = K1*RKF.a21*dt;
    K2 = Callable(K1int+X);
    K1int = K1*RKF.a31*dt;
    K2int = K2*RKF.a32*dt;
    K3 = Callable(K2int+K1int+X);
    K1int = K1*RKF.a41*dt;
    K2int = K2*RKF.a42*dt;
    K3int = K3*RKF.a43*dt;
    K4 = Callable(K3int+K2int+K1int+X);
    K1int = K1*RKF.a51*dt;
    K2int = K2*RKF.a52*dt;
    K3int = K3*RKF.a53*dt;
    K4int = K4*RKF.a54*dt;
    K5 = Callable(K4int+K3int+K2int+K1int+X);
    K1int = K1*RKF.a61*dt;
    K2int = K2*RKF.a62*dt;
    K3int = K3*RKF.a63*dt;
    K4int = K4*RKF.a64*dt;
    K5int = K5*RKF.a65*dt;
    // BUG FIX: the original evaluated stage 6 without K5int even though it
    // was just computed — the a65*K5 term of the RKF45 tableau was dropped.
    K6 = Callable(K5int+K4int+K3int+K2int+K1int+X);
    // 5th-order solution...
    dX = (K1*RKF.b1 + K2*RKF.b2 + K3*RKF.b3 + K4*RKF.b4 + K5*RKF.b5 + K6*RKF.b6)*dt;
    Y = X + dX;
    // ...and the difference to the embedded 4th-order solution for the error estimate.
    dX = (K1*(RKF.b1-RKF.bb1) + K2*(RKF.b2-RKF.bb2) + K3*(RKF.b3-RKF.bb3) +
          K4*(RKF.b4-RKF.bb4) + K5*(RKF.b5-RKF.bb5) + K6*(RKF.b6-RKF.bb6))*dt;
    Ydeg = X + dX;
    return Y.compare(Ydeg);
}

template <typename CallableClass_t, typename BaseClass_t, typename float_tt>
CUDA_CALLABLE_MEMBER bool RK45Solver_t<CallableClass_t, BaseClass_t, float_tt>::operator()(
    float_tt _dt, float_tt _tol,
    const BaseClass_t& _BaseClass_in, BaseClass_t& _BaseClass_out)
{
    dt = _dt;
    tol = _tol;
    unsigned int nb_steps=0;
    for(float_tt t=0; t<_dt; t+=dt)
    {
        err = one_step(_BaseClass_in, _BaseClass_out);
        while (err>tol)
        {
            // BUG FIX: scale the CURRENT step size; the original assigned
            // dt = .9*powf(...) which set dt to an absolute value ~0.9
            // regardless of the previous step. (NOTE(review): textbook RKF45
            // uses exponent 1/4 or 1/5; the original 1/3 is kept.)
            dt = .9*dt*powf(tol/err, 1./3);
            nb_steps++;
            if (dt<dt_min || nb_steps>nb_steps_max)
            {
                err_n_dt.set(err, nb_steps, dt);
                return false;   // step control failed — caller can inspect err_n_dt
            }
            err = one_step(_BaseClass_in, _BaseClass_out);
        }
    }
    err_n_dt.set(err, nb_steps, dt);
    return true;
}

template <typename CallableClass_t, typename BaseClass_t, typename float_tt>
CUDA_CALLABLE_MEMBER const err_n_dt_t<float_tt>&
RK45Solver_t<CallableClass_t, BaseClass_t, float_tt>::get_err_n_dt() const
{
    return err_n_dt;
}

#endif
13,507
/* 159.735 Semester 2, 2016.

   Ian Bond, 3/10/2016

   Sequential version of the N-sphere counting problem for Assignment
   5. Two alternative algorithms are presented.

   Note: a rethink will be needed when implementing a GPU version of
   this. You can't just cut and paste code.

   To compile: g++ -O3 -o nsphere nsphere.cpp
   (you will get slightly better performance with the O3 optimization flag)
*/
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <string>
#include <cuda.h>
#include <vector>

const long MAXDIM = 10;
const double RMIN = 2.0;
const double RMAX = 8.0;
const int MAX_POINTS_PER_THREAD = 500;  // points handled per thread per kernel launch
const int MAX_BPG_ONE_DIM = 1024;       // cap on blocks per 1-D grid
const int MAX_TPB = 1024;               // cap on threads per block

// Time difference in milliseconds between two clock() readings.
double diffclock(clock_t clock1, clock_t clock2)
{
    double diffticks = clock1 - clock2;
    double diffms = (diffticks * 1000) / CLOCKS_PER_SEC;
    return diffms; // Time difference in milliseconds
}

/*
 * Evaluate n**k where both are long integers
 */
long powlong(long n, long k)
{
    long p = 1;
    for (long i = 0; i < k; ++i) p *= n;
    return p;
}

/*
 * Convert a decimal number into another base system - the individual
 * digits in the new base are stored in the index array.
 */
void convert(long num, long base, std::vector<long>& index)
{
    const long ndim = index.size();
    for (long i = 0; i < ndim; ++i) index[i] = 0;
    long idx = 0;
    while (num != 0) {
        long rem = num % base;
        num = num / base;
        index[idx] = rem;
        ++idx;
    }
}

// Sequential reference: counts integer lattice points inside an
// ndim-dimensional sphere of the given radius by enumerating all
// base^ndim candidate points.
long count_in_v1(long ndim, double radius)
{
    const long halfb = static_cast<long>(floor(radius));
    const long base = 2 * halfb + 1;
    const double rsquare = radius * radius;

    // This is the total number of points we will need to test.
    const long ntotal = powlong(base, ndim);
    std::cout << "Points need to be test " << ntotal << std::endl;

    long count = 0;

    // Indices in x,y,z,....
    std::vector<long> index(ndim, 0);

    // Loop over the total number of points. For each visit of the loop,
    // we covert n to its equivalent in a number system of given "base".
    for (long n = 0; n < ntotal; ++n) {
        convert(n, base, index);
        double rtestsq = 0;
        for (long k = 0; k < ndim; ++k) {
            double xk = index[k] - halfb;
            rtestsq += xk * xk;
        }
        if (rtestsq < rsquare) ++count;
    }

    return count;
}

// kernel
// Each thread tests points nfrom+id, nfrom+id+nthreads, ... up to nto,
// decoding each point number into base-`base` digits on the fly and
// accumulating hits in its private counter[id] slot.
__global__ void cuda_count(int ndim, double radius, long nfrom, long nto,
                           long nthreads, int* counter)
{
    long id = blockIdx.x * blockDim.x + threadIdx.x;
    counter[id] = 0;   // re-zeroed each launch before accumulating
    // NOTE(review): this compares the thread id with an absolute point
    // index; harmless here because nto only grows across launches, but
    // `nfrom + id >= nto` reads as the intended guard.
    if (id >= nto) return;

    const long halfb = static_cast<long>(floor(radius));
    const long base = 2 * halfb + 1;
    const double rsquare = radius*radius;

    long index = 0;
    long num = nfrom + id;
    //a thread might test more than one numbers
    while (num < nto) {
        double rtestsq = 0;
        for (int i=0; i<ndim; i++) {
            long rem = num % base;
            num = num / base;
            double xk = rem - halfb;
            rtestsq += xk * xk;
        }
        // NOTE(review): counter[id] is private to this thread, so the
        // atomicAdd is not strictly required.
        if (rtestsq < rsquare ) {
            atomicAdd(&counter[id], 1);
        }
        index++;
        num = nfrom + id + nthreads*index;
    }
}

// GPU version: launches cuda_count in chunks of at most
// MAX_POINTS_PER_THREAD*nthreads points, summing per-thread counters on
// the host after each launch.
long count_in_cuda(long ndim, double radius)
{
    const long halfb = static_cast<long>(floor(radius));
    const long base = 2 * halfb + 1;

    // This is the total number of points we will need to test.
    const long ntotal = powlong(base, ndim);

    const int tpb_x = (ntotal<MAX_TPB)?ntotal:MAX_TPB;
    //use maximum MAX_BPG_ONE_DIM x 1024 threads
    int blocks = ntotal / MAX_TPB + 1;
    if (blocks > MAX_BPG_ONE_DIM) {
        blocks = MAX_BPG_ONE_DIM;
    }
    const long nthreads = tpb_x*blocks;
    int* counters = new int[nthreads];
    memset(counters, 0, sizeof(int)*nthreads);
    int* d_counters;
    cudaMalloc(&d_counters, sizeof(int)*nthreads);
    long total_count = 0;

    //invoke the kernel
    //std::cout << "Launching a grid of " << nthreads << " threads" << std::endl;
    const long points_for_each_call = MAX_POINTS_PER_THREAD*nthreads;
    long nfrom = 0;
    long nto = points_for_each_call;
    do {
        if (nto > ntotal) nto = ntotal;
        //std::cout << "will handle [" << nfrom << ", " << nto << "]\n";
        cuda_count <<<blocks, tpb_x>>>(ndim, radius, nfrom, nto, nthreads, d_counters);
        cudaError err = cudaGetLastError();
        if (err != cudaSuccess) {
            std::cout << "CUDA kernel error:\n"<<cudaGetErrorString(err)<<std::endl;
            break;
        }
        //copy the counters to host (implicitly synchronizes with the kernel)
        cudaMemcpy(counters, d_counters, sizeof(int)*nthreads, cudaMemcpyDeviceToHost);
        //sum all counters
        for (long i = 0; i < nthreads; i++) {
            total_count += counters[i];
        }
        nfrom = nto;
        nto += points_for_each_call;
    }while (nfrom < ntotal);

    cudaFree(d_counters);
    delete[] counters;
    return total_count;
}

// Runs ntrials random (radius, dimension) cases, timing and printing the
// sequential and CUDA counts side by side.
int main(int argc, char* argv[])
{
    // You can make this larger if you want
    const long ntrials = 20;
    std::cout <<"r nd Seq Count Seq Time cuda Count tcuda Time"<<std::endl;
    for (long n = 0; n < ntrials; ++n) {
        // Get a random value for the hypersphere radius between the two limits
        const double r = drand48() * (RMAX - RMIN) + RMIN;

        // Get a random value for the number of dimensions between 1 and
        // MAXDIM inclusive
        const long nd = lrand48() % (MAXDIM - 1) + 1;

        clock_t tstart = clock();
        const long count_s = count_in_v1(nd, r);
        double ts = diffclock(clock(), tstart);
        //std::cout << "Counted by sequential is "<< count_s << std::endl;

        tstart = clock();
        const long count_cuda = count_in_cuda(nd, r);
        double tp = diffclock(clock(), tstart);
        //std::cout << "Counted by CUDA is " << count_cuda << std::endl<<std::endl;
        std::cout << r << "\t " << nd << "\t" << count_s << "\t" << ts
                  <<"\t"<< count_cuda << "\t"<< tp <<std::endl;
    }
}
13,508
#include "includes.h"

#define NUM_THREADS 743511    // length of calculation
#define BLOCK_SIZE 256        // number of threads per block used in gpu calc
#define EPS 0.00005           // Epsilon for tolerance of diffs between cpu and gpu calculations
#define INCLUDE_MEMTIME false // Decides whether to include memory transfers to and from gpu in gpu timing
#define PRINTLINES 0          // Number of lines to print in output during validation

// Computes *result = *t2 - *t1 with microsecond resolution.
// Returns 1 if the difference is negative, 0 otherwise.
int timeval_subtract( struct timeval* result, struct timeval* t2, struct timeval* t1)
{
    unsigned int resolution = 1000000;
    long int diff = (t2->tv_usec + resolution * t2->tv_sec) -
                    (t1->tv_usec + resolution * t1->tv_sec);
    result->tv_sec = diff / resolution;
    result->tv_usec = diff % resolution;
    return (diff<0);
}

// Element-wise map: d_out[i] = (d_in[i] / (d_in[i] - 2.3))^3.
// Assumes the launch covers exactly the buffer length (no tail guard, as in
// the original).
__global__ void calcKernel(float* d_in, float *d_out)
{
    const unsigned int lid = threadIdx.x;                  // local id inside a block
    const unsigned int gid = blockIdx.x*blockDim.x + lid;  // global id
    // FIX: the original called double-precision pow(..., 3) with a double
    // literal 2.3, silently promoting the whole expression to double on every
    // thread. A single-precision ratio cubed is the intended computation and
    // stays well within the EPS validation tolerance.
    float ratio = d_in[gid] / (d_in[gid] - 2.3f);
    d_out[gid] = ratio * ratio * ratio;
}
13,509
#include "includes.h"

// Computes the mean of the border values of a 2-D image (nz == 1) or of the
// boundary of a 3-D volume, writing the result to *edgemean.
// data is nx*ny*nz, row-major with x fastest. No threadIdx use and no
// atomics — intended to be run by a single thread.
__global__ void get_edgemean_kernal(const float* data, float* edgemean,
                                    const int nx, const int ny, const int nz)
{
    int di = 0;           // count of accumulation steps in the 3-D branch
    float edge_sum = 0;
    float edge_mean = 0;
    size_t nxy = nx * ny;
    if (nz == 1) {
        // 2-D: top row + bottom row...
        for (int i = 0, j = (ny - 1) * nx; i < nx; ++i, ++j) {
            edge_sum += data[i] + data[j];
        }
        // ...plus left and right columns. NOTE(review): the four corner
        // pixels are summed twice, while the denominator 2*(nx+ny) treats
        // every sample once — confirm this is the intended weighting.
        for (size_t i = 0, j = nx - 1; i < nxy; i += nx, j += nx) {
            edge_sum += data[i] + data[j];
        }
        edge_mean = (float)edge_sum / (nx * 2 + ny * 2);
    } else {
        if (nx == ny && nx == nz * 2 - 1) {
            // Special geometry (nx == ny == 2*nz-1): only the last z-slice
            // contributes.
            for (size_t j = (nxy * (nz - 1)); j < nxy * nz; ++j, ++di) {
                edge_sum += data[j];
            }
        } else {
            // First and last z-slices in full.
            for (size_t i = 0, j = (nxy * (nz - 1)); i < nxy; ++i, ++j, ++di) {
                edge_sum += data[i] + data[j];
            }
        }
        int nxy2 = nx * (ny - 1);
        // First and last rows of every interior slice.
        for (int k = 1; k < nz - 1; ++k) {
            size_t k2 = k * nxy;
            size_t k3 = k2 + nxy2;
            for (int i = 0; i < nx; ++i, ++di) {
                edge_sum += data[i + k2] + data[i + k3];
            }
        }
        // First and last columns of every interior slice (rows 1..ny-2 only,
        // so slice corners are not re-counted).
        for (int k = 1; k < nz - 1; ++k) {
            size_t k2 = k * nxy;
            size_t k3 = nx - 1 + k2;
            for (int i = 1; i < ny - 1; ++i, ++di) {
                edge_sum += data[i * nx + k2] + data[i * nx + k3];
            }
        }
        // Each of the di steps added two samples (except the special case,
        // where di counted single samples — hence this divisor only matches
        // that branch approximately; NOTE(review): verify for the special case).
        edge_mean = (float)edge_sum / (di * 2);
    }
    *edgemean = edge_mean;
}
13,510
#include "ppm.h"
#include <math.h>
#include <iostream>
#include <stdio.h>

// Converts interleaved RGB int data to greyscale: one thread per pixel,
// each pixel occupying 3 consecutive ints (R, G, B).
__global__ void colorToGreyScaleConversion(int* imdata,int* outimdata,int size){
    int dex= 3*(threadIdx.x+blockIdx.x*blockDim.x);  // index of this pixel's R channel
    // Tail guard; size is the total int count (a multiple of 3), so
    // dex+1/dex+2 are in range whenever dex is.
    if (dex>= size) return;
    int r=imdata[dex];
    int g=imdata[dex+1];
    int b=imdata[dex+2];
    // Luma-style weighting. NOTE(review): 0.21+0.71+0.07 = 0.99, not 1.0 —
    // the common BT.709-ish weights are 0.21/0.72/0.07.
    int grey= round(255*( 0.21*(r/255.0)+0.71*(g/255.0)+0.07*(b/255.0)));
    // printf("Grey value is : ")
    outimdata[dex]=grey;
    outimdata[dex+1]=grey;
    outimdata[dex+2]=grey;
}

// Loads football.ppm, grey-scales it on the GPU, writes gfootball.ppm.
int main(){
    ppm football("football.ppm");
    int size=3*football.height*football.width;   // total ints (3 per pixel)
    int arsize=sizeof(int)*size;                 // byte size of the image buffer
    std::cout <<"Size is: "<< size;
    int* d_football_data;
    int* d_gfootball_data;
    cudaMalloc((void**)&d_football_data,arsize);
    cudaMalloc((void**)&d_gfootball_data,arsize );
    cudaMemcpy(d_football_data,football.data,arsize,cudaMemcpyHostToDevice);
    // NOTE(review): this second upload only pre-fills the output buffer;
    // the kernel overwrites every in-range pixel anyway.
    cudaMemcpy(d_gfootball_data,football.data,arsize,cudaMemcpyHostToDevice);
    // 1<<20 blocks of 256 threads — far more than needed; the in-kernel
    // guard discards the excess.
    colorToGreyScaleConversion<<< 1<<20 ,256>>>(d_football_data,d_gfootball_data,size);
    ppm gfootball(football);
    cudaMemcpy(gfootball.data,d_gfootball_data,arsize,cudaMemcpyDeviceToHost);
    gfootball.write("gfootball.ppm");
    cudaFree(d_gfootball_data);
    cudaFree(d_football_data);
}
13,511
#include <stdio.h>

// Each of the 100 launched threads prints a greeting with its thread index.
__global__ void helloFromeGPU(){
    printf("Hello World from GPU! %d\n",threadIdx.x);
}

int main(int argc, char **argv){
    printf("Hello World from CPU!\n");
    helloFromeGPU<<<1,100>>>();
    // FIX: explicitly wait for the kernel so its printf buffer is flushed
    // before tearing the device down — the original commented this out and
    // relied on cudaDeviceReset() alone.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
13,512
#include <stdio.h>
#include <stdlib.h>

// Each thread writes its own global index into datos.
__global__ void prueba1(int* datos)
{
    int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    datos[gid] = gid;
}

int main()
{
    int numbloques = 128;
    int numthreads = 128;
    int* d_datos;
    int* d_datos2;   // second scratch buffer (never written by the kernel)
    // FIX: the kernel writes numbloques*numthreads ints, but the original
    // allocated sizeof(int)*numbloques*(numthreads-1) - sizeof(int) bytes,
    // so the highest-indexed threads wrote out of bounds.
    cudaMalloc((void **) &d_datos, sizeof(int) * numbloques * numthreads);
    cudaMalloc((void **) &d_datos2, sizeof(int) * numbloques * numthreads);
    prueba1<<<numbloques, numthreads>>>(d_datos);
    cudaDeviceSynchronize();
    // FIX: release device memory (the original leaked both buffers).
    cudaFree(d_datos);
    cudaFree(d_datos2);
    return 0;
}
13,513
#include "includes.h"

// Gradient-descent update of a linear layer's bias vector:
// b[row] -= learning_rate * mean over the dZ_x_dim columns of dZ[row, :].
// dZ is dZ_y_dim x dZ_x_dim, row-major; one thread per dZ element, so the
// per-row accumulation into b[dZ_y] must be atomic.
// (b_x_dim is accepted for interface symmetry but unused here.)
__global__ void linearLayerUpdateBias(float* dZ, float* b,
                                      int dZ_x_dim, int dZ_y_dim,
                                      int b_x_dim, float learning_rate) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < dZ_x_dim * dZ_y_dim) {
        int dZ_x = index % dZ_x_dim;   // column within the row
        int dZ_y = index / dZ_x_dim;   // row == bias index
        // Dividing by dZ_x_dim makes the dZ_x_dim atomic adds per row sum
        // to the column-mean of that row's gradient.
        atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_y * dZ_x_dim + dZ_x] / dZ_x_dim));
    }
}
13,514
/* Block size X: 32 */
// Applies vertical flux limiting: one block per node, a single warp striding
// over that node's levels. fct_plus/fct_minus hold maxLevels entries per node;
// fct_adf_v holds maxLevels+1 flux entries per node. Each flux is scaled by
// the minimum of the limiter factors of the two levels it connects, picking
// the "plus" or "minus" pair by the flux's sign.
__global__ void fct_ale_b3_vertical(const int maxLevels,
                                    const int * __restrict__ nLevels,
                                    double * __restrict__ fct_adf_v,
                                    const double * __restrict__ fct_plus,
                                    const double * __restrict__ fct_minus)
{
    const int node = (blockIdx.x * maxLevels);            // base of this node's level data
    const int flux_index = (blockIdx.x * (maxLevels + 1)); // base of this node's fluxes
    const int maxNodeLevel = nLevels[blockIdx.x] - 1;
    /* Intermediate levels */
    // Warp-stride loop: lane t handles levels t+1, t+33, ... (stride 32
    // matches the documented block size).
    for ( int level = threadIdx.x + 1; level < maxNodeLevel; level += 32 )
    {
        double flux = 0.0;
        double ae_plus = 0.0;
        double ae_minus = 0.0;
        flux = fct_adf_v[flux_index + level];
        ae_plus = 1.0;
        ae_minus = 1.0;
        // Limiter factor for a non-negative flux: min over this level's plus
        // limit and the level above's minus limit (and vice versa below).
        ae_plus = fmin(ae_plus, fct_minus[node + (level) - 1]);
        ae_minus = fmin(ae_minus, fct_minus[node + (level)]);
        ae_plus = fmin(ae_plus, fct_plus[node + (level)]);
        ae_minus = fmin(ae_minus, fct_plus[node + (level) - 1]);
        // signbit == 0 means flux >= +0 (branch on sign, not magnitude).
        if ( signbit(flux) == 0 )
        {
            flux *= ae_plus;
        }
        else
        {
            flux *= ae_minus;
        }
        fct_adf_v[flux_index + level] = flux;
    }
    /* Top level */
    // Level 0 has no level above it, so only one limiter bound applies;
    // a single lane handles it.
    if ( threadIdx.x == 0 )
    {
        double flux = fct_adf_v[flux_index];
        double ae = 1.0;
        if ( signbit(flux) == 0 )
        {
            ae = fmin(ae, fct_plus[node]);
        }
        else
        {
            ae = fmin(ae, fct_minus[node]);
        }
        fct_adf_v[flux_index] = ae * flux;
    }
}
13,515
#include <iostream>
#include <unistd.h>
#include <stdlib.h>
#include "cuda.h"
using namespace std;

// Spins while *dwait is non-zero, incrementing *dptr each pass; once the host
// clears the flag, writes the sentinel 999.
// FIX: the pointers are volatile-qualified — without this the compiler may
// cache *dwait in a register and the loop would never observe the host-side
// update, leaving the kernel truly infinite.
__global__ void infinitekernel(volatile float *dptr, volatile int *dwait)
{
    while(*dwait)
        *dptr += 1;
    *dptr = 999;
}

int main(void)
{
    // stream[0] carries the host<->device copies, stream[1] runs the kernel,
    // so the copies are not serialized behind the spinning kernel.
    cudaStream_t stream[2];
    for (int i=0; i < 2 ; i++)
        cudaStreamCreate(&stream[i]);

    float *hptr;
    float *dptr;
    int *hwait;
    int *dwait;
    hptr = (float*)malloc(sizeof(float));
    hwait = (int*)malloc(sizeof(int));
    cudaMalloc((void **)&dptr, sizeof(float));
    cudaMalloc((void **)&dwait, sizeof(int));

    *hptr = 9;
    *hwait = 1;
    cudaMemcpyAsync(dptr, hptr, sizeof(float), cudaMemcpyHostToDevice, stream[0]);
    // FIX: the original copied sizeof(float) bytes for this int buffer.
    cudaMemcpyAsync(dwait, hwait, sizeof(int), cudaMemcpyHostToDevice, stream[0]);
    // FIX: make sure the flag is set before the kernel starts polling it.
    cudaStreamSynchronize(stream[0]);

    infinitekernel<<<1, 1, 0, stream[1]>>>(dptr,dwait);

    for(int i=0; i<10; i++) {
        sleep(1);
        cudaMemcpyAsync(hptr, dptr, sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
        // FIX: wait for the async copy before reading *hptr; the original
        // read the buffer with the copy still potentially in flight.
        cudaStreamSynchronize(stream[0]);
        cout << "["<< i << " seconds]" <<"value = " << *hptr << endl;
    }

    *hwait = 0;
    cudaMemcpyAsync(dwait, hwait, sizeof(int), cudaMemcpyHostToDevice, stream[0]);
    // NOTE(review): as in the original, this readback is ordered after the
    // flag copy on stream[0] but not after the kernel's final write on
    // stream[1], so it may observe a pre-sentinel value.
    cudaMemcpyAsync(hptr, dptr, sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
    cudaStreamSynchronize(stream[0]);
    cout <<"[Finally]" << "value = "<< *hptr << endl;

    // FIX: release resources (the original leaked all of these).
    cudaFree(dptr);
    cudaFree(dwait);
    free(hptr);
    free(hwait);
    for (int i=0; i < 2 ; i++)
        cudaStreamDestroy(stream[i]);
}
13,516
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>

// Prints the x-dimension launch limits (max threads per block dimension and
// max grid size) of device 0.
void check(){
    struct cudaDeviceProp capabilities;
    cudaGetDeviceProperties (&capabilities, 0);
    printf("maxThreadsDim:%d\n",capabilities.maxThreadsDim[0]);
    printf("maxGridSize:%d\n",capabilities.maxGridSize[0]);
}

int main(){
    check();
    return 0;
}

/* Dead code kept from the original (comments translated from Spanish):
   clamps a requested total-thread count *n and block size *cb to device 0's
   launch limits.
void checkparams(unsigned int *n, unsigned int *cb){
    struct cudaDeviceProp capabilities;

    // If the total number of threads is smaller than the block size, shrink the block
    if (*cb > *n)
        *cb = *n;

    cudaGetDeviceProperties (&capabilities, 0);

    if (*cb > capabilities.maxThreadsDim[0]) {
        *cb = capabilities.maxThreadsDim[0];
        printf("->Threads/block changed to %d (max per block for dev)\n\n", *cb);
    }

    if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
        *cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
        if (*cb > capabilities.maxThreadsDim[0]) {
            *cb = capabilities.maxThreadsDim[0];
            printf("->Threads/block changed to %d (max per block for dev)\n", *cb);
            if (*n > (capabilities.maxGridSize[0] * *cb)) {
                *n = capabilities.maxGridSize[0] * *cb;
                printf("->Total threads changed to %d (max per grid for dev)\n\n", *n);
            } else {
                printf("\n");
            }
        } else {
            printf("->Threads/block changed to %d (%d max blocks/grid for dev)\n\n",
                   *cb, capabilities.maxGridSize[0]);
        }
    }
}
*/
13,517
/* Voxel sampling GPU implementation
 * Author Zhaoyu SU
 * All Rights Reserved. Sep., 2019.
 */
#include <stdio.h>
#include <iostream>
#include <float.h>

//__global__ void output_init_gpu_kernel(int center_num, int kernel_num,
//                                       float padding, int channels,
//                                       float* output_features) {
//    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
//    if (thread_id < center_num * kernel_num) {
//        for (int c=0; c<channels; c++) {
//            output_features[thread_id*channels + c] = padding;
//        }
//    }
//}

// Forward pass: for each (voxel, channel) pair, average the features of the
// points gathered into that voxel. output_idx lists up to output_pooling_size
// point ids per voxel (negative = empty slot). Voxels with no points get the
// `padding` value.
// One thread per (center, kernel, channel) element.
__global__ void voxel_sampling_feature_gpu_kernel(int center_num, int channels,
                                                  int kernel_num, float padding,
                                                  int output_pooling_size,
                                                  const float* input_features,
                                                  const int* output_idx,
                                                  float* output_features) {
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    int c = thread_id % channels;          // channel handled by this thread
    int voxel_coor = thread_id / channels; // flat (center, kernel) voxel index
    if (thread_id < center_num * kernel_num * channels) {
        int pooling_count = 0;
        // NOTE(review): output_features is accumulated with += — it is
        // presumably zero-initialized by the caller; verify.
        for (int p=0; p<output_pooling_size; p++) {
            int point_id = output_idx[voxel_coor*output_pooling_size + p];
            if (point_id >= 0) {
                output_features[thread_id] += input_features[point_id * channels + c];
                pooling_count += 1;
                // printf("%f\n", input_features[point_id * channels + c]);
            }
        }
        // if (pooling_count > 1)
        //     printf("%f\n", output_features[thread_id]);
        if (pooling_count > 0)
            output_features[thread_id] /= pooling_count;   // mean-pool
        if (pooling_count == 0)
            output_features[thread_id] = padding;          // empty voxel
    }
}

// Backward pass: scatter each voxel's output gradient back to the points that
// contributed to it, divided by the pooling count (gradient of the mean).
// One thread per (center, kernel) voxel; atomicAdd because a point can belong
// to several voxels.
__global__ void voxel_sampling_feature_grad_gpu_kernel(int center_num, int kernel_num,
                                                       int channels,
                                                       int output_pooling_size,
                                                       const int* output_idx,
                                                       const float* output_features_grad,
                                                       float* input_features_grad) {
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id < center_num * kernel_num) {
        int pooling_count = 0;
        for (int p=0; p<output_pooling_size; p++) {
            if (output_idx[thread_id*output_pooling_size + p] >= 0)
                pooling_count += 1;
        }
        // NOTE(review): this second loop assumes the valid (>= 0) ids occupy
        // the FIRST pooling_count slots; the forward kernel tolerates gaps.
        // Confirm the producer of output_idx packs ids contiguously.
        for (int p=0; p<pooling_count; p++) {
            int point_id = output_idx[thread_id*output_pooling_size + p];
            for (int c=0; c<channels; c++) {
                atomicAdd(&input_features_grad[point_id*channels + c],
                          output_features_grad[thread_id*channels + c] / pooling_count);
            }
        }
    }
}

// Host launcher for the forward kernel; block size chosen by the occupancy API.
void voxel_sampling_feature_gpu_launcher(int center_num, int kernel_num, int channels,
                                         float padding, int output_pooling_size,
                                         const float* input_features,
                                         const int* output_idx,
                                         float* output_features) {
    if (center_num * channels <= 0) {
        printf("VoxelSampleFeatureOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize;    // The actual grid size needed, based on input size

    // cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, output_init_gpu_kernel, 0, center_num * kernel_num);
    // gridSize = (center_num * kernel_num + blockSize - 1) / blockSize;
    // output_init_gpu_kernel<<<gridSize, blockSize>>>(center_num, kernel_num,
    //                                                 padding, channels,
    //                                                 output_features);

    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
                                       voxel_sampling_feature_gpu_kernel, 0,
                                       center_num * kernel_num * channels);
    gridSize = (center_num * kernel_num * channels + blockSize - 1) / blockSize;
    voxel_sampling_feature_gpu_kernel<<<gridSize, blockSize>>>(center_num, channels,
                                                               kernel_num, padding,
                                                               output_pooling_size,
                                                               input_features,
                                                               output_idx,
                                                               output_features);
}

// Host launcher for the backward kernel; mirrors the forward launcher.
void voxel_sampling_feature_grad_gpu_launcher(int center_num, int kernel_num, int channels,
                                              int output_pooling_size,
                                              const int* output_idx,
                                              const float* output_features_grad,
                                              float* input_features_grad) {
    if (center_num==0 || kernel_num*channels == 0) {
        printf("VoxelSampleGradOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize;    // The actual grid size needed, based on input size
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
                                       voxel_sampling_feature_grad_gpu_kernel, 0,
                                       center_num * kernel_num);
    gridSize = (center_num * kernel_num + blockSize - 1) / blockSize;
    voxel_sampling_feature_grad_gpu_kernel<<<gridSize, blockSize>>>(center_num, kernel_num,
                                                                    channels,
                                                                    output_pooling_size,
                                                                    output_idx,
                                                                    output_features_grad,
                                                                    input_features_grad);
}
13,518
#include "includes.h"

// Nearest-neighbor image resampling: every thread produces one output pixel by
// sampling the closest input pixel. Launch with enough 1-D blocks (optionally
// laid out on a 2-D grid) to cover outputWidth * outputHeight threads.
__global__ void NNResampleKernel(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
    // Flatten the 2-D grid of 1-D blocks into one linear output-pixel index.
    const int tid = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    const int totalPixels = outputWidth * outputHeight;
    if (tid >= totalPixels)
        return;  // tail threads past the last pixel do nothing

    const int outX = tid % outputWidth;
    const int outY = tid / outputWidth;

    // Output->input scale factors; (dim - 1) keeps the sampled coordinate
    // strictly inside the source image even at the far edge.
    const float scaleX = (float)(inputWidth - 1) / (outputWidth);
    const float scaleY = (float)(inputHeight - 1) / (outputHeight);

    // Sample at the center of the output pixel (+0.5), truncated to the
    // nearest source index.
    const int srcX = (int)(scaleX * (outX + .5f));
    const int srcY = (int)(scaleY * (outY + .5f));

    output[outY * outputWidth + outX] = input[srcY * inputWidth + srcX];
}
13,519
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cufft.h>
#include <malloc.h>

/*
 * Demo: run a 1000-point in-place inverse complex-to-complex FFT with cuFFT
 * and print the result. Requires linking against cufft.lib.
 *
 * Fixes over the original: error checking on allocation/plan creation, and the
 * host buffer and cuFFT plan are now released (both were leaked).
 */
int main()
{
    int length = 1000;

    /* Build the input signal on the host: real part = sample index, imag = 0. */
    float2 *src = (float2 *)malloc(length * sizeof(float2));
    if (src == NULL) {
        fprintf(stderr, "Failed to allocate host buffer\n");
        return 1;
    }
    for (int i = 0; i < length; i++) {
        src[i].x = i; /* real part */
        src[i].y = 0; /* imaginary part */
    }

    float2 *src_d;
    if (cudaMalloc(&src_d, length * sizeof(float2)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        free(src);
        return 1;
    }
    cudaMemcpy(src_d, src, length * sizeof(float2), cudaMemcpyHostToDevice);

    /* Plan and execute an in-place inverse C2C transform. */
    cufftHandle plan;
    if (cufftPlan1d(&plan, length, CUFFT_C2C, 1) != CUFFT_SUCCESS) {
        fprintf(stderr, "cufftPlan1d failed\n");
        cudaFree(src_d);
        free(src);
        return 1;
    }
    if (cufftExecC2C(plan, src_d, src_d, CUFFT_INVERSE) != CUFFT_SUCCESS) {
        fprintf(stderr, "cufftExecC2C failed\n");
    }

    /* cudaMemcpy synchronizes with the transform before reading the result.
     * NOTE: cuFFT inverse transforms are unnormalized — values are scaled by
     * `length` relative to the mathematical inverse DFT. */
    cudaMemcpy(src, src_d, length * sizeof(float2), cudaMemcpyDeviceToHost);
    for (int i = 0; i < length; i++)
        printf("%d, real: %f, imag: %f \n", i, src[i].x, src[i].y);

    cufftDestroy(plan); /* was leaked */
    cudaFree(src_d);
    free(src);          /* was leaked */
    return 0;
}
13,520
// Author: Noah Van Der Weide // 3/30/2020 // upscale the image by doubling height and width // fill in empty areas according to neighboring pixels and difference thresholds // THREE APPROACHES // FIRST APPROACH: // each thread will process one pixel // SECOND APPROACH: // each thread will process one original pixel and surrounding pixels // THIRD APPROACH: // each thread will process one original pixel and pixels to the right and below // Two filling approaches: // First: // Tackle everything at once. // Second: // Stretch out original image and fill in adjacent pixels with original pixel value, // Then go through and SAXPY if original pixel differences aren't too great. // dimension of image: upper left = (0,0), bottom right = (width-1, height-1) // *img_original is the original image // *img_new width = *img_original width * 3 - 2 // *img_new width = *img_original height * 3 - 2 // 8 bits per color (0 - 255) // upscale function is called independently for each color. // this allows it to be faster for black and white images as it only needs to be called once. // Can therefore also be applied to images which use a different color map than RGB (JPEG, for example). /* #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <vector> #include <stdlib.h> #include <string.h> #include "upscale.cuh" #define THREADS_PER_BLOCK 64 //__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) { __global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) { // not using shared memory right now // there is 48 KB of shared memory available. 
// images are typically more than that, so I'll have to think about how it could be implemented //extern __shared__ unsigned char pic[]; //int pixel = blockIdx.x * blockdim.x + threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; // not relevant to code function, but shows how a thread could access a pixel in every channel. // pixel values are from 0 to 255. //for (int k = 0; k < channels; k++){ // img[idx + k]; //} int dst_width = src_width * 3 - 2; //int dst_height = src_height * 3 - 2; //long int dst_elements = dst_width * dst_height * src_channels; //long int src_elements = src_width * src_height * src_channels; int src_stride = src_width * src_channels; int dst_stride = dst_width * src_channels; // if invalid location do nothing. //if (i >= dst_width || j >= dst_height) // is that width or width-1? if (i >= src_width || j >= src_height) return; // all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels. for (int k = 0; k < src_channels; k++) { int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2) int src_index = (j * src_width + i) + k; // transfer known src values to dst // to access different channels, the number of elements of the src/dst image must be added to the respective array index. dst[dst_index] = src[src_index]; // vertical comparison acts on src image and applies values to dst image int y_diff = src[src_index + src_stride] - src[src_index]; if (y_diff < threshold) { // apply third-average // linear fill int step = y_diff / 3; dst[dst_index + dst_stride] = src[src_index] + step; dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2; } else { // nearest neighbor dst[dst_index + dst_stride] = src[src_index]; dst[dst_index + 2 * dst_stride] = src[src_index + src_stride]; } __syncthreads(); // horizontal // I know this is painfully inefficient. 
int x_diff_0 = src[src_index] - src[src_index + src_channels]; int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels]; int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels]; int step = 0; if (x_diff_0 < threshold) { // apply third-average // linear fill step = x_diff_0 / 3; dst[dst_index + 1] = src[src_index] + step; dst[dst_index + 2] = src[src_index] + step * 2; } else { // nearest neighbor dst[dst_index + src_channels] = src[src_index]; dst[dst_index + 2*src_channels] = src[src_index + src_channels]; } if (x_diff_1 < threshold) { // apply third-average // linear fill step = x_diff_1 / 3; dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step; dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2; } else { // nearest neighbor dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride]; dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3]; } if (x_diff_2 < threshold) { // apply third-average // linear fill step = x_diff_2 / 3; dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step; dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2; } else { // nearest neighbor dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride]; dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3]; } __syncthreads(); } __syncthreads(); } __global__ void upscale(unsigned char threshold) { // CUDA timing parameters cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float ms; // file handling using opencv // IMREAD_COLOR loads image in BGR 8-bit format // IMREAD_UNCHANGED includes alpha channel // IMREAD_GRAYSCALE loads as intensity 0-1 // load image string image_path = samples::findFile("peppers.png"); Mat src = imread(image_path, IMREAD_COLOR); // check if image loaded 
properly if (src.empty()) { cout << "Could not read image: " << image_path << endl; return 1; } // ------------------------------------------ // properties of the source and upscaled image // ------------------------------------------ // input dimensions int src_height = src.rows; int src_width = src.cols; // channels (e.g. Red, Green, Blue) int channels = src.channels(); //int type = src.type; // CV_8UC3? // output dimensions int dst_height = src_height * 3 - 2; int dst_width = src_width * 3 - 2; // number of elements (if a picture has 3 channels, this is 3 * pixels) int dst_elements = dst_width * dst_height * channels; int src_elements = src_width * src_height * channels; // number of bytes each image will take int dst_size = dst_elements * sizeof(unsigned char); int src_size = src_elements * sizeof(unsigned char); // create new image with same datatype as input Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0)); //Mat dst(dst_height, dst_width, type); // image data for upscale function unsigned char* src_img = src.data; unsigned char* dst_img = dst.data; cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl; // initialize device variables unsigned char* dev_src, * dev_dst; // number of blocks to call in kernel. 
Max threads per block is usually 1024 int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; // allocate memory in GPU cudaMalloc((void**)&dev_dst, dst_size); cudaMalloc((void**)&dev_src, src_size); // used for shared memory if eventually implemented //cudaMallocManaged(&dst, dst_size); //cudaMallocManaged(&src, src_size); // copy data from CPU to GPU cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice); // start timer for performance evaluation cudaEventRecord(start); // call upscale function //upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>> dim3 grid(src_width, src_height); upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold); cudaDeviceSynchronize(); // end timer cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); // copy data back from GPU to CPU cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost); cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this // free GPU cudaFree(dev_dst); cudaFree(dev_src); // create output image. 
I might not need another Mat -- just use 'dst' instead of 'output' //Mat output = Mat(dst_height, dst_width, type, dst); imshow("source", src); imshow("output", dst); imwrite("upscaled_image.png", dst); waitKey(0); std::cout << "\ntime (ms) = " << ms << std::endl; } /* void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){ unsigned char* dev_src, * dev_dst; int dst_width = src_width * 3 - 2; int dst_height = src_height * 3 - 2; int dst_elements = dst_width * dst_height * src_channels; int src_elements = src_width * src_height * src_channels; int dst_size = dst_elements * sizeof(unsigned char); int src_size = src_elements * sizeof(unsigned char); int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; // allocate memory in GPU cudaMalloc((void**)&dev_dst, dst_size); cudaMalloc((void**)&dev_src, src_size); //cudaMallocManaged(&dst, dst_size); //cudaMallocManaged(&src, src_size); // copy data from CPU to GPU cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice); upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>> cudaDeviceSynchronize(); // copy data back from GPU to CPU cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost); // free GPU cudaFree(dev_dst); cudaFree(dev_src); } */
13,521
// Shared box-blur worker: average of all in-bounds pixels in the
// (2*radius+1) x (2*radius+1) window centered at (row, col). Pixels outside
// the image are simply excluded, so the divisor shrinks at the borders
// (divisor is always >= 1 because the center pixel itself is in range).
static __device__ __forceinline__ unsigned char boxBlurPixel(const unsigned char* A,
                                                             int rows, int cols,
                                                             int row, int col,
                                                             int radius)
{
    int pixVal = 0;
    int pixels = 0;
    for (int blurRow = -radius; blurRow < radius + 1; blurRow++) {
        for (int blurCol = -radius; blurCol < radius + 1; blurCol++) {
            int curRow = row + blurRow;
            int curCol = col + blurCol;
            if (curRow > -1 && curRow < rows && curCol > -1 && curCol < cols) {
                pixVal += A[curRow * cols + curCol];
                pixels++;
            }
        }
    }
    return (unsigned char)(pixVal / pixels);
}

// 3x3 box blur of single-channel image A into B. One thread per pixel;
// launch with a 2-D grid covering (cols, rows).
extern "C" __global__ void blur3x3(unsigned char* A, unsigned char* B, const int rows, const int cols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < cols && row < rows) {
        B[row * cols + col] = boxBlurPixel(A, rows, cols, row, col, 1);
    }
}

// 9x9 box blur (radius 4); same launch geometry as blur3x3.
extern "C" __global__ void blur9x9(unsigned char* A, unsigned char* B, const int rows, const int cols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < cols && row < rows) {
        B[row * cols + col] = boxBlurPixel(A, rows, cols, row, col, 4);
    }
}

// General (2M+1)x(2M+1) box blur with caller-supplied radius M; same launch
// geometry as blur3x3. blur3x3/blur9x9 are the M=1 and M=4 special cases.
extern "C" __global__ void blurMxM(unsigned char* A, unsigned char* B, const int rows, const int cols, const int M)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < cols && row < rows) {
        B[row * cols + col] = boxBlurPixel(A, rows, cols, row, col, M);
    }
}
13,522
//============================================================================
// Name : MF6.cpp
// Author : Sohrab
// Version : 1
// Copyright : Hi!
// Description : Matched Filter in C++, Ansi-style
//============================================================================
#include <iostream>
#include <string>
#include <cmath>
#include <math.h>
#include <ctime>
#include <complex>
#include <vector>
#include <string>
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// references (dataset identifiers for the recording being processed)
#define date_ref "1015"
#define obj_ref "1"
#define run_ref 2
// 0: down sampling, 1: averaging, 2: nothing
#define average 2
//internal distance
#define int_dst 2.615
// TX relative position to TX's starting point
#define Tx_pos_x 0.41
#define Tx_pos_y -0.028
#define Tx_pos_z -0.012
//starting point of using samples
#define N_spl_fr 1000
#define N_lfreq_spl 0
#define N_hfreq_spl 0
// NOTE(review): the next two macros expand unparenthesized, e.g.
// 2*N_mfreq_spl becomes 2*(N_spl_fr/2)-N_lfreq_spl-N_hfreq_spl. This happens
// to give the intended values only because N_lfreq_spl and N_hfreq_spl are 0;
// wrap them in parentheses before changing those constants.
#define N_mfreq_spl (N_spl_fr/2)-N_lfreq_spl-N_hfreq_spl
#define N_mfreq_spl_slow 2*((N_spl_fr/2)-N_lfreq_spl-N_hfreq_spl)
// Number of frames for each axis
#define N_x_stg 20 //1667
#define N_z_stg 20
//constants
#define Ts 6e-3
#define Rs 5e5
#define lambda 4.983873e-3
//step size between each two frames considered
#define dlx 0.005 * 0.006
#define dlz lambda/2
#define linxmax -dlx/2-(N_x_stg-1)*dlx
#define linzmax -dlz/2-(N_z_stg-1)*dlz
// environment dimensions
#define xmin -5
#define xmax 5
#define ymin 0
#define ymax 10
#define zmin -1.5
#define zmax 1.5
//resolution
#define res 0.07
//scientific values for some constants
#define sci_fac 1e8
#define c_sci 2.9979
#define fc_sci 609
#define As1_sci 1.5001e4
#define As2_sci 7.5005e3
#define file_size 100020000
#define size1 3000
#define BLOCK_WIDTH 8
// Flat-index accessors for the 3-D arrays stored in 1-D buffers.
#define Beat_I(uu, vv, nn) Beat_I[uu*N_z_stg*size1 + vv*size1 + nn]
#define Beat_R(uu, vv, nn) Beat_R[uu*N_z_stg*size1 + vv*size1 + nn]
#define cell_MF_R(xx, yy, zz) cell_MF_R[xx*Ny*Nz + yy*Nz + zz]
#define cell_MF_I(xx, yy, zz) cell_MF_I[xx*Ny*Nz + yy*Nz + zz]
#define deviceCellMF(xx, yy, zz) deviceCellMF[xx*Ny*Nz + yy*Nz + zz]
#define SQR(s) ((s) * (s))
// Complex multiply (r1+i*i1)*(r2+i*i2) -> (r3, i3).
// NOTE(review): expands to two statements and does not parenthesize its
// arguments — only safe when used as a full statement with simple arguments,
// which is how it is used below.
#define mult_complex(r1, i1, r2, i2, r3, i3) r3 = r1* r2 - i1*i2; i3 = r1* i2 + r2* i1;
using namespace std;
/****************** FUNCTIONS ******************/
// 3-D index decomposed from a flat index k over an n1 x n2 x n3 volume
// (kx varies fastest). Currently unused by main().
struct indices {
    int kx;
    int ky;
    int kz;
};
indices idxfinder(int n1, int n2, int n3, int k) {
    k = k % (n1 * n2 * n3);
    indices I;
    I.kx = k % n1;
    I.ky = ((int) floor(k / n1)) % n2;   // NOTE(review): k/n1 is already integer division; floor is redundant
    I.kz = ((int) floor(k / (n1 * n2))) % n3;
    return I;
}
/****************************************************/
/************* KERNEL CALL *************************/
// Matched-filter back-projection: one thread per voxel (xx, yy, zz) of the
// Nx x Ny x Nz imaging volume. Each thread correlates the recorded beat
// signals from all N_x_stg x N_z_stg receiver positions against the expected
// phase history for its voxel and accumulates the complex sum into
// cell_MF_R/cell_MF_I. Launched with a 3-D grid of BLOCK_WIDTH^3 blocks.
__global__ void matchedFilterKernel(float* Beat_R, float* Beat_I, float* cell_MF_R, float* cell_MF_I, int Nx, int Ny, int Nz) {
// Voxel-center / receiver-position coordinate helpers (undef'd at the end).
#define MF_x_axis(xx) (xx*res + xmin)
#define MF_y_axis(yy) (yy*res + ymin)
#define MF_z_axis(zz) (zz*res + zmin)
#define u_axis(uu) (-dlx/2 - uu*dlx)
#define v_axis(vv) (-dlz/2 - vv*dlz)
    const float pi = acosf(-1);
    int xx, yy, zz;
    xx = blockIdx.x * blockDim.x + threadIdx.x;
    yy = blockIdx.y * blockDim.y + threadIdx.y;
    zz = blockIdx.z * blockDim.z + threadIdx.z;
    // Bounds guard: grid is ceil-divided, so tail threads must do nothing.
    if(xx < Nx && yy < Ny && zz < Nz) {
        float cell_z = MF_z_axis(zz);
        float cell_y = MF_y_axis(yy);
        float cell_x = MF_x_axis(xx);
        float cell_sum_R = 0;
        float cell_sum_I = 0;
        // for(int nn = 0; nn < size1; nn++) // 3000
        // Beat[nn] = Beat_R(uu, vv, nn) + i_float * Beat_I(uu, vv, nn);
        // __shared__ complex<float> Beat[size1]
        // One-way distance from the transmitter to this voxel (constant over
        // all receivers, so hoisted out of the loops).
        float cell_dist_t = sqrtf( SQR(cell_x - Tx_pos_x) + SQR(cell_y - Tx_pos_y) + SQR(cell_z - Tx_pos_z));
        for (int uu = 0; uu < N_x_stg; uu++) {
            float x_diff = SQR(cell_x - u_axis(uu));
            for (int vv = 0; vv < N_z_stg; vv++) { // 2d receiver
                // Round-trip delay TX -> voxel -> receiver (uu, vv), in the
                // scaled units set by c_sci.
                float temp_tau = (cell_dist_t + int_dst * 2 + sqrtf( x_diff + SQR(cell_z - v_axis(vv)) + SQR(cell_y)) ) / c_sci;
                float tmp_zero = (2.0 * fc_sci * temp_tau);
                // NOTE(review): per-thread arrays of 500/1000 floats — these
                // spill to local (off-chip) memory; a likely performance
                // bottleneck worth profiling.
                float cell_sig_fst_temp_R[N_mfreq_spl];
                float cell_sig_fst_temp_I[N_mfreq_spl];
                float cell_sig_slow_temp_R[N_mfreq_spl_slow];
                float cell_sig_slow_temp_I[N_mfreq_spl_slow];
                // Fast chirp, up-sweep half: build the reference phasor and
                // correlate (conjugate multiply: note the -= on the imaginary
                // part) against the recorded beat signal.
                for (int nn = 0; nn < N_mfreq_spl; nn++) { // for each fixed receiver and object location, 3000 samples
                    float tmp = tmp_zero + (2.0 * As1_sci * (N_lfreq_spl / Rs + nn / Rs) * temp_tau);
                    sincosf(tmp*pi, &cell_sig_fst_temp_I[nn], &cell_sig_fst_temp_R[nn]);
                    float temp_R, temp_I;
                    mult_complex(cell_sig_fst_temp_R[nn], cell_sig_fst_temp_I[nn], Beat_R(uu, vv, nn), Beat_I(uu, vv, nn), temp_R, temp_I);
                    cell_sum_R += temp_R;
                    cell_sum_I -= temp_I;
                }
                // Fast chirp, down-sweep half: reuses the cached phasors in
                // mirrored order instead of recomputing sincosf.
                for (int nn = N_mfreq_spl; nn < 2*N_mfreq_spl; nn++) {
                    float temp_R, temp_I;
                    mult_complex(cell_sig_fst_temp_R[2*N_mfreq_spl-1-nn], cell_sig_fst_temp_I[2*N_mfreq_spl-1-nn], Beat_R(uu, vv, nn), Beat_I(uu, vv, nn), temp_R, temp_I);
                    cell_sum_R += temp_R;
                    cell_sum_I -= temp_I;
                }
                // Slow chirp, up-sweep half (samples offset by 2*N_mfreq_spl).
                for (int nn = 0; nn < N_mfreq_spl_slow; nn++) {
                    float tmp = tmp_zero + (2.0 * As2_sci * (N_lfreq_spl * 2 / Rs + nn / Rs) * temp_tau);
                    sincosf(tmp*pi, &cell_sig_slow_temp_I[nn], &cell_sig_slow_temp_R[nn]);
                    float temp_R, temp_I;
                    mult_complex(cell_sig_slow_temp_R[nn], cell_sig_slow_temp_I[nn], Beat_R(uu, vv, nn+2*N_mfreq_spl), Beat_I(uu, vv, nn+2*N_mfreq_spl), temp_R, temp_I);
                    cell_sum_R += temp_R;
                    cell_sum_I -= temp_I;
                }
                // Slow chirp, down-sweep half (mirrored reuse, as above).
                for (int nn = N_mfreq_spl_slow; nn < 2* N_mfreq_spl_slow; nn++) {
                    float temp_R, temp_I;
                    mult_complex(cell_sig_slow_temp_R[2*N_mfreq_spl_slow-1-nn], cell_sig_slow_temp_I[2*N_mfreq_spl_slow-1-nn], Beat_R(uu, vv, nn+2*N_mfreq_spl), Beat_I(uu, vv, nn+2*N_mfreq_spl), temp_R, temp_I);
                    cell_sum_R += temp_R;
                    cell_sum_I -= temp_I;
                }
            }
        }
        cell_MF_R(xx, yy, zz) = cell_sum_R;
        cell_MF_I(xx, yy, zz) = cell_sum_I;
    }
#undef MF_x_axis
#undef MF_y_axis
#undef MF_z_axis
#undef u_axis
#undef v_axis
}
/**************************************************/
// Driver: fill the beat buffers with a test pattern (the real .bin readers
// are disabled below), run the matched filter on the GPU, and print a corner
// of the result volume with timing information.
int main(void) {
    cudaError_t err = cudaSuccess;
    /************* LARGE ARRAY DECLRATATIONS AND NX, NY, NZ************/
    // Small test volume; the commented values are the full-size grid.
    int Nx = 10; // (int) floor((xmax-xmin)/res)+1; //143
    int Ny = 10; //(int) floor((ymax-ymin)/res)+1; //143
    int Nz = 43; //(int) floor((zmax-zmin)/res)+1; //43
    // complex<float> cell_sig_fst[N_x_stg][N_z_stg][N_mfreq_spl];
    // complex<float> cell_sig_slow[N_x_stg][N_z_stg][N_mfreq_spl_slow];
    // Allocate host memory
    float* Beat_R = (float *)malloc(N_x_stg * N_z_stg * size1 * sizeof(float)); //[N_x_stg][N_z_stg][size1] = {};
    float* Beat_I = (float *)malloc(N_x_stg * N_z_stg * size1 * sizeof(float)); //[N_x_stg][N_z_stg][size1] = {};
    float* cell_MF_R = (float*)malloc(Nx * Ny * Nz * sizeof(float)); //[Nx][Ny][Nz] 143 * 143 *43
    float* cell_MF_I = (float*)malloc(Nx * Ny * Nz * sizeof(float)); //[Nx][Ny][Nz] 143 * 143 *43
    // Verify that allocations succeeded
    if (Beat_R == NULL || Beat_I == NULL || cell_MF_R == NULL || cell_MF_I == NULL ) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    /**************************************************/
    clock_t begin = clock();
    clock_t end;
    // srand (time(NULL));
    // Synthetic input: unit real signal, zero imaginary part.
    for (int ii = 0; ii < N_z_stg; ii++)
        for (int jj = 0; jj < N_x_stg; jj++)
            for (int kk = 0; kk < size1; kk++) {
                Beat_R(jj, ii, kk) = 1;
                Beat_I(jj, ii, kk) = 0;
            }
    /*********** READ THE .BIN FILES ************/
    // NOTE(review): the original readers for testReal.bin / testImag.bin
    // (per-frame fseek/fread into Beat_R/Beat_I) are disabled; the constant
    // test pattern above is used instead. Re-enable them to process real data.
    /******************** END OF READ FILE *********************/
    //some constants
    const float pi = acos(-1);  // NOTE(review): unused in main(); the kernel computes its own pi
    // (debug print loops over cell_MF_R/cell_MF_I removed from the active path)
    float* deviceBeatI;
    float* deviceBeatR;
    float* deviceCellMF_R;
    float* deviceCellMF_I;
    clock_t begin_mem = clock();
    // Allocate GPU memory
    err = cudaMalloc((void **) &deviceBeatR , N_z_stg * N_x_stg * size1 * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate deviceBeatR (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &deviceBeatI , N_z_stg * N_x_stg * size1 * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate deviceBeatI (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &deviceCellMF_R , Nx * Ny * Nz * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate deviceCellMF (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &deviceCellMF_I , Nx * Ny * Nz * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate deviceCellMF (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(deviceBeatR, Beat_R, N_z_stg * N_x_stg * size1 * sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy deviceBeatR from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(deviceBeatI, Beat_I, N_z_stg * N_x_stg * size1 * sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy deviceBeatI from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cout << "hi!" << endl;
    // 3-D launch: one thread per voxel, ceil-divided BLOCK_WIDTH^3 blocks.
    dim3 DimGrid(ceil(Nx * 1.0 / BLOCK_WIDTH), ceil(Ny * 1.0 / BLOCK_WIDTH), ceil(Nz * 1.0 /BLOCK_WIDTH));
    dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH, BLOCK_WIDTH);
    cout << "Allocating & copying memory DONE! Time taken:" << (double) (clock() - begin_mem) / CLOCKS_PER_SEC;
    matchedFilterKernel<<<DimGrid, DimBlock>>>(deviceBeatR, deviceBeatI, deviceCellMF_R, deviceCellMF_I, Nx, Ny, Nz);
    cudaDeviceSynchronize();
    // Catch both launch-configuration and asynchronous execution errors.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch matchedFilterKernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    begin_mem = clock();
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(cell_MF_R, deviceCellMF_R, Nx * Ny * Nz * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy deviceCellMF from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(cell_MF_I, deviceCellMF_I, Nx * Ny * Nz * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy deviceCellMF from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cout << "Copying memory back DONE! Time taken:" << (double) (clock() - begin_mem) / CLOCKS_PER_SEC;
    cout << "Hi! \n";
    cout << Nx << endl;
    cout << Ny << endl;
    cout << Nz << endl;
    cout << N_x_stg << endl;
    cout << N_z_stg << endl;
    cout << N_mfreq_spl_slow << endl;
    end = clock();
    cout << "DONE! Time taken:" << (double) (end - begin) / CLOCKS_PER_SEC << endl;
    // Print a small corner of the result volume for a quick sanity check.
    for (int i = 0; i < 1; i++){
        for (int j = 0; j < 5; j++){
            for (int k = 0; k < 5; k++) {
                cout << "(" << cell_MF_R(k, j, i) << ", " << cell_MF_I(k, j, i) << ") ";
            }
            std::endl( std::cout );
        }
        std::endl( std::cout );
    }
    cudaFree(deviceBeatR);
    cudaFree(deviceBeatI);
    cudaFree(deviceCellMF_R);
    cudaFree(deviceCellMF_I);
    free(Beat_R);
    free(Beat_I);
    free(cell_MF_R);
    free(cell_MF_I);
    return 0;
}
13,523
#include<stdio.h>
#include<iostream>
#include<math.h>

// Summation kernel: each thread strides through `a` in steps of 16384
// (apparently the intended total thread count) doing a tree-style pairwise
// accumulation, then folds in a carried partial via shared memory.
//
// NOTE(review): this kernel has several serious correctness problems:
//  - tid uses only the .x components (blockDim.x*blockIdx.x+threadIdx.x), but
//    main() launches dim3(32,4) x dim3(32,4) — the y dimensions are ignored,
//    so many threads alias the same tid (max distinct tid is 1023, not 16384).
//  - a[tid+j]=a[tid]+a[tid+j] races across threads/blocks: neighboring tids
//    read and write overlapping elements with no synchronization (and
//    __syncthreads() cannot order blocks at all).
//  - tid+j and a[16384*n] can index past len (16384*1954 > 32000000).
//  - every thread of a block writes mem[n], and it is read back with no
//    barrier in between; n also exceeds the mem[1954] bound for large tids'
//    iteration counts.
//  - the inner loop bound i < logf(len) compares an int against a float
//    (natural log, not log2) — almost certainly not the intended depth.
// The kernel needs a rewrite (e.g. a standard shared-memory/CUB block
// reduction) rather than a patch; left byte-identical here.
__global__ void add(int *a,int len)
{
    __shared__ int mem[1954];
    int tid =blockDim.x*blockIdx.x+threadIdx.x;
    int n=1;
    for(;tid<len && tid<16384*n;tid=tid+16384)
    {
        __syncthreads();
        for(int i=0;i<logf(len);i++)
        {
            int j=powf(2,i);
            a[tid+j]=a[tid]+a[tid+j]; //sum of elements for depth of logflen
        }
        mem[n]=a[16384*n];
        a[tid]+=mem[n];
        __syncthreads();
        n++;
    }
}

// Driver: fill 32M random ints, run the kernel once, and report elapsed time.
// NOTE(review): no CUDA error checking anywhere; the stop event is recorded
// after the blocking D2H memcpy, so the "elapsed time" includes the copy;
// host buffer `a` is never freed and the events are never destroyed.
int main(void)
{
    int len=32000000;
    int *a_d;
    int *a=(int *)malloc(sizeof(int)*len);
    for(int i=0;i<len;i++)
    {
        a[i]=rand()%10;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((int **)&a_d,sizeof(int)*len);
    cudaMemcpy(a_d,a,sizeof(int)*len,cudaMemcpyHostToDevice);
    // 128 threads/block, 128 blocks — but see the tid aliasing note above.
    dim3 threadsPerBlock(32,4);
    dim3 blocksPerGrid(32,4);
    cudaEventRecord(start);
    add<<<blocksPerGrid,threadsPerBlock>>>(a_d,len);
    cudaMemcpy(a,a_d,sizeof(int)*len,cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaDeviceSynchronize();
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds,start,stop);
    printf("Elapsed time is : %f millisec\n\n",milliseconds);
    cudaFree(a_d);
}
13,524
/* Program : Perform matrix multiplication using the tiling algorithm
 * Author : Anant Shah
 * Roll Number : EE16B105
 * Date : 8-9-2018
 **/
#include<stdio.h>
#include<cuda.h>

#define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line)
#define SIZE 8192
#define NUM_THREADS_X 16
#define NUM_THREADS_Y 16
#define TILE_WIDTH 16

/* Abort with a descriptive message if a CUDA API call failed.
 * Fix: the caller-supplied `line` (the call site) is now printed; the
 * original printed __LINE__ of this function, which always reported the
 * handler's own line. */
void error_handler(cudaError_t error_msg, int line){
	if(error_msg!=cudaSuccess){
		printf("%s in %s at %d",cudaGetErrorString(error_msg),__FILE__,line);
		exit(EXIT_FAILURE);
	}
}

/* Fill a numRows x numCols row-major matrix with the deterministic pattern
 * mat[i][j] = 2.1*i + 3.2*j. */
void fill_matrix(double *mat, unsigned numRows, unsigned numCols){
	for(unsigned i=0;i<numRows;i++){
		for(unsigned j=0;j<numCols;j++){
			mat[i*numCols+j] = i*2.1f+j*3.2f;
		}
	}
}

/* Append the matrix elements, row by row, to the file "assignment2_out". */
void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols){
	const char *fname = "assignment2_out";
	FILE *f = fopen(fname,"a");
	for(unsigned i=0; i<numRows; i++){
		for(unsigned j=0;j<numCols;j++){
			fprintf(f,"%4.4f ",mat[i*numCols+j]);
		}
		fprintf(f,"\n");
	}
	fclose(f);
}

/* Tiled square matrix multiplication P = M * N.
 * Each TILE_WIDTH x TILE_WIDTH thread block computes one output tile,
 * staging tiles of M and N through shared memory phase by phase.
 * Parameters : M, N  - input matrices (row-major, width x width)
 *              P     - output product matrix
 *              width - dimension of the square matrices
 * Requires blockDim == (TILE_WIDTH, TILE_WIDTH).
 * Fix: tile loads and the final store are now bounds-guarded, so widths that
 * are not a multiple of TILE_WIDTH no longer read/write out of bounds (the
 * phase count already used ceil division, but the accesses were unguarded). */
__global__ void matrixMul(double *M,double *N,double *P,int width){
	int bx = blockDim.x;            /* Threads per block in x */
	int by = blockDim.y;            /* Threads per block in y */
	int tx = threadIdx.x;           /* Thread x-index within the block */
	int ty = threadIdx.y;           /* Thread y-index within the block */
	double pSum = 0.0;              /* Running dot-product for this cell */
	int Row = blockIdx.y*by+ty;     /* Output row computed by this thread */
	int Col = blockIdx.x*bx+tx;     /* Output column computed by this thread */

	__shared__ double Ms[TILE_WIDTH][TILE_WIDTH]; /* Tile of M */
	__shared__ double Ns[TILE_WIDTH][TILE_WIDTH]; /* Tile of N */

	for(unsigned m=0;m<(width+TILE_WIDTH-1)/TILE_WIDTH;m++){
		/* Load this phase's tiles; out-of-range cells contribute 0 so the
		 * partial sums stay correct at the matrix edges. */
		Ms[ty][tx] = (Row < width && (int)(m*TILE_WIDTH)+tx < width) ? M[Row*width+m*TILE_WIDTH+tx] : 0.0;
		Ns[ty][tx] = ((int)(m*TILE_WIDTH)+ty < width && Col < width) ? N[(m*TILE_WIDTH+ty)*width+Col] : 0.0;
		/* All threads must finish loading before any thread multiplies. */
		__syncthreads();
		for(unsigned i=0;i<TILE_WIDTH;i++){
			pSum += Ms[ty][i]*Ns[i][tx];
		}
		/* All threads must finish using the tiles before the next load. */
		__syncthreads();
	}
	if(Row < width && Col < width){
		P[Row*width+Col] = pSum;
	}
}

int main(int argc,char **argv){
	if(argc!=1){
		printf("error : Invalid number of arguments");
		exit(EXIT_FAILURE);
	}
	/* Host and device buffers for M, N and the product P */
	double *h_M;
	double *h_N;
	double *h_P;
	double *d_M;
	double *d_N;
	double *d_P;
	size_t size;                 /* Bytes per SIZE x SIZE matrix */
	cudaEvent_t start,stop;      /* Events bracketing the kernel for timing */
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	size = sizeof(double)*SIZE*SIZE;

	/* Host allocation and initialization */
	h_M = (double *)malloc(size);
	h_N = (double *)malloc(size);
	h_P = (double *)malloc(size);
	fill_matrix(h_M,SIZE,SIZE);
	fill_matrix(h_N,SIZE,SIZE);

	/* Device allocation and upload */
	ERROR_HANDLER(cudaMalloc((void **)&d_M,size),__LINE__);
	ERROR_HANDLER(cudaMalloc((void **)&d_N,size),__LINE__);
	ERROR_HANDLER(cudaMalloc((void **)&d_P,size),__LINE__);
	ERROR_HANDLER(cudaMemcpy(d_M,h_M,size,cudaMemcpyHostToDevice),__LINE__);
	ERROR_HANDLER(cudaMemcpy(d_N,h_N,size,cudaMemcpyHostToDevice),__LINE__);

	/* Launch: one thread per output cell, ceil-divided grid */
	dim3 threads(NUM_THREADS_X,NUM_THREADS_Y);
	dim3 blocks((SIZE+NUM_THREADS_X-1)/NUM_THREADS_X,(SIZE+NUM_THREADS_Y-1)/NUM_THREADS_Y);
	cudaEventRecord(start);
	matrixMul<<<blocks,threads>>>(d_M,d_N,d_P,SIZE);
	/* Fix: catch launch-configuration errors (the launch itself returns
	 * nothing). */
	ERROR_HANDLER(cudaGetLastError(),__LINE__);
	cudaEventRecord(stop);
	ERROR_HANDLER(cudaMemcpy(h_P,d_P,size,cudaMemcpyDeviceToHost),__LINE__);
	cudaEventSynchronize(stop);
	float run_time = 0.0;
	cudaEventElapsedTime(&run_time,start,stop);
	printf("Run-Time(milli-seconds) : %.10f\n",run_time);
	print_matrix_to_file(h_P,SIZE,SIZE);

	/* Cleanup (events were previously leaked) */
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(d_M);
	cudaFree(d_N);
	cudaFree(d_P);
	free(h_M);
	free(h_N);
	free(h_P);
}
13,525
/*
 * Copyright 2016 Alexander Terenin
 *
 * Licensed under the Apache License, Version 2.0 (the "License")
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <curand_kernel.h>

/*
 * Function : cuda_rand_init
 * Purpose  : seeds the Philox counter-based generator; only the first thread
 *            of the grid does the work, initializing state[0] with
 *            subsequence 0 and offset 0
 * Argument seed  : RNG seed supplied by the host
 * Argument state : device array holding the generator state
 * Output   : mutates state[0] in place
 */
extern "C" __global__ void cuda_rand_init(int seed, curandStatePhilox4_32_10_t *state)
{
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid != 0) return;  // single-threaded init: everyone else exits
    curand_init((unsigned long long) seed,
                (unsigned long long) 0,
                (unsigned long long) 0,
                &state[0]);
}
13,526
#include<stdio.h>
#include<math.h>
#include<cuda.h>
#include<time.h>

// Threads per block dimension.
#define BLOCK_SIZE 16

// Ceiling division: smallest integer >= numerator / denominator.
int upper_ceil(int numerator, int denominator)
{
    if (numerator % denominator == 0) {
        return numerator / denominator;
    }
    return (numerator / denominator) + 1;
}

// Report any CUDA error returned by `func`, tagged with the call site line.
#define printError(func){ \
    cudaError_t E = func; \
    if(E != cudaSuccess){ \
        printf( "\nError at line: %d ", __LINE__); \
        printf( "\nError: %s ", cudaGetErrorString(E)); \
    } \
}

// C = A * B where A is (m,n), B is (n,k), C is (m,k), all row-major.
// One thread per element of C; out-of-range threads exit early.
__global__ void MatrixMultiplication(int *device_A, int *device_B, int *device_C, int m, int n, int k)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    int i, sum;

    if (Row < 0 || Col < 0 || Row >= m || Col >= k) {
        return;
    } else {
        sum = 0;
        for (i = 0; i < n; i++) {
            sum = sum + (device_A[Row * n + i] * device_B[k * i + Col]);
        }
        device_C[Row * k + Col] = sum;
    }
}

// Host-side verification: recompute A*B and compare with the GPU result.
// Returns 1 when every element matches, 0 otherwise (stops at first mismatch).
int check(int m, int n, int k, int *host_A, int *host_B, int *host_C)
{
    int flag = 1, row, col, sum, i;
    for (row = 0; row < m; row++) {
        for (col = 0; col < k; col++) {
            sum = 0;
            for (i = 0; i < n; i++) {
                sum = sum + host_A[row * n + i] * host_B[col + i * k];
            }
            if (host_C[row * k + col] != sum) {
                flag = 0;
                break;
            }
        }
        if (!flag) break;
    }
    return flag;
}

int main()
{
    // Seeding PRNG
    srand(time(NULL));
    int i;

    // Host matrices: A is (m,n), B is (n,k), C is (m,k).
    int *host_A;
    int *host_B;
    int *host_C;
    int m = 512;
    int n = 1024;
    int k = 512;

    // Device matrices
    int *device_A;
    int *device_B;
    int *device_C;

    host_A = (int *)malloc(m * n * sizeof(int));
    host_B = (int *)malloc(n * k * sizeof(int));
    host_C = (int *)malloc(m * k * sizeof(int));
    // Fix: host allocations were previously used without a NULL check.
    if (host_A == NULL || host_B == NULL || host_C == NULL) {
        printf("\nHost allocation failed\n");
        return 1;
    }

    for (i = 0; i < m * n; i++) host_A[i] = rand() % 100;
    for (i = 0; i < n * k; i++) host_B[i] = rand() % 100;

    // Allocating memory with error checking
    printError(cudaMalloc((void **)&device_A, m * n * sizeof(int)));
    printError(cudaMalloc((void **)&device_B, n * k * sizeof(int)));
    printError(cudaMalloc((void **)&device_C, m * k * sizeof(int)));

    // Fix: copies are now checked like the allocations (they were bare before).
    printError(cudaMemcpy(device_A, host_A, m * n * sizeof(int), cudaMemcpyHostToDevice));
    printError(cudaMemcpy(device_B, host_B, n * k * sizeof(int), cudaMemcpyHostToDevice));

    // One 2-D block of BLOCK_SIZE x BLOCK_SIZE threads per output tile.
    dim3 dimGrid(upper_ceil(k, BLOCK_SIZE), upper_ceil(m, BLOCK_SIZE), 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);

    MatrixMultiplication<<<dimGrid, dimBlock>>>(device_A, device_B, device_C, m, n, k);
    // Fix: catch launch-configuration errors (kernel launches don't return one).
    printError(cudaGetLastError());

    printError(cudaMemcpy(host_C, device_C, m * k * sizeof(int), cudaMemcpyDeviceToHost));

    if (check(m, n, k, host_A, host_B, host_C))
        printf("Correct\n");
    else
        printf("Incorrect\n");

    cudaFree(device_A);
    cudaFree(device_B);
    cudaFree(device_C);
    free(host_A);
    free(host_B);
    free(host_C);
    return 0;
}
13,527
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

//
// kernel routine: element-wise C = A + B for vectors of length n
//
__global__ void VecAdd(float* A, float* B, float* C, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Fix: guard against the grid tail — the grid is rounded up to a
    // multiple of the block size, so threads with i >= n must not write.
    if (i < n)
        C[i] = A[i] + B[i];
}

//
// main code
//
int main(int argc, char **argv)
{
    // Fix: the vector length comes from the command line; fail cleanly
    // instead of dereferencing a missing argv[1].
    if (argc < 2) {
        fprintf(stderr, "usage: %s N\n", argv[0]);
        return 1;
    }

    cudaSetDevice(1);

    // Input the vector length
    int N = atoi(argv[1]);

    // Number of bytes to allocate for N floats.
    // (Fix: was declared "float size_t bytes", which does not compile.)
    size_t bytes = N * sizeof(float);

    // Host arrays
    float *A = (float *)malloc(bytes);
    float *B = (float *)malloc(bytes);
    float *C = (float *)malloc(bytes);

    // Allocate memory for arrays d_A, d_B, and d_C on device
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);

    // Fill A and B with random values in [0, 99].
    for (int i = 0; i < N; i++) {
        A[i] = rand() % 100;
        B[i] = rand() % 100;
    }

    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);

    // Kernel invocation: at least one thread per element.
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);

    // Copy data from device array d_C to host array C (synchronizes).
    cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);

    // Values are integral (sums of two ints < 100), so an int total is exact.
    int s = 0;
    for (int j = 0; j < N; j++)
        s += C[j];
    printf("\nGPU Vector Length: %d Sum: %d\n", N, s);

    // Free CPU memory
    free(A);
    free(B);
    free(C);

    // Free GPU memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // CUDA exit -- needed to flush printf write buffer
    cudaDeviceReset();

    // Fix: return 0 on success (returning 1 signals failure to the shell).
    return 0;
}
13,528
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>

#define WIDTH 1024
#define TILE_WIDTH 16
#define BLOCKSPERGRID (WIDTH + TILE_WIDTH - 1) / TILE_WIDTH

int M[WIDTH][WIDTH] = {0};
int N[WIDTH][WIDTH] = {0};
int P[WIDTH][WIDTH] = {0};      // GPU result
int MxN[WIDTH][WIDTH] = {0};    // CPU reference result

__global__ void mat_mul(int *Md, int *Nd, int *Pd);
__device__ int GetElement(int *matrix, int row, int col);
__device__ void SetElement(int *matrix, int row, int col, int value);
__device__ int *GetSubMatrix(int *matrix, int blockrow, int blockcol);

int main(int argc, char *argv[])
{
    float elapsedTime1;
    float elapsedTime2;

    for (int i = 0; i < WIDTH; ++i) {
        for (int j = 0; j < WIDTH; ++j) {
            M[i][j] = (int)(rand() % 255 + 1);
            N[i][j] = (int)(rand() % 255 + 1);
        }
    }

    // CPU reference multiply, timed with gettimeofday.
    struct timeval starttime, endtime;
    gettimeofday(&starttime, NULL);
    for (int i = 0; i < WIDTH; ++i) {
        for (int j = 0; j < WIDTH; ++j) {
            for (int k = 0; k < WIDTH; ++k) {
                MxN[i][j] += M[i][k] * N[k][j];
            }
        }
    }
    gettimeofday(&endtime, NULL);
    double executime;
    executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
    executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
    printf("CPU time: %13lf msec\n", executime);

    size_t size = WIDTH * WIDTH * sizeof(int);
    int *Md, *Nd, *Pd;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Transpose N on the CPU so the kernel reads both operands row-wise.
    cudaEventRecord(start, 0);
    for (int i = 0; i < WIDTH; i++) {
        for (int j = i + 1; j < WIDTH; j++) {
            int temp = N[i][j];
            N[i][j] = N[j][i];
            N[j][i] = temp;
        }
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime1, start, stop);
    printf("CPU transpose time: %13f msec\n", elapsedTime1);

    cudaMalloc((void **)&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Pd, size);

    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 dimGrid(BLOCKSPERGRID, BLOCKSPERGRID);

    cudaEventRecord(start, 0);
    mat_mul<<<dimGrid, dimBlock>>>(Md, Nd, Pd);

    // Fix: check for launch errors immediately after the launch. The old
    // code checked only after timing and the message wrongly claimed the
    // check ran "before" the kernel call.
    cudaError_t cuda_err = cudaGetLastError();
    if (cudaSuccess != cuda_err) {
        printf("after kernel launch: error = %s\n", cudaGetErrorString(cuda_err));
        exit(1);
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime2, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime2);
    printf("GPU total time: %13f msec\n", elapsedTime1 + elapsedTime2);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    // Compare against the CPU reference; stop at the first mismatch.
    // (Fix: the old `break` only exited the inner loop, so up to WIDTH
    // mismatch lines could be printed.)
    int pass = 1;
    for (int i = 0; i < WIDTH && pass; ++i) {
        for (int j = 0; j < WIDTH; ++j) {
            if (MxN[i][j] != P[i][j]) {
                printf("MxN[%d][%d] = %d P[%d][%d] = %d\n", i, j, MxN[i][j], i, j, P[i][j]);
                pass = 0;
                break;
            }
        }
    }
    printf("Test %s\n", (pass) ? "PASSED" : "FAILED");

    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
    return 0;
}

// Tiled matrix multiply. N was transposed on the host, so tile Nd_sub is
// addressed by (blockCol, m) and the inner product reads Nds[col][k].
__global__ void mat_mul(int *Md, int *Nd, int *Pd)
{
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    int *Pd_sub = GetSubMatrix(Pd, blockRow, blockCol);
    int row = threadIdx.y;
    int col = threadIdx.x;
    int Pvalue = 0;

    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];

    for (int m = 0; m < (WIDTH / TILE_WIDTH); ++m) {
        int *Md_sub = GetSubMatrix(Md, blockRow, m);
        int *Nd_sub = GetSubMatrix(Nd, blockCol, m);
        Mds[row][col] = GetElement(Md_sub, row, col);
        Nds[row][col] = GetElement(Nd_sub, row, col);
        __syncthreads();   // tiles fully loaded before use
        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[row][k] * Nds[col][k];
        }
        __syncthreads();   // all reads done before the next load
    }
    SetElement(Pd_sub, row, col, Pvalue);
}

// Element read within a WIDTH-pitch row-major matrix (or sub-matrix view).
__device__ int GetElement(int *matrix, int y, int x)
{
    return *(matrix + y * WIDTH + x);
}

// Element write within a WIDTH-pitch row-major matrix (or sub-matrix view).
__device__ void SetElement(int *matrix, int y, int x, int value)
{
    *(matrix + y * WIDTH + x) = value;
}

// Pointer to the top-left of the (block_y, block_x) TILE_WIDTH tile.
__device__ int *GetSubMatrix(int *matrix, int block_y, int block_x)
{
    return (matrix + block_y * TILE_WIDTH * WIDTH + block_x * TILE_WIDTH);
}
13,529
/// LSU EE 7722 GPU Microarchitecture
//
/// Simple, Self-Contained, One-File CUDA Example

/// How to Compile from the Command Line
//
//   nvcc -o cuda cuda.cu -O3 -Xcompiler -Wall

/// Documentation
//
//  CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
//  C++: http://en.cppreference.com/w/

#include <stdio.h>
#include <cuda_runtime.h>
#include <vector>

using namespace std;

/// Declaration of Kernel (Entry point for code running on GPU.)
//
//  Fills a[idx] and b[idx] on the device, then writes their sum to x[idx].
//
__global__ void
thread_main(int size, float *x, float *a, float *b)
{
  // Compute a unique index (number) for this thread, used as an array index.
  //
  int idx = threadIdx.x + blockIdx.x * blockDim.x;

  // Array size might not be a multiple of block size.
  //
  if ( idx >= size ) return;

  a[idx] = idx + blockIdx.x;
  b[idx] = float(blockIdx.x) / (idx+1);

  // Perform Computation
  //
  x[idx] = a[idx] + b[idx];
}

__host__ int
main(int argc, char** argv)
{
  const int SIZE = 100000000;

  // Host array for the output only.
  //
  // Fix: the inputs a and b are generated on the device by the kernel, so
  // the two host vectors the old code allocated for them (800 MB of memory
  // that was never read or written) are not needed.
  //
  vector<float> x(SIZE);

  // Compute size of each array.
  //
  const int array_size_chars = x.size() * sizeof(x[0]);

  // Allocate storage for GPU copies of the data. The addresses are in GPU
  // global space and so are not necessarily valid on the CPU.
  //
  float *a_dev, *b_dev, *x_dev;
  cudaMalloc( &a_dev, array_size_chars );
  cudaMalloc( &b_dev, array_size_chars );
  cudaMalloc( &x_dev, array_size_chars );

  // Specify Launch Configuration
  //
  const int db = 64;  // Number of threads per block.

  // Choose grid size so that there is at least one thread per array element.
  //
  const int dg = (SIZE + db - 1 ) / db;

  // Launch Kernel
  //
  thread_main<<<dg,db>>>(SIZE, x_dev, a_dev, b_dev);

  // Copy data from GPU to CPU (synchronizes with the kernel).
  //
  cudaMemcpy( x.data(), x_dev, array_size_chars, cudaMemcpyDeviceToHost );

  printf("Finished with %d elements, element %d is %.5f\n",
         SIZE, argc, x[argc]);

  cudaFree( a_dev );
  cudaFree( b_dev );
  cudaFree( x_dev );
}
13,530
#include <stdio.h>

// Kernel: every launched thread prints a greeting from the device.
__global__ void hello()
{
    printf("Hello world from device!\n");
}

int main(void)
{
    printf("Hello world from host!\n");

    // Ten blocks of one thread each -> ten device-side greetings.
    hello<<<10, 1>>>();

    // Block until the kernel finishes so its printf buffer is flushed.
    cudaDeviceSynchronize();
    return 0;
}
13,531
#include "includes.h"

// Shared tuning constants (Kilosort-style code); only maxFR is read by
// this kernel, the others presumably size buffers elsewhere — confirm
// against the rest of the project.
const int Nthreads = 1024, maxFR = 5000, NrankMax = 6;

//////////////////////////////////////////////////////////////////////////////////////////

// Copies spike waveform snippets out of the raw data into WU.
//
// Params  : packed scalars; the reads below use Params[0] as NT (samples
//           per batch), Params[4] as nt0 (snippet length) and Params[9]
//           as Nchan (channel count) — verify indices against the caller.
// st      : per-spike start sample into dataraw
// id      : per-spike cluster/template id; block `bid` handles ids == bid
// counter : counter[1] holds the number of detected spikes (capped at maxFR)
// dataraw : raw traces laid out so sample t of channel c is dataraw[t + NT*c]
// WU      : output, WU[t + c*nt0 + nt0*Nchan*ind] for spike index ind
//
// Launch assumptions implied by the indexing: blockIdx.x ranges over
// cluster ids, threadIdx.x over the nt0 snippet samples, and threadIdx.y
// strides across channels.
__global__ void extract_snips(const double *Params, const int *st, const int *id, const int *counter, const float *dataraw, float *WU){
  int nt0, tidx, tidy, bid, ind, NT, Nchan, Nmax;

  NT    = (int) Params[0];
  nt0   = (int) Params[4];
  Nchan = (int) Params[9];

  tidx = threadIdx.x;
  bid  = blockIdx.x;

  // Never read more spikes than the fixed-size maxFR buffer can hold.
  Nmax = min(maxFR, counter[1]);

  for(ind=0; ind<Nmax;ind++)
      if (id[ind]==bid){
          // threadIdx.y strides across channels; threadIdx.x picks the sample.
          tidy = threadIdx.y;
          while (tidy<Nchan){
              WU[tidx+tidy*nt0 + nt0*Nchan * ind] = dataraw[st[ind]+tidx + NT * tidy];
              tidy+=blockDim.y;
          }
      }
}
13,532
#include "includes.h"

// Rescales n samples in `result` in place: each value u becomes
// u * upper + (1 - u) * lower. A grid-stride loop makes the kernel
// correct for any launch configuration.
__global__ void uniform_float(int n, float lower, float upper, float *result)
{
    const int stride = gridDim.x * blockDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    while (idx < n) {
        const float u = result[idx];
        result[idx] = u * upper + (1 - u) * lower;
        idx += stride;
    }
}
13,533
//
//  simpleGPUGrep.cu
//  simpleGPUGrep
//
//  Created by HaoJi on 10/29/13.
//  Copyright (c) 2013 HaoJi. All rights reserved.
//
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define CHECK_ERR(x) \
    if (x != cudaSuccess) { \
        fprintf(stderr,"%s in %s at line %d\n", \
                cudaGetErrorString(err),__FILE__,__LINE__); \
        exit(-1); \
    }

#define BLOCKS 100
#define THREADS 1024
#define BUFSIZE 5000
#define FAILURE -1
#define SUCCESS 0

// Device-side strstr: first occurrence of s2 inside s1, or NULL.
__device__ char *mystrstr(const char *s1, const char *s2)
{
    int n;
    if (*s2) {
        while (*s1) {
            for (n = 0; *(s1 + n) == *(s2 + n); n++) {
                if (!*(s2 + n + 1))
                    return (char *) s1;
            }
            s1++;
        }
        return NULL ;
    } else
        return (char *) s1;
}

// Device-side strncpy; currently unused (kept for the commented-out
// output-buffer variant of match()).
__device__ char *mystrncpy(char *dest, char *source, size_t n)
{
    int i;
    if (dest == NULL || source == NULL )
        return NULL ;
    for (i = 0; i < n && source[i] != '\0'; i++) {
        dest[i] = source[i];
    }
    dest[i] = '\0';
    return dest;
}

// Device-side strlen; currently unused (see mystrncpy).
__device__ int mystrlen(char *str)
{
    if (str == NULL )
        return 0;
    int len = 0;
    for (; *str++ != '\0';) {
        len++;
    }
    return len;
}

// One thread per buffered line: print the line if it contains the pattern.
// d_lines holds BLOCKS*THREADS zero-padded slots of BUFSIZE bytes each.
__global__ void match(char *d_pattern, char* d_lines)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = pos * BUFSIZE;
    char *line = d_lines + offset;
    char *pch = mystrstr(line, d_pattern) != NULL ? line : NULL;
    if (pch != NULL ) {
        printf("%s", pch);
    }
}

int main(int argc, char *argv[])
{
    cudaError_t err;
    char *line;
    char *lines;

    // Memory for pattern and filename (in the host)
    char pattern[BUFSIZE];
    char file_name[BUFSIZE];
    char *d_pattern, *d_lines;

    // Fix: validate the arguments before copying from argv.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <pattern> <file>\n", argv[0]);
        return FAILURE;
    }
    strcpy(pattern, argv[1]);
    strcpy(file_name, argv[2]);

    // Device allocations for the pattern and the line batch buffer.
    err = cudaMalloc((void **) &d_pattern, BUFSIZE);
    CHECK_ERR(err);
    err = cudaMalloc((void **) &d_lines, BLOCKS * THREADS * BUFSIZE);
    CHECK_ERR(err);

    err = cudaMemcpy(d_pattern, pattern, BUFSIZE, cudaMemcpyHostToDevice);
    CHECK_ERR(err);

    // Host batch buffer, zeroed so unused slots read as empty strings.
    lines = (char*) calloc(BLOCKS * THREADS * BUFSIZE, sizeof(char));

    FILE *fp;
    fp = fopen(file_name, "r");
    if (fp == NULL ) {
        perror("fopen():");
        exit(1);
    }

    line = (char*) calloc(BUFSIZE, sizeof(char));

    // Read lines into the batch buffer; launch one kernel per full batch.
    int n_lines = 0;
    while (fgets(line, BUFSIZE, fp) != NULL ) {
        int offset = n_lines * BUFSIZE;
        strncpy(lines + offset, line, strlen(line));
        memset(line, 0, BUFSIZE);
        n_lines++;

        // Fix: flush when the buffer is completely full. The old code
        // flushed at BLOCKS*THREADS - 1 lines, so the final slot of the
        // batch buffer was never used and its drop-branch was unreachable.
        if (n_lines == BLOCKS * THREADS) {
            err = cudaMemcpy(d_lines, lines, BLOCKS * THREADS * BUFSIZE, cudaMemcpyHostToDevice);
            CHECK_ERR(err);
            match<<<BLOCKS, THREADS>>>(d_pattern, d_lines);
            n_lines = 0;
            memset(lines, 0, BLOCKS * THREADS * BUFSIZE);
        }
    }

    // Flush the final, partially filled batch.
    if (n_lines != 0) {
        err = cudaMemcpy(d_lines, lines, BLOCKS * THREADS * BUFSIZE, cudaMemcpyHostToDevice);
        CHECK_ERR(err);
        match<<<BLOCKS, THREADS>>>(d_pattern, d_lines);
    }

    // Fix: wait for the last kernel so its device-side printf output is
    // flushed before the process exits.
    cudaDeviceSynchronize();

    free(line);
    free(lines);
    cudaFree(d_pattern);
    cudaFree(d_lines);
    fclose(fp);
    return SUCCESS;
}
13,534
/************************************************************************************\ * * * Copyright © 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. 
* * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ /** * @brief naive floyd warshal kernel * @param dist Distance array * @param next Next array * @param dim Dimension of the 2-D matrix * @param k Current iteration number */ __global__ void floydwarshall(int *dist, int *next, int dim, int k) { // Get my workitem id x_dim int i = blockDim.x * blockIdx.x + threadIdx.x; // Get my workitem id y_dim int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < dim && j < dim) { // if (dist i -> k + k -> j) update the dist i-> j if (dist[i * dim + k] + dist[k * dim + j] < dist[i * dim + j]) { dist[i * dim + j] = dist[i * dim + k] + dist[k * dim + j]; next[i * dim + j] = k; } } }
13,535
#include "includes.h"

// Block-wise sum reduction of dataArray into *sum.
// Each block loads blockDim.x elements into dynamically sized shared
// memory, reduces them in log2(blockDim.x) XOR-paired steps, and thread 0
// adds the block's partial sum to *sum with a single atomic.
// NOTE(review): the halving/XOR scheme only covers every element when
// blockDim.x is a power of two — confirm the launch configuration.
__global__ static void kernelCalcSum_EffectiveShareAccess(const int* dataArray, int arraySize, int* sum)
{
    // Dynamic shared memory: blockDim.x ints, sized at launch.
    __shared__ extern int cache[];

    int cacheIndex = threadIdx.x;
    int arrayIndex = (int)(blockDim.x * blockIdx.x + threadIdx.x);

    // Out-of-range threads contribute 0 so the reduction tree stays uniform.
    if (arrayIndex < arraySize)
    {
        cache[cacheIndex] = dataArray[arrayIndex];
    }
    else
    {
        cache[cacheIndex] = 0;
    }
    __syncthreads();   // all loads must land before any thread reduces

    int blockSize = blockDim.x;
    for (int offset = blockSize >> 1; offset > 0; offset >>= 1) // code in this for block is changed
    {
        if (cacheIndex < offset)
        {
            // Pair each low-half thread with its XOR partner in the high half.
            cache[cacheIndex] += cache[cacheIndex ^ offset];
        }
        __syncthreads();   // barrier is outside the if: every thread reaches it
    }

    // One atomic per block keeps contention on *sum low.
    if (cacheIndex == 0)
    {
        atomicAdd(sum, cache[0]);
    }
}
13,536
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

#include <stdio.h>

#define TILE_SIZE 128

// Naive GEMM: C = A * B with A (m x k), B (k x n), C (m x n), all
// row-major. One thread computes one element of C via a flat index.
__global__ void naiveMM(int m, int n, int k, const float *A, const float *B, float* C){

    int id = blockIdx.x * blockDim.x + threadIdx.x;
    float sum = 0;
    if(id < m*n)
    {
        // C is m x n and row-major, so a flat index decomposes with n.
        // (Fix: the old code used id / m, which is only correct when m == n.)
        int row = id / n;
        int col = id % n;
        for(int i = 0; i < k; i++)
        {
            sum += A[row * k + i] * B[i * n + col];
        }
        C[id] = sum;
    }
}

// Host wrapper with the classic SGEMM signature; only the no-transpose,
// alpha = 1, beta = 0 case is supported (others print and return).
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if ((transa != 'N') && (transa != 'n')) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if ((transb != 'N') && (transb != 'n')) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }

    // Initialize thread block and kernel grid dimensions:
    // one thread per output element, 1-D launch.
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    // Fix: integer ceil-div instead of float ceil() — exact for any m*n
    // (float rounding could mis-size the grid for products above 2^24).
    int gridSize = (m * n + (int)BLOCK_SIZE - 1) / (int)BLOCK_SIZE;

    // Invoke CUDA kernel.
    naiveMM<<<gridSize, BLOCK_SIZE>>>(m, n, k, A, B, C);
}
13,537
// Maps an RNA character sequence to integer base codes in place:
// A->0, C->1, G->2, U->3, anything else -> -64 (= -(4^3), a sentinel
// large enough that any codon containing it comes out negative).
// Launch layout: N threads total, each handling M consecutive bases.
__global__ void mapToNumb(
    const int N,     // Number of whole threads
    const int M,     // Length of subseq that one thread handles
    char* seq,
    int* numb_seq
)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = gid * M;

    if (idx < N * M) {
        for (int i = 0; i < M; i++) {
            switch (seq[idx + i]) {
                case 'A': numb_seq[idx + i] = 0;   break;
                case 'C': numb_seq[idx + i] = 1;   break;
                case 'G': numb_seq[idx + i] = 2;   break;
                case 'U': numb_seq[idx + i] = 3;   break;
                // -64 replaces the old -(int)powf(4, 3); same value, no FP.
                default:  numb_seq[idx + i] = -64; break;
            }
        }
    }
}

// Builds base-4 codon codes from consecutive base triples:
// codon_seq[p] = 16*numb[p] + 4*numb[p+1] + numb[p+2].
__global__ void genNumbCodon(
    const int N,
    const int M,
    int* numb_seq,
    int* codon_seq
)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = gid * M;

    for (int i = 0; i < M; i++) {
        int loc_idx = idx + i;
        // A codon reads bases loc_idx .. loc_idx+2, so the last valid start
        // is N*M - 3. (Fix: the old bound N*M - 3 + 1 allowed a read one
        // element past the end of numb_seq.)
        if (loc_idx <= N * M - 3) {
            int codon_numb = 0;
            for (int p = 0; p < 3; p++) {
                // Fix: the old code read numb_seq[loc_idx] for every p,
                // combining the same base three times instead of the three
                // consecutive bases of the codon. Horner form replaces the
                // powf(4, 2-k) weights with exact integer arithmetic.
                codon_numb = codon_numb * 4 + numb_seq[loc_idx + p];
            }
            codon_seq[loc_idx] = codon_numb;
        }
    }
}

// Translates codon codes to amino-acid characters via the lookup table.
// Negative codes (triples containing a non-ACGU base) are skipped.
__global__ void mapToAA(
    const int N,
    const int M,
    char* rna_codon_tab,
    int* codon_seq,
    char* aa_seq
)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = gid * M;

    for (int i = 0; i < M; i++) {
        int loc_idx = idx + i;
        // Fix: bounds check BEFORE the read (the old code indexed
        // codon_seq first), using the same last-valid-start bound that
        // genNumbCodon writes to.
        if (loc_idx <= N * M - 3) {
            int codon_idx = codon_seq[loc_idx];
            if (codon_idx >= 0) {
                aa_seq[loc_idx] = rna_codon_tab[codon_idx];
            }
        }
    }
}
13,538
// some handy lorentz vector and methods

// Four-momentum in (pt, eta, phi, m) representation.
struct P4_PtEtaPhiM{
    float pt;
    float eta;
    float phi;
    float m;
};

// Adds two four-vectors and returns the sum in (pt, eta, phi, m) form.
// Conversion to Cartesian, component-wise addition, then back.
__device__ P4_PtEtaPhiM lorentz_add( P4_PtEtaPhiM *p1, P4_PtEtaPhiM *p2){
    // Convert both inputs to Cartesian (px, py, pz, E).
    float px1 = p1->pt*cosf(p1->phi);
    float py1 = p1->pt*sinf(p1->phi);
    float pz1 = p1->pt*sinhf(p1->eta);
    float pe1 = sqrtf(px1*px1 + py1*py1 + pz1*pz1 + p1->m*p1->m);

    float px2 = p2->pt*cosf(p2->phi);
    float py2 = p2->pt*sinf(p2->phi);
    float pz2 = p2->pt*sinhf(p2->eta);
    float pe2 = sqrtf(px2*px2 + py2*py2 + pz2*pz2 + p2->m*p2->m);

    float qx = px1+px2;
    float qy = py1+py2;
    float qz = pz1+pz2;
    float qe = pe1+pe2;

    float q_pt = sqrtf(qx*qx + qy*qy);

    // Fix: implemented the former "FIX ME" placeholders using the
    // TLorentzVector definitions: phi = atan2(py, px), eta = asinh(pz/pt).
    // eta is undefined at pt == 0, so fall back to 0 there.
    float q_phi = atan2f(qy, qx);
    float q_eta = (q_pt > 0.0f) ? asinhf(qz / q_pt) : 0.0f;

    // Clamp mass-squared at zero so rounding error cannot produce a NaN.
    float q_m2 = qe*qe - qx*qx - qy*qy - qz*qz;
    float q_m = (q_m2 > 0.0f) ? sqrtf(q_m2) : 0.0f;

    struct P4_PtEtaPhiM q = {q_pt, q_eta, q_phi, q_m};
    return q;
}

// root function return phi in [-pi,pi]
//https://root.cern.ch/doc/master/TVector2_8cxx_source.html#l00103
__device__ float phi_mpi_pi(float x){
    while(x>M_PI) x -= 2*M_PI;
    while(x<-M_PI) x += 2*M_PI;
    return x;
}
13,539
/********************************************************************************************
    source Code : deviceDetails.cu

    Objective   : Example code to demonstrate the number of devices that are present
                  on the current system and their properties

    Description : To query using the cuda API calls about the various properties of the
                  devices like the device model, max number of threads per block, compute
                  capability, warp size, available Global, shared, and constant memories etc.

    input       : none
    output      : The various properties of all the devices that are present on the
                  current system
**********************************************************************************************/

#include <cuda.h>
#include<stdio.h>

///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// main routine to find the gpu devices that are present on the system,
// querying the various details of all the devices and printing the details
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char* argv[])
{
    // Number of CUDA-capable devices reported by the runtime.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    int device;
    for (device = 0; device < deviceCount; ++device)
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);

        if (device == 0)
        {
            // Compute capability 9999.9999 is the runtime's sentinel for
            // "no real CUDA device" (e.g. emulation only).
            if (deviceProp.major == 9999 && deviceProp.minor == 9999)
            {
                printf("\n\nThere is no device supporting CUDA.\n");
                break;
            }
            else
                printf("\n\nThere are %d device(s) supporting CUDA\n",deviceCount);
        }

        printf("\n\n********************* DEVICE-%d DETAILS *******************\n",device);
        printf("The name of the device : %s\n",deviceProp.name);
        printf("The compute capability : %d.%d\n",deviceProp.major,deviceProp.minor);
        printf("The warp size : %d\n",deviceProp.warpSize);
        // totalGlobalMem is in bytes; divide by 1e9 to report decimal GB.
        printf("The Global memory available on device : %lf GBytes\n",(double)deviceProp.totalGlobalMem/1000000000);
        printf("The Constant memory available on device: %ld Bytes\n",deviceProp.totalConstMem);
        printf("The shared memory available per Block : %ld Bytes\n",deviceProp.sharedMemPerBlock);
        printf("The registers available per Block : %d\n",deviceProp.regsPerBlock);
        printf("The number of multiprocessors on the device : %d\n",deviceProp.multiProcessorCount);
        printf("The max number of threads per Block : %d\n",deviceProp.maxThreadsPerBlock);
        printf("The max sizes of each dimension of a block: (%d,%d,%d)\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]);
        printf("The max sizes of each dimension of a grid: (%d,%d,%d)\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]);
        printf("----------------------------------------------------------\n\n");
    }
    return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
13,540
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "168ModelMaker.cuh"
#include <cmath>
#include <time.h>
#include <string>
#include <vector>
using namespace std;

// Running offset into the residual/feature buffers, advanced by the
// PROJ_HIST_SPAM macro (defined in 168ModelMaker.cuh) as sub-models are added.
int residual_offset = 0;
// Grid/block configuration shared with the .cuh header.
// NOTE(review): `extern` together with an initializer is unusual; presumably
// intended as a definition of variables declared extern in the header — confirm.
extern uint3 blocks = { 30, 30, 1 };
extern uint3 threads = { 1, 1, 1 };

// Builds the first-order sub-models: registers two feature entries
// (s1_spam14_R / s1_spam14_U) in host_features and invokes the
// PROJ_HIST_SPAM macro (from 168ModelMaker.cuh) on the matching residuals.
// NOTE(review): the macro presumably captures locals such as shift_offset,
// streams, q, src_width — do not rename them without checking the header.
void make_models_1st(float ** host_dev_residuals, float* dev_kernels, int*offsets, int* sizes, int* shifts, cudaStream_t streams[], PSRM_Features &host_features, int q, int src_width, int src_height)
{
    residual_offset = 0;
    const int shift_offset = 3;
    // Reset the feature bookkeeping; indices are pre-incremented below.
    host_features.last_index = -1;
    host_features.submodel_index = -1;
    host_features.index[++host_features.last_index] = 1;
    host_features.sub_model_index[++host_features.submodel_index] = host_features.last_index;
    host_features.submodel[host_features.last_index] = 1;
    strcpy(host_features.name[host_features.last_index], "s1_spam14_R");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_1st::R]);
    //return;
    host_features.index[++host_features.last_index] = 2;
    host_features.submodel[host_features.last_index] = 2;
    strcpy(host_features.name[host_features.last_index], "s1_spam14_U");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_1st::U]);
    return;
}

// Builds the third-order sub-models (s3_spam14_R / s3_spam14_U); same
// registration pattern as make_models_1st but with shift_offset = 5.
void make_models_3st(float ** host_dev_residuals, float* dev_kernels, int*offsets, int* sizes, int* shifts, cudaStream_t streams[], PSRM_Features &host_features, int q, int src_width, int src_height)
{
    //return;
    const int shift_offset = 5;
    host_features.index[++host_features.last_index] = 1;
    host_features.sub_model_index[++host_features.submodel_index] = host_features.last_index;
    host_features.submodel[host_features.last_index] = 3;
    strcpy(host_features.name[host_features.last_index], "s3_spam14_R");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_3st::R_]);
    host_features.index[++host_features.last_index] = 2;
    host_features.submodel[host_features.last_index] = 4;
    strcpy(host_features.name[host_features.last_index], "s3_spam14_U");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_3st::U_]);
    return;
}

// Builds the 2x2 sub-models (H / V / DMaj variants of s2x2_spam14).
void make_models_2x2(float ** host_dev_residuals, float* dev_kernels, int*offsets, int* sizes, int* shifts, cudaStream_t streams[], PSRM_Features &host_features, int q, int src_width, int src_height)
{
    //return;
    const int shift_offset = 3;
    host_features.index[++host_features.last_index] = 1;
    host_features.sub_model_index[++host_features.submodel_index] = host_features.last_index;
    host_features.submodel[host_features.last_index] = 5;
    strcpy(host_features.name[host_features.last_index], "s2x2_spam14_H");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_2x2::Dh]);
    host_features.index[++host_features.last_index] = 2;
    host_features.submodel[host_features.last_index] = 6;
    strcpy(host_features.name[host_features.last_index], "s2x2_spam14_V");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_2x2::Dv]);
    host_features.index[++host_features.last_index] = 3;
    host_features.submodel[host_features.last_index] = 7;
    strcpy(host_features.name[host_features.last_index], "s2x2_spam14_DMaj");
    PROJ_HIST_SPAM(host_dev_residuals[RESIDUALS_2x2::Dd]);
    return;
}

// One block per projection kernel (blockIdx identifies the kernel), single
// thread per block. Loads the kernel's 4 mirror variants into registers,
// convolves them against the residual image at sub-sampled positions, and
// writes a 2-bin |response| histogram (edges at q and 2q) to out_hist.
// NOTE(review): kernel_index = blockIdx.x * gridDim.x + blockIdx.y is only a
// valid bijection because the launch grid is square (30x30); the conventional
// row-major form would use gridDim.y (or swap the operands) — confirm intent.
__global__ void proj_hist_spam(float* first_residual, float* kernels, int*offsets, int* sizes, int* shifts, int* out_hist, int q, int shift_offset, int src_width, int src_height)
{
    //return;
    int hist0 = 0, hist1 = 0;
    // Up to 8x8 coefficients for each of the 4 symmetry variants.
    float gauss_kernels[4][8][8];
    int kernel_index = blockIdx.x * gridDim.x + blockIdx.y;
    int kernel_offset = offsets[kernel_index];
    int kernel_rows = sizes[kernel_index * 2 + 0];
    int kernel_cols = sizes[kernel_index * 2 + 1];
    int kernel_size = kernel_rows * kernel_cols;
    int shift_y = shifts[kernel_index * 2 + 0];
    int shift_x = shifts[kernel_index * 2 + 1];
    int row_offset = 0;
    float sums, tmp = 0;
    int img_offset = 0;
    int i, j, w, z, m, n;
    int end_x = src_width - kernel_cols - 1;
    int end_y = src_height - kernel_rows - 1;
    int bin_edge = q;
    //if (blockIdx.x == 0 && blockIdx.y == 0)printf("\n\n");
    // Variant 0: copy the base kernel from global memory.
    for (j = 0; j < kernel_rows; j++)
    {
        row_offset = kernel_offset + j * kernel_cols;
        for (z = 0; z < kernel_cols; z++)
        {
            gauss_kernels[0][j][z] = kernels[row_offset + z];
            //if (blockIdx.x == 0 && blockIdx.y == 0)printf(" %f", gauss_kernels[0][j][z]);
        }
    }
    //if (blockIdx.x == 0 && blockIdx.y == 0)printf("\n\n");
    // Variant 1 is stored contiguously after variant 0.
    kernel_offset = kernel_offset + kernel_size;
    for (j = 0; j < kernel_rows; j++)
    {
        row_offset = kernel_offset + j * kernel_cols;
        for (z = 0; z < kernel_cols; z++)
        {
            gauss_kernels[1][j][z] = kernels[row_offset + z];
            //if (blockIdx.x == 0 && blockIdx.y == 0)printf(" %f", gauss_kernels[1][j][z]);
        }
    }
    //if (blockIdx.x == 0 && blockIdx.y == 0)printf("\n\n");
    // Variant 2.
    kernel_offset = kernel_offset + kernel_size;
    for (j = 0; j < kernel_rows; j++)
    {
        row_offset = kernel_offset + j * kernel_cols;
        for (z = 0; z < kernel_cols; z++)
        {
            gauss_kernels[2][j][z] = kernels[row_offset + z];
            //if (blockIdx.x == 0 && blockIdx.y == 0)printf(" %f", gauss_kernels[2][j][z]);
        }
    }
    //if (blockIdx.x == 0 && blockIdx.y == 0)printf("\n\n");
    // Variant 3.
    kernel_offset = kernel_offset + kernel_size;
    for (j = 0; j < kernel_rows; j++)
    {
        row_offset = kernel_offset + j * kernel_cols;
        for (z = 0; z < kernel_cols; z++)
        {
            gauss_kernels[3][j][z] = kernels[row_offset + z];
            //if (blockIdx.x == 0 && blockIdx.y == 0)printf(" %f", gauss_kernels[3][j][z]);
        }
    }
    // Pass 1 of 4: base shift, variant 0.
    // NOTE(review): in all four passes below, `j` is NOT reset at the top of
    // each outer (i) iteration, so the inner loop only runs on the first row
    // visited per pass. This looks like a bug (j should be re-initialised
    // inside the i-loop) — confirm against the reference implementation.
    w = 0;
    i = shift_y;
    j = shift_x;
    for (; i < end_y; i+= 8)//+3 for 4*4 kernel
    {
        for (; j < end_x; j+= 8)//+3 for 4*4 kernel
        {
            img_offset = i * src_width;
            sums = 0;
            for (m = 0; m < kernel_rows; m++)
            {
                for (n = 0; n < kernel_cols; n++)
                {
                    tmp = first_residual[img_offset + m * src_width + j + n];
                    sums += (tmp * gauss_kernels[w][m][n]);
                }
            }
            // Two-bin histogram of the absolute projection value.
            // (tmp >= 0 is always true after abs; kept for symmetry.)
            tmp = abs(sums);
            //if (i < 100 && j < 100)printf(" %f", tmp);
            if (tmp >= 0 && tmp < bin_edge) { hist0 += 1; }
            else if (tmp >= bin_edge && tmp < 2 * bin_edge) { hist1 += 1; }
        }
    }
    // Pass 2: vertically mirrored start offset, variant 1.
    w++;
    i = 1 - shift_y - kernel_rows + shift_offset;
    i = i < 0 ? 0 : i;
    j = shift_x ;
    for (; i < end_y; i += 8)//+3 for 4*4 kernel
    {
        for (; j < end_x; j += 8)//+3 for 4*4 kernel
        {
            img_offset = i * src_width;
            sums = 0;
            for (m = 0; m < kernel_rows; m++)
            {
                for (n = 0; n < kernel_cols; n++)
                {
                    tmp = first_residual[img_offset + m * src_width + j + n];
                    sums += (tmp * gauss_kernels[w][m][n]);
                }
            }
            tmp = abs(sums);
            if (tmp >= 0 && tmp < bin_edge) { hist0 += 1; }
            else if (tmp >= bin_edge && tmp < 2 * bin_edge) { hist1 += 1; }
        }
    }
    // Pass 3: horizontally mirrored start offset, variant 2.
    w++;
    i = shift_y;
    j = 1 - shift_x - kernel_cols + shift_offset;
    j = j < 0 ? 0 : j;
    for (; i < end_y; i += 8)//+3 for 4*4 kernel
    {
        for (; j < end_x; j += 8)//+3 for 4*4 kernel
        {
            img_offset = i * src_width;
            sums = 0;
            for (m = 0; m < kernel_rows; m++)
            {
                for (n = 0; n < kernel_cols; n++)
                {
                    tmp = first_residual[img_offset + m * src_width + j + n];
                    sums += (tmp * gauss_kernels[w][m][n]);
                }
            }
            tmp = abs(sums);
            if (tmp >= 0 && tmp < bin_edge) { hist0 += 1; }
            else if (tmp >= bin_edge && tmp < 2 * bin_edge) { hist1 += 1; }
        }
    }
    // Pass 4: both mirrors, variant 3.
    w++;
    i = 1 - shift_y - kernel_rows + shift_offset;
    i = i < 0 ? 0 : i;
    j = 1 - shift_x - kernel_cols + shift_offset;
    j = j < 0 ? 0 : j;
    for (; i < end_y; i += 8)//+3 for 4*4 kernel
    {
        for (; j < end_x; j += 8)//+3 for 4*4 kernel
        {
            img_offset = i * src_width;
            sums = 0;
            for (m = 0; m < kernel_rows; m++)
            {
                for (n = 0; n < kernel_cols; n++)
                {
                    tmp = first_residual[img_offset + m * src_width + j + n];
                    sums += (tmp * gauss_kernels[w][m][n]);
                }
            }
            tmp = abs(sums);
            if (tmp >= 0 && tmp < bin_edge) { hist0 += 1; }
            else if (tmp >= bin_edge && tmp < 2 * bin_edge) { hist1 += 1; }
        }
    }
    // Two histogram bins per kernel.
    out_hist[kernel_index * 2 + 0] = hist0;
    out_hist[kernel_index * 2 + 1] = hist1;
}
13,541
#include <stdio.h>

#define N 256 // size of vectors
#define T 64  // number of threads per block

// Kernel: each thread writes its own global index into A[i].
// Guarded so over-provisioned grids are safe.
__global__ void vecAdd(int *A){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) A[i] = i;
}

// Fixes vs. original:
//  * removed the host-to-device copy of an *uninitialized* host array —
//    the kernel fully overwrites devA, so the copy only shipped garbage;
//  * removed the unused srand(1234) call;
//  * grid size is computed with ceiling division instead of hard-coded 4,
//    so changing N or T keeps the launch correct.
int main(int argc, char *argv[]){
    int a[N], *devA;
    int size = N * sizeof(int);

    cudaMalloc((void**)&devA, size);

    vecAdd<<<(N + T - 1) / T, T>>>(devA);

    cudaMemcpy(a, devA, size, cudaMemcpyDeviceToHost);
    cudaFree(devA);

    for (int i = 0; i < N; i++){
        printf("%d ", a[i]);
    }
    printf("\n");
    return 0;
}
13,542
/* Print all basic device informations you could need. */
#include <cstdio>
#include <cuda_runtime_api.h>

// To prevent calls from errors
// Wraps a CUDA runtime call: on failure, reports file/line and aborts.
#define CUDA_SAFE_CALL_NO_SYNC(x) \
do { \
    cudaError_t err = x; \
    if (err != cudaSuccess) { \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
                __FILE__, __LINE__, cudaGetErrorString( err) ); \
        exit(EXIT_FAILURE); \
    } \
} while(0)

// Enumerates every CUDA device and prints its hardware limits, memory sizes,
// clock rates and capability flags. Exits with failure if no device exists.
void deviceInfo()
{
    int deviceCount;
    cudaDeviceProp deviceProp;
    CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
    if (deviceCount == 0) {
        fprintf(stderr, "There is no device.\n");
        exit(EXIT_FAILURE);
    }
    for(int dev=0 ; dev < deviceCount ; dev++){
        CUDA_SAFE_CALL_NO_SYNC( cudaGetDeviceProperties(&deviceProp, dev) );
        printf(" Device Number: %d\n" , dev);
        printf(" Name: %s\n" , deviceProp.name);
        //printf(" Unique identifier %lu\n" , deviceProp.uuid);
        printf(" Total Global memory (bytes) %-lu\n" , deviceProp.totalGlobalMem);
        printf(" Shared memory per block (bytes) %-10lu\n" , deviceProp.sharedMemPerBlock);
        printf(" Maximum 32-bits registers per block %d\n" , deviceProp.regsPerBlock);
        printf(" Warp size (threads) %d\n" , deviceProp.warpSize);
        printf(" Max threads per block %d\n" , deviceProp.maxThreadsPerBlock);
        printf(" Max threads in x dimension %d\n" , deviceProp.maxThreadsDim[0]);
        printf(" Max threads in y dimension %d\n" , deviceProp.maxThreadsDim[1]);
        printf(" Max threads in z dimension %d\n" , deviceProp.maxThreadsDim[2]);
        printf(" Where x*y*z <= 1024\n" );
        printf(" Max grid size x (block) %d\n" , deviceProp.maxGridSize[0]);
        printf(" Max grid size y (block) %d\n" , deviceProp.maxGridSize[1]);
        printf(" Max grid size z (block) %d\n" , deviceProp.maxGridSize[2]);
        printf(" Clock rate (kHZ) %d\n" , deviceProp.clockRate);
        printf(" Total constant memory (bytes) %lu\n" , deviceProp.totalConstMem);
        printf(" Major %d\n" , deviceProp.major);
        printf(" Minor %d\n" , deviceProp.minor);
        printf(" Multiprocessor count %d\n" , deviceProp.multiProcessorCount);
        printf(" Run time limit for kernel execution %d\n" , deviceProp.kernelExecTimeoutEnabled );
        printf(" Integrated GPU (motherboard) %d\n" , deviceProp.integrated);
        printf(" Mapping host mem into CUDA adress space %d\n" , deviceProp.canMapHostMemory);
        printf(" Compute mode %d\n" , deviceProp.computeMode);
        printf(" Memory Clock Rate (KHz): %d\n" , deviceProp.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n" , deviceProp.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n" , 2.0*deviceProp.memoryClockRate*(deviceProp.memoryBusWidth/8)/1.0e6); //Double rate memory explain the x2
        printf(" L2 cache size (bytes) %d\n" , deviceProp.l2CacheSize);
        printf(" Max resident threads per multiprocessor %d\n" , deviceProp.maxThreadsPerMultiProcessor);
        printf(" Supports caching globals in L1 %d\n" , deviceProp.globalL1CacheSupported);
        printf(" Supports caching locals in L1 %d\n" , deviceProp.localL1CacheSupported);
        printf(" Shared memory per MP (bytes) %lu\n" , deviceProp.sharedMemPerMultiprocessor); //Shared by all threads blocks simultaneously resident on a multiprocessor
        printf(" 32-bits registers per MP %d\n" , deviceProp.regsPerMultiprocessor); //Shared by all threads blocks simultaneously resident on a multiprocessor
    }
}

// Entry point: just dumps the device report.
int main(int argc, char *argv[]){
    deviceInfo();
    return 0;
}
13,543
/* Copyright 2015 Thomas Luu Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* File: plog.cu Computation of the Lambert W-function by Halley's Method. Single and double precision implementations. Initial guesses based on: D.A. Barry, J.-Y. Parlange, L. Li, H. Prommer, C.J. Cunningham, and F. Stagnitti. Analytical approximations for real values of the Lambert W-function. Mathematics and Computers in Simulation, 53(1):95-103, 2000. D.A. Barry, J.-Y. Parlange, L. Li, H. Prommer, C.J. Cunningham, and F. Stagnitti. Erratum to analytical approximations for real values of the Lambert W-function. Mathematics and computers in simulation, 59(6):543-543, 2002. 
*/

#ifndef PLOG
#define PLOG

/*
 * plog(x): real branch of the Lambert W function (the w solving w*e^w = x),
 * refined by Halley's method from the Barry et al. initial guesses cited in
 * the file header. Valid for x >= -1/e; returns 0 at x == 0.
 *
 * Double-precision overload: iterates until the relative step is below
 * ~sqrt(DBL_EPSILON).
 */
__host__ __device__ double plog(double x)
{
  if (x == 0.0) {
    return 0.0;
  }

  double w0, w1;
  if (x > 0.0) {
    // Asymptotic-log initial guess for positive x.
    w0 = log(1.2 * x / log(2.4 * x / log1p(2.4 * x)));
  } else {
    // Series-based guess near the branch point x = -1/e.
    double v = 1.4142135623730950488 * sqrt(1 + 2.7182818284590452354 * x);
    double N2 = 10.242640687119285146 + 1.9797586132081854940 * v;
    double N1 = 0.29289321881345247560 * (1.4142135623730950488 + N2);
    w0 = -1.0 + v * (N2 + v) / (N2 + v + N1 * v);
  }

  while (true) {
    // Halley update for f(w) = w*e^w - x.
    double e = exp(w0);
    double f = w0 * e - x;
    w1 = w0 + ((f+f) * (1.0 + w0)) / (f * (2.0 + w0) - (e+e) * (1.0 + w0) * (1.0 + w0));
    if (fabs(w0 / w1 - 1.0) < 1.4901161193847656e-8) {
      break;
    }
    w0 = w1;
  }

  return w1;
}

/*
 * Single-precision overload.
 *
 * Fix vs. original: the math calls are now the explicit single-precision
 * functions (logf, log1pf, sqrtf, expf, fabsf) and the literal under sqrtf
 * is 1.0f. The original relied on C++ overload resolution / implicit
 * promotion, which silently computes in double on some call paths — costly
 * on GPUs and inconsistent with the float tolerance used here.
 */
__host__ __device__ float plog(float x)
{
  if (x == 0.0f) {
    return 0.0f;
  }

  float w0, w1;
  if (x > 0.0f) {
    w0 = logf(1.2f * x / logf(2.4f * x / log1pf(2.4f * x)));
  } else {
    float v = 1.4142135623730950488f * sqrtf(1.0f + 2.7182818284590452354f * x);
    float N2 = 10.242640687119285146f + 1.9797586132081854940f * v;
    float N1 = 0.29289321881345247560f * (1.4142135623730950488f + N2);
    w0 = -1.0f + v * (N2 + v) / (N2 + v + N1 * v);
  }

  while (true) {
    float e = expf(w0);
    float f = w0 * e - x;
    w1 = w0 + ((f+f) * (1.0f + w0)) / (f * (2.0f + w0) - (e+e) * (1.0f + w0) * (1.0f + w0));
    // Tolerance ~ sqrt(FLT_EPSILON).
    if (fabsf(w0 / w1 - 1.0f) < 0.00034526698300124390840f) {
      break;
    }
    w0 = w1;
  }

  return w1;
}

#endif
13,544
#include "includes.h"

// Tree reduction: sums the first N elements of da in place and writes the
// total to ans_device[0]. da is mutated (used as scratch).
//
// Preconditions (from the indexing below — confirm at call sites):
//  * N must be a power of two, otherwise elements at odd cut points are
//    dropped by the halving stride;
//  * correctness relies on __syncthreads(), which only synchronizes within
//    one block — NOTE(review): with a multi-block launch, threads of
//    different blocks race on da; presumably launched as a single block
//    covering N threads — verify.
__global__ void accumulate(float *da, float* ans_device, int N){
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // Global thread index; doubles as the element this thread owns.
    int idx = bx * blockDim.x + tx;
    //printf("%d\n", idx);
    for(int stride = N / 2; stride > 0; stride >>= 1){
        // Lower half accumulates the upper half at each level.
        if(idx < stride){
            da[idx] = da[idx] + da[idx + stride];
        }
        // Barrier is outside the divergent if, so every thread reaches it —
        // required before the next level reads the freshly written sums.
        __syncthreads();
    }
    // Thread 0 publishes the final total.
    if(idx == 0){
        ans_device[0] = da[idx];
        //printf("ans 0: %f\n", ans_device[0]);
    }
}
13,545
#include <cassert>
#include <cstdio>

#define N 3

// Increment one array slot per block (grid of N single-thread blocks).
// The printf only exists in the device compilation pass.
__global__ void inc(int *a) {
  int slot = blockIdx.x;
  if (slot < N) {
    a[slot]++;
  }
#ifdef __CUDA_ARCH__
  printf ("Hello World! From device b.t %d.%d\n", slot, threadIdx.x);
#endif
}

// Smoke test: round-trip an array through the GPU, increment it there,
// and assert every element came back incremented.
int main() {
  int host_vals[N];
  int *dev_vals = NULL;

  cudaMalloc((void **)&dev_vals, N * sizeof(int));

  for (int k = 0; k < N; ++k) {
    host_vals[k] = k;
  }

  cudaMemcpy(dev_vals, host_vals, N * sizeof(int), cudaMemcpyHostToDevice);
  inc<<<N, 1>>>(dev_vals);
  cudaMemcpy(host_vals, dev_vals, N * sizeof(int), cudaMemcpyDeviceToHost);

  for (int k = 0; k < N; ++k) {
    assert(host_vals[k] == k + 1);
  }

  cudaFree(dev_vals);
  printf("All asserts pass - looks like cuda is working!\n");
  return 0;
}
13,546
#include <cuda_runtime.h>
#include <curand_mtgp32_kernel.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <cmath>
#include <cstdlib>  // rand, srand — previously pulled in only transitively
#include <ctime>    // time, clock, CLOCKS_PER_SEC

#define MY_THREAD_NUM 512
#define BLOCK_NUM 32
#define MY_RAND_MAX 32767
#define MATRIX_SIZE 1000

// One thread per OUTPUT ELEMENT of C (n*n elements total).
// Fix vs. original: the grid covered only MATRIX_SIZE threads, so with
// n = 1000 only rows 0-1 of C were ever computed; the grid must cover n*n.
const int blocks_num = (MATRIX_SIZE * MATRIX_SIZE + MY_THREAD_NUM - 1) / MY_THREAD_NUM;

// n x n matrix multiply, A * B = C, one thread per element of C.
// Uses Kahan (compensated) summation to reduce rounding error in the
// length-n dot product.
// Fix vs. original: the code referenced an undefined macro THREAD_NUM
// (the defined one is MY_THREAD_NUM), so it did not compile.
__global__ static void MatrixMulCUDA(const float *a, const float *b, float *c, int n)
{
    const int tid = threadIdx.x;              // thread index within the block
    const int bid = blockIdx.x;               // block index within the grid
    const int idx = bid * MY_THREAD_NUM + tid;  // flat output-element index
    // Derive the row of A and column of B this thread is responsible for.
    const int row = idx / n;
    const int col = idx % n;

    if (row < n && col < n) {
        // Kahan summation: t is the running sum, y carries the rounding error.
        float t = 0;
        float y = 0;
        for (int i = 0; i < n; i++) {
            float r;
            y -= a[row * n + i] * b[i * n + col];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        c[row * n + col] = t;
    }
}

// Fill an n x n matrix with random values in roughly [0, 1].
void RandomMatrix(float *A, int n)
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            A[i * n + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX * RAND_MAX);
        }
    }
}

// Runs the GPU multiply, times it with CUDA events, repeats the multiply on
// the CPU for reference, and reports max/average relative error and speedup.
void MatrixMul()
{
    // Host matrices: a, b inputs; c = GPU result; d = CPU reference.
    float *a, *b, *c, *d;
    int n = MATRIX_SIZE;

    a = (float*)malloc(sizeof(float) * n * n);
    b = (float*)malloc(sizeof(float) * n * n);
    c = (float*)malloc(sizeof(float) * n * n);
    d = (float*)malloc(sizeof(float) * n * n);

    srand(time(NULL));
    RandomMatrix(a, n);
    RandomMatrix(b, n);

    // Device buffers.
    float *cuda_a, *cuda_b, *cuda_c;
    cudaMalloc((void**)&cuda_a, sizeof(float) * n * n);
    cudaMalloc((void**)&cuda_b, sizeof(float) * n * n);
    cudaMalloc((void**)&cuda_c, sizeof(float) * n * n);

    // Copy the inputs host -> device.
    cudaMemcpy(cuda_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice);

    // Time the kernel with a start/stop event pair.
    float time_elapsed = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    MatrixMulCUDA<<<blocks_num, MY_THREAD_NUM, 0>>>(cuda_a, cuda_b, cuda_c, n);
    cudaEventRecord(stop, 0);

    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);  // wait for all work recorded before 'stop'

    // Copy the result device -> host.
    cudaMemcpy(c, cuda_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost);

    cudaEventElapsedTime(&time_elapsed, start, stop);  // milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cudaFree(cuda_a);
    cudaFree(cuda_b);
    cudaFree(cuda_c);

    printf("Matrix multiply GPU time: %.10f\n", time_elapsed);

    // CPU reference multiply (accumulate in double for a tighter baseline).
    clock_t start_time = clock();
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            double temp = 0;
            for (int k = 0; k < n; k++) {
                temp += a[i * n + k] * b[k * n + j];
            }
            d[i * n + j] = temp;
        }
    }
    clock_t end_time = clock();

    // Compare GPU vs. CPU results element-wise (relative error).
    float max_error = 0.0, average_error = 0;
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (d[i * n + j] != 0) {
                float err = fabs((c[i * n + j] - d[i * n + j]) / d[i * n + j]);
                if (max_error < err) max_error = err;
                average_error += err;
            }
        }
    }

    double cpu_time = (double)(end_time - start_time) / CLOCKS_PER_SEC * 1000.0;
    printf("Matrix multiply CPU time: %.10f\n", cpu_time);
    printf("Max error: %.10f Average error: %.10f\n", max_error, average_error / (n * n));
    printf("%.10f\n", cpu_time / time_elapsed);

    free(a);
    free(b);
    free(c);
    free(d);
}
13,547
#include <stdio.h>
#include <stdlib.h>

/* compile: nvcc vecAdd.cu -o vecAdd */

// Element-wise vector add: C[i] = A[i] + B[i].
// No tail guard — the launch must cover exactly the vector length
// (true below: 16384 is a multiple of the 32-thread blocks).
__global__ void add_vec(float *A, float *B, float *C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    C[i] = A[i] + B[i];
}

// Fill V with 0, 1, 2, ... (small integers, exactly representable as float).
void init(float *V, int vec_size)
{
    for (int i = 0; i < vec_size; i++) {
        V[i] = i;
    }
}

// Check C against A + B element-wise and report the first mismatch.
// Fix vs. original: the failure printf passed float arguments to %d,
// which is undefined behavior; %f is the correct conversion.
void verify(float *A, float *B, float *C, int vec_size)
{
    for (int i = 0; i < vec_size; i++) {
        if (A[i] + B[i] != C[i]) {
            printf("Verification failed! A[%d] = %f, B[%d] = %f, C[%d] = %f\n",
                   i, A[i], i, B[i], i, C[i]);
            return;
        }
    }
    printf("Verification success!\n");
}

int main()
{
    int vec_size = 16384;
    float *A = (float*)malloc(sizeof(float) * vec_size);
    float *B = (float*)malloc(sizeof(float) * vec_size);
    float *C = (float*)malloc(sizeof(float) * vec_size);

    init(A, vec_size);
    init(B, vec_size);

    // Memory objects of the device
    float *d_A, *d_B, *d_C;
    size_t mem_obj_size = sizeof(float) * vec_size;

    // allocate memory objects d_A, d_B, and d_C.
    cudaMalloc(&d_A, mem_obj_size);
    cudaMalloc(&d_B, mem_obj_size);
    cudaMalloc(&d_C, mem_obj_size);

    // Copy "A" to "d_A" and copy "B" to "d_B" (host to device).
    cudaMemcpy(d_A, A, mem_obj_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, mem_obj_size, cudaMemcpyHostToDevice);

    // Launch: 32 threads per block, vec_size/32 blocks — exact coverage,
    // which is why the kernel needs no bounds check.
    dim3 dim_block(32, 1);
    dim3 dim_grid(vec_size / 32, 1);
    add_vec<<< dim_grid, dim_block >>> (d_A, d_B, d_C);

    // Copy "d_C" to "C" (device to host); cudaMemcpy blocks until the
    // kernel has finished.
    cudaMemcpy(C, d_C, mem_obj_size, cudaMemcpyDeviceToHost);

    verify(A, B, C, vec_size);

    // Release d_A, d_B, and d_C.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
13,548
#include "includes.h"

// For each walker i < nwl, pulls the entry at slot (dim + 2) out of that
// walker's fixed-size record in lst (records are dim+4 floats long) and
// stores it into stt[i].
__global__ void setChiAtLast ( const int dim, const int nwl, const float *lst, float *stt )
{
  const int walker = blockIdx.x * blockDim.x + threadIdx.x;
  if ( walker >= nwl ) return;
  const int record_len = dim + 1 + 1 + 1 + 1;   // stride between walker records
  stt[walker] = lst[walker * record_len + dim + 2];
}
13,549
/* * sequencealign_parallel.cu * * IMPORTANT: * * The final version of this code has been developed by Mustafa ACIKARAOĞLU, Mustafa SARAÇ, * Mustafa Mert ÖGETÜRK as term project of Parallel Programming (COMP 429) course. * Koç University's code of ethics can be applied to this code and liability can not be * accepted for any negative situation. Therefore, be careful when you get content from here. * * This parallel version of Sequence Alignment code has been * implemented using the source specified in the link below. * * Reference: * Implementation of Sequence Alignment in C++ * URL: <https://codereview.stackexchange.com/questions/97825/implementation-of-sequence-alignment-in-c> * * NOTE: * * THIS SOURCE CODE CONTAINS TWO VERSIONS OF * PARALLELIZATION PROCESS. * * FIRST VERSION: * ONLY TWO PART OF THE SERIAL IMPLEMENTATION IS PARALLELIZED * IN A SUCCESSFUL WAY. THIS IMPLEMENTATION CONTAINS * 'alphabet_matching_penalty' AND 'array_filling_1' FUNCTIONS. * * SECOND VERSION: * AS WE MENTIONED IN THE FINAL REPORT, WE TRIED TO IMPLEMENT * THE PARALLELIZED VERSION OF THE PART OF THE 'align' FUNCTION * FROM THE SERIAL IMPLEMENTATION, WHICH DIAGONALLY TRAVERSES * THROUGH THE END OF THE ARRAY BY FINDING THE MINIMUM OF THE * CURRENT INDEX'S LEFT, TOP AND LEFT-TOP INDEXES. * HOWEVER, BECAUSE OF THE RACE CONDITION, WE TRIED TO FOLLOW * MANY DIFFERENT WAYS TO SOLVE THIS ISSUE, BUT WE COULD NOT * SUCCEED IT. THEREFORE, WE COMMENTED OUT ALL OF THE CODE THAT * IS RELATED TO THE 'align_filling_2 kernel'. * * * For more detailed questions you can review our project report. 
 *
 * You can also contact me at this email address: msarac13@ku.edu.tr
 *
 */

#include <iostream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <sstream>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>

using namespace std;

const size_t alphabets = 26;
static const double kMicro = 1.0e-6;

/*
 * Returns the current time
 */
double get_time();

/*
 * Loading a file into a char array
 */
char* load_file(char const* path);

/*
 * alpha_d[i][j] = penalty for matching the ith alphabet with the
 * jth alphabet.
 * Here: Penalty for matching an alphabet with anoter one is 1
 * Penalty for matching an alphabet with itself is 0
 */
__global__ void alphabet_matching_penalty(int *alpha_d);

/*
 * Returns the minimum integer
 */
int min(int a, int b, int c);

/*
 * Filling the first row and the first
 * column of the array based on the gap
 * penalty, which is equal to 2.
 */
__global__ void align_filling_1(size_t n, size_t m, int *A, int alpha_gap);

/*
 * COMMENTED OUT:
 *
 * Align_filling_2 is diagonally traversing by
 * finding the minimum value among the current
 * index's left, top and left-top indexes
 * through the end of the array.
__global__ void align_filling_2(size_t n, size_t m, char* input_1_d, char* input_2_d, int *alpha_d, int *A, int alpha_gap);
*/

// Needleman-Wunsch alignment of two DNA sequences read from files.
// GPU kernels build the penalty table and seed the DP array borders; the DP
// fill and traceback run on the host.
//
// NOTE(review): array_h is allocated as (n+1) x (m+1) ints, but every access
// below uses row stride n (array_h[i * n + j]) instead of (m+1). Rows
// therefore overlap unless n == m+1; this looks like a systematic indexing
// bug inherited from the serial version — verify against the reference.
int main()
{
    double time_0, time_1, time_2, time_3, time_4, time_5;
    int *alpha_h, *alpha_d, *array_h, *array_d;
    char *input_1, *input_2;
    string a_aligned, b_aligned;

    /*
     * COMMENTED OUT:
     *
     * Device char arrays that will be used in the
     * align_filling_2 kernel.
    char *input_1_d, *input_2_d;
    */

    time_0 = get_time();

    // Reading the input strings that need to be aligned
    input_1 = load_file("DNA_Sequence_1.txt");
    input_2 = load_file("DNA_Sequence_2.txt");

    size_t n = strlen(input_1);
    size_t m = strlen(input_2);

    // Penalty for any alphabet matched with a gap
    int gap_penalty = 2;

    // Allocation
    alpha_h = (int *) malloc(sizeof(int) * alphabets * alphabets);
    array_h = (int *) malloc(sizeof(int) * (n + 1) * (m + 1));

    if(cudaSuccess != cudaMalloc((void**) &array_d, sizeof(int) * (n + 1) * (m + 1))){
        cout << "Cuda Malloc error for array_d." << endl;
    }
    if(cudaSuccess != cudaMalloc((void**) &alpha_d, sizeof(int) * alphabets * alphabets)){
        cout << "Cuda Malloc error for alpha_d." << endl;
    }

    /*
     * COMMENTED OUT:
     *
     * Memory Allocations for the arrays that will be used in the
     * align_filling_2 kernel.
     *
    if(cudaSuccess != cudaMalloc((void**) &input_1_d, sizeof(int) * n)){
        cout << "Cuda Malloc error for input_1_d." << endl;
    }
    if(cudaSuccess != cudaMalloc((void**) &input_2_d, sizeof(int) * m)){
        cout << "Cuda Malloc error for input_2_d." << endl;
    }
    */

    // MEMORY COPYING FROM HOST TO THE DEVICE
    // NOTE(review): array_h/alpha_h are uninitialized at this point; these
    // copies ship garbage that the kernels immediately overwrite.
    if(cudaSuccess != cudaMemcpy(array_d, array_h, sizeof(int) * (n + 1) * (m + 1), cudaMemcpyHostToDevice)){
        cout << "Cuda Memory Copying error from array_h to array_d." << endl;
    }
    if(cudaSuccess != cudaMemcpy(alpha_d, alpha_h, sizeof(int) * alphabets * alphabets, cudaMemcpyHostToDevice)){
        cout << "Cuda Memory Copying error from alpha_h to alpha_d." << endl;
    }

    /*
     * COMMENTED OUT:
     *
     * Memory Copying from Host to Device for the arrays that will be used
     * in the align_filling_2 kernel.
     *
    if(cudaSuccess != cudaMemcpy(input_1_d, input_1, sizeof(int) * n, cudaMemcpyHostToDevice)){
        cout << "Cuda Memory Copying error from input_1 to input_1_d." << endl;
    }
    if(cudaSuccess != cudaMemcpy(input_2_d, input_2, sizeof(int) * m, cudaMemcpyHostToDevice)){
        cout << "Cuda Memory Copying error from input_2 to input_2_d." << endl;
    }
    */

    time_1 = get_time();

    // Build the 26x26 substitution-penalty table on the device.
    dim3 threads(alphabets, alphabets, 1);
    dim3 grid(alphabets * alphabets/threads.x, alphabets * alphabets/threads.y);
    alphabet_matching_penalty<<<grid, threads>>>(alpha_d);
    cudaDeviceSynchronize();
    if(cudaGetLastError() != cudaSuccess){
        cout << "Kernel alphabet_matching_penalty was not launched." << endl;
    }

    time_2 = get_time();

    // Seed the first row/column of the DP array.
    // NOTE(review): grid1 uses floor division, so the last partial tile is
    // dropped when n or m is not a multiple of 16 — confirm input sizes.
    dim3 threads1(16, 16, 1);
    dim3 grid1(n / threads1.x, m / threads1.y);
    align_filling_1<<<grid1,threads1>>>(n, m, array_d, gap_penalty);
    cudaDeviceSynchronize();
    if(cudaGetLastError() != cudaSuccess){
        cout << threads1.x << endl;
        cout << threads1.y << endl;
        cout << "Kernel filling_1 was not launched." << endl;
    }

    time_3 = get_time();

    /*
     * COMMENTED OUT:
     *
     * Memory Allocations for the arrays that will be used in the
     * align_filling_2 kernel.
     *
    align_filling_2<<<grid1,threads1>>>(n, m, input_1_d, input_2_d, alpha_d, array_d, gap_penalty);
    cudaDeviceSynchronize();
    if(cudaGetLastError() != cudaSuccess){
        cout << threads1.x << endl;
        cout << threads1.y << endl;
        cout << "Kernel filling_2 was not launched." << endl;
    }
    time_4 = get_time();
    */

    // MEMORY COPYING FROM DEVICE TO THE HOST
    if(cudaSuccess != cudaMemcpy(array_h, array_d, sizeof(int) * (n + 1) * (m + 1), cudaMemcpyDeviceToHost)){
        cout << "Cuda Memory Copying error from array_d to array_h." << endl;
    }
    if(cudaSuccess != cudaMemcpy(alpha_h, alpha_d, sizeof(int) * alphabets * alphabets, cudaMemcpyDeviceToHost)){
        cout << "Cuda Memory Copying error from alpha_d to alpha_h." << endl;
    }

    /*
     * COMMENTED OUT:
     *
     * Memory Copying from Device to Host for the arrays, which were
     * used in the align_filling_2 kernel,
     *
    if(cudaSuccess != cudaMemcpy(input_1_d, input_1, sizeof(int) * n, cudaMemcpyDeviceToHost)){
        cout << "Cuda Memory Copying error from input_1 to input_1_d." << endl;
    }
    if(cudaSuccess != cudaMemcpy(input_2_d, input_2, sizeof(int) * m, cudaMemcpyDeviceToHost)){
        cout << "Cuda Memory Copying error from input_2 to input_2_d." << endl;
    }
    */

    /*
     * After the first filling step is finished,
     * the function is diagonally traversing by
     * finding the minimum value among the current
     * index's left, top and left-top indexes
     * through the end of the array.
     */
    // NOTE(review): row stride should presumably be (m + 1), not n — see the
    // note on main() above.
    for (size_t i = 1; i <= n; ++i) {
        for (size_t j = 1; j <= m; ++j) {
            char x_i = input_1[i-1];
            char y_j = input_2[j-1];
            array_h[i * n + j] = min(array_h[(i-1) * n + (j-1)] + alpha_h[(x_i - 'A') * alphabets + (y_j - 'A')],
                array_h[(i-1) * n + j] + gap_penalty,
                array_h[i * n + (j-1)] + gap_penalty);
        }
    }

    /*
     * After the second filling step is finished,
     * the function is diagonally tracebacking
     * through the beginning of the array and it
     * is generating the output strings, which are
     * the aligned DNA sequences.
     */
    long k;
    size_t i = n;
    size_t j = m;
    for (; i >= 1 && j >= 1; --i) {
        k = i * n + j;
        char x_i = input_1[i-1];
        char y_j = input_2[j-1];
        // Match/mismatch step: consume one character from each sequence.
        if (array_h[k] == array_h[(i-1)*n + (j-1)] + alpha_h[(x_i - 'A') * alphabets + (y_j - 'A')]) {
            a_aligned = x_i + a_aligned;
            b_aligned = y_j + b_aligned;
            --j;
        }
        // Gap in sequence b.
        else if (array_h[k] == array_h[(i-1)*n + j] + gap_penalty) {
            a_aligned = x_i + a_aligned;
            b_aligned = '-' + b_aligned;
        }
        // Gap in sequence a.
        else {
            a_aligned = '-' + a_aligned;
            b_aligned = y_j + b_aligned;
            --j;
        }
    }
    // Flush whatever remains of either sequence against gaps.
    while (i >= 1 && j < 1) {
        a_aligned = input_1[i-1] + a_aligned;
        b_aligned = '-' + b_aligned;
        --i;
    }
    while (j >= 1 && i < 1) {
        a_aligned = '-' + a_aligned;
        b_aligned = input_2[j-1] + b_aligned;
        --j;
    }

    time_4 = get_time();

    /*
     * Needleman Score that represents the similarity
     * between the DNA sequences.
     */
    // NOTE(review): with the (n+1) x (m+1) layout the bottom-right cell is at
    // n * (m + 1) + m, not n * m - 1 — verify.
    int needleman_score = array_h[n * m - 1];

    ofstream outputFile;
    outputFile.open("output_file_cuda_v1.txt");
    outputFile << a_aligned << endl << b_aligned << endl;
    outputFile.close();

    free(alpha_h);
    free(array_h);
    cudaFree(alpha_d);
    cudaFree(array_d);

    /*
     * COMMENTED OUT:
     *
     * Freeing the device arrays
    cudaFree(input_1_d);
    cudaFree(input_2_d);
    */

    time_5 = get_time();

    // print
    printf("Time for mallocs and memcopies: %9.6f s\n", (time_1 - time_0));
    printf("Time for alphabet_matching_penalty: %9.6f s\n", (time_2 - time_1));
    printf("Time for filling_1: %9.6f s\n", (time_3 - time_2));
    printf("Time for filling_2 and get_traceback: %9.6f s\n", (time_4 - time_3));
    printf("Needleman score : %d\n",needleman_score);
    printf("Total time: %9.6f s\n", (time_5 - time_0));

    return 0;
}

// Wall-clock time in seconds (microsecond resolution via gettimeofday).
double get_time()
{
    struct timeval TV;
    struct timezone TZ;
    const int RC = gettimeofday(&TV, &TZ);
    if(RC == -1) {
        printf("ERROR: Bad call to gettimeofday\n");
        return(-1);
    }
    return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
}

// Reads the whole file at `path` into a freshly malloc'd NUL-terminated
// buffer. NOTE(review): if fopen fails, `buffer` stays NULL and
// buffer[length] dereferences it — callers must ensure the file exists.
char* load_file(char const* path)
{
    char* buffer = 0;
    long length;
    FILE * f = fopen (path, "rb"); //was "rb"
    if (f) {
        fseek (f, 0, SEEK_END);
        length = ftell (f);
        fseek (f, 0, SEEK_SET);
        buffer = (char*)malloc ((length+1)*sizeof(char));
        if (buffer) {
            fread (buffer, sizeof(char), length, f);
        }
        fclose (f);
    }
    buffer[length] = '\0';
    if(strlen(buffer) == 1){
        printf("Failed to read the file");
    }
    return buffer;
}

// One thread per (i, j) pair: 0 on the diagonal (match), 1 elsewhere.
__global__ void alphabet_matching_penalty(int *alpha_d)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    long k;
    if(i < alphabets && j < alphabets){
        k = i * alphabets + j;
        if (i == j) {
            alpha_d[k] = 0;
        }
        else {
            alpha_d[k] = 1;
        }
    }
    else {
        return;
    }
}

// Three-way minimum used by the host DP fill.
int min(int a, int b, int c)
{
    return std::min(std::min(a,b), c);
}

// Seeds the DP borders: column 0 gets i * gap, row 0 gets j * gap.
// NOTE(review): A[i*n] uses row stride n rather than (m + 1) — same stride
// concern as flagged on main().
__global__ void align_filling_1(size_t n, size_t m, int *A, int alpha_gap)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i <= n && j <= m){
        A[i*n] = alpha_gap * i;
        A[j] = alpha_gap * j;
    }
    else {
        return;
    }
}

/*
 * COMMENTED OUT:
 *
 * Align_filling_2 is diagonally traversing by
 * finding the minimum value among the current
 * index's left, top and left-top indexes
 * through the end of the array.
__global__ void align_filling_2(size_t n, size_t m, char* input_1_d, char* input_2_d, int *alpha_d, int *A, int alpha_gap)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(((i >=1) && (i <= n)) && ((j >= 1) && (j <= m))){
        char x_i = input_1_d[i-1];
        char y_j = input_2_d[j-1];
        int first = A[(i-1) * n + (j-1)] + alpha_d[(x_i - 'A') * alphabets + (y_j - 'A')];
        int second = A[(i-1) * n + j] + alpha_gap;
        int third = A[i * n + (j-1)] + alpha_gap;
        if(first < second && first < third){
            A[i * n + j] = first;
        }
        else if(second < first && second < third){
            A[i * n + j] = second;
        }
        else if(third < first && third < second){
            A[i * n + j] = third;
        }
    }
}
*/
13,550
#include <cuda_runtime.h>
#include <iostream>

// Dot product of two integer vectors of length n, computed by one thread
// block; the result is left in d_c[0].
//
// Assumed launch configuration (matches main below): <<<1, n>>>, n <= 1024.
//
// BUG FIXES vs. the original:
//  * the do/while loop over IdxMax never iterated meaningfully and is gone;
//  * the reduction used "Idx % 2 == 0" at every step (wrong pairing) and had
//    no __syncthreads() between steps (data race). Replaced with a standard
//    tree reduction that splits each step into read-barrier-write.
__global__ void InnerProduct(int *d_a, int *d_b, int *d_c, int n)
{
    int idx = threadIdx.x;

    // element-wise product
    if (idx < n) {
        d_c[idx] = d_a[idx] * d_b[idx];
    }
    __syncthreads();

    // tree reduction into d_c[0]; handles n that is not a power of two
    for (int stride = 1; stride < n; stride *= 2) {
        bool active = (idx % (2 * stride) == 0) && (idx + stride < n);
        int v = 0;
        if (active) {
            v = d_c[idx + stride];   // read partner before the barrier ...
        }
        __syncthreads();
        if (active) {
            d_c[idx] += v;           // ... accumulate after it (race-free)
        }
        __syncthreads();
    }
}

int main()
{
    int blag = 1;  // input-validation loop flag
    int n = 0;     // vector length

    // prompt until a non-negative size is entered
    do {
        std::cout << "请输入数据大小:" << std::endl;
        std::cin >> n;
        if (n < 0) {
            std::cout << "你输入的数据是错误的,请重新输入!" << std::endl;
        } else {
            blag = 0;
        }
    } while (blag);

    /* host allocations */
    int nByte = sizeof(int) * n;
    int *h_a = (int*)malloc(nByte);
    int *h_b = (int*)malloc(nByte);
    int *h_c = (int*)malloc(nByte);

    /* device allocations */
    int *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, nByte);
    cudaMalloc((void**)&d_b, nByte);
    cudaMalloc((void**)&d_c, nByte);

    /* fill host inputs */
    for (int i = 0; i < n; ++i) {
        h_a[i] = i + 1;
        h_b[i] = i + 3;
    }

    /* host -> device */
    cudaMemcpy(d_a, h_a, nByte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nByte, cudaMemcpyHostToDevice);

    /* single-block launch; n must not exceed the device's threads-per-block
       limit (1024 on current hardware) for this configuration to be valid */
    InnerProduct<<<1, n>>>(d_a, d_b, d_c, n);

    /* device -> host */
    cudaMemcpy(h_c, d_c, nByte, cudaMemcpyDeviceToHost);

    /* result */
    std::cout << "h_c = " << h_c[0] << std::endl;

    /* cleanup */
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    std::cout << "运行完毕!" << std::endl;
    return 0;
}
13,551
#include <iostream>
#include <stdio.h>
#include <time.h>

#define LENGTH 256

using namespace std;

// Struct-of-arrays holder for the three host vectors.
struct soa {
    int *a;
    int *b;
    int *c;
};

// c[i] = a[i] + b[i] for i in [0, LENGTH).
// BUG FIX: the original indexed with threadIdx.x only, so with the 2-block
// launch below both blocks raced on elements 0..127 and elements 128..255
// were never computed. Use the global thread index.
__global__ void vector_add(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < LENGTH)
        c[i] = a[i] + b[i];
}

// CPU reference implementation (not called by main; kept for comparison runs).
__host__ void vector_add_cpu(float a[], float b[], float *c)
{
    for (int i = 0; i < LENGTH; i++) {
        c[i] = a[i] + b[i];
    }
}

int main()
{
    soa h_s;
    int *d_s, *d_a, *d_b;

    h_s.a = new int[LENGTH];
    h_s.b = new int[LENGTH];
    h_s.c = new int[LENGTH];

    for (int i = 0; i < LENGTH; i++) {
        h_s.a[i] = i;
        h_s.b[i] = i;
    }

    cudaMalloc((void**)&d_s, LENGTH * sizeof(int));
    cudaMalloc((void**)&d_a, LENGTH * sizeof(int));
    cudaMalloc((void**)&d_b, LENGTH * sizeof(int));

    cudaMemcpy(d_a, h_s.a, LENGTH * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_s.b, LENGTH * sizeof(int), cudaMemcpyHostToDevice);
    // (the original also copied the uninitialized h_s.c into d_s; the kernel
    // overwrites every element, so that copy was pointless and is removed)

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float milliseconds = 0;
    float total_time = 0.0;

    // BUG FIX: the original never recorded start/stop (the records were
    // commented out) yet still called cudaEventElapsedTime, producing an
    // error / garbage timing. Record around the kernel and sync on stop.
    cudaEventRecord(start);
    vector_add<<<LENGTH/128, 128>>>(d_a, d_b, d_s);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaMemcpy(h_s.c, d_s, LENGTH * sizeof(int), cudaMemcpyDeviceToHost);

    cudaEventElapsedTime(&milliseconds, start, stop);
    total_time += milliseconds;

    std::cout << "Time taken : " << milliseconds << " Avg time : " << total_time / 1000 << std::endl;

    for (int i = 0; i < 10; i++) {
        cout << h_s.c[i] << endl;
    }

    // BUG FIX: the original leaked every allocation and both events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_s);
    delete[] h_s.a;
    delete[] h_s.b;
    delete[] h_s.c;
}
13,552
//Defines a 3D Vector class with the barebones vector operations that I need.
// It is basically a wrapper around the built in float3 struct, with some
// vector operations associated with it.
//
//By Ray Imber a.k.a Rayman22201
#ifndef __VECTOROBJECT__
#define __VECTOROBJECT__

#include <cuda_runtime.h>

namespace wildDoughnut
{
    // Thin value-type wrapper around float3, usable from both host and
    // device code. All operations return new Vectors; nothing mutates *this.
    class Vector
    {
        public:
            float3 values;

            // component constructor
            __host__ __device__ Vector(float newX, float newY, float newZ)
            {
                values.x = newX;
                values.y = newY;
                values.z = newZ;
            }

            // wrap an existing float3
            __host__ __device__ Vector(float3 newValues)
            {
                values = newValues;
            }

            // default: zero vector
            __host__ __device__ Vector()
            {
                values.x = 0;
                values.y = 0;
                values.z = 0;
            }

            // component-wise addition
            __host__ __device__ Vector operator+(Vector b) const
            {
                return Vector(values.x + b.values.x, values.y + b.values.y, values.z + b.values.z);
            }

            // add scalar to every component
            __host__ __device__ Vector operator+(float b) const
            {
                return Vector(values.x + b, values.y + b, values.z + b);
            }

            // component-wise subtraction
            __host__ __device__ Vector operator-(Vector b) const
            {
                return Vector(values.x - b.values.x, values.y - b.values.y, values.z - b.values.z);
            }

            // component-wise (Hadamard) product — NOT the dot product
            __host__ __device__ Vector operator*(Vector b) const
            {
                return Vector(values.x * b.values.x, values.y * b.values.y, values.z * b.values.z);
            }

            // scale by scalar
            __host__ __device__ Vector operator*(float b) const
            {
                return Vector(values.x * b, values.y * b, values.z * b);
            }

            // divide every component by scalar (no zero check, as before)
            __host__ __device__ Vector operator/(float b) const
            {
                return Vector(values.x / b, values.y / b, values.z / b);
            }

            // scalar (dot) product
            __host__ __device__ float dot(Vector b) const
            {
                return (values.x * b.values.x) + (values.y * b.values.y) + (values.z * b.values.z);
            }

            // Euclidean length. (Now const-qualified: it does not modify the
            // vector; backward compatible for all callers.)
            __host__ __device__ float magnitude(void) const
            {
                return sqrtf((values.x * values.x) +
                             (values.y * values.y) +
                             (values.z * values.z));
            }

            // unit vector in the same direction (undefined for zero length,
            // as before — division by zero magnitude)
            __host__ __device__ Vector normalize(void) const
            {
                float mag = magnitude();
                return Vector(values.x / mag, values.y / mag, values.z / mag);
            }
    };
}

#endif
13,553
#include "includes.h"

#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system

const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results

// ---------------------------------------------------------------------------
// 10th-order implicit Runge-Kutta coefficients.
// a* = abscissae, b* = stage weights, c* = quadrature weights.
// Device-side stage constants live in __constant__ memory; host-only ones
// are plain const. Values are the method's published coefficients — do not
// retype or "round" them.
// ---------------------------------------------------------------------------
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;

// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;

// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;

// Euler-type initial guesses for the first four internal RK10 stages:
//   xk = x0 + ak * h * f0, componentwise.
// Launch expectation (implied by the fixed +2/+4/+6 offsets): one thread per
// equation with blockDim.x == 2 (neqns), and device_X_Total holding the four
// stage vectors back-to-back, 2 doubles each.
// NOTE(review): the stride-2 layout hard-codes neqns == 2 — confirm against
// the (unseen) host launch before reusing with a different system size.
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
    device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
    device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
    device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
    device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
}
13,554
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>

#define SQRT_TWO_PI 2.506628274631000
#define BLOCK_D1 1024
#define BLOCK_D2 1
#define BLOCK_D3 1

// Note: Needs compute capability >= 2.0 for calculation with doubles, so compile with:
// nvcc kernelExample.cu -arch=compute_20 -code=sm_20,compute_20 -o kernelExample
// -use_fast_math doesn't seem to have any effect on speed

// CUDA kernel: replaces vals[i] in place with the N(mu, sigma) density at vals[i].
// Assumes a 2D grid of 1D blocks (no third grid dimension).
__global__ void calc_loglik(double* vals, int n, double mu, double sigma) {
  // id of the block
  int myblock = blockIdx.x + blockIdx.y * gridDim.x;
  // size of each block (within grid of blocks)
  int blocksize = blockDim.x * blockDim.y * blockDim.z;
  // id of thread in a given block
  int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
  // overall id/index of the thread
  int idx = myblock * blocksize + subthread;
  if(idx < n) {
    double std = (vals[idx] - mu)/sigma;
    double e = exp( - 0.5 * std * std);
    vals[idx] = e / ( sigma * SQRT_TWO_PI);
  }
}

// CPU analog for speed comparison (single-threaded).
int calc_loglik_cpu(double* vals, int n, double mu, double sigma) {
  double std, e;
  for(int idx = 0; idx < n; idx++) {
    std = (vals[idx] - mu)/sigma;
    e = exp( - 0.5 * std * std);
    vals[idx] = e / ( sigma * SQRT_TWO_PI);
  }
  return 0;
}

/* --------------------------- host code ------------------------------*/

// Fill p[0..n) with deterministic pseudo-random values in (-1, 1).
void fill( double *p, int n ) {
  srand48(0);   // fixed seed => reproducible inputs
  for( int i = 0; i < n; i++ )
    p[i] = 2*drand48()-1;
}

// Wall-clock time in seconds.
double read_timer() {
  struct timeval end;
  gettimeofday( &end, NULL );
  return end.tv_sec+1.e-6*end.tv_usec;
}

int main (int argc, char *argv[]) {
  double* cpu_vals;
  double* gpu_vals;
  int n;
  cudaError_t cudaStat;

  printf("====================================================\n");
  for( n = 32768; n <= 134217728; n*=8 ) {
    cpu_vals = (double*) malloc( sizeof(double)*n );
    cudaStat = cudaMalloc(&gpu_vals, sizeof(double)*n);
    if(cudaStat != cudaSuccess) {
      printf ("device memory allocation failed");
      return EXIT_FAILURE;
    }

    // fixed block dimensions (1024x1x1 threads)
    const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);

    // determine number of blocks we need for a given problem size
    int tmp = ceil(pow(n/BLOCK_D1, 0.5));
    printf("Grid dimension is %i x %i\n", tmp, tmp);
    dim3 gridSize(tmp, tmp, 1);

    int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*tmp*tmp;
    if (nthreads < n){
      printf("\n============ NOT ENOUGH THREADS TO COVER n=%d ===============\n\n",n);
    } else {
      printf("Launching %d threads (n=%d)\n", nthreads, n);
    }

    double mu = 0.0;
    double sigma = 1.0;

    // simulate 'data'
    fill(cpu_vals, n);
    printf("Input values: %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]);

    cudaDeviceSynchronize();
    double tInit = read_timer();

    // copy input data to the GPU
    cudaStat = cudaMemcpy(gpu_vals, cpu_vals, n*sizeof(double), cudaMemcpyHostToDevice);
    printf("Memory Copy from Host to Device ");
    if (cudaStat){
      printf("failed.\n");
    } else {
      printf("successful.\n");
    }
    cudaDeviceSynchronize();
    double tTransferToGPU = read_timer();

    // do the calculation
    calc_loglik<<<gridSize, blockSize>>>(gpu_vals, n, mu, sigma);
    cudaDeviceSynchronize();
    double tCalc = read_timer();

    // BUG FIX: the original copied only n BYTES back (cudaMemcpy(..., n, ...)),
    // i.e. one eighth of the doubles; the size must be n*sizeof(double).
    cudaStat = cudaMemcpy(cpu_vals, gpu_vals, n*sizeof(double), cudaMemcpyDeviceToHost);
    printf("Memory Copy from Device to Host ");
    if (cudaStat){
      printf("failed.\n");
    } else {
      printf("successful.\n");
    }
    cudaDeviceSynchronize();
    double tTransferFromGPU = read_timer();

    printf("Output values: %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]);

    // do calculation on CPU for comparison (unfair as this will only use one core)
    fill(cpu_vals, n);
    double tInit2 = read_timer();
    calc_loglik_cpu(cpu_vals, n, mu, sigma);
    double tCalcCPU = read_timer();

    printf("Output values (CPU): %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]);

    printf("Timing results for n = %d\n", n);
    printf("Transfer to GPU time: %f\n", tTransferToGPU - tInit);
    printf("Calculation time (GPU): %f\n", tCalc - tTransferToGPU);
    printf("Calculation time (CPU): %f\n", tCalcCPU - tInit2);
    printf("Transfer from GPU time: %f\n", tTransferFromGPU - tCalc);

    printf("Freeing memory...\n");
    printf("====================================================\n");
    free(cpu_vals);
    cudaFree(gpu_vals);
  }
  printf("\n\nFinished.\n\n");
  return 0;
}
13,555
#include "includes.h"
#include <float.h>   // FLT_MAX, used as the reduction's identity element

#define MAX_CUDA_THREADS_PER_BLOCK 1024

// Block-level max reduction with sequential addressing.
// Each block reduces its blockDim.x-sized slice of `data` in shared memory;
// thread 0 of block 0 then writes its block's maximum to data[0] (this
// matches the original contract — only block 0's result is published).
// Assumes blockDim.x is a power of two, <= MAX_CUDA_THREADS_PER_BLOCK.
//
// BUG FIX: the original ran the whole reduction — including __syncthreads() —
// inside "if (idx < data_size)". For a partial final block that is a barrier
// inside divergent control flow (undefined behaviour / hang), and in-range
// threads also read uninitialized shared slots belonging to out-of-range
// threads. Now every thread loads a value (-FLT_MAX sentinel when out of
// range, which can never win a max) and every thread reaches every barrier.
__global__ void Max_Sequential_Addressing_Shared(float* data, int data_size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ float sdata[MAX_CUDA_THREADS_PER_BLOCK];

    // load slice into shared memory; pad out-of-range lanes with identity
    sdata[threadIdx.x] = (idx < data_size) ? data[idx] : -FLT_MAX;
    __syncthreads();

    // pairwise reduction, halving the active range each step
    for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
        if (threadIdx.x < stride) {
            float lhs = sdata[threadIdx.x];
            float rhs = sdata[threadIdx.x + stride];
            sdata[threadIdx.x] = lhs < rhs ? rhs : lhs;
        }
        __syncthreads();   // uniform barrier: outside the divergent branch
    }

    // publish block 0's maximum (idx == 0 only exists in block 0)
    if (idx == 0) data[0] = sdata[0];
}
13,556
#include <math.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define sizex 16
#define sizey 16
#define sizesq sizey * sizex

// Unrolled final warp of a shared-memory sum reduction (no barriers needed
// within a warp on pre-Volta hardware because sdata is volatile).
// NOTE(review): on SM70+ (independent thread scheduling) this idiom needs
// __syncwarp() between steps or a __shfl_down_sync rewrite — confirm the
// target architecture before relying on it.
__device__ void warpReduce(volatile float* sdata, int tid)
{
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

// One Jacobi relaxation step on an N x N grid: g = relaxed(h), writing the
// squared per-cell update into `residuals`. Border cells are untouched.
__global__ void gpu_Heat(float *h, float *g, float *residuals, int N)
{
    // global thread IDs
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (i > 0 && j > 0 && i < N-1 && j < N-1) {
        g[i*N+j] = 0.25f * (h[ i*N + (j-1) ] +   // left
                            h[ i*N + (j+1) ] +   // right
                            h[ (i-1)*N + j ] +   // top
                            h[ (i+1)*N + j ]);   // bottom
        float diff = g[(i*N)+j] - h[(i*N) + j];
        residuals[(i*N)+j] = diff * diff;
    }
}

// Per-block sum of `residuals` into block_res[blockIdx.x].
// Requires: power-of-two blockDim.x, dynamic shared memory of
// blockDim.x * sizeof(float), and residuals sized to cover the full grid.
//
// BUG FIX: the original called __syncthreads() inside "if (tid < s)", i.e.
// inside divergent control flow — threads with tid >= s never reach the
// barrier, which is undefined behaviour (typically a hang or corrupt sums).
// The barrier now sits outside the conditional so all threads hit it.
__global__ void gpu_residual(float *residuals, float* block_res)
{
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    sdata[tid] = residuals[i];
    __syncthreads();

    for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();   // uniform barrier each step
    }

    if (tid < 32) warpReduce(sdata, tid);
    if (tid == 0) {
        block_res[blockIdx.x] = sdata[tid];
    }
}

// (A large commented-out fused heat+reduction kernel that followed here was
// dead code and has been removed.)
13,557
#include <curand_kernel.h>

/**
 * Kernel functions for LDA Gibbs sampling.
 *
 * @author Lin Chi-Min (v381654729@gmail.com)
 *
 * Layout conventions (as used by the indexing below):
 *   wpt / phis    : K * V matrices; column for word v starts at v*K
 *   tpd / thetas  : M * K matrices; column for topic j starts at j*M
 *   wt[k]         : total words assigned to topic k
 *   td[m]         : total topic assignments in document m
 *   p             : cumulative-probability scratch, K entries per document
 *                   in the batch (K * numDocumentsInOneBatch floats)
 */

/**
 * Training pass: one thread per document in the batch. For each word
 * occurrence, samples a topic by inverse-CDF over
 * phis[word] * thetas[doc] and updates the count matrices.
 * wpt/wt are updated atomically (shared across documents); tpd/td rows are
 * private to this document's thread, so plain increments suffice.
 */
extern "C" __global__ void drawLatentVariables(
        const int* __restrict__ docsWordCounts,
        const int* __restrict__ docsWordOffsets,
        const int* __restrict__ docsWordIndices,
        int* wpt, int* wt, int* tpd, int* td,
        const float* __restrict__ phis, const float* __restrict__ thetas,
        float* p, int docOffset, int K, int M, int V,
        int numDocumentsInOneBatch)
{
    int m = blockDim.x * blockIdx.x + threadIdx.x;   // document index within batch
    if (m < numDocumentsInOneBatch) {
        int Nm = docsWordCounts[m];                  // words in this document
        int docIndex = docOffset + m;                // global document index
        int docWordOffset = docsWordOffsets[m];
        curandState s;
        // seed by document index => deterministic, per-document stream
        curand_init(docIndex, 0, 0, &s);
        int pOffset = m * K;                         // this document's slice of p
        for (int i = 0; i < Nm; i++) {
            float sum = 0;
            int c_word = docsWordIndices[docWordOffset + i];
            if (c_word < 0 || c_word >= V){
                continue;                            // skip out-of-vocabulary ids
            }
            int j;
            // build unnormalised cumulative distribution over the K topics
            for (j = 0; j < K; j++) {
                sum += phis[j + (c_word * K)] * thetas[docIndex + (j * M)];
                p[j + pOffset] = sum;
            }
            // inverse-CDF draw
            float stop = curand_uniform(&s) * sum;
            for (j = 0; j < K; j++) {
                if (stop < p[j + pOffset]) {
                    break;
                }
            }
            if (j == K){
                j--;                                 // clamp floating-point edge case
            }
            atomicAdd(&wpt[j + (c_word * K)], 1);
            atomicAdd(&wt[j], 1);
            tpd[docIndex + (j * M)]++;
        }
        td[docIndex] += Nm;
    }
}

/**
 * Inference pass ("quick" variant): phis is fixed, only tpd/td are updated;
 * the scratch p lives in dynamic shared memory instead of global memory.
 *
 * NOTE(review): p is indexed with pOffset = m * K where m is the GLOBAL
 * thread index, so this only stays inside the block's shared allocation if
 * the kernel is launched with a single block sized to the whole batch (and
 * shared size numDocumentsInOneBatch * K * sizeof(float)) — verify against
 * the host launch code.
 * NOTE(review): unlike drawLatentVariables above, c_word is NOT checked
 * against V here — confirm the word indices are pre-validated.
 */
extern "C" __global__ void drawLatentVariablesForTestingQuick(
        const int* __restrict__ docsWordCounts,
        const int* __restrict__ docsWordOffsets,
        const int* __restrict__ docsWordIndices,
        int* tpd, int* td,
        const float* __restrict__ phis, const float* __restrict__ thetas,
        int docOffset, int K, int M, int numDocumentsInOneBatch)
{
    extern __shared__ float p[];
    int m = blockDim.x * blockIdx.x + threadIdx.x;
    if (m < numDocumentsInOneBatch) {
        int Nm = docsWordCounts[m];
        int docIndex = docOffset + m;
        int docWordOffset = docsWordOffsets[m];
        curandState s;
        // reset a random number generator
        curand_init(docIndex, 0, 0, &s);
        int pOffset = m * K;
        for (int i = 0; i < Nm; i++) {
            float sum = 0;
            int c_word = docsWordIndices[docWordOffset + i];
            if (c_word < 0){
                continue;
            }
            int j;
            for (j = 0; j < K; j++) {
                sum += phis[j + (c_word * K)] * thetas[docIndex + (j * M)];
                p[j + pOffset] = sum;
            }
            float stop = curand_uniform(&s) * sum;
            for (j = 0; j < K; j++) {
                if (stop < p[j + pOffset]) {
                    break;
                }
            }
            if (j == K){
                j--;
            }
            tpd[docIndex + (j * M)]++;
        }
        td[docIndex] += Nm;
    }
}

/**
 * Inference pass with global-memory scratch: identical to the "quick"
 * variant except p is a caller-provided global buffer
 * (numDocumentsInOneBatch * K floats). phis are fixed and not updated.
 * NOTE(review): as above, no c_word >= V bound check.
 */
extern "C" __global__ void drawLatentVariablesForTesting(
        const int* __restrict__ docsWordCounts,
        const int* __restrict__ docsWordOffsets,
        const int* __restrict__ docsWordIndices,
        int* tpd, int* td,
        const float* __restrict__ phis, const float* __restrict__ thetas,
        float* p, int docOffset, int K, int M, int numDocumentsInOneBatch)
{
    int m = blockDim.x * blockIdx.x + threadIdx.x;
    if (m < numDocumentsInOneBatch) {
        int Nm = docsWordCounts[m];
        int docIndex = docOffset + m;
        int docWordOffset = docsWordOffsets[m];
        curandState s;
        // reset a random number generator
        curand_init(docIndex, 0, 0, &s);
        int pOffset = m * K;
        for (int i = 0; i < Nm; i++) {
            float sum = 0;
            int c_word = docsWordIndices[docWordOffset + i];
            if (c_word < 0){
                continue;
            }
            int j;
            for (j = 0; j < K; j++) {
                sum += phis[j + (c_word * K)] * thetas[docIndex + (j * M)];
                p[j + pOffset] = sum;
            }
            float stop = curand_uniform(&s) * sum;
            for (j = 0; j < K; j++) {
                if (stop < p[j + pOffset]) {
                    break;
                }
            }
            if (j == K){
                j--;
            }
            tpd[docIndex + (j * M)]++;
        }
        td[docIndex] += Nm;
    }
}

/**
 * Smoothed topic-word probabilities:
 *   phis[i] = (wpt[i] + beta) / (wt[k] + betaV), k = i % K.
 * numElements = K * V; one thread per element.
 */
extern "C" __global__ void computePhis(const int* __restrict__ wpt,
        const int* __restrict__ wt, float* phis,
        float beta, float betaV, int K, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements) {
        int k = i % K;
        phis[i] = (wpt[i] + beta) / (wt[k] + betaV);
    }
}

/**
 * Unsmoothed (maximum-likelihood) topic-word probabilities.
 * NOTE(review): divides by wt[k] with no zero guard — empty topics yield
 * inf/nan; confirm wt is never zero when this is called.
 */
extern "C" __global__ void computePhisExact(const int* __restrict__ wpt,
        const int* __restrict__ wt, float* phis, int K, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements) {
        int k = i % K;
        phis[i] = (wpt[i] + 0.0) / wt[k];
    }
}

/**
 * Smoothed document-topic probabilities:
 *   thetas[i] = (tpd[i] + alpha) / (td[m] + alphaK), m = i % M.
 * numElements = M * K; one thread per element.
 */
extern "C" __global__ void computeThetas(const int* __restrict__ tpd,
        const int* __restrict__ td, float* thetas,
        float alpha, float alphaK, int M, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements) {
        int m = i % M;
        thetas[i] = (tpd[i] + alpha) / (td[m] + alphaK);
    }
}
13,558
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define BLOCK_SIZE 16

// Tiled matrix multiply C = A*B for square n x n matrices (n a multiple of
// BLOCK_SIZE). Each 16x16 block computes one 16x16 tile of C, streaming
// BLOCK_SIZE-wide tiles of A and B through shared memory.
// This variant stores tiles transposed (as[tx][ty]) and pads the inner
// dimension (+1) — the padding offsets consecutive columns across shared
// memory banks.
__global__ void kernel_shared(float * a, float * b, int n, float * c)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int aBegin = n * BLOCK_SIZE * by;          // first element of A's tile row
    int aEnd = aBegin + n - 1;                 // last element of that row
    int bBegin = BLOCK_SIZE * bx;              // first element of B's tile column
    int aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * n;
    float sum = 0.0f;
    __shared__ float as[BLOCK_SIZE][BLOCK_SIZE + 1];
    __shared__ float bs[BLOCK_SIZE][BLOCK_SIZE + 1];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // load one tile of A and B (transposed storage: [tx][ty])
        as[tx][ty] = a[ia + n * ty + tx];
        bs[tx][ty] = b[ib + n * ty + tx];
        __syncthreads(); // Synchronize to make sure the matrices are loaded
        // as[k][ty] = A[row][..+k], bs[tx][k] = B[..+k][col]
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += as[k][ty] * bs[tx][k];
        __syncthreads(); // Synchronize to make sure submatrices not needed
    }
    c[n * BLOCK_SIZE * by + BLOCK_SIZE * bx + n * ty + tx] = sum;
}

// Same tiled multiply, textbook variant: tiles stored untransposed
// ([ty][tx]) and without padding.
__global__ void kernel_shared_1(float * a, float * b, int n, float * c)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int aBegin = n * BLOCK_SIZE * by;
    int aEnd = aBegin + n - 1;
    int bBegin = BLOCK_SIZE * bx;
    int aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * n;
    float sum = 0.0f;
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        __shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
        as[ty][tx] = a[ia + n * ty + tx];
        bs[ty][tx] = b[ib + n * ty + tx];
        __syncthreads(); // Synchronize to make sure the matrices are loaded
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += as[ty][k] * bs[k][tx];
        __syncthreads(); // Synchronize to make sure submatrices not needed
    }
    c[n * BLOCK_SIZE * by + BLOCK_SIZE * bx + n * ty + tx] = sum;
}

// Naive global-memory multiply: each thread walks a full row of A and
// column of B directly from DRAM (baseline for the shared-memory speedup).
__global__ void kernel_global(float * a, float * b, int n, float * c)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float sum = 0.0f;
    int ia = n * BLOCK_SIZE * by + n * ty;     // start of this thread's A row
    int ib = BLOCK_SIZE * bx + tx;             // start of this thread's B column
    int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    for (int k = 0; k < n; k++) sum += a[ia + k] * b[ib + k*n];
    c[ic + n * ty + tx] = sum;
}

// Benchmarks the GPU multiply against a single-threaded CPU multiply
// (which uses a pre-transposed copy of B for cache friendliness).
// NOTE(review): no CUDA error checking and the CPU result cc is never
// compared with the GPU result c — timings only.
int main()
{
    int N = 1024;
    int m, n, k;
    float CPUstart, CPUstop;
    float timerValueGPU, timerValueCPU;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int numBytes = N*N*sizeof(float);
    float *devA, *devB, *devC, *a, *b, *c, *cc, *bT, *aT;
    a = (float*)malloc(numBytes);
    b = (float*)malloc(numBytes);
    bT = (float*)malloc(numBytes);
    aT = (float*)malloc(numBytes);
    c = (float*)malloc(numBytes);
    cc = (float*)malloc(numBytes);
    // initialise inputs; bT/aT are the explicit transposes of b/a
    for (n = 0; n<N; n++)
    {
        for (m = 0; m<N; m++)
        {
            a[m + n*N] = 2.0f*m + n;
            b[m + n*N] = m - n;
            aT[m + n*N] = m + n*2.0f;
            bT[m + n*N] = n - m;
        }
    }
    cudaMalloc((void**)&devA, numBytes); // allocate DRAM
    cudaMalloc((void**)&devB, numBytes); // allocate DRAM
    cudaMalloc((void**)&devC, numBytes); // allocate DRAM
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocks(N / threads.x, N / threads.y);
    // DEVICE ------------------------------------------------------
    // (timed section includes the H2D/D2H copies)
    cudaEventRecord(start, 0);
    cudaMemcpy(devA, a, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, numBytes, cudaMemcpyHostToDevice);
    kernel_shared <<<blocks, threads >>> (devA, devB, N, devC);
    //kernel_shared_1 << <blocks, threads >> > (devA, devB, N, devC);
    //kernel_global <<< blocks, threads >>> ( devA, devB, N, devC );
    cudaMemcpy(c, devC, numBytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timerValueGPU, start, stop);
    printf("\n GPU calculation time %f msec\n", timerValueGPU);
    //---------------------------------------------------------------
    // HOST ---------------------------------------------------------
    CPUstart = clock();
    for (n = 0; n<N; n++)
    {
        for (m = 0; m<N; m++)
        {
            cc[m + n*N] = 0.f;
            for (k = 0; k<N; k++) cc[m + n*N] += a[k + n*N] * bT[k + m*N]; // T
            // for(k=0;k<N;k++) cc[m+n*N]+=a[k+n*N]*b[m+k*N]; //
        }
    }
    CPUstop = clock();
    timerValueCPU = 1000.*(CPUstop - CPUstart) / CLOCKS_PER_SEC;
    printf("CPU time : %.3f ms\n", timerValueCPU);
    printf("Rate : %.3f \n", timerValueCPU / timerValueGPU);
    //---------------------------------------------------------------
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    free(a);
    free(b);
    free(bT);
    free(aT);
    free(c);
    free(cc);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
13,559
#include <stdio.h>
#include <stdlib.h>

/*
SEQUENTIAL TIME:
real 0m0.414s  user 0m0.185s  sys 0m0.221s
CUDA TIME:
real 0m2.330s  user 0m1.143s  sys 0m1.099s
*/

// Per-block inclusive (Hillis-Steele) scan of `a` in place; each block's
// total is published to s[blockIdx.x + 1] for the host-side scan of block
// sums. Requires blockDim.x <= 1024 (size of the shared buffer).
//
// BUG FIXES:
//  * the original wrote s[blockIdx.x + 1] unconditionally, so the LAST block
//    stored one element past the end of s (sized num_blocks doubles) and,
//    for a partial block, read a[b+t] past the end of a. The last block's
//    total is never consumed, so it is simply not written now;
//  * out-of-range threads of a partial block left their shared slot
//    uninitialized yet still participated in the scan; they now load 0.
__global__ void scan_cuda(double* a, double *s, int width)
{
    int t = threadIdx.x;
    int b = blockIdx.x * blockDim.x;
    double x;

    // block-local working buffer
    __shared__ double p[1024];

    // load from global memory; zero-pad past the end of the array
    p[t] = (b + t < width) ? a[b + t] : 0.0;
    // wait until every thread has loaded its element
    __syncthreads();

    // scan in log2(blockDim.x) steps
    for (int i = 1; i < blockDim.x; i *= 2) {
        if (t >= i)
            x = p[t] + p[t-i];   // read into a temporary ...
        __syncthreads();         // ... all reads done ...
        if (t >= i)
            p[t] = x;            // ... then commit the writes
        __syncthreads();
    }

    // write the scanned values back
    if (b + t < width)
        a[b+t] = p[t];

    // last thread publishes the block total — but never for the final block
    // (its slot would be out of bounds and its total is unused)
    if (t == blockDim.x-1 && blockIdx.x + 1 < gridDim.x)
        s[blockIdx.x+1] = p[t];
}

// Adds the exclusive-scanned block totals back onto each block's elements.
__global__ void add_cuda(double *a, double *s, int width)
{
    int t = threadIdx.x;
    int b = blockIdx.x * blockDim.x;

    if (b + t < width)
        a[b+t] += s[blockIdx.x];
}

int main()
{
    int width = 40000000;
    // NOTE: width * sizeof(double) must fit in int here (it does for this
    // width); use size_t if width is ever increased.
    int size = width * sizeof(double);
    int block_size = 1024;
    int num_blocks = (width-1)/block_size+1;
    int s_size = (num_blocks * sizeof(double));

    double *a = (double*) malloc (size);
    double *s = (double*) malloc (s_size);

    for(int i = 0; i < width; i++)
        a[i] = i;

    double *d_a, *d_s;

    // allocate "a" and "s" on the device
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_s, s_size);

    // copy "a" to the device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);

    dim3 dimGrid(num_blocks,1,1);
    dim3 dimBlock(block_size,1,1);

    // per-block scan
    scan_cuda<<<dimGrid,dimBlock>>>(d_a, d_s, width);

    // copy block totals back and scan them on the host
    cudaMemcpy(s, d_s, s_size, cudaMemcpyDeviceToHost);
    s[0] = 0;   // exclusive prefix: block 0 gets no offset
    for (int i = 1; i < num_blocks; i++)
        s[i] += s[i-1];

    // push the scanned offsets back to the device
    cudaMemcpy(d_s, s, s_size, cudaMemcpyHostToDevice);

    // add each block's offset to its elements
    add_cuda<<<dimGrid,dimBlock>>>(d_a, d_s, width);

    // copy the result back
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);

    printf("\na[%d] = %f\n",width-1,a[width-1]);

    cudaFree(d_a);
    cudaFree(d_s);
    free(a);   // BUG FIX: host buffers were leaked
    free(s);
}
13,560
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cufft.h>
#include <math.h>

// GPU acceleration search: for each trial acceleration, time-domain resample
// the input series, FFT it (cuFFT R2C), and form an interpolated power-like
// spectrum. Harmonic summing is scaffolded but currently disabled.
#define MAX_BLOCKS 65535
#define MAX_THREADS 512
#define THREADS_PER_BLOCK 1024

using namespace std;

/*--------------------------------------------------------------*/
// NOTE(review): this prototype declares FOUR parameters but the definition
// below has three (float, int, int) — C++ treats them as distinct overload
// declarations, so it compiles, but the prototype is dead; confirm intent.
inline __device__ int getAcceleratedIndex(float, int, float, int);
__global__ void resampleOnDevice(float*, float*, float, int, float);
__global__ void harmonicSumOnDevice(float*, float*, int, int, int);
__global__ void formSpecOnDevice(float*, float*);
void GPU_harmonic_sum(float*, float*, int, int);
extern "C"{
int checkError(void);
int seekGPU(float*, size_t, float*, size_t, float);
}
/*-------------------------------------------------------------*/

// Maps sample index `id` to its acceleration-shifted index: a quadratic
// stretch centred on the midpoint of the series.
inline __device__ int getAcceleratedIndex(float accel_fact, int size_by_2, int id){
    return (int)(id + accel_fact*( ((id-size_by_2)*(id-size_by_2)) - (size_by_2*size_by_2)));
}

// Scatter-resample: each thread writes its input sample to the accelerated
// index; if the mapping skips an output slot, the sample is duplicated into
// the gap (nearest-neighbour fill).
// Note: size_by_2 arrives as float here and is truncated to int inside
// getAcceleratedIndex.
__global__ void resampleOnDevice(float* input_d, float* output_d, float accel_fact, int size, float size_by_2)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int index0 = getAcceleratedIndex(accel_fact,size_by_2,id);
    int index1 = getAcceleratedIndex(accel_fact,size_by_2,id+1);
    output_d[index0] = input_d[id];
    if (index1-index0 > 1){
        // gap between consecutive mapped indices: fill the next slot too
        if (index0+1 < size)
            output_d[index0+1] = input_d[id];
    }
}

// Sums `harmonic` stretched copies of the spectrum into d_odata for one gulp
// of at most MAX_BLOCKS*MAX_THREADS bins, normalised by sqrt(harmonic).
__global__ void harmonicSumOnDevice(float *d_idata, float *d_odata, int gulp_index, int size, int harmonic)
{
    int ii;
    int Index = blockIdx.x * blockDim.x + threadIdx.x;
    if(Index<size){
        d_odata[gulp_index+Index] = d_idata[gulp_index+Index];
        for(ii=1; ii<harmonic; ii++){
            // bin at fraction ii/harmonic of the current frequency
            d_odata[gulp_index+Index] += d_idata[(ii*(gulp_index+Index))/harmonic];
        }
        d_odata[gulp_index+Index] = d_odata[gulp_index+Index]/sqrt((float)harmonic); // can use *rsqrt to optimise further
    }
}

// Forms a spectrum value per bin from the complex FFT output: takes the max
// of the bin power and the half-power of the difference with the previous bin
// (interbinning), then applies rsqrtf.
// NOTE(review): rsqrtf yields the RECIPROCAL square root of the power —
// confirm this is intended rather than sqrtf (amplitude spectrum).
__global__ void formSpecOnDevice(float* f_spectrum_d, float* p_spectrum_d)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    float i,r,a,b,rl,il;
    r = f_spectrum_d[2*id];      // real part of bin id
    i = f_spectrum_d[2*id+1];    // imaginary part of bin id
    a = r*r + i*i;
    if (id == 0){
        // no previous bin for DC
        rl = 0;
        il = 0;
    }
    else {
        rl = f_spectrum_d[2*(id-1)];
        il = f_spectrum_d[2*(id-1)+1];
    }
    a = r*r + i*i ;
    b = ((r-rl)*(r-rl) + (i-il)*(i-il))/2 ;
    p_spectrum_d[id] = rsqrtf(fmax(a,b));
}

// Host driver: runs harmonicSumOnDevice over the array in "gulps" so the
// grid never exceeds MAX_BLOCKS blocks.
void GPU_harmonic_sum(float* d_input_array, float* d_output_array, int original_size, int harmonic)
{
    int gulps;
    int gulp_counter;
    int gulp_index = 0;
    int gulp_size;
    gulps = original_size/(MAX_BLOCKS*MAX_THREADS)+1;
    for(gulp_counter = 0; gulp_counter<gulps; gulp_counter++){
        if(gulp_counter<gulps-1){
            gulp_size = MAX_BLOCKS*MAX_THREADS;
        } else {
            // last (partial) gulp
            gulp_size = original_size - gulp_counter*MAX_BLOCKS*MAX_THREADS;
        }
        harmonicSumOnDevice<<<MAX_BLOCKS,MAX_THREADS>>>(d_input_array,d_output_array,gulp_index,gulp_size,harmonic);
        gulp_index = gulp_index + MAX_BLOCKS*MAX_THREADS;
    }
}

extern "C" {

// Reports and clears the last CUDA error, then synchronizes to surface any
// asynchronous kernel failure. Returns 0 on success, -1 on error.
int checkError(void){
    cudaError err = cudaGetLastError();
    if (cudaSuccess != err){
        fprintf(stderr,"CUDA error: %s\n",cudaGetErrorString(err));
        return(-1);}
    err = cudaDeviceSynchronize();
    if (cudaSuccess != err){
        fprintf(stderr,"CUDA error: %s\n",cudaGetErrorString(err));
        return(-1);}
    return 0;
}

// Entry point callable from C: runs the full resample/FFT/spectrum pipeline
// for each trial acceleration in `accels`, overwriting `timeseries_h` with
// the spectrum of the LAST trial on return.
// timeseries_h : input samples (length `size`), reused as output buffer
// accels       : trial accelerations (length `naccels`)
// tsamp        : sampling interval in seconds
int seekGPU(float* timeseries_h, size_t size, float* accels, size_t naccels, float tsamp)
{
    int ii;
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float elapsedTime;
    int nharms = 5;

    //Print some device properties
    cudaDeviceProp dev_prop;
    cudaGetDeviceProperties(&dev_prop, 0);
    printf("Device name: %s\n", dev_prop.name);
    printf("Maximum Threads per Block: %d\n", dev_prop.maxThreadsPerBlock);
    printf("Maximum Dimensions of a Block: %d %d %d\n", dev_prop.maxThreadsDim[0], dev_prop.maxThreadsDim[1], dev_prop.maxThreadsDim[2]);
    printf("Maximum Dimensions of a Grid: %d %d %d\n", dev_prop.maxGridSize[0], dev_prop.maxGridSize[1], dev_prop.maxGridSize[2]);
    printf("Warp size in threads: %d\n", dev_prop.warpSize);
    printf("Shared Memory per Block in Bytes: %d\n", dev_prop.sharedMemPerBlock);
    printf("\n");

    // time the plan creation
    cudaEventRecord(start, 0);
    cufftHandle plan;
    cufftPlan1d(&plan, size, CUFFT_R2C, 1);
    checkError();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to create plan: %f ms\n", elapsedTime);

    // time allocation + host->device copy
    cudaEventRecord(start, 0);
    cufftReal *timeseries_d;
    cufftReal *resampled_d;
    cufftComplex *f_spectrum_d;
    cufftReal *p_spectrum_d;
    // NOTE(review): variable-length array of pointers (nvcc GNU extension);
    // p_spectrum_d is allocated complex_size although it holds reals
    // (harmless over-allocation).
    cufftReal *p_harmonics_d[nharms];
    size_t real_size = sizeof(cufftReal)*size;
    size_t complex_size = sizeof(cufftComplex)*(size/2+1);
    cudaMalloc((void**)&timeseries_d, real_size);
    cudaMalloc((void**)&resampled_d, real_size);
    cudaMalloc((void**)&f_spectrum_d, complex_size);
    cudaMalloc((void**)&p_spectrum_d, complex_size);
    for (ii=0;ii<nharms;ii++){
        cudaMalloc((void**)&p_harmonics_d[ii], real_size);
    }
    checkError();
    cudaMemcpy(timeseries_d, timeseries_h, real_size, cudaMemcpyHostToDevice);
    checkError();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to allocate memory and copy: %f ms\n", elapsedTime);

    // time the per-acceleration pipeline
    cudaEventRecord(start, 0);
    dim3 grid(size/THREADS_PER_BLOCK,1,1);
    int size_by_2 = size/2;
    float accel_fact;
    // NOTE(review): %d with a size_t argument is undefined on LP64; %zu
    // would be portable.
    printf("Size of data: %d\n", size);
    printf("Launching %d threads on (%d,%d,%d) blocks\n", THREADS_PER_BLOCK, grid.x, grid.y, grid.z);
    for (ii=0;ii<naccels;ii++){
        // accel in m/s^2 -> dimensionless stretch factor (c = 299792458 m/s)
        accel_fact = ((accels[ii]*tsamp) / (2 * 299792458.0));
        resampleOnDevice<<<grid, THREADS_PER_BLOCK>>>((float*) timeseries_d, (float*) resampled_d, accel_fact, size, size_by_2);
        cufftExecR2C(plan, (cufftReal *)resampled_d, (cufftComplex *)f_spectrum_d);
        formSpecOnDevice<<<grid,THREADS_PER_BLOCK>>>((float*) f_spectrum_d, (float*) p_spectrum_d);
        //for (ii=0;ii<nharms;ii++){
        //GPU_harmonic_sum(p_spectrum_d, p_harmonics_d[ii], complex_size, ii); NEEDS NEW HARMSUM ROUTINE
        //}
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to execute: %f ms\n", elapsedTime);
    checkError();

    //cudaMemcpy(outbuffer, odata, complex_size, cudaMemcpyDeviceToHost); //This is useful if you want to debug results
    // overwrite the caller's buffer with the last trial's spectrum
    cudaMemcpy(timeseries_h, p_spectrum_d, real_size, cudaMemcpyDeviceToHost);
    checkError();

    cufftDestroy(plan);
    cudaFree(timeseries_d);
    cudaFree(resampled_d);
    cudaFree(f_spectrum_d);
    cudaFree(p_spectrum_d);
    for (ii=0;ii<nharms;ii++){
        cudaFree(p_harmonics_d[ii]);
    }
    cudaDeviceReset();
    return checkError();
}
}
13,561
//
// Created by kindr on 2021/5/3.
//
// Matrix-transpose micro-benchmark: times plain copies and four naive
// transpose variants (row-read vs column-read, with and without diagonal
// block reordering) over an n x n matrix in unified memory.

#include <vector>
#include <cuda_runtime_api.h>
#include <string>
#include "matrixTranspose.cuh"
#include "../../common/arrayHelper.cuh"
#include "../../common/utils.cuh"

// Baseline: coalesced element-wise copy (read and write both row-major).
__global__ void copyRow(float *out, const float *in, const size_t nx, const size_t ny) {
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny) {
        unsigned int idx = iy * nx + ix;
        out[idx] = in[idx];
    }
}

// NOTE(review): this body is byte-identical to copyRow — for a column-access
// bandwidth baseline one would expect idx = ix * ny + iy here; confirm
// whether this is intentional.
__global__ void copyCol(float *out, const float *in, const size_t nx, const size_t ny) {
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny) {
        unsigned int idx = iy * nx + ix;
        out[idx] = in[idx];
    }
}

// Transpose with coalesced reads (in read row-major) and strided writes.
__global__ void transformNaiveRow(float *out, const float *in, const size_t nx, const size_t ny) {
    unsigned int ix = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int iy = threadIdx.y + blockDim.y * blockIdx.y;
    unsigned int idx_row = ix + iy * nx;   // row-major index
    unsigned int idx_col = ix * ny + iy;   // transposed index
    if (ix < nx && iy < ny) {
        out[idx_col] = in[idx_row];
    }
}

// Transpose with strided reads and coalesced writes (out written row-major).
__global__ void transformNaiveCol(float *out, const float *in, const size_t nx, const size_t ny) {
    unsigned int ix = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int iy = threadIdx.y + blockDim.y * blockIdx.y;
    unsigned int idx_row = ix + iy * nx;
    unsigned int idx_col = ix * ny + iy;
    if (ix < nx && iy < ny) {
        out[idx_row] = in[idx_col];
    }
}

// Same as transformNaiveRow, but blocks are remapped along diagonals
// (block_x shifted by block_y modulo gridDim.x) to spread accesses across
// memory partitions.
__global__ void transformNaiveRowDiagonal(float *out, const float *in, const size_t nx, const size_t ny) {
    unsigned int block_y = blockIdx.x;
    unsigned int block_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    unsigned int ix = threadIdx.x + blockDim.x * block_x;
    unsigned int iy = threadIdx.y + blockDim.y * block_y;
    unsigned int idx_row = ix + iy * nx;
    unsigned int idx_col = ix * ny + iy;
    if (ix < nx && iy < ny) {
        out[idx_col] = in[idx_row];
    }
}

// Diagonal-reordered variant of transformNaiveCol.
__global__ void transformNaiveColDiagonal(float *out, const float *in, const size_t nx, const size_t ny) {
    unsigned int block_y = blockIdx.x;
    unsigned int block_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    unsigned int ix = threadIdx.x + blockDim.x * block_x;
    unsigned int iy = threadIdx.y + blockDim.y * block_y;
    unsigned int idx_row = ix + iy * nx;
    unsigned int idx_col = ix * ny + iy;
    if (ix < nx && iy < ny) {
        out[idx_row] = in[idx_col];
    }
}

// Runs every kernel once under the TIME macro (from ../../common/utils.cuh)
// over an n x n matrix with nThread x nThread blocks.
// Assumes n is representable by the nBlock*nThread grid; memset on the
// managed buffer is legal host-side since cudaMallocManaged memory is
// host-accessible.
void matrixTranspose(size_t n, size_t nThread) {
    float *InMatrix, *outMatrix;
    size_t nElement = n * n;
    size_t nBytes = nElement * sizeof(float);
    cudaMallocManaged(&InMatrix, nBytes, cudaMemAttachGlobal);
    cudaMallocManaged(&outMatrix, nBytes, cudaMemAttachGlobal);
    randomInitArray(InMatrix, nElement);

    size_t nBlock = (n + nThread - 1) / nThread;   // ceil-div

    // Times one kernel launch, checks for launch errors, then clears the
    // output buffer so runs don't see each other's results.
    auto transpose = [&](auto kernel, const std::string &tag) {
        TIME([&]() {
            printf("%s: ", tag.data());
            kernel();
            cudaDeviceSynchronize();
            CHECK(cudaGetLastError())
            memset(outMatrix, 0, nBytes);
        });
    };

    const dim3 &dimGrid = dim3(nBlock, nBlock);
    const dim3 &dimBlock = dim3(nThread, nThread);
    auto f_copyRow = [&]() {
        copyRow<<<dimGrid, dimBlock>>>(outMatrix, InMatrix, n, n);
    };
    auto f_copyCol = [&]() {
        copyCol<<<dimGrid, dimBlock>>>(outMatrix, InMatrix, n, n);
    };
    transpose(f_copyRow, "copyRow");
    transpose(f_copyCol, "copyCol");

    auto f_transRow = [&]() {
        transformNaiveRow<<<dimGrid, dimBlock>>>(outMatrix, InMatrix, n, n);
    };
    auto f_transCol = [&]() {
        transformNaiveCol<<<dimGrid, dimBlock>>>(outMatrix, InMatrix, n, n);
    };
    transpose(f_transRow, "transRow");
    transpose(f_transCol, "transCol");

    auto f_transRowDia = [&]() {
        transformNaiveRowDiagonal<<<dimGrid, dimBlock>>>(outMatrix, InMatrix, n, n);
    };
    auto f_transColDia = [&]() {
        transformNaiveColDiagonal<<<dimGrid, dimBlock>>>(outMatrix, InMatrix, n, n);
    };
    transpose(f_transRowDia, "transRowDia");
    transpose(f_transColDia, "transColDia");
}
13,562
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Takes the square root of every element of an array on the GPU and
// compares against the CPU result.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>

const int N = 16;
const int blocksize = 16;

// One thread per element; single-block launch so threadIdx.x indexes directly.
__global__ void simple(float *c)
{
    // fix: use the float overload sqrtf instead of double-precision sqrt,
    // which silently promoted to double on the device
    c[threadIdx.x] = sqrtf(c[threadIdx.x]);
}

int main()
{
    // host buffers: result and input (powers of two: 1, 2, 4, ...)
    float *c = new float[N];
    float *to_square = new float[N];
    to_square[0] = 1;
    for (int i = 1; i < N; i++) {
        to_square[i] = to_square[i-1] * 2;
    }

    // device buffer
    float *cd;
    const int size = N * sizeof(float);
    cudaMalloc( (void**)&cd, size );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );

    // upload input, run, download result
    cudaMemcpy( cd, to_square, size, cudaMemcpyHostToDevice );
    simple<<<dimGrid, dimBlock>>>(cd);
    // fix: cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // current API (the blocking cudaMemcpy below would also synchronize)
    cudaDeviceSynchronize();
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    cudaFree( cd );

    // compare GPU result against the CPU reference
    printf("\n");
    for (int i = 0; i < N; i++)
        printf("gpu:%f cpu: %f \n", c[i], sqrt(to_square[i]));
    printf("\n");

    delete[] c;
    delete[] to_square;   // fix: original leaked this buffer
    printf("done\n");
    return EXIT_SUCCESS;
}
13,563
#include "includes.h"

// One BFS labelling step of a grid-based graph-cut (push-relabel style).
// Each thread owns one pixel of a 2-D grid laid out row-major with padded
// row width `width1`; its 4-neighbours are thid+-1 (left/right) and
// thid+-width1 (up/down).
//
// g_left/right/down/up_weight : residual capacities toward each neighbour
// g_graph_height              : per-pixel BFS height/label
// g_pixel_mask                : true while the pixel still awaits a label
// *g_counter                  : height of the current BFS frontier
// *g_over                     : set to true when any pixel was labelled this
//                               pass, telling the host to run another pass
// vertex_num/width/height     : logical sizes; *1 variants are padded sizes
//                               (vertex_num1/height1 are unused here —
//                               presumably kept for signature parity; verify)
__global__ void kernel_bfs(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_graph_height, bool *g_pixel_mask, int vertex_num, int width, int height, int vertex_num1, int width1, int height1, bool *g_over, int *g_counter)
{
    /*******************************
     *threadId is calculated ******
     *****************************/
    int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;

    if (thid < vertex_num && g_pixel_mask[thid] == true)
    {
        // coordinates in the padded layout; only interior pixels participate
        int col = thid % width1, row = thid / width1;

        if (col < width - 1 && col > 0 && row < height - 1 && row > 0)
        {
            int height_l = 0, height_d = 0, height_u = 0, height_r = 0;

            // neighbour heights
            height_r = g_graph_height[thid + 1];
            height_l = g_graph_height[thid - 1];
            height_d = g_graph_height[thid + width1];
            height_u = g_graph_height[thid - width1];

            // If any neighbour sits on the current frontier AND the edge
            // from that neighbour toward this pixel has residual capacity,
            // this pixel joins the next frontier.
            if (((height_l == (*g_counter) && g_right_weight[thid - 1] > 0)) ||
                ((height_d == (*g_counter) && g_up_weight[thid + width1] > 0) ||
                 (height_r == (*g_counter) && g_left_weight[thid + 1] > 0) ||
                 (height_u == (*g_counter) && g_down_weight[thid - width1] > 0)))
            {
                g_graph_height[thid] = (*g_counter) + 1;   // label with next height
                g_pixel_mask[thid] = false;                // done with this pixel
                *g_over = true;                            // signal host: keep going
            }
        }
    }
}
13,564
# include <cuda.h>
# include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <curand_kernel.h>

# define N 10

// Checks a cudaError_t; requires a cudaError_t variable `e` in scope.
# define CUDA_ERROR_CHECK(error) {\
    e = error; \
    if (e != cudaSuccess){ \
        printf("%s\n", cudaGetErrorString(e)); \
        exit(0); \
    } \
}

// 2x2 read-only table in constant memory (uploaded from the host below).
__constant__ int a[2][2];

// Initializes one curand state per thread: same seed, distinct subsequences.
__global__ void setup_kernel ( curandState * state, unsigned long seed )
{
    int id = threadIdx.x;
    curand_init ( seed, id, 0, &state[id] );
}

// Draws 10 uniform floats per thread. The values are discarded on purpose —
// this only advances each thread's RNG state, which is written back so a
// later kernel could continue the stream.
__global__ void generate( curandState* globalState )
{
    int ind = threadIdx.x;
    curandState localState = globalState[ind];
    int i = 0;
    while (i<10){
        float RANDOM = curand_uniform( &localState );
        (void)RANDOM;   // intentionally unused
        i++;
    }
    globalState[ind] = localState;
}

int main( int argc, char** argv)
{
    cudaError_t e;

    int ** c_a = (int **)malloc(sizeof(int*)*2);
    c_a[0] = (int*)malloc(sizeof(int)*2);
    c_a[1] = (int*)malloc(sizeof(int)*2);
    c_a[0][0] = 1;
    c_a[0][1] = 2;
    c_a[1][0] = 4;
    c_a[1][1] = 8;

    printf("c_a: [%d][%d] [%d][%d]\n", c_a[0][0], c_a[0][1], c_a[1][0], c_a[1][1]);

    // FIX: c_a is an array of row POINTERS, not four contiguous ints, so
    // copying 16 bytes straight from c_a uploaded pointer bytes into the
    // constant table. Stage the values in a contiguous 2x2 array first.
    int h_a[2][2] = { { c_a[0][0], c_a[0][1] },
                      { c_a[1][0], c_a[1][1] } };
    CUDA_ERROR_CHECK(cudaMemcpyToSymbol(a, h_a, 2*2*sizeof(int)))

    dim3 tpb(N,1,1);
    curandState* devStates;
    cudaMalloc ( &devStates, N*sizeof( curandState ) );

    // setup seeds
    setup_kernel <<< 1, tpb >>> ( devStates, time(NULL) );

    // generate random numbers
    generate <<< 1, tpb >>> ( devStates );

    // surface any asynchronous kernel failure before exiting
    CUDA_ERROR_CHECK(cudaDeviceSynchronize())

    cudaFree(devStates);
    free(c_a[0]);
    free(c_a[1]);
    free(c_a);
    return 0;
}
13,565
#include<iostream>
using namespace std;

// Element-wise vector sum c = a + b over n elements, using a grid-stride
// loop so any launch configuration covers the whole array.
__global__ void addition(int *a,int *b,int *c,int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
        c[idx] = a[idx] + b[idx];
}

int main()
{
    const int n = 20;
    const int bytes = n * sizeof(int);

    // Host buffers: a = b = 1..n, c zeroed.
    int *a = (int*)malloc(bytes);
    int *b = (int*)malloc(bytes);
    int *c = (int*)malloc(bytes);
    for (int i = 0; i < n; ++i) {
        a[i] = i + 1;
        b[i] = i + 1;
        c[i] = 0;
    }

    // Device buffers and input upload.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, bytes);
    cudaMalloc(&dev_b, bytes);
    cudaMalloc(&dev_c, bytes);
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);

    // Time the kernel with CUDA events.
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    cudaEventRecord(start);
    addition<<<128,128>>>(dev_a, dev_b, dev_c, n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);

    float time = 0;
    cudaEventElapsedTime(&time, start, end);

    // Fetch the result (blocking copy).
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; ++i)
        cout << a[i] << "+" << b[i] << "=" << c[i] << endl;
    cout << "\n Time elapsed:" << time << endl;

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
13,566
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

#define MAXLINESIZE 35

// Problem size and launch shape, uploaded from the host via
// cudaMemcpyToSymbol before the kernel runs.
__device__ int dNUM;
__device__ int dBLOCK_N;
__constant__ int dTHREAD_N;

// NUmber of threads = 96, Number of SM = 2, Number of cores per SM = 48
// For each group of 4 input rows, sums (da+db+dc) of the 4 rows into one
// output slot dx0[i/4], using dynamic shared memory (one float per thread)
// as the staging area.
// NOTE(review): correctness relies on unstated alignment assumptions —
// dNUM divisible by totalThreads (otherwise some threads leave the loop
// early and the __syncthreads() calls diverge), and each group of 4
// consecutive i values landing on 4 consecutive threadIds in the same
// block (temp[threadId+1..3] must hold the right partials and stay within
// the dTHREAD_N-float shared allocation). Confirm against the input sizes
// actually used.
__global__ void calculate_triliteration(float *dda, float *ddb, float *ddc, float *dx0, float *dy0){
    extern __shared__ float temp[];
    int blockId = blockIdx.x;
    int threadId = threadIdx.x;
    int idx = blockId * dTHREAD_N + threadId;
    int totalThreads = dTHREAD_N * dBLOCK_N;
    for(int i = idx; i < dNUM; i += totalThreads){
        //printf("%d\n", i);
        // each thread stages its row-sum in shared memory
        temp[threadId] = dda[i] + ddb[i] + ddc[i];
        __syncthreads();
        if(i % 4 == 0){
            // leader of each 4-row group combines its group's partials
            dx0[i/4] = temp[threadId] + temp[threadId + 1] + temp[threadId + 2] + temp[threadId + 3] ;
        }
        __syncthreads();
        //dy0[idx] = idx;
    }
    // dy0 is currently unused (see commented line above)
}

int main(int args, char ** argv){
    // usage: prog <log2(NUM)> <BLOCK_N> <THREAD_N> <input-file>
    if(args != 5){
        printf("Invalid input.....\n");
        return -1;
    }
    int NUM = pow(2, atoi(argv[1]));
    int BLOCK_N = atoi(argv[2]);
    int THREAD_N = atoi(argv[3]);
    char *INPUT_FILE = argv[4];
    printf("\n\tInput Size : %d\n", NUM);
    printf("\tBlock_N : %d\n", BLOCK_N);
    printf("\tTHREAD_N : %d\n", THREAD_N);

    FILE * input_fd;
    input_fd = fopen(INPUT_FILE, "r");
    char line[MAXLINESIZE];
    int line_count = 0;
    // NOTE(review): stack VLAs sized by the runtime NUM — large exponents
    // will overflow the stack; heap allocation would be safer.
    float da[NUM],db[NUM],dc[NUM], x0[NUM / 4], y0[NUM/ 4];
    while ( fgets(line, MAXLINESIZE, input_fd) != NULL && line_count < NUM) {
        da[line_count] = 0.0;
        db[line_count] = 0.0;
        dc[line_count] = 0.0;
        sscanf(line, "%f %f %f\n", &da[line_count], &db[line_count], &dc[line_count]);
        // parsed values are immediately overwritten with 1.0 — presumably a
        // debugging stub left in place; verify before relying on file input
        da[line_count] = 1.0;
        db[line_count] = 1.0;
        dc[line_count] = 1.0;
        //printf("da = %f, db = %f, dc = %f\n", da[line_count], db[line_count], dc[line_count]);
        line_count ++;
    }

    float * dda, * ddb, * ddc;
    float * dx0, * dy0;
    int _floatSize = NUM * sizeof(float);
    cudaMalloc( (void**)&dda, _floatSize);
    cudaMalloc( (void**)&ddb, _floatSize);
    cudaMalloc( (void**)&ddc, _floatSize);
    cudaMalloc( (void**)&dx0, _floatSize / 4);
    cudaMalloc( (void**)&dy0, _floatSize / 4);

    // upload sizes/launch shape used inside the kernel
    cudaMemcpyToSymbol(dNUM, &NUM, sizeof(int));
    cudaMemcpyToSymbol(dBLOCK_N, &BLOCK_N, sizeof(int));
    cudaMemcpyToSymbol(dTHREAD_N, &THREAD_N, sizeof(int));

    cudaMemcpy( dda, da, _floatSize, cudaMemcpyHostToDevice );
    cudaMemcpy( ddb, db, _floatSize, cudaMemcpyHostToDevice );
    cudaMemcpy( ddc, dc, _floatSize, cudaMemcpyHostToDevice );

    // third launch argument: dynamic shared memory, one float per thread
    calculate_triliteration<<<BLOCK_N, THREAD_N, THREAD_N * sizeof(float)>>>(dda, ddb, ddc, dx0, dy0);

    cudaMemcpy( x0, dx0, _floatSize / 4, cudaMemcpyDeviceToHost);
    //cudaMemcpy( y0, dy0, _floatSize, cudaMemcpyDeviceToHost);

    for(int i = 0; i < NUM / 4; i ++){
        printf("%f\n", x0[i]);
    }

    cudaFree(dda);
    cudaFree(ddb);
    cudaFree(ddc);
    cudaFree(dx0);
    cudaFree(dy0);
    fclose(input_fd);
    return 1;
}
13,567
#include <stdio.h>

// The __global__ qualifier tells the compiler that this function is called
// from the CPU (host) and executed on the GPU (device).
__global__ void helloFromGPU()
{
    printf("Hello World from GPU!\n");
}

int main(int argc, char **argv)
{
    printf("Hello World from CPU!\n");

    // <<<...>>> marks an invocation of device code from the host thread.
    // A kernel is executed by a set of threads that all run the same code;
    // the parameters inside <<<...>>> choose how many threads execute it.
    // Here <<<1, 10>>> launches one block of 10 GPU threads, so the GPU
    // message is printed ten times.
    helloFromGPU <<<1, 10>>>();

    // Tears down the device context; this also flushes the device-side
    // printf buffer so the kernel output appears before the program exits.
    cudaDeviceReset();
    return 0;
}
13,568
/*
  Sample Implementation of
  Yamazaki and Tanaka (2005). Neural Modeling of an Internal Clock.
  Neural Computation 17:1032--1058.
  using only global memory of CUDA.
  Licensed under Creative Commons Attribution License (CC-BY)
  http://creativecommons.org/licenses/by/3.0/
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>

#define N 1024 //2048 // To be 2^k
#define T 1000
#define Pr 0.5
#define I 1.0
#define Kappa 2.0
#define Tau 100.0
#define BLOCK_SIZE 64
#define INNER_LOOP 1000

// Host-side state: z = neuron outputs, u = membrane potentials,
// result = T x N activity trace, w = per-neuron lists of presynaptic
// indices (-1 terminated), flattened row-major into N*N ints.
float *z, *u, *result;
int *w;

// Builds the random recurrent connectivity (each pair connected with
// probability Pr, fixed seed 23) and resets the state.
void initialize()
{
    int i, j, k;

    w = (int *)malloc(N*N*sizeof(int));
    z = (float *)malloc(N*sizeof(float));
    u = (float *)malloc(N*sizeof(float));
    result = (float *)malloc(T*N*sizeof(float));

    for(i = 0; i < N; i++){
        z[i] = 0;
        u[i] = I;
    }
    srand(23);
    for(i = 0; i < N; i++){
        k = 0;
        for(j = 0; j < N; j++){
            if ((float)rand()/(float)RAND_MAX < Pr){
                w[k+N*i] = j;   // append presynaptic index j to neuron i's list
                k++;
            }
        }
        w[k+N*i] = -1;          // sentinel terminates the list
    }
}

// Releases all host buffers.
void finalize()
{
    free(w);
    free(z);
    free(u);
    free(result);
}

// One thread per neuron; runs INNER_LOOP simulation steps.
// Each step: sum presynaptic outputs r, leaky-integrate
// u <- decay*u + (1-decay)*I - Kappa*r/N, rectify to z, publish z, then
// cross-block barrier before the next step.
// NOTE(review): the barrier spins on *global_sync via atomics — it only
// works if ALL N/BLOCK_SIZE blocks are resident on the device at once,
// and it assumes *global_sync starts at 0 (see note in loop() below).
// wLocal[N] is a per-thread array of N ints, which will live in local
// (off-chip) memory.
__global__ void Kernel(const int *w, float *z, float *u, float *result, const float decay, int t, int *global_sync)
{
    int i = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int j, k, s;
    float r;
    volatile float *vz;   // volatile: re-read other threads' z each step
    float uLocal, zLocal;
    float resultLocal[INNER_LOOP];
    int wLocal[N];
    float decayLocal;

    vz = z;
    uLocal = u[i];
    for(j = 0; j < N; j++){
        wLocal[j] = w[i*N+j];
    }
    decayLocal = decay;

    for(s = 0; s < INNER_LOOP; s++){
        // recurrent input: sum outputs of this neuron's presynaptic set
        r = 0;
        for(k = 0; wLocal[k] != -1; k++){
            j = wLocal[k];
            r += vz[j];
        }
        uLocal = decayLocal*uLocal + (1 - decayLocal)*I - Kappa*r/N;
        // rectification: output is max(u, 0)
        if (uLocal > 0){
            zLocal = uLocal;
        }else{
            zLocal = 0;
        }
        resultLocal[s] = zLocal;
        vz[i] = zLocal;

        // Ad-hoc grid-wide barrier: one count per block; block 0 waits for
        // all blocks then resets the counter, the others spin until reset.
        if (threadIdx.x == 0){
            atomicAdd(global_sync, 1);
            if (blockIdx.x == 0){
                while(atomicAdd(global_sync,0) < N/BLOCK_SIZE);
                atomicExch(global_sync,0);
            }else{
                while(atomicAdd(global_sync,0) > 0);
            }
        }
        __syncthreads();
    }
    u[i] = uLocal;
    // flush this chunk of the trace: result is laid out [neuron][time]
    for(s = 0; s < INNER_LOOP; s++){
        result[i*T+t*INNER_LOOP+s] = resultLocal[s];
    }
}

// Allocates device buffers, runs T/INNER_LOOP kernel launches, and copies
// the activity trace back. With T == INNER_LOOP this is a single launch.
void loop()
{
    float *zd, *ud, *resultd;
    int *wd;
    float decay;
    cudaError_t stat;
    int t;
    int *global_syncd;

    decay = exp(-1.0/Tau);

    cudaMalloc((void**)&wd, N*N*sizeof(int));
    cudaMalloc((void**)&zd, N*sizeof(float));
    cudaMalloc((void**)&ud, N*sizeof(float));
    cudaMalloc((void**)&resultd, N*T*sizeof(float));
    cudaMemcpy(wd, w, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(zd, z, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(ud, u, N*sizeof(float), cudaMemcpyHostToDevice);
    // NOTE(review): global_syncd is never zero-initialized (cudaMalloc does
    // not clear memory); the spin-barrier in Kernel assumes it starts at 0 —
    // a cudaMemset here would make that explicit. Confirm.
    cudaMalloc((void**)&global_syncd, sizeof(int));

    dim3 dimBlock(BLOCK_SIZE);
    dim3 dimGrid(N/BLOCK_SIZE);
    for(t = 0; t < T/INNER_LOOP; t++){
        Kernel<<<dimGrid,dimBlock>>>(wd, zd, ud, resultd, decay, t, global_syncd);
    }
    stat = cudaMemcpy(result, resultd, N*T*sizeof(float), cudaMemcpyDeviceToHost);
    if (stat != cudaSuccess){ puts("error"); }

    cudaFree(wd);
    cudaFree(zd);
    cudaFree(ud);
    cudaFree(resultd);
    cudaFree(global_syncd);
}

// Writes a raster of (time, neuron) pairs for every positive activity value
// to "<prefix>.r".
void output(char *prefix)
{
    FILE *f;
    int t, i;
    char fn[1024];

    sprintf(fn, "%s.r", prefix);
    f = fopen(fn, "w");
    for(i = 0; i < N; i++){
        for(t = 0; t < T; t++){
            if (result[t+T*i] > 0){
                fprintf(f, "%d %d\n", t, i);
            }
        }
    }
    fclose(f);
}

int main(int argc, char *argv[])
{
    char *prefix;

    if (argc < 2){
        fprintf(stderr, "%s <prefix>\n", argv[0]);
        exit(1);
    }
    prefix = argv[1];

    initialize();
    loop();
    output(prefix);
    finalize();

    return 0;
}
13,569
#include "includes.h"

// Finds the minimum of ord[] over the open interval (ind_start, ind_end) and
// its index, in parallel. The range is split into chunks of at most
// size_max_parallel elements, one chunk per thread; each thread finds its
// local minimum, then atomicMin folds them into *ymin, and the winning
// thread publishes its index into *ind_min.
//
// NOTE(review): the kernel uses only threadIdx.x and a plain __syncthreads()
// between the atomicMin and the *ymin comparison, so it assumes a
// SINGLE-BLOCK launch — confirm at the call site.
// NOTE(review): atomicMin operates on the unsigned long long *ymin while the
// local minimum is unsigned long; on LP64 both are 64-bit, but verify on
// other ABIs. Ties on the minimum value let an arbitrary thread win *ind_min.
__global__ void calcul_min( unsigned long *ord, int ind_start, int ind_end, unsigned long long *ymin, int *ind_min, int size_max_parallel ){
    int a = threadIdx.x;
    int size_tot = (ind_end - ind_start -1); //On n'effectue pas le calcul aux indices ind_start ni ind_end
    // (the computation excludes indices ind_start and ind_end themselves)
    int nb_threads = ceilf((float)size_tot/(float)size_max_parallel);
    //size of region to compute in the current thread
    int size_parallel = ceilf( (float)size_tot/(float)nb_threads );
    //have to be computed before the case of a different size_parallel value
    int ind_start_loc = ind_start + a * size_parallel + 1;
    // last participating thread takes whatever remains
    if ( a == (nb_threads - 1) ) size_parallel = size_tot - (nb_threads - 1) * size_parallel;

    unsigned long min_loc = ord[ind_start_loc];
    int ind_min_loc = ind_start_loc;
    int i = 0;
    //printf("FINDING YMIN\n");
    for ( i = ind_start_loc; i < ind_start_loc + size_parallel; i++ ){ //Looking for the lowest ordinate
        if ( ord[i]< min_loc ){
            min_loc = ord[i];
            ind_min_loc = i;
        }
    }
    // fold local minima into the global minimum
    atomicMin(ymin, min_loc);
    __syncthreads();
    // the thread(s) whose local minimum equals the global one record the index
    if (*ymin == min_loc) *ind_min = ind_min_loc;
    return;
}
13,570
#include <cstdio>
#include <string>
#include<cuda_runtime.h>
#include<sys/time.h>
#include<vector>
using namespace std;

// Prints the top-left 8x8 corner of a row-major matrix with `length` columns.
void print_data(float* arr, int width, int length, string flag) {
    int count=8;
    printf("%s :\n", flag.c_str());
    for (int i=0; i<count; i++) {
        for(int j=0; j<count; j++) {
            // fix: original printed arr[i*length + i], repeating the diagonal
            // element across each row instead of walking the columns
            printf("%.3lf ", arr[i*length + j]);
        }
        printf("\n");
    }
}

// Arithmetic mean of the recorded timings (ms).
float average(const vector<float> &timing) {
    double avg = 0;
    for(vector<float>::const_iterator it = timing.begin(); it != timing.end(); it++)
        avg += *it;
    avg /= timing.size();
    return avg;
}

// Fills a rows x cols row-major matrix with its column index.
void fillMat(float *mat, size_t rows, size_t cols) {
    for(int row = 0; row < rows; ++row) {
        for(int col = 0; col < cols; ++col) {
            mat[row * cols + col] = col;
        }
    }
}

// Naive matrix multiply C = A * B; one thread per output element.
// wA = inner dimension (columns of A), wB = columns of B.
// Assumes the grid exactly tiles the output (no bounds guard), as arranged
// by testMatrixMul below.
__global__ void matrixMulCUDA(float *C, float *A, float*B, int wA, int wB) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    float tmp = 0.0f;
    for (int i=0; i<wA; i++) {
        // fix: original read A[i * wB + x], multiplying A by itself and never
        // touching B (undetectable with fillMat's identical inputs, but wrong
        // for any A != B)
        tmp += A[y * wA + i] * B[i * wB + x];
    }
    C[y * wB + x] = tmp;
}

// Tiled shared-memory multiply (currently unused; kept for the commented-out
// benchmarking path in testMatrixMul). Assumes wA and wB are multiples of
// BLOCK_SIZE.
template <int BLOCK_SIZE> __global__ void matrixMulCUDA_share(float *C, float *A, float *B, int wA, int wB) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = by * wA;              // first tile of A for this block row
    int aEnd = aBegin + wA -1;         // end of that row
    int aStep = BLOCK_SIZE;            // advance one tile at a time
    int bBegin = bx * BLOCK_SIZE;      // first tile of B for this block column
    int bStep = wB * BLOCK_SIZE;
    float cSub = 0;

    __shared__ float subA[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float subB[BLOCK_SIZE][BLOCK_SIZE];
    for (int a = aBegin, b = bBegin; a < aEnd; a+=aStep, b+=bStep) {
        subA[ty][tx] = A[a + ty * wA + tx];
        subB[ty][tx] = B[b + ty * wB + tx];
        __syncthreads();               // tiles loaded before use
        for (int i=0; i<BLOCK_SIZE; i++) {
            cSub += subA[ty][i] * subB[i][tx];
        }
        __syncthreads();               // done with tiles before reload
    }
    C[((by*BLOCK_SIZE+ty)*wB + bx*BLOCK_SIZE+tx)] = cSub;
}

// Benchmarks the naive kernel on a WIDTH x WIDTH multiply: uploads inputs,
// times 3 launches with CUDA events, prints the average and the matrix
// corners. WIDTH must be a multiple of the 32x32 block size.
template<int WIDTH>
void testMatrixMul() {
    size_t mem_size = sizeof(float) * WIDTH * WIDTH;
    float *mat1 = (float *)malloc(mem_size);
    float *mat2 = (float *)malloc(mem_size);
    float *mat3 = (float *)malloc(mem_size);

    // initialize mat1 and mat2
    fillMat(mat1, WIDTH, WIDTH);
    fillMat(mat2, WIDTH, WIDTH);

    float* d_mat1, *d_mat2, *d_mat3;
    cudaError_t error;
    error = cudaMalloc((void**)&d_mat1, mem_size);
    error = cudaMemcpy(d_mat1, mat1, mem_size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        printf("cudaMemcpy d_mat1 returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(1);
    }
    error = cudaMalloc((void**)&d_mat2, mem_size);
    error = cudaMemcpy(d_mat2, mat2, mem_size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        printf("cudaMemcpy d_mat2 returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(1);
    }
    error = cudaMalloc((void**)&d_mat3, mem_size);

    int block_size = 32;
    dim3 threads(block_size, block_size);
    dim3 grid(WIDTH/threads.x, WIDTH/threads.y);

    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    vector<float> times;
    int loops = 3;
    for (int i=0;i<loops; i++) {
        cudaEventRecord(start, NULL);
        matrixMulCUDA<<<grid, threads>>>(d_mat3, d_mat1, d_mat2, WIDTH, WIDTH);
        // alternative shared-memory path:
        // matrixMulCUDA_share<32><<<grid, threads>>>(d_mat3, d_mat1, d_mat2, WIDTH, WIDTH);
        cudaEventRecord(stop, NULL);
        cudaEventSynchronize(stop);
        float time = 0;
        cudaEventElapsedTime(&time, start, stop);
        times.push_back(time);
    }
    float avg_time = average(times);
    printf("cuda %d time: %lf ms\n", WIDTH, avg_time);

    error = cudaMemcpy(mat3, d_mat3, mem_size, cudaMemcpyDeviceToHost);
    print_data(mat1, WIDTH, WIDTH, "mat1");
    print_data(mat2, WIDTH, WIDTH, "mat2");
    print_data(mat3, WIDTH, WIDTH, "mat3");

    free(mat1);
    free(mat2);
    free(mat3);
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    cudaFree(d_mat3);
}

int main(int argc, char const *argv[])
{
    // testMatrixMul<256 >();
    // testMatrixMul<512 >();
    // testMatrixMul<768 >();
    testMatrixMul<2048>();
    // testMatrixMul<1280>();
    // testMatrixMul<1536>();
    // testMatrixMul<1792>();
    return 0;
}
13,571
#include "cuda_class.cuh"

// Element-wise sum d_c = d_a + d_b. The 2-D index math collapses to
// tid == blockIdx.x under the <<<length, 1>>> launch used below (blockDim
// is 1x1 and the grid is 1-D), so each block handles one element.
// No bounds guard: the grid size must equal the array length.
__global__ void Kernel (uint32_t *d_a, uint32_t *d_b, uint32_t *d_c){
    uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y;
    uint32_t tid = idx + idy * blockDim.x * gridDim.x;
    d_c[tid] = d_a[tid] + d_b[tid];
};

// Stores the caller's host buffers (not copied, not owned) and builds a
// zero-filled staging buffer used to scrub d_c before it is freed.
// NOTE(review): h_zeros is heap-allocated here; no destructor is visible in
// this file — confirm cuda_class.cuh releases it, otherwise it leaks.
CudaClass::CudaClass(uint32_t *h_a, uint32_t *h_b, uint32_t *h_c, uint32_t length){
    this->length = length;
    this->mem_size = length * sizeof(uint32_t);
    this->h_a = h_a;
    this->h_b = h_b;
    this->h_c = h_c;
    this->h_zeros = (uint32_t *) malloc(length*sizeof(uint32_t));
    for(int i = 0; i < length; i++){
        h_zeros[i] = 0;
    }
}

// Allocates the three device buffers (one per operand/result).
void CudaClass::allocateDeviceMemory(){
    cudaMalloc((void**) &d_a, mem_size);
    cudaMalloc((void**) &d_b, mem_size);
    cudaMalloc((void**) &d_c, mem_size);
}

// Uploads both input arrays to the device.
void CudaClass::copyInputToDevice(){
    cudaMemcpy(d_a, h_a, mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, mem_size, cudaMemcpyHostToDevice);
}

// Launches one block per element (1 thread each) — see Kernel's comment.
void CudaClass::runKernelGPU(){
    // Kernel<<<length, 1>>>(d_a, d_b, d_c);
    Kernel<<<length, 1>>>(d_a, d_b, d_c);
}

// Downloads the result into the caller's h_c buffer (blocking copy, so it
// also synchronizes with the kernel above).
void CudaClass::copyOutputToHost(){
    cudaMemcpy(h_c, d_c, mem_size, cudaMemcpyDeviceToHost);
}

// Overwrites d_c with zeros before freeing — presumably to scrub the result
// from device memory; the copy is not required for correctness of the free.
void CudaClass::freeDeviceMemory(){
    cudaMemcpy(d_c, h_zeros, mem_size, cudaMemcpyHostToDevice);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
13,572
// REQUIRES: clang-driver // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target // // RUN: %clang -v --target=i386-apple-macosx \ // RUN: --sysroot=%S/Inputs/CUDA-macosx --cuda-path-ignore-env 2>&1 | FileCheck %s // CHECK: Found CUDA installation: {{.*}}/Inputs/CUDA-macosx/usr/local/cuda
13,573
#include <stdio.h>

// Size of array
#define N 1048576

// Kernel: element-wise vector sum c = a + b, one thread per element.
__global__ void add_vectors(int *a, int *b, int *c)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id >= N)
        return;             // guard the grid tail
    c[id] = a[id] + b[id];
}

// Main program
int main()
{
    // Number of bytes to allocate for N integers
    size_t bytes = N*sizeof(int);

    // Host-side arrays
    int *A = (int*)malloc(bytes);
    int *B = (int*)malloc(bytes);
    int *C = (int*)malloc(bytes);

    // Device-side arrays
    int *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);

    // Inputs: every element of A is 1, every element of B is 2
    for (int i = 0; i < N; ++i) {
        A[i] = 1;
        B[i] = 2;
    }

    // Stage the inputs on the device
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);

    // Launch configuration: ceil(N / threads-per-block) blocks
    int thr_per_blk = 256;
    int blk_in_grid = ceil( float(N) / thr_per_blk );

    add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);

    // Blocking copy back — doubles as synchronization before validation
    cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);

    // Every element must equal 1 + 2 = 3
    for (int i = 0; i < N; ++i) {
        if (C[i] != 3) {
            printf("\nError: value of C[%d] = %d instead of 3\n\n", i, C[i]);
            exit(-1);
        }
    }

    // Release host and device memory
    free(A);
    free(B);
    free(C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    printf("\n---------------------------\n");
    printf("__SUCCESS__\n");
    printf("---------------------------\n");
    printf("N                 = %d\n", N);
    printf("Threads Per Block = %d\n", thr_per_blk);
    printf("Blocks In Grid    = %d\n", blk_in_grid);
    printf("---------------------------\n\n");

    return 0;
}
13,574
#include "includes.h"
/***********************************************************
tissueGPU1.cu
GPU kernel to accumulate contributions of tissue source
strengths qt to tissue solute levels pt.
TWS December 2011
Cuda 10.1 Version, August 2019
************************************************************/

// One thread per tissue point itp. Sums, over all source points jtp, the
// source strength qt weighted by a lookup table d_dtt000 indexed by the
// Manhattan distance |dx|+|dy|+|dz| between the two points' grid coordinates.
//
// d_tisspoints : integer coordinates stored SoA — x in [0,nnt),
//                y in [nnt,2*nnt), z in [2*nnt,3*nnt)
// d_dtt000     : distance-to-weight lookup, indexed by Manhattan distance
// d_pt000      : output solute level per tissue point
// d_qt000      : input source strength per tissue point
// nnt          : number of tissue points (O(nnt^2) total work)
__global__ void tissueGPU1Kernel(int *d_tisspoints, float *d_dtt000, float *d_pt000, float *d_qt000, int nnt)
{
    int itp = blockDim.x * blockIdx.x + threadIdx.x;
    int jtp,ixyz,ix,iy,iz,jx,jy,jz,nnt2=2*nnt;
    float p = 0.;
    if(itp < nnt){
        // this point's coordinates
        ix = d_tisspoints[itp];
        iy = d_tisspoints[itp+nnt];
        iz = d_tisspoints[itp+nnt2];
        for(jtp=0; jtp<nnt; jtp++){
            jx = d_tisspoints[jtp];
            jy = d_tisspoints[jtp+nnt];
            jz = d_tisspoints[jtp+nnt2];
            // Manhattan distance selects the interaction weight
            ixyz = abs(jx-ix) + abs(jy-iy) + abs(jz-iz);
            p += d_qt000[jtp]*d_dtt000[ixyz];
        }
        d_pt000[itp] = p;
    }
}
13,575
#include "includes.h"
// Lab2_AddingTwoVectors.cu : Defines the entry point for the console application.
// Author: Lukasz Pawel Rabiec (259049)

#define SIZE 32

// Element-wise vector sum c = a + b; one block per element (blockIdx.x is
// the element index), guarded against grids larger than SIZE.
__global__ void AddVectors(int* a, int* b, int* c)
{
    const int idx = blockIdx.x;
    if (idx >= SIZE)
        return;
    c[idx] = a[idx] + b[idx];
}
13,576
//pass
//--blockDim=1024 --gridDim=1 --no-inline

// In CUDA providing the inline keyword should still keep a copy of
// the function around (contrary to OpenCL). However, by default a
// function with this keyword is not actually inlined at the optimisation
// level used by GPUVerify.

#define tid threadIdx.x

// Reads A[tid + offset] and accumulates it into A[tid]; declared inline to
// exercise GPUVerify's handling described above.
__device__ inline void inlined(int *A, int offset)
{
    int temp = A[tid + offset];
    A[tid] += temp;
}

// Kernel under verification: simply forwards to the inline device function.
__global__ void inline_test(int *A, int offset) {
    inlined(A, offset);
}
13,577
#include "includes.h"

// Element-wise sum: result[i] = array1[i] + array2[i] for i in [0, WIDTH).
// One thread per element.
__global__ void GPUAdd(float *array1, float *array2, float *result, int WIDTH)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // fix: WIDTH was accepted but never checked, so tail threads in the last
    // block read and wrote past the end of the arrays whenever the grid did
    // not exactly tile WIDTH
    if (i < WIDTH)
        result[i] = array1[i] + array2[i];
}
13,578
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Adds the totals of all preceding blocks to each element, turning the
// per-block scans produced by scan_kernel into one global inclusive scan.
__global__ void recombine_kernel(int* d_out, int* d_global_max, const int maxsize)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= maxsize || blockIdx.x == 0) {
        return;
    }
    // Sum the final totals of every block before this one.
    int sum = 0;
    for (int i = 0; i < blockIdx.x; i++) {
        sum += d_global_max[i];
    }
    d_out[index] += sum;
}

// Block-local inclusive (Hillis-Steele, scatter form) scan of d_out; each
// block also writes its final total into d_global_max[blockIdx.x].
// Fixes vs. the original:
//  - no early return, so every thread reaches both barriers (the original
//    returned before __syncthreads(), which is undefined behavior);
//  - a second barrier after the writes — the original raced round r's
//    writes against round r+1's reads.
__global__ void scan_kernel(int* d_out, int* d_global_max, const int maxsize)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    for (int i = 1; i < blockDim.x; i = i << 1) {
        int tmp = 0;
        // Read my value only if the scatter target stays inside this block
        // and inside the array.
        if (index < maxsize &&
            (index + i) < (blockIdx.x + 1) * blockDim.x &&
            (index + i) < maxsize) {
            tmp = d_out[index];
        }
        __syncthreads();          // all reads of this round finished
        if (tmp) {                // adding 0 is a no-op, so skipping is safe
            d_out[index + i] += tmp;
        }
        __syncthreads();          // all writes finished before the next round
    }

    // Last live thread of the block publishes the block total.
    if (index < maxsize &&
        (threadIdx.x == blockDim.x - 1 || index == maxsize - 1)) {
        d_global_max[blockIdx.x] = d_out[index];
    }
}

int main(int argc, char **argv)
{
    // Input is 1..maxsize, so the inclusive scan yields triangular numbers.
    int maxsize = 1 << 10;
    int *container_array = new int[maxsize];
    for (int i = 1; i < maxsize + 1; i++) {
        container_array[i - 1] = i;
    }

    int blocksize = 1024;
    int gridsize = (maxsize / blocksize) + 1;

    int *h_output = new int[maxsize];
    int *h_global_max = new int[gridsize];  // sized to the grid (was a fixed 10)

    int *d_output, *d_global_max;
    cudaMalloc(&d_output, sizeof(int) * maxsize);
    cudaMalloc(&d_global_max, sizeof(int) * gridsize);
    cudaMemcpy(d_output, container_array, sizeof(int) * maxsize, cudaMemcpyHostToDevice);

    scan_kernel<<<gridsize, blocksize>>>(d_output, d_global_max, maxsize);
    recombine_kernel<<<gridsize, blocksize>>>(d_output, d_global_max, maxsize);

    cudaMemcpy(h_output, d_output, sizeof(int) * maxsize, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_global_max, d_global_max, sizeof(int) * gridsize, cudaMemcpyDeviceToHost);

    for (int i = 0; i < maxsize; i++) {
        printf("%d\n", h_output[i]);
    }

    cudaFree(d_global_max);
    cudaFree(d_output);
    // new[] memory must be released with delete[]; the original free()'d
    // h_output (undefined behavior) and leaked the other two buffers.
    delete[] h_output;
    delete[] h_global_max;
    delete[] container_array;
    return 0;
}
13,579
#include "includes.h"

// Flattened global thread id for a 3-D grid of 3-D blocks.
__device__ unsigned int getGid3d3d(){
    int block = blockIdx.x
              + gridDim.x * blockIdx.y
              + gridDim.x * gridDim.y * blockIdx.z;
    int thread = threadIdx.x
               + blockDim.x * threadIdx.y
               + blockDim.x * blockDim.y * threadIdx.z;
    return block * (blockDim.x * blockDim.y * blockDim.z) + thread;
}

// Divides each complex (double2) element of `in` by `factor`, writing the
// quotient to `out`. One element per thread; the launch must cover the data.
__global__ void scalarDiv(double2* in, double factor, double2* out){
    const unsigned int idx = getGid3d3d();
    double2 quotient;
    quotient.x = in[idx].x / factor;
    quotient.y = in[idx].y / factor;
    out[idx] = quotient;
}
13,580
/*
 ============================================================================
 Name        : batched-matmul-cuda.cu
 Author      : salehjg
 Version     :
 Copyright   :
 Description : Batched row-major matrix multiplication C = A*B on the GPU,
               verified against a CPU reference implementation.
 ============================================================================
 */

// System includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <math.h>

// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>

#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError()    __cudaCheckError( __FILE__, __LINE__ )

// Abort on a failed CUDA runtime call.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}

// Abort on a pending (asynchronous) CUDA error, e.g. from a kernel launch.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }

    // More careful checking. However, this will affect performance.
    // Comment away if needed.
    err = cudaDeviceSynchronize();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}

// C = AB, batched, all matrices row-major.
// One thread block computes a BLOCK_SIZE x BLOCK_SIZE tile of C for one
// batch (blockIdx.z). Dynamic shared memory holds BLOCK_SIZE full rows of A
// followed by BLOCK_SIZE full columns of B:
//   smem[0 .. BLOCK_SIZE*dim2A)                  : rows of A
//   smem[BLOCK_SIZE*dim2A .. + BLOCK_SIZE*dim1B) : columns of B
// Launch with sharedMem = (BLOCK_SIZE*dim2A + BLOCK_SIZE*dim1B)*sizeof(float).
template <int BLOCK_SIZE>
__global__ void kernel_batched_matmul(
        const float * matA,
        const float * matB,
        float * matC,
        int dim0,
        int dim1A, int dim2A,
        int dim1B, int dim2B,
        int dim1C, int dim2C)
{
    extern __shared__ float smem[];
    (void)dim0;  // batch count is implied by gridDim.z; kept for interface

    const unsigned int len_subA = BLOCK_SIZE * dim2A;   // shared rows of A
    const unsigned int len_subB = BLOCK_SIZE * dim1B;   // shared cols of B
    const unsigned long len_A_singleBatch = (unsigned long)dim1A * dim2A;
    const unsigned long len_B_singleBatch = (unsigned long)dim1B * dim2B;
    const unsigned long len_C_singleBatch = (unsigned long)dim1C * dim2C;
    const unsigned int BLOCKSIZE_P2 = BLOCK_SIZE * BLOCK_SIZE;  // threads/block

    const unsigned int bx = blockIdx.x;   // output tile column
    const unsigned int by = blockIdx.y;   // output tile row
    const unsigned int bz = blockIdx.z;   // batch index
    const unsigned int tx = threadIdx.x;
    const unsigned int ty = threadIdx.y;

    const unsigned int c_pos_x = bx * BLOCK_SIZE + tx;  // output column
    const unsigned int c_pos_y = by * BLOCK_SIZE + ty;  // output row

    const unsigned long offsetA = (unsigned long)(by * BLOCK_SIZE) * dim2A;
    const unsigned long offsetB = bx * BLOCK_SIZE;

    // Block-stride load of BLOCK_SIZE rows of A; out-of-range rows are
    // zero-filled. (The original compared against the full batched length,
    // letting edge tiles read rows that belong to the next batch; those
    // values never reached the output, but zero-filling is the clean form.)
    for (unsigned long idxA = ty * BLOCK_SIZE + tx; idxA < len_subA; idxA += BLOCKSIZE_P2) {
        const unsigned long gidx1 = offsetA + idxA;
        smem[idxA] = (gidx1 < len_A_singleBatch)
                   ? matA[bz * len_A_singleBatch + gidx1] : 0;
    }

    ///TODO: It might be better to store transposed subMatB in shared memory
    /// to avoid shared memory read conflict. But then we might get shared
    /// memory write conflict. (?)
    // Block-stride load of BLOCK_SIZE columns of B; out-of-range columns are
    // zero-filled. (The original tested the tile-local column against dim2B,
    // letting edge tiles read wrapped elements from the next row of B.)
    for (unsigned long idxB = ty * BLOCK_SIZE + tx; idxB < len_subB; idxB += BLOCKSIZE_P2) {
        const unsigned int col = idxB % BLOCK_SIZE;  // column within the tile
        const unsigned int row = idxB / BLOCK_SIZE;  // row of B
        smem[len_subA + idxB] =
            (row < (unsigned int)dim1B && offsetB + col < (unsigned long)dim2B)
            ? matB[bz * len_B_singleBatch + offsetB + (unsigned long)row * dim2B + col]
            : 0;
    }

    __syncthreads();

    // Inner product over the common dimension (dim2A == dim1B).
    if (c_pos_x < (unsigned int)dim2C && c_pos_y < (unsigned int)dim1C) {
        float output_element = 0.0f;
        for (int k = 0; k < dim2A; k++) {
            output_element += smem[ty * dim2A + k]
                            * smem[len_subA + k * BLOCK_SIZE + tx];
        }
        matC[bz * len_C_singleBatch + (unsigned long)c_pos_y * dim2C + c_pos_x] = output_element;
    }
}

// Host-side launcher: checks shapes, derives the launch configuration and
// dispatches the templated kernel.
//   matA: row-major device ptr (batch, hA, wA) == (dim0A, dim1A, dim2A)
//   matB: row-major device ptr (batch, hB, wB) == (dim0B, dim1B, dim2B)
//   matC: row-major device ptr (batch, hA, wB) == (dim0B, dim1A, dim2B)
void batched_matmul(
        const float * matA,
        const float * matB,
        float * matC,
        int dim0A, int dim1A, int dim2A,
        int dim0B, int dim1B, int dim2B)
{
    if(dim2A != dim1B){printf("ERR@batched_matmul: BAD SHAPE.\n"); return;}
    if(dim0B != dim0A){printf("ERR@batched_matmul: BAD BATCH SIZES.\n"); return;}

    const int BLOCK_DIM = 6;
    dim3 blocksize(BLOCK_DIM, BLOCK_DIM, 1);
    dim3 gridsize((dim2B + BLOCK_DIM - 1) / BLOCK_DIM,
                  (dim1A + BLOCK_DIM - 1) / BLOCK_DIM,
                  dim0A);

    // BLOCK_DIM full rows of A plus BLOCK_DIM full columns of B per block.
    unsigned long sharedmemsize = (BLOCK_DIM * dim2A + BLOCK_DIM * dim1B) * sizeof(float);

    printf("@batched_matmul:\n");
    printf("\tBLOCK:(%d, %d)\n", blocksize.x, blocksize.y);
    printf("\t GRID:(%d, %d, %d)\n", gridsize.x, gridsize.y, gridsize.z);
    printf("\t SHARED: %lu Bytes\n", sharedmemsize);  // %lu: unsigned long (was %d)

    if(BLOCK_DIM == 6){
        kernel_batched_matmul<6> <<<gridsize, blocksize, sharedmemsize>>>(
                matA, matB, matC,
                dim0A,
                dim1A, dim2A,   // hA, wA
                dim1B, dim2B,   // hB, wB
                dim1A, dim2B);  // hC, wC
        CudaCheckError();
    }else{
        printf("ERR@batched_matmul: UNDEFINED BLOCK_DIM.\n");
        return;
    }
}

// Print a (dim0, dim1, dim2) row-major tensor; large tensors are truncated
// to their first batch and row to keep the output readable.
void printTensorContent(float * tn, int dim0, int dim1, int dim2)
{
    unsigned int _dim0 = dim0, _dim1 = dim1, _dim2 = dim2;
    if (dim0 * dim1 * dim2 > 1000) {
        _dim0 = 1;
        _dim1 = 1;
    }
    for (unsigned int d0 = 0; d0 < _dim0; d0++) {
        for (unsigned int d1 = 0; d1 < _dim1; d1++) {
            for (unsigned int d2 = 0; d2 < _dim2; d2++) {
                unsigned long indx = (unsigned long)d0 * dim1 * dim2
                                   + (unsigned long)d1 * dim2 + d2;
                printf("(%d, %d, %d): %f\n", d0, d1, d2, tn[indx]);
            }
        }
    }
}

// Fill `data` with `val`.
void ConstantInit(float *data, int size, float val)
{
    for (int i = 0; i < size; ++i) {
        data[i] = val;
    }
}

// CPU reference: rslt = mat1 * mat2, batched, row-major ("H" means dim1,
// "W" means dim2). matrix_rank must be 3. Returns a malloc'ed buffer of
// shape (batchsize, matrixH1, matrixW2) that the caller must free, or
// nullptr on a shape error.
float* LA_MatMul(float* mat1, float* mat2,
                 int batchsize, int matrix_rank,
                 int matrixH1, int matrixW1,
                 int matrixH2, int matrixW2)
{
    if (matrix_rank != 3) { printf("LA_MATMUL: invalid matrix rank\n"); return nullptr; }
    if (matrixW1 != matrixH2) { printf("LA_MATMUL: bad shapes\n"); return nullptr; }

    float* rslt = (float*)malloc((size_t)batchsize * matrixH1 * matrixW2 * sizeof(float));
    for (int b = 0; b < batchsize; b++) {
        for (int j = 0; j < matrixH1; j++) {
            for (int i = 0; i < matrixW2; i++) {
                // Row j of mat1 dotted with column i of mat2.
                float sum = 0;
                for (int k = 0; k < matrixW1; k++) {
                    int indxS1 = b * matrixH1 * matrixW1 + j * matrixW1 + k;
                    int indxS2 = b * matrixH2 * matrixW2 + k * matrixW2 + i;
                    sum += mat1[indxS1] * mat2[indxS2];
                }
                rslt[b * matrixH1 * matrixW2 + j * matrixW2 + i] = sum;
            }
        }
    }
    return rslt;
}

/**
 * Run a simple test of batched matrix multiplication using CUDA and verify
 * the result against the CPU reference. Returns EXIT_SUCCESS/EXIT_FAILURE.
 */
int MatrixMultiply(int dim0, int dim1A, int dim2A, int dim1B, int dim2B)
{
    // Allocate host memory for matrices A, B and C.
    unsigned int size_A = dim0 * dim1A * dim2A;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));

    unsigned int size_B = dim0 * dim1B * dim2B;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));

    unsigned int size_C = dim0 * dim1A * dim2B;
    unsigned int mem_size_C = size_C * sizeof(float);
    float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));

    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize host memory
    const float valB = 0.5f;
    ConstantInit(h_A, size_A, 1.0f);
    ConstantInit(h_B, size_B, valB);

    // CPU reference result.
    float *host_results = LA_MatMul(h_A, h_B, dim0, 3, dim1A, dim2A, dim1B, dim2B);

    // Allocate device memory and upload the inputs.
    float *d_A, *d_B, *d_C;
    CudaSafeCall(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
    CudaSafeCall(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
    CudaSafeCall(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
    CudaSafeCall(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));

    // %u: the sizes are unsigned int (the original used %ld).
    printf("\n** LEN_A: %u\n", size_A);
    printf("** LEN_B: %u\n", size_B);
    printf("** LEN_C: %u\n\n", size_C);

    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    printf("done\n");
    cudaDeviceSynchronize();

    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    CudaSafeCall(cudaEventCreate(&start));
    cudaEvent_t stop;
    CudaSafeCall(cudaEventCreate(&stop));

    // Record the start event
    CudaSafeCall(cudaEventRecord(start, NULL));

    // Execute the kernel
    int nIter = 1;
    for (int j = 0; j < nIter; j++) {
        batched_matmul(d_A, d_B, d_C, dim0, dim1A, dim2A, dim0, dim1B, dim2B);
    }

    // Record the stop event and wait for it to complete.
    CudaSafeCall(cudaEventRecord(stop, NULL));
    CudaSafeCall(cudaEventSynchronize(stop));

    float msecTotal = 0.0f;
    CudaSafeCall(cudaEventElapsedTime(&msecTotal, start, stop));
    printf("Time= %.3f msec\n", msecTotal / nIter);

    // Copy result from device to host
    CudaSafeCall(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));

    printf("\n\nMatA:\n");
    printTensorContent(h_A, dim0, dim1A, dim2A);
    printf("\n\nMatB:\n");
    printTensorContent(h_B, dim0, dim1B, dim2B);
    printf("\n\nMatC:\n");
    printTensorContent(host_results, dim0, dim1A, dim2B);

    printf("\n\nChecking computed result for correctness: \n");
    bool correct = true;

    // test relative error by the formula
    // |<x, y>_cpu - <x,y>_gpu| / <|x|, |y|> < eps
    double eps = 1.e-6;  // machine zero
    for (int i = 0; i < static_cast<int>(size_C); i++) {
        double abs_err = fabs(h_C[i] - host_results[i]);
        double dot_length = dim2A;
        double abs_val = fabs(h_C[i]);
        // Guard the divisor: the original divided by |h_C[i]| unconditionally,
        // so a spurious zero produced NaN, and "NaN > eps" is false — wrong
        // zeros silently passed verification.
        double rel_err = abs_err / (abs_val > 0.0 ? abs_val : 1.0) / dot_length;
        if (rel_err > eps) {
            printf("Error! Matrix[%05d]=%.8f,\t\tref=%.8f error term is > %E\n",
                   i, h_C[i], host_results[i], eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    // Clean up memory (host_results was leaked in the original).
    free(h_A);
    free(h_B);
    free(h_C);
    free(host_results);
    CudaSafeCall(cudaFree(d_A));
    CudaSafeCall(cudaFree(d_B));
    CudaSafeCall(cudaFree(d_C));

    if (correct) {
        return EXIT_SUCCESS;
    } else {
        return EXIT_FAILURE;
    }
}

/**
 * Program main
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    // MatC = MatA * MatB
    // Everything is row-major, so dim2 is the width of a matrix and dim1 its height.
    int batchsize = 100;
    int dim1A = 64;
    int dim2A = 1024;
    int dim1B = 1024;
    int dim2B = 64;

    printf("MatrixA(dim0:%d, dim1: %d, dim2:%d)\nMatrixB(dim0:%d, dim1: %d, dim2:%d)\n",
           batchsize, dim1A, dim2A, batchsize, dim1B, dim2B);

    int matrix_result = MatrixMultiply(batchsize, dim1A, dim2A, dim1B, dim2B);
    exit(matrix_result);
}
13,581
#include "includes.h"

// Backpropagation update for the hidden->output weight-gradient matrix
// (dsyn2) of a small MLP: one thread per weight (j, k).
// NOTE(review): the layer geometry is hard-coded — 10 output units (the
// `j*10 + k` stride and the divisor's denominator) and 60000 samples
// (MNIST-sized); confirm against the host-side launch before reusing.
// There is no bounds guard, so the launch must cover exactly the
// weight-matrix dimensions.
__global__ void backProp2(float* layer1, float* dsyn2, float* label, float* out)
{
    // j: hidden-unit index (x dimension), k: output-unit index (y dimension).
    int j = blockDim.x*blockIdx.x + threadIdx.x;
    int k = blockDim.y*blockIdx.y + threadIdx.y;
    // Output-layer error term: (target - prediction) * sigmoid'(activation),
    // using sigmoid'(out) = out * (1 - out).
    // NOTE(review): `1.0` and the divisor below are double literals, forcing
    // a double-precision round-trip per thread.
    float delta = (label[k] - out[k]) * (out[k]*(1.0-out[k]));
    // Accumulate the gradient scaled by 1/(60000/10) — presumably an
    // averaging factor over the training set; verify against the caller.
    dsyn2[j*10 + k] += delta * layer1[j] / (60000.0/10.0);
}
13,582
#include <iostream>
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <cmath>
#include <cuda.h>

using namespace std;

// y[i] = a * x[i] + y[i] for i in [0, n); the guard handles the tail block.
__global__ void saxpy(size_t n, float a, float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * x[i] + y[i];
}

int main(int argc, char **argv)
{
    // Vector length comes from the command line; the original dereferenced
    // argv[1] unconditionally and crashed when the argument was missing.
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <N>" << endl;
        return 1;
    }
    size_t N = atoi(argv[1]);

    float a = 2.0f;

    // Allocate and initialize vectors x and y on the CPU
    float *x = (float *) malloc(N * sizeof(float));
    float *y = (float *) malloc(N * sizeof(float));
    float *res = (float *) malloc(N * sizeof(float));
    for (size_t i = 0; i < N; i++) {
        x[i] = i;
        y[i] = 1.0f;
    }

    // Allocate device vectors dx and dy, then copy x and y into them
    float *dx, *dy;
    cudaMalloc(&dx, N * sizeof(float));
    cudaMalloc(&dy, N * sizeof(float));
    cudaMemcpy(dx, x, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, y, N * sizeof(float), cudaMemcpyHostToDevice);

    // Launch the CUDA kernel for saxpy
    int blockSize = 1024;
    saxpy<<<(N + blockSize - 1) / blockSize, blockSize>>>(N, a, dx, dy);

    // Blocking copy back to the host — this also synchronizes with the kernel.
    // (The original started a std::chrono timer here that was never read;
    // the dead timing code has been removed.)
    cudaMemcpy(res, dy, N * sizeof(float), cudaMemcpyDeviceToHost);

    // Verify results against the CPU computation.
    {
        size_t i;
        for (i = 0; i < N; i++) {
            float temp = a * x[i] + y[i];
            // Relative comparison; the denominator uses the magnitude (the
            // original used the signed value, which breaks for non-positive
            // reference values).
            if (std::abs(res[i] - temp) / std::max(1e-6f, std::abs(temp)) > 1e-6) {
                cout << res[i] << " " << temp << endl;
                break;
            }
        }
        if (i == N) {
            cout << "saxpy on GPU is correct." << endl;
        } else {
            cout << "saxpy on GPU is incorrect on element " << i << "." << endl;
        }
    }

    // Release device and host memory (all leaked in the original).
    cudaFree(dx);
    cudaFree(dy);
    free(x);
    free(y);
    free(res);
    return 0;
}
13,583
#include "includes.h"

// Momentum-SGD weight update for a convolution layer: one thread per filter
// weight. `elements` is the total weight count; `filter_grads` stores
// interleaved (grad, grad_prev) pairs per weight, hence the *2 stride.
// NOTE(review): `number_filters` is accepted but unused in this kernel.
__global__ void calcConvolutionUpdateWeightsGPU( float *filters, float *filter_grads, int in_size_z, int number_filters, int kernel_size, float momentum, float decay, float learning_rate, int elements )
{
    // Flat thread id over a 2-D grid of 1-D blocks.
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if ( id < elements )
    {
        // Keep the flat weight index before decomposing `id` in place.
        int id_out = id;
        // Decompose into (i, j, z, filter), fastest-varying first:
        // i = kernel column, j = kernel row, z = input channel.
        int i = id % kernel_size;
        id /= kernel_size;
        int j = id % kernel_size;
        id /= kernel_size;
        int z = id % in_size_z;
        id /= in_size_z;
        int filter = id;
        // Weights per filter (leading 1 mirrors the 4-D layout of the
        // original CPU code below).
        int filter_size = 1 * kernel_size * kernel_size * in_size_z;
        // Index of this weight's (grad, grad_prev) pair; the reconstructed
        // flat index equals id_out, times 2 for the pair stride.
        int filter_grad_index = (filter * filter_size + z * (kernel_size * kernel_size) + j * kernel_size + i) * 2;
        float grad = filter_grads[ filter_grad_index ];
        float grad_prev = filter_grads[ filter_grad_index + 1 ];
        // Classic momentum: m = g + momentum * m_prev, stored back as the
        // new "previous" gradient.
        float m = ( grad + grad_prev * momentum );
        filter_grads[ filter_grad_index + 1 ] = m;
        // Gradient step with L2 weight decay.
        float w = filters[ id_out ];
        w -= learning_rate * ( m + (decay * w));
        filters[ id_out ] = w;
    }
    /* original code
    int filters_size = filters.size();
    for ( int a = 0; a < filters_size; ++a ){
        for ( int z = 0; z < in.size.z; ++z ){
            for ( int j = 0; j < kernel_size; ++j ){
                for ( int i = 0; i < kernel_size; ++i ){
                    GradientObject& grad = filter_grads[a].get( 0, i, j, z );
                    float m = (grad.grad + grad.grad_prev * momentum);
                    grad.grad_prev = m;
                    float& w = filters[a].get( 0, i, j, z );
                    w -= lr * ( m + (decay * w));
                }
            }
        }
    }
    */
}
13,584
#include <iostream>
#include <stdlib.h>
#include <iomanip>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <math.h>

using namespace std;

#define MAX_ARRAY_SIZE 1024
#define RANDOM_MAX 1000
#define TILE_DIM 16
#define BLOCK_ROWS 8
#define EPSILON 0.000001
#define NUM_BLOCKS (MAX_ARRAY_SIZE/TILE_DIM)

float A[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float C[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];

void serial();
void init_F();
int check();
__global__ void matrixTranspose1(float *);
__global__ void matrixTranspose2(const float *, float *);

int main()
{
    float *d_a;
    struct timeval startTime, endTime;
    size_t memsize = MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * sizeof(float);

    cudaMalloc((void**) &d_a, memsize);

    init_F();
    cudaMemcpy(d_a, A, memsize, cudaMemcpyHostToDevice);

    gettimeofday(&startTime, NULL);
    matrixTranspose1<<< 16, 1024 >>>(d_a);
    // Kernel launches are asynchronous: without this sync the timer below
    // only measured launch overhead, not the transpose itself.
    cudaDeviceSynchronize();
    gettimeofday(&endTime, NULL);

    double seconds = endTime.tv_sec - startTime.tv_sec;
    double useconds = endTime.tv_usec - startTime.tv_usec;
    double duration = seconds + useconds/1000000.0;
    cout<<"\nTime taken for Matrix Transpose on GPU (time): "<<fixed<<setprecision(7)<<duration<<endl;

    cudaMemcpy(C, d_a, memsize, cudaMemcpyDeviceToHost);
    if(check() == 1)
    {
        cout<<"\nMatrix Transpose Successful!"<<endl;
    }
    cudaFree(d_a);
    return 0;
}

// Fill A with random values in [0, RANDOM_MAX).
void init_F()
{
    srand(time(NULL));
    for (int i = 0; i < MAX_ARRAY_SIZE; i++)
    {
        for (int j = 0; j < MAX_ARRAY_SIZE; j++)
        {
            A[i][j] = rand() % RANDOM_MAX;
        }
    }
}

// In-place transpose. Each block owns a band of rows; for row i, thread j
// swaps A[i][j] with A[j][i] for j < i, so every below-diagonal pair is
// swapped exactly once across the grid.
__global__ void matrixTranspose1(float *A)
{
    int width = MAX_ARRAY_SIZE / gridDim.x;
    for(int i = blockIdx.x * width; i < blockIdx.x * width + width; i++)
    {
        int rowWidth = i / blockDim.x + 1;
        for(int j = threadIdx.x * rowWidth; j < i && j < threadIdx.x * rowWidth + rowWidth; j++)
        {
            float temp = A[i * MAX_ARRAY_SIZE + j];
            A[i * MAX_ARRAY_SIZE + j] = A[j * MAX_ARRAY_SIZE + i];
            A[j * MAX_ARRAY_SIZE + i] = temp;
        }
    }
}

// Tiled out-of-place transpose through shared memory (currently unused).
__global__ void matrixTranspose2(const float *F, float *C)
{
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        tile[threadIdx.y+j][threadIdx.x] = F[(y+j)*width + x];

    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;
    y = blockIdx.x * TILE_DIM + threadIdx.y;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        C[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}

// CPU reference: transpose A in place (currently unused).
void serial()
{
    for (int i = 0; i < MAX_ARRAY_SIZE; i++)
    {
        for (int j = (i+1); j < MAX_ARRAY_SIZE; j++)
        {
            float temp = A[i][j];
            A[i][j] = A[j][i];
            A[j][i] = temp;
        }
    }
}

// Verify C == transpose(A). The original indexed the 2-D globals as if they
// were flat (so it compared row POINTERS, not elements) and its condition
// was inverted — it reported a mismatch precisely when values agreed.
int check()
{
    for (int i = 0; i < MAX_ARRAY_SIZE; i++)
    {
        for (int j = 0; j < MAX_ARRAY_SIZE; j++)
        {
            if (fabs(C[i][j] - A[j][i]) > EPSILON)
            {
                cout<<"\nMismatch at index: ("<<i<<","<<j<<")"<<endl;
                return 0;
            }
        }
    }
    return 1;
}
13,585
/* This is a automatically generated test. Do not modify */
// NOTE(review): fuzzer-generated floating-point stress test. The header
// forbids modification, so only commentary has been added.

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Kernel under test: folds the 29 inputs into `comp` through a fixed chain
// of float operations (with deliberate overflow/underflow constants and
// signed zeros) and prints the final value at full precision.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29) {
  float tmp_1 = +1.2717E35f;
  comp += tmp_1 + var_1 / (var_2 / (var_3 + (var_4 - (var_5 + +1.1904E-44f))));
  if (comp <= (-1.6650E35f - var_6)) {
    comp += fabsf(-0.0f);
    comp += (+1.5888E-44f + (var_7 + var_8));
    float tmp_2 = +0.0f;
    comp += tmp_2 * log10f(+1.8298E-42f);
  }
  if (comp >= var_9 / var_10 - -1.3790E36f / +1.9448E-37f / var_11) {
    float tmp_3 = +0.0f + (var_12 * (-1.2287E36f * (-1.7564E-44f + var_13)));
    comp = tmp_3 - (var_14 / var_15 * logf((-0.0f - (+1.7327E-20f + (+1.0167E-35f + (var_16 / var_17))))));
  }
  if (comp == cosf(fmodf((var_18 * var_19), -1.1589E-37f - (-1.0502E-42f - var_20)))) {
    comp += var_21 * log10f(-0.0f + (var_22 / coshf((var_23 * (var_24 - -1.0887E36f)))));
    float tmp_4 = (+0.0f * var_25 + (+1.0707E-43f + var_26 - +1.3600E-35f));
    comp = tmp_4 / (+0.0f - atan2f(-1.4231E21f, var_27 + var_28 - acosf(-0.0f + (+1.3707E-35f - var_29))));
  }
  printf("%.17g\n", comp);
}

// Returns a malloc'ed 10-element array filled with v.
// NOTE(review): defined but never called in this translation unit.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Reads 30 floats from argv[1..30] and launches the kernel on one thread.
// NOTE(review): argc is not validated; fewer than 30 arguments is undefined
// behavior — a property of the generator, left untouched per the header.
int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  float tmp_30 = atof(argv[30]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
  cudaDeviceSynchronize();

  return 0;
}
13,586
//#include <helper_cuda.h>
#include <algorithm>
#include <time.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define RADIX 65536
#define numElements 30000
#define numIterations 10
#define BLOCKSIZE 128

/*
 * d_doPrefix:
 * Builds an exclusive prefix sum of d_count in d_prefix:
 *   d_prefix[0] = 0; d_prefix[i] = count[0] + ... + count[i-1].
 * Each thread independently sums count[0..index] and writes its own slot,
 * so there is no cross-block ordering requirement. (The original zeroed
 * the array in-kernel — a cross-block race, since __syncthreads() is
 * block-scoped — and atomicAdd-ed to d_prefix[index+1] without a guard,
 * writing one element past the end of the allocation.)
 */
__global__ void d_doPrefix(int *d_count, int countLength, int *d_prefix, int prefixLength)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= prefixLength) return;

    int sum = 0;
    for (int i = index; i >= 0; i--) {
        sum += d_count[i];
    }
    if (index == 0) d_prefix[0] = 0;
    if (index + 1 < prefixLength) d_prefix[index + 1] = sum;  // guard fixes the OOB write
}

/*
 * d_doCount:
 * Histogram of the current digit: bits [offset, offset + log2(countLength))
 * of each key (countLength must be a power of two).
 * Precondition: d_count is zeroed before launch (the host uses cudaMemset);
 * zeroing in-kernel raced across blocks in the original.
 */
__global__ void d_doCount(int *d_unsorted, int unsortedLength, int *d_count, int countLength, int offset)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < unsortedLength) {
        int digit = (d_unsorted[index] >> offset) & (countLength - 1);
        atomicAdd(d_count + digit, 1);
    }
}

/*
 * d_doReorder:
 * Leftover from an attempt at a parallel reorder strategy; never called
 * and known not to work. Kept only for reference.
 */
__global__ void d_doReorder(int* d_unsorted, int unsortedLength, int *d_sorted, int sortedLength, int *d_prefix, int prefixLength, int offset)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < unsortedLength) {
        int currentNum = d_unsorted[index];
        currentNum = currentNum >> offset;
        currentNum = (prefixLength - 1) & currentNum;
        int prefix = d_prefix[currentNum];
        int newIndex = index % prefix;
        d_sorted[newIndex] = d_unsorted[index];
    }
}

/*
 * d_lazyReorder:
 * Stable scatter for the current digit, performed sequentially by thread 0
 * (radix sort requires a stable reorder). All threads then cooperate to
 * copy the sorted buffer back into d_unsorted for the next pass.
 * Note: d_prefix is consumed (incremented) by this kernel; d_doPrefix
 * rewrites every slot on the next pass.
 */
__global__ void d_lazyReorder(int* d_unsorted, int unsortedLength, int *d_sorted, int sortedLength, int *d_prefix, int prefixLength, int offset, int threadCount)
{
    int loopMax = ceil((float)unsortedLength / (float)threadCount);

    if (threadIdx.x < 1) {
        for (int i = 0; i < unsortedLength; i++) {
            int digit = (d_unsorted[i] >> offset) & (prefixLength - 1);
            int newIndex = d_prefix[digit];
            d_prefix[digit]++;
            d_sorted[newIndex] = d_unsorted[i];
        }
    }
    __syncthreads();

    // Parallel copy-back: each thread handles one contiguous chunk.
    for (int i = 0; i < loopMax; i++) {
        int index = threadIdx.x * loopMax + i;
        if (index < sortedLength) d_unsorted[index] = d_sorted[index];
    }
}

/*
 * d_lazyReorderorig:
 * Fully sequential reorder on the GPU (single-thread reference version).
 */
__global__ void d_lazyReorderorig(int* d_unsorted, int unsortedLength, int *d_sorted, int sortedLength, int *d_prefix, int prefixLength, int offset)
{
    for (int i = 0; i < unsortedLength; i++) {
        int digit = (d_unsorted[i] >> offset) & (prefixLength - 1);
        int newIndex = d_prefix[digit];
        d_prefix[digit]++;
        d_sorted[newIndex] = d_unsorted[i];
    }
    for (int i = 0; i < unsortedLength; i++) {
        d_unsorted[i] = d_sorted[i];
    }
}

/*
 * cudaRadix:
 * Host driver for the GPU radix sort: allocates device buffers and runs
 * count / prefix / reorder passes over 11-bit digits until all 32 key bits
 * are consumed. On return both h_sorted and h_unsorted hold the keys in
 * ascending order.
 */
void cudaRadix(int *h_unsorted, int *h_sorted)
{
    const int sortBits = 11;                 // digit width per pass
    const int countLength = 1 << sortBits;
    const int prefixLength = countLength;
    const int countSize = countLength * (int)sizeof(int);
    const int prefixSize = countSize;
    const int unsortedSize = numElements * (int)sizeof(int);
    const int sortedSize = unsortedSize;

    int *d_count, *d_unsorted, *d_sorted, *d_prefix;

    // cudaMalloc reports failure through its return code, not by nulling
    // the pointer (the original tested the pointer, which is unreliable).
    if (cudaMalloc((void**)&d_unsorted, unsortedSize) != cudaSuccess ||
        cudaMalloc((void**)&d_sorted, sortedSize) != cudaSuccess ||
        cudaMalloc((void**)&d_count, countSize) != cudaSuccess ||
        cudaMalloc((void**)&d_prefix, prefixSize) != cudaSuccess) {
        fprintf(stderr, "cudaRadix: device allocation failed\n");
        exit(-1);
    }

    cudaMemcpy(d_unsorted, h_unsorted, unsortedSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_sorted, h_sorted, sortedSize, cudaMemcpyHostToDevice);

    dim3 threadsPerBlock(BLOCKSIZE);
    dim3 numBlocks(ceil((float)numElements / (float)threadsPerBlock.x));
    dim3 numPrefixBlocks(ceil((float)countLength / (float)threadsPerBlock.x));

    for (int sortLoop = 0; sortLoop < 32; sortLoop += sortBits) {
        // d_doCount requires a zeroed histogram (see kernel comment).
        cudaMemset(d_count, 0, countSize);
        d_doCount <<< numBlocks, threadsPerBlock >>> (d_unsorted, numElements, d_count, countLength, sortLoop);
        d_doPrefix <<< numPrefixBlocks, threadsPerBlock >>> (d_count, countLength, d_prefix, prefixLength);
        d_lazyReorder <<< 1, 1024 >>> (d_unsorted, numElements, d_sorted, numElements, d_prefix, prefixLength, sortLoop, 1024);
    }

    cudaMemcpy(h_unsorted, d_unsorted, unsortedSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_sorted, d_sorted, sortedSize, cudaMemcpyDeviceToHost);

    cudaFree(d_unsorted);
    cudaFree(d_count);
    cudaFree(d_prefix);
    cudaFree(d_sorted);
}

int main(int argc, char **argv)
{
    // Initialize a random key list; values in range [0, RADIX).
    int *unsorted = (int *)malloc(numElements * sizeof(int));
    int *h_unsorted = (int *)malloc(numElements * sizeof(int));
    int *h_sorted = (int *)malloc(numElements * sizeof(int));
    if (!unsorted || !h_unsorted || !h_sorted) {
        fprintf(stderr, "host allocation failed\n");
        exit(-1);
    }
    for (int i = 0; i < numElements; i++) {
        unsorted[i] = (int)(rand() % RADIX);
        h_unsorted[i] = unsorted[i];
    }

    cudaRadix(h_unsorted, h_sorted);

    // Release host buffers (all leaked in the original, which also
    // allocated a never-used `sorted` array).
    free(unsorted);
    free(h_unsorted);
    free(h_sorted);
    return 0;
}
13,587
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>

static const int WORK_SIZE = 10;

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) { \
	cudaError_t _m_cudaStat = value; \
	if (_m_cudaStat != cudaSuccess) { \
		fprintf(stderr, "Error %s at line %d in file %s\n", \
			cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
		exit(1); \
	} }

/*
 * Runs two batched 5-point complex-to-complex FFTs over a 10-element
 * real ramp (0..9) stored as float2, then prints the magnitude of each
 * output bin.
 */
int main(void) {
	int i;
	float2 idata[WORK_SIZE];
	float2 odata[WORK_SIZE];

	// Input: real ramp 0..9, zero imaginary part.
	for (i = 0; i < WORK_SIZE; i++) {
		idata[i].x = i;
		idata[i].y = 0;
	}

	cufftHandle plan;
	cufftComplex *data;
	CUDA_CHECK_RETURN(cudaMalloc((void**)&data, sizeof(float2) * WORK_SIZE));
	CUDA_CHECK_RETURN(cudaMemcpy(data, idata, sizeof(float2) * WORK_SIZE,
	                             cudaMemcpyHostToDevice));

	// Batched plan: 2 transforms of length WORK_SIZE/2 = 5, contiguous
	// layout (stride 1 within a transform, distance 5 between batches).
	int rank = 1;
	int n[1] = { WORK_SIZE / 2 };
	int inembed[1] = { WORK_SIZE };
	int onembed[1] = { WORK_SIZE };
	int istride = 1, idist = 5;
	int ostride = 1, odist = 5;
	int batch = 2;
	if (cufftPlanMany(&plan, rank, n, inembed, istride, idist,
	                  onembed, ostride, odist, CUFFT_C2C, batch) != CUFFT_SUCCESS) {
		fprintf(stderr, "cufftPlanMany failed\n");
		exit(1);
	}
	if (cufftExecC2C(plan, data, data, CUFFT_FORWARD) != CUFFT_SUCCESS) {
		fprintf(stderr, "cufftExecC2C failed\n");
		exit(1);
	}
	CUDA_CHECK_RETURN(cudaDeviceSynchronize());

	CUDA_CHECK_RETURN(cudaMemcpy(odata, data, sizeof(float2) * WORK_SIZE,
	                             cudaMemcpyDeviceToHost));
	for (i = 0; i < WORK_SIZE; i++)
		printf("%f\n", cuCabsf(odata[i]));

	// FIX: release the plan and the device buffer BEFORE resetting the
	// device.  The original called cudaFree(data) and cufftDestroy(plan)
	// after cudaDeviceReset(), i.e. on an already-destroyed context.
	cufftDestroy(plan);
	CUDA_CHECK_RETURN(cudaFree(data));
	CUDA_CHECK_RETURN(cudaDeviceReset());
	return 0;
}
13,588
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Naive dense matrix multiply: C = A * B for row-major N x N matrices.
 * One thread computes one output element; launch with a 2-D grid whose
 * (blockIdx * blockDim + threadIdx) covers [0, N) in both x and y.
 */
__global__ void PartialMM(double *A, double *B, double *C, unsigned int N)
{
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;

	// FIX: guard the grid tail.  With a rounded-up grid (below), threads
	// past the matrix edge must not touch memory; the original had no
	// guard and a truncating grid, skipping the tail for non-divisible N.
	if (i >= N || j >= N)
		return;

	double c = 0.0;
	#pragma unroll
	for (unsigned int k = 0u; k < N; ++k) {
		c += A[i * N + k] * B[k * N + j];
	}
	C[i * N + j] = c;
}

int main(int argc, char *argv[])
{
	unsigned int threads = 1024u;  // threads per block (power of two, <= 1024)
	unsigned int N = 1024u;        // matrix dimension

	if (argc > 1) threads = (unsigned int) atoi(argv[1]);
	if (argc > 2) N = (unsigned int) atoi(argv[2]);

	srand((unsigned int) time(NULL));

	// FIX: compute the byte count in size_t so large N does not overflow
	// 32-bit unsigned arithmetic.
	size_t size = (size_t) N * N * sizeof(double);
	double *h_A, *h_B, *h_C;
	double *d_A, *d_B, *d_C;
	h_A = (double *) malloc(size);
	h_B = (double *) malloc(size);
	h_C = (double *) malloc(size);
	cudaMalloc((void **) &d_A, size);
	cudaMalloc((void **) &d_B, size);
	cudaMalloc((void **) &d_C, size);

	for (unsigned int k = 0u; k < N * N; ++k) {
		h_A[k] = 2.0; //rand();
		h_B[k] = 3.0; //rand();
		h_C[k] = 0.0;
	}

	cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);

	// Map the requested thread count onto a near-square 2-D block.
	dim3 block(1, 1);
	switch (threads) {
		case 1:    block.x = 1;  block.y = 1;  break;
		case 2:    block.x = 2;  block.y = 1;  break;
		case 4:    block.x = 2;  block.y = 2;  break;
		case 8:    block.x = 4;  block.y = 2;  break;
		case 16:   block.x = 4;  block.y = 4;  break;
		case 32:   block.x = 8;  block.y = 4;  break;
		case 64:   block.x = 8;  block.y = 8;  break;
		case 128:  block.x = 16; block.y = 8;  break;
		case 256:  block.x = 16; block.y = 16; break;
		case 512:  block.x = 32; block.y = 16; break;
		case 1024: block.x = 32; block.y = 32; break;
	}

	// FIX: ceiling division so the grid covers N even when N is not a
	// multiple of the block dimensions (the kernel guard handles overshoot).
	dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);

	//@formatter:off
	PartialMM<<<grid, block>>>(d_A, d_B, d_C, N);
	//@formatter:on
	cudaDeviceSynchronize();

	cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

	// FIX: %u for unsigned operands (was %d).
	printf("Size\t\t%ux%u\n", N, N);
	printf("Result:");
	for (unsigned int k = 0u; k < N * N; ++k) {
		if (k % N == 0)
			printf("\n");
		printf("%f ", h_C[k]);
	}

	free(h_A);
	free(h_B);
	free(h_C);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	cudaDeviceReset();
	return EXIT_SUCCESS;
}
13,589
/* ***
 * Box filter based on shared memory (not texture memory).
 */
#include <iostream>
#include <cassert>

using namespace std;

#define INDX(row, col, colNum) (((row) * (colNum)) + col)
#define SIZE 100
#define BLOCKSIZE 16
#define FILTER_WIDTH 3
#define FILTER_RAD (FILTER_WIDTH / 2)
#define INNER_TILE (BLOCKSIZE - FILTER_RAD)

/*
 * Convolve dataIn (hei x wid, row-major) with a FILTER_WIDTH x FILTER_WIDTH
 * kernel held in `filter`, writing to dataOut.  Each block stages one
 * BLOCKSIZE x BLOCKSIZE tile in shared memory; only tile-interior threads
 * (those with a full neighborhood inside the tile) produce output.
 */
__global__ void boxfilter_kernel(float *dataIn, float *dataOut, int wid, int hei,
                                 const float * const __restrict__ filter)
{
	__shared__ float shMat[BLOCKSIZE][BLOCKSIZE];

	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int idx = tx + blockIdx.x * blockDim.x;  // global column
	int idy = ty + blockIdx.y * blockDim.y;  // global row

	// Stage the tile.  FIX: guard with the GLOBAL coordinates against the
	// image size (the original compared the local thread index instead),
	// and read with the image's own row pitch `wid` (the original used the
	// grid pitch blockDim.x * gridDim.x, which differs when the grid is
	// wider than the image).
	if (idx < wid && idy < hei)
		shMat[ty][tx] = dataIn[INDX(idy, idx, wid)];
	else
		shMat[ty][tx] = 0.0f;
	__syncthreads();

	// Only interior tile threads have a complete neighborhood in shared
	// memory.  FIX: inclusive lower bound (>= FILTER_RAD); the original
	// `>` also excluded the first valid ring of pixels.
	if (tx >= FILTER_RAD && tx < INNER_TILE &&
	    ty >= FILTER_RAD && ty < INNER_TILE) {
		float res = 0.0f;
		// FIX: inclusive radius on both sides (the original `<` dropped
		// the +radius taps), and shift the filter index by FILTER_RAD so
		// it is non-negative (the original indexed the filter with
		// negative offsets, reading out of bounds).
		for (int fr = -FILTER_RAD; fr <= FILTER_RAD; ++fr) {
			for (int fc = -FILTER_RAD; fc <= FILTER_RAD; ++fc) {
				res += shMat[ty + fr][tx + fc] *
				       filter[INDX(fr + FILTER_RAD, fc + FILTER_RAD, FILTER_WIDTH)];
			}
		}
		if (idx < wid && idy < hei)
			dataOut[INDX(idy, idx, wid)] = res;
	}
}

int main(int argc, char **argv)
{
	cout << "hello world ..." << endl;

	// 10x10 image of ones.
	float *img = new float[SIZE];
	for (int i = 0; i < SIZE; ++i)
		img[i] = 1;

	// FIX: 1 / 9 in integer arithmetic is 0 — the original built an
	// all-zero "box" filter.  Use a float literal.
	float *filter = new float[FILTER_WIDTH * FILTER_WIDTH];
	for (int i = 0; i < FILTER_WIDTH; ++i)
		for (int j = 0; j < FILTER_WIDTH; ++j)
			filter[INDX(i, j, FILTER_WIDTH)] = 1.0f / (FILTER_WIDTH * FILTER_WIDTH);

	int width = 10;
	int height = 10;

	float *d_in, *d_out;
	cudaError_t cudaState = cudaSuccess;
	cudaState = cudaMalloc((void **)&d_in, sizeof(float) * width * height);
	assert(cudaState == cudaSuccess);
	cudaState = cudaMalloc((void **)&d_out, sizeof(float) * width * height);
	assert(cudaState == cudaSuccess);
	// FIX: zero the output so pixels the kernel never writes (tile borders)
	// read back as 0 instead of uninitialized garbage.
	cudaState = cudaMemset(d_out, 0, sizeof(float) * width * height);
	assert(cudaState == cudaSuccess);

	// copy data from host to device
	cudaState = cudaMemcpy(d_in, img, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
	assert(cudaState == cudaSuccess);

	float *d_filter;
	cudaState = cudaMalloc((void **)&d_filter, FILTER_WIDTH * FILTER_WIDTH * sizeof(float));
	assert(cudaState == cudaSuccess);
	cudaState = cudaMemcpy(d_filter, filter, FILTER_WIDTH * FILTER_WIDTH * sizeof(float),
	                       cudaMemcpyHostToDevice);
	assert(cudaState == cudaSuccess);

	dim3 threadPerBlock(BLOCKSIZE, BLOCKSIZE);
	dim3 blockPerGrid;
	blockPerGrid.x = (width + threadPerBlock.x - 1) / BLOCKSIZE;
	blockPerGrid.y = (height + BLOCKSIZE - 1) / BLOCKSIZE;

	boxfilter_kernel<<<blockPerGrid, threadPerBlock>>>(d_in, d_out, width, height, d_filter);

	// copy data back to host
	cudaState = cudaMemcpy(img, d_out, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);
	assert(cudaState == cudaSuccess);

	for (int i = 0; i < 10; ++i) {
		for (int j = 0; j < 10; ++j)
			cout << img[INDX(i, j, 10)] << "; ";
		cout << endl;
	}

	// FIX: release everything — the original leaked `filter` and all three
	// device buffers.
	delete [] img;
	delete [] filter;
	cudaFree(d_in);
	cudaFree(d_out);
	cudaFree(d_filter);
	return 0;
}
13,590
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>

// Squares each element: d_out[i] = d_in[i]^2.  One thread per element;
// the launch in main() supplies exactly N threads, so no bounds guard
// is needed here.
__global__ void squareKernel(float *d_in, float *d_out)
{
	const unsigned int lid = threadIdx.x;                    // local id inside a block
	const unsigned int gid = blockIdx.x * blockDim.x + lid;  // global id
	const float v = d_in[gid];
	d_out[gid] = v * v;
}

int main(int argc, char** argv)
{
	const unsigned int N = 1023;
	const unsigned int mem_size = N * sizeof(float);

	// Host buffers: input is the ramp 0..N-1.
	float* h_in = (float*) malloc(mem_size);
	float* h_out = (float*) malloc(mem_size);
	for (unsigned int i = 0; i < N; ++i)
		h_in[i] = (float)i;

	// Device buffers.
	float *d_in, *d_out;
	cudaMalloc((void**)&d_in, mem_size);
	cudaMalloc((void**)&d_out, mem_size);

	// Upload, square on the GPU in a single block of N threads, download.
	cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
	squareKernel<<<1, N>>>(d_in, d_out);
	cudaMemcpy(h_out, d_out, mem_size, cudaMemcpyDeviceToHost);

	// Print result.
	for (unsigned int i = 0; i < N; ++i)
		printf("%.6f\n", h_out[i]);

	free(h_in);
	free(h_out);
	cudaFree(d_in);
	cudaFree(d_out);
	return 0;
}
13,591
/*
   Mary Barker
   Homework 1 -- Vector addition on GPU
   to compile: nvcc BarkerHW1_GPU.cu

   OUTPUTS:
   N = 100
   Time in milliseconds= 0.026000000000000
   Last Values are A[99] = 198.000000000000000  B[99] = 99.000000000000000  C[99] = 297.000000000000000

   N = 600
   Time in milliseconds= 0.027000000000000
   Last Values are A[599] = 1198.000000000000000  B[599] = 599.000000000000000  C[599] = 1797.000000000000000

   N = 2000
   Time in milliseconds= 0.035000000000000
   Last Values are A[1999] = 3998.000000000000000  B[1999] = 1999.000000000000000  C[1999] = 5997.000000000000000
*/
#include <sys/time.h>
#include <stdio.h>

// Length of vectors to be added.
#define N 100  // if N is greater than dimBlock.x the program will break

float *A_CPU, *B_CPU, *C_CPU;  // CPU pointers
float *A_GPU, *B_GPU, *C_GPU;  // GPU pointers

// Allocates matching N-float buffers on the device and the host.
void AllocateMemory()
{
	// Device (GPU) memory
	cudaMalloc(&A_GPU, N * sizeof(float));
	cudaMalloc(&B_GPU, N * sizeof(float));
	cudaMalloc(&C_GPU, N * sizeof(float));

	// Host (CPU) memory
	A_CPU = (float*)malloc(N * sizeof(float));
	B_CPU = (float*)malloc(N * sizeof(float));
	C_CPU = (float*)malloc(N * sizeof(float));
}

// Loads values into the vectors that we will add: A[i] = 2i, B[i] = i.
void Innitialize()
{
	int i;
	for (i = 0; i < N; i++) {
		A_CPU[i] = (float)2*i;
		B_CPU[i] = (float)i;
	}
}

// Releases all host and device buffers.
void CleanUp(float *A_CPU, float *B_CPU, float *C_CPU,
             float *A_GPU, float *B_GPU, float *C_GPU)
{
	free(A_CPU);
	free(B_CPU);
	free(C_CPU);
	cudaFree(A_GPU);
	cudaFree(B_GPU);
	cudaFree(C_GPU);
}

// Kernel: C = A + B elementwise.  Launched with one thread per block
// (grid of n blocks), so blockIdx.x is the element index.
__global__ void Addition(float *A, float *B, float *C, int n)
{
	int id = blockIdx.x;
	if (id < n)
		C[id] = A[id] + B[id];
}

int main()
{
	int i;
	timeval start, end;

	// Partition off the memory that you will be using.
	AllocateMemory();

	// Load up values to be added.
	Innitialize();

	// Start the timer.
	gettimeofday(&start, NULL);

	// Copy memory from CPU to GPU.
	cudaMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice);

	// Call the kernel (GPU) function.
	Addition<<<dim3(N), 1>>>(A_GPU, B_GPU, C_GPU, N);

	// Copy memory from GPU to CPU.
	cudaMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);

	// FIX: the copies and the kernel above are asynchronous.  Without this
	// synchronization the timer stops before the work finishes and C_CPU
	// may be read before the device-to-host copy has completed.
	cudaDeviceSynchronize();

	// Stop the timer.
	gettimeofday(&end, NULL);

	// Total time in microseconds, converted to milliseconds for display.
	float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
	printf("Time in milliseconds= %.15f\n", (time/1000.0));

	// Per-element printout (comment back in for small N only).
	for (i = 0; i < N; i++) {
		//printf("A[%d] = %.15f  B[%d] = %.15f  C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]);
	}

	// Last value of the addition as a sanity check.
	printf("Last Values are A[%d] = %.15f  B[%d] = %.15f  C[%d] = %.15f\n",
	       N-1, A_CPU[N-1], N-1, B_CPU[N-1], N-1, C_CPU[N-1]);

	// You're done, so clean up your mess.
	CleanUp(A_CPU, B_CPU, C_CPU, A_GPU, B_GPU, C_GPU);
	return(0);
}
13,592
/*
Copyright 2018 Lip Wee Yeo Amano

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include <cuda_runtime.h>
#include <iostream>
#include <cstring>
#include <string>

// Define this to turn on error checking
#define CUDA_ERROR_CHECK

// Convenience wrappers that capture call-site file/line or forward an
// error-message buffer to the helpers below.
#define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError(err, errMessage) __cudaCheckError(err, errMessage)
#define CudaSyncAndCheckError(errMessage) __cudaSyncAndCheckError(errMessage)

// Returns the CUDA error string for `err`, or "" on success.
// NOTE(review): `file` and `line` are accepted (via CudaSafeCall) but not
// referenced in the body — the call-site information is currently dropped.
__host__ inline std::string __cudaSafeCall(cudaError err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
	if (cudaSuccess != err)
		return cudaGetErrorString(err);
	else
#endif //CUDA_ERROR_CHECK
	return "";
}

// If `err` indicates failure, copies the NUL-terminated error string into
// the buffer behind `errorMessage` and returns false; returns true on
// success (or when CUDA_ERROR_CHECK is undefined).
// NOTE(review): the const on `errorMessage` is cast away and the buffer is
// written without any capacity parameter — assumes the caller passes a
// writable buffer large enough for any CUDA error string; TODO confirm
// all callers.
__host__ inline bool __cudaCheckError(cudaError err, const char *errorMessage)
{
#ifdef CUDA_ERROR_CHECK
	if (err != cudaSuccess)
	{
		auto errorMsgChar = cudaGetErrorString(err);
		std::string errorMsg{ errorMsgChar };
		// Copy the message bytes, then append a single NUL terminator.
		std::memcpy((void *)errorMessage, errorMsgChar, errorMsg.length());
		std::memset((void *)&errorMessage[errorMsg.length()], 0, 1);
		return false;
	}
#endif //CUDA_ERROR_CHECK
	return true;
}

// Drains all pending (sticky) CUDA errors, then synchronizes the device
// and drains again.  Accumulated error strings are joined with " <- "
// (most recent first) and written into `errorMessage`.  Returns true when
// no error was observed at either stage.
// NOTE(review): same unbounded const-cast write into `errorMessage` as
// __cudaCheckError — the caller's buffer size is never checked.
__host__ inline bool __cudaSyncAndCheckError(const char *errorMessage)
{
	cudaError_t response{ cudaSuccess };
	std::string cudaErrors{ "" };

#ifdef CUDA_ERROR_CHECK
	// Stage 1: collect any errors already recorded before synchronizing.
	// cudaGetLastError() both returns and clears the sticky error, so loop
	// until the queue reads clean.
	response = cudaGetLastError();
	if (response != cudaSuccess)
	{
		while (response != cudaSuccess)
		{
			if (!cudaErrors.empty()) cudaErrors += " <- ";
			cudaErrors += cudaGetErrorString(response);
			response = cudaGetLastError();
		}
		auto errorChar = cudaErrors.c_str();
		std::memcpy((void *)errorMessage, errorChar, cudaErrors.length());
		std::memset((void *)&errorMessage[cudaErrors.length()], 0, 1);
		return false;
	}
#endif //CUDA_ERROR_CHECK

	// Stage 2: synchronize so asynchronous kernel failures surface, then
	// drain the error queue the same way.
	response = cudaDeviceSynchronize();
	if (response != cudaSuccess)
	{
		response = cudaGetLastError();
		while (response != cudaSuccess)
		{
			if (!cudaErrors.empty()) cudaErrors += " <- ";
			cudaErrors += cudaGetErrorString(response);
			response = cudaGetLastError();
		}
		auto errorChar = cudaErrors.c_str();
		std::memcpy((void *)errorMessage, errorChar, cudaErrors.length());
		std::memset((void *)&errorMessage[cudaErrors.length()], 0, 1);
		return false;
	}
	return true;
}
13,593
#include <iostream>
#include <cmath>
#include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>

using namespace std;

#define A 0.2
#define B 0.01

// One explicit upwind step of the 1-D advection update:
// res[i] = f[i] + coeff * (f[i-1] - f[i]); the left boundary (i == 0) is
// copied through unchanged.  (Currently unused: the plain-CUDA path in
// main() is commented out.)
__global__ void kernel(float coeff, float* f, float* res)
{
	int curr = threadIdx.x + blockDim.x * blockIdx.x;
	int prev = curr - 1;
	if (prev == -1) {
		res[curr] = f[curr];
	} else {
		res[curr] = f[curr] + (coeff) * (f[prev] - f[curr]);
	}
}

// Binary functor applying the same upwind update for the Thrust path:
// result = x + coeff * (y - x), where x is the current cell and y its
// left neighbor.
struct Functor
{
	const float coeff;
	Functor(float _coeff) : coeff(_coeff) {}
	__host__ __device__ float operator()(float x, float y)
	{
		return x + coeff * (y - x);
	}
};

// Initial condition over x: a Gaussian-shaped bump scaled by x^2.
float x_func(float x)
{
	return x * x * exp(-(x - A) * (x - A) / B);
}

// Boundary condition over t (zero inflow).
float t_func(float t)
{
	return 0;
}

int main()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, 0);
	printf("device: %s \n\n", deviceProp.name);

	int Nx = 256;             //1024
	int Nt = 256;             //1024
	float tl = 0.5;           //0.2
	float dx = 2.0f / Nx;     //1.0f / Nx
	float dt = tl / Nt;

	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	float *x, *t;
	thrust::host_vector<float> thr(Nx * Nt);
	float* cda;
	cudaHostAlloc((void**)&x, Nx * sizeof(float), cudaHostAllocDefault);
	cudaHostAlloc((void**)&t, Nt * sizeof(float), cudaHostAllocDefault);
	cudaHostAlloc((void**)&cda, Nt * Nx * sizeof(float), cudaHostAllocDefault);

	// NOTE(review): the grid is addressed as [i + j * Nt] in the loops
	// below but as [i + j * Nx] by the transform; these only agree because
	// Nx == Nt here — confirm before changing either constant.
	for (int i = 0; i < Nx; i++) {
		for (int j = 0; j < Nt; j++) {
			thr[i + j * Nt] = 0;
			cda[i + j * Nt] = 0;
		}
	}

	// Initial condition along the first row.
	float value = 0;
	for (int i = 0; i < Nx; i++, value += dx) {
		x[i] = value;
		thr[i] = x_func(x[i]);
		cda[i] = x_func(x[i]);
	}
	// Boundary condition along the first column.
	value = 0;
	for (int i = 0; i < Nt; i++, value += dt) {
		t[i] = value;
		thr[i * Nt] = t_func(t[i]);
		cda[i * Nt] = t_func(t[i]);
	}

	thrust::device_vector<float> dev(Nx * Nt);
	thrust::copy(thr.begin(), thr.end(), dev.begin());

	// Create the functor; the constructor captures the CFL coefficient.
	Functor func(dt / dx);

	/// Thrust path: one transform per time level, reading level j and
	/// writing level j+1 (shifted by one cell for the left neighbor).
	cudaEventRecord(start, 0);
	for (int j = 0; j < Nt - 1; j++) {
		thrust::transform(dev.begin() + (j * Nx) + 1, dev.begin() + ((j + 1) * Nx),
		                  dev.begin() + (j * Nx),
		                  dev.begin() + ((j + 1) * Nx) + 1, func);
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	thrust::copy(dev.begin(), dev.end(), thr.begin());  // copy device -> host
	cudaEventElapsedTime(&time, start, stop);
	cout << "Thrust time is " << time << "ms\n";

	/// Plain CUDA path (currently disabled):
	float* dev_cda;
	cudaMalloc((void **)&dev_cda, Nx * Nt * sizeof(float));
	cudaMemcpy(dev_cda, cda, Nx * Nt * sizeof(float), cudaMemcpyHostToDevice);
	cudaEventRecord(start, 0);
	/*
	for (int i = 0; i < Nt - 1; i++) {
		kernel <<< Nx / 256, 256 >>> (dt / dx, dev_cda + (i * Nx), dev_cda + ((i + 1) * Nx));
		cudaDeviceSynchronize();
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaMemcpy(cda, dev_cda, Nx * Nt * sizeof(float), cudaMemcpyDeviceToHost);
	cudaEventElapsedTime(&time, start, stop);
	cout << "CUDA time is " << time << "ms\n\n";
	*/

	// Verification: compare the two grids by their sums.
	float thr_sum = 0.0f;
	float cda_sum = 0.0f;
	for (int i = 0; i < Nx; i++) {
		for (int j = 0; j < Nt; j++) {
			cda_sum += cda[i + j * Nt];
			thr_sum += thr[i + j * Nt];
			//if (cda[i + j * Nt] != thr[i + j * Nt])
			//	printf("difference in [%d + %d * Nt]\n", i, j);
		}
	}
	printf("thr_sum = %f \ncda_sum = %f \n", thr_sum, cda_sum);

	// Dump both grids for cda.gpi / thr.gpi plotting.
	FILE *fp_thr, *fp_cda;
	fp_thr = fopen("thr.dat", "w");
	fp_cda = fopen("cda.dat", "w");
	for (int j = 0; j < Nt; j++) {
		for (int i = 0; i < Nx; i++) {
			fprintf(fp_thr, "%f\n", thr[i + j * Nt]);
			fprintf(fp_cda, "%f\n", cda[i + j * Nt]);
		}
		fprintf(fp_thr, "\n\n\n");
		fprintf(fp_cda, "\n\n\n");
	}
	/*
	// this is for test.gpi
	for (int i = 0; i < Nx; i++) {
		fprintf(fp_cda, "%f\n", cda[i]);
	}
	*/

	// FIX: close the output files and release the pinned host buffers —
	// the original leaked both file handles and all three cudaHostAlloc
	// allocations.
	fclose(fp_thr);
	fclose(fp_cda);
	cudaFreeHost(x);
	cudaFreeHost(t);
	cudaFreeHost(cda);

	cudaFree(dev_cda);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return 0;
}

/*
void iteration(float _coeff, thrust::device_vector<float>::iterator x,
               thrust::device_vector<float>::iterator xs,
               thrust::device_vector<float>::iterator y)
{
	Functor func(_coeff);
	thrust::transform(x + 1, xs, x, y + 1, func);
}
*/
13,594
#include "includes.h"

#define BLOCK_SIZE 16

// Naive square matrix multiply: c = a * b for t x t row-major matrices.
// One thread per output element; threads outside the matrix do nothing.
__global__ void MultiplyGPU(float* a, float* b, float* c, int t)
{
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	const int col = blockIdx.x * blockDim.x + threadIdx.x;

	if (row >= t || col >= t)
		return;

	float sum = 0.0f;
	for (int k = 0; k < t; ++k)
		sum += a[row * t + k] * b[k * t + col];
	c[row * t + col] = sum;
}
13,595
/**
 * parms:
 *   top left bottom right
 *
 * example:
 *   0 0 10 10
 */
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <math.h>

#define MATRIX_WIDTH 2016
#define MATRIX_HEIGHT 2016
#define MATRIX_SIZE MATRIX_WIDTH*MATRIX_HEIGHT
#define TILE_WIDTH 48
#define TILE_HEIGHT 48
#define ELEMENTS_PER_THREAD_Y 16
#define ROWS_PER_THREAD 2
#define ELEMENTS_PER_MATRIX_ROW MATRIX_WIDTH*TILE_HEIGHT
#define TILE_SIZE TILE_WIDTH*TILE_HEIGHT
#define INIT_THREADS_PER_BLOCK 256
#define INIT_ELEMENTS_PER_THREAD 90
#define INIT_ELEMENTS_PER_BLOCK INIT_ELEMENTS_PER_THREAD*INIT_THREADS_PER_BLOCK
#define TILE_DIM 32
#define BLOCK_ROWS 8

// Window of the matrix printed for inspection.
struct WINDOW {
	int top;
	int left;
	int bottom;
	int right;
} show_window = { 0, 0, 10, 10 };

/*
 * Matrix transpose - device function
 * ----------------------------------------
 * Transposes one TILE_HEIGHT x TILE_WIDTH tile per block via shared
 * memory: the tile at block (x, y) of idata is written to block (y, x)
 * of odata with rows and columns swapped.
 * NOTE(review): the swapped read `tile[i][startPos+j]` in the write-back
 * loop requires TILE_WIDTH == TILE_HEIGHT (both 48 here); it would index
 * out of bounds for rectangular tiles — confirm before changing either.
 * Parms:
 *   - idata (int*) - input matrix
 *   - odata (int*) - output matrix (already allocated)
 */
__global__ void transpose_matrix(int* idata, int* odata)
{
	// allocate shared memory for one tile
	__shared__ int tile[TILE_HEIGHT][TILE_WIDTH];

	// linear offsets of this block's tile in the input and the mirrored
	// position in the output
	int inputPos = blockIdx.y*ELEMENTS_PER_MATRIX_ROW + blockIdx.x*TILE_WIDTH;
	int outputPos = blockIdx.x*ELEMENTS_PER_MATRIX_ROW + blockIdx.y*TILE_WIDTH;
	int coPos;
	int startPos = threadIdx.y*ROWS_PER_THREAD;

	// copy to shared memory: each thread handles ROWS_PER_THREAD tile rows,
	// striding along each row by ELEMENTS_PER_THREAD_Y (= blockDim.x)
	for(int j=0; j <= ROWS_PER_THREAD-1; j++)
	{
		coPos = startPos*MATRIX_WIDTH + (MATRIX_WIDTH*j);
		for(int i=threadIdx.x; i < TILE_WIDTH; i+=ELEMENTS_PER_THREAD_Y)
		{
			tile[startPos+j][i] = idata[inputPos+i+coPos];
		}
	}
	__syncthreads();

	// copy back to global memory with rows/columns of the tile swapped
	for(int j=0; j <= ROWS_PER_THREAD-1; j++)
	{
		coPos = startPos*MATRIX_WIDTH + (MATRIX_WIDTH*j);
		for(int i=threadIdx.x; i < TILE_WIDTH; i+=ELEMENTS_PER_THREAD_Y)
		{
			odata[outputPos+i+coPos] = tile[i][startPos+j];
		}
	}
}

/*
 * Initialize matrix - device function
 * ----------------------------------------
 * Fills idata[i] = i; each thread writes INIT_ELEMENTS_PER_THREAD
 * consecutive elements, clamped to `size` in the last block.
 * FIX: the original clamped against the thread-local offset only
 * (ignoring the block offset), so the guard could never fire and the
 * matrix tail was left untouched or overrun depending on the launch.
 * Parms:
 *   - idata (int*) - matrix
 *   - size (int) - matrix size (number of elements)
 */
__global__ void init_matrix( int *idata, int size )
{
	int start_id = blockIdx.x * INIT_ELEMENTS_PER_BLOCK
	             + threadIdx.x * INIT_ELEMENTS_PER_THREAD;
	if ( start_id >= size )
		return;

	int elements_count = INIT_ELEMENTS_PER_THREAD;
	if ( start_id + INIT_ELEMENTS_PER_THREAD > size )
		elements_count = size - start_id;

	for( int i=0; i < elements_count; i++ )
		idata[start_id+i] = start_id+i;
}

// Same launch geometry as init_matrix, but fills with zeros.
__global__ void init_matrix_zero( int *idata, int size )
{
	int start_id = blockIdx.x * INIT_ELEMENTS_PER_BLOCK
	             + threadIdx.x * INIT_ELEMENTS_PER_THREAD;
	if ( start_id >= size )
		return;

	int elements_count = INIT_ELEMENTS_PER_THREAD;
	if ( start_id + INIT_ELEMENTS_PER_THREAD > size )
		elements_count = size - start_id;

	for( int i=0; i < elements_count; i++ )
		idata[start_id+i] = 0;
}

/*
 * Print matrix window - host function
 * ----------------------------------------
 * Copies the full matrix from device to host and prints the requested
 * window.  The host matrix must have the same size as the device matrix.
 * Parms:
 *   - h_matrix (int*) - host matrix
 *   - d_matrix (int*) - device matrix
 *   - start_height, start_width, end_height, end_width (int) - area shown
 */
void print_matrix_window(int* h_matrix, int* d_matrix,
                         int start_height, int start_width,
                         int end_height, int end_width )
{
	cudaMemcpy( h_matrix, d_matrix, sizeof(int)*MATRIX_SIZE, cudaMemcpyDeviceToHost);
	printf("--------------------\n");
	for(int y=start_height; y < end_height; y++)
	{
		for(int x=start_width; x < end_width; x++)
		{
			printf("%d\t", h_matrix[x + ( y * MATRIX_WIDTH ) ]);
		}
		printf("\n");
	}
}

/*
 * GB per second for `bytes` bytes processed in `runtime` milliseconds.
 * FIX: GB/s = (bytes / 2^30) / (runtime / 1000); the original multiplied
 * by 100 instead of 1000, under-reporting throughput by 10x.
 */
double GBperSec(float runtime, double bytes)
{
	return 1000.0*(bytes/1073741824.0)/runtime;
}

int main( int argc, char *argv[])
{
	if( argc > 4)
	{
		show_window.top=atoi(argv[1]);
		show_window.left=atoi(argv[2]);
		show_window.bottom=atoi(argv[3]);
		show_window.right=atoi(argv[4]);
	}

	int *d_idata, *d_odata, *h_matrix, blocks_count;

	cudaEvent_t init_start, init_end;
	float init_time;
	// create init events
	cudaEventCreate(&init_start);
	cudaEventCreate(&init_end);

	cudaEvent_t transpose_start, transpose_end;
	float transpose_time;
	// create transpose events
	cudaEventCreate(&transpose_start);
	cudaEventCreate(&transpose_end);

	// size validation
	if ( MATRIX_WIDTH % TILE_WIDTH != 0 || MATRIX_HEIGHT % TILE_HEIGHT != 0)
	{
		printf("Invalid matrix size\n");
		return 1;
	}

	cudaMalloc( (void**)&d_idata, sizeof(int) * MATRIX_SIZE );
	cudaMalloc( (void**)&d_odata, sizeof(int) * MATRIX_SIZE );
	h_matrix = new int[ MATRIX_SIZE ];

	// matrix initialization
	// FIX: ceiling-divide the element count by the elements one block
	// initializes.  The original divided by threads-per-block first with
	// integer truncation, launching one block too few and leaving the
	// tail of the matrix uninitialized.
	blocks_count = (MATRIX_SIZE + INIT_ELEMENTS_PER_BLOCK - 1) / INIT_ELEMENTS_PER_BLOCK;

	cudaEventRecord(init_start, 0);
	init_matrix<<< blocks_count, INIT_THREADS_PER_BLOCK >>>(d_idata, MATRIX_SIZE);
	init_matrix_zero<<< blocks_count, INIT_THREADS_PER_BLOCK >>>(d_odata, MATRIX_SIZE);
	cudaEventRecord(init_end, 0);
	cudaEventSynchronize(init_end);
	cudaEventElapsedTime(&init_time, init_start, init_end);

	printf("init matrix:\n\tblocks count: %d\n\tthreads per block: %d\n\telements per thread: %d\n\ttime: %f ms\n\tspeed: %lf GB/s\n",
	       blocks_count, INIT_THREADS_PER_BLOCK, INIT_ELEMENTS_PER_THREAD,
	       init_time, GBperSec(init_time, MATRIX_SIZE*sizeof(int)));
	print_matrix_window(h_matrix, d_idata, show_window.top, show_window.left,
	                    show_window.bottom, show_window.right);

	dim3 blocks(MATRIX_HEIGHT/TILE_HEIGHT, MATRIX_WIDTH/TILE_HEIGHT);
	dim3 threads(ELEMENTS_PER_THREAD_Y, TILE_WIDTH/ROWS_PER_THREAD);

	cudaEventRecord(transpose_start, 0);
	transpose_matrix<<< blocks, threads >>>(d_idata, d_odata);
	cudaEventRecord(transpose_end, 0);
	cudaEventSynchronize(transpose_end);
	cudaEventElapsedTime(&transpose_time, transpose_start, transpose_end);

	printf("\ntranspose matrix:\n\tblocks count x: %d\n\tblocks count y: %d\n\tthreads count x: %d\n\tthreads count y: %d\n\ttime: %f ms\n\tspeed: %lf GB/s\n",
	       MATRIX_HEIGHT/TILE_HEIGHT, MATRIX_WIDTH/TILE_HEIGHT,
	       ELEMENTS_PER_THREAD_Y, TILE_WIDTH/ROWS_PER_THREAD,
	       transpose_time, GBperSec(transpose_time, MATRIX_SIZE*sizeof(int)));
	print_matrix_window(h_matrix, d_odata, show_window.top, show_window.left,
	                    show_window.bottom, show_window.right);

	// clear memory allocation (FIX: also destroy the timing events,
	// which the original leaked)
	cudaFree( d_idata );
	cudaFree( d_odata );
	cudaEventDestroy(init_start);
	cudaEventDestroy(init_end);
	cudaEventDestroy(transpose_start);
	cudaEventDestroy(transpose_end);
	delete [] h_matrix;
	return 0;
}
13,596
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/count.h>
#include <thrust/sort.h>

// Predicate functor: true when a value exceeds the stored threshold.
struct greater_than_five
{
	int threshold;
	greater_than_five(int t) { threshold = t; }
	__host__ __device__ bool operator()(int x) { return x > threshold; }
};

// Shared predicate instance (returns true for x > 5).
greater_than_five pred(5);

int main(int argc, char* argv[])
{
	// Raw sample data: day of reading, site id, and measured value.
	const int dayData[15]     = { 0, 0, 1, 2, 5, 5, 6, 6, 7, 8, 9, 9, 9, 10, 11 };
	const int siteData[15]    = { 2, 3, 0, 1, 1, 2, 0, 1, 2, 1, 3, 4, 0, 1, 2 };
	const int measureData[15] = { 9, 5, 6, 3, 3, 8, 2, 6, 5, 10, 9, 11, 8, 4, 1 };

	thrust::host_vector<int> hostDay(dayData, dayData + 15);
	thrust::host_vector<int> hostSite(siteData, siteData + 15);
	thrust::host_vector<int> hostMeasure(measureData, measureData + 15);

	// Mirror everything onto the device.
	thrust::device_vector<int> deviceDay = hostDay;
	thrust::device_vector<int> deviceSite = hostSite;
	thrust::device_vector<int> deviceMeasure = hostMeasure;

	// Problem (a): count measurements greater than 5.
	int count5 = thrust::count_if(deviceMeasure.begin(), deviceMeasure.end(), pred);
	printf("%d\n", count5);

	// Problem (b): total measurement per site.  Work on copies so the
	// original vectors can receive the reduced output.
	thrust::device_vector<int> sortSite = deviceSite;
	thrust::device_vector<int> measure = deviceMeasure;

	// Group identical sites together, then sum each group.
	thrust::sort_by_key(sortSite.begin(), sortSite.end(), measure.begin());
	thrust::reduce_by_key(sortSite.begin(), sortSite.end(), measure.begin(),
	                      deviceSite.begin(), deviceMeasure.begin());

	thrust::copy(deviceSite.begin(), deviceSite.end(), hostSite.begin());
	thrust::copy(deviceMeasure.begin(), deviceMeasure.end(), hostMeasure.begin());

	// Five distinct sites (0..4): print their sums, space-separated.
	for (int i = 0; i < 5; i++) {
		if (i != 4)
			printf("%d ", hostMeasure[i]);
		else
			printf("%d", hostMeasure[i]);
	}
	printf("\n");
	return 0;
}
13,597
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>

#define N 512

/*
 * Tree reduction over a[0..N-1] within a single block of N/2 threads:
 * each halving step folds the upper half of the active range onto the
 * lower half; the total ends up in a[0] and is copied to o[0].
 */
__global__ void ArithmeticMean (int *a, int *o)
{
	int tid = blockDim.x*blockIdx.x+threadIdx.x;
	for(int i = N/2; i > 0; i = i/2)
	{
		if(tid < i)
		{
			a[tid] += a[tid+i];
		}
		// FIX: threads in different warps must observe each other's
		// partial sums before the next halving step; without this
		// barrier the reduction races and produces garbage.
		__syncthreads();
	}
	// FIX: only one thread publishes the result (every thread wrote
	// o[0] in the original).
	if (tid == 0)
		o[0] = a[0];
}

int main()
{
	int *h_a,*d_a,*o_a,*oh_a;
	int size = N*sizeof(int);

	h_a = (int *)malloc(size);
	oh_a = (int *)malloc(size);
	cudaMalloc((void**)&d_a,size);
	cudaMalloc((void**)&o_a,size);

	// Values 1..N, so the expected mean is (N+1)/2.
	for(int i = 1; i <= N ; i++)
	{
		h_a[i-1] = i;
	}

	cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice);
	ArithmeticMean<<<1, N/2>>>(d_a,o_a);
	cudaDeviceSynchronize();
	cudaMemcpy(h_a,d_a,size,cudaMemcpyDeviceToHost);
	cudaMemcpy(oh_a,o_a,size,cudaMemcpyDeviceToHost);

	float AM =(float) oh_a[0]/N;
	printf("Arithmatic Mean is %.2f\n", AM);

	// FIX: release o_a and oh_a as well — both leaked in the original.
	cudaFree(d_a);
	cudaFree(o_a);
	free(h_a);
	free(oh_a);
	return 0;
}
13,598
 #include <iostream> #include <thrust/iterator/constant_iterator.h> #include <thrust/reduce.h> int main() { // create iterators thrust::constant_iterator<int> first(10); thrust::constant_iterator<int> last = first + 3; std::cout << first[0] << std::endl; // returns 10 std::cout << first[1] << std::endl; // returns 10 std::cout << first[100] << std::endl; // returns 10 // sum of [first, last) int result = thrust::reduce(first, last); // returns 30 (i.e. 3 * 10) std::cout << result << std::endl; return 0; }
13,599
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <time.h>

/*
 * Benchmarks thrust::sort over increasing key counts.  Each size is
 * sorted enough times to process at least 5 billion keys in total, and
 * throughput is reported in Mkeys/s.
 * NOTE(review): the same device buffer is re-sorted each iteration, so
 * after the first pass the input is already sorted — this measures
 * sorted-input throughput for all but the first iteration.
 */
int main(void)
{
	int max_count = 50'000'000;
	thrust::host_vector<int> host(max_count);

	// Fill all 32 bits of each key.  FIX: compute in unsigned arithmetic;
	// rand() + 2 * rand() can overflow signed int, which is undefined
	// behavior.
	for(int i = 0; i < max_count; ++i)
		host[i] = (int)((unsigned)rand() + 2u * (unsigned)rand());

	// Copy in host data.
	thrust::device_vector<int> gpu = host;

	int sizes[] { 1, 2, 4, 6, 8, 10, 12, 14, 16, 20, 25, 30, 35, 40, 45, 50 };
	for(int size : sizes) {
		// Sort 5 billion keys at least.
		int count = 1'000'000 * size;
		int num_iterations = (int)ceil(5.0e9 / count);

		cudaDeviceSynchronize();
		timespec start;
		clock_gettime(CLOCK_REALTIME, &start);

		for(int i = 0; i < num_iterations; ++i)
			thrust::sort(gpu.begin(), gpu.begin() + count);

		cudaDeviceSynchronize();
		timespec end;
		clock_gettime(CLOCK_REALTIME, &end);

		double elapsed = (end.tv_sec - start.tv_sec) +
		                 (end.tv_nsec - start.tv_nsec) * 1.0e-9;
		double rate = (double)count * num_iterations / elapsed / 1.0e6;
		printf("%9d: %20.5f time=%f, iterations=%d\n",
		       count, rate, elapsed, num_iterations);
	}
	return 0;
}
13,600
#include "includes.h"

// In-place row update over a row-major buffer with row length N:
//   x[(idx-2)*N + i] = W0 * x[(idx-1)*N + i] - W1 * x[(idx-2)*N + i]
// for every column i in [0, N), using a grid-stride loop so any launch
// configuration covers the row.  Assumes idx >= 2 (caller's
// responsibility — not checked here).
__global__ void Subtract( float * x, size_t idx, size_t N, float W0, float W1)
{
	const size_t dst = (idx - 2) * N;  // row being overwritten
	const size_t src = (idx - 1) * N;  // row being read
	const size_t stride = blockDim.x * gridDim.x;

	for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
	{
		x[dst + i] = W0 * x[src + i] - W1 * x[dst + i];
	}
}