serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
8,001
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <time.h>
#include <numeric>
#include <random>
#include <ctime>
#include <chrono>
#include <limits>   // std::numeric_limits used below; previously pulled in transitively

// Streams two binary files of ints through the GPU in fixed-size chunks and
// writes c[i] = a[i]*3 + b[i] - 8 to "new.txt".
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

// One thread per element.  Launched with a single block, so only threadIdx.x
// is meaningful as the element index.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i]*3 + b[i] -8;
}

int main(int argc, char** argv)
{
    // Ints per chunk; must stay <= the 1024 threads-per-block limit because
    // addWithCuda launches one thread per element in a single block.
    const int CH_BUF_LEN = 32*32;

    std::ifstream infileA("file_a.txt", std::ifstream::binary);
    if (!infileA.is_open()) {
        // BUGFIX: message previously said "test_a.txt" but the code opens "file_a.txt".
        std::cerr << "no file file_a.txt" << std::endl;
        return 1;
    }
    std::ifstream infileB("file_b.txt", std::ifstream::binary);
    if (!infileB.is_open()) {
        // BUGFIX: message previously said "test_b.txt".
        std::cerr << "no file file_b.txt" << std::endl;
        return 2;
    }
    std::ofstream outfile("new.txt", std::ofstream::binary);
    if (!outfile.is_open()) {
        std::cerr << "cannot create new.txt" << std::endl;
        return 3;   // BUGFIX: was 2, colliding with the infileB failure exit code
    }

    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        std::cerr<< "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?" <<std::endl;
        return 12;
    }

    auto start = std::chrono::system_clock::now();
    std::time_t start_time = std::chrono::system_clock::to_time_t(start);
#pragma warning(suppress : 4996)
    std::cout << "start at " << std::ctime(&start_time) << std::endl;

    // Determine each file's size by consuming the stream, then rewind.
    infileA.ignore(std::numeric_limits<std::streamsize>::max());
    std::streamsize sizeA = infileA.gcount();
    infileA.clear(); // Since ignore will have set eof.
    infileA.seekg(0, std::ios_base::beg);

    infileB.ignore(std::numeric_limits<std::streamsize>::max());
    // BUGFIX: sizeB was previously taken from infileA.gcount(), so the
    // size-match check below compared file A against itself.
    std::streamsize sizeB = infileB.gcount();
    infileB.clear(); // Since ignore will have set eof.
    infileB.seekg(0, std::ios_base::beg);

    if (sizeA != sizeB) {
        std::cerr << "file_a.txt size not match file_b.txt size" << std::endl;
        return 4;
    }

    int tmpBufA[CH_BUF_LEN] = { 0 };
    int tmpBufB[CH_BUF_LEN] = { 0 };
    int tmpBufC[CH_BUF_LEN] = { 0 };

    // NOTE: if the file size is not a multiple of CH_BUF_LEN*sizeof(int), the
    // last chunk keeps stale bytes from the previous iteration (original
    // behavior preserved); callers should supply chunk-aligned files.
    for (int i = 0; i < sizeA; i += CH_BUF_LEN * sizeof(int)) {
        infileA.seekg(i);
        infileB.seekg(i);
        // read content of each input file
        infileA.read((char*)tmpBufA, CH_BUF_LEN * sizeof(int));
        // BUGFIX: the second chunk was also read from infileA, so file B's
        // contents were never used.
        infileB.read((char*)tmpBufB, CH_BUF_LEN * sizeof(int));

        // Add vectors in parallel, then write the chunk to the output file.
        cudaStatus = addWithCuda(tmpBufC, tmpBufA, tmpBufB, CH_BUF_LEN);
        if (cudaStatus != cudaSuccess) {
            std::cout << "addWithCuda failed!" << std::endl;
            return 1;
        }
        outfile.write((char*)tmpBufC, CH_BUF_LEN * sizeof(int));
    }

    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        std::cerr << "cudaDeviceReset failed!" << std::endl;
        return 1;
    }
    outfile.close();

    auto end = std::chrono::system_clock::now();
    std::time_t end_time = std::chrono::system_clock::to_time_t(end);
    std::chrono::duration<double> elapsed_seconds = end - start;
#pragma warning(suppress : 4996)
    std::cout << "finished at " << std::ctime(&end_time)
              << "elapsed time: " << elapsed_seconds.count() << "s" << std::endl;
    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers, copies inputs, launches addKernel, copies the
// result back, and frees the buffers.  Returns the first CUDA error seen.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

    // BUGFIX: was <<<256, size>>>.  The kernel indexes only by threadIdx.x,
    // so 256 blocks all wrote the same `size` elements concurrently.  One
    // block with `size` threads covers every element exactly once.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
8,002
#include <iostream>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;

// BUGFIX: was 100, which makes the launch below request 100*100 = 10000
// threads per block — far over the 1024 threads-per-block hardware limit, so
// BOTH kernels failed to launch (silently, as launch errors were not checked).
// 32*32 = 1024 is the largest legal square tile.
#define TILE_WIDTH 32 // for shared kernel

// CPU reference: R (dim2 x dim1) = transpose of M (dim1 x dim2), row-major.
__host__ void cpuTranspose(float* M, float* R, int dim1, int dim2){
    for (int i = 0; i < dim1; i++){
        for (int j = 0; j < dim2; j++){
            // BUGFIX: R has dim1 columns, so its row stride is dim1 (was dim2;
            // only correct for square matrices).
            R[j*dim1 + i] = M[i*dim2 + j];
        }
    }
}

// NAIVE APPROACH - global memory access only
__global__ void transpose(float* M, float* R, int dim1, int dim2){
    int column = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if(column < dim2 && row < dim1){
        // BUGFIX: output row stride must be dim1 for non-square inputs.
        R[column*dim1 + row] = M[column + row*dim2];
    }
    // __syncthreads() not needed above as only non-conflicting read/write operations occurring
}

// SHARED MEM APPROACH - stage the tile through shared memory
__global__ void sharedMem_transpose(float* M, float* R, int dim1, int dim2){
    // +1 padding on the inner dimension avoids shared-memory bank conflicts
    // when the tile is read back column-wise.
    __shared__ float M_Shared[TILE_WIDTH][TILE_WIDTH + 1];
    int column = TILE_WIDTH * blockIdx.x + threadIdx.x;
    int row = TILE_WIDTH * blockIdx.y + threadIdx.y;
    int index_in = row*dim2 + column;
    int index_out = column*dim1 + row;   // BUGFIX: stride dim1, not dim2
    if(row < dim1 && column < dim2){
        M_Shared[threadIdx.y][threadIdx.x] = M[index_in];
    }
    // barrier is outside the guard: every thread of the block reaches it
    __syncthreads();
    // transfer to global mem after all threads finish loading
    if(row < dim1 && column < dim2){
        R[index_out] = M_Shared[threadIdx.y][threadIdx.x];
    }
}

int main(void){
    int const dim1 = 3000;
    int const dim2 = 3000;
    float *M_h;
    float *R_h;
    float *M_d;
    float *R_d;
    size_t size = dim1*dim2*sizeof(float);
    cudaMallocHost((float**)&M_h,size); //page locked host mem allocation
    R_h = (float*)malloc(size);
    cudaMalloc((float **)&M_d, size);
    // init matrix
    for (int i = 0; i < dim1*dim2; ++i) { M_h[i]=i; }
    // asynchronous mem copies can only happen from pinned memory (direct RAM transfer)
    // CPU cannot be held up in mem copies for async copy
    cudaMemcpyAsync(M_d,M_h,size,cudaMemcpyHostToDevice);
    cudaMalloc((float**)&R_d,size);
    cudaMemset(R_d,0,size); // init

    // kernel launch configuration (ceil-div so partial edge tiles are covered)
    int threadNumX = TILE_WIDTH;
    int threadNumY = TILE_WIDTH;
    // BUGFIX: blockIdx.x indexes columns (dim2) and blockIdx.y rows (dim1);
    // the two were swapped (harmless only because dim1 == dim2 here).
    int blockNumX = dim2 / TILE_WIDTH + (dim2%TILE_WIDTH == 0 ? 0 : 1 );
    int blockNumY = dim1 / TILE_WIDTH + (dim1%TILE_WIDTH == 0 ? 0 : 1 );
    dim3 blockSize(threadNumX,threadNumY);
    dim3 gridSize(blockNumX, blockNumY);

    // CUDA TIMER to Measure the performance
    cudaEvent_t start_naive, start_shared, stop_shared, stop_naive;
    float elapsedTime1, elapsedTime2;
    cudaEventCreate(&start_naive);
    cudaEventCreate(&stop_naive);
    cudaEventCreate(&start_shared);
    cudaEventCreate(&stop_shared);

    cudaEventRecord(start_naive, 0);
    transpose<<<gridSize,blockSize>>>(M_d,R_d,dim1,dim2);
    cudaEventRecord(stop_naive, 0);
    cudaEventSynchronize(stop_naive);
    cudaEventElapsedTime(&elapsedTime1, start_naive, stop_naive);

    cudaEventRecord(start_shared,0);
    sharedMem_transpose<<<gridSize,blockSize>>>(M_d,R_d,dim1,dim2);
    cudaEventRecord(stop_shared, 0);
    cudaEventSynchronize(stop_shared);
    cudaEventElapsedTime(&elapsedTime2, start_shared, stop_shared);

    clock_t begin = clock();
    cpuTranspose(M_h, R_h, dim1, dim2); // matrix transpose on cpu
    clock_t end = clock();
    double elapsedTime3 = (double)1000*(end - begin) / CLOCKS_PER_SEC;

    cout <<"Time for the NAIVE kernel: "<<elapsedTime1<<" ms"<<endl;
    cout <<"Time for the SHARED kernel: "<<elapsedTime2<<" ms"<<endl;
    cout <<"Time for the CPU code "<<elapsedTime3<<" ms"<<endl;

    cudaFreeHost(M_h);
    free(R_h);
    cudaFree(R_d);
    cudaFree(M_d);
    return 0;
}
8,003
// Generic device-side helper: returns the sum of its two arguments.
template <class T>
__device__ T add_two_vector(T x, T y)
{
    T sum = x + y;
    return sum;
}

extern "C" {

// Element-wise integer vector addition: res[i] = a[i] + b[i] for i < nx.
// One thread per element; threads past the end simply exit.
__global__ void add_two_vector_kernel(int nx, int *a, int *b, int *res)
{
    const int x = threadIdx.x + blockDim.x * blockIdx.x;
    if (x >= nx) {
        return;   // guard the grid tail
    }
    res[x] = add_two_vector<int>(a[x], b[x]);
}

} // extern "C"
8,004
/*
 * This is a simple CUDA code that negates an array of integers.
 * It introduces the concepts of device memory management, and
 * kernel invocation.
 *
 * Training material developed by James Perry and Alan Gray
 * Copyright EPCC, The University of Edinburgh, 2010
 *
 * NOTE(review): this is an intentionally incomplete training skeleton — the
 * "Part 1A..2C" sections are exercises for the student, so as shipped the
 * program prints all zeros.  Do not fill in the blanks in this copy.
 */
#include <stdio.h>
#include <stdlib.h>

/* Forward Declaration*/
/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char*);

/* The number of integer elements in the array */
#define ARRAY_SIZE 256

/*
 * The number of CUDA blocks and threads per block to use.
 * These should always multiply to give the array size.
 * For the single block kernel, NUM_BLOCKS should be 1 and
 * THREADS_PER_BLOCK should be the array size
 */
#define NUM_BLOCKS 1
#define THREADS_PER_BLOCK 256

/* The actual array negation kernel (basic single block version) */
__global__ void negate(int *d_a)
{
    /* Part 2B: negate an element of d_a */
}

/* Multi-block version of kernel for part 2C */
__global__ void negate_multiblock(int *d_a)
{
    /* Part 2C: negate an element of d_a, using multiple blocks this time */
}

/* Main routine: allocate, copy, launch, copy back, print, free. */
int main(int argc, char *argv[])
{
    int *h_a, *h_out;
    int *d_a;
    int i;
    size_t sz = ARRAY_SIZE * sizeof(int);

    /* Print device details */
    int deviceNum;
    cudaGetDevice(&deviceNum);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, deviceNum);
    printf(" Device name: %s\n", prop.name);

    /*
     * allocate memory on host
     * h_a holds the input array, h_out holds the result
     */
    h_a = (int *) malloc(sz);
    h_out = (int *) malloc(sz);

    /*
     * allocate memory on device
     */
    /* Part 1A: allocate device memory */

    /* initialise host arrays */
    for (i = 0; i < ARRAY_SIZE; i++) {
        h_a[i] = i;
        h_out[i] = 0;
    }

    /* copy input array from host to GPU */
    /* Part 1B: copy host array h_a to device array d_a */

    /* run the kernel on the GPU */
    /* Part 2A: configure and launch kernel (un-comment and complete) */
    /* dim3 blocksPerGrid( ); */
    /* dim3 threadsPerBlock( ); */
    /* negate<<< , >>>( ); */

    /* wait for all threads to complete and check for errors */
    cudaDeviceSynchronize();
    checkCUDAError("kernel invocation");

    /* copy the result array back to the host */
    /* Part 1C: copy device array d_a to host array h_out */
    checkCUDAError("memcpy");

    /* print out the result */
    printf("Results: ");
    for (i = 0; i < ARRAY_SIZE; i++) {
        printf("%d, ", h_out[i]);
    }
    printf("\n\n");

    /* free device buffer */
    /* Part 1D: free d_a */

    /* free host buffers */
    free(h_a);
    free(h_out);

    return 0;
}

/* Utility function to check for and report CUDA errors.
 * Reads (and clears) the last CUDA error; on failure prints `msg` plus the
 * CUDA error string and exits the process. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
8,005
#include <cuda.h>
#include <iostream>
using namespace std;

// simple vector addition kernel: C[i] = A[i] + B[i] for every i below n
__global__ void vecAddKernel(float* A, float* B, float* C, int n)
{
    // flat global index from block and thread coordinates
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
        return;   // nothing to do for threads past the end
    C[index] = A[index] + B[index];
}

// Adds host vectors h_A and h_B (length n) on the GPU, storing the sum in h_C.
void vecAdd(float* h_A, float* h_B, float* h_C, int n)
{
    const int THREAD_SIZE = 1024;       // threads per block
    const int numBytes = n * sizeof(float);

    // device copies of the three vectors
    float* d_A;
    float* d_B;
    float* d_C;
    cudaMalloc((void**) &d_A, numBytes);
    cudaMalloc((void**) &d_B, numBytes);
    cudaMalloc((void**) &d_C, numBytes);

    // stage the inputs on the device
    cudaMemcpy(d_A, h_A, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, numBytes, cudaMemcpyHostToDevice);

    // ceil-div grid so the last partial block is covered
    dim3 dimGrid((n-1)/THREAD_SIZE + 1, 1, 1);
    dim3 dimBlock(THREAD_SIZE, 1, 1);
    vecAddKernel<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, n);

    // bring the result back (blocking copy also synchronizes the kernel)
    cudaMemcpy(h_C, d_C, numBytes, cudaMemcpyDeviceToHost);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

// This program adds two vectors together using CUDA.
int main(int argc, char* argv[])
{
    const int VECTOR_SIZE = 10000;

    // allocate the two addends and the sum vector, then initialize them:
    // A[i] = 2i, B[i] = 3i, C zeroed
    float* A = new float[VECTOR_SIZE];
    float* B = new float[VECTOR_SIZE];
    float* C = new float[VECTOR_SIZE];
    for (int i = 0; i < VECTOR_SIZE; i++) {
        A[i] = 2*i;
        B[i] = 3*i;
        C[i] = 0;
    }

    // perform vector addition with CUDA
    vecAdd(A, B, C, VECTOR_SIZE);

    // print result
    for (int i = 0; i < VECTOR_SIZE; i++)
        cout << "C[" << i << "]=" << C[i] << endl;

    // free memory
    delete[] A;
    delete[] B;
    delete[] C;
    return 0;
}
8,006
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
//#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
#include <cstdint>
#include <algorithm>   // std::max — previously relied on transitive includes
#include <cmath>       // std::abs

// SAXPY functor: operator() returns a*x + y.
// BUGFIX/modernization: no longer derives from thrust::binary_function.
// That adaptor base (mirroring std::binary_function, removed in C++17) is
// deprecated and removed in recent Thrust/CCCL releases, and
// thrust::transform never required it — only a callable operator().
struct saxpy_functor
{
    const float a;   // scale factor captured at construction

    saxpy_functor(float _a) : a(_a) {}

    __host__ __device__
    float operator()(const float& x, const float& y) const {
        return a * x + y;
    }
};

int main(int argc, char *argv[])
{
    std::size_t N = 1<<20;
    thrust::host_vector<float> host_a(N,1.f);
    thrust::host_vector<float> host_b(N,2.f);
    const float scale = 42.f;

    // copy inputs to the device
    thrust::device_vector<float> dev_a = host_a;
    thrust::device_vector<float> dev_b = host_b;

    // dev_a = scale * dev_a + dev_b, computed on the device
    thrust::transform(dev_a.begin(), dev_a.end(),   // input range #1
                      dev_b.begin(),                // input range #2
                      dev_a.begin(),                // output range
                      saxpy_functor(scale));
    // thrust::transform(thrust::system::cuda::par,
    //                   dev_a.begin(), dev_a.end(), // input range #1
    //                   dev_b.begin(),              // input range #2
    //                   dev_a.begin(),              // output range
    //                   saxpy_functor(scale));

    host_a = dev_a;

    // every element should be 42*1 + 2 = 44
    float max_error = 0.0f;
    for (const float& item : host_a )
        max_error = std::max(max_error, std::abs(item-44.0f));
    std::cout << "Max error: " << max_error << std::endl;
    return 0;
}
8,007
#include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Applies a per-sample frequency correction:
//   out[i] = in[i] * e^{-j * freq_offset * (start_idx + i)}
// One thread per sample; threads past n exit.
// NOTE(review): start_idx is a float, so sample indices above ~2^24 lose
// precision — confirm callers keep start_idx within float-exact range.
__global__ void freq_correction_kernel(cuFloatComplex *in, cuFloatComplex *out,
                                       float freq_offset, float start_idx, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // e^{jx} = cos x + j sin x
    float x = -freq_offset * (float)(start_idx + i);
    // Explicit single-precision sincosf computes both terms in one call
    // (the previous cos/sin pair evaluated the argument twice and did not
    // make the single-precision intent explicit).
    float s, c;
    sincosf(x, &s, &c);
    out[i] = cuCmulf(in[i], make_cuFloatComplex(c, s));
  }
}

// Host wrapper: launches the kernel on `stream` with the given configuration.
void exec_freq_correction(cuFloatComplex *in, cuFloatComplex *out,
                          float freq_offset, float start_idx, int n,
                          int grid_size, int block_size, cudaStream_t stream) {
  freq_correction_kernel<<<grid_size, block_size, 0, stream>>>(
      in, out, freq_offset, start_idx, n);
}

// Queries the occupancy-optimal launch configuration for the kernel.
void get_block_and_grid_freq_correction(int *minGrid, int *minBlock) {
  cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, freq_correction_kernel, 0, 0);
}
8,008
#include "includes.h"

// Interleaved-pair parallel sum reduction: each block reduces its own
// blockDim.x-element slice of g_idata in place, then thread 0 writes the
// block's partial sum to g_odata[blockIdx.x].  The host must finish the
// reduction across blocks.
// Preconditions (NOT checked here):
//  - blockDim.x must be a power of two (stride halves each round) — confirm
//    at the launch site.
//  - n should be a multiple of blockDim.x: the early return below exits
//    threads with idx >= n BEFORE the __syncthreads() in the loop, which is
//    only safe when an entire block exits together.
__global__ void reduceInterleaved(int *g_idata, int *g_odata, unsigned int n)
{
    // set the thread id.
    unsigned int tid = threadIdx.x;
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // convert global data pointer to the local pointer of this block.
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // boundary check (see precondition note above about whole-block exit).
    if (idx >= n) return;
    // in-place reduction in global memory: pair elements tid and tid+stride,
    // halving the active range each iteration.
    for (int stride = blockDim.x/2; stride>0; stride>>=1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        // synchronize within threadblock (outside the divergent if, so every
        // remaining thread reaches it each iteration).
        __syncthreads();
    }
    // write result for this block to global mem.
    if (tid == 0)
    {
        g_odata[blockIdx.x] = idata[0];
    }
}
8,009
#include <stdio.h>
#include <stdlib.h>

// Code Generated with GCDObsidian
// NOTE(review): by C operator precedence, ((bid*32)+tid&15) parses as
// ((bid*32)+tid) & 15, i.e. the READ index is the global index masked to
// 0..15, while the WRITE index is unmasked.  With the single 32-thread block
// launched below this reads input0[tid & 15].  Presumably intentional output
// of the generator — confirm against the GCDObsidian source program.
__global__ void kernel0(int *input0,int *result0){
  unsigned int tid = threadIdx.x;
  unsigned int bid = blockIdx.x;
  // extern __shared__ unsigned char sbase[];
  result0[((bid*32)+tid)] = input0[((bid*32)+tid&15)] + 32;
}

// coordination code we want to generate
// Copies input0 to the device, runs kernel0 on one 32-thread block, and
// copies 32 ints back into output0.  Always returns 0.
// NOTE(review): the D2H copy is hard-coded to 32 elements regardless of
// output0size; no CUDA call is error-checked (the generator's own TODO below).
int coord(int *input0, int input0size, int *output0, int output0size){
  int* dinput0;
  int* doutput0;
  cudaMalloc((void**)&dinput0, sizeof(int) * input0size );
  cudaMalloc((void**)&doutput0, sizeof(int) * output0size );
  cudaMemcpy(dinput0, input0, sizeof(int) * input0size, cudaMemcpyHostToDevice);
  kernel0<<<1, 32,0 >>>((int*)dinput0,(int*)doutput0);
  cudaMemcpy(output0, doutput0, sizeof(int) * 32 , cudaMemcpyDeviceToHost);
  cudaFree(dinput0);
  cudaFree(doutput0);
  return 0;
  // Also. add some error checking...
}

// Driver: feeds 0..31 through the kernel and prints the 32 results.
int main(int argc, char **argv){
  int values[32];
  int result[32];
  //generate input data
  for (int i = 0; i < 32; ++i) { values[i] = i; }
  coord(values,32,result,32);
  // show results
  for (int i = 0; i < 32; ++i) { printf("%d ", ((int*)result)[i]); }
}
8,010
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

// CPU reference version (unused; kept for comparison with the kernel).
void f(int *A, int *B) {
    for(int i = 0; i < 5; i++)
        A[i] = 10;
}

// Each of the 5 launched threads writes 10 into its own slot A[threadIdx.x].
// The inner loop stores the same value 5 times — redundant, but preserved
// from the original to keep behavior identical.
__global__ void fCuKernel(int *A, int *B) {
    for(int i = 0; i < 5; i++)
        A[threadIdx.x] = 10;
}

// Abort-on-error helper: prints the failing line and CUDA error string.
#define CHECK(r) {_check((r), __LINE__);}
void _check(cudaError_t r, int line) {
    if (r != cudaSuccess) {
        printf("CUDA error on line %d: %s\n", line, cudaGetErrorString(r));
        exit(0);
    }
}

// Prints the 5-element array as "index := value" lines.
void print_arr(int *A) {
    for(int i = 0; i < 5; i++)
        printf("%d := %d\n", i, A[i]);
}

int main() {
    int *A = NULL;
    int *B = NULL;
    const int ManagedMemory = 1;   // 1: cudaMallocManaged, 0: plain malloc
    printf("*** ManagedMemory: %d\n", ManagedMemory);
    if (ManagedMemory) {
        printf("CudaMallocManaged-allocating A & B...\n");
        CHECK(cudaMallocManaged(&A, sizeof(int) * 5, cudaMemAttachGlobal));
        printf("A allocated...\n");
        CHECK(cudaMallocManaged(&B, sizeof(int) * 5, cudaMemAttachGlobal));
        printf("B allocated...\n");
    } else {
        A = (int *)malloc(sizeof(int) * 5);
        B = (int *)malloc(sizeof(int) * 5);
    }
    assert(A != NULL);
    assert(B != NULL);
    for(int i = 0; i < 5; i++) {
        B[i] = i + 1;
        A[i] = -42;
    }
    printf("launching kernel...\n");
    // f(A, B);
    fCuKernel<<<1,5>>>(A, B);
    // BUGFIX: launch and execution errors were silently dropped — the bare
    // cudaDeviceSynchronize() return value was ignored and cudaGetLastError()
    // was never called.
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    printf("printing A...\n");
    print_arr(A);
    // BUGFIX: A and B were leaked; release with the allocator that matches
    // the branch taken above.
    if (ManagedMemory) {
        CHECK(cudaFree(A));
        CHECK(cudaFree(B));
    } else {
        free(A);
        free(B);
    }
    return 0;
}
8,011
#include "vec3.cuh"
#include <math.h>

// Component-wise and scalar arithmetic for vec3.  CUDA_GEN_BOTH (defined in
// vec3.cuh) marks the functions for both host and device compilation.

// --- binary operators built on the compound assignments below ---
CUDA_GEN_BOTH vec3 vec3::operator+(const vec3& b) { vec3 ret(*this); ret += b; return ret; }
CUDA_GEN_BOTH vec3 vec3::operator-(const vec3& b) { vec3 ret(*this); ret -= b; return ret; }
CUDA_GEN_BOTH vec3 vec3::operator*(const vec3& b) { vec3 ret(*this); ret *= b; return ret; }
CUDA_GEN_BOTH vec3 vec3::operator+(float b) { vec3 ret(*this); ret += b; return ret; }
CUDA_GEN_BOTH vec3 vec3::operator-(float b) { vec3 ret(*this); ret -= b; return ret; }
CUDA_GEN_BOTH vec3 vec3::operator*(float b) { vec3 ret(*this); ret *= b; return ret; }
CUDA_GEN_BOTH vec3 vec3::operator/(float b) { vec3 ret(*this); ret /= b; return ret; }

// --- compound assignments (component-wise) ---
CUDA_GEN_BOTH vec3& vec3::operator+=(const vec3& b) { this->x += b.x; this->y += b.y; this->z += b.z; return *this; }
CUDA_GEN_BOTH vec3& vec3::operator-=(const vec3& b) { this->x -= b.x; this->y -= b.y; this->z -= b.z; return *this; }
CUDA_GEN_BOTH vec3& vec3::operator*=(const vec3& b) { this->x *= b.x; this->y *= b.y; this->z *= b.z; return *this; }
CUDA_GEN_BOTH vec3& vec3::operator+=(float b) { this->x += b; this->y += b; this->z += b; return *this; }
CUDA_GEN_BOTH vec3& vec3::operator-=(float b) { this->x -= b; this->y -= b; this->z -= b; return *this; }
CUDA_GEN_BOTH vec3& vec3::operator*=(float b) { this->x *= b; this->y *= b; this->z *= b; return *this; }
CUDA_GEN_BOTH vec3& vec3::operator/=(float b) { this->x /= b; this->y /= b; this->z /= b; return *this; }

// Dot product.
CUDA_GEN_BOTH float vec3::dot(const vec3& b) { return x * b.x + y * b.y + z * b.z; }

// Cross product (right-handed).
CUDA_GEN_BOTH vec3 vec3::cross(const vec3& b) { return { y*b.z - z*b.y, z*b.x - x*b.z, x*b.y - y*b.x }; }

// Euclidean length and squared length (magSq avoids the sqrt).
CUDA_GEN_BOTH float vec3::mag() { return sqrtf(x*x + y*y + z*z); }
CUDA_GEN_BOTH float vec3::magSq() { return x*x + y*y + z*z; }

// In-place negation of all three components.
CUDA_GEN_BOTH vec3& vec3::flipSign() { x = -x; y = -y; z = -z; return *this; }

// Tolerance tests use squared magnitudes to avoid sqrt.
CUDA_GEN_BOTH bool vec3::isZero(float epsilon) { return magSq() <= epsilon*epsilon; }
CUDA_GEN_BOTH bool vec3::isEqual(const vec3& b, float epsilon) { return (*this - b).magSq() <= epsilon*epsilon; }

// Scales the vector down so its magnitude does not exceed `mag`.
// BUGFIX: previously called this->operator*(mag / this->mag()), which builds
// and DISCARDS a scaled copy — the vector was never actually clamped.  The
// compound assignment below mutates *this as intended.
CUDA_GEN_BOTH vec3& vec3::clampMag(float mag) {
    if (this->magSq() > mag*mag)
        *this *= mag / this->mag();
    return *this;
}

// --- free functions for scalar-on-the-left arithmetic ---
CUDA_GEN_BOTH vec3 operator+(float a, const vec3& b) { return {a + b.x, a + b.y, a+ b.z}; }
CUDA_GEN_BOTH vec3 operator-(float a, const vec3& b) { return { a - b.x, a - b.y, a - b.z }; }
CUDA_GEN_BOTH vec3 operator*(float a, const vec3& b) { return { a*b.x, a*b.y, a*b.z }; }
8,012
#include "includes.h"

// Scatter-style unpooling: for every input element i < n, loc[i] holds the
// flattened destination position inside a matrix with colsdst columns; the
// value src[i] is written there.  Uses a grid-stride loop, so any launch
// configuration covers all n elements.
__global__ void cu_unpooling(const float* src, const float* loc, float* dst, const int colsdst, const int n){
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        const int flat = (int)(loc[i]);       // flattened target index
        const int r = flat / colsdst;         // destination row
        const int c = flat % colsdst;         // destination column
        dst[r * colsdst + c] = src[i];
    }
}
8,013
#include <cuda_runtime.h>
#include <stdio.h>

/***
 * The application's data size is kept constant; this demonstrates how the
 * grid size changes to match changes in the block size.
 * (Comments translated from the original Japanese.)
 */
int main(int argc, char **argv) {
    // define the total number of data elements
    int nElem = 1024;

    // define the grid and block structure (ceil-div of elements by block size)
    dim3 block(1024);
    dim3 grid((nElem + block.x - 1) / block.x); // (1024 + 1024 -1) / 1024 = 2047 / 1024 = 1 --> grid = 1
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset the block size
    block.x = 512;
    grid.x = (nElem + block.x - 1) / block.x; // (1024 + 512 -1) / 512 = 1535 / 512 = 2 --> grid = 2
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset the block size
    block.x = 256;
    grid.x = (nElem + block.x - 1) / block.x; // (1024 + 256 - 1) / 256 = 1279 / 256 = 4
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset the block size
    block.x = 128;
    grid.x = (nElem + block.x - 1) / block.x; // (1024 + 128 - 1) / 128 = 1151 / 128 = 8
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    // reset the device
    cudaDeviceReset();
}
8,014
#include <iostream>
#include <chrono> /* time manipulation */

// NOTE(review): LENGTH = 1 << 30 elements means each float array is 4 GiB —
// the four host arrays alone need 16 GiB of RAM and the three device arrays
// 12 GiB of GPU memory.  Confirm the target machine can hold this, or reduce
// LENGTH; no allocation below is checked for failure.
const int LENGTH{1 << 30};
const int MAX_VAL{1 << 4};   // element values are drawn from [0, 15]
/**
 * @description: kernel definition
 * @param {float*} x: vector x
 * @param {float*} y: vector y
 * @param {float*} z: vector z, the addition of x and y
 * @param {int} length: the length of vectors
 */
__global__ void add_vectors(float* x, float* y, float* z, int length) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < length ) {
        z[tid] = x[tid] + y[tid];
    }
}
/**
 * @description: initialize vectors (fixed seed => reproducible inputs)
 * @param {float*} x: vector x
 * @param {float*} y: vector y
 * @param {float*} z: vector z
 * @param {float*} z_from_cuda: vector z_from_cuda
 * @param {int} length: the length of vectors
 */
void initialize(float* x, float* y, float* z, float* z_from_cuda, int length) {
    srand(1);
    for ( int i = 0; i < length; i++ ) {
        x[i] = rand() % MAX_VAL;
        y[i] = rand() % MAX_VAL;
        z[i] = 0;
        z_from_cuda[i] = 0;
    }
}
/**
 * @description: sequential version of vector addition
 * @param {float*} x: vector x
 * @param {float*} y: vector y
 * @param {float*} z: vector z
 * @param {int} length: the length of vectors
 */
void sequential_add_vectors(float* x, float* y, float* z, int length) {
    for ( int i = 0; i < length; i++ ) {
        z[i] = x[i] + y[i];
    }
}
/**
 * @description: validate results from sequential version and parallel version
 * (exact float comparison is safe here: inputs are small integers, so the
 * sums are exactly representable)
 * @param {float*} h_z: vector h_z
 * @param {float*} h_z_from_cuda: vector h_z_from_cuda
 * @param {int} length: the length of vectors
 */
void validate_result(float* h_z, float* h_z_from_cuda, int length){
    for ( int i = 0; i < length; i++ ) {
        if ( h_z_from_cuda[i] != h_z[i] ){
            std::cout << "False" << std::endl;
            return;
        }
    }
    std::cout << "True" << std::endl;
}

int main() {
    std::chrono::steady_clock::time_point start_time_cuda;
    std::chrono::steady_clock::time_point end_time_cuda;
    std::chrono::duration<double> duration_cuda;
    std::chrono::steady_clock::time_point start_time;
    std::chrono::steady_clock::time_point end_time;
    std::chrono::duration<double> duration;
    double t1;   // sequential wall time [s]
    double tp;   // cuda (copy + kernel + copy) wall time [s]

    // set the ID of the CUDA device
    cudaSetDevice(0);

    // create local data objects
    float* h_x = new float[LENGTH];
    float* h_y = new float[LENGTH];
    float* h_z = new float[LENGTH];
    float* h_z_from_cuda = new float[LENGTH];
    initialize(h_x, h_y, h_z, h_z_from_cuda, LENGTH);

    std::cout << "Running sequential version: " << std::endl;
    start_time = std::chrono::steady_clock::now();
    sequential_add_vectors(h_x, h_y, h_z, LENGTH);
    end_time = std::chrono::steady_clock::now();
    duration = end_time - start_time;
    t1 = duration.count();
    std::cout << "Time: " << t1 << std::endl;

    // cuda version
    const int threadsPerBlock = 1024;
    const int numOfBlocks = (LENGTH + threadsPerBlock - 1) / threadsPerBlock;
    size_t size = LENGTH * sizeof(float);
    float* d_x;
    float* d_y;
    float* d_z;
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_z, size);

    std::cout << "Running cuda version: " << std::endl;
    // NOTE(review): the timed section includes both H2D and D2H transfers, so
    // tp measures end-to-end offload cost, not kernel time alone.
    start_time_cuda = std::chrono::steady_clock::now();
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
    // cuda operation
    add_vectors<<<numOfBlocks, threadsPerBlock>>>(d_x, d_y, d_z, LENGTH);
    // Copy result from device memory to host memory
    // (this blocking copy also waits for the kernel to finish)
    cudaMemcpy(h_z_from_cuda, d_z, size, cudaMemcpyDeviceToHost);
    end_time_cuda = std::chrono::steady_clock::now();
    duration_cuda = end_time_cuda - start_time_cuda;
    tp = duration_cuda.count();
    std::cout << "Time: " << tp << std::endl;

    // free device memory
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);

    // synchronize the GPU preventing premature termination
    cudaDeviceSynchronize();

    validate_result(h_z, h_z_from_cuda, LENGTH);

    // free host memory
    delete[] h_z_from_cuda;
    delete[] h_z;
    delete[] h_x;
    delete[] h_y;

    // efficiency here is speedup per GPU thread launched
    double speedup = t1 / tp;
    double efficiency = speedup / (numOfBlocks * threadsPerBlock);
    std::cout << "speedup: " << speedup << std::endl;
    std::cout << "efficiency: " << efficiency << std::endl;
    return 0;
}
8,015
// Ainda não tem nada aqui, pode ir embora. //Se quer ver um exemplo veja o test.cu.
8,016
/*
 * Atomic rate coefficients for a hydrogen/helium chemistry and cooling
 * network, parameterized by gas temperature _t [K].  The lam = 2*T_thr/T
 * functional forms look like the Hui & Gnedin (1997) fitting formulae —
 * TODO confirm against the paper before relying on the coefficients.
 * _case selects Case A (_case == 0) or Case B recombination.
 */

/* Ionization threshold temperatures [K] (E_ion / k_B for 13.6, 24.6, 54.4 eV). */
#define TTR_HI 157807.0
#define TTR_HeI 285335.0
#define TTR_HeII 631515.0
/* erg -> eV conversion factor. */
#define ERG_TO_EV 6.242e11

/* Recombination rate coefficient HII + e -> HI. */
__device__ float rec_HII(float _t, int _case) {
    float _a;
    float lam = 2.0*TTR_HI/_t;
    // Case A
    if(_case == 0) {
        _a = 1.269e-13*powf(lam, 1.503)/powf(1.0 + powf(lam/0.522, 0.470), 1.923);
    } else {
        _a = 2.753e-14*powf(lam, 1.500)/powf(1.0 + powf(lam/2.740, 0.407), 2.242);
    }
    return _a;
}

/* HII recombination cooling rate; converted from erg to eV via ERG_TO_EV. */
__device__ float rec_cool_HII(float _t, int _case) {
    float _a;
    float lam = 2.0*TTR_HI/_t;
    // Case A
    if(_case == 0) {
        _a = _t*1.778e-29*powf(lam, 1.965)/powf(1.0 + powf(lam/0.541, 0.502), 2.697);
    } else {
        _a = _t*3.435e-30*powf(lam, 1.970)/powf(1.0 + powf(lam/2.250, 0.376), 3.720);
    }
    return _a*ERG_TO_EV;
}

/* Recombination rate coefficient HeII + e -> HeI.
 * Returns 0 outside the fit's validity window (5e3 K .. 5e5 K). */
__device__ float rec_HeII(float _t, int _case) {
    float _a;
    float lam = 2.0*TTR_HeI/_t;
    if(_t <= 5.e3 || _t >= 5.e5)
        return 0.0;
    // Case A
    if(_case == 0) {
        _a = 3.0e-14*powf(lam, 0.654);
    } else {
        _a = 1.26e-14*powf(lam, 0.750);
    }
    return _a;
}

/* HeII recombination cooling: kT per recombination times the rate above. */
__device__ float rec_cool_HeII(float _t, int _case) {
    float kb = 8.617e-5;   // Boltzmann constant [eV/K]
    float _a = (kb*_t)*rec_HeII(_t, _case);
    return _a;
}

/* Recombination rate coefficient HeIII + e -> HeII
 * (hydrogenic fit scaled by a factor 2). */
__device__ float rec_HeIII(float _t, int _case) {
    float _a;
    float lam = 2.0*TTR_HeII/_t;
    // Case A
    if(_case == 0) {
        _a = 2.0*1.269e-13*powf(lam, 1.503)/powf(1.0 + powf(lam/0.522, 0.470), 1.923);
    } else {
        _a = 2.0*2.753e-14*powf(lam, 1.500)/powf(1.0 + powf(lam/2.740, 0.407), 2.242);
    }
    return _a;
}

/* HeIII recombination cooling (hydrogenic fit scaled by 8); erg -> eV. */
__device__ float rec_cool_HeIII(float _t, int _case) {
    float _a;
    float lam = 2.0*TTR_HeII/_t;
    // Case A
    if(_case == 0) {
        _a = _t*8.0*1.778e-29*powf(lam, 1.965)/powf(1.0 + powf(lam/0.541, 0.502), 2.697);
    } else {
        _a = _t*8.0*3.435e-30*powf(lam, 1.970)/powf(1.0 + powf(lam/2.250, 0.376), 3.720);
    }
    return _a*ERG_TO_EV;
}

/* Collisional ionization rate coefficient of HI.
 * Returns 0 outside the fit's validity window (1e4 K .. 1e9 K). */
__device__ float col_HI(float _t) {
    float lam = 2.0*TTR_HI/_t;
    if(_t < 1.e4 || _t > 1.e9)
        return 0.0;
    float _a = 21.11*powf(_t, -1.5)*expf(-lam/2.0);
    _a *= powf(lam, -1.089)/powf(1.0 + powf(lam/0.354, 0.874), 1.101);
    return _a;
}

/* Collisional ionization cooling of HI: threshold energy per ionization. */
__device__ float col_cool_HI(float _t) {
    float kb = 8.617e-5;   // Boltzmann constant [eV/K]
    float _a = (kb*TTR_HI)*col_HI(_t);
    return _a;
}

/* Collisional ionization rate coefficient of HeI (valid 1e4 K .. 1e9 K). */
__device__ float col_HeI(float _t) {
    float lam = 2.0*TTR_HeI/_t;
    if(_t < 1.e4 || _t > 1.e9)
        return 0.0;
    float _a = 32.38*powf(_t, -1.5)*expf(-lam/2.0);
    _a *= powf(lam, -1.146)/powf(1.0 + powf(lam/0.416, 0.987), 1.056);
    return _a;
}

/* Collisional ionization cooling of HeI. */
__device__ float col_cool_HeI(float _t) {
    float kb = 8.617e-5;   // Boltzmann constant [eV/K]
    float _a = (kb*TTR_HeI)*col_HeI(_t);
    return _a;
}

/* Collisional ionization rate coefficient of HeII (valid 1e4 K .. 1e9 K). */
__device__ float col_HeII(float _t) {
    float lam = 2.0*TTR_HeII/_t;
    if(_t < 1.e4 || _t > 1.e9)
        return 0.0;
    float _a = 19.95*powf(_t, -1.5)*expf(-lam/2.0);
    _a *= powf(lam, -1.089)/powf(1.0 + powf(lam/0.553, 0.735), 1.275);
    return _a;
}

/* Collisional ionization cooling of HeII. */
__device__ float col_cool_HeII(float _t) {
    float kb = 8.617e-5;   // Boltzmann constant [eV/K]
    //float lam = 2.0*TTR_HeII/_t;
    float _a = (kb*TTR_HeII)*col_HeII(_t);
    return _a;
}

/* Collisional excitation cooling of HI (valid 5e3 K .. 5e5 K); erg -> eV. */
__device__ float colex_HI(float _t) {
    float lam = 2.0*TTR_HI/_t;
    if(_t < 5.e3 || _t > 5.e5)
        return 0.0;
    float _a = 7.5e-19*expf(-0.375*lam)/(1.0+powf(_t/1.e5, 0.5));
    return _a*ERG_TO_EV;
}
8,017
// Copyright 2020 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdio.h>
#include <cassert>

#define THREADBLOCK_SIZE 512

// Gathers per-sample numerical (int8) and categorical (int32) feature
// buffers into two packed output buffers.
//
// Launch layout: one block per sample (gridDim.x == sampleCount),
// THREADBLOCK_SIZE threads per block; each thread copies elements with a
// block-stride loop. sampleOffsetsBuf[i] is the element offset of sample i
// in the packed outputs; sampleSizesBuf[i] its length in "rows", each row
// holding numericVolume int8 values and categoricalVolume int32 values.
__launch_bounds__(THREADBLOCK_SIZE) __global__ void gatherKernel(
    const int8_t ** __restrict numericalInputPtrBuffer,
    const int32_t ** __restrict categoricalInputPtrBuffer,
    const size_t * __restrict sampleSizesBuf,
    const size_t * __restrict sampleOffsetsBuf,
    int8_t * __restrict numericalOutputBuf,
    int32_t * __restrict categoricalOutputBuf,
    int sampleCount,
    int numericVolume,
    int categoricalVolume)
{
    int sampleId = blockIdx.x;
    int laneId = threadIdx.x;

    const int8_t * numericalInputBuffer = numericalInputPtrBuffer[sampleId];
    const int32_t * categoricalInputBuffer = categoricalInputPtrBuffer[sampleId];
    int sampleSize = sampleSizesBuf[sampleId];
    int sampleOffset = sampleOffsetsBuf[sampleId];

    // Copy this sample's numerical bytes into its slot of the packed output.
    int numericalElems = sampleSize * numericVolume;
    int8_t * numericDstBuf = numericalOutputBuf + sampleOffset * numericVolume;
    for(int elemId = laneId; elemId < numericalElems; elemId += THREADBLOCK_SIZE)
    {
        // __ldg: route the gather through the read-only data cache
        numericDstBuf[elemId] = __ldg(numericalInputBuffer + elemId);
    }

    // Copy this sample's categorical values likewise.
    int categoricalElems = sampleSize * categoricalVolume;
    int32_t * categoricalDstBuf = categoricalOutputBuf + sampleOffset * categoricalVolume;
    for(int elemId = laneId; elemId < categoricalElems; elemId += THREADBLOCK_SIZE)
    {
        categoricalDstBuf[elemId] = __ldg(categoricalInputBuffer + elemId);
    }
}

// Host-side launcher for gatherKernel on the given stream.
// Asynchronous: the caller owns synchronization on `stream`.
void runGatherKernel(
    const int8_t ** numericalInputPtrBuffer,
    const int32_t ** categoricalInputPtrBuffer,
    const size_t * sampleSizesBuf,
    const size_t * sampleOffsetsBuf,
    int8_t * numericalOutputBuf,
    int32_t * categoricalOutputBuf,
    int sampleCount,
    int numericVolume,
    int categoricalVolume,
    cudaStream_t stream)
{
    gatherKernel<<<sampleCount, THREADBLOCK_SIZE, 0, stream>>>(
        numericalInputPtrBuffer,
        categoricalInputPtrBuffer,
        sampleSizesBuf,
        sampleOffsetsBuf,
        numericalOutputBuf,
        categoricalOutputBuf,
        sampleCount,
        numericVolume,
        categoricalVolume);

    // BUG FIX: the original wrote assert(cudaGetLastError() == cudaSuccess).
    // Under NDEBUG the whole expression -- including the error query -- is
    // compiled out, so launch failures were silently ignored in release
    // builds. Query the status unconditionally and report before asserting.
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "gatherKernel launch failed: %s\n", cudaGetErrorString(status));
    }
    assert(status == cudaSuccess);
}
8,018
/********************************************************************************************
    source Code   : globalMemoryAccessPatterns.cu
    Objective     : Example code to demonstrate the different access patterns of the
                    global memory and the corresponding advantages in terms of the
                    bandwidth that is achievable.
    Description   : The six patterns that are discussed in the programming guide which
                    the program is implementing:
                    1) coalesced float memory access, resulting in a single memory transaction
                    2) coalesced float memory access (divergent warp), resulting in a single memory transaction
                    3) non-sequential float memory access, resulting in 16 memory transactions
                    4) access with a misaligned starting address, resulting in 16 memory transactions
                    5) non-contiguous float memory access, resulting in 16 memory transactions
                    6) non-coalesced float3 memory access, resulting in 16 memory transactions
                    For the six patterns along with cudaMemcpy, the bandwidth is calculated.
    input         : none
    output        : The different bandwidths that are achieved for different access patterns
    Modified      : Aug 2011
    Author        : RarchK
*****************************************************************************************/

#include <cuda.h>
#include <stdio.h>
#include <float.h>

// the six patterns quoted in programming guide 2.3.1 for devices with compute capability 1.1
#define NO_OF_PATTERNS 6
// number of float elements; 20 M elements = 80 MB of float data
// (the original "20 MB" comment counted elements, not bytes)
#define ARRAY_SIZE 20000000
#define BLOCK_SIZE 512
#define NTIMES 10

#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))

void printResults(void); // forward declaration

//////////////////////////////////////////////////////////////////////////////////////////////
//
// Scenario 1 of the performance guidelines: successive threads access successive
// global memory locations and the array is aligned at 128 bytes ->
// coalesced float memory access, resulting in a single memory transaction.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void coalescedGMAccess(float* dest,float* src,long size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)
        dest[idx] = src[idx];
}

//////////////////////////////////////////////////////////////////////////////////////////////
//
// Scenario 2: successive threads access successive, aligned locations but some
// threads make no access at all -> coalesced float memory access (divergent
// warp), still a single memory transaction.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void coalescedGMAccessDivergent(float* dest,float* src,long size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)
        // in every warp of 32 threads, the 4 with idx % 8 == 0 skip the access
        if(idx % 8 != 0)
            dest[idx] = src[idx];
}

//////////////////////////////////////////////////////////////////////////////////////////////
//
// Utility kernel: fills array[0..size) with the given value.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void setArray(float *array, float value, int size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)
        array[idx] = value;
}
//////////////////////////////////////////////////////////////////////////////////////////////
//
// Scenario 4: successive threads access successive locations but the starting
// address is misaligned by one element -> 16 memory transactions per half warp.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void nonCoalescedGMAccessMisalign(float* dest,float* src,long size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size-1)   // the last thread is skipped so idx+1 stays in bounds
        dest[idx+1] = src[idx+1];
}

//////////////////////////////////////////////////////////////////////////////////////////////
//
// Scenario 3: non-sequential float access. In each half warp of 16 threads the
// third and fourth threads exchange the locations they access -> 16 memory
// transactions.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void nonCoalescedGMAccessNonSeq(float* dest,float* src,long size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)
    {
        // BUG FIX: the original assigned the absolute indices 4 and 3 here,
        // so every 16th thread in the whole grid collided on elements 3 and 4
        // (a data race) instead of swapping with its neighbour inside its own
        // 16-element segment as the comment describes.
        if(idx % 16 == 3)
            idx = idx + 1;      // thread 3 takes thread 4's element
        else if(idx % 16 == 4)
            idx = idx - 1;      // thread 4 takes thread 3's element
        dest[idx] = src[idx];
    }
}

//////////////////////////////////////////////////////////////////////////////////////////////
//
// Scenario 5: non-contiguous float access. Threads with id greater than 3
// access location id+1 -> 16 memory transactions.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void nonCoalescedGMAccessNonContiguous(float* dest,float* src,long size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx > 3)     // threads with their id more than 3 access the location id+1
        idx++;
    // BUG FIX: bounds-check AFTER the shift; the original checked the
    // unshifted index, so the thread with idx == size-1 wrote dest[size],
    // one element past the end of the array.
    if(idx < size)
        dest[idx] = src[idx];
}

//////////////////////////////////////////////////////////////////////////////////////////////
//
// Scenario 6: contiguous threads read the .x component of contiguous float3
// elements (12-byte stride) -> non-coalesced access, 16 memory transactions.
//
//////////////////////////////////////////////////////////////////////////////////////////////
__global__ void nonCoalescedGMAccessFloat3(float3 *src, float* dest,long size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)
    {
        dest[idx] = src[idx].x;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////
// one extra slot holds the result for cudaMemcpy besides the 6 access patterns
static double avgtime[NO_OF_PATTERNS+1] = {0}, maxtime[NO_OF_PATTERNS+1] = {0}, mintime[NO_OF_PATTERNS+1];
static float bandWidths[NO_OF_PATTERNS+1] = {0};

//////////////////////////////////////////////////////////////////////////////////////////////
//
// main routine: times each access pattern NTIMES, accumulates min/avg/max
// times (skipping the warm-up iteration) and derives the bandwidths.
//
//////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char* argv[])
{
    float *srcArray , *destArray;
    float3 *srcArray3;               // the array of float3 datatype (pattern 6)
    float elapsedTimes[NO_OF_PATTERNS+1][NTIMES];
    cudaEvent_t start,stop;
    cudaError_t err = cudaSuccess;
    double bytes = 2 * sizeof(float) * ARRAY_SIZE;   // one read + one write per element

    // allocating the two arrays on the device; cudaMalloc returns aligned addresses.
    // BUG FIX: check for ANY failure, not only cudaErrorMemoryAllocation,
    // so other (sticky) errors are not silently ignored.
    err = cudaMalloc((void **)&srcArray,ARRAY_SIZE*sizeof(float));
    if(err != cudaSuccess)
    {
        printf("error in device memory allocation for - srcArray\n exiting out of the program.....\n");
        exit(-1);
    }
    err = cudaMalloc((void **)&destArray,ARRAY_SIZE*sizeof(float));
    if(err != cudaSuccess)
    {
        printf("error in device memory allocation for - destArray\n exiting out of the program.....\n");
        exit(-1);
    }
    err = cudaMalloc((void **)&srcArray3,ARRAY_SIZE*sizeof(float3));
    if(err != cudaSuccess)
    {
        printf("error in device memory allocation for - srcArray3\n exiting out of the program.....\n");
        exit(-1);
    }

    // events used for timing the kernels
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // 1D grid size (ceiling division)
    int gridSize = ARRAY_SIZE/BLOCK_SIZE;
    if( ARRAY_SIZE % BLOCK_SIZE != 0 )
        gridSize += 1;

    // initialise the float arrays on the device
    // (srcArray3 is deliberately left uninitialised: only its access
    // pattern is measured, the values are never inspected)
    setArray <<< gridSize,BLOCK_SIZE >>> (srcArray,1.0f,ARRAY_SIZE);
    setArray <<< gridSize,BLOCK_SIZE >>> (destArray,0.0f,ARRAY_SIZE);
    // BUG FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
    cudaDeviceSynchronize();

    // running each pattern NTIMES
    for(int k=0; k < NTIMES; k++)
    {
        // PATTERN 1: coalesced float access, a single memory transaction
        cudaEventRecord(start,0);
        coalescedGMAccess <<< gridSize,BLOCK_SIZE >>> (destArray,srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[0][k],start,stop);

        // PATTERN 2: coalesced access with a divergent warp
        cudaEventRecord(start,0);
        coalescedGMAccessDivergent <<< gridSize,BLOCK_SIZE >>> (destArray,srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[1][k],start,stop);

        // PATTERN 3: non-sequential float access, 16 memory transactions
        cudaEventRecord(start,0);
        nonCoalescedGMAccessNonSeq <<< gridSize,BLOCK_SIZE >>> (destArray,srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[2][k],start,stop);

        // PATTERN 4: misaligned starting address, 16 memory transactions
        cudaEventRecord(start,0);
        nonCoalescedGMAccessMisalign <<< gridSize,BLOCK_SIZE >>> (destArray,srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[3][k],start,stop);

        // PATTERN 5: non-contiguous float access, 16 memory transactions
        cudaEventRecord(start,0);
        nonCoalescedGMAccessNonContiguous <<< gridSize,BLOCK_SIZE >>> (destArray,srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[4][k],start,stop);

        // PATTERN 6: non-coalesced float3 access, 16 memory transactions
        cudaEventRecord(start,0);
        nonCoalescedGMAccessFloat3 <<< gridSize,BLOCK_SIZE >>> (srcArray3,destArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[5][k],start,stop);

        // PATTERN 7: device-to-device cudaMemcpy as the reference
        cudaEventRecord(start,0);
        cudaMemcpy(destArray,srcArray,ARRAY_SIZE * sizeof(float),cudaMemcpyDeviceToDevice);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[6][k],start,stop);
    } // end of the NTIMES loop

    // initialise the mintime array
    for(int i=0; i < NO_OF_PATTERNS+1; i++)
        mintime[i] = FLT_MAX;

    for (int k=1; k < NTIMES; k++)   // skipping the first (warm-up) iteration
    {
        for (int i=0; i < NO_OF_PATTERNS+1; i++)
        {
            avgtime[i] = avgtime[i] + elapsedTimes[i][k];
            mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
            maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
        }
    }

    // bandwidth reached by each pattern: bytes per millisecond, based on the
    // best (minimum) time; printResults divides by 1e6 to report GB/sec
    for(int i=0; i < NO_OF_PATTERNS+1; i++)
    {
        avgtime[i] = avgtime[i]/(double)(NTIMES-1);   // average over timed runs
        bandWidths[i] = bytes/mintime[i];
    }

    printResults();
    printf("\n\n**** successful termination of the program ****\n\n");

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(srcArray);
    cudaFree(destArray);
    cudaFree(srcArray3);   // BUG FIX: srcArray3 was never freed
    return 0;
}

//////////////////////////////////////////////////////////////////////////////////////////////
//
// prints, for every access pattern, the achieved bandwidth (GB/sec: the
// stored bytes/ms value divided by 1e6) and the min/avg/max times in ms.
//
//////////////////////////////////////////////////////////////////////////////////////////////
void printResults()
{
    printf("\nComparing different access patterns of the global memory for the devices with compute capability <= 1.1\n");
    printf("The array size (single precision): %d\n",ARRAY_SIZE);
    printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
    printf("Pattern \t\t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
    printf("-------------------------------------------------------------------------------------------------------------------------------\n");
    // one row per access pattern
    for(int i=0; i < NO_OF_PATTERNS+1; i++)
    {
        switch(i)
        {
            case 0: printf("coalesced access                  "); break;
            case 1: printf("coalesced access (divergent warp) "); break;
            case 2: printf("non-sequential access             "); break;
            case 3: printf("accessing misaligned address      "); break;
            case 4: printf("non-contiguous access             "); break;
            case 5: printf("non-coalesced float3 access       "); break;
            case 6: printf("using cudaMemcpy                  "); break;
        }
        printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
    }
    printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////
8,019
#include <algorithm>
#include <chrono>
#include <cstddef>
#include <iostream>

#include <cuda_runtime.h>

#define THREADS_PER_DIM 32

namespace
{
// Naive matrix multiply C = A * B for N x N matrices in column-major layout
// (element (r,c) stored at index r + c*N). One thread per output element;
// expects a 2-D launch covering at least N x N threads.
__global__ void multiply_dp_gpu_kernel( double * A, double * B, double * C, size_t N )
{
    const size_t row = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t col = blockIdx.x * blockDim.x + threadIdx.x;

    if( row < N && col < N )
    {
        double dot = 0.0;
        for( size_t idx = 0; idx < N; ++idx )
            dot += A[row + idx * N] * B[idx + col * N];
        C[row + col * N] = dot;
    }
}

// Tiled shared-memory matrix multiply (column-major, THREADS_PER_DIM tiles).
// dp_length is the per-tile dot-product length, min(N, THREADS_PER_DIM).
//
// BUG FIX: the original placed __syncthreads() inside the
// "if (row < N && col < N)" guard and loaded the tiles without bounds
// checks. For N that is not a multiple of blockDim that is a divergent
// barrier (undefined behaviour) plus out-of-bounds global reads. Now every
// thread reaches both barriers and out-of-range tile entries are
// zero-filled, which leaves the dot product unchanged.
__global__ void multiply_dp_gpu_shared_kernel( double * A, double * B, double * C, size_t N, size_t dp_length )
{
    const size_t row = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t number_subblocks = ( N + blockDim.x - 1 ) / blockDim.x;   // ceil(N / tile)

    __shared__ double Asub[THREADS_PER_DIM * THREADS_PER_DIM];
    __shared__ double Bsub[THREADS_PER_DIM * THREADS_PER_DIM];

    double dot = 0.0;
    for( size_t subblock = 0; subblock < number_subblocks; ++subblock )
    {
        const size_t Acol = subblock * blockDim.x + threadIdx.x;
        const size_t Brow = subblock * blockDim.y + threadIdx.y;

        // zero-fill out-of-range entries so partial tiles contribute nothing
        Asub[threadIdx.x * THREADS_PER_DIM + threadIdx.y] =
            ( row < N && Acol < N ) ? A[row + Acol * N] : 0.0;
        Bsub[threadIdx.x * THREADS_PER_DIM + threadIdx.y] =
            ( Brow < N && col < N ) ? B[Brow + col * N] : 0.0;

        __syncthreads();   // tiles fully loaded before use

        for( size_t idx = 0; idx < dp_length; ++idx )
            dot += Asub[idx * THREADS_PER_DIM + threadIdx.y] * Bsub[threadIdx.x * THREADS_PER_DIM + idx];

        __syncthreads();   // done with the tiles before the next load
    }

    if( row < N && col < N )
        C[row + col * N] = dot;
}
}

// Runs the naive kernel and reports the wall-clock duration in seconds.
// A, B, C are device pointers to column-major N x N matrices.
void multiply_dp_gpu( double * A, double * B, double * C, size_t N, double & duration )
{
    const size_t threads_per_dim = THREADS_PER_DIM;
    const size_t min_blocks_per_dim = 1;
    // BUG FIX: ceiling division; the original floor division left the last
    // partial tile of rows/columns uncomputed when N % THREADS_PER_DIM != 0.
    const size_t blocks_per_dim =
        std::max( ( N + threads_per_dim - 1 ) / threads_per_dim, min_blocks_per_dim );

    dim3 block_size( threads_per_dim, threads_per_dim );
    dim3 grid_size( blocks_per_dim, blocks_per_dim );

    cudaGetLastError();   // clear any stale error before timing

    const auto start = std::chrono::steady_clock::now();
    multiply_dp_gpu_kernel<<< grid_size, block_size >>>( A, B, C, N );
    cudaDeviceSynchronize();
    const auto end = std::chrono::steady_clock::now();

    duration = std::chrono::duration<double>( end - start ).count();
    std::cout << "N = " << N << ": Last CUDA error: " << cudaGetLastError() << std::endl;
}

// Runs the shared-memory tiled kernel and reports the wall-clock duration
// in seconds. A, B, C are device pointers to column-major N x N matrices.
void multiply_dp_gpu_shared( double * A, double * B, double * C, size_t N, double & duration )
{
    const size_t threads_per_dim = THREADS_PER_DIM;
    const size_t min_blocks_per_dim = 1;
    // BUG FIX: ceiling division (see multiply_dp_gpu)
    const size_t blocks_per_dim =
        std::max( ( N + threads_per_dim - 1 ) / threads_per_dim, min_blocks_per_dim );
    const size_t dp_length = std::min( N, threads_per_dim );

    dim3 block_size( threads_per_dim, threads_per_dim );
    dim3 grid_size( blocks_per_dim, blocks_per_dim );

    cudaGetLastError();   // clear any stale error before timing

    const auto start = std::chrono::steady_clock::now();
    multiply_dp_gpu_shared_kernel<<< grid_size, block_size >>>( A, B, C, N, dp_length );
    cudaDeviceSynchronize();
    const auto end = std::chrono::steady_clock::now();

    duration = std::chrono::duration<double>( end - start ).count();
    std::cout << "N = " << N << ": Last CUDA error: " << cudaGetLastError() << std::endl;
}
8,020
#include "includes.h" __global__ void kernelFormBinStart ( int* devOutputBinStart, unsigned int* devInputBinCirPairBin, unsigned int bcPairLen) { __shared__ int cache[257]; //256 bcpair + the last bc pair in the previous block int bcPairIdx = blockDim.x * blockIdx.x + threadIdx.x; if (bcPairIdx >= bcPairLen) { return; } cache[1 + threadIdx.x] = devInputBinCirPairBin[bcPairIdx]; if ( threadIdx.x == 0 ) { if ( bcPairIdx != 0 ) { cache[0] = devInputBinCirPairBin[bcPairIdx - 1]; } else { cache[0] = -1; } } __syncthreads(); if (cache[1 + threadIdx.x] != cache[threadIdx.x]) { //printf("b: %d, s: %d\n", cache[1 + threadIdx.x], bcPairIdx); devOutputBinStart[cache[1 + threadIdx.x]] = bcPairIdx; } }
8,021
#include "includes.h" __global__ static void timedReduction(const float *input, float *output, clock_t *timer) { // __shared__ float shared[2 * blockDim.x]; extern __shared__ float shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; if (tid == 0) timer[bid] = clock(); // Copy input. shared[tid] = input[tid]; shared[tid + blockDim.x] = input[tid + blockDim.x]; // Perform reduction to find minimum. for (int d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (tid < d) { float f0 = shared[tid]; float f1 = shared[tid + d]; if (f1 < f0) { shared[tid] = f1; } } } // Write result. if (tid == 0) output[bid] = shared[0]; __syncthreads(); if (tid == 0) timer[bid+gridDim.x] = clock(); }
8,022
__device__ __forceinline__ float sigmoid(float a) { return 1.0 / (1.0 + exp(-a)); } __global__ void sigmoidKernel(float *a, int l) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < l; i += stride) a[i] = sigmoid(a[i]); } extern "C" void sigmoid_wrapper(float *a, int l) { int blockSize = 256; int numBlocks = (l + blockSize - 1) / blockSize; sigmoidKernel<<<numBlocks, blockSize>>>(a, l); } __device__ __forceinline__ float sigmoid_prime(float a) { float s = sigmoid(a); return s * (1.0f - s); } __global__ void sigmoidPrimeKernel(float *a, int l) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < l; i += stride) a[i] = sigmoid_prime(a[i]); } extern "C" void sigmoid_prime_wrapper(float *a, int l) { int blockSize = 256; int numBlocks = (l + blockSize - 1) / blockSize; sigmoidPrimeKernel<<<numBlocks, blockSize>>>(a, l); }
8,023
#include "includes.h" __global__ void non_diag_mask_kernel(const int64_t *row_data, const int64_t *col_data, bool *out_data, int64_t N, int64_t k, int64_t num_diag, int64_t numel) { int64_t thread_idx = blockDim.x * blockIdx.x + threadIdx.x; if (thread_idx < numel) { int64_t r = row_data[thread_idx], c = col_data[thread_idx]; if (k < 0) { if (r + k < 0) { out_data[thread_idx] = true; } else if (r + k >= N) { out_data[thread_idx + num_diag] = true; } else if (r + k > c) { out_data[thread_idx + r + k] = true; } else if (r + k < c) { out_data[thread_idx + r + k + 1] = true; } } else { if (r + k >= N) { out_data[thread_idx + num_diag] = true; } else if (r + k > c) { out_data[thread_idx + r] = true; } else if (r + k < c) { out_data[thread_idx + r + 1] = true; } } } }
8,024
/**
 * Parallel computing (2015-2016 course)
 *
 * Antennas setup
 * CUDA implementation
 *
 * @author Hector Del Campo Pando
 * @author Alberto Gutierrez Perez
 */

// General includes
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>

// Parallel-computing utilities
#include <time.h>
#include <cuda.h>
#include <math.h>

/**
 * Antenna structure: (y, x) position on the map.
 */
typedef struct {
    int y;
    int x;
} Antena;

/**
 * Holds the global maximum value and its flat map position.
 */
typedef struct {
    int max;
    int pos;
} Max_data;

/**
 * Accessors for the maximum and its position.
 */
#define valor(m) m->max
#define pos(max) max->pos

/**
 * Map access helpers (row-major, `cols` must be in scope).
 */
#define m(y,x) mapa[ (y * cols) + x ]
#define posicion(y,x) (y * cols) + x
#define row(n) ((int)n) / cols
#define col(n) n % cols

/**
 * Squared Manhattan distance from antenna a to cell (i, j).
 */
#define manhattan(a, i, j) (abs(a.x -j) + abs(a.y - i)) * (abs(a.x -j) + abs(a.y - i))

#define nueva_antena(n) {row(n), col(n)}

// Flat global thread index for a 2-D grid of 2-D blocks
#define posicion_thread() (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.x + threadIdx.y * blockDim.x)

/**
 * Reduction launch parameters (NUM_BLOCKS expands using local rows/cols).
 */
#define MAX_THREADS 1024
#define NUM_THREADS_BLOCK 256
#define NUM_BLOCKS ((int)rows*cols/NUM_THREADS_BLOCK + 1)

// Initialise every map cell to INT_MAX ("not covered yet").
__global__ void iniciarMapa(int *mapa, int rows, int cols){
    int posicion = posicion_thread();
    if(posicion < rows*cols)
        mapa[posicion] = INT_MAX;
}

// Block-wise argmax reduction: finds the maximum value and its position,
// breaking ties by the smaller position. When entrada_pos is NULL the input
// is the raw map and positions are the flat indices themselves; otherwise
// entrada_pos carries the positions from a previous reduction pass.
// Requires dynamic shared memory for 2 * blockDim.x ints (values + positions)
// and blockDim.x == blockSize.
// NOTE(review): the final unrolled id<32 stage relies on implicit warp
// synchrony and non-volatile shared reads -- fine on the pre-Volta GPUs this
// coursework targeted, but it would need __syncwarp()/volatile on CC >= 7.0.
template <unsigned int blockSize>
__global__ void max_kernel(int *entrada_max, int *entrada_pos, int *salida_max, int *salida_pos, int size){

    extern __shared__ int s[];

    unsigned int nHilos = blockDim.x;
    unsigned int id = threadIdx.x;           // thread id within the block
    unsigned int idBloque = blockIdx.x;      // block id within the grid
    unsigned int posicion = id + idBloque*nHilos;   // position in the map

    int *maximos = (int*)s;                  // shared values
    int *posiciones = (int*)&maximos[nHilos];   // shared positions

    int myMax = INT_MIN, pos = INT_MAX;

    // Grid-stride scan: each thread folds its strided elements into a
    // private (myMax, pos) pair before the shared-memory reduction.
    if(entrada_pos == NULL){
        while(posicion < size){
            if(entrada_max[posicion] > myMax){
                myMax = entrada_max[posicion];
                pos = posicion;
            }
            posicion+=(nHilos*gridDim.x);
        }
    }else{
        while(posicion < size){
            if(entrada_max[posicion] > myMax){
                myMax = entrada_max[posicion];
                pos = entrada_pos[posicion];
            }
            posicion+=(nHilos*gridDim.x);
        }
    }

    maximos[id] = myMax;
    posiciones[id] = pos;
    __syncthreads();

    // Tree reduction in shared memory, compile-time unrolled by blockSize.
    if(blockSize>= 1024){
        if( id < 512)
            if(maximos[id + 512] > maximos[id] || (maximos[id + 512] == maximos[id] && posiciones[id+512] < posiciones[id])){
                maximos[id] = maximos[id+512];
                posiciones[id] = posiciones[id+512];
            }
        __syncthreads();
    }
    if(blockSize>= 512){
        if( id < 256)
            if(maximos[id + 256] > maximos[id] || (maximos[id + 256] == maximos[id] && posiciones[id+256] < posiciones[id])){
                maximos[id] = maximos[id+256];
                posiciones[id] = posiciones[id+256];
            }
        __syncthreads();
    }
    if(blockSize>= 256){
        if( id < 128)
            if(maximos[id + 128] > maximos[id] || (maximos[id + 128] == maximos[id] && posiciones[id+128] < posiciones[id])){
                maximos[id] = maximos[id+128];
                posiciones[id] = posiciones[id+128];
            }
        __syncthreads();
    }
    if(blockSize>= 128){
        if( id < 64)
            if(maximos[id + 64] > maximos[id] || (maximos[id + 64] == maximos[id] && posiciones[id+64] < posiciones[id])){
                maximos[id] = maximos[id+64];
                posiciones[id] = posiciones[id+64];
            }
        __syncthreads();
    }

    // Final warp: unrolled without barriers (implicit warp synchrony).
    if(id < 32){
        if(blockSize>= 64){ if((maximos[id + 32] > maximos[id] || (maximos[id + 32] == maximos[id] && posiciones[id+32] < posiciones[id]))){maximos[id] = maximos[id+32]; posiciones[id] = posiciones[id+32];}}
        if(blockSize>=32){ if((maximos[id + 16] > maximos[id] || (maximos[id + 16] == maximos[id] && posiciones[id+16] < posiciones[id]))){maximos[id] = maximos[id+16]; posiciones[id] = posiciones[id+16];}}
        if(blockSize>=16){ if((maximos[id + 8] > maximos[id] || (maximos[id + 8] == maximos[id] && posiciones[id+8] < posiciones[id]))){maximos[id] = maximos[id+8]; posiciones[id] = posiciones[id+8];}}
        if(blockSize>=8){ if((maximos[id + 4] > maximos[id] || (maximos[id + 4] == maximos[id] && posiciones[id+4] < posiciones[id]))){maximos[id] = maximos[id+4]; posiciones[id] = posiciones[id+4];}}
        if(blockSize>=4){ if((maximos[id + 2] > maximos[id] || (maximos[id + 2] == maximos[id] && posiciones[id+2] < posiciones[id]))){maximos[id] = maximos[id+2]; posiciones[id] = posiciones[id+2];}}
        if(blockSize>=2){ if((maximos[id + 1] > maximos[id] || (maximos[id + 1] == maximos[id] && posiciones[id+1] < posiciones[id]))){maximos[id] = maximos[id+1]; posiciones[id] = posiciones[id+1];}}
    }

    // Thread 0 publishes this block's winner.
    if(id == 0){
        salida_max[idBloque] = maximos[0];
        salida_pos[idBloque] = posiciones[0];
    }
}

// Updates the coverage map after placing an antenna: every cell keeps the
// minimum squared Manhattan distance to any antenna. Threads sweep outwards
// from the antenna in the four quadrants with block-sized strides; a sweep
// stops early once the new distance no longer improves the stored one.
// NOTE(review): `antena.y - desplVer` mixes int with unsigned -- it relies on
// wrap-around converting back to a negative int so the `i >= 0` guard fires;
// works in practice but is implementation-defined territory.
__global__ void actualizar_kernel( int *mapa, Antena antena, int rows, int cols){

    // Every thread writes the same 0 at the antenna cell (benign duplicate).
    m(antena.y, antena.x) = 0;

    int nuevadist;
    unsigned int desplHor = threadIdx.x + blockIdx.x*blockDim.x;
    unsigned int desplVer = threadIdx.y + blockIdx.y*blockDim.y;
    int j;

    // Quadrant up-right
    for(int i = antena.y - desplVer; i >= 0; i-=blockDim.y){
        j = antena.x + desplHor;
        nuevadist = manhattan(antena,i,j);
        if(nuevadist > m(i,j)) break;
        for(; j < cols; j+=blockDim.x){
            nuevadist = manhattan(antena,i,j);
            if(nuevadist > m(i,j)) break;
            m(i,j) = nuevadist;
        }
    }

    // Quadrant down-left
    for(int i = antena.y + desplVer; i < rows; i+=blockDim.y){
        j = antena.x - desplHor;
        nuevadist = manhattan(antena,i,j);
        if(nuevadist > m(i,j)) break;
        for(; j >= 0; j-=blockDim.x){
            nuevadist = manhattan(antena,i,j);
            if(nuevadist > m(i,j)) break;
            m(i,j) = nuevadist;
        }
    }

    // Quadrant down-right
    for(int i = antena.y + desplVer; i < rows; i+=blockDim.y){
        j = antena.x + desplHor;
        nuevadist = manhattan(antena,i,j);
        if(nuevadist > m(i,j)) break;
        for(; j < cols; j+=blockDim.x){
            nuevadist = manhattan(antena,i,j);
            if(nuevadist > m(i,j)) break;
            m(i,j) = nuevadist;
        }
    }

    // Quadrant up-left
    for(int i = antena.y - desplVer; i >= 0; i-=blockDim.y){
        j = antena.x - desplHor;
        nuevadist = manhattan(antena,i,j);
        if(nuevadist > m(i,j)) break;
        for(; j >= 0; j-=blockDim.x){
            nuevadist = manhattan(antena,i,j);
            if(nuevadist > m(i,j)) break;
            m(i,j) = nuevadist;
        }
    }
}

/**
 * Helper to print the (host) map; antenna cells show as a coloured 'A'.
 */
void print_mapa(int * mapa, int rows, int cols, Antena * a){

    if(rows > 50 || cols > 30){
        printf("Mapa muy grande para imprimir\n");
        return;
    };

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_RESET   "\x1b[0m"

    printf("Mapa [%d,%d]\n",rows,cols);
    for(int i=0; i<rows; i++){
        for(int j=0; j<cols; j++){
            int val = m(i,j);
            if(val == 0){
                // red for the antenna passed in `a`, green for the others
                if(a != NULL && a->x == j && a->y == i){
                    printf( ANSI_COLOR_RED " A" ANSI_COLOR_RESET);
                } else {
                    printf( ANSI_COLOR_GREEN " A" ANSI_COLOR_RESET);
                }
            } else {
                printf("%4d",val);
            }
        }
        printf("\n");
    }
    printf("\n");
}

/**
 * CUDA debugging helper: copies the device map to the host, prints it and
 * waits for a key press.
 */
void print_mapa_cuda(int *mapa, int rows, int cols){
    int *m = (int*)malloc(sizeof(int)*rows*cols);
    cudaMemcpy(m, mapa, sizeof(int)*rows*cols, cudaMemcpyDeviceToHost);
    print_mapa(m,rows,cols,NULL);
    free(m);
    getchar();
}

/**
 * Updates the device map with a newly placed antenna.
 */
void actualizar(int *mapa, Antena antena, int rows, int cols){
    dim3 hilos(6,6);
    dim3 bloques(32);
    actualizar_kernel<<<bloques, hilos>>>(mapa,antena,rows,cols);
}

/**
 * Computes the maximum distance in the map and its position via a two-phase
 * GPU reduction: one pass over the map, then repeated passes over the
 * per-block results until a single (max, pos) pair remains.
 * `maximos` / `posiciones` are preallocated device scratch buffers.
 */
Max_data calcular_max(int * mapa, int *maximos, int *posiciones, int rows, int cols){

    int posicion;
    int maximo;
    int aux = NUM_BLOCKS, nBloques = NUM_BLOCKS;

    // First pass: reduce the raw map into one candidate per block.
    max_kernel<NUM_THREADS_BLOCK><<<nBloques, NUM_THREADS_BLOCK, sizeof(int)*NUM_THREADS_BLOCK*2>>>(mapa, NULL, maximos, posiciones,rows*cols);

    // Further passes: reduce the per-block candidates until one remains.
    nBloques = (nBloques - 1)/NUM_THREADS_BLOCK + 1;
    while(aux != 1){
        max_kernel<NUM_THREADS_BLOCK><<<nBloques, NUM_THREADS_BLOCK, sizeof(int)*2*NUM_THREADS_BLOCK>>>(maximos, posiciones, maximos, posiciones, aux);
        aux = nBloques;
        nBloques = (nBloques - 1)/NUM_THREADS_BLOCK + 1;
    }

    // Fetch the winning value and position from device memory.
    cudaMemcpy(&maximo, &maximos[0], sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&posicion, &posiciones[0], sizeof(int), cudaMemcpyDeviceToHost);

    Max_data max_global = {maximo, posicion};
    return max_global;
}

/**
 * Main: reads the map size, the maximum allowed distance and the initial
 * antennas from argv, then greedily adds antennas at the worst-covered cell
 * until every cell is within distMax, reporting the count and elapsed time.
 */
int main(int nargs, char ** vargs){

    //
    // 1. READ INPUT DATA
    //

    // Check the argument count
    if(nargs < 7){
        fprintf(stderr,"Uso: %s rows cols distMax nAntenas x0 y0 [x1 y1, ...]\n",vargs[0]);
        return -1;
    }

    // Parse the input arguments
    int rows = atoi(vargs[1]);
    int cols = atoi(vargs[2]);
    int distMax = atoi(vargs[3]);
    int nAntenas = atoi(vargs[4]);

    if(nAntenas<1 || nargs != (nAntenas*2+5)){
        fprintf(stderr,"Error en la lista de antenas\n");
        return -1;
    }

    // Banner
    printf("Calculando el número de antenas necesarias para cubrir un mapa de"
           " (%d x %d)\ncon una distancia máxima no superior a %d "
           "y con %d antenas iniciales\n\n",rows,cols,distMax,nAntenas);

    // Allocate memory for the initial antennas
    Antena *antenas = (Antena*)malloc(sizeof(Antena) * (size_t) nAntenas);
    if(!antenas){
        fprintf(stderr,"Error al reservar memoria para las antenas inicales\n");
        return -1;
    }

    // Read the antennas, validating they lie inside the map
    for(int i=0; i<nAntenas; i++){
        antenas[i].x = atoi(vargs[5+i*2]);
        antenas[i].y = atoi(vargs[6+i*2]);

        if(antenas[i].y<0 || antenas[i].y>=rows || antenas[i].x<0 || antenas[i].x>=cols ){
            fprintf(stderr,"Antena #%d está fuera del mapa\n",i);
            return -1;
        }
    }

    //
    // 2. INITIALISATION
    //

    // Start timing
    clock_t reloj = clock();
    double tiempo;

    cudaSetDevice(0);

    // Create the device map
    int * mapa;
    cudaMalloc((void**) &mapa , (rows*cols) * sizeof(int) );

    // Initialise the map to INT_MAX
    iniciarMapa<<<NUM_BLOCKS, NUM_THREADS_BLOCK>>>(mapa,rows,cols);

    // Place the initial antennas
    for(int i=0; i<nAntenas; i++){
        actualizar(mapa, antenas[i], rows, cols);
    }

    // Debug
#ifdef DEBUG
    print_mapa(mapa,rows,cols,NULL);
#endif

    //
    // 3. COMPUTE THE NEW ANTENNAS
    //

    // Counter of added antennas
    int nuevas = 0;
    Max_data max;

    // CUDA scratch buffers for the reduction
    int nBloques = NUM_BLOCKS;
    int *posiciones;
    int *maximos;
    cudaMalloc((void**) &posiciones, nBloques * sizeof(int));
    cudaMalloc((void**) &maximos, nBloques * sizeof(int));

    while(1){

        // Find the worst-covered cell
        max = calcular_max(mapa,maximos,posiciones,rows,cols);

        // Stop once every cell is within the allowed distance
        if (max.max <= distMax) break;

        // One more antenna needed
        nuevas++;

        // Place the new antenna and update the map
        Antena antena = nueva_antena(max.pos);
        actualizar(mapa,antena,rows,cols);
    }

    reloj = clock() - reloj;

    // Debug
#ifdef DEBUG
    print_mapa(mapa,rows,cols,NULL);
#endif

    // Release device resources
    cudaFree(mapa);
    cudaFree(posiciones);
    cudaFree(maximos);
    cudaDeviceReset();

    //
    // 4. REPORT RESULTS
    //

    // elapsed time
    tiempo = (double)reloj / CLOCKS_PER_SEC ;

    // Output
    printf("Result: %d\n",nuevas);
    printf("Time: %f\n",tiempo);

    return 0;
}
8,025
#include <stdio.h>
#include <stdlib.h>

// CUDA runtime
#include <cuda_runtime.h>

// NOTE(review): a fully commented-out earlier variant of MatrixMulCUDA5
// (single BLOCK_SIZE template parameter, hand-unrolled float4 FMAs) was
// removed here as dead code; recover it from version control if needed.

// Offset of element (row, col) in a row-major matrix with leading dimension ld.
#define OFFSET(row, col, ld) ((row) * (ld) + (col))

// Reinterpret 4 consecutive floats as a single float4 (address must be
// 16-byte aligned).
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])

// Block-sparse matrix multiply C = A * B.
//   A: block-sparse, stored as dense BLOCK_SIZE_M x BLOCK_SIZE_K tiles in
//      A_Val, addressed CSR-style by A_row_ptr (one entry per block-row) and
//      A_col_idx (block-column index of each stored tile).
//   B: dense, row-major, K x N.  C: dense, row-major, M x N.
// Launch: blockIdx.y selects the block-row of A / row band of C,
//         blockIdx.x selects the column band of C; the thread block is
//         (BLOCK_SIZE_N / THREAD_SIZE_X) x (BLOCK_SIZE_M / THREAD_SIZE_Y).
template <
    const int BLOCK_SIZE_M,   // height of the C tile each thread block computes
    const int BLOCK_SIZE_K,   // K-extent of the A/B tiles staged in shared memory
    const int BLOCK_SIZE_N,   // width of the C tile each thread block computes
    const int THREAD_SIZE_Y,  // height of the C sub-tile each thread computes
    const int THREAD_SIZE_X,  // width of the C sub-tile each thread computes
    const bool ENABLE_DOUBLE_BUFFER  // kept for interface parity; unused here
    >
__global__ void MatrixMulCUDA5(
    float * __restrict__ A_Val,
    int* __restrict__ A_col_idx,
    int* __restrict__ A_row_ptr,
    float * __restrict__ B,
    float * __restrict__ C,
    const int K,
    const int N) {
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // size of thread block
    const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X;
    const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y;
    const int THREAD_NUM_PER_BLOCK = bszy * bszx;

    // linear thread id within the block
    const int tid = ty * bszx + tx;

    // shared-memory staging tiles for A and B
    __shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K];
    __shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N];

    // registers accumulating this thread's C sub-tile
    float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0};
    // registers holding the current A-column / B-row fragments
    float frag_a[THREAD_SIZE_Y];
    float frag_b[THREAD_SIZE_X];

    // threads needed to load one row of a tile (float4 loads => / 4)
    const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
    const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;

    // row and column this thread loads within a tile
    const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;
    const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
    const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;

    // row stride when one thread loads multiple rows of a tile
    const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;

    // CSR walk over the non-zero tiles of block-row `by` of A
    int row_ptr_start = A_row_ptr[by];
    int row_ptr_end = A_row_ptr[by + 1];

    for (int row_ptr = row_ptr_start ; row_ptr < row_ptr_end ; row_ptr = row_ptr + 1) {
        // first row of B covered by this tile of A
        int tile_idx = A_col_idx[row_ptr] * BLOCK_SIZE_K;
        float* A = A_Val + BLOCK_SIZE_M * BLOCK_SIZE_K * row_ptr;

        // load A tile from global memory to shared memory
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) =
                FETCH_FLOAT4(A[OFFSET(
                    A_TILE_ROW_START + i, // row
                    A_TILE_COL,           // col
                    BLOCK_SIZE_K )]);
        }

        // load B tile from global memory to shared memory
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
            // BUGFIX: B is row-major with N columns, so its leading dimension
            // is N. The original passed K here, which is only correct for
            // square inputs (K == N); the earlier float4 variant of this
            // kernel also indexed B with stride N.
            FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) =
                FETCH_FLOAT4(B[OFFSET(
                    tile_idx + B_TILE_ROW_START + i, // row
                    B_TILE_COL + BLOCK_SIZE_N * bx,  // col
                    N )]);
        }

        __syncthreads();

        // compute C
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE_K; ++ k) {
            // load A column fragment from shared memory to registers
            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                frag_a[thread_y] = As[ty * THREAD_SIZE_Y + thread_y][k];
            }

            // load B row fragment from shared memory to registers
            #pragma unroll
            for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
                FETCH_FLOAT4(frag_b[thread_x]) =
                    FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]);
            }

            // outer-product accumulate
            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                #pragma unroll
                for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
                    accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x];
                }
            }
        }

        // wait before the next iteration overwrites the shared tiles
        __syncthreads();
    }

    // store accumulators back to C
    #pragma unroll
    for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
        #pragma unroll
        for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
            C[OFFSET(
                BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y,
                BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x,
                N)] = accum[thread_y][thread_x];
        }
    }
}
8,026
#include "includes.h"

/*------------------------GPU RANKING----------------------------------------START-------*/
// Final ranking pass: each block corresponds to one entry of the per-block
// partial results. Thread 0 caches that block's partial value and rank in
// shared memory; every element of `data` whose value equals the cached value
// then receives the cached rank.
// Expected launch: 1-D grid and block, gridDim.x == number of partial
// entries; `len` is the number of elements in data/rank.
__global__ void final_ranking(float *data, int *rank, float *partial_data,
                              int *partial_rank, int len)
{
    __shared__ float value_buf;
    __shared__ int rank_buf;

    int id = ((blockIdx.x * blockDim.x) + threadIdx.x);

    if (threadIdx.x == 0)
    {
        value_buf = partial_data[blockIdx.x];
        rank_buf = partial_rank[blockIdx.x];
    }
    __syncthreads();

    // BUGFIX (two defects in the original):
    //  * the guard was `id > len`, so the thread with id == len read and
    //    wrote one element past the end of data/rank;
    //  * out-of-range threads returned *before* __syncthreads(), leaving a
    //    partially-populated block stuck at a divergent barrier (undefined
    //    behavior).
    // Guarding the access instead of early-returning fixes both.
    if (id < len && data[id] == value_buf)
    {
        rank[id] = rank_buf;
    }
}
/*-----------------------GPU RANKING------------------------------------------END--------*/
8,027
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define IMG_SIZE 256
#define BLOCK_DIM_X 4
#define BLOCK_DIM_Y 4
#define N (IMG_SIZE * IMG_SIZE)

// Julia-set iteration count for point (x, y) with constant c = c_re + i*c_im:
// iterate z <- z^2 + c and return the number of iterations (0..255) before
// |z|^2 exceeds 4.
__device__ int iterate_pixel(float x, float y, float c_re, float c_im)
{
    int c = 0;
    float z_re = x;
    float z_im = y;
    while (c < 255) {
        float re2 = z_re * z_re;
        float im2 = z_im * z_im;
        if ((re2 + im2) > 4)
            break;
        z_im = 2 * z_re * z_im + c_im;
        z_re = re2 - im2 + c_re;
        c++;
    }
    return c;
}

// One thread per pixel, 2-D launch; writes each pixel's iteration count into
// dest (width * height bytes). The grid's row pitch (blockDim.x * gridDim.x)
// is assumed to equal `width`.
__global__ void calc_fractal(int width, int height, float c_re, float c_im,
                             unsigned char* dest)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int index = col + row * (blockDim.x * gridDim.x);

    // Guard the tail in case the grid overshoots the image.
    if (index >= width * height)
        return;

    // BUGFIX: the original derived x with `index % IMG_SIZE` but y with
    // `index / width`. Use the `width` parameter consistently (identical
    // result for the current 256x256 launch, correct for any width).
    int x = index % width;
    int y = index / width;

    // Map pixel coordinates into the complex plane, [-0.8, 0) on each axis.
    float f_x = (float)(x * 0.8) / (float)(width) - 0.8;
    float f_y = (float)(y * 0.8) / (float)(height) - 0.8;
    dest[index] = iterate_pixel(f_x, f_y, c_re, c_im);
}

// Write a width x height 8-bit image as a binary PPM (P6) file.
// Each input byte v becomes RGB (v, min(2v,255), min(4v,255)).
void write_ppm(unsigned char* data, unsigned int width, unsigned int height, char* filename)
{
    if (data == NULL) {
        printf("Provide a valid data pointer!\n");
        return;
    }
    if (filename == NULL) {
        printf("Provide a valid filename!\n");
        return;
    }
    if ((width > 4096) || (height > 4096)) {
        printf("Only pictures upto 4096x4096 are supported!\n");
        return;
    }
    FILE *f = fopen(filename, "wb");
    if (f == NULL) {
        printf("Opening File %s failed!\n", filename);
        return;
    }
    if (fprintf(f, "P6 %i %i 255\n", width, height) <= 0) {
        printf("Writing to file failed!\n");
        fclose(f);  // BUGFIX: the original leaked the handle on this path
        return;
    }
    int i;
    for (i = 0; i < height; i++) {
        unsigned char buffer[4096 * 3];
        int j;
        for (j = 0; j < width; j++) {
            int v = data[i * width + j];
            int s;
            s = v << 0; s = s > 255 ? 255 : s; buffer[j * 3 + 0] = s;
            s = v << 1; s = s > 255 ? 255 : s; buffer[j * 3 + 1] = s;
            s = v << 2; s = s > 255 ? 255 : s; buffer[j * 3 + 2] = s;
        }
        if (fwrite(buffer, width * 3, 1, f) != 1) {
            printf("Writing of line %i to file failed!\n", i);
            fclose(f);  // BUGFIX: handle leak on the failure path
            return;
        }
    }
    fclose(f);
}

// Render a 256x256 Julia set on the GPU and write it to julia.ppm.
int main(void)
{
    unsigned char *img, *dev_img;
    int size = IMG_SIZE * IMG_SIZE * sizeof(char);
    dim3 dimBlock(BLOCK_DIM_X, BLOCK_DIM_Y);
    dim3 dimGrid(IMG_SIZE / BLOCK_DIM_X, IMG_SIZE / BLOCK_DIM_Y);

    img = (unsigned char *) malloc(IMG_SIZE * IMG_SIZE);
    assert(img != NULL);

    // BUGFIX: check cudaMalloc's status code; on failure the pointer value
    // is unspecified, so asserting dev_img != NULL (as the original did) is
    // not a reliable check.
    cudaError_t err = cudaMalloc((void**)&dev_img, size);
    assert(err == cudaSuccess);

    calc_fractal<<<dimGrid, dimBlock>>>(256, 256, 0.28, 0.008, dev_img);
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(img, dev_img, size, cudaMemcpyDeviceToHost);

    write_ppm(img, IMG_SIZE, IMG_SIZE, "julia.ppm");

    free(img);
    cudaFree(dev_img);
    return 0;
}
8,028
#include "includes.h"

// Basic libraries
// Thrust library
// cuRAND library

// GLOBAL PARAMETERS
const int QUANT_PAIS_AVALIA = 4;  // parents evaluated per selection (per the name; not used in this kernel)
int POP_TAM = 200;                // population size
int N_CIDADES = 20;               // number of cities (chromosome length)
int BLOCKSIZE = 1024;             // CUDA threads per block
int TOTALTHREADS = 2048;          // total CUDA threads
int N_GERA = 100;                 // number of generations
const int MUT = 10;               // mutation decision threshold (mutate when random < MUT)
const int MAX = 19;               // highest valid gene value/index (N_CIDADES - 1)
const int MIN = 0;                // lowest valid gene value/index
const int ELITE = 2;              // number of elite individuals

/*
 * Checks for errors in the GPU processes
 * (translated; NOTE(review): this comment appears to describe an
 * error-checking helper that is not present in this file -- confirm.)
 */
// Crossover + mutation kernel of a GA over permutation chromosomes (TSP-like).
// Grid-stride loop over n offspring; np is the chromosome length.
// cidadesAle supplies pre-generated random numbers (read 4-strided per
// individual), poolPais holds selected parent indices, mutacoes per-individual
// offsets. Offspring are written to newPop; pop is the current population.
__global__ void cruza(unsigned int n, unsigned int np, int *cidadesAle, int *pop, int *newPop, int *poolPais, int *mutacoes)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int paiA, paiB, copiaPai, crossover, mutar, pontoMutar;
    for (int i=index; i<n; i+=stride)
    {
        // Draw this individual's four random numbers.
        copiaPai = cidadesAle[i*4];             // elite-copy decision
        crossover = cidadesAle[(i+1)*4] % np;   // crossover window start
        mutar = cidadesAle[(i+2)*4];            // mutation decision (NOTE(review): compared raw against MUT below -- looks like a missing modulo; confirm)
        pontoMutar = cidadesAle[(i+3)*4] % np;  // mutation target position
        paiA = poolPais[i];
        // NOTE(review): reads poolPais[i+1]; at i == n-1 this is one past n
        // entries -- confirm poolPais is sized at least n+1.
        paiB = poolPais[i+1];
        if (copiaPai < ELITE)
        {
            // Elite case: copy parent A unchanged.
            // NOTE(review): this `continue` only restarts the inner loop, so
            // execution still falls through to the crossover/mutation code
            // below; if elites were meant to skip it, this is a bug -- confirm.
            for (int j=0; j<np; j++)
            {
                newPop[(i*np) + j] = pop[(paiA*np) + j];
                continue;
            }
        }
        // Start the offspring as a copy of parent A.
        for(int j=0;j<np;j++)
        {
            newPop[(i*np) + j] = pop[(paiA*np) + j];
        }
        int t=0, aux=0, crossoverSup;
        // Upper bound of the crossover window, clamped to MAX.
        crossoverSup=(crossover +mutacoes[i]>MAX)?(MAX):(crossover +mutacoes[i]);
        // Order-based crossover: for each gene of parent B inside the window,
        // locate that gene in the offspring and swap it into position j,
        // preserving the permutation property.
        for(int j=crossover; j<crossoverSup;j++)
        {
            t=0;
            while(newPop[(i*np) +t]!=pop[(paiB*np) + j])
            {
                t++;
            }
            aux = newPop[i*np+j];
            newPop[i*np+j] = newPop[i*np+t];
            newPop[i*np+t] = aux;
        }
        if (mutar < MUT)
        {
            // Mutation: clamp mutacoes[i] into [MIN, MAX], find the gene with
            // that value and swap it with the gene at pontoMutar.
            int mut = (mutacoes[i]>MAX)?(MAX):((mutacoes[i]<MIN)?(MIN):(mutacoes[i]));
            t=0;
            while(newPop[(i*np) +t]!=mut)
            {
                t++;
            }
            aux = newPop[i*np+pontoMutar];
            newPop[i*np+pontoMutar] = newPop[i*np+t];
            newPop[i*np+t] = aux;
        }
    }
}
8,029
#include "includes.h"

extern "C" {
}

// Element-wise vector division: c[i] = a[i] / b[i].
// Expected launch: a 1-D grid sized so gridDim.x * blockDim.x covers the
// arrays exactly -- there is no bounds guard, so every launched thread must
// map to a valid element (NOTE(review): caller must guarantee this).
__global__ void vdiv(const float *a, const float *b, float *c)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const float numerator = a[idx];
    const float denominator = b[idx];
    c[idx] = numerator / denominator;
}
8,030
#include "includes.h"

// Back-propagation weight update for one layer (Rodinia-style backprop):
// applies the momentum delta rule to the (in+1) x (hid+1) weight matrix `w`
// and records each applied delta in `oldw`.
// Uses HEIGHT, ETA and MOMENTUM from includes.h. Expected launch: 2-D thread
// blocks indexing hidden units (tx) and input rows (ty, by); the +1 offsets
// skip the bias row/column.
__global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw )
{
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Flattened index into the weight matrix, past the bias row/column.
    int index   = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 );
    int index_y = HEIGHT * by + ty + 1;  // input-unit (row) index
    int index_x = tx + 1;                // hidden-unit (column) index

    // Momentum delta rule: dw = eta * delta * activation + momentum * old_dw.
    // Hoisted into a local -- the original evaluated the identical expression
    // twice; oldw[index] is read before either write, so the result is
    // bit-identical.
    float adjust = (ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]);
    w[index] += adjust;
    oldw[index] = adjust;

    __syncthreads();

    // A single row of threads (ty == 0 of the first block row) updates the
    // bias weights in row 0 of the matrix.
    if (ty == 0 && by == 0) {
        float bias_adjust = (ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]);
        w[index_x] += bias_adjust;
        oldw[index_x] = bias_adjust;
    }
}
8,031
#include <stdio.h>

// Replace this with the saxpy from cuBLAS or whatever?
// I doubt it matters, but it's definitely weird to have this.
// Serial per-thread helper: X[i] += Y[i] * scale for i in [0, n).
void __device__ saxpy(float* X, const float* Y, float scale, int n)
{
    for (int i = 0; i < n; ++i)
        X[i] += Y[i] * scale;
}

// Maxout: for each batch item b and output unit o, pick the maximum of the P
// candidate pieces: best__bo[b,o] = max_p cands__bop[b,o,p], with the argmax
// recorded in which__bo. One thread per batch item, 1-D launch over B.
void __global__ maxout(float* best__bo, int* which__bo, const float* cands__bop,
                       int B, int O, int P)
{
    int b = blockIdx.x * blockDim.x + threadIdx.x;
    if (b >= B) return;
    // BUGFIX: the original never advanced the pointers to batch item b, so
    // every thread read and wrote batch item 0's region concurrently -- a
    // data race that also left b > 0 outputs unwritten. Offset into this
    // thread's slice first.
    best__bo  += (size_t)b * O;
    which__bo += (size_t)b * O;
    cands__bop += (size_t)b * O * P;
    for (int o = 0; o < O; ++o)
    {
        which__bo[0] = 0;
        best__bo[0] = cands__bop[0];
        cands__bop += 1;
        for (int p = 1; p < P; ++p)
        {
            if (cands__bop[0] > best__bo[0])
            {
                which__bo[0] = p;
                best__bo[0] = cands__bop[0];
            }
            cands__bop += 1;
        }
        best__bo += 1;
        which__bo += 1;
    }
}

// Compute means of a batch of concatenated sequences, using the lengths.
// One block per batch item (T is unused; kept for interface parity).
// NOTE(review): only blockIdx.x is used, so every thread of a block repeats
// the same accumulation -- assumes blockDim == 1; confirm the launcher.
void __global__ mean_pool(float* means__bo, const float* X__to,
                          const int* lengths__b, int B, int T, int O)
{
    int b = blockIdx.x;  // Batch-item we're averaging
    if (b >= B) return;
    // Walk to this batch item's output row and input region.
    for (int i = 0; i < b; ++i)
    {
        means__bo += O;
        X__to += lengths__b[i] * O;
    }
    int length = lengths__b[b];
    // NOTE(review): length == 0 divides by zero here -- confirm callers never
    // pass empty sequences.
    float scale = 1. / length;
    for (int _ = 0; _ < length; ++_)  // Iterate over rows
    {
        saxpy(means__bo, X__to, scale, O);
        X__to += O;
    }
}

// Compute sums of a batch of concatenated sequences, using the lengths.
// Same layout and launch assumptions as mean_pool.
void __global__ sum_pool(float* sums__bo, const float* X__to,
                         const int* lengths__b, int B, int T, int O)
{
    int b = blockIdx.x;  // Batch-item we're summing
    if (b >= B) return;
    for (int i = 0; i < b; ++i)
    {
        sums__bo += O;
        X__to += lengths__b[i] * O;
    }
    int length = lengths__b[b];
    for (int _ = 0; _ < length; ++_)  // Iterate over rows
    {
        saxpy(sums__bo, X__to, 1.0, O);
        X__to += O;
    }
}

// Column-wise max over each concatenated sequence; which__bo records the row
// index of each winning element (for the backward pass).
void __global__ max_pool(float* maxes__bo, int* which__bo, const float* X__to,
                         const int* lengths__b, int B, int T, int O)
{
    int b = blockIdx.x;  // Batch-item we're maxing
    if (b >= B) return;
    for (int i = 0; i < b; ++i)
    {
        maxes__bo += O;
        which__bo += O;
        X__to += lengths__b[i] * O;
    }
    // Start by assuming the maxes are at row 0.
    for (int j = 0; j < O; ++j)
    {
        maxes__bo[j] = X__to[j];
        which__bo[j] = 0;
    }
    X__to += O;
    int length = lengths__b[b];
    for (int i = 1; i < length; ++i)  // Iterate over remaining rows
    {
        for (int j = 0; j < O; ++j)
        {
            if (X__to[j] > maxes__bo[j])
            {
                maxes__bo[j] = X__to[j];
                which__bo[j] = i;
            }
        }
        X__to += O;
    }
}

// Backward pass of mean_pool: spread each output gradient evenly over the
// rows of its sequence.
void __global__ backprop_mean_pool(float* dX__to, const float* d_means__bo,
                                   const int* lengths__b, int B, int T, int O)
{
    int b = blockIdx.x;
    if (b >= B) return;
    for (int i = 0; i < b; ++i)
    {
        d_means__bo += O;
        dX__to += lengths__b[i] * O;
    }
    int length = lengths__b[b];
    float scale = 1. / length;
    for (int _ = 0; _ < length; _++)
    {
        saxpy(dX__to, d_means__bo, scale, O);
        dX__to += O;
    }
}

// Backward pass of sum_pool: copy each output gradient to every row of its
// sequence.
void __global__ backprop_sum_pool(float* dX__to, const float* d_sum__bo,
                                  const int* lengths__b, int B, int T, int O)
{
    int b = blockIdx.x;
    if (b >= B) return;
    for (int i = 0; i < b; ++i)
    {
        d_sum__bo += O;
        dX__to += lengths__b[i] * O;
    }
    int length = lengths__b[b];
    for (int _ = 0; _ < length; _++)
    {
        saxpy(dX__to, d_sum__bo, 1.0, O);
        dX__to += O;
    }
}

// Backward pass of max_pool: route each output gradient to the row that won
// the max (per which__bo).
void __global__ backprop_max_pool(float* dX__to, const float* d_maxes__bo,
                                  const int* which__bo, const int* lengths__b,
                                  int B, int T, int O)
{
    int b = blockIdx.x;
    if (b >= B) return;
    for (int i = 0; i < b; ++i)
    {
        d_maxes__bo += O;
        which__bo += O;
        dX__to += lengths__b[i] * O;
    }
    int length = lengths__b[b];
    for (int i = 0; i < length; ++i)
    {
        for (int j = 0; j < O; ++j)
        {
            if (which__bo[j] == i)
                dX__to[j] += d_maxes__bo[j];
        }
        dX__to += O;
    }
}
8,032
//============================================================================
// Name        : parallelization1.cpp
// Author      :
// Version     :
// Copyright   : Your copyright notice
// Description : Managed-memory CUDA vector addition with a grid-stride kernel
//============================================================================

#include <stdio.h>
#include <assert.h>

// Set every one of the N entries of `a` to `num`.
void initWith(float num, float *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
  {
    a[idx] = num;
  }
}

// Grid-stride vector addition: result[i] = a[i] + b[i] for i in [0, N).
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
  const size_t gridSpan = gridDim.x * blockDim.x;
  for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < N; idx += gridSpan)
  {
    result[idx] = a[idx] + b[idx];
  }
}

// Verify that every element equals `target`; exit with a message on the
// first mismatch.
void checkElementsAre(float target, float *array, int N)
{
  for (int idx = 0; idx < N; idx++)
  {
    if (array[idx] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", idx, array[idx], target);
      exit(1);
    }
  }
  printf("SUCCESS! All values added correctly.\n");
}

// Report and assert on any CUDA API failure; passes the status through so
// calls can be wrapped inline.
cudaError_t cudaRunCheck(cudaError_t result)
{
  if (result != cudaSuccess)
  {
    printf("Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
  return result;
}

int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);

  // Unified (managed) memory: accessible from both host and device.
  float *a;
  float *b;
  float *c;
  cudaRunCheck(cudaMallocManaged(&a, size));
  cudaRunCheck(cudaMallocManaged(&b, size));
  cudaRunCheck(cudaMallocManaged(&c, size));

  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);

  size_t nbrThreadsBlock = 1024;
  size_t nbrBlocks = (N + nbrThreadsBlock - 1) / nbrThreadsBlock;  // ceil-div

  addVectorsInto<<<nbrBlocks, nbrThreadsBlock>>>(c, a, b, N);
  cudaDeviceSynchronize();

  // Surface launch-configuration errors the kernel call itself cannot return.
  cudaError_t kernelRun_err = cudaGetLastError();
  if (kernelRun_err != cudaSuccess)
  {
    printf("Error kernel launch: %s", cudaGetErrorString(kernelRun_err));
  }

  checkElementsAre(7, c, N);

  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
8,033
#include <iostream>
#include <stdio.h>
#include <vector>

#define MAX_THREADS 256
#define SIZE 131072

// Timing helpers: bracket a region with __START__ / __STOP__(vec); the
// elapsed milliseconds are appended to `vec`. They require `start`, `stop`
// (cudaEvent_t) and `time` (float) to be in scope.
#define __START__ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0);
#define __STOP__(_V) cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); _V.push_back(time); cudaEventDestroy(start); cudaEventDestroy(stop);
#define __NEXT__(_V) __STOP__(_V) __START__

// Square each element of d_vector in place; one thread per element, guarded
// against the tail when the grid overshoots SIZE.
template<class T>
__global__ void square_kernel(T *d_vector)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= SIZE)
        return;
    d_vector[i] = d_vector[i]*d_vector[i];
}

// Print the mean of `v` (values in milliseconds) converted to microseconds.
void showMean(std::vector<float> v)
{
    float sum(0);
    for (unsigned int i(0) ; i!=v.size() ; i++)
        sum += v[i];
    std::cout << 1000.*sum/v.size() << " microseconds" << std::endl;
}

int main(int argc, char **argv)
{
    std::cout << "SIZE (Datatype): " << SIZE << std::endl;
    cudaEvent_t start, stop;
    std::vector<float> intRun, floatRun, doubleRun;
    float time(0);
    cudaFree(0); // Force runtime API context establishment

    // `static` keeps these ~2 MB of buffers off the stack.
    static int h_vector_i[SIZE];    // For input and output
    static float h_vector_f[SIZE];  // For input and output
    static double h_vector_d[SIZE]; // For input and output
    for (unsigned int i(0) ; i!=SIZE ; i++)
    {
        h_vector_i[i] = i;
        h_vector_f[i] = i;
        h_vector_d[i] = i;
    }

    // Allocate the device buffers once, outside the timing loop (the original
    // issued a cudaMalloc/cudaFree pair per type per iteration -- 6000 slow,
    // synchronizing calls that are not part of what is being measured).
    int *d_vector_i;
    float *d_vector_f;
    double *d_vector_d;
    cudaMalloc(&d_vector_i, SIZE*sizeof(int));
    cudaMalloc(&d_vector_f, SIZE*sizeof(float));
    cudaMalloc(&d_vector_d, SIZE*sizeof(double));

    // NOTE(review): each iteration re-uploads the squared results of the
    // previous one, so values grow as x^(2^k) and the int run overflows
    // almost immediately. Preserved as-is; confirm this is intended.
    for (unsigned int i(0) ; i!=1000 ; i++)
    {
        /* INT */
        cudaMemcpy(d_vector_i, h_vector_i, SIZE*sizeof(int), cudaMemcpyHostToDevice);
        __START__
        square_kernel<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector_i);
        cudaDeviceSynchronize(); // Block until the device is finished (cudaThreadSynchronize is deprecated)
        __STOP__(intRun);
        cudaMemcpy(h_vector_i, d_vector_i, SIZE*sizeof(int), cudaMemcpyDeviceToHost);

        /* FLOAT */
        cudaMemcpy(d_vector_f, h_vector_f, SIZE*sizeof(float), cudaMemcpyHostToDevice);
        __START__
        square_kernel<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector_f);
        cudaDeviceSynchronize(); // Block until the device is finished
        __STOP__(floatRun);
        cudaMemcpy(h_vector_f, d_vector_f, SIZE*sizeof(float), cudaMemcpyDeviceToHost);

        /* DOUBLE */
        cudaMemcpy(d_vector_d, h_vector_d, SIZE*sizeof(double), cudaMemcpyHostToDevice);
        __START__
        square_kernel<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector_d);
        cudaDeviceSynchronize(); // Block until the device is finished
        __STOP__(doubleRun);
        cudaMemcpy(h_vector_d, d_vector_d, SIZE*sizeof(double), cudaMemcpyDeviceToHost);
    }

    cudaFree(d_vector_i);
    cudaFree(d_vector_f);
    cudaFree(d_vector_d);

    showMean(intRun);
    showMean(floatRun);
    showMean(doubleRun);
}
8,034
/// CUDA compileable code platform specific to Linux systems. Language standard: Host: C++11; Device: CUDA C. Requires GCC and NVCC linked to CUDA 8.0+ libraries.
/// This library is released for public use as material supplementary to PhD thesis: Development of Specialized Non-Linear Inversion Algorithms, Basis Functions, Eikonal Solvers, and Their Integration for Use in Joint Seismic and Gravitational Tomographic Inversion
/// Author: Zagid Abatchev - abatchev@ucla.edu ; PI: Paul Davis. Department of Earth, Planetary, and Space Sciences; University of California, Los Angeles; Date of publication: March 2019.///
/// This library is published as is and may have unresolved bugs. Use at own discretion. Users are encouraged to adapt some or all of this code without restriction. Refer to Github page for updated source code, located at: https://github.com/abatchev/ZTM .
///GFM_GPU.cu is a standalone gravity perturbation field forward model, implemented in CUDA C. Uses system shared virtual memory for fast file access, requires NVCC and CUDA 8.0+ to compile and properly run.///
///WARNING: This is an experimental implementation, and requires properly setting inversion specific grid size and decimation factors di, dj, dk, fh, fv inside main(). ///
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <fstream>
#include <iostream>

__global__ void GF(int bfr, int di, int dj, int dk, int fh, int fv, float *g, float *rho, float * s){ //CUDA device kernel function GF() for forward modeling perturbation gravity fields as a function of perturbed density fields.
    // NOTE(review): this kernel recomputes ii = di*fh and jj = dj*fh, while
    // main() sizes the grid with ii = (di-1)*fh+1 etc.; the two agree only
    // when fh == 1 (the current setting) -- confirm for fh > 1.
    int n=blockDim.x*blockIdx.x+threadIdx.x; float c=66.7f/(fh*fh*fv); int ii=di*fh, jj=dj*fh; //Set raster index from CUDA kernel thread and block ID.
    float i=(n%ii)*1.f/fh, j=((n/ii)%jj)*1.f/fh, k=(n/(ii*jj))*1.f/fv; //Set gravity render field grid axial indices i,j,k.
    if(fabs(rho[n])>.001f){ //If absolute density perturbation is greater than .001cc, iterate over grid.
        for(int ni=bfr; ni<di-bfr; ni++){for(int nj=bfr; nj<dj-bfr; nj++){ //Iterate through all internal points in grid at least "bfr" axial indices away from a boundary.
            int m=ni+nj*di; float i0=ni, j0=nj, k0=s[m]; //Compute flattened linear index and convert axial indices to floats.
            // NOTE(review): many threads accumulate into the same g[m] --
            // atomicAdd is required here; heavy contention may be slow.
            atomicAdd(&g[m], c*rho[n]*(k0-k)/powf((i0-i)*(i0-i)+(j0-j)*(j0-j)+(k0-k)*(k0-k),1.5));}}}} //Perform atomic addition of perturbation gravity due to the intergrated mass of a grid block approximated at far field as a point mass perturbation.

int main(){int di=61, dj=61, dk=35, fh=1, fv=2, bfr=10, ii=(di-1)*fh+1, jj=(dj-1)*fh+1, kk=(dk-1)*fv+1, N=ii*jj*kk, M=di*dj, lr,ls; //Declare inversion specific array dimensions di,dj,dk; horizontal and vertical grid point decimation fh, fv; boundary buffer size bfr (where gravity is not rendered due to edge effects).
    // NOTE(review): the fread return values lr/ls are captured (ls+=lr only
    // silences unused warnings) but never validated; a short read would leave
    // h_r/h_s partially uninitialized -- consider checking them.
    float *h_r=(float *)malloc(4*N); FILE *R=fopen("/dev/shm/R.dat","r"); lr=fread(h_r,4,N,R); fclose(R); //Allocate host memory for imported density perturbation field h_r. Import perturbation density field from shared memory directory in R.dat
    float *h_s=(float *)malloc(4*M); FILE *S=fopen("/dev/shm/S.dat","r"); ls=fread(h_s,4,M,S); fclose(S); ls+=lr; //Allocate host memory for imported gravity data h_s. Import perturbation density field from shared memory directory in S.dat
    float *h_g=(float *)malloc(4*M); for(int n=0;n<M;n++){h_g[n]=0;} //Allocate host memory for rendered gravity field h_g. Initialize with zeros.
    float *r; cudaMalloc((void **)&r,4*N); cudaMemcpy(r,h_r,4*N,cudaMemcpyHostToDevice); //Allocate device memory for imported density perturbation field R.
    float *g; cudaMalloc((void **)&g,4*M); cudaMemcpy(g,h_g,4*M,cudaMemcpyHostToDevice); //Allocate device memory for rendered gravity field g.
    float *s; cudaMalloc((void **)&s,4*M); cudaMemcpy(s,h_s,4*M,cudaMemcpyHostToDevice); //Allocate device memory for imported gravity data S.
    // NOTE(review): launch of N/ii blocks of ii threads assumes ii divides N
    // (true here: N = ii*jj*kk) and ii <= 1024 -- confirm when regridding.
    GF<<<N/ii,ii>>>(bfr,di,dj,dk,fh,fv,g,r,s); //Execute cuda kernel on the GPU
    cudaMemcpy(h_g,g,4*M,cudaMemcpyDeviceToHost); //Copy rendered field at pointer g from device to host memory (as h_g)
    FILE *G=fopen("/dev/shm/G.dat","w"); fwrite(h_g,4,M,G); fclose(G); //Open gravity field render file G.dat in system shared memory, write rendered array, close file.
    cudaFree(r); cudaFree(g); cudaFree(s); free(h_r); free(h_g); free(h_s);} //Free host and device memory.
8,035
#include <stdio.h>
#include <cuda_runtime.h>

// Radix digit width: keys are consumed NUM_BITS bits per sorting pass.
#define NUM_BITS 2
// Non-zero enables verbose device-side printf tracing.
#define DEBUG 1

// Debug helper: thread 0 of the block prints the int array arr[0..n-1].
__device__ void dispArr(int *arr, int n) {
    int i;
    // thread id within the block
    int thId = threadIdx.x;
    if (thId == 0) {
        printf("\n");
        for (i = 0; i < n; i++) {
            printf(" %d ", arr[i]);
        }
        printf("\n");
    }
}

// Work-efficient (Blelchoch-style up-sweep/down-sweep) exclusive scan of one
// chunk arr[0..n-1], where n == nThreads and n is a power of 2.
// `prev` is the running offset from previously scanned chunks; it is added to
// every output element. Must be called by all threads of the block.
__device__ void preSubScan(int *arr, int n, int prev) {
    int i, d, ai, bi, offset, temp;
    // thread id within the block
    int thId = threadIdx.x;
    // number of threads in the block
    int nThreads = blockDim.x;
    d = 0;
    offset = 1;
    // Up-sweep: build partial sums in place up the tree.
    for (d = n>>1; d > 0; d >>=1) {
        __syncthreads();
        if (thId < d) {
            ai = offset*(2*thId+1) - 1;
            bi = offset*(2*thId+2) - 1;
            arr[bi] += arr[ai];
        }
        offset*=2;
    }
    // Clear the last element (root) so the down-sweep yields an EXCLUSIVE scan.
    if (thId == 0) {
        arr[n-1] = 0;
    }
    // Down-sweep: traverse back down the tree, distributing prefix sums.
    // `offset` enters this loop equal to n (left over from the up-sweep).
    for (int d = 1; d < n; d *=2) {
        offset = offset >> 1;
        __syncthreads();
        if (thId < d) {
            ai = offset*(2*thId + 1) - 1;
            bi = offset*(2*thId + 2) - 1;
            temp = arr[ai];
            arr[ai] = arr[bi];
            arr[bi] += temp;
        }
    }
    // Shift the whole chunk by the accumulated offset of earlier chunks.
    for (i = thId; i < n; i+=nThreads) {
        arr[i] += prev;
    }
    __syncthreads();
}

// Exclusive scan of arr[0..n-1]. Processes the array in nThreads-sized chunks:
// full chunks go through the parallel preSubScan; a final partial chunk
// (n not a multiple of nThreads) is scanned serially by thread 0.
// Efficient only when n is a power of 2 (then every chunk is a full chunk).
__device__ void scan(int *arr, int n) {
    int i, j, prev, next, temp;
    // thread id within the block
    int thId = threadIdx.x;
    // number of threads in the block
    int nThreads = blockDim.x;
    // `prev` carries the inclusive total of all elements before the current
    // chunk; `next` snapshots the current chunk's last element before it is
    // overwritten by the scan.
    prev = 0;
    next = 0;
    for (i = 0; i < n; i += nThreads) {
        dispArr(arr, n);  // DEBUG trace
        next = 0;
        if (i+nThreads-1 < n) next = arr[i+nThreads-1];
        if (n - i >= nThreads) {
            if (thId == 0) {
                printf("\ncalling presub scan i=%d nThreads=%d", i, nThreads);
            }
            // arr[i-1] already holds the exclusive prefix of the previous
            // chunk at this point; adding `prev` (that chunk's last raw value)
            // turns it into the inclusive offset for this chunk.
            preSubScan(arr + i, nThreads, (i>0?arr[i-1]:0) + prev);
        } else {
            // Tail chunk shorter than nThreads: serial exclusive scan by
            // thread 0. This is always the final loop iteration.
            if (thId == 0) {
                printf("\ndoing naive scan i=%d nThreads=%d", i, nThreads);
                dispArr(arr, n);
                for (j = i; j < n; j++) {
                    if (j > 0) temp = prev + arr[j-1];
                    else temp = prev;
                    prev = arr[j];
                    arr[j] = temp;
                    printf("\ntemp=%d prev=%d arr[%d]=%d", temp, prev, j, arr[j]);
                }
                dispArr(arr, n);
            }
        }//end else
        prev = next;
    }//end for
    __syncthreads();
}

// Debug helper: thread 0 of the block prints the float array arr[0..n-1].
__device__ void d_dispFArr(float *arr, int n) {
    int i;
    // thread id within the block
    int thId = threadIdx.x;
    if (thId == 0) {
        printf("\n");
        for (i = 0; i < n; i++) {
            printf(" %f ", arr[i]);
        }
        printf("\n");
    }
}

// Histogram of the NUM_BITS-wide digit at bit position `bitpos` of each key's
// raw bit pattern, accumulated into aggHisto via atomics by all threads.
// Assumes sizeof(int) == sizeof(float): float keys are reinterpreted as ints.
__device__ void computeAtomicHisto(int *aggHisto, float *arrElem, int numElem, int numBits, int bitpos) {
    int i, j;
    int numBuckets = 1 << numBits;  // NOTE(review): unused
    int mask = (1 << numBits) - 1;
    int key;
    void *vptr;
    int *iptr;
    // thread id within the block
    int threadId = threadIdx.x;
    // number of threads in the block
    int nThreads = blockDim.x;
    for (i = threadId; i < numElem; i+=nThreads) {
        // Reinterpret the float key's bits as an int to extract the digit.
        vptr = (void*)(arrElem + i);
        iptr = (int*)vptr;
        key = ( (*iptr) >> bitpos) & mask;
        atomicAdd(&(aggHisto[key]), 1);
    }
}

// Stable scatter pass of one radix iteration: given aggHisto holding the
// exclusive-scanned bucket offsets, copies each (key, value) pair to its
// sorted slot. Serial — intended to be executed by a single thread.
// Assumes sizeof(int) == sizeof(float).
__device__ void writeSortedVals(int *aggHisto, float *fromKeys, float *toKeys, int *fromVals, int *toVals, int numBits, int bitpos, int n) {
    int i, key;
    int mask = (1 << numBits) - 1;
    void *vptr;
    int *iptr;
    for (i = 0; i < n; i++) {
        vptr = (void*)(fromKeys + i);
        iptr = (int*)vptr;
        key = ( (*iptr) >> bitpos) & mask;
        if (DEBUG) {
            printf("toKeys[%d] = %f\n", aggHisto[key], fromKeys[i]);
        }
        toKeys[aggHisto[key]] = fromKeys[i];
        toVals[aggHisto[key]] = fromVals[i];
        aggHisto[key]++;  // bump the bucket cursor to keep the pass stable
    }
}

// Zeroes arr[0..count-1] cooperatively across the block's threads.
__device__ void zeroedInt(int *arr, int count) {
    int i;
    // thread id within the block
    int threadId = threadIdx.x;
    // number of threads in the block
    int nThreads = blockDim.x;
    for (i = threadId; i < count; i+=nThreads) {
        arr[i] = 0;
    }
}

// In-block LSD radix sort of n float keys (with int payload values), numBits
// bits per pass. aggHisto is shared-memory scratch of (1 << numBits) ints.
// After the final pass the sorted data is back in fromKeys/fromVals (the
// buffers are swapped an even number of times: 32 / numBits passes).
// NOTE(review): sorting by raw float bit patterns orders non-negative floats
// correctly but would misorder negative keys — confirm inputs are >= 0.
// NOTE(review): there is no __syncthreads() between zeroedInt() and
// computeAtomicHisto(); a straggler could still be zeroing while another
// thread increments — a barrier here would be safer.
__device__ void radixSort(float *fromKeys, float *toKeys, int *fromVals, int *toVals, int *aggHisto, int n, int numBits) {
    int i, j, elemPerThread;  // NOTE(review): j, elemPerThread unused
    // current block number (unused)
    int blockId = blockIdx.x;
    // thread id within the block
    int threadId = threadIdx.x;
    // number of threads in the block
    int nThreads = blockDim.x;
    // global thread id (unused)
    int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
    // swap temporaries for ping-ponging the from/to buffers between passes
    float *tempFSwap;
    int *tempISwap;
    // number of histogram buckets per pass
    int bucketSize = 1 << numBits;
    if (threadId == 0 && DEBUG) {
        printf("\n fromKeys: ");
        d_dispFArr(fromKeys, n);
    }
    // One pass per numBits-wide digit of the 32-bit key.
    for (i = 0; i < sizeof(float)*8; i+=numBits) {
        if (threadId == 0 && DEBUG) {
            printf("\n fromKeys b4 zeroed histo : ");
            d_dispFArr(fromKeys, n);
        }
        // reset histogram
        zeroedInt(aggHisto, bucketSize);
        if (threadId == 0 && DEBUG) {
            printf("\n fromKeys b4 histo : ");
            d_dispFArr(fromKeys, n);
        }
        // aggregate digit counts in the shared-memory histogram
        computeAtomicHisto(aggHisto, fromKeys, n, numBits, i);
        if (threadId == 0 && DEBUG) {
            printf("\naggHisto, bitpos:%d:", i);
            dispArr(aggHisto, bucketSize);
            printf("\n fromKey after histo : ");
            d_dispFArr(fromKeys, n);
        }
        // exclusive scan of the histogram -> starting offset of each bucket
        // (assumes bucketSize is a power of 2)
        scan(aggHisto, bucketSize);
        if (threadId == 0 && DEBUG) {
            printf("\naggHisto after scan, bitpos:%d:", i);
            dispArr(aggHisto, bucketSize);
        }
        __syncthreads();
        if (threadId == 0) {
            // stable scatter into the "to" buffers, by a single thread
            writeSortedVals(aggHisto, fromKeys, toKeys, fromVals, toVals, numBits, i, n);
        }
        __syncthreads();
        if (threadId == 0 && DEBUG) {
            printf("\n sorted toKeys: ");
            d_dispFArr(toKeys, n);
        }
        // toKeys now holds the pass result; swap so the next pass reads it.
        tempFSwap = toKeys;
        toKeys = fromKeys;
        fromKeys = tempFSwap;
        if (threadId == 0 && DEBUG) {
            printf("\n after swap toKeys: ");
            d_dispFArr(toKeys, n);
            printf("\n after swap fromKeys: ");
            d_dispFArr(fromKeys, n);
        }
        // swap the payload buffers to match the keys
        tempISwap = toVals;
        toVals = fromVals;
        fromVals = tempISwap;
    }
    // At this point the caller's fromKeys/fromVals buffers hold the sorted data.
}
// Kernel: sort n (key, value) pairs with the in-block LSD radix sort above.
// Launch with ONE block; dynamic shared memory must hold
//   2*n ints + 2*n floats + (1 << NUM_BITS) ints
// laid out as documented below (the host launch in main() matches this).
// NOTE(review): raw-bit radix ordering is only correct for non-negative keys.
__global__ void testRadixSort(float *d_keys, int *d_vals, int n) {
    int i;
    extern __shared__ int s[];
    int numBits = NUM_BITS;
    // thread id within the block
    int thId = threadIdx.x;
    // number of threads cooperating on the sort
    int nThreads = blockDim.x;
    // Shared-memory layout, in allocation order:
    //   fromVals[n] | fromKeys[n] | toVals[n] | toKeys[n] | aggHisto[1<<numBits]
    int *fromVals = s;
    // FIX: this was "(float *)&d_keys[n]", which aliased memory just past the
    // end of the *global* key array instead of carving the region out of the
    // shared-memory block — out-of-bounds global writes and no shared staging.
    float *fromKeys = (float *)&fromVals[n];
    int *toVals = (int *)&fromKeys[n];
    float *toKeys = (float *)&toVals[n];
    int *aggHisto = (int *)&toKeys[n];
    // Stage keys and values into shared memory.
    for (i = thId; i < n; i+=nThreads) {
        fromKeys[i] = d_keys[i];
        fromVals[i] = d_vals[i];
    }
    // FIX: barrier so every thread sees the fully staged arrays before the
    // first histogram pass reads elements written by other threads.
    __syncthreads();
    radixSort(fromKeys, toKeys, fromVals, toVals, aggHisto, n, numBits);
    // Copy the sorted pairs back to global memory.
    for (i = thId; i < n; i+=nThreads) {
        d_keys[i] = fromKeys[i];
        d_vals[i] = fromVals[i];
    }
}

// Host driver: sorts a fixed 16-element key/value set on the GPU and prints
// the sorted (key, value) pairs.
int main(int argc, char *argv[]) {
    float h_fKeys[] = {1.0, 0.4, 0.316228, 0.365148, 0.670820, 0.447214, 0.258199, 0.4, 0.258199, 0.316228, 0.258199, 0.258199, 0.258199, 0.258199, 0.258199, 0.258199};
    int h_iVal[] = {0, 53, 54, 81, 98, 195, 283, 583, 598, 615, 654, 690, 768, 904, 919, 946};
    int n = 16;
    int i;
    int numBits = NUM_BITS;
    float *d_keys;
    int *d_val;
    cudaMalloc((void **) &d_keys, sizeof(float)*n);
    cudaMemcpy((void *) d_keys, (void *) h_fKeys, sizeof(float)*n, cudaMemcpyHostToDevice );
    cudaMalloc((void **) &d_val, sizeof(int)*n);
    cudaMemcpy((void *) d_val, (void *) h_iVal, sizeof(int)*n, cudaMemcpyHostToDevice );
    // Dynamic shared size matches the layout documented on testRadixSort.
    testRadixSort<<<1, 128, sizeof(int)*(2*n + (1<<numBits)) + sizeof(float)*(2*n)>>>(d_keys, d_val, n);
    cudaMemcpy((void *) h_iVal, (void *) d_val, sizeof(int)*n, cudaMemcpyDeviceToHost );
    cudaMemcpy((void *) h_fKeys, (void *) d_keys, sizeof(float)*n, cudaMemcpyDeviceToHost );
    printf("\n");
    for (i = 0; i < n; i++) {
        printf(" %f %d, ", h_fKeys[i], h_iVal[i]);
    }
    printf("\n");
    // FIX: release device allocations (previously leaked).
    cudaFree(d_keys);
    cudaFree(d_val);
    return 0;
}
8,036
/***************************************************
 * Module with a GPU matrix transpose kernel.
 * Author: Alonso Vidales <alonso.vidales@tras2.es>
 *
 * To be compiled with nvcc -ptx matrix_trans.cu
 * Debug: nvcc -arch=sm_20 -ptx matrix_trans.cu
 * **************************************************/

#ifdef __cplusplus
extern "C" {
#endif

// CUDA Kernel.
// Writes C[row * width + col] = A[col * height + row]: each block covers a
// resW x resH tile of the result, one thread per output element. Threads that
// fall past the row end or past the finalSize-element output buffer are
// skipped.
__global__ void matrixTrans(double* C, double* A, int resW, int resH, int width, int height, int finalSize) {
    const int col = blockIdx.x * resW + threadIdx.x;
    const int row = blockIdx.y * resH + threadIdx.y;
    const int outPos = row * width + col;

    // Guard clause: out-of-range threads write nothing.
    if (col >= width || outPos >= finalSize) {
        return;
    }

    C[outPos] = A[col * height + row];
}

#ifdef __cplusplus
}
#endif
8,037
#include <iostream>
#include <cmath>
#include <cassert>

// Aborts with a readable message when a CUDA call fails.
void checkError (cudaError_t err, int line) {
  if (err == cudaSuccess) return;
  std::cout << "Error code " << err << " : " << cudaGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
  assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)

// One thread per element: replaces data[i] with the Gaussian pdf of data[i]
// given (mean, sigma). Launched with a single block of exactly N threads, so
// threadIdx.x alone indexes the array.
// FIX: this kernel was an unimplemented exercise placeholder.
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
  int i = threadIdx.x;
  double z = (data[i] - mean) / sigma;
  data[i] = exp(-0.5 * z * z) / (sigma * sqrt(2 * M_PI));
}

// Single-block tree reduction: sums data[0..blockDim.x-1] into *result.
// Handles non-power-of-two sizes by rounding the stride up and bounds-checking
// each partner index. Requires blockDim.x * sizeof(double) dynamic shared mem.
// FIX: this kernel was an unimplemented exercise placeholder.
__global__ void dev_reduce_vector (double* data, double* result) {
  extern __shared__ double cache[];
  const int tid = threadIdx.x;
  const int n = blockDim.x;
  cache[tid] = data[tid];
  __syncthreads();
  // Round n up to the next power of two so the halving loop covers everything.
  int span = 1;
  while (span < n) span <<= 1;
  for (int s = span >> 1; s > 0; s >>= 1) {
    if (tid < s && tid + s < n) cache[tid] += cache[tid + s];
    __syncthreads();
  }
  if (tid == 0) *result = cache[0];
}

int main (int argc, char** argv) {
  if (argc < 2) {
    std::cout << "usage: " << argv[0] << " <vector size>" << std::endl;
    return 1;
  }
  int sizeOfVector = atoi(argv[1]);

  // FIX (exercise): verify the requested size fits in one block of threads.
  cudaDeviceProp prop;
  CUDACALL(cudaGetDeviceProperties(&prop, 0));
  if (sizeOfVector < 1 || sizeOfVector > prop.maxThreadsPerBlock) {
    std::cout << "Vector size must be between 1 and " << prop.maxThreadsPerBlock << std::endl;
    return 1;
  }

  double mean = 5;
  double sigma = 3;

  // Generate a host-side vector and fill it with random numbers.
  double* host_data = new double[sizeOfVector];
  for (int i = 0; i < sizeOfVector; ++i) {
    host_data[i] = (rand() % 11) - 5;
  }

  // Host-side numbers to check against device-side ones.
  double* host_probs = new double[sizeOfVector];
  double host_sum = 0;
  for (int i = 0; i < sizeOfVector; ++i) {
    host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
    host_probs[i] /= (sigma * sqrt(2*M_PI));
    host_sum += host_probs[i];
  }

  double* dev_data = 0;
  // FIX (exercise): device array + upload of the host data.
  CUDACALL(cudaMalloc((void**) &dev_data, sizeOfVector * sizeof(double)));
  CUDACALL(cudaMemcpy(dev_data, host_data, sizeOfVector * sizeof(double), cudaMemcpyHostToDevice));

  // FIX (exercise): one block, one thread per element.
  dev_calculate_Gaussians<<<1, sizeOfVector>>>(dev_data, mean, sigma);
  CUDACALL(cudaGetLastError());

  // FIX (exercise): copy the computed probabilities back into host_data.
  CUDACALL(cudaMemcpy(host_data, dev_data, sizeOfVector * sizeof(double), cudaMemcpyDeviceToHost));

  // Check for reasonableness
  double tolerance = 1e-6;
  for (int i = 0; i < sizeOfVector; ++i) {
    if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
    std::cout << "Problem with entry " << i << ": "
              << host_probs[i] << " " << host_data[i] << " "
              << (host_probs[i] - host_data[i])
              << std::endl;
  }
  std::cout << "Sum from CPU: " << host_sum << std::endl;

  double* device_sum = 0;
  // FIX (exercise): single device double for the reduction result.
  CUDACALL(cudaMalloc((void**) &device_sum, sizeof(double)));
  // FIX (exercise): one block, sizeOfVector threads, shared mem for the tree.
  dev_reduce_vector<<<1, sizeOfVector, sizeOfVector * sizeof(double)>>>(dev_data, device_sum);
  CUDACALL(cudaGetLastError());
  // FIX (exercise): blocking copy of the reduced sum back to the host.
  CUDACALL(cudaMemcpy(&host_sum, device_sum, sizeof(double), cudaMemcpyDeviceToHost));

  std::cout << "Sum from GPU: " << host_sum << std::endl;

  // Tidy up (the original leaked everything on exit).
  cudaFree(dev_data);
  cudaFree(device_sum);
  delete[] host_data;
  delete[] host_probs;
  return 0;
}
8,038
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>

static const int ThreadsPerBlock = 512;

// c[idx] = a[idx] + b[idx], one thread per element, tail-guarded.
static __global__ void AddKernel(const int* a, const int* b, int* c, const int n)
{
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < n) {
    c[idx] = a[idx] + b[idx];
  }
}

// Fills x[0..n-1] with 0, 1, ..., n-1 (deterministic despite the name, which
// makes the 2*i verification below possible).
static void random_ints(int* x, const int n)
{
  for (int i = 0; i < n; i++) x[i] = i;
}

// Synchronizes the device and aborts if any prior CUDA operation failed.
static void CheckCuda()
{
  cudaError_t e;
  cudaDeviceSynchronize();
  if (cudaSuccess != (e = cudaGetLastError())) {
    fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e));
    exit(-1);
  }
}

int main(int argc, char *argv[])
{
  // check command line
  if (argc != 2) {fprintf(stderr, "usage: %s number_of_elements\n", argv[0]); exit(-1);}
  const int N = atoi(argv[1]);
  if (N < 1) {fprintf(stderr, "error: number_of_elements must be at least 1\n"); exit(-1);}
  printf("running %d-element vector addition\n", N);

  // alloc space for device copies of a, b, c
  // FIX: cudaMalloc results were ignored while every cudaMemcpy was checked;
  // a failed allocation would have surfaced later as a confusing copy error.
  int *d_a, *d_b, *d_c;
  const int size = N * sizeof(int);
  if (cudaSuccess != cudaMalloc((void **)&d_a, size)) {fprintf(stderr, "allocating d_a on device failed\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&d_b, size)) {fprintf(stderr, "allocating d_b on device failed\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&d_c, size)) {fprintf(stderr, "allocating d_c on device failed\n"); exit(-1);}

  // alloc space for host copies of a, b, c and setup input values
  int* a = new int[N]; random_ints(a, N);
  int* b = new int[N]; random_ints(b, N);
  int* c = new int[N];

  // copy inputs to device
  if (cudaSuccess != cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);}
  if (cudaSuccess != cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);}

  // start time
  timeval start, end;
  gettimeofday(&start, NULL);

  // launch GPU kernel with a ceil-div grid so every element is covered
  AddKernel<<<(N + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(d_a, d_b, d_c, N);
  cudaDeviceSynchronize();  // wait so the timing covers the kernel itself

  // end time
  gettimeofday(&end, NULL);
  const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
  printf("compute time: %.6f s\n", runtime);
  CheckCuda();

  // copy result back to host
  if (cudaSuccess != cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);}

  // verify result: both inputs hold i at index i, so each sum must be 2*i
  for (int i = 0; i < N; i++) {
    if (c[i] != 2 * i) {
      fprintf(stderr, "error: mismatch at location %d\n", i);
      exit(-1);
    }
  }

  // cleanup
  delete [] a;
  delete [] b;
  delete [] c;
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  return 0;
}
8,039
//#include "../../include/layers/inner_product_layer.h" // //#include "../../include/util/math_function_ptr.h" // //namespace BigBang { // //template<typename dtype> //void InnerProductLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) { // bigbang_gpu_gemm<dtype>(false, false, bottom_row_, weights_column_, bottom_column_, 1., // bottom->gpu_data(), weights_->gpu_data(), 0., top->mutable_gpu_data()); //} // //template<typename dtype> //void InnerProductLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) { // const dtype* bottom_data = bottom->gpu_data(); // const dtype* top_diff_data = top->gpu_diff_data(); // //get the delta // bigbang_gpu_gemm<dtype>(false, true, top_row_, weights_column, top_column_, 1., top_diff_data, // weights_->gpu_data(), 0, bottom->mutable_gpu_diff_data()); // UpdateParams_GPU(bottom_data, top_diff_data); //} // //template<typename dtype> //void InnerProductLayer<dtype>::UpdateParams_GPU(const dtype* bottom_data, const dtype* delta) { // //update the biases // if (use_biases_) { // dtype* biases_mutable_diff_data = biases_->mutable_gpu_data(); // bigbang_gpu_column_sum_plus(delta, bottom_row_, biases_row_, biases_mutable_diff_data); // bigbang_gpu_minus(biases_->gpu_data(), biases_mutable_diff_data, biases_row_, alpha_ / bottom_row_, // biases_->mutable_gpu_data()); // } // // //update the weights // dtype* weights_diff_data = weights_->mutable_gpu_diff_data(); // cudaMemset(weights_diff_data, 0, sizeof(dtype)*weights_row_*weights_column_); // /*bigbang_cpu_gemm(bottom_data, bottom_row_, bottom_column_, true, delta, top_row_, top_column_, // false, alpha_ / bottom_row_, (dtype*)nullptr, 0, 0, false, weights_diff_data);*/ // bigbang_gpu_gemm<dtype>(true, false, bottom_row_, top_column_, bottom_column_, alpha_ / bottom_row_, // bottom_data, delta, 0, weights_diff_data); // bigbang_gpu_minus(weights_->gpu_data(), weights_diff_data, weights_row_*weights_column_, // static_cast<dtype>(1.0), 
weights_->mutable_gpu_data()); //} // // // //}
8,040
#include <stdio.h>

// We assume that sizes are divisible by the block dimensions
#define RADIUS 2
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#define MATRIX_WIDTH (128)
#define MATRIX_HEIGHT (128)
#define NUM_ELEMENTS (MATRIX_HEIGHT * MATRIX_WIDTH)

// CUDA API error checking macro
#define cudaCheck(error) \
  if (error != cudaSuccess) { \
    printf("Fatal error: %s at %s:%d\n", \
           cudaGetErrorString(error), \
           __FILE__, __LINE__); \
    exit(1); \
  }

// 2D box stencil: out(x, y) = sum of `in` over the (2*RADIUS+1)^2 window
// centred at (x, y), clipped at the matrix borders. One thread per output
// element; matrices are row-major with index x + y * MATRIX_WIDTH.
// FIX: the kernel body was an empty placeholder, so d_out was never written
// and the host verification always failed.
__global__ void stencil_2d(int *in, int *out)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= MATRIX_WIDTH || y >= MATRIX_HEIGHT) return;

  int sum = 0;
  for (int dy = -RADIUS; dy <= RADIUS; ++dy) {
    for (int dx = -RADIUS; dx <= RADIUS; ++dx) {
      const int nx = x + dx;
      const int ny = y + dy;
      // Only in-bounds neighbours contribute (border windows are clipped).
      if (nx >= 0 && nx < MATRIX_WIDTH && ny >= 0 && ny < MATRIX_HEIGHT)
        sum += in[nx + ny * MATRIX_WIDTH];
    }
  }
  out[x + y * MATRIX_WIDTH] = sum;
}

int main()
{
  unsigned int i, j;
  int h_in[NUM_ELEMENTS], h_out[NUM_ELEMENTS];
  int *d_in, *d_out;

  // To access element (i, j) of the matrix, use h_in[i + j * MATRIX_WIDTH]
  // Initialize host data to all ones so each output is the clipped window size
  for (i = 0; i < (NUM_ELEMENTS); ++i)
    h_in[i] = 1;

  // Allocate space on the device
  cudaCheck( cudaMalloc( &d_in, NUM_ELEMENTS * sizeof(int)) );
  cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );

  // Copy input data to device
  cudaCheck( cudaMemcpy( d_in, h_in, NUM_ELEMENTS * sizeof(int), cudaMemcpyHostToDevice) );

  dim3 blocks = dim3(MATRIX_WIDTH / BLOCK_SIZE_X, MATRIX_HEIGHT / BLOCK_SIZE_Y, 1);
  dim3 threads = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);

  stencil_2d<<< blocks, threads >>> (d_in, d_out);
  cudaCheck(cudaPeekAtLastError());

  cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );

  bool correct = true;
  // Verify results (inclusion-exclusion principle)
  for (j = 0; j < MATRIX_WIDTH; ++j) {
    for (i = 0; i < MATRIX_HEIGHT; ++i) {
      int expected = (2 * RADIUS + 1) * (2 * RADIUS + 1)
        - (j < RADIUS ? (RADIUS - j) * (2 * RADIUS + 1) : 0)
        - (i < RADIUS ? (RADIUS - i) * (2 * RADIUS + 1) : 0)
        + ((j < RADIUS && i < RADIUS) ? (RADIUS - j) * (RADIUS - i) : 0)
        - (j > MATRIX_WIDTH - RADIUS - 1 ? (j + RADIUS + 1 - MATRIX_WIDTH) * (2 * RADIUS + 1) : 0)
        - (i > MATRIX_HEIGHT - RADIUS - 1 ? (i + RADIUS + 1 - MATRIX_HEIGHT) * (2 * RADIUS + 1) : 0)
        + (j > MATRIX_WIDTH - RADIUS - 1 && i > MATRIX_HEIGHT - RADIUS - 1 ? (j + RADIUS + 1 - MATRIX_WIDTH) * (i + RADIUS + 1 - MATRIX_HEIGHT) : 0)
        + (j < RADIUS && i > MATRIX_HEIGHT - RADIUS - 1 ? (RADIUS - j) * (i + RADIUS + 1 - MATRIX_HEIGHT) : 0)
        + (i < RADIUS && j > MATRIX_WIDTH - RADIUS - 1 ? (RADIUS - i) * (j + RADIUS + 1 - MATRIX_WIDTH) : 0);
      if (h_out[j + i * MATRIX_WIDTH] != expected) {
        printf("Element h_out[%d + %d * MATRIX_WIDTH] == %d != %d\n", j, i, h_out[j + i * MATRIX_WIDTH], expected);
        correct = false;
      }
    }
  }

  if (correct) printf("SUCCESS!\n");
  else printf("FAILURE!\n");

  // Free out memory
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
8,041
#include <stdio.h>
#include <iostream>

// Vector length
#define DELKA 16
// Threads per block
#define BLOK_VELIKOST 32
#define CUDA_ALL __host__ __device__
#define CUDA_DEV __device__
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__) )

using namespace std;

// Small demo class holding a DELKA-element int vector. The same object layout
// is used on both host and device; the device copy's `hodnoty` pointer is
// patched to point at device memory by cudaVektorInit() below.
class cVektor {
    public:
        int * hodnoty;        // owned value array (host heap, or device mem on the GPU copy)
        int * statickePole;   // borrowed pointer to a shared device-side "static" array
        cVektor();
        ~cVektor();
        CUDA_DEV void sectiSVektorem( const cVektor & vektor );
        CUDA_DEV void setHodnoty( );
        void vypisHodnoty( );
};

// Host constructor: allocates the value array and fills it with 0, 2, 4, ...
cVektor::cVektor( ) {
    hodnoty = new int[DELKA];
    for( int i=0 ; i<DELKA ; i++ ) {
        hodnoty[i] = 2 * i;
    }
}

// Host destructor. NOTE(review): safe only for host-constructed objects; the
// device copies are released separately via cudaVektorUklid().
cVektor::~cVektor( ) {
    delete [] hodnoty;
}

// Device method: each thread overwrites its element of `hodnoty` with the
// corresponding element of the shared `statickePole` array.
CUDA_DEV void cVektor::setHodnoty( ) {
    int blok = blockIdx.x;
    int vlakno = threadIdx.x;
    int i = BLOK_VELIKOST * blok + vlakno;
    printf( "thread id = %d, b = %d, v = %d\n", i, blok, vlakno );
    if ( i < DELKA ) {
        hodnoty[i] = statickePole[i];
    }
}

// Device method: element-wise `hodnoty[i] += vektor.hodnoty[i]`.
CUDA_DEV void cVektor::sectiSVektorem( const cVektor & vektor ) {
    int blok = blockIdx.x;
    int vlakno = threadIdx.x;
    int i = BLOK_VELIKOST * blok + vlakno;
    printf( "thread id = %d, b = %d, v = %d\n", i, blok, vlakno );
    if ( i < DELKA ) {
        hodnoty[i] += vektor.hodnoty[i];
    }
}

// Prints the CUDA error with file/line and exits on any failure.
static void HandleError( cudaError_t chyba, const char * soubor, int radek ) {
    if ( chyba != cudaSuccess ) {
        printf( "%s v %s na radku %d\n", cudaGetErrorString( chyba ), soubor, radek );
        exit( 1 );
    }
}

// Host method: prints the vector's values on one line.
void cVektor::vypisHodnoty( ) {
    for (int i=0 ; i<DELKA ; i++ ) {
        cout << hodnoty[i] << ' ';
    }
    cout << endl;
}

// Creates a device-side copy of *host (shallow copy of the object, then a
// deep copy of the value array) and patches the device object's pointers:
// `hodnoty` -> freshly allocated device array, `statickePole` -> the caller's
// shared device array devStatickePole.
void cudaVektorInit( cVektor *& device, const cVektor * host, int * devStatickePole ) {
    if ( host == NULL ) {
        cerr << "cudaVektorUklid(): host nesmi by NULL" << endl;
        throw "cudaVektorUklid(): host nesmi by NULL";
    }
    // Shallow copy of the object itself (still carries host pointers).
    cudaMalloc( (void**)&device, sizeof(*device) );
    cudaMemcpy( device, host, sizeof(*device), cudaMemcpyHostToDevice );
    // Deep copy of the values ----------------------
    // copy the data to the device
    int * devHodnoty;
    HANDLE_ERROR( cudaMalloc( &devHodnoty, DELKA*sizeof(*devHodnoty) ) );
    HANDLE_ERROR( cudaMemcpy( devHodnoty, host->hodnoty, DELKA*sizeof(*devHodnoty), cudaMemcpyHostToDevice ) );
    // patch the device object's value pointer to the device array
    HANDLE_ERROR( cudaMemcpy( &(device->hodnoty), &(devHodnoty), sizeof(devHodnoty), cudaMemcpyHostToDevice ) );
    // patch the device object's pointer to the pseudo-static shared array
    HANDLE_ERROR( cudaMemcpy( &(device->statickePole), &(devStatickePole), sizeof(devHodnoty), cudaMemcpyHostToDevice ) );
}

// Tears down a device-side cVektor created by cudaVektorInit(). When `host`
// is non-NULL the device values are first copied back into host->hodnoty.
// Returns true iff the copy-back happened.
bool cudaVektorUklid( cVektor *& device, cVektor * host ) {
    bool ret = false;
    int * devHodnoty;
    // fetch the device object's value pointer into devHodnoty
    HANDLE_ERROR( cudaMemcpy( &(devHodnoty), &(device->hodnoty), sizeof(devHodnoty), cudaMemcpyDeviceToHost ) );
    // copy the data back if requested
    if ( host != NULL ) {
        HANDLE_ERROR( cudaMemcpy( host->hodnoty, devHodnoty, DELKA*sizeof(*devHodnoty), cudaMemcpyDeviceToHost ) );
        ret = true;
    }
    HANDLE_ERROR( cudaFree( devHodnoty ) );
    HANDLE_ERROR( cudaFree( device ) );
    return ret;
}

// Kernel wrapper: loads devA from the shared static array, then adds devB.
__global__ void wrapperSectiVektory( cVektor * devA, const cVektor * devB ) {
    printf( "CUDA HELLO\n" );
    devA->setHodnoty( );
    devA->sectiSVektorem( *devB );
    printf( "CUDA bye\n" );
}

// Demo driver: builds two host vectors and a shared device array, runs the
// kernel, copies A's result back and prints it.
int main( void ) {
    printf("CPU hello\n");
    // Allocation
    cVektor * hostA = new cVektor();
    cVektor * hostB = new cVektor();
    int * hostPole = new int[DELKA];
    for ( int i = 0 ; i<DELKA ; i++ ) {
        hostPole[i] = i*i + i - 3;
    }
    // Shared device-side "static" array used by both vectors.
    int * devPole;
    HANDLE_ERROR( cudaMalloc( (void**)&devPole, sizeof(*devPole)*DELKA ) );
    HANDLE_ERROR( cudaMemcpy( devPole, hostPole, sizeof(*devPole)*DELKA, cudaMemcpyHostToDevice ) );
    cVektor * devA, * devB;
    cudaVektorInit( devA, hostA, devPole );
    cudaVektorInit( devB, hostB, devPole );
    // ceil-div grid so all DELKA elements are covered
    int bloku = (DELKA + BLOK_VELIKOST - 1)/BLOK_VELIKOST;
    wrapperSectiVektory<<<bloku,BLOK_VELIKOST>>>( devA, devB );
    HANDLE_ERROR( cudaDeviceSynchronize() );
    // Copy A's result back; B is discarded without a copy-back.
    cudaVektorUklid( devA, hostA );
    cudaVektorUklid( devB, NULL );
    HANDLE_ERROR( cudaFree( devPole ) );
    hostA->vypisHodnoty( );
    delete [] hostPole;
    delete hostA;
    delete hostB;
    printf("CPU bye\n");
}
8,042
#include <cuda_runtime_api.h>
#include <stdint.h>

// Maps a thread index to a padded shared-memory slot (one pad word inserted
// every 32 entries) to spread accesses across banks.
#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })

// Caffe-style im2col: thread `index` < n handles one (channel, output row,
// output col) triple and writes the kernel_h*kernel_w patch elements into
// data_col, zero-filling positions that fall into the padding.
__global__ void im2col_gpu_kernel(
    const int n,
    const float* data_im,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    float* data_col)
{
  int index = threadIdx.x + blockDim.x * blockIdx.x;
  if (index < n) {
    // Decompose the flat index into (c_im, h_col, w_col).
    const int h_index = index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = index % width_col;
    const int c_im = h_index / height_col;
    const int c_col = c_im * kernel_h * kernel_w;
    // Top-left corner of the receptive field in image coordinates.
    const int h_offset = h_col * stride_h - pad_h;
    const int w_offset = w_col * stride_w - pad_w;
    float* data_col_ptr = data_col;
    data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
    const float* data_im_ptr = data_im;
    data_im_ptr += (c_im * height + h_offset) * width + w_offset;
    for (int i = 0; i < kernel_h; ++i) {
      for (int j = 0; j < kernel_w; ++j) {
        int h_im = h_offset + i * dilation_h;
        int w_im = w_offset + j * dilation_w;
        // Zero-fill when the (dilated) tap lands in the padding region.
        *data_col_ptr =
            (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
        data_col_ptr += height_col * width_col;
      }
    }
  }
}

// Host wrapper: computes the output spatial dims and launches im2col on the
// given stream, one thread per (channel, h_col, w_col).
extern "C" void neuralops_cuda_caffe_im2col(
    const float* data_im,
    const int channels,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    float* data_col,
    cudaStream_t stream)
{
  // We are going to launch channels * height_col * width_col kernels, each
  // kernel responsible for copying a single-channel grid.
  int height_col = (height + 2 * pad_h -
      (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w -
      (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
  int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<<<(num_kernels+1024-1)/1024, 1024, 0, stream>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
}

// Caffe-style col2im: thread `index` < n handles one image element and sums
// every data_col entry that maps onto it (the inverse scatter of im2col,
// written as a gather to avoid atomics).
__global__ void col2im_gpu_kernel(
    const int n,
    const float* data_col,
    const int height, const int width, const int channels,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    float* data_im)
{
  int index = threadIdx.x + blockDim.x * blockIdx.x;
  if (index < n) {
    float val = 0;
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
    int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
    // compute the start and end of the output
    const int w_col_start =
        (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
    const int w_col_end = min(w_im / stride_w + 1, width_col);
    const int h_col_start =
        (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
    const int h_col_end = min(h_im / stride_h + 1, height_col);
    // TODO: use LCM of stride and dilation to avoid unnecessary loops
    for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
      for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
        int h_k = (h_im - h_col * stride_h);
        int w_k = (w_im - w_col * stride_w);
        // Only taps aligned with the dilation grid contribute.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
                                height_col + h_col) * width_col + w_col;
          val += data_col[data_col_index];
        }
      }
    }
    data_im[index] = val;
  }
}

// Host wrapper: launches col2im on the given stream, one thread per image
// element (channels * height * width threads total).
extern "C" void neuralops_cuda_caffe_col2im(
    const float* data_col,
    const int channels,
    const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    float* data_im,
    cudaStream_t stream)
{
  int height_col = (height + 2 * pad_h -
      (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w -
      (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
  int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  // NOLINT_NEXT_LINE(whitespace/operators)
  col2im_gpu_kernel<<<(num_kernels+1024-1)/1024, 1024, 0, stream>>>(
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w,
      dilation_h, dilation_w, height_col, width_col, data_im);
}

// Per-channel affine forward: out = scale[c] * in + bias[c], one thread per
// element of the (batch, channel, spatial) tensor.
__global__ void conv_diag_affine_fwd_batch_kernel(
    const float *in_act,
    int spatial_dim,
    int num_channels,
    int batch_size,
    const float *scale,
    const float *bias,
    float *out_act)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int u = idx % spatial_dim;
  int c = (idx / spatial_dim) % num_channels;
  int batch_idx = idx / (spatial_dim * num_channels);
  if (u < spatial_dim && c < num_channels && batch_idx < batch_size) {
    float gamma = scale[c];
    float beta = bias[c];
    float y = gamma * in_act[idx] + beta;
    out_act[idx] = y;
  }
}

// Host wrapper for the per-channel affine forward pass.
extern "C" void neuralops_cuda_conv2d_scale_fwd(
    const float *in_act,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    const float *scale,
    const float *bias,
    float *out_act,
    cudaStream_t stream)
{
  int n = spatial_dim * num_channels * batch_size;
  conv_diag_affine_fwd_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      in_act, spatial_dim, num_channels, batch_size, scale, bias, out_act);
}

// Per-channel affine backward: computes in_delta = out_delta * scale[c] and
// accumulates d(scale), d(bias) per channel. Each 32-lane warp strides over a
// 16*32-element slice of the spatial dim; partial sums are combined through a
// bank-padded shared-memory pairwise reduction, then one atomicAdd per warp.
__global__ void conv_diag_affine_bwd_batch_kernel(
    const float *in_act,
    int spatial_dim,
    int num_channels,
    int batch_size,
    const float *out_delta,
    const float *scale,
    float *scale_grad,
    float *bias_grad,
    float *in_delta)
{
  // +32 pad words absorb the OFFSET_BANK index expansion for 1024 threads.
  __shared__ float scale_grad_cache[1024+32];
  __shared__ float bias_grad_cache[1024+32];
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int bank_idx = OFFSET_BANK(threadIdx.x);
  // number of 16*32-element slices needed to cover the spatial dim
  int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
  int warp_idx = idx % 32;
  int c = (idx / 32) % num_channels;
  // first spatial element this lane processes within its slice
  int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
  int batch_idx = idx / (32 * num_channels * block_spatial_dim);
  if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
    float gamma = scale[c];
    float d_gamma = 0.0f;
    float d_beta = 0.0f;
    int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
    int u_limit = min(spatial_dim, u0 + 16*32);
    // Lane-strided loop over this warp's slice of the spatial dimension.
    for (int u = u0; u < u_limit; u += 32) {
      int i = i0 + u;
      float dy = out_delta[i];
      d_gamma += dy * in_act[i];
      d_beta += dy;
      in_delta[i] = dy * gamma;
      //in_delta[i] += dy * gamma;
      //atomicAdd(&in_delta[i], dy * gamma);
    }
    scale_grad_cache[bank_idx] = d_gamma;
    bias_grad_cache[bank_idx] = d_beta;
  } else {
    // Inactive threads contribute zero so the reduction below stays valid.
    scale_grad_cache[bank_idx] = 0.0f;
    bias_grad_cache[bank_idx] = 0.0f;
  }
  __syncthreads();
  // Pairwise tree reduction over the 32 lanes of each warp (strides 1..16).
  if (c < num_channels && batch_idx < batch_size) {
    if (threadIdx.x % 2 == 0) {
      scale_grad_cache[bank_idx] += scale_grad_cache[bank_idx+1];
      bias_grad_cache[bank_idx] += bias_grad_cache[bank_idx+1];
    }
  }
  __syncthreads();
  if (c < num_channels && batch_idx < batch_size) {
    if (threadIdx.x % 4 == 0) {
      scale_grad_cache[bank_idx] += scale_grad_cache[bank_idx+2];
      bias_grad_cache[bank_idx] += bias_grad_cache[bank_idx+2];
    }
  }
  __syncthreads();
  if (c < num_channels && batch_idx < batch_size) {
    if (threadIdx.x % 8 == 0) {
      scale_grad_cache[bank_idx] += scale_grad_cache[bank_idx+4];
      bias_grad_cache[bank_idx] += bias_grad_cache[bank_idx+4];
    }
  }
  __syncthreads();
  if (c < num_channels && batch_idx < batch_size) {
    if (threadIdx.x % 16 == 0) {
      scale_grad_cache[bank_idx] += scale_grad_cache[bank_idx+8];
      bias_grad_cache[bank_idx] += bias_grad_cache[bank_idx+8];
    }
  }
  __syncthreads();
  if (c < num_channels && batch_idx < batch_size) {
    // Lane 0 of each warp folds in the last half and publishes one atomic
    // per (warp, channel) pair.
    if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
      float d_gamma = scale_grad_cache[bank_idx] + scale_grad_cache[bank_idx+16];
      atomicAdd(&scale_grad[c], d_gamma);
      float d_beta = bias_grad_cache[bank_idx] + bias_grad_cache[bank_idx+16];
      atomicAdd(&bias_grad[c], d_beta);
    }
  }
}

// Host wrapper for the per-channel affine backward pass: launches one warp
// per (channel, spatial slice, batch) combination.
// NOTE(review): scale_grad / bias_grad are accumulated with atomicAdd — the
// caller presumably zeroes them beforehand; confirm at the call site.
extern "C" void neuralops_cuda_conv2d_scale_bwd(
    const float *in_act,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    const float *out_delta,
    const float *scale,
    float *scale_grad,
    float *bias_grad,
    float *in_delta,
    cudaStream_t stream)
{
  int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
  int n = 32 * num_channels * block_spatial_dim * batch_size;
  conv_diag_affine_bwd_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      in_act, spatial_dim, num_channels, batch_size, out_delta, scale, scale_grad, bias_grad, in_delta);
}

// R-operator (forward-mode directional derivative) of the per-channel affine:
// r_y = scale[c] * r_x + r_scale[c] * x + r_bias[c], one thread per element.
__global__ void conv_scale_rfwd_kernel(
    const float *in_val,
    uint32_t spatial_dim,
    uint32_t num_channels,
    uint32_t batch_size,
    const float *in_r_val,
    const float *scale,
    const float *scale_r_dir,
    const float *bias_r_dir,
    float *out_r_val)
{
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  uint32_t u = idx % spatial_dim;
  uint32_t c = (idx / spatial_dim) % num_channels;
  uint32_t batch_idx = idx / (spatial_dim * num_channels);
  if (u < spatial_dim && c < num_channels && batch_idx < batch_size) {
    float alpha = scale[c];
    float r_alpha = scale_r_dir[c];
    float r_beta = bias_r_dir[c];
    float r_y = alpha * in_r_val[idx] + r_alpha * in_val[idx] + r_beta;
    out_r_val[idx] = r_y;
  }
}

// Host wrapper for the R-operator forward pass.
extern "C" void neuralops_cuda_conv_scale_rfwd(
    const float *in_val,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    const float *in_r_val,
    const float *scale,
    const float *scale_r_dir,
    const float *bias_r_dir,
    float *out_r_val,
    cudaStream_t stream)
{
  uint32_t n = spatial_dim * num_channels * batch_size;
  conv_scale_rfwd_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      in_val, spatial_dim, num_channels, batch_size, in_r_val, scale, scale_r_dir, bias_r_dir, out_r_val);
}
8,043
#include "includes.h"

// Inverse Winograd output transform (name suggests A^T * I * A) fused with a
// per-channel scale, bias and ReLU for a 128-channel layer.
// Launch layout inferred from the indexing: blockIdx.x/y pick a 4x4 group of
// output tiles, blockIdx.z is the channel (stride 128), threadIdx.x/y in [0,6)
// address one element of the 6x6 Winograd tile.
// Requires at least 36 floats of dynamic shared memory.
__global__ void kernel_128_winograd_AtIA(float *pInputs, float *pBiases, float *pScales, float *pOutputs)
{
  int Tilex = blockIdx.x, Tiley = blockIdx.y, Iny = threadIdx.y, kz = blockIdx.z, Inx = threadIdx.x;
  // Flattened row-major position of this thread inside the 6x6 tile.
  int c_input = Inx*6 + Iny;

  __shared__ float bias, scale;
  extern __shared__ float input[];

  // Stage this (tile, channel) 6x6 input tile into shared memory.
  input[c_input] = pInputs[c_input*16*128 + (Tilex*4+Tiley)*128 + kz];
  // Every thread writes the same value here, so the write race is benign.
  bias = pBiases[kz];
  scale = pScales[kz];
  __syncthreads();

  // First pass: combine along the Inx (row) axis. Rows 4 and 5 of the
  // intermediate are left as 0 and are never read back in the second pass.
  float tmp = 0;
  switch(Inx) {
  case 0: tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny]; break;
  case 1: tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny]; break;
  case 2: tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny]; break;
  case 3: tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny]; break;
  }
  __syncthreads();

  // Publish the intermediate back to shared memory for the second pass.
  input[c_input] = tmp;
  __syncthreads();

  // Only rows 0..3 produce output; the Tilex == 3 edge tile produces only
  // rows 0..1 — presumably the right/bottom image border, TODO confirm.
  if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;

  // Second pass: combine along the Iny (column) axis, then apply the
  // per-channel affine transform and ReLU before scattering to global memory.
  int x;
  float o;
  switch(Iny) {
  case 0:
    x = Inx*6;
    o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4])+ bias;
    pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*128 + kz] = o > 0 ? o : 0;
    break;
  case 1:
    x = Inx*6;
    o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
    pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*128 + kz] = o > 0 ? o : 0;
    break;
  case 2:
    // Tiley == 3 edge tiles emit only the first two columns (image border).
    if (Tiley == 3) break;
    x = Inx*6;
    o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
    pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*128 + kz] = o > 0 ? o : 0;
    break;
  case 3:
    if (Tiley == 3) break;
    x = Inx*6;
    o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
    pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*128 + kz] = o > 0 ? o : 0;
    break;
  }
}
8,044
#include <stdio.h>
#include <stdlib.h>   /* malloc/free — the original relied on a transitive include */
#include <cuda_runtime.h>

// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, n).
__global__ void vecAddKernel(float* A, float* B, float* C, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];
}

// Host wrapper: copies A and B to the device, launches the kernel,
// copies the result back into C and prints every element.
void vecAdd(float* A, float* B, float* C, int n)
{
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;

    cudaMalloc((void **)&d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_C, size);

    // Integer ceiling division instead of the former ceil(n/256.00):
    // avoids the implicit double->int conversion in the launch config.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    if (blocks > 0) {
        vecAddKernel<<<blocks, threads>>>(d_A, d_B, d_C, n);
        // Launch-configuration errors are only visible via cudaGetLastError().
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    }

    // A blocking cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; ++i)
        printf("C[%d] = %f\n", i, C[i]);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

int main()
{
    int n = 10;
    float *A = (float*)malloc(n * sizeof(float));
    float *B = (float*)malloc(n * sizeof(float));
    float *C = (float*)malloc(n * sizeof(float));

    for (int i = 0; i < n; ++i) {
        A[i] = 1;
        B[i] = 1;
        C[i] = 0;
    }

    vecAdd(A, B, C, n);

    // The original leaked the three host buffers.
    free(A);
    free(B);
    free(C);
    return 0;
}
8,045
/*******************************************************************************
* PROGRAM: canny_edge_detector
* FILE: x_y_calc.cu
* NAME: Vuong Pham-Duy
*       Faculty of Computer Science and Technology
*       Ho Chi Minh University of Technology, Viet Nam
*       vuongpd95@gmail.com
* DATE: 11/10/2016
*******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#define VERBOSE 1

__global__ void derrivative_x_y_kernel(int rows, int cols, int blockSize,
    short int *d_smoothedim, short int *d_delta_x, short int *d_delta_y);
__global__ void magnitude_x_y_kernel(int rows, int cols, int blockSize,
    short int *d_magnitude, short int *d_delta_x, short int *d_delta_y);

/*******************************************************************************
* PROCEDURE: x_y_calc
* PURPOSE: calculate delta_x, delta_y and magnitude of the image by launching
*          the two kernels below on device buffers already allocated by the
*          caller (all pointers are device pointers, passed by address).
* NAME: Vuong Pham-duy
* DATE: 10/11/2016
*******************************************************************************/
void x_y_calc(int rows, int cols, int blockSize, int gridSize,
    short int **d_delta_x, short int **d_delta_y,
    short int **d_smoothedim, short int **d_magnitude)
{
    /* Compute the first derivative in the x and y directions. */
    if (VERBOSE) printf("Computing the X and Y first derivatives.\n");
    derrivative_x_y_kernel<<<gridSize, blockSize>>>(rows, cols, blockSize,
        (*d_smoothedim), (*d_delta_x), (*d_delta_y));

    /* Compute the magnitude of the gradient. */
    if (VERBOSE) printf("Computing the magnitude of the gradient.\n");
    magnitude_x_y_kernel<<<gridSize, blockSize>>>(rows, cols, blockSize,
        (*d_magnitude), (*d_delta_x), (*d_delta_y));
}

/*******************************************************************************
* PROCEDURE: derrivative_x_y_kernel
* PURPOSE: Compute the first derivative of the image in both the x any y
* directions. The differential filters that are used are:
*
*                                         -1
*        dx = -1 0 +1     and     dy =     0
*                                         +1
*
* One thread per pixel; one-sided differences are used at the borders so no
* pixels are lost.
* NAME: Vuong Pham-duy
* DATE: 10/11/2016
*******************************************************************************/
__global__ void derrivative_x_y_kernel(int rows, int cols, int blockSize,
    short int *d_smoothedim, short int *d_delta_x, short int *d_delta_y)
{
    /* This thread processes element number img_idx of the image. */
    int img_idx = blockIdx.x * blockSize + threadIdx.x;
    if (img_idx >= (rows * cols)) return;
    int r = img_idx / cols;         /* row position, range [0, rows - 1] */
    int c = img_idx - r * cols;     /* col position, range [0, cols - 1] */

    /* x-derivative, with one-sided differences at the left/right borders. */
    if (c > 0 && c < cols - 1)
        d_delta_x[img_idx] = d_smoothedim[img_idx + 1] - d_smoothedim[img_idx - 1];
    else if (c == 0)
        d_delta_x[img_idx] = d_smoothedim[img_idx + 1] - d_smoothedim[img_idx];
    else if (c == cols - 1)
        d_delta_x[img_idx] = d_smoothedim[img_idx] - d_smoothedim[img_idx - 1];

    /* y-derivative, with one-sided differences at the top/bottom borders.
     * BUG FIX: the interior test compared the row index r against "cols";
     * for non-square images that either left d_delta_y unwritten for some
     * rows (rows > cols) or read past the end of the image (rows < cols). */
    if (r > 0 && r < rows - 1)
        d_delta_y[img_idx] = d_smoothedim[img_idx + cols] - d_smoothedim[img_idx - cols];
    else if (r == 0)
        d_delta_y[img_idx] = d_smoothedim[img_idx + cols] - d_smoothedim[img_idx];
    else if (r == rows - 1)
        d_delta_y[img_idx] = d_smoothedim[img_idx] - d_smoothedim[img_idx - cols];
}

/*******************************************************************************
* PROCEDURE: magnitude_x_y_kernel
* PURPOSE: Compute the magnitude of the gradient. This is the square root of
* the sum of the squared derivative values, rounded to nearest (the +0.5).
* NAME: Vuong Pham-duy
* DATE: 10/11/2016
*******************************************************************************/
__global__ void magnitude_x_y_kernel(int rows, int cols, int blockSize,
    short int *d_magnitude, short int *d_delta_x, short int *d_delta_y)
{
    /* This thread processes element number img_idx of the image. */
    int img_idx = blockIdx.x * blockSize + threadIdx.x;
    if (img_idx >= (rows * cols)) return;

    int sq1, sq2;
    /* Widen to int before squaring so short*short cannot overflow. */
    sq1 = (int)d_delta_x[img_idx] * (int)d_delta_x[img_idx];
    sq2 = (int)d_delta_y[img_idx] * (int)d_delta_y[img_idx];
    d_magnitude[img_idx] = (short)(0.5 + sqrt((float)sq1 + (float)sq2));
}
8,046
#include "includes.h"

// Parallel sum of gdata[0..N) into *out: each block tree-reduces its
// partial sums in shared memory, then lane 0 adds one value atomically.
// Assumes blockDim.x == BLOCK_SIZE and blockDim.x is a power of two.
__global__ void reduce_a(float *gdata, float *out){
  __shared__ float partial[BLOCK_SIZE];

  const int lane = threadIdx.x;

  // Grid-stride accumulation of this thread's share of the input.
  float acc = 0.0f;
  for (size_t i = threadIdx.x + blockDim.x * blockIdx.x; i < N; i += gridDim.x * blockDim.x)
    acc += gdata[i];
  partial[lane] = acc;

  // Tree reduction: barrier before every halving step so all writes from the
  // previous round are visible to the readers of this round.
  for (unsigned int span = blockDim.x / 2; span > 0; span >>= 1) {
    __syncthreads();
    if (lane < span)
      partial[lane] += partial[lane + span];
  }

  // One atomic per block keeps contention on *out low.
  if (lane == 0)
    atomicAdd(out, partial[0]);
}
8,047
#include <cuda.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <cmath>
#include <climits>
#include <cfloat>   /* FLT_MAX for the distance fix below */
#include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

#define MAX_THREAD_PER_BLOCK 512
#define DEBUG_PRINT

/* PARALLEL NN - VERSION 2 */
typedef unsigned short int usint;

const usint num_dimensions = 16;
const usint numPointsTest = 1000;
const usint numPointsTrain = 19000;
const usint streamCount = 4;

// One 16-dimensional point with unsigned-short coordinates.
struct Coordinates {
    usint points[num_dimensions];
};

// Euclidean distance between two points (differences widened to int so the
// subtraction of unsigned values cannot wrap).
__device__ float getDistance(const Coordinates & coord1, const Coordinates & coord2)
{
    float square_sum = 0;
    for (int i = 0; i < num_dimensions; i++) {
        const int c1 = coord1.points[i];
        const int c2 = coord2.points[i];
        square_sum += (c1 - c2) * (c1 - c2);
    }
    return sqrt(square_sum);
}

// Brute-force 1-NN: one thread per test point scans the full training set
// and records the index of the closest training point.
__global__ void nearestNeighbor(Coordinates * trainCoords, Coordinates * testCoords, const usint sizeTest, const usint sizeTrain, usint * nearestNeighbors)
{
    const usint threadId = blockIdx.x*blockDim.x + threadIdx.x;
    if (threadId < sizeTest) {
        usint nearestNeighbor = 0;
        // BUG FIX: the running minimum must be a float. The original kept it
        // in a usint, which truncated every distance on assignment and could
        // overflow outright (16 usint dimensions allow distances up to
        // ~262140, far above USHRT_MAX).
        float nearestDistance = FLT_MAX;
        for (int trainCoordInd = 0; trainCoordInd < sizeTrain; trainCoordInd++) {
            float currentDistance = getDistance(trainCoords[trainCoordInd], testCoords[threadId]);
            if (currentDistance < nearestDistance) {
                nearestNeighbor = trainCoordInd;
                nearestDistance = currentDistance;
            }
        }
        nearestNeighbors[threadId] = nearestNeighbor;
    }
}

// Print-and-exit error helper; returns true when no error occurred.
bool checkError(const cudaError_t & error, const char * msg = "")
{
    if (error != cudaSuccess) {
        printf("CUDA ERROR: %s\n", msg);
        cout << error << endl;
        exit(1);
    }
    return true;
}

int main()
{
    // 1 - INITIALIZE READ STREAMS
    const char * testFile = "test.txt";
    const char * trainFile = "train.txt";
    FILE * test_is = fopen(testFile, "r"), * train_is = fopen(trainFile, "r");
    if (!test_is) { cerr << "Cannot open " << testFile << endl; exit(1); }
    if (!train_is) { cerr << "Cannot open " << trainFile << endl; exit(1); }

    cudaSetDevice(0); // initialize the CUDA context up front
    cout << "\t--------------------\n";
    chrono::high_resolution_clock::time_point begin = chrono::high_resolution_clock::now(), temp, end;

    // 2 - SET EXECUTION PARAMETERS
    cudaStream_t streams[streamCount];
    cudaError_t cudaError;
    usint numThreadsPerBlock = numPointsTest;
    usint numBlocks = 1;
    if (numPointsTest > MAX_THREAD_PER_BLOCK) {
        numBlocks = std::ceil(static_cast<double>(numPointsTest) / MAX_THREAD_PER_BLOCK);
        numThreadsPerBlock = MAX_THREAD_PER_BLOCK;
    }
    // Each stream handles numPointsTest/streamCount points with the same
    // block count, so the per-block thread count is divided by streamCount.
    numThreadsPerBlock /= streamCount;
    cout << "Kernels will be called with " << numBlocks << " blocks with " << numThreadsPerBlock << " threads each\n";

    // 3 - READ TRAIN COORDINATES FROM FILE STREAMS
    // device pointers
    Coordinates * d_testCoordinates[streamCount], *d_trainCoordinates;
    usint * d_nearestNeighbors[streamCount];
    // host (pinned) pointers
    Coordinates * h_testCoordinates[streamCount], *h_trainCoordinates;
    usint * h_nearestNeighbors[streamCount];

    cudaError = cudaMallocHost((void**)&h_trainCoordinates, numPointsTrain * sizeof(Coordinates));
    checkError(cudaError, "cudamallochost - h_trainCoordinates");

    // BUG FIX: the fields are unsigned short, so the conversion must be %hu;
    // the original used %d, which writes 4 bytes into each 2-byte slot (UB).
    for (int i = 0; i < numPointsTrain; i++) {
        fscanf(train_is,
            "%hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu\n",
            &h_trainCoordinates[i].points[0], &h_trainCoordinates[i].points[1],
            &h_trainCoordinates[i].points[2], &h_trainCoordinates[i].points[3],
            &h_trainCoordinates[i].points[4], &h_trainCoordinates[i].points[5],
            &h_trainCoordinates[i].points[6], &h_trainCoordinates[i].points[7],
            &h_trainCoordinates[i].points[8], &h_trainCoordinates[i].points[9],
            &h_trainCoordinates[i].points[10], &h_trainCoordinates[i].points[11],
            &h_trainCoordinates[i].points[12], &h_trainCoordinates[i].points[13],
            &h_trainCoordinates[i].points[14], &h_trainCoordinates[i].points[15]);
    }
    cout << "done reading training coordinates to host pinned memory" << endl;

    // copy train coordinates to device (shared by all streams)
    cudaError = cudaMalloc((void**)&d_trainCoordinates, numPointsTrain * sizeof(Coordinates));
    checkError(cudaError, "cudaMalloc - d_trainCoordinates");
    cudaError = cudaMemcpy(d_trainCoordinates, h_trainCoordinates,
        numPointsTrain * sizeof(Coordinates), cudaMemcpyHostToDevice);
    checkError(cudaError, "cudaMemcpyAsync - d_trainCoordinates");

    chrono::high_resolution_clock::time_point kernel_start = chrono::high_resolution_clock::now();
    for (usint stream = 0; stream < streamCount; stream++) {
        // 1 - create stream
        cudaStreamCreate(&streams[stream]);

        // 2 - pinned host memory for this stream's results and test coordinates
        cudaError = cudaMallocHost((void**)&h_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint));
        checkError(cudaError, "cudamallochost - h_nearestneighbors");
        cudaError = cudaMallocHost((void**)&h_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates));
        checkError(cudaError, "cudamallochost - h_testCoordinates");

        // 3 - read this stream's slice of the test points (same %hu fix)
        for (int i = 0; i < numPointsTest / streamCount; i++) {
            fscanf(test_is,
                "%hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu, %hu",
                &h_testCoordinates[stream][i].points[0], &h_testCoordinates[stream][i].points[1],
                &h_testCoordinates[stream][i].points[2], &h_testCoordinates[stream][i].points[3],
                &h_testCoordinates[stream][i].points[4], &h_testCoordinates[stream][i].points[5],
                &h_testCoordinates[stream][i].points[6], &h_testCoordinates[stream][i].points[7],
                &h_testCoordinates[stream][i].points[8], &h_testCoordinates[stream][i].points[9],
                &h_testCoordinates[stream][i].points[10], &h_testCoordinates[stream][i].points[11],
                &h_testCoordinates[stream][i].points[12], &h_testCoordinates[stream][i].points[13],
                &h_testCoordinates[stream][i].points[14], &h_testCoordinates[stream][i].points[15]);
        }

        // 4 - device memory for this stream's inputs and results
        cudaError = cudaMalloc((void**)&d_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates));
        checkError(cudaError, "cudaMalloc - d_testCoordiantes");
        cudaError = cudaMalloc((void**)&d_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint));
        checkError(cudaError, "cudaMalloc - d_nearestNeighbors");

        // 5 - async H2D copy of this stream's test coordinates
        temp = chrono::high_resolution_clock::now();
        cudaError = cudaMemcpyAsync(d_testCoordinates[stream], h_testCoordinates[stream],
            (numPointsTest / streamCount) * sizeof(Coordinates), cudaMemcpyHostToDevice, streams[stream]);
        checkError(cudaError, "cudaMemcpy - d_testCoordinates");
        end = chrono::high_resolution_clock::now();
        cout << "data copied to device memory ["
             << chrono::duration_cast<chrono::milliseconds>(end - temp).count() << " ms]\n"
             << "executing kernel with " << numBlocks << " blocks with "
             << numThreadsPerBlock << " threads each" << endl;

        // 6 - invoke kernel and async D2H copy on the same stream
        nearestNeighbor<<< numBlocks, numThreadsPerBlock, 0, streams[stream] >>>(
            d_trainCoordinates, d_testCoordinates[stream],
            numPointsTest / streamCount, numPointsTrain, d_nearestNeighbors[stream]);
        cudaError = cudaMemcpyAsync(h_nearestNeighbors[stream], d_nearestNeighbors[stream],
            (numPointsTest / streamCount) * sizeof(usint), cudaMemcpyDeviceToHost, streams[stream]);
        checkError(cudaError, "cudaMemcpy - h_nearestNeighbors");
    }

    // Wait for the GPU to finish and surface any launch error.
    cudaError = cudaGetLastError();
    checkError(cudaError, "before deviceSync() error!");
    cudaDeviceSynchronize();
    end = chrono::high_resolution_clock::now();
    cout << "Computation + read test data: "
         << chrono::duration_cast<chrono::milliseconds>(end - kernel_start).count() << " ms\n";
    cout << "\t--------------------\n";

    // Write results stream by stream (streams partition the test set in order).
    ofstream os("output.txt");
    for (int stream = 0; stream < streamCount; stream++) {
        for (int i = 0; i < numPointsTest / streamCount; i++) {
            os << h_nearestNeighbors[stream][i] << endl;
        }
    }

    // BUG FIX: release streams, pinned host memory and device memory; close files.
    for (usint stream = 0; stream < streamCount; stream++) {
        cudaStreamDestroy(streams[stream]);
        cudaFreeHost(h_nearestNeighbors[stream]);
        cudaFreeHost(h_testCoordinates[stream]);
        cudaFree(d_testCoordinates[stream]);
        cudaFree(d_nearestNeighbors[stream]);
    }
    cudaFreeHost(h_trainCoordinates);
    cudaFree(d_trainCoordinates);
    fclose(test_is);
    fclose(train_is);

    end = chrono::high_resolution_clock::now();
    cout << "\t--------------------\nTotal time: "
         << chrono::duration_cast<chrono::milliseconds>(end - begin).count()
         << " ms\nterminating\n";
    return 0;
}
8,048
// CUDA headers
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cstdlib>   // EXIT_SUCCESS/EXIT_FAILURE/rand — the original relied on a transitive include

using namespace std;

const int N = 100;            // number of elements to sum
const int BLOCK_data = 3;     // number of blocks
const int THREAD_data = 10;   // threads per block

// Probe for a CUDA-capable device (compute capability >= 1.x) and select it.
// Returns EXIT_FAILURE (non-zero) when no usable device exists, so callers
// can treat a truthy return as failure; EXIT_SUCCESS otherwise.
bool InitCUDA()
{
    int deviceCount;
    cudaGetDeviceCount (&deviceCount);   // how many devices are visible
    if (deviceCount == 0) {
        cout << "找不到设备" << endl;
        return EXIT_FAILURE;
    }
    int i;
    for (i=0; i<deviceCount; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop,i)==cudaSuccess)  // query device properties
        {
            if (prop.major>=1)   // has CUDA compute capability
            {
                break;
            }
        }
    }
    if (i==deviceCount) {
        cout << "找不到支持 CUDA 计算的设备" << endl;
        return EXIT_FAILURE;
    }
    cudaSetDevice(i);   // select the device found above
    return EXIT_SUCCESS;
}

// Each thread sums a strided slice of data[]; per-thread partial sums are
// written to result[] for the host to combine.
__global__ static void Sum (int *data,int *result)
{
    const int tid = threadIdx.x;   // thread index within the block
    const int bid = blockIdx.x;    // block index
    int sum = 0;
    // Stride of BLOCK_data*THREAD_data so the 30 threads cover all N elements.
    for (int i=bid*THREAD_data+tid; i<N; i+=BLOCK_data*THREAD_data) {
        sum += data[i];
    }
    // result[] holds one partial sum per thread.
    result[bid*THREAD_data+tid] = sum;
}

int main ()
{
    // Initialize the CUDA environment (truthy return means failure).
    if (InitCUDA()) {
        return EXIT_FAILURE;
    }
    cout << "成功建立 CUDA 计算环境" << endl << endl;

    // Build, initialize and print the test array.
    int *data = new int [N];
    cout << "测试矩阵: " << endl;
    for (int i=0; i<N; i++) {
        data[i] = rand()%10;
        cout << data[i] << " ";
        if ((i+1)%10 == 0) cout << endl;
    }
    cout << endl;

    int *gpudata, *result;
    // Device buffers for the input and the per-thread partial sums.
    cudaMalloc ((void**)&gpudata, sizeof(int)*N);
    cudaMalloc ((void**)&result, sizeof(int)*BLOCK_data*THREAD_data);
    cudaMemcpy (gpudata, data, sizeof(int)*N, cudaMemcpyHostToDevice);

    // Launch the kernel.
    Sum<<<BLOCK_data,THREAD_data,0>>> (gpudata,result);

    // Fetch the partial sums back to the host (cudaMemcpy synchronizes).
    int *sumArray = new int[THREAD_data*BLOCK_data];
    cudaMemcpy (sumArray, result, sizeof(int)*THREAD_data*BLOCK_data, cudaMemcpyDeviceToHost);

    cudaFree (gpudata);
    cudaFree (result);

    // Combine the per-thread partial sums on the CPU.
    int final_sum=0;
    for (int i=0; i<THREAD_data*BLOCK_data; i++) {
        final_sum += sumArray[i];
    }
    cout << "GPU 求和结果为: " << final_sum << endl;

    // CPU reference sum for verification.
    final_sum = 0;
    for (int i=0; i<N; i++) {
        final_sum += data[i];
    }
    cout << "CPU 求和结果为: " << final_sum << endl;

    // BUG FIX: the original leaked both heap arrays.
    delete[] data;
    delete[] sumArray;
    return 0;
}
8,049
// CUDA programming
// Exercise n. 03
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>   /* EXIT_SUCCESS — the original relied on a transitive include */

#define BLOCKS 8
#define THREADS 1

// Prototypes
__global__ void add(int *a, int *b, int *c);
__host__ void ints(int *m, int N);
__host__ void print_array(int *a, int N);

int main(void)
{
    int *a, *b, *c;          // host copies of a, b, c
    int *d_a, *d_b, *d_c;    // device copies of a, b, c
    int N = BLOCKS * THREADS;
    int size = N * sizeof(int);

    // Allocate space for host copies of a, b, c
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);

    // Setup input values
    ints(a, N);
    ints(b, N);

    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Call the kernel on the GPU: the grid covers exactly N elements
    add<<< BLOCKS, THREADS >>>(d_a, d_b, d_c);

    // Copy result back to host (blocking copy also synchronizes with the kernel)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // Check the result
    print_array(a, N);
    print_array(b, N);
    print_array(c, N);

    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return(EXIT_SUCCESS);
}

// Vector addition (on device).
// Generalized from c[blockIdx.x] to a global thread index so the kernel stays
// correct if THREADS is ever raised above 1; with the current <<<8, 1>>>
// launch both forms compute identical indices. The launch must cover exactly
// BLOCKS*THREADS == N elements.
__global__ void add(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}

// Initialise m[i] = i
__host__ void ints(int *m, int N)
{
    for (int i = 0; i < N; i++)
        m[i] = i;
}

// Print the elements of the array, tab-separated, on one line
__host__ void print_array(int *a, int N)
{
    for (int i = 0; i < N; i++) {
        printf("%d\t", a[i]);
    }
    printf("\n");
}
8,050
#include <cuda_runtime.h>
//#include <iostream>
#include <cstdio>

// Minimal device-side printf demo. Note: a kernel cannot take a host char
// array directly (it would need a cudaMemcpy), and std::cout is not usable
// in device code — printf is the only supported console output here.
__global__ void test()
{
    printf ("Hello\n");
}

int main(){
    // Three threads in one block each print one line.
    test<<<1, 3>>>();
    // Device printf output is only flushed once the host synchronizes.
    cudaDeviceSynchronize();
    return 0;
}
8,051
#include <cstdio> #include <cstdlib> #include <random> #include <sys/time.h> #include "kernel.cuh" #define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } int loop_exe = 1; void matmul_serial (const float *A, const float *B, float *C, const int len) { printf("[CPU] Kernel start..\n"); timeval st, ed; gettimeofday(&st, NULL); // Main body for (int i=0; i<len; i++) { for (int j=0; j<len; j++) { float sum = 0; for (int k=0; k<len; k++) { sum += A[i*len+k]*B[k*len+j]; } C[i*len+j] = sum; } } // End of main body gettimeofday(&ed, NULL); float time = (ed.tv_sec - st.tv_sec) + ((ed.tv_usec-st.tv_usec)*1e-6); float gops = 1.0*len*len*len*1e-9; printf(" Total number of floating point multiplications : %.2fGops\n", gops); printf(" Elaped time: %.4f\n", time); printf(" GFLOPS : %.4f [Avg. 
of %d time(s)]\n", gops*loop_exe/time, loop_exe); } void matmul_cuda_basic (const float *A, const float *B, float *C, const int len) { /*** CUDA implementataion without any optimization methods **/ const int num_threads = 16; const dim3 dim_threads(num_threads, num_threads); const dim3 dim_blocks((len+num_threads-1)/num_threads, (len+num_threads-1)/num_threads); printf("[GPU] Basic kernel start..\n"); printf(" Grid size: [(%d, %d), (%d, %d)]\n", dim_blocks.y, dim_blocks.x, dim_threads.y, dim_threads.x); /*** Memcpy H to D ***/ float *d_A, *d_B, *d_C; cudaErrChk (cudaMalloc ((void **)&d_A, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_B, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_C, sizeof(float)*len*len)); cudaErrChk (cudaMemcpy (d_A, A, sizeof(float)*len*len, cudaMemcpyHostToDevice)); cudaErrChk (cudaMemcpy (d_B, B, sizeof(float)*len*len, cudaMemcpyHostToDevice)); float gops = 1.0*len*len*len*1e-9*loop_exe; float msec_total = 0.0f; cudaEvent_t start, stop; cudaErrChk (cudaEventCreate(&start)); cudaErrChk (cudaEventCreate(&stop)); cudaErrChk (cudaEventRecord(start, NULL)); // Main body for (int i=0; i<loop_exe; i++) { matmul_basic<<<dim_blocks, dim_threads>>>(d_A, d_B, d_C, len); cudaErrChk (cudaDeviceSynchronize ()) cudaErrChk( cudaGetLastError() ); } // End of main body cudaErrChk (cudaEventRecord(stop, NULL)); cudaErrChk (cudaEventSynchronize(stop)); cudaErrChk (cudaEventElapsedTime(&msec_total, start, stop)); printf(" Total number of floating point multiplications : %.2f Gops\n", gops); printf(" Elaped time: %.4f msec\n", msec_total); printf(" GFLOPS : %.4f [Avg. 
of %d time(s)]\n", gops/(msec_total*1e-3), loop_exe); cudaErrChk (cudaMemcpy(C, d_C, sizeof(float)*len*len, cudaMemcpyDeviceToHost)); cudaErrChk (cudaDeviceSynchronize ()) cudaErrChk (cudaFree (d_A)); cudaErrChk (cudaFree (d_B)); cudaErrChk (cudaFree (d_C)); } void matmul_cuda_shared (const float *A, const float *B, float *C, const int len) { /*** CUDA implementataion without any optimization methods **/ const int len_tile = 16; const dim3 dim_threads(len_tile, len_tile); const dim3 dim_blocks((len+len_tile-1)/len_tile, (len+len_tile-1)/len_tile); const int size_smem = 2*sizeof(float)*len_tile*len_tile; printf("[GPU] Kernel with shared memory start..\n"); printf(" Grid size: [(%d, %d), (%d, %d)]\n", dim_blocks.y, dim_blocks.x, dim_threads.y, dim_threads.x); printf(" Shared mem size: %.2fKB\n", size_smem/1024.0); /*** Memcpy H to D ***/ float *d_A, *d_B, *d_C; cudaErrChk (cudaMalloc ((void **)&d_A, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_B, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_C, sizeof(float)*len*len)); cudaErrChk (cudaMemcpy (d_A, A, sizeof(float)*len*len, cudaMemcpyHostToDevice)); cudaErrChk (cudaMemcpy (d_B, B, sizeof(float)*len*len, cudaMemcpyHostToDevice)); float gops = 1.0*len*len*len*1e-9*loop_exe; float msec_total = 0.0f; cudaEvent_t start, stop; cudaErrChk(cudaEventCreate(&start)); cudaErrChk(cudaEventCreate(&stop)); cudaErrChk(cudaEventRecord(start, NULL)); // Main body for (int i=0; i<loop_exe; i++) { matmul_tiled<<<dim_blocks, dim_threads, size_smem>>>(d_A, d_B, d_C, len, len_tile); cudaErrChk (cudaDeviceSynchronize ()) cudaErrChk( cudaGetLastError() ); } // End of main body cudaErrChk(cudaEventRecord(stop, NULL)); cudaErrChk(cudaEventSynchronize(stop)); cudaErrChk(cudaEventElapsedTime(&msec_total, start, stop)); printf(" Total number of floating point multiplications : %.2f Gops\n", gops); printf(" Elaped time: %.4f msec\n", msec_total); printf(" GFLOPS : %.4f [Avg. 
of %d time(s)]\n", gops/(msec_total*1e-3), loop_exe); cudaErrChk (cudaMemcpy(C, d_C, sizeof(float)*len*len, cudaMemcpyDeviceToHost)); cudaErrChk (cudaDeviceSynchronize ()) cudaErrChk (cudaFree (d_A)); cudaErrChk (cudaFree (d_B)); cudaErrChk (cudaFree (d_C)); } void matmul_cuda_shared_transposed (const float *A, const float *B, float *C, const int len) { /*** CUDA implementataion without any optimization methods **/ const int len_tile = 16; const dim3 dim_threads(len_tile, len_tile); const dim3 dim_blocks((len+len_tile-1)/len_tile, (len+len_tile-1)/len_tile); const int size_smem = 2*sizeof(float)*len_tile*len_tile; printf("[GPU] Kernel with shared transposed start..\n"); printf(" Grid size: [(%d, %d), (%d, %d)]\n", dim_blocks.y, dim_blocks.x, dim_threads.y, dim_threads.x); printf(" Shared mem size: %.2fKB\n", size_smem/1024.0); /*** Memcpy H to D ***/ float *d_A, *d_A_T, *d_B, *d_C; cudaErrChk (cudaMalloc ((void **)&d_A, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_A_T, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_B, sizeof(float)*len*len)); cudaErrChk (cudaMalloc ((void **)&d_C, sizeof(float)*len*len)); cudaErrChk (cudaMemcpy (d_A, A, sizeof(float)*len*len, cudaMemcpyHostToDevice)); transpose<<<dim_blocks, dim_threads>>>(d_A, d_A_T, len); cudaErrChk (cudaMemcpy (d_B, B, sizeof(float)*len*len, cudaMemcpyHostToDevice)); float gops = 1.0*len*len*len*1e-9*loop_exe; float msec_total = 0.0f; cudaEvent_t start, stop; cudaErrChk(cudaEventCreate(&start)); cudaErrChk(cudaEventCreate(&stop)); cudaErrChk(cudaEventRecord(start, NULL)); // Main body for (int i=0; i<loop_exe; i++) { matmul_tiled_transposed<<<dim_blocks, dim_threads, size_smem>>>(d_A_T, d_B, d_C, len, len_tile); cudaErrChk (cudaDeviceSynchronize ()) cudaErrChk( cudaGetLastError() ); } // End of main body cudaErrChk(cudaEventRecord(stop, NULL)); cudaErrChk(cudaEventSynchronize(stop)); cudaErrChk(cudaEventElapsedTime(&msec_total, start, stop)); printf(" Total number of floating point 
multiplications : %.2f Gops\n", gops); printf(" Elaped time: %.4f msec\n", msec_total); printf(" GFLOPS : %.4f [Avg. of %d time(s)]\n", gops/(msec_total*1e-3), loop_exe); cudaErrChk (cudaMemcpy(C, d_C, sizeof(float)*len*len, cudaMemcpyDeviceToHost)); cudaErrChk (cudaDeviceSynchronize ()) cudaErrChk (cudaFree (d_A)); cudaErrChk (cudaFree (d_A_T)); cudaErrChk (cudaFree (d_B)); cudaErrChk (cudaFree (d_C)); } /**************************************** ************** Host Code ************** ****************************************/ void h_initialize(float *mem, const int len) { for (int i=0; i<len; i++) { for (int j=0; j<len; j++) { mem[i*len+j] = (float)(rand()%1000); } } } bool h_test(const float *A, const float *B, const float *C, const int len) { printf("[TEST] Test start..\n"); for (int i=0; i<len; i++) { for (int j=0; j<len; j++) { float sum = 0; for (int k=0; k<len; k++) { sum += A[i*len+k]*B[k*len+j]; } if (sum != C[i*len+j]){ printf("C[%d][%d] = %.f != %f\n", i, j, C[i*len+j], sum); return false; } } } return true; } int main(int argc, char** argv) { /*** Program configuration ***/ printf("\n============================================\n"); printf("Matrix multiplication\n"); printf(" A * B = C\n"); printf(" arg : ./matmul [len] [Test:0,1] [loop_exe]\n"); printf("============================================\n\n"); int len = (int)1e+4; if (argc >= 2) len = atoi(argv[1]); if (argc >= 4) loop_exe = atoi(argv[3]); srand(0); /*** Data initialize ***/ float *A = (float *) malloc (len*len*sizeof(float)); float *B = (float *) malloc (len*len*sizeof(float)); float *C = (float *) calloc (len*len,sizeof(float)); h_initialize(A, len); h_initialize(B, len); printf("[Mem] Size of a matrix : [%d, %d]\n", len, len); printf("[Mem] Total size of matrices : %.3fGB\n", 3.0*len*len*sizeof(float)*1e-9); /*** Run a matmul ***/ // matmul_serial (A, B, C, len); matmul_cuda_basic (A, B, C, len); matmul_cuda_shared (A, B, C, len); matmul_cuda_shared_transposed (A, B, C, len); /*** Test 
the result ***/ if (argc >= 3 && atoi(argv[2]) == 1) { if (h_test (A, B, C, len) == true) { printf(" Test passed\n"); } else { printf(" [ERR] Test failed!!\n"); } } else { printf("[TEST] Test skipped..\n"); } /*** Finalize ***/ free (A); free (B); free (C); printf("============================================\n\n"); return 0; }
8,052
#include <stdio.h>
#include <stdlib.h>   /* malloc/free/exit — the original relied on a transitive include */
#include <math.h>

#define CUDA_CALL(c)                                                          \
  do {                                                                        \
    cudaError_t res = c;                                                      \
    if (res != cudaSuccess) {                                                 \
      fprintf(stderr, "error at line %d: %s \n", __LINE__,                    \
              cudaGetErrorString(res));                                       \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)

// Problem dimension
#define STENCIL_SIZE (16 * 1024 * 1024)
#define RADIUS (3)
#define NUM_CHANNELS (4)
#define BLOCK_SIZE (32)

// Given the pixel index and channel, return the position of the
// element in the 1D interleaved (AoS) array.
static __device__ __host__ int getIndex(int index, int channel)
{
  return index * NUM_CHANNELS + channel;
}

// Symmetric 1D weights; they sum to 16, matching the denominator below.
static __constant__ int weight[RADIUS * 2 + 1] = { 1, 2, 3, 4, 3, 2, 1 };
static __constant__ int denominator = 16;

// 1D weighted stencil over a 4-channel pixel stream. Border pixels
// (i < RADIUS or i >= numPixels - RADIUS) are intentionally not written.
__global__ void stencilKernel(unsigned char *in, int numPixels, unsigned char *out)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int radius, channel;
  int outLocal[NUM_CHANNELS] = { 0 };

  if (i < RADIUS) {
    return;
  }
  if (i >= numPixels - RADIUS) {
    return;
  }

  // Accumulate in int to avoid unsigned char overflow, then normalize.
  for (radius = -RADIUS; radius <= RADIUS; ++radius) {
    for (channel = 0; channel < NUM_CHANNELS; ++channel) {
      outLocal[channel] += (int)in[getIndex(i + radius, channel)] * weight[RADIUS + radius];
    }
  }
  for (channel = 0; channel < NUM_CHANNELS; ++channel) {
    out[getIndex(i, channel)] = (unsigned char)(outLocal[channel] / denominator);
  }
}

// Copies `in` to the device, runs the stencil, and copies the result to `out`.
static void stencilGpu(unsigned char *in, int numPixels, unsigned char *out)
{
  unsigned char *inGPU;
  unsigned char *outGPU;
  size_t arraySize;

  arraySize = numPixels * NUM_CHANNELS * sizeof(unsigned char);
  CUDA_CALL(cudaMalloc(&inGPU, arraySize));
  CUDA_CALL(cudaMalloc(&outGPU, arraySize));
  CUDA_CALL(cudaMemcpy(inGPU, in, arraySize, cudaMemcpyHostToDevice));
  // BUG FIX: the kernel never writes the RADIUS border pixels, so zero the
  // output buffer first; otherwise the border bytes copied back are garbage.
  CUDA_CALL(cudaMemset(outGPU, 0, arraySize));
  stencilKernel<<<ceil((float)numPixels / BLOCK_SIZE), BLOCK_SIZE>>>(inGPU, numPixels, outGPU);
  CUDA_CALL(cudaGetLastError());
  CUDA_CALL(cudaDeviceSynchronize());
  CUDA_CALL(cudaMemcpy(out, outGPU, arraySize, cudaMemcpyDeviceToHost));
  CUDA_CALL(cudaFree(inGPU));
  CUDA_CALL(cudaFree(outGPU));
}

int main()
{
  unsigned char *in;
  unsigned char *outGPU;
  size_t arraySize = STENCIL_SIZE * NUM_CHANNELS * sizeof(unsigned char);

  in = (unsigned char *)malloc(arraySize);
  outGPU = (unsigned char *)malloc(arraySize);
  if (in == NULL || outGPU == NULL) {
    fprintf(stderr, "Allocation failed\n");
    exit(EXIT_FAILURE);
  }

  // BUG FIX: the original passed an uninitialized buffer to the stencil;
  // fill it with a deterministic pattern instead.
  for (size_t i = 0; i < arraySize; ++i) {
    in[i] = (unsigned char)(i * 31u);
  }

  stencilGpu(in, STENCIL_SIZE, outGPU);

  free(in);
  free(outGPU);
  return 0;
}
8,053
#include <cstdio>
#include <cstring>
#include <time.h>
#include <vector>

#define QUEENS (16)

/*
 * Count N-queens completions. Rows 0-2 come from a host-precomputed list of
 * legal 3-queen prefixes (frontQueensPos: groups of 3 column indices);
 * rows 3-6 are taken from the 4D launch coordinates (blockIdx.x/y,
 * threadIdx.x/y); rows 7..QUEENS-1 are enumerated by an iterative
 * backtracking search. Each thread writes its solution count to
 * data[thisThread]; threads whose rows 3-6 conflict leave the
 * host-initialized 0 in place.
 *
 * Expected launch: grid (QUEENS, QUEENS), block (QUEENS, QUEENS).
 */
__global__ void countQueens(int* frontQueensPos, int* data, int* numFQP)
{
    int localResult = 0;
    int thisThread = ((blockIdx.x * gridDim.x + blockIdx.y) * gridDim.y + threadIdx.x) * blockDim.x + threadIdx.y;

    if (blockIdx.x >= QUEENS || blockIdx.y >= QUEENS ||
        threadIdx.x >= QUEENS || threadIdx.y >= QUEENS)
        return;

    /* FIX: fixed-size local array instead of device-side `new int[QUEENS]`,
     * which was never delete[]d and leaked device heap on every thread. */
    int queenPos[QUEENS];
    queenPos[3] = blockIdx.x;
    queenPos[4] = blockIdx.y;
    queenPos[5] = threadIdx.x;
    queenPos[6] = threadIdx.y;

    /* Reject placements where rows 3..6 already attack each other
     * (same column or either diagonal). */
    for (int i = 4; i <= 6; i++) {
        for (int j = 3; j < i; j++) {
            if ((queenPos[i] - i) == (queenPos[j] - j) ||
                (queenPos[i] + i) == (queenPos[j] + j) ||
                queenPos[i] == queenPos[j]) {
                return;
            }
        }
    }

    int totalFQP = numFQP[0] / 3;   /* numFQP holds 3 ints per prefix */
    for (int FQP_number = 0; FQP_number < totalFQP; FQP_number++) {
        for (int i = 0; i < 3; i++)
            queenPos[i] = frontQueensPos[(FQP_number * 3) + i];

        /* Check rows 3..6 against the prefix rows 0..2. */
        bool legal = true;
        for (int i = 3; i <= 6; i++) {
            for (int j = 0; j < 3; j++) {
                if ((queenPos[i] - i) == (queenPos[j] - j) ||
                    (queenPos[i] + i) == (queenPos[j] + j) ||
                    queenPos[i] == queenPos[j]) {
                    legal = false;
                    break;
                }
            }
            if (!legal)
                break;
        }
        if (!legal)
            continue;

        /* Iterative backtracking over rows 7..QUEENS-1. */
        int posNow = 7;
        queenPos[posNow] = -1;
        while (posNow > 6) {
            queenPos[posNow]++;
            /* Advance the current row's column until legal or exhausted. */
            while (queenPos[posNow] < QUEENS) {
                legal = true;
                for (int j = posNow - 1; j >= 0; j--) {
                    if ((queenPos[posNow] - posNow) == (queenPos[j] - j) ||
                        (queenPos[posNow] + posNow) == (queenPos[j] + j) ||
                        queenPos[posNow] == queenPos[j]) {
                        legal = false;
                        break;
                    }
                }
                if (!legal)
                    queenPos[posNow]++;
                else
                    break;
            }
            if (queenPos[posNow] < QUEENS) {
                if (posNow == (QUEENS - 1)) {
                    localResult++;   /* full board placed */
                    posNow--;
                } else {
                    posNow++;
                    queenPos[posNow] = -1;
                }
            } else
                posNow--;            /* row exhausted: backtrack */
        }
    }
    data[thisThread] = localResult;
}

/* Zero the per-thread result array. */
__host__ void initData(int* data)
{
    for (int i = 0; i < QUEENS * QUEENS * QUEENS * QUEENS; i++)
        data[i] = 0;
}

/*
 * Count N-queens solutions whose first three queens come from seeds in
 * [seedLower, seedUpper). A seed encodes the columns of rows 0..2 in base
 * QUEENS. Prints timing breakdowns and returns the total count.
 */
__host__ int NQueens(int seedLower, int seedUpper)
{
    clock_t start, mid0, mid1, mid2, end;
    int* d_FQP;
    std::vector<int> frontQueenPosV;
    int* frontQueenPos;
    int tempFrontQueensPos[3];   /* FIX: stack array (was leaked heap array) */
    int* d_data;
    int data[QUEENS * QUEENS * QUEENS * QUEENS];
    int totalResult = 0;

    initData(data);
    int seedFrom = seedLower;
    int seedTo = seedUpper;
    start = clock();
    if (seedTo < seedFrom)
        return 0;
    if (seedTo > QUEENS * QUEENS * QUEENS)
        seedTo = QUEENS * QUEENS * QUEENS;
    if (seedFrom < 0)
        seedFrom = 0;

    /* Build the list of mutually non-attacking 3-queen prefixes. */
    for (int i = seedFrom; i < seedTo; i++) {
        tempFrontQueensPos[0] = i / QUEENS / QUEENS;
        tempFrontQueensPos[1] = i / QUEENS % QUEENS;
        tempFrontQueensPos[2] = i % QUEENS;
        if ((tempFrontQueensPos[0] - 0) == (tempFrontQueensPos[1] - 1) ||
            (tempFrontQueensPos[0] + 0) == (tempFrontQueensPos[1] + 1) ||
            tempFrontQueensPos[0] == tempFrontQueensPos[1])
            continue;
        if ((tempFrontQueensPos[2] - 2) == (tempFrontQueensPos[1] - 1) ||
            (tempFrontQueensPos[2] + 2) == (tempFrontQueensPos[1] + 1) ||
            tempFrontQueensPos[2] == tempFrontQueensPos[1])
            continue;
        if ((tempFrontQueensPos[0] - 0) == (tempFrontQueensPos[2] - 2) ||
            (tempFrontQueensPos[0] + 0) == (tempFrontQueensPos[2] + 2) ||
            tempFrontQueensPos[0] == tempFrontQueensPos[2])
            continue;
        frontQueenPosV.push_back(tempFrontQueensPos[0]);
        frontQueenPosV.push_back(tempFrontQueensPos[1]);
        frontQueenPosV.push_back(tempFrontQueensPos[2]);
    }

    if (frontQueenPosV.empty())
        return 0;
    frontQueenPos = new int[frontQueenPosV.size()];
    memcpy(frontQueenPos, &frontQueenPosV[0], frontQueenPosV.size() * sizeof(int));

    int numFQP = frontQueenPosV.size();
    int* d_numFQP;

    mid0 = clock();
    cudaMalloc((void**)&d_data, QUEENS * QUEENS * QUEENS * QUEENS * sizeof(int));
    cudaMalloc((void**)&d_FQP, frontQueenPosV.size() * sizeof(int));
    cudaMalloc((void**)&d_numFQP, sizeof(int));
    cudaMemcpy(d_data, data, QUEENS * QUEENS * QUEENS * QUEENS * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_FQP, frontQueenPos, frontQueenPosV.size() * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_numFQP, &numFQP, sizeof(int), cudaMemcpyHostToDevice);

    dim3 blocksPerGrid(QUEENS, QUEENS, 1);
    dim3 threadsPerBlock(QUEENS, QUEENS, 1);

    mid1 = clock();
    countQueens<<<blocksPerGrid, threadsPerBlock>>>(d_FQP, d_data, d_numFQP);
    cudaDeviceSynchronize();
    cudaMemcpy(data, d_data, QUEENS * QUEENS * QUEENS * QUEENS * sizeof(int), cudaMemcpyDeviceToHost);
    mid2 = clock();

    for (int dNum = 0; dNum < QUEENS * QUEENS * QUEENS * QUEENS; dNum++)
        totalResult += data[dNum];

    /* FIX: release everything (d_numFQP and the host copy previously leaked). */
    cudaFree(d_data);
    cudaFree(d_FQP);
    cudaFree(d_numFQP);
    delete[] frontQueenPos;

    end = clock();
    /* FIX: clock_t is not guaranteed to be int; cast for the %d format. */
    printf("%d__%d, %d, %d, %d, %d\n", totalResult, (int)(mid0 - start),
           (int)(mid1 - mid0), (int)(mid2 - mid1), (int)(end - mid2),
           (int)(end - mid0));
    return totalResult;
}

int main()
{
    NQueens(0, QUEENS * QUEENS * QUEENS * QUEENS);
    return 0;
}
8,054
#include <iostream>
#include <cmath>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

// #define SIGN(x) ((x) > 0 ? 1 : ((x) ? -1 : 0))

// Abort with file/line diagnostics on any failing CUDA runtime call.
#define CSC(call) \
do { \
    cudaError_t res = call; \
    if (res != cudaSuccess) { \
        fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
                __FILE__, __LINE__, cudaGetErrorString(res)); \
        exit(0); \
    } \
} while(0)

using namespace std;

// Equation coefficients: we solve tan(co1*x) - co2*x = 0.
#define co1 1.8
#define co2 2.8

// The target function whose roots we bracket and refine.
__host__ __device__ double fun(double x) { return tan(co1 * x) - co2 * x; }
// Period of tan(co1*x): discontinuities repeat every pi()/1 units apart.
__host__ __device__ double pi(void) { return M_PI / co1; }

// Bisection on [left, right]; assumes fun changes sign at most once on the
// interval. `limit` bounds the iteration count. Returns the last midpoint.
__device__ double bisection(double left, double right, double eps, int limit = 1000000) {
    double mid = 0;
    double fl = fun(left);
    double fr = fun(right);
    while (right - left > eps && abs(right - left) > abs(eps) && limit) {
        double fm;
        limit--;
        mid = (left + right) / 2;
        fm = fun(mid);
        // cout << left << "' '" << mid << " " << right << endl;
        // cout << fl << " " << fm << " " << fr << endl;
        // cout << endl;
        if (fm) {
            // same sign at the left edge and at the midpoint: move left in
            if (fm * fl > 0) {
                fl = fm;
                left = mid;
            }
            // same sign at the right edge and at the midpoint: move right in
            if (fm * fr > 0) {
                fr = fm;
                right = mid;
            }
        } else break;   // exact zero hit
    }
    return mid;
}

// One thread per interval: refine a root strictly inside
// (borders[idx], borders[idx+1]), shrunk by eps to avoid the endpoints.
__global__ void caller(double *borders, double *results, double eps) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    results[idx] = bisection(borders[idx] + eps, borders[idx + 1] - eps, eps);
}

int main(void) {
    double left, right, eps;
    cout << "l, r, e: ";
    cin >> left >> right >> eps;
    cudaEvent_t start, end;
    bool has_zero = false;
    if (eps > 0.0001)
        cout << "Epsilon is too big" << endl;
    // Build the list of discontinuities of tan(co1*x) covering [left, right];
    // between two consecutive tear points the function is continuous.
    double tear = floor(left / pi()) * pi() - pi() / 2;
    has_zero = left < 0 && right > 0;
    int size = (right + pi() - left) / pi() + 2;
    if (has_zero) size += 2;
    thrust::device_vector<double> tear_points(size);
    thrust::device_vector<double> results (size - 1);
    thrust::sequence(tear_points.begin(), tear_points.end(), tear, pi());
    if (has_zero) {
        // Carve a small gap around x = 0 so each interval brackets at most
        // one root; then re-sort so the points stay ordered.
        tear_points[size - 1] = 0 - 100 * eps;
        tear_points[size - 2] = 0 + 100 * eps;
        thrust::sort(tear_points.begin(), tear_points.end());
    }
    // we should add 0 as break point, otherwise there are 3 roots in there
    cout << "Size: " << size << endl;
    cout << "Called the kernel" << endl;
    //~ for (int i = 0; i < tear_points.size(); ++i)
    //~     cout << tear_points[i] << endl;
    CSC(cudaEventCreate(&start));
    CSC(cudaEventCreate(&end ));
    CSC(cudaEventRecord( start));
    // One thread per interval; size-1 intervals between size tear points.
    caller<<<1, size - 1>>>(
        thrust::raw_pointer_cast(tear_points.data()),
        thrust::raw_pointer_cast(results.data()),
        eps
    );
    CSC(cudaGetLastError());
    CSC(cudaEventRecord (end));
    CSC(cudaEventSynchronize(end));
    float t;
    CSC(cudaEventElapsedTime(&t, start, end));
    CSC(cudaEventDestroy(start));
    CSC(cudaEventDestroy(end));
    cout << "Results: " << endl;
    // NOTE(review): the loop starts at i = 1, so results[0] is never
    // reported — confirm the first interval is intentionally skipped.
    for (int i = 1; i < results.size(); ++i) {
        double m = results[i];
        if (m > left && m < right) {
            cout << "Interval: [" << tear_points[i] << ", " << tear_points[i + 1] << "]" << endl;
            cout << "fun(" << m << ") = " << fun(m) << "\t (" << (int)fun(m) << ")" << endl;
            cout << endl;
        }
    }
    cout << "Time: " << t << endl;
    return 0;
}
8,055
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible? // Assigns every element in an array with its index. // nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple // Question answers: // // 1 ) 1 block is 16 threads and the grid contains 1 block -> 16 threads // 1 block is 1 SM -> 16 SM // // 2 ) the square roots calculated using CUDA cannot be assumed to be the same assumed // to be the same as if they were caculate on the CPU. The CUDA computations will // contain some error. #include <stdio.h> const int N = 16; const int blocksize = 16; __global__ //void simple(float *c) void simple(float* c, float* input) { //c[threadIdx.x] = threadIdx.x ; int index = threadIdx.x + blockIdx.x * blockDim.x; c[index] = sqrt(input[index]); } int main() { float *c = new float[N]; float* test = new float[N]; for(int i = 0; i < N; ++i){ c[i] = i; test[i] = i; } float *cd, *bd; const int size = N*sizeof(float); cudaMalloc( (void**)&cd, size ); cudaMalloc( (void**)&bd, size ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); cudaMemcpy( bd, c, size, cudaMemcpyHostToDevice ); simple<<<dimGrid, dimBlock>>>(cd, bd); cudaThreadSynchronize(); cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost ); // cudaMemCpy(dest, src, datasize, arg) cudaFree( cd ); cudaFree( bd ); for (int i = 0; i < N; i++){ printf("gpu %f \n", c[i]); printf("cpu %f \n", sqrt(test[i])); } printf("\n"); delete[] c; printf("done\n"); return EXIT_SUCCESS; }
8,056
#include <stdio.h>
#include <stdlib.h>
#include <set>
#include <sstream>
#include <string>
#include <fstream>
#include <iostream>
#include <cstring>
#include <math.h>
#include <thrust/count.h>
#include <thrust/fill.h>

/* FIX: parenthesized so the macro is safe inside larger expressions. */
#define MAXBLOCKS (1 << 30)

using namespace std;

// Jones-Plassmann-Luby graph coloring round: assign color `c` to every
// still-uncolored vertex whose random weight strictly exceeds that of all
// relevant (uncolored or this-round) neighbors.
// CSR inputs: preSum = row offsets (n+1 entries), colIndex = column indices.
__global__ void colorJPLKernel(int n, int c, int* NNZ, int* preSum, int* colIndex, int* randoms, int* colors)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    if (index < n) {
        for (int i = index; i < n; i += stride) {
            bool f = true;
            if (colors[i] != -1) {
                continue;   // already colored in an earlier round
            }
            int ir = randoms[i];
            for (int k = preSum[i]; k < preSum[i + 1]; k++) {
                int j = colIndex[k];
                int jc = colors[j];
                // skip self-loops and neighbors finalized with an older color
                if (((jc != -1) && (jc != c)) || (i == j)) {
                    continue;
                }
                int jr = randoms[j];
                if (ir <= jr) {
                    f = false;   // not a local maximum; wait for a later round
                }
            }
            if (f) {
                colors[i] = c;
            }
        }
    }
}

// Host driver: launch JPL rounds with increasing color c until every vertex
// is colored, then print the coloring. `colors` must be unified memory.
void colorJPL(int n, int* NNZ, int* preSum, int* colIndex, int* colors)
{
    int* randoms;
    cudaMallocManaged(&randoms, sizeof(int) * n);
    for (int i = 0; i < n; i++) {
        randoms[i] = rand();
    }
    thrust::fill(colors, colors + n, -1);
    for (int c = 1; c < n + 1; c++) {
        int nt = 256;
        int nb = min((n + nt - 1) / nt, MAXBLOCKS);
        colorJPLKernel<<<nb, nt>>>(n, c, NNZ, preSum, colIndex, randoms, colors);
        cudaDeviceSynchronize();
        int left = (int)thrust::count(colors, colors + n, -1);
        if (left == 0) {
            break;
        }
    }
    printf("\n");
    for (int i = 0; i < n; i++) {
        printf("%d ", colors[i]);
    }
    printf("\n");
    cudaFree(randoms);
}

// Counts the number of unique colors in a solution
int CountColors(int V, int* color)
{
    int num_colors = 0;
    set<int> seen_colors;
    for (int i = 0; i < V; i++) {
        if (seen_colors.find(color[i]) == seen_colors.end()) {
            seen_colors.insert(color[i]);
            num_colors++;
        }
    }
    return num_colors;
}

// Returns true if the color assignment is valid for the graph
// (no edge joins two equal colors, and every colored vertex has color >= 1).
bool IsValidColoring(bool* graph, int V, int* color)
{
    for (int i = 0; i < V; i++) {
        for (int j = 0; j < V; j++) {
            if (graph[i * V + j]) {
                if (i != j && color[i] == color[j]) {
                    printf("Vertex %d and Vertex %d are connected and have the same color %d\n", i, j, color[i]);
                    return false;
                }
                if (color[i] < 1) {
                    printf("Vertex %d has invalid color %d\n", i, color[i]);
                    return false;
                }
            }
        }
    }
    return true;
}

// Read DIMACS graphs
// Assumes input nodes are numbered starting from 1
void ReadColFile(const char filename[], bool** graph, int* V)
{
    string line;
    ifstream infile(filename);
    if (infile.fail()) {
        printf("Failed to open %s\n", filename);
        return;
    }
    int num_rows, num_edges;
    while (getline(infile, line)) {
        istringstream iss(line);
        string s;
        int node1, node2;
        iss >> s;
        if (s == "p") {
            iss >> s;   // read string "edge"
            iss >> num_rows;
            iss >> num_edges;
            *V = num_rows;
            *graph = new bool[num_rows * num_rows];
            memset(*graph, 0, num_rows * num_rows * sizeof(bool));
            continue;
        } else if (s != "e")
            continue;
        iss >> node1 >> node2;
        // Assume node numbering starts at 1
        (*graph)[(node1 - 1) * num_rows + (node2 - 1)] = true;
        (*graph)[(node2 - 1) * num_rows + (node1 - 1)] = true;
    }
    infile.close();
}

// Read MatrixMarket graphs
// Assumes input nodes are numbered starting from 1
void ReadMMFile(const char filename[], bool** graph, int* V)
{
    string line;
    ifstream infile(filename);
    if (infile.fail()) {
        printf("Failed to open %s\n", filename);
        return;
    }
    // Reading comments
    while (getline(infile, line)) {
        istringstream iss(line);
        if (line.find("%") == string::npos)
            break;
    }
    // Reading metadata
    istringstream iss(line);
    int num_rows, num_cols, num_edges;
    iss >> num_rows >> num_cols >> num_edges;
    *graph = new bool[num_rows * num_rows];
    memset(*graph, 0, num_rows * num_rows * sizeof(bool));
    *V = num_rows;
    // Reading nodes
    while (getline(infile, line)) {
        istringstream iss(line);
        int node1, node2, weight;
        iss >> node1 >> node2 >> weight;
        // Assume node numbering starts at 1
        (*graph)[(node1 - 1) * num_rows + (node2 - 1)] = true;
        (*graph)[(node2 - 1) * num_rows + (node1 - 1)] = true;
    }
    infile.close();
}

// Convert a dense boolean adjacency matrix to CSR.
// Outputs: *NNZ (all-ones values, realloc'd), preSum (row offsets, rows+1
// entries), *colIndex (column indices, realloc'd), *counter (nonzero count).
void CSRConvert(bool** graph, int rows, int** NNZ, int* preSum, int** colIndex, int* counter)
{
    // assume square matrix
    int cols = rows;
    *counter = 0;
    /* FIX: heap allocation instead of a non-standard C++ VLA. */
    int* rowElem = new int[rows];
    for (int i = 0; i < rows; i++) {
        rowElem[i] = 0;
    }
    // initialize preSum
    preSum[0] = 0;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            if ((*graph)[i * rows + j] == false) {
                continue;
            }
            // NOTE: realloc per nonzero is quadratic in the worst case;
            // kept for fidelity — acceptable for the small test graphs used.
            *NNZ = (int*)realloc(*NNZ, sizeof(int) * (*counter + 1));
            (*NNZ)[*counter] = 1;
            *colIndex = (int*)realloc(*colIndex, sizeof(int) * (*counter + 1));
            (*colIndex)[*counter] = j;
            rowElem[i]++;
            *counter += 1;
        }
    }
    /* FIX: loop previously ran to rows+1, writing preSum[rows+1] and reading
     * rowElem[rows] — both out of bounds. */
    for (int i = 0; i < rows; i++) {
        preSum[i + 1] = preSum[i] + rowElem[i];
    }
    delete[] rowElem;
}

// Load a graph (.col DIMACS or MatrixMarket), color it on the GPU, validate
// the result, and return a heap-allocated host copy of the coloring in *color
// (caller owns it).
void GraphColoringGPU(const char filename[], int** color)
{
    bool* graph;
    int V;
    if (string(filename).find(".col") != string::npos)
        ReadColFile(filename, &graph, &V);
    else {
        ReadMMFile(filename, &graph, &V);
    }

    // convert the dense adjacency matrix to compressed sparse row format
    int* NNZ = (int*)malloc(sizeof(int));
    int* preSum = (int*)malloc(sizeof(int) * (V + 1));
    int* colIndex = (int*)malloc(sizeof(int));
    int counter = 0;
    CSRConvert(&graph, V, &NNZ, preSum, &colIndex, &counter);

    // migrate CSR arrays to unified memory for the GPU
    int* Ao;
    int* Av;
    int* Ac;
    int* colors;
    cudaMallocManaged(&Ao, sizeof(int) * (V + 1));
    cudaMallocManaged(&Av, sizeof(int) * counter);
    cudaMallocManaged(&Ac, sizeof(int) * counter);
    cudaMallocManaged(&colors, sizeof(int) * V);
    for (int i = 0; i < counter; i++) {
        Av[i] = NNZ[i];
        Ac[i] = colIndex[i];
    }
    for (int i = 0; i < V + 1; i++) {
        Ao[i] = preSum[i];
    }

    colorJPL(V, Av, Ao, Ac, colors);
    printf("JPL coloring found solution with %d colors\n", CountColors(V, colors));
    printf("Valid coloring: %d\n", IsValidColoring(graph, V, colors));

    /* FIX: actually hand the coloring back to the caller — previously the
     * *color out-parameter was never assigned. */
    *color = new int[V];
    memcpy(*color, colors, V * sizeof(int));

    free(NNZ);
    free(preSum);
    free(colIndex);
    delete[] graph;     /* FIX: previously leaked */
    cudaFree(Ao);
    cudaFree(Av);
    cudaFree(Ac);
    cudaFree(colors);   /* FIX: previously leaked */
}

int main(int argc, char* argv[])
{
    const char fileName[] = "/home/zwharris/EEC289Q/Hw3/planar16.col";
    int* color;
    GraphColoringGPU(fileName, &color);
    delete[] color;
    return 0;
}
8,057
/*Trail C++ program*/ #include<stdio.h> #include<stdlib.h> #include<time.h> #define N 512 __global__ void add(int *a, int *b, int *c) { c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; //Each block performs the addition command separately on its contents } int main(void) { printf("Hello! This is my first cuda C program with Ubuntu 11.10\n"); /* Do something more if you want */ int *a, *b, *c; //host or cpu copies int *d_a, *d_b, *d_c; //device or GPU copies int size = N*sizeof(int); //Allocate space for device copies of a,b,c cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_c, size); //Input values: Allocate space for host copies of a,b,c and setup input values a = (int *)malloc(size); b = (int *)malloc(size); for (int i=0; i<N; i++) { srand(time(NULL)); a[i] = rand(); b[i] = rand(); } c = (int *)malloc(size); //setup input values //a=2; //b=7; //copy inputs to device(GPU) memory cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); //launch add() kernel on N GPU add<<<N,1>>>(d_a, d_b, d_c); //copy result back to host cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); //int result = c; printf("Result=%p \n", c); //cleanup free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
8,058
#include <stdio.h>

// One atomicAdd per element: bucket[color[i]] += 1, guarded against the
// grid tail past n.
__global__ void histogram(int n, int* color, int* bucket)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n) {
        int c = color[i];
        atomicAdd(&bucket[c], 1);
    }
}

// Build a histogram of N random values over M buckets on the CPU and GPU
// and print the first five buckets of each for comparison.
void host_histogram()
{
    int N = 1 << 20;   // number of samples
    int M = 1 << 10;   // number of buckets; rand() % M keeps colors in range
    int *color_, *bucket_, *d_color, *d_bucket;
    color_ = (int*)malloc(N * sizeof(int));
    bucket_ = (int*)malloc(M * sizeof(int));
    cudaMalloc(&d_color, N * sizeof(int));
    cudaMalloc(&d_bucket, M * sizeof(int));
    memset(bucket_, 0, M * sizeof(int));
    // CPU reference histogram
    for (int i = 0; i < N; i++) {
        color_[i] = rand() % M;
        bucket_[color_[i]]++;
    }
    printf("cpu bucket: %d,%d,%d,%d,%d\n", bucket_[0], bucket_[1], bucket_[2], bucket_[3], bucket_[4]);
    // reuse bucket_ to receive the GPU result
    memset(bucket_, 0, M * sizeof(int));
    cudaMemcpy(d_color, color_, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_bucket, 0, M * sizeof(int));
    histogram<<<(N + 255) / 256, 256>>>(N, d_color, d_bucket);
    // cudaMemcpy is a synchronizing call, so no explicit sync is needed here
    cudaMemcpy(bucket_, d_bucket, M * sizeof(int), cudaMemcpyDeviceToHost);
    printf("gpu bucket: %d,%d,%d,%d,%d\n", bucket_[0], bucket_[1], bucket_[2], bucket_[3], bucket_[4]);
    cudaFree(d_color);
    cudaFree(d_bucket);
    free(color_);
    free(bucket_);
}

/**
 * Introduce local maximums and update global only when new local maximum found.
 * Each thread folds its value into local_max[i % num_locals]; atomicMax
 * returns the previous value, so the global atomic is only touched when this
 * thread actually raised its local slot — reducing contention on global_max.
 * NOTE: assumes the grid exactly covers the input (no bounds check); the
 * caller launches (N+255)/256 blocks with N a multiple of 256.
 */
__global__ void global_max(int* values, int* global_max, int* local_max, int num_locals)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int val = values[i];
    int li = i % num_locals;
    int old_max = atomicMax(&local_max[li], val);
    if (old_max < val) {
        atomicMax(global_max, val);
    }
}

// Compute the max of N random values on the CPU and GPU and print both.
// rand() is non-negative, so zero-initialized device maxima are safe.
void host_global_max()
{
    int N = 1 << 20;
    int num_locals_ = 1 << 10;   // number of contention-spreading slots
    int *values_, *d_values, *d_local_max, *d_global_max;
    values_ = (int*)malloc(N * sizeof(int));
    cudaMalloc(&d_values, N * sizeof(int));
    cudaMalloc(&d_local_max, num_locals_ * sizeof(int));
    cudaMalloc(&d_global_max, sizeof(int));
    int h_global_max = -1;
    // CPU reference maximum
    for (int i = 0; i < N; i++) {
        values_[i] = rand();
        if (h_global_max < values_[i])
            h_global_max = values_[i];
    }
    printf("cpu global_max: %d\n", h_global_max);
    h_global_max = -1;
    cudaMemcpy(d_values, values_, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_local_max, 0, num_locals_ * sizeof(int));
    cudaMemset(d_global_max, 0, sizeof(int));
    global_max<<<(N + 255) / 256, 256>>>(d_values, d_global_max, d_local_max, num_locals_);
    cudaMemcpy(&h_global_max, d_global_max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("gpu global_max: %d\n", h_global_max);
    cudaFree(d_values);
    cudaFree(d_local_max);
    cudaFree(d_global_max);
    free(values_);
}

int main(void)
{
    host_histogram();
    host_global_max();
}
8,059
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/time.h> __global__ void kernel(int *a) { a[blockIdx.x * blockDim.x + threadIdx.x ] = 0; } double tiempo( void ) { struct timeval tv; gettimeofday(&tv, NULL); return (double) (tv.tv_usec) / 1000000 + (double) (tv.tv_sec); } int main(int argc, char** argv) { double tiempoInicio; double tiempoFin; int n; if (argc == 2) { n = atoi(argv[1]); } else { n = 64; } printf("\nElementos a reservar: %d\n\n\n", n); int numBytes = n * sizeof(int); int *d_a; int *h_a; cudaMalloc((void **) &d_a, numBytes ); h_a = (int *)malloc(numBytes); dim3 blockSize(8); dim3 gridSize(8); tiempoInicio = tiempo(); kernel <<<gridSize, blockSize>>>(d_a); cudaThreadSynchronize(); tiempoFin = tiempo(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error!\n" ); printf("Tiempo de inicio Kernel: %lf\n", tiempoInicio); printf("Tiempo de fin Kernel: %lf\n", tiempoFin); printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio); tiempoInicio = tiempo(); cudaMemcpy (d_a, h_a, numBytes, cudaMemcpyDeviceToHost); tiempoFin = tiempo(); printf("Tiempo de inicio Transferencia: %lf\n", tiempoInicio); printf("Tiempo de fin Transferencia: %lf\n", tiempoFin); printf("Tiempo total: %lf\n", tiempoFin - tiempoInicio); printf("Done.\n"); return 0; }
8,060
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <float.h>
#include <cuda.h>

/* Constant declaration */
#define MAX_BRIGHTNESS 255   /* Maximum gray level */
#define GRAYLEVEL 700        /* No. of gray levels */
#define MAX_FILENAME 256     /* Filename length limit */
#define MAX_BUFFERSIZE 256

/* Global constant declaration */
/* Image storage arrays */
unsigned char* image1_y;   /* input grayscale image */
unsigned char* image2_y;   /* output image (zero-filled before the kernel) */

/* struct used to hold image dimensions */
typedef struct dim {
    int x_size1;
    int y_size1;
    int x_size2;
    int y_size2;
} dimension;

/*
 * Prewitt horizontal-gradient filter.
 * Launch: grid ((x_size1/1024)+1, y_size1), block of 1024 threads, dynamic
 * shared memory of 3*1026 bytes holding a 3-row tile (block row plus a
 * one-pixel halo on each side).
 * NOTE(review): threads past the image edge return before the
 * __syncthreads() calls below, which is divergent-barrier territory;
 * works in practice because exited threads are released from the barrier,
 * but a guarded-write restructure would be safer — confirm on target HW.
 */
__global__ void prewitt(dimension size_gpu, unsigned char * image1_in_d, unsigned char* image2_out_d, double max, double min)
{
    int x_coord = threadIdx.x + blockIdx.x * blockDim.x;
    int y_coord = blockIdx.y;
    /* removes any unnecessary threads */
    if (x_coord > size_gpu.x_size1 || y_coord > size_gpu.y_size1) {
        return;
    }
    int i, j;
    __syncthreads();
    extern __shared__ unsigned char block_image[];
    __syncthreads();
    /* The first thread of each block stages the whole 3x1026 tile.
     * NOTE(review): out-of-bounds texels are marked with 0, which is
     * indistinguishable from a genuinely black pixel; the consumers below
     * skip value 0 — confirm this is acceptable for the input images. */
    if (x_coord % 1024 == 0) {
        for (i = -1; i < 2; i++) {
            for (j = -1; j < 1025; j++) {
                if (j + blockIdx.x * blockDim.x == size_gpu.x_size1 + 1) {
                    break;
                }
                if (((int)(j + blockIdx.x * blockDim.x) < 0) || (y_coord + i < 0) ||
                    (y_coord + i >= size_gpu.y_size1) ||
                    (j + blockIdx.x * blockDim.x >= size_gpu.x_size1)) {
                    /* FIX: 0 instead of the pointer constant NULL assigned
                     * to an unsigned char (same value, no conversion warning) */
                    block_image[(i + 1) * 1026 + (j + 1)] = 0;
                } else {
                    block_image[(i + 1) * 1026 + (j + 1)] =
                        image1_in_d[(blockIdx.x * blockDim.x) + j + ((y_coord + i) * size_gpu.x_size1)];
                }
            }
        }
    }
    __syncthreads();

    double grad;
    /* horizontal Prewitt columns: -1 0 +1 */
    int kernel[3][3] = { {-1, 0, 1}, {-1, 0, 1}, {-1, 0, 1} };
    /* Convolve interior pixels and normalize into 0..255 using the
     * host-precomputed global min/max of the gradient. */
    if (x_coord != 0 && x_coord != size_gpu.x_size1 - 1 && y_coord != 0 &&
        y_coord != size_gpu.y_size1 - 1) {
        grad = 0.0;
        for (j = -1; j <= 1; j++) {
            for (i = -1; i <= 1; i++) {
                if (threadIdx.x != 0 && block_image[(j + 1) * 1026 + threadIdx.x + i] != 0) {
                    grad += kernel[j + 1][i + 1] * block_image[(j + 1) * 1026 + threadIdx.x + i];
                } else if (block_image[(j + 1) * 1026 + threadIdx.x + 1 + i] != 0) {
                    grad += kernel[j + 1][i + 1] * block_image[(j + 1) * 1026 + threadIdx.x + 1 + i];
                }
            }
        }
        grad = 255 * (grad - min) / (max - min);
        image2_out_d[y_coord * size_gpu.x_size1 + x_coord] = grad;
    }
    __syncthreads();
}

int main(void)
{
    /////// Image input from mypgm.h ///////
    dimension size;
    char file_name[MAX_FILENAME];
    char buffer[MAX_BUFFERSIZE];
    FILE *fp;      /* File pointer */
    int max_gray;  /* Maximum gray level */
    int x, y;      /* Loop variable */

    /* Input file open */
    printf("\n-----------------------------------------------------\n");
    printf("Monochromatic image file input routine \n");
    printf("-----------------------------------------------------\n\n");
    printf(" Only pgm binary file is acceptable\n\n");
    printf("Name of input image file? (*.pgm) : ");
    scanf("%s", file_name);
    fp = fopen(file_name, "rb");
    if (NULL == fp) {
        printf(" The file doesn't exist!\n\n");
        exit(1);
    }
    /* Check of file-type ---P5 */
    fgets(buffer, MAX_BUFFERSIZE, fp);
    if (buffer[0] != 'P' || buffer[1] != '5') {
        printf(" Mistaken file format, not P5!\n\n");
        exit(1);
    }
    /* input of x_size1, y_size1 */
    size.x_size1 = 0;
    size.y_size1 = 0;
    while (size.x_size1 == 0 || size.y_size1 == 0) {
        fgets(buffer, MAX_BUFFERSIZE, fp);
        if (buffer[0] != '#') {
            sscanf(buffer, "%d %d", &size.x_size1, &size.y_size1);
        }
    }
    image1_y = (unsigned char*)malloc(size.x_size1 * size.y_size1 * sizeof(unsigned char));
    image2_y = (unsigned char*)malloc(size.x_size1 * size.y_size1 * sizeof(unsigned char));
    /* input of max_gray */
    max_gray = 0;
    while (max_gray == 0) {
        fgets(buffer, MAX_BUFFERSIZE, fp);
        if (buffer[0] != '#') {
            sscanf(buffer, "%d", &max_gray);
        }
    }
    /* Display of parameters */
    printf("\n Image width = %d, Image height = %d\n", size.x_size1, size.y_size1);
    printf(" Maximum gray level = %d\n\n", max_gray);
    if (max_gray != MAX_BRIGHTNESS) {
        printf(" Invalid value of maximum gray level!\n\n");
        exit(1);
    }
    /* Input of image data */
    printf("Total Size: %d\n\n", size.y_size1 * size.x_size1);
    for (y = 0; y < size.y_size1; y++) {
        for (x = 0; x < size.x_size1; x++) {
            image1_y[y * size.x_size1 + x] = (unsigned char)fgetc(fp);
            image2_y[y * size.x_size1 + x] = 0;
        }
    }
    /* FIX: close the input file; it was previously left open when fp was
     * reused for the output file below. */
    fclose(fp);
    printf("-----Image data input OK-----\n\n");
    printf("-----------------------------------------------------\n\n");
    printf("\n");
    printf("\n");
    printf("\n");

    unsigned char* image1_in_d;
    unsigned char* image2_out_d;
    unsigned char* result;
    const int sizeE = size.y_size1 * size.x_size1 * sizeof(unsigned char);

    /* Find the darkest and lightest gradient responses so the kernel can
     * normalize into 0..255. */
    double max = -DBL_MAX;
    double min = DBL_MAX;
    int i, j;
    /* FIX: this host kernel previously used columns {-1, 0, -1}, which does
     * not match the {-1, 0, 1} Prewitt operator applied on the device — the
     * normalization range (min/max) was computed for a different filter. */
    int kernel[3][3] = { {-1, 0, 1}, {-1, 0, 1}, {-1, 0, 1} };
    double grad;
    for (y = 1; y < size.y_size1 - 1; y++) {
        for (x = 1; x < size.x_size1 - 1; x++) {
            grad = 0.0;
            for (j = -1; j <= 1; j++) {
                for (i = -1; i <= 1; i++) {
                    grad += kernel[j + 1][i + 1] * image1_y[(y + j) * size.x_size1 + x + i];
                }
            }
            if (grad < min)
                min = grad;
            if (grad > max)
                max = grad;
        }
    }
    /* if the darkest and lightest pixel are the same the image is blank */
    if ((int)(max - min) == 0) {
        printf("Nothing Exists!!!\n");
        exit(1);
    }

    result = (unsigned char *)malloc(sizeE);
    int block = 1024;
    dim3 grid((size.x_size1 / 1024) + 1, size.y_size1);

    /* allocate device memory for the input and output images */
    cudaMalloc((void**)&image1_in_d, sizeE);
    cudaMalloc((void**)&image2_out_d, sizeE);
    /* copy the images to the device */
    cudaMemcpy(image1_in_d, image1_y, sizeE, cudaMemcpyHostToDevice);
    cudaMemcpy(image2_out_d, image2_y, sizeE, cudaMemcpyHostToDevice);

    /* time the kernel with CUDA events */
    cudaEvent_t m_start, m_stop;
    float m_time;
    cudaEventCreate(&m_start);
    cudaEventCreate(&m_stop);
    cudaEventRecord(m_start, 0);

    /* launch: 3*1026 bytes of dynamic shared memory per block (tile) */
    prewitt<<<grid, block, 3 * 1026 * sizeof(unsigned char)>>>(size, image1_in_d, image2_out_d, max, min);

    cudaDeviceSynchronize();
    cudaEventRecord(m_stop, 0);
    cudaEventSynchronize(m_stop);
    cudaEventElapsedTime(&m_time, m_start, m_stop);
    printf("******** Total Running Time of Kernal = %0.5f sec ********\n ", m_time / 1000);
    cudaEventDestroy(m_start);
    cudaEventDestroy(m_stop);
    /* (the deprecated cudaThreadSynchronize() call was removed — the device
     * is already synchronized above) */

    /* sets image 2 size to image 1 size */
    size.x_size2 = size.x_size1;
    size.y_size2 = size.y_size1;

    /* copy the output image back to the host */
    cudaMemcpy(result, image2_out_d, sizeE, cudaMemcpyDeviceToHost);

    /////// Image output from mypgm.h ///////
    /* Output file open */
    printf("-----------------------------------------------------\n");
    printf("Monochromatic image file output routine\n");
    printf("-----------------------------------------------------\n\n");
    printf("Name of output image file? (*.pgm) : ");
    scanf("%s", file_name);
    fp = fopen(file_name, "wb");
    /* output of pgm file header information */
    fputs("P5\n", fp);
    fputs("# Created by Image Processing\n", fp);
    fprintf(fp, "%d %d\n", size.x_size1, size.y_size1);
    fprintf(fp, "%d\n", MAX_BRIGHTNESS);
    /* Output of image data */
    for (y = 0; y < size.y_size1; y++) {
        for (x = 0; x < size.x_size1; x++) {
            fputc(result[y * size.x_size1 + x], fp);
        }
    }
    printf("\n-----Image data output OK-----\n\n");
    printf("-----------------------------------------------------\n\n");
    fclose(fp);

    /* free all allocated memory before exiting */
    cudaFree(image2_out_d);
    cudaFree(image1_in_d);
    free(result);
    free(image1_y);
    free(image2_y);
    return 0;
}
8,061
// p-TWDTW scheduling experiment (debug/exploration code, output only).
// The kernel maps threads onto a 6x6 cost matrix in window-sized column
// strips and prints, for each wavefront step, which (i, j) cells each
// thread would compute. No data is read or written besides stdout.
#include <stdio.h>

#define min2(x,y) (x < y ? x : y)
#define max2(x,y) (x > y ? x : y)

__global__ void loop(int num_threads)
{
    const int num_cols = 6;
    const int num_rows = 6;
    int window = num_cols / num_threads;   // consecutive columns per thread
    int tid = threadIdx.x;

    // Phase 1: anti-diagonal wavefront growing from the top-left corner.
    // At step `step`, threads 0..min(step, num_cols-1) are active.
    for (int step = 0; step < num_rows; step++) {
        int colOffset = tid * (window - 1);
        if (tid <= min2(step, num_cols - 1)) {
            printf("quantidade de janelas no passo %d\n", step);
            int rowOffset = step - tid;
            for (int w = 0; w < window; w++) {
                int i = step - tid + rowOffset;
                int j = tid + colOffset;
                colOffset = colOffset + 1;
                printf("A tid %d calcula o elemento i %d e j %d\n", tid, i, j);
            }
        }
        __syncthreads();
    }

    // Phase 2: shrinking wavefront along the bottom-row column strips.
    for (int step = (num_cols / window) - 2; step >= 0; step--) {
        if (tid <= min2(step, num_rows - 1)) {
            printf("quantidade de janelas no passo %d\n", step);
            for (int w = 0; w < window; w++) {
                int i = num_rows - tid - 1;
                int j = num_cols - (window * step) - window + w + tid * window;
                printf("A tid %d calcula o elemento i %d e j %d\n", tid, i, j);
            }
        }
        __syncthreads();
    }
}

int main()
{
    int num_threads = 3;
    loop<<<1, num_threads>>>(num_threads);
    cudaDeviceSynchronize();
}
8,062
#include "includes.h"

// Warm-up kernel: launched once at startup purely to force CUDA context
// creation and module load before any timed work. It performs a token
// computation whose result is discarded and has no observable side effects.
__global__ void kernel_cudaWarmUpGPU()
{
    // Flat global thread id, bumped by one so the compiler emits *some* code.
    int ind = blockIdx.x * blockDim.x + threadIdx.x;
    ind = ind + 1;
}
8,063
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <unistd.h>

/* Abort with a message if the most recent CUDA operation failed. */
#define CUDA_CHECK_LAST()                                                 \
    do {                                                                  \
        cudaError_t err_ = cudaGetLastError();                            \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(err_));     \
            exit(1);                                                      \
        }                                                                 \
    } while (0)

/*
** Matrix product kernel: one block per row of m1, one thread per column of m2.
** m1 is (size_y x size_x), m2 is (size_x x size_v), new_m is (size_y x size_v).
** NOTE(review): requires size_v <= the device's max threads per block — confirm
** with callers. (The original also performed a dead, no-effect read of
** m2[threadIdx.y * size_v + threadIdx.x]; removed.)
*/
__global__ void cuda_dot(double *m1, double *m2, int size_y, int size_v, int size_x, double *new_m)
{
    double acc = 0;
    for (int x = 0; x < size_x; ++x) {
        acc += m1[(blockIdx.x * size_x) + x] * m2[(x * size_v) + threadIdx.x];
    }
    new_m[(blockIdx.x * size_v) + threadIdx.x] = acc;
}

/* Element-wise kernels: one thread per element; the launch covers `size` exactly. */
__global__ void cuda_mult(double *m1, double *m2, double *new_m)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    new_m[i] = m1[i] * m2[i];
}

__global__ void cuda_sub(double *m1, double *m2, double *new_m)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    new_m[i] = m1[i] - m2[i];
}

__global__ void cuda_add(double *m1, double *m2, double *new_m)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    new_m[i] = m1[i] + m2[i];
}

extern "C" {

/*
** Smallest block count such that size/nb_block fits in max_th threads and
** divides size evenly (so every launch covers size exactly, no tail guard).
*/
int find_nb_blocks(int size, int max_th)
{
    int nb_block = 1;
    while (((float)size / (float)nb_block) > (float)max_th || (size % nb_block) != 0) {
        nb_block += 1;
    }
    return (nb_block);
}

/*
** Shared implementation of the element-wise wrappers (mult/substract/add):
** copies both inputs to the device, launches `kernel`, and returns a freshly
** malloc'd host result of `size` doubles (caller frees).
*/
static double *run_elementwise(void (*kernel)(double *, double *, double *),
                               double *m1, double *m2, int size)
{
    double *new_m = (double *)malloc((size_t)size * sizeof(double));
    double *d_m1 = NULL;
    double *d_m2 = NULL;
    double *d_out = NULL;
    dim3 numBlocks(find_nb_blocks(size, 1024));
    dim3 threadsPerBlock(size / numBlocks.x);

    cudaMalloc((void **)&d_out, size * sizeof(double));
    cudaMalloc((void **)&d_m1, size * sizeof(double));
    cudaMalloc((void **)&d_m2, size * sizeof(double));
    cudaMemcpy(d_m1, m1, size * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_m2, m2, size * sizeof(double), cudaMemcpyHostToDevice);
    kernel<<<numBlocks, threadsPerBlock>>>(d_m1, d_m2, d_out);
    cudaDeviceSynchronize();
    CUDA_CHECK_LAST();  /* previously launch errors were never checked */
    cudaMemcpy(new_m, d_out, size * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    cudaFree(d_m1);
    cudaFree(d_m2);
    return (new_m);
}

/*
** Matrix product (size_y x size_x) . (size_x x size_v); returns a malloc'd
** (size_y x size_v) host matrix (caller frees). The original checked
** cudaGetLastError() BEFORE doing any work (reporting stale errors) and never
** after the launch; the check now follows the launch.
*/
double *dot(double *m1, double *m2, int size_y, int size_v, int size_x)
{
    double *new_m = (double *)malloc((size_t)size_v * size_y * sizeof(double));
    double *d_out = NULL;
    double *d_m1 = NULL;
    double *d_m2 = NULL;

    cudaMalloc((void **)&d_out, (size_y * size_v) * sizeof(double));
    cudaMalloc((void **)&d_m1, (size_x * size_y) * sizeof(double));
    cudaMalloc((void **)&d_m2, (size_x * size_v) * sizeof(double));
    cudaMemcpy(d_m1, m1, (size_x * size_y) * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_m2, m2, (size_x * size_v) * sizeof(double), cudaMemcpyHostToDevice);
    cuda_dot<<<size_y, size_v>>>(d_m1, d_m2, size_y, size_v, size_x, d_out);
    cudaDeviceSynchronize();
    CUDA_CHECK_LAST();
    cudaMemcpy(new_m, d_out, (size_y * size_v) * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    cudaFree(d_m1);
    cudaFree(d_m2);
    return (new_m);
}

/* Element-wise product; returns a malloc'd host array (caller frees). */
double *mult(double *m1, double *m2, int size)
{
    return (run_elementwise(cuda_mult, m1, m2, size));
}

/* Host-side transpose of a (size_y x size_x) row-major matrix (caller frees). */
double *transpose(double *m, int size_y, int size_x)
{
    double *new_m = (double *)malloc(sizeof(double) * (size_x * size_y));
    for (int y = 0; y < size_y; ++y) {
        for (int x = 0; x < size_x; ++x) {
            new_m[x * size_y + y] = m[y * size_x + x];
        }
    }
    return (new_m);
}

/* Element-wise difference m1 - m2; returns a malloc'd host array (caller frees). */
double *substract(double *m1, double *m2, int size)
{
    return (run_elementwise(cuda_sub, m1, m2, size));
}

/* Element-wise sum; returns a malloc'd host array (caller frees). */
double *add(double *m1, double *m2, int size)
{
    return (run_elementwise(cuda_add, m1, m2, size));
}

}
8,064
#include "includes.h"

// Remaps every component of an array of int3 triples through a lookup table:
// destArray[k].{x,y,z} = reindex[srcArray[k].{x,y,z}], for N triples in each
// of nDims slabs spaced realSize apart.
// NOTE(review): assumes every source component is a valid index into
// `reindex`, and that N <= realSize — confirm with callers. dest and src may
// alias (each element is read before its slot is written).
__global__ void devInverseReindexInt3(int N, int3 *destArray, int3 *srcArray, int *reindex, int realSize, int nDims)
{
    // One pass per dimension slab.
    for (unsigned int n = 0; n < nDims; n++)
    {
        // Grid-stride loop over the N triples of this slab.
        int i = blockIdx.x*blockDim.x + threadIdx.x;
        while (i < N)
        {
            int tmp = srcArray[i + n*realSize].x;
            destArray[i + n*realSize].x = reindex[tmp];
            tmp = srcArray[i + n*realSize].y;
            destArray[i + n*realSize].y = reindex[tmp];
            tmp = srcArray[i + n*realSize].z;
            destArray[i + n*realSize].z = reindex[tmp];
            i += gridDim.x*blockDim.x;
        }
    }
}
8,065
#include "includes.h"

// Streams `pts`-sized slices of `data` through a per-block device-heap buffer.
// The buffer contents are discarded; the kernel only exercises memory traffic.
// NOTE(review): assumes it is launched with blockDim.x == pts (each thread
// stages one sample) — confirm with callers.
//
// Fixes over the original: every thread used to call malloc() and race to
// store its own pointer into the single __shared__ variable, leaking all but
// one allocation (and ultimately all of them — nothing was ever freed), with
// no NULL check and no barrier before use.
__global__ void GPUmemo( float *data, int pts )
{
    __shared__ float* trace;

    // One allocation per block, made by a single thread.
    if (threadIdx.x == 0) {
        trace = (float *)malloc(pts * sizeof(float));
    }
    __syncthreads();

    // The shared pointer is uniform across the block, so this early-exit is
    // taken by all threads together (no divergent barrier hazard).
    if (trace == NULL) {
        return;  // device heap exhausted
    }

    int Blocks;
    for( Blocks = 0; Blocks < gridDim.x; Blocks++ )
    {
        trace[threadIdx.x] = data[threadIdx.x + Blocks*pts];
    }

    __syncthreads();
    if (threadIdx.x == 0) {
        free(trace);  // release the per-block buffer
    }
}
8,066
#include <iostream>
#include <cmath>   // fabsf/fmaxf: plain abs()/max() from <iostream> alone may bind to the int overloads
using namespace std;

// Fill x with 1.0f and y with 2.0f over [0, n) using a grid-stride loop.
__global__ void init(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
  {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
}

// y[i] += x[i] over [0, n) using a grid-stride loop.
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i+= stride)
  {
    y[i] = x[i] + y[i];
  }
}

// Classic unified-memory saxpy-style demo: init + add on device, then verify
// on host that every y[i] == 3 and print the maximum absolute error.
int main()
{
  const int N = 1 << 20;
  float *x = NULL, *y = NULL;
  if (cudaMallocManaged(&x, N * sizeof(float)) != cudaSuccess ||
      cudaMallocManaged(&y, N * sizeof(float)) != cudaSuccess)
  {
    cerr << "cudaMallocManaged failed" << endl;
    return 1;
  }

  int blockSize = 512;
  int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-div so the grid covers N

  init<<<numBlocks, blockSize>>>(N, x, y);
  add<<<numBlocks, blockSize>>>(N, x, y);
  cudaDeviceSynchronize();  // required before the host touches managed memory

  // Original used abs(y[i]-3.0f): without <cmath> that can resolve to the
  // C library's int abs(), truncating every error to 0 and masking failures.
  float maxErr = 0.0f;
  for (int i = 0; i < N; i++)
  {
    maxErr = fmaxf(maxErr, fabsf(y[i] - 3.0f));
  }
  cout << maxErr << endl;

  cudaFree(x);
  cudaFree(y);
  return 0;
}
8,067
#include <iostream>
#include <cuda_runtime_api.h>
#include <chrono>

//#define DEBUG_DEV
#ifdef DEBUG_DEV
// Debug build: synchronize after every call and report the failing statement.
// (The deprecated cudaThreadSynchronize() call was removed; cudaDeviceSynchronize
// already provides the same guarantee.)
#define getErrorCuda(command)\
    command;\
    cudaDeviceSynchronize();\
    if (cudaPeekAtLastError() != cudaSuccess){\
        std::cout << #command << " : " << cudaGetErrorString(cudaGetLastError())\
        << " in file " << __FILE__ << " at line " << __LINE__ << std::endl;\
        exit(1);\
    }
#endif
#ifndef DEBUG_DEV
#define getErrorCuda(command) command;
#endif

// 21-tap stencil coefficients in constant memory (broadcast-friendly).
__constant__ float const_stencilWeight[21];

// --- Kernel variants: same 21-tap 1D stencil, different memory paths. ---
// All variants: one thread per output element, offset by 11 so the 10-wide
// halo reads stay in bounds on the left; `size` guards the right edge.

// Base case: coefficients and data through plain global loads.
__global__ void stencil(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += src[idx+i] * stencilWeight[i+10];
    dst[idx] = out;
}

// Coefficients through the read-only data cache (__ldg).
__global__ void stencilReadOnly1(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += src[idx+i] * __ldg(&stencilWeight[i+10]);
    dst[idx] = out;
}

// Data through the read-only data cache.
__global__ void stencilReadOnly2(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += __ldg(&src[idx+i]) * stencilWeight[i+10];
    dst[idx] = out;
}

// Both coefficients and data through the read-only data cache.
__global__ void stencilReadOnly3(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += __ldg(&src[idx+i]) * __ldg(&stencilWeight[i+10]);
    dst[idx] = out;
}

// Constant-memory coefficients.
__global__ void stencilConst1(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += src[idx+i] * const_stencilWeight[i+10];
    dst[idx] = out;
}

// Constant-memory coefficients, data through the read-only cache.
__global__ void stencilConst2(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += __ldg(&src[idx+i]) * const_stencilWeight[i+10];
    dst[idx] = out;
}

// Constant-memory coefficients, data staged in shared memory.
// NOTE(review): the staging loop reads src up to blockIdx start + 1044; callers
// must over-allocate src accordingly (main allocates 10x the used size).
__global__ void stencilShared1(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float buffer[1024+21];
    for (int i = threadIdx.x; i < 1024+21; i = i + 1024)
        buffer[i] = src[idx+i];
    idx += 11;
    if (idx >= size) return;
    __syncthreads();
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += buffer[threadIdx.x+10+i] * const_stencilWeight[i+10];
    dst[idx] = out;
}

// Constant-memory coefficients, shared staging filled through the read-only cache.
__global__ void stencilShared2(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float buffer[1024+21];
    for (int i = threadIdx.x; i < 1024+21; i = i + 1024)
        buffer[i] = __ldg(&src[idx+i]);
    idx += 11;
    if (idx >= size) return;
    __syncthreads();
    float out = 0;
    #pragma unroll
    for (int i = -10; i <= 10; i++)
        out += buffer[threadIdx.x+10+i] * const_stencilWeight[i+10];
    dst[idx] = out;
}

// Exact comparison against the reference result (same inputs, same order of
// operations, so bitwise equality is expected here).
void verify(float *arr, float *corr, int count)
{
    for (int i = 40; i < count; i++)
    {
        if (arr[i] != corr[i])
        {
            std::cout << "error verifying resutls" << std::endl;
            exit(1);
        }
    }
}

// Runs `launch` 100 times, keeps the best wall-clock time, verifies the device
// result against bCorr after every run, and prints throughput and read BW.
// (Replaces eight copy-pasted timing loops from the original main.)
template <typename Launch>
static void benchmark(const char *label, Launch launch, float *b, float *bOut,
                      float *bCorr, int blocks, int blockSize)
{
    float minTime = 10000;
    for (int i = 0; i < 100; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        launch();
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << label << " " << (blockSize*blocks)/minTime << " elem/s"
              << " Read BW "
              << (21*blockSize*blocks*sizeof(float)/1000.0/1000.0/1000.0)/minTime
              << " GB/s" << std::endl;
}

int main()
{
    float *a;      // device input (10x over-allocated: shared variants read past `size`)
    float *b;      // device output under test
    float *bOut;   // pinned host copy of b for verification
    float *bCorr;  // managed reference output
    float *weights;
    getErrorCuda(cudaMalloc(&a, sizeof(float)*102400000));
    getErrorCuda(cudaMalloc(&b, sizeof(float)*102400000));
    getErrorCuda(cudaMallocHost(&bOut, sizeof(float)*102400000));
    getErrorCuda(cudaMallocManaged(&bCorr, sizeof(float)*102400000));
    getErrorCuda(cudaMallocManaged(&weights, sizeof(float)*21));
    cudaDeviceSynchronize();

    // The original left a and b uninitialized (its init loop was commented
    // out); zero them so every run computes on deterministic data.
    getErrorCuda(cudaMemset(a, 0, sizeof(float)*102400000));
    getErrorCuda(cudaMemset(b, 0, sizeof(float)*102400000));
    for (int i = 0; i < 102400000; i++)
        bCorr[i] = 0;
    cudaDeviceSynchronize();

    int blockSize = 1024;
    int blocks = 10000;
    for (int i = 0; i < 21; i++)
        weights[i] = i-10;
    cudaDeviceSynchronize();
    cudaMemcpyToSymbol(const_stencilWeight, weights, sizeof(float)*21);

    // Reference result, then a sanity run of the base kernel against it.
    stencil<<<blocks, blockSize>>>(a, bCorr, 10240000-11, weights);
    cudaDeviceSynchronize();
    stencil<<<blocks, blockSize>>>(a, b, 10240000-11, weights);
    cudaDeviceSynchronize();
    getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
    verify(bOut, bCorr, 1000);
    cudaSetDevice(0);

    benchmark("Non optimized time",
              [&]{ stencil<<<blocks, blockSize>>>(a, b, 10240000-11, weights); },
              b, bOut, bCorr, blocks, blockSize);
    std::cout << std::endl;
    benchmark("read only cache stencil coefficients time",
              [&]{ stencilReadOnly1<<<blocks, blockSize>>>(a, b, 10240000-11, weights); },
              b, bOut, bCorr, blocks, blockSize);
    benchmark("read only data time",
              [&]{ stencilReadOnly2<<<blocks, blockSize>>>(a, b, 10240000-11, weights); },
              b, bOut, bCorr, blocks, blockSize);
    benchmark("read only coefficients and data time",
              [&]{ stencilReadOnly3<<<blocks, blockSize>>>(a, b, 10240000-11, weights); },
              b, bOut, bCorr, blocks, blockSize);
    std::cout << std::endl;
    benchmark("constant memory coefficients",
              [&]{ stencilConst1<<<blocks, blockSize>>>(a, b, 10240000); },
              b, bOut, bCorr, blocks, blockSize);
    benchmark("constant memory coefficients and data through read only cache time",
              [&]{ stencilConst2<<<blocks, blockSize>>>(a, b, 10240000); },
              b, bOut, bCorr, blocks, blockSize);
    std::cout << std::endl;
    benchmark("constant memory coefficients and data from shared time",
              [&]{ stencilShared1<<<blocks, blockSize>>>(a, b, 10240000); },
              b, bOut, bCorr, blocks, blockSize);
    benchmark("constant memory coefficients and data from shared through read only time",
              [&]{ stencilShared2<<<blocks, blockSize>>>(a, b, 10240000); },
              b, bOut, bCorr, blocks, blockSize);

    // Release resources (the original leaked all five allocations).
    cudaFree(a);
    cudaFree(b);
    cudaFreeHost(bOut);
    cudaFree(bCorr);
    cudaFree(weights);
}
8,068
#include "classifier.cuh"
#include <fstream>
#include <iostream>
#include <map>
#include <math.h>
#include <vector>
#include <string>

// Parses a CSV file into a vector of rows (first field via operator>>, the
// rest via getline-on-comma). Kept for API compatibility with other users.
vector<vector<float>> Load_State(string file_name)
{
    ifstream in_state_(file_name.c_str(), ifstream::in);
    vector<vector<float>> state_out;
    string start;
    while (getline(in_state_, start))
    {
        vector<float> x_coord;
        istringstream ss(start);
        float a;
        ss >> a;
        x_coord.push_back(a);
        string value;
        while (getline(ss, value, ','))
        {
            float b;
            ss >> b;
            x_coord.push_back(b);
        }
        state_out.push_back(x_coord);
    }
    return state_out;
}

// Flattened (row-major) variant of Load_State; reports the row count through
// n_rows so the caller can recover the column count.
vector<float> Load_State_1D(string file_name, unsigned int &n_rows)
{
    ifstream in_state_(file_name.c_str(), ifstream::in);
    vector<float> state_out;
    string start;
    n_rows = 0;
    while (getline(in_state_, start))
    {
        istringstream ss(start);
        float a;
        ss >> a;
        state_out.push_back(a);
        string value;
        while (getline(ss, value, ','))
        {
            float b;
            ss >> b;
            state_out.push_back(b);
        }
        ++n_rows;
    }
    return state_out;
}

// Loads one integer label per line.
vector<int> Load_Label(string file_name)
{
    ifstream in_label_(file_name.c_str(), ifstream::in);
    vector<int> label_out;
    string line;
    while (getline(in_label_, line))
    {
        istringstream iss(line);
        int label;
        iss >> label;
        label_out.push_back(label);
    }
    return label_out;
}

// Trains and evaluates one Naive Bayes model, timing both phases with CUDA
// events. Replaces four copy-pasted per-algorithm branches in main; also makes
// the pre-stop cudaDeviceSynchronize (previously only in the Gaussian branch)
// uniform, so training times are measured consistently.
template <typename Model>
static void run_classifier(const string &classifier,
                           vector<float> &X_train, vector<int> &Y_train,
                           vector<float> &X_test, vector<int> &Y_test,
                           cudaEvent_t training_start, cudaEvent_t training_stop,
                           cudaEvent_t testing_start, cudaEvent_t testing_stop)
{
    Model model = Model();
    float ms_train = 0.0, ms_test = 0.0;

    /* Training */
    cout << "Training a " << classifier << " Naive Bayes classifier" << endl;
    cudaEventRecord(training_start);
    model.train(X_train, Y_train);
    cudaDeviceSynchronize();  // include all outstanding device work in the timing
    cudaEventRecord(training_stop);
    cudaEventSynchronize(training_stop);
    cudaEventElapsedTime(&ms_train, training_start, training_stop);
    cout << "Training time: " << ms_train << " ms" << endl;

    /* Testing */
    cout << "Testing..." << endl;
    int score = 0;
    cudaEventRecord(testing_start);
    score = model.predict(X_test, Y_test);
    cudaEventRecord(testing_stop);
    cudaEventSynchronize(testing_stop);
    float fraction_correct = float(score) / Y_test.size();
    cout << "Test accuracy: " << (100 * fraction_correct) << " percent" << endl;
    cudaEventElapsedTime(&ms_test, testing_start, testing_stop);
    cout << "Testing time: " << ms_test << " ms" << endl;
}

int main(int argc, char *argv[])
{
    /* algoID 0: GaussianNB 1: BernoulliNB 2: MultinomialNB 3: ComplementNB */
    // The original dereferenced argv[1] unconditionally (segfault when run
    // with no arguments).
    if (argc < 2)
    {
        cout << "Usage: " << argv[0] << " <algoID 0-3>" << endl;
        exit(1);
    }
    int algoID = atoi(argv[1]);
    if (algoID == 1 || algoID == 2 || algoID == 3 || algoID == 0)
    {
        cout << "Loading data " << endl;
    }
    else
    {
        cout << "Invalid option. Code is exiting" << endl;
        exit(1);
    }
    cout << "Selected algoID: " << algoID << endl;

    vector<float> X_train;
    vector<float> X_test;
    vector<int> Y_train;
    vector<int> Y_test;
    unsigned int n_rows_train;
    unsigned int n_rows_test;

    // Load the four data files concurrently; the file set depends on the
    // selected algorithm's input encoding.
    if (algoID == 0)
    { /* GaussianNB */
        #pragma omp parallel sections
        {
            #pragma omp section
            X_train = Load_State_1D("../data/train_states.csv", n_rows_train);
            #pragma omp section
            X_test = Load_State_1D("../data/test_states.csv", n_rows_test);
            #pragma omp section
            Y_train = Load_Label("../data/train_labels.csv");
            #pragma omp section
            Y_test = Load_Label("../data/test_labels.csv");
        }
    }
    else if (algoID == 1)
    { /* BernoulliNB */
        #pragma omp parallel sections
        {
            #pragma omp section
            X_train = Load_State_1D("../data/X_train_onehot.csv", n_rows_train);
            #pragma omp section
            X_test = Load_State_1D("../data/X_test_onehot.csv", n_rows_test);
            #pragma omp section
            Y_train = Load_Label("../data/y_train_onehot.csv");
            #pragma omp section
            Y_test = Load_Label("../data/y_test_onehot.csv");
        }
    }
    else if (algoID == 2 || algoID == 3)
    { /* MultinomialNB or ComplementNB */
        #pragma omp parallel sections
        {
            #pragma omp section
            X_train = Load_State_1D("../data/X_train_bow.csv", n_rows_train);
            #pragma omp section
            X_test = Load_State_1D("../data/X_test_bow.csv", n_rows_test);
            #pragma omp section
            Y_train = Load_Label("../data/y_train_bow.csv");
            #pragma omp section
            Y_test = Load_Label("../data/y_test_bow.csv");
        }
    }

    cout << "X_train number of elements: " << X_train.size() << endl;
    cout << "Y_train number of elements: " << Y_train.size() << endl;
    cout << "X_test number of elements: " << X_test.size() << endl;
    cout << "Y_test number of elements: " << Y_test.size() << endl;
    unsigned int n_cols = X_train.size() / n_rows_train;
    cout << "Number of rows:" << n_rows_train << endl;
    cout << "Number of cols:" << n_cols << endl;

    // Timing CUDA events
    cudaEvent_t training_start, training_stop, testing_start, testing_stop;
    cudaEventCreate(&training_start);
    cudaEventCreate(&training_stop);
    cudaEventCreate(&testing_start);
    cudaEventCreate(&testing_stop);

    if (algoID == 0)
        run_classifier<GaussianNB>("Gaussian", X_train, Y_train, X_test, Y_test,
                                   training_start, training_stop, testing_start, testing_stop);
    else if (algoID == 1)
        run_classifier<BernoulliNB>("Bernoulli", X_train, Y_train, X_test, Y_test,
                                    training_start, training_stop, testing_start, testing_stop);
    else if (algoID == 2)
        run_classifier<MultinomialNB>("Multinomial", X_train, Y_train, X_test, Y_test,
                                      training_start, training_stop, testing_start, testing_stop);
    else if (algoID == 3)
        run_classifier<ComplementNB>("Complement", X_train, Y_train, X_test, Y_test,
                                     training_start, training_stop, testing_start, testing_stop);

    /* Cleanup — the original leaked the two testing events. */
    cudaEventDestroy(training_start);
    cudaEventDestroy(training_stop);
    cudaEventDestroy(testing_start);
    cudaEventDestroy(testing_stop);
    return 0;
}
8,069
#include "includes.h"

// Block-level sum reduction over `input` (IN PLACE, in global memory: i_data
// points into `input`), with 8-way unrolled loads and an unrolled final warp.
// Each block reduces 8*blockDim.x consecutive ints and writes its partial sum
// to temp_array[blockIdx.x].
// NOTE(review): the final warp phase relies on `volatile` + implicit warp
// synchrony; on Volta+ (independent thread scheduling) this needs
// __syncwarp() between steps to be safe — confirm target architecture.
// NOTE(review): when the guard below fails (tail block), the 8 partials are
// not folded into input[index], so partial trailing blocks appear to be
// reduced incorrectly — confirm callers pass size as a multiple of
// 8*blockDim.x.
__global__ void reduction_kernel_interleaved_warp_unrolling8_1(int * input, int * temp_array, int size)
{
    int tid = threadIdx.x;

    //element index for this thread
    int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;

    //local data pointer
    int * i_data = input + blockDim.x * blockIdx.x * 8;

    // 8-way unrolled accumulation: fold this thread's 8 strided elements into
    // the first stripe of the block's segment.
    if ((index + 7 * blockDim.x) < size)
    {
        int a1 = input[index];
        int a2 = input[index + blockDim.x];
        int a3 = input[index + 2 * blockDim.x];
        int a4 = input[index + 3 * blockDim.x];
        int a5 = input[index + 4 * blockDim.x];
        int a6 = input[index + 5 * blockDim.x];
        int a7 = input[index + 6 * blockDim.x];
        int a8 = input[index + 7 * blockDim.x];
        input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
    }

    __syncthreads();

    // Interleaved tree reduction down to 64 surviving partials.
    for (int offset = blockDim.x / 2; offset >= 64; offset = offset / 2)
    {
        if (tid < offset)
        {
            i_data[tid] += i_data[tid + offset];
        }
        __syncthreads();
    }

    // Final 64 -> 1 handled by a single warp without barriers; `volatile`
    // forces each partial back to memory between steps.
    if (tid < 32)
    {
        volatile int * vsmem = i_data;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }

    // Thread 0 publishes the block's partial sum.
    if (tid == 0)
    {
        temp_array[blockIdx.x] = i_data[0];
    }
}
8,070
#include "includes.h"

// target[i] = source[i] + mult * mat[pos], where pos gathers a row of `mat`
// selected per-column by `indices` (one index per group of `height` elements).
// Gathers that fall outside mat's height*width2 extent produce NaN, flagging
// invalid indices in the output. Grid-stride loop over width*height elements.
__global__ void kExpandAndAdd(float* source, float* mat, float* indices, float* target, int width, int height, float mult, int width2){
    const unsigned int start  = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = start; i < width*height; i += stride) {
        const int row = i % height;
        const int col = (int)indices[i / height];
        const int pos = height * col + row;

        if (pos < height * width2) {
            target[i] = source[i] + mult * mat[pos];
        } else {
            // inf - inf == NaN: same sentinel the original produced inline.
            target[i] = 1.0/0.0 - 1.0/0.0;
        }
    }
}
8,071
#include <iostream>
#include <cuda.h>
using namespace std;

// 1D stencil with zero padding: output[i] = sum_{j=0..2R} image[i+j-R]*mask[j].
// Shared layout (dynamic): [ mask: 2R+1 | output tile: blockDim.x | padded image: blockDim.x + 2R ].
//
// Fixes over the original:
//  - the convolution accumulated with the GLOBAL `mask` pointer instead of the
//    staged shared copy `msk`, making the copy dead code;
//  - `msk[threadIdx.x] = mask[threadIdx.x]` read past the 2R+1-element mask
//    whenever blockDim.x > 2R+1 (and under-filled it when blockDim.x < 2R+1);
//  - the center load and the output store had no `gidx < n` guard, so the last
//    partial block read/wrote out of bounds.
__global__ void stencil_kernel(const float* image, const float* mask, float* output, unsigned int n, unsigned int R) {
    extern __shared__ float shared_arr[];
    float *msk = shared_arr;
    float *out = (float*)&msk[2*R+1];
    float *img = (float*)&out[blockDim.x];

    int gidx = threadIdx.x + blockIdx.x * blockDim.x;
    int lidx = threadIdx.x + R;

    // Center element; zero-pad past the end of the image.
    img[lidx] = (gidx < (int)n) ? image[gidx] : 0;

    // First R threads also stage the left and right halos, zero-padded.
    if (threadIdx.x < R) {
        int left = gidx - (int)R;
        img[lidx - R] = (left >= 0) ? image[left] : 0;
        int right = gidx + blockDim.x;
        img[lidx + blockDim.x] = (right < (int)n) ? image[right] : 0;
    }

    // Stage the 2R+1 mask coefficients cooperatively, whatever blockDim.x is.
    for (unsigned int j = threadIdx.x; j <= 2*R; j += blockDim.x) {
        msk[j] = mask[j];
    }
    out[threadIdx.x] = 0;
    __syncthreads();

    // Apply the stencil from the shared copies.
    for (unsigned int j = 0; j <= 2*R; j++) {
        out[threadIdx.x] += img[threadIdx.x + j] * msk[j];
    }
    if (gidx < (int)n) {
        output[gidx] = out[threadIdx.x];
    }
}

// Host launcher: one thread per output element, dynamic shared memory sized
// for mask (2R+1) + output (m) + padded image (m+2R) = 2m + 4R + 1 floats.
__host__ void stencil(const float* image, const float* mask, float* output, unsigned int n, unsigned int R, unsigned int threads_per_block) {
    unsigned int m = threads_per_block;
    size_t shared_array_size = (2*m + 4*R + 1)*sizeof(float); //Image=m+2R, Mask=2R+1, Output=m
    stencil_kernel<<<(n + m-1)/m, m, shared_array_size>>>(image, mask, output, n, R);
    cudaDeviceSynchronize();
}
8,072
// g++ -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_OMP -I../../../thrust/ -fopenmp -x c++ stocks_reduction.cu -o stocks_reduction-cpu && ./stocks_reduction-cpu < stocks.txt
// https://thrust.github.io/doc/group__reductions.html
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <thrust/reduce.h>
#include <chrono>
#include <limits>

// Reads one stock price per line from stdin and reports mean/min/max over the
// whole series and over the last 365 entries.
int main() {
    int N = 0;
    thrust::host_vector<double> vec_cpu;
    // Read until extraction fails. The original tested cin.fail() BEFORE the
    // extraction, so it pushed one bogus trailing element and counted it in N.
    double price;
    while (std::cin >> price) {
        vec_cpu.push_back(price);
        N += 1;
    }
    if (N == 0) {
        std::cerr << "no input\n";
        return 1;
    }
    thrust::device_vector<double> vec_gpu(vec_cpu);

    // NOTE: thrust::reduce accumulates in the type of the init value. The
    // original passed int 0, so every sum was computed in int (truncating the
    // fractional part of each partial sum); 0.0 keeps it in double.
    double mean = thrust::reduce(vec_gpu.begin(), vec_gpu.end(), 0.0, thrust::plus<double>()) / N;
    std::cout << "preço médio no último 10 anos: " << mean << "\n";

    // Mean over the last year; the original divided the 365-element sum by N
    // and labeled the result "maior valor" (largest value).
    int year = (N < 365) ? N : 365;
    double mean_year = thrust::reduce(vec_gpu.end() - year, vec_gpu.end(), 0.0, thrust::plus<double>()) / year;
    std::cout << "preço médio no último ano: " << mean_year << "\n";

    // Extremes: seed max with the lowest representable double (init 0 would be
    // wrong for all-negative series) and min with the highest.
    double max = thrust::reduce(vec_gpu.begin(), vec_gpu.end(),
                                std::numeric_limits<double>::lowest(), thrust::maximum<double>());
    std::cout << "maior valor da sequência inteira: " << max << "\n";
    double min = thrust::reduce(vec_gpu.begin(), vec_gpu.end(),
                                std::numeric_limits<double>::max(), thrust::minimum<double>());
    std::cout << "menor valor da sequência inteira: " << min << "\n";

    double max_year = thrust::reduce(vec_gpu.end() - year, vec_gpu.end(),
                                     std::numeric_limits<double>::lowest(), thrust::maximum<double>());
    std::cout << "maior valor do último ano: " << max_year << "\n";
    double min_year = thrust::reduce(vec_gpu.end() - year, vec_gpu.end(),
                                     std::numeric_limits<double>::max(), thrust::minimum<double>());
    std::cout << "menor valor do último ano: " << min_year << "\n";
}
8,073
#include "cufft.h"

#ifndef pi
#define pi 4.0f*atanf(1.0f)
#endif
#ifndef threads_num
#define threads_num 256
#endif

// Spectral (FFT-based) 1-D derivative helpers for a staggered grid.
// "Forw"/"Back" appear to refer to the +1/2 vs -1/2 half-cell phase shift
// applied in wavenumber space — TODO confirm intended sign convention.
static __global__ void DataTypeConvertFloatToComplex_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataGetBackFft_1d(cufftComplex *d_in, float *d_out, float dx, int n);
static __global__ void PhaseShiftForwFft_1d(cufftComplex *d_in, cufftComplex *d_out, int n);
static __global__ void PhaseShiftBackFft_1d(cufftComplex *d_in, cufftComplex *d_out, int n);

// Forward staggered-grid spectral derivative of a real device array d_in[n]
// into d_out[n]. dx is the grid spacing used for normalization.
// NOTE(review): cufft/cudaMalloc return codes are not checked, and four
// temporaries are allocated per call — consider reusing buffers/plans.
void cufft_fftf_1d(float *d_in, float *d_out, float dx, int n)
{
    /* Data conversion from float to Complex */
    cufftComplex *d_in_data;
    cudaMalloc((void **)&d_in_data, n*sizeof(cufftComplex));
    DataTypeConvertFloatToComplex_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_in_data, n);

    /* Create a 1D forward/inverse FFT plan */
    cufftHandle plan;
    cufftPlan1d(&plan, n, CUFFT_C2C, 1);

    /* Use the CUFFT plan to transform the array */
    cufftComplex *d_tmp_data;
    cudaMalloc((void **)&d_tmp_data, n*sizeof(cufftComplex));
    cufftExecC2C(plan, d_in_data, d_tmp_data, CUFFT_FORWARD);

    /* 1/2 phase shift for staggered grid */
    cufftComplex *d_tmp_data_shift;
    cudaMalloc((void **)&d_tmp_data_shift, n*sizeof(cufftComplex));
    PhaseShiftForwFft_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_tmp_data, d_tmp_data_shift, n);

    /* Inverse fourier transform */
    cufftComplex *d_out_data;
    cudaMalloc((void **)&d_out_data, n*sizeof(cufftComplex));
    cufftExecC2C(plan, d_tmp_data_shift, d_out_data, CUFFT_INVERSE);

    /* Data normalization*/
    DataGetBackFft_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_out_data, d_out, dx, n);

    cufftDestroy(plan);
    cudaFree(d_in_data);
    cudaFree(d_tmp_data);
    cudaFree(d_tmp_data_shift);
    cudaFree(d_out_data);
}

// Backward staggered-grid spectral derivative; identical pipeline to
// cufft_fftf_1d except it applies the opposite half-cell phase shift.
void cufft_fftb_1d(float *d_in, float *d_out, float dx, int n)
{
    /* Data conversion from float to Complex */
    cufftComplex *d_in_data;
    cudaMalloc((void **)&d_in_data, n*sizeof(cufftComplex));
    DataTypeConvertFloatToComplex_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_in_data, n);

    /* Create a 1D forward/inverse FFT plan */
    cufftHandle plan;
    cufftPlan1d(&plan, n, CUFFT_C2C, 1);

    /* Use the CUFFT plan to transform the array */
    cufftComplex *d_tmp_data;
    cudaMalloc((void **)&d_tmp_data, n*sizeof(cufftComplex));
    cufftExecC2C(plan, d_in_data, d_tmp_data, CUFFT_FORWARD);

    /* -1/2 phase shift for staggered grid */
    cufftComplex *d_tmp_data_shift;
    cudaMalloc((void **)&d_tmp_data_shift, n*sizeof(cufftComplex));
    PhaseShiftBackFft_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_tmp_data, d_tmp_data_shift, n);

    /* Inverse fourier transform */
    cufftComplex *d_out_data;
    cudaMalloc((void **)&d_out_data, n*sizeof(cufftComplex));
    cufftExecC2C(plan, d_tmp_data_shift, d_out_data, CUFFT_INVERSE);

    /* Data normalization*/
    DataGetBackFft_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_out_data, d_out, dx, n);

    cufftDestroy(plan);
    cudaFree(d_in_data);
    cudaFree(d_tmp_data);
    cudaFree(d_tmp_data_shift);
    cudaFree(d_out_data);
}

// Widens a real array into cufftComplex with zero imaginary parts.
static __global__ void DataTypeConvertFloatToComplex_1d(float *d_in, cufftComplex *d_out, int n)
{
    //int tid = threadIdx.x;
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    //for (int ix=tid; ix<n; ix+=threads_num)
    if (ix < n)
    {
        d_out[ix].x = d_in[ix];
        d_out[ix].y = 0.0f;
    }
}

// Extracts the real part and normalizes: cuFFT's inverse transform is
// unnormalized (factor n), and the extra 1/dx scales the normalized
// wavenumber to a physical derivative — TODO confirm the dx convention.
static __global__ void DataGetBackFft_1d(cufftComplex *d_in, float *d_out, float dx, int n)
{
    //int tid = threadIdx.x;
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    //for (int ix=tid; ix<n; ix+=threads_num)
    if (ix < n)
        d_out[ix] = d_in[ix].x/((float)n*dx);
}

// Multiplies the spectrum by the (signed, FFT-ordered) wavenumber d_k and a
// +k/2 rotation — the wavenumber-domain equivalent of differentiation plus a
// half-cell forward shift. d_k covers [0, pi) for ix < n/2 and [-pi, 0) above.
static __global__ void PhaseShiftForwFft_1d(cufftComplex *d_in, cufftComplex *d_out, int n)
{
    //int tid = threadIdx.x;
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    float d_k;
    //for (int ix=tid; ix<n; ix+=threads_num)
    if (ix < n)
    {
        if (ix<n/2)
            d_k = (float)ix*pi/(float)(n/2);
        else
            d_k = -pi+(float)(ix-n/2)*pi/(float)(n/2);
        d_out[ix].y = d_k*(d_in[ix].x*cosf(d_k/2.0f)+d_in[ix].y*sinf(d_k/2.0f));
        d_out[ix].x = d_k*(-d_in[ix].x*sinf(d_k/2.0f)+d_in[ix].y*cosf(d_k/2.0f));
    }
}

// Same as PhaseShiftForwFft_1d but with the opposite (-k/2) rotation,
// i.e. a half-cell backward shift.
static __global__ void PhaseShiftBackFft_1d(cufftComplex *d_in, cufftComplex *d_out, int n)
{
    //int tid = threadIdx.x;
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    float d_k;
    //for (int ix=tid; ix<n; ix+=threads_num)
    if (ix < n)
    {
        if (ix<n/2)
            d_k = (float)ix*pi/(float)(n/2);
        else
            d_k = -pi+(float)(ix-n/2)*pi/(float)(n/2);
        d_out[ix].y = d_k*(d_in[ix].x*cosf(d_k/2.0f)-d_in[ix].y*sinf(d_k/2.0f));
        d_out[ix].x = d_k*(d_in[ix].x*sinf(d_k/2.0f)+d_in[ix].y*cosf(d_k/2.0f));
    }
}

// Host convenience wrapper: copies h_in to the device, runs the backward
// staggered derivative, and copies the result back to h_out.
void data_fft_derv(float *h_in, float *h_out, float dx, int n)
{
    float *d_in;
    cudaMalloc((void **)&d_in, n*sizeof(float));
    cudaMemcpy(d_in, h_in, n*sizeof(float), cudaMemcpyHostToDevice);
    float *d_out;
    cudaMalloc((void **)&d_out, n*sizeof(float));
    cufft_fftb_1d(d_in, d_out, dx, n);
    cudaMemcpy(h_out, d_out, n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
8,074
#include "includes.h"

// Computes, one thread per sample j, the error term
//   e[j] = f(x_j) - y[j],  where  f(x_j) = b + sum_i a[i]*y[i]*kval[i*objs + j].
// a, y: per-sample coefficients/labels; kval: objs x objs kernel matrix
// stored row-major, read column-wise by this kernel.
__global__ static void calc_e(int objs, double* a, double b, int* y, double* kval, double* e)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col >= objs) return;   // grid may overshoot the sample count

    double decision = b;
    // a[row] and y[row] are the same address across the warp (broadcast);
    // kval is walked down one column per thread.
    for (int row = 0; row < objs; ++row) {
        decision += a[row] * y[row] * kval[row * objs + col];
    }
    e[col] = decision - y[col];
}
8,075
#include "includes.h"

/*
 * Implementations
 */

// Backward pass accumulating gradients into dt from attention-weight
// gradients dw and feature map f. Presumably the "t" (query) gradient of a
// criss-cross attention module — TODO confirm against the forward kernel.
//
// Expected layouts (inferred from the indexing — verify at the call site):
//   dw: (num, len, H, W) with len = H + W - 1 (one weight per same-row or
//       same-column position, the pixel itself counted once, in the row part)
//   f, dt: (num, chn, H, W)
// Launch: 2-D thread grid over (x=W, y=H), blockIdx.z selects the channel.
// NOTE(review): the `t` argument is never read in this kernel.
__global__ void ca_backward_kernel_t(const float *dw, const float *t, const float *f, float *dt, int num, int chn, int height, int width) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;   // column of this pixel
  int y = blockIdx.y * blockDim.y + threadIdx.y;   // row of this pixel
  int sp = height * width;                         // elements per spatial plane
  int len = height + width - 1;                    // attention positions per pixel
  int plane = blockIdx.z;                          // channel index
  if (x < width && y < height && plane < chn) {
    for (int batch = 0; batch < num; ++batch) {
      // Contributions from every pixel in this pixel's row (indices 0..W-1 of dw).
      for (int i = 0; i < width; ++i) {
        float _dw = dw[(batch * len + i) * sp + y*width + x];
        float _f = f[(batch * chn + plane) * sp + y*width + i];
        dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
      }
      // Contributions from every pixel in this pixel's column, skipping the
      // pixel itself; j compacts the column index around the hole at i == y.
      for (int i = 0; i < height; ++i) {
        if (i == y) continue;
        int j = i<y ? i : i-1;
        float _dw = dw[(batch * len + width + j) * sp + y*width + x];
        float _f = f[(batch * chn + plane) * sp + i*width + x];
        dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f;
      }
    }
  }
}
8,076
#include "includes.h"

// Builds a per-q table of atomic form factors FF_table[q][element] from
// Waasmaier-Kirfel coefficients (WK, 11 per element), with a FoXS-style C1
// excluded-volume correction using van der Waals radii (vdW) and solvent
// density rho. One block handles one q value (grid-strided over num_q);
// the last table column (jj == num_ele) is water (O + 2H) — consistent with
// the doubled hydrogen terms below.
//
// Capacity assumptions baked into the shared arrays: num_ele <= 6
// (WK_s holds 6*11 = 66 coefficients, FF_pt/vdW_s hold num_ele+1 <= 7).
//
// NOTE(review): the shared scalars (q_pt, q_WK, C1, expC1, C1_PI_43_rho) are
// written by EVERY thread with no barrier before use — the values written are
// identical so this looks benign, but it is formally a data race; consider
// having thread 0 write them followed by __syncthreads().
// NOTE(review): there is no __syncthreads() at the bottom of the ii loop, so a
// fast thread may start reloading WK_s for the next q while slow threads are
// still reading it — benign only because WK_s is reloaded with the same values.
__global__ void FF_calc ( float *q_S_ref_dS, float *WK, float *vdW, int num_q, int num_ele, float c1, float r_m, float *FF_table, float rho) {
    __shared__ float q_pt, q_WK, C1, expC1;
    __shared__ float FF_pt[7]; // num_ele + 1, the last one for water.
    __shared__ float vdW_s[7];
    __shared__ float WK_s[66];
    __shared__ float C1_PI_43_rho;
    if (blockIdx.x >= num_q) return; // out of q range
    for (int ii = blockIdx.x; ii < num_q; ii += gridDim.x) {
        q_pt = q_S_ref_dS[ii];
        q_WK = q_pt / 4.0 / PI;   // WK tables are parameterized in q/(4*pi)
        // FoXS C1 term
        expC1 = -powf(4.0 * PI / 3.0, 1.5) * q_WK * q_WK * r_m * r_m * (c1 * c1 - 1.0) / 4.0 / PI;
        C1 = powf(c1,3) * exp(expC1);
        C1_PI_43_rho = C1 * PI * 4.0 / 3.0 * rho;
        // Stage the WK coefficients (11 per element) into shared memory.
        for (int jj = threadIdx.x; jj < 11 * num_ele; jj += blockDim.x) {
            WK_s[jj] = WK[jj];
        }
        __syncthreads();
        // Calculate Form factor for this block (or q vector)
        for (int jj = threadIdx.x; jj < num_ele + 1; jj += blockDim.x) {
            vdW_s[jj] = vdW[jj];
            if (jj == num_ele) { // water
                FF_pt[jj] = WK_s[3*11+5];     // oxygen constant term (element row 3)
                FF_pt[jj] += 2.0 * WK_s[5];   // two hydrogens (element row 0)
                // excluded-volume (dummy-atom) subtraction
                FF_pt[jj] -= C1_PI_43_rho * powf(vdW_s[jj],3.0) * exp(-PI * vdW_s[jj] * vdW_s[jj] * q_WK * q_WK);
                for (int kk = 0; kk < 5; kk ++) {
                    FF_pt[jj] += WK_s[3*11+kk] * exp(-WK_s[3*11+kk+6] * q_WK * q_WK);
                    // hydrogen Gaussians added twice on purpose (2 H in water)
                    FF_pt[jj] += WK_s[kk] * exp(-WK_s[kk+6] * q_WK * q_WK);
                    FF_pt[jj] += WK_s[kk] * exp(-WK_s[kk+6] * q_WK * q_WK);
                }
            } else {
                FF_pt[jj] = WK_s[jj*11+5];   // element's constant term
                // The part is for excluded volume
                FF_pt[jj] -= C1_PI_43_rho * powf(vdW_s[jj],3.0) * exp(-PI * vdW_s[jj] * vdW_s[jj] * q_WK * q_WK);
                // five Gaussian terms: a_k * exp(-b_k * (q/4pi)^2)
                for (int kk = 0; kk < 5; kk++) {
                    FF_pt[jj] += WK_s[jj*11+kk] * exp(-WK_s[jj*11+kk+6] * q_WK * q_WK);
                }
            }
            FF_table[ii*(num_ele+1)+jj] = FF_pt[jj];
        }
    }
}
8,077
// ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Paralel Systems
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:petr.olivka@vsb.cz
//
// Example of CUDA Technology Usage
// Multiplication of elements in float array
//
// ***********************************************************************

#include <cuda_runtime.h>
#include <stdio.h>

// Demo kernel for array elements multiplication.
// Every thread selects one element and multiply it.
// The 30-iteration multiply/divide loop is a deliberate busy-work load for
// timing demonstrations (it is a near no-op apart from float rounding);
// the net effect is pole1[l] *= pole2[l].
__global__ void kernel_mult( float *pole1, float *pole2, int L )
{
    int l = blockDim.x * blockIdx.x + threadIdx.x;
    // if grid is greater then length of array...
    if ( l >= L ) return;

    for ( int i = 0; i < 30; i++ )
    {
        pole1[l] *= pole2[l];
        pole1[l] /= pole2[l];
    }
    pole1[l] *= pole2[l];
}

// Host wrapper: allocates device buffers, copies P1/P2 over, runs
// kernel_mult with 1024-thread blocks, and copies the result back into P1.
// Each CUDA call's status is reported with its source line on failure.
void run_mult( float *P1, float *P2, int Length )
{
    cudaError_t cerr;
    int threads = 1024;
    int blocks = ( Length + threads - 1 ) / threads;   // ceil-div grid size
    printf("blocks: %d\n", blocks);

    // Memory allocation in GPU device
    float *cudaP1;
    float *cudaP2;
    cerr = cudaMalloc( &cudaP1, Length * sizeof( float ) );
    if ( cerr != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
    cerr = cudaMalloc( &cudaP2, Length * sizeof( float ) );
    if ( cerr != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );

    // Copy data from PC to GPU device
    cerr = cudaMemcpy( cudaP1, P1, Length * sizeof( float ), cudaMemcpyHostToDevice );
    if ( cerr != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
    cerr = cudaMemcpy( cudaP2, P2, Length * sizeof( float ), cudaMemcpyHostToDevice );
    if ( cerr != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );

    // Grid creation
    kernel_mult<<< blocks, threads >>>(cudaP1, cudaP2, Length);
    if ( ( cerr = cudaGetLastError() ) != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );

    // Copy data from GPU device to PC
    // (the blocking cudaMemcpy implicitly waits for the kernel to finish)
    cerr = cudaMemcpy( P1, cudaP1, Length * sizeof( float ), cudaMemcpyDeviceToHost );
    if ( cerr != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );

    // Free memory
    cudaFree(cudaP1);
    cudaFree(cudaP2);
}
8,078
#ifndef POP_H
#define POP_H
#pragma once

#include "util/device_utils.cuh"

// Populates the possValues array
// Removes all values that are not possible, based on starting board
//
// Board layout: 81 Squares in row-major order; tid 0..80 maps to
// row = tid/9, col = tid%9. Each Square carries value, isLocked, and a
// 9-slot possValues array (slot v-1 nonzero means value v is still possible).

// returns an array containing all the Squares in the current row
__device__ void getRow(int tid, Square* board, Square* localRow) {
    int rowNum = (tid/9); // had floor here before
    int startOfRow = rowNum*9;   // index of the row's first square
    //Square output[9];
    for (int i = 0; i<9; i++) {
        // deep-copy each square, including its possValues
        localRow[i].value = board[startOfRow + i].value;
        localRow[i].isLocked = board[startOfRow + i].isLocked;
        for (int j=0; j<9; j++) {
            localRow[i].possValues[j] = board[startOfRow + i].possValues[j];
        }
    }
    //return output;
}

// Copies the 9 Squares of tid's column into localCol.
__device__ void getCol(int tid, Square* board, Square* localCol) {
    int colNum = tid%9; // also first element of the column
    //Square output[9];
    for (int i = 0; i<9; i++) {
        localCol[i].value = board[colNum + (9*i)].value;
        localCol[i].isLocked = board[colNum + (9*i)].isLocked;
        for (int j=0; j<9; j++) {
            localCol[i].possValues[j] = board[colNum + (9*i)].possValues[j];
        }
    }
    //return output;
}

// Copies the 9 Squares of tid's 3x3 block into localBlock.
__device__ void getBlock(int tid, Square* board, Square* localBlock) {
    int blockRow; // tells us if it's in the top/mid/bot
    if (tid<27) blockRow = 0; //top
    else if (tid<54) blockRow = 1; //middle
    else blockRow = 2; //bottom

    int blockCol; // tells us if it's on the left/mid/right
    int col = tid%9;
    if (col<3) blockCol = 0; //left side
    else if (col<6) blockCol = 1; //middle
    else blockCol = 2; //right side

    //now we know exactly which block we are dealing with, sooooo
    int starter = blockRow*27 + blockCol*3;   // top-left square of the block
    int offset;
    for (int i=0; i<9; i++) {
        // map i (0..8) onto the block's 3 rows: +0, +9, +18 row strides
        if (i<3) offset = i;
        else if (i<6) offset = i -3 +9;
        else offset = i -6 +18;
        localBlock[i].value = board[starter + offset].value;
        localBlock[i].isLocked = board[starter + offset].isLocked;
        for(int j=0; j<9; j++)
            localBlock[i].possValues[j] = board[starter+offset].possValues[j];
    }
    /* localBlock = {board[starter], board[starter+1], board[starter+2],
                     board[starter+9], board[starter+10], board[starter+11],
                     board[starter+18], board[starter+19], board[starter+20]}; */
    //return output;
}

// One thread per square (81 threads expected): prunes each square's
// possValues by zeroing any value already placed in its row, column, or block.
// NOTE(review): the stage-in/write-back by threadIdx.x == 0 and the use of a
// per-block shared s_board mean this kernel is only coherent when launched as
// a single block of >= 81 threads; with multiple blocks each block would
// write back its own partially-updated copy.
// NOTE(review): getRow/getCol/getBlock read other squares' possValues from
// s_board while sibling threads are concurrently zeroing their own possValues
// — benign here only because the pruning logic reads just .value.
__global__ void populate(Square* board) {
    __shared__ Square s_board[81];
    // stage the whole board into shared memory (single thread, then barrier)
    if (threadIdx.x == 0) {
        for(int i = 0; i<81; i++) {
            s_board[i].value = board[i].value;
            s_board[i].isLocked = board[i].isLocked;
            for (int j=0; j<9; j++) {
                s_board[i].possValues[j] = board[i].possValues[j];
            }
        }
    }
    __syncthreads();
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid<81) { //one thread per square
        // start by filling the possValues array with numbers 1-9
        // board[tid].possValues = (1, 2, 3, 4, 5, 6, 7, 8, 9);
        // this is now done in io_utils.cuh

        // initialize arrays for the current square's row/col/block
        Square localRow[9];
        getRow(tid, s_board, localRow);
        Square localCol[9];
        getCol(tid, s_board, localCol);
        Square localBlock[9];
        getBlock(tid, s_board, localBlock);

        int localRowValues[9];
        int localColValues[9];
        int localBlockValues[9];
        for (int i=0; i<9; i++) {
            localRowValues[i] = localRow[i].value;
            localColValues[i] = localCol[i].value;
            localBlockValues[i] = localBlock[i].value;
        }

        // use popoff to remove invalid values from the possValues array
        //int cur;
        int rowVal, colVal, blockVal;
        for (int i=0; i<9; i++) {
            //cur = s_board[tid].possValues[i];
            rowVal = localRowValues[i];
            colVal = localColValues[i];
            blockVal = localBlockValues[i];
            // any value already placed in a peer square is no longer possible
            if (rowVal>=1 && rowVal<=9) s_board[tid].possValues[rowVal-1] = 0;
            // if (colVal != 0)
            if (colVal>=1 && colVal<=9) s_board[tid].possValues[colVal-1] = 0;
            if (blockVal>=1 && blockVal<=9) s_board[tid].possValues[blockVal-1] = 0;
            /* if (cur==NULL) break;
               // check if another Square in the row/col/block makes cur
               // invalid for the current Square
               if (!validNum(cur, localBlockValues, localRowValues, localColValues)) {
                   //if there is a conflict, pop it off
                   //isPossibleNum checks if it's even in the
                   //array of possValues
                   popoff(i, s_board[i].possValues);
            */
        }
    }
    __syncthreads();
    // write the pruned board back to global memory (single thread)
    if (threadIdx.x == 0) {
        for (int i=0; i<81; i++) {
            board[i].value = s_board[i].value;
            board[i].isLocked = s_board[i].isLocked;
            for (int j=0; j<9; j++)
                board[i].possValues[j] = s_board[i].possValues[j];
        }
    }
}

#endif
8,079
/*
 * gpu_physical_model.c
 *
 * Created on: Jan 24, 2016
 * Author: sled
 */

//
// Physical model GPU implementation
//
// Closed-form trajectory under gravity g and linear drag K:
//   x(t) = x0 + (vx0/K) (1 - e^{-Kt})
//   y(t) = y0 + ((g + K vy0)/K^2) (1 - e^{-Kt}) - g t / K
//   z(t) = z0 + (vz0/K) (1 - e^{-Kt})
// Element 0 of each array holds the initial position; element idx holds the
// state at t = idx (unit timestep assumed — TODO confirm intended dt).
//

#include <unistd.h>
#include <stdlib.h>
#include <math.h>

#define THREADS_PER_BLOCK 128

// CUDA kernel declaration
__global__ void cuda_physical_model_kernel(double g, double K, double vx_0, double vy_0, double vz_0, double *x, double *y, double *z, int N);

// C/C++ Wrapper: copies the x/y/z state arrays to the device, fills elements
// 1..N-1 from the closed-form solution, and copies the results back.
void gpu_physical_model_compute(double g, double K, double vx_0, double vy_0, double vz_0, double *x, double *y, double *z, int N)
{
    // Device memory
    double *dev_x;
    double *dev_y;
    double *dev_z;
    size_t size = N * sizeof(double);

    // Allocated device memory
    cudaMalloc((void **)&dev_x, size);
    cudaMalloc((void **)&dev_y, size);
    cudaMalloc((void **)&dev_z, size);

    // Upload data to device memory (element 0 carries the initial position)
    cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_z, z, size, cudaMemcpyHostToDevice);

    cuda_physical_model_kernel<<<((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(g, K, vx_0, vy_0, vz_0, dev_x, dev_y, dev_z, N);

    // Blocking copies also synchronize with the kernel.
    cudaMemcpy(x, dev_x, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(y, dev_y, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(z, dev_z, size, cudaMemcpyDeviceToHost);

    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_z);
}

// GPU memory buffers are filled with computed physical states 3D components.
// Fix vs. previous version: x and z used (1.0 - exp(-K*idx)/K) — the /K was
// misplaced inside the parenthesis, inconsistent with the y formula and with
// the drag-model solution x(t) = x0 + (v0/K)(1 - e^{-Kt}).
__global__ void cuda_physical_model_kernel(double g, double K, double vx_0, double vy_0, double vz_0, double *x, double *y, double *z, int N)
{
    int idx = threadIdx.x + (blockDim.x * blockIdx.x);
    if (idx == 0)
        return;   // element 0 is the initial condition; never overwrite it
    if (idx < N) {
        double decay = 1.0 - exp(-K * idx);   // shared factor (1 - e^{-Kt})
        x[idx] = x[0] + (vx_0 / K) * decay;
        y[idx] = y[0] + ((g + K * vy_0) / (K * K)) * decay - (g * idx) / K;
        z[idx] = z[0] + (vz_0 / K) * decay;
    }
}
8,080
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

// CPU reference: sample standard deviation of a[0..dim-1].
double secuential(const double a[], int dim, bool verbose) {
    double mean = 0;
    for (int i = 0; i < dim; i++) {
        mean += a[i];
    }
    mean = mean / dim;
    if (verbose) printf("cpu mean %f\n", mean);
    double sum = 0;
    for (int i = 0; i < dim; i++) {
        sum += (a[i] - mean) * (a[i] - mean);
    }
    if (verbose) printf("cpu sigma %f\n", sum);
    return sqrt(sum / (dim - 1));
}

// One pairwise-reduction step over a[0..dim-1]: a[g] = a[2g] + a[2g+1],
// with the unpaired tail element carried through when dim is odd.
// (The previous version silently dropped the last element for odd dim,
// so only power-of-two sizes reduced correctly.)
__global__ void reduccion(double a[], const int dim)
{
    unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long int half = ((unsigned long int)dim + 1) / 2;
    if (global_id < half) {
        unsigned long int i = 2 * global_id;
        double s = a[i];
        if (i + 1 < (unsigned long int)dim) s += a[i + 1];  // odd tail: pass through
        a[global_id] = s;
    }
}

// Transforms a[i] into (a[i] - mean)^2, elementwise, in preparation for the
// variance reduction.
__global__ void pre_sigma(double a[], const int dim, const double mean)
{
    unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (global_id < dim) {
        a[global_id] -= mean;
        a[global_id] *= a[global_id];
    }
}

// Computes the sample standard deviation of a random array on the GPU
// (sum -> mean, (x-mean)^2, sum -> variance) and compares against the CPU.
// Usage: prog [size] [block_size] [v]
//
// Fixes vs. previous version:
//  - pre_sigma was launched as <<<bloque2, grid2>>> (grid and block swapped) —
//    it only worked by accident for small default sizes;
//  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize();
//  - elapsed times now use CLOCKS_PER_SEC so the "seconds" label is correct;
//  - the reduction loop now handles non-power-of-two sizes (ceil halving).
int main(int argc, char *argv[])
{
    clock_t time_begin;
    double *device_array = 0;
    double *host_array = 0;
    unsigned long int size_array = (argc > 1) ? atoi(argv[1]) : 1024;
    unsigned int block_size = (argc > 2) ? atoi(argv[2]) : 16;
    bool verbose = (argc > 3) ? (argv[3][0] == 'v') : false;

    host_array = (double *)malloc(size_array * sizeof(double));
    double *copy_host_array = (double *)malloc(size_array * sizeof(double));
    for (unsigned int i = 0; i < size_array; i++) {
        host_array[i] = rand() % 10;
        copy_host_array[i] = host_array[i];
        if (verbose) printf("%f\t", host_array[i]);
    }
    printf("\n");

    cudaMalloc(&device_array, size_array * sizeof(double));
    cudaError_t e = cudaMemcpy(device_array, host_array, sizeof(double) * size_array, cudaMemcpyHostToDevice);

    time_begin = clock();

    // First reduction: total sum ends up in a[0].
    unsigned long int i = size_array;
    while (i > 1) {
        unsigned long int half = (i + 1) / 2;   // ceil so odd tails survive
        dim3 bloque(block_size);
        dim3 grid((half + bloque.x - 1) / bloque.x);
        reduccion<<<grid, bloque>>>(device_array, i);
        cudaDeviceSynchronize();
        i = half;
    }
    cudaMemcpy(copy_host_array, device_array, sizeof(double) * size_array, cudaMemcpyDeviceToHost);
    double mean_gpu = copy_host_array[0] / size_array;

    // Reload the raw data and square the deviations from the mean.
    e = cudaMemcpy(device_array, host_array, sizeof(double) * size_array, cudaMemcpyHostToDevice);
    (void)e;
    dim3 bloque2(block_size);
    dim3 grid2((size_array + bloque2.x - 1) / bloque2.x);
    pre_sigma<<<grid2, bloque2>>>(device_array, size_array, mean_gpu);  // grid first, then block
    cudaDeviceSynchronize();
    cudaMemcpy(copy_host_array, device_array, sizeof(double) * size_array, cudaMemcpyDeviceToHost);
    if (verbose) {
        printf("mean gpu: %f\n", mean_gpu);
        for (unsigned int j = 0; j < size_array; j++) printf("%f\t", copy_host_array[j]);
        printf("\n");
    }

    // Second reduction: sum of squared deviations ends up in a[0].
    i = size_array;
    while (i > 1) {
        unsigned long int half = (i + 1) / 2;
        dim3 bloque(block_size);
        dim3 grid((half + bloque.x - 1) / bloque.x);
        reduccion<<<grid, bloque>>>(device_array, i);
        cudaDeviceSynchronize();
        i = half;
    }
    cudaMemcpy(copy_host_array, device_array, sizeof(double) * size_array, cudaMemcpyDeviceToHost);
    if (verbose) printf("gpu sigma %f \n", copy_host_array[0]);
    double final_res = sqrt(copy_host_array[0] / (size_array - 1));
    printf("GPU time: %f seconds\t", ((double)(clock() - time_begin)) / CLOCKS_PER_SEC);
    printf("GPU result: %f\n", final_res);

    //--------------cpu computations-----------------------//
    time_begin = clock();
    double cpu_res = secuential(host_array, size_array, verbose);
    printf("CPU time: %f seconds\t", ((double)(clock() - time_begin)) / CLOCKS_PER_SEC);
    printf("CPU result: %f\n", cpu_res);

    free(host_array);
    free(copy_host_array);
    cudaFree(device_array);
}
8,081
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

#define CUDA 0
#define OPENMP 1
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048

// One sphere: color (r,g,b), radius, and center position.
struct Sphere {
    float r,b,g;
    float radius;
    float x,y,z;
};

// Orthographic ray/sphere hit test for the ray through pixel (ox, oy).
// Returns the z of the hit point (larger = closer to the camera) or -INF on
// miss; *n receives dz/radius, used as a simple shading factor.
__device__ float hit( float x, float y, float z, float ox, float oy, float *n, float radius )
{
    float dx = ox - x;
    float dy = oy - y;
    if (dx*dx + dy*dy < radius*radius) {
        float dz = sqrtf( radius*radius - dx*dx - dy*dy );
        *n = dz / sqrtf( radius * radius );
        return dz + z;
    }
    return -INF;
}

// One block per pixel (launched as a DIM x DIM grid with 1 thread per block);
// each block tests its pixel against all spheres and keeps the nearest hit,
// writing RGBA into the 4-bytes-per-pixel bitmap.
__global__ void kernel(struct Sphere* s, unsigned char* ptr)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y*DIM;
    // center the image coordinate system on the screen
    float ox = (x - DIM/2);
    float oy = (y - DIM/2);

    float r=0, g=0, b=0;
    float maxz = -INF;   // depth of the nearest hit so far
    for(int i=0; i<SPHERES; i++) {
        float n;
        float t = hit( s[i].x, s[i].y, s[i].z, ox, oy, &n, s[i].radius );
        if (t > maxz) {
            float fscale = n;   // shade by surface-normal z component
            r = s[i].r * fscale;
            g = s[i].g * fscale;
            b = s[i].b * fscale;
            maxz = t;
        }
    }
    ptr[offset*4 + 0] = (int)(r * 255);
    ptr[offset*4 + 1] = (int)(g * 255);
    ptr[offset*4 + 2] = (int)(b * 255);
    ptr[offset*4 + 3] = 255;   // opaque alpha
}

// Writes the RGBA bitmap as an ASCII PPM (P3); alpha is dropped.
void ppm_write(unsigned char* bitmap, int xdim,int ydim, FILE* fp)
{
    int i,x,y;
    fprintf(fp,"P3\n");
    fprintf(fp,"%d %d\n",xdim, ydim);
    fprintf(fp,"255\n");
    for (y=0;y<ydim;y++) {
        for (x=0;x<xdim;x++) {
            i=x+y*xdim;
            fprintf(fp,"%d %d %d ",bitmap[4*i],bitmap[4*i+1],bitmap[4*i+2]);
        }
        fprintf(fp,"\n");
    }
}

// Generates SPHERES random spheres, ray-traces them on the GPU, and writes
// the image to the PPM file named in argv[1], reporting the elapsed time.
// NOTE(review): <<<blocks, 1>>> runs one thread per block — correct but far
// from optimal occupancy; a 2-D thread block would be the usual layout.
int main(int argc, char* argv[])
{
    double exe_time;
    clock_t start_time, end_time;
    struct Sphere *temp_s;
    unsigned char* bitmap;
    struct Sphere *d_temp_s;
    unsigned char* d_bitmap;
    dim3 blocks(DIM,DIM,1);   // one block per pixel

    // Error detection code
    if (argc!=2) {
        printf("> a.out [filename.ppm]\n");
        printf("for example, '> a.out result.ppm' means executing CUDA\n");
        exit(0);
    }

    // Start Timer
    srand(time(NULL));
    start_time = clock();

    // Allocate the memory on host
    bitmap = (unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4);
    temp_s = (struct Sphere*)malloc(sizeof(struct Sphere) * SPHERES);

    // Allocate the memory on device
    cudaMalloc( (void**)&d_temp_s, sizeof(struct Sphere) * SPHERES );
    cudaMalloc( (void**)&d_bitmap, sizeof(unsigned char)*DIM*DIM*4 );

    // Generate the spheres
    for (int i=0; i<SPHERES; i++) {
        temp_s[i].r = rnd( 1.0f );
        temp_s[i].g = rnd( 1.0f );
        temp_s[i].b = rnd( 1.0f );
        temp_s[i].x = rnd( 2000.0f ) - 1000;
        temp_s[i].y = rnd( 2000.0f ) - 1000;
        temp_s[i].z = rnd( 2000.0f ) - 1000;
        temp_s[i].radius = rnd( 200.0f ) + 40;
    }

    // Move data to device
    cudaMemcpy ( d_temp_s, temp_s, sizeof(struct Sphere) * SPHERES, cudaMemcpyHostToDevice );

    // Calculate the ray
    kernel<<<blocks, 1>>>(d_temp_s, d_bitmap);
    cudaDeviceSynchronize();
    cudaMemcpy ( bitmap, d_bitmap, sizeof(unsigned char)*DIM*DIM*4, cudaMemcpyDeviceToHost );

    // open the file
    FILE* fp = fopen(argv[1],"w");
    ppm_write(bitmap,DIM,DIM,fp);   // Write the image

    // Stop Timer
    end_time = clock();
    exe_time = ((double)(end_time - start_time)) / CLOCKS_PER_SEC;

    // Print the result
    printf("CUDA ray tracing: %f sec\n", exe_time);
    printf("[%s] was generated\n", argv[1]);

    // Close the file and free the memory
    fclose(fp);
    free(bitmap);
    free(temp_s);
    cudaFree(d_bitmap);
    cudaFree(d_temp_s);
    return 0;
}
8,082
#include "includes.h"

// Scales each of the first numElements entries of A by k, in place.
__global__ void kernal2(int *A, int k, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;   // grid tail guard
    A[idx] *= k;
}
8,083
#include <stdio.h>
#include <math.h>

// Element-wise vector add: array_c[i] = array_a[i] + array_b[i] for i < n.
// The bounds guard is required: the host rounds the grid up to whole blocks
// (e.g. 1024 threads for 1000 elements), and the previous unguarded version
// read and wrote past the end of the arrays on those extra threads.
__global__ void add_in_parallel(int *array_a, int *array_b, int *array_c, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        array_c[tid] = array_a[tid] + array_b[tid];
    }
}

// Adds two integer arrays on the GPU and verifies against a CPU sum.
// Returns 0 on successful verification, 1 on mismatch (the previous version
// always returned 1, signalling failure even on success) and also leaked
// devresult_host.
int main()
{
    // --------------------------------------------
    printf("Begin\n");
    int arraysize = 1000;
    int *a_host;
    int *b_host;
    int *c_host;
    int *devresult_host;
    a_host = (int *)malloc(arraysize*sizeof(int));
    b_host = (int *)malloc(arraysize*sizeof(int));
    c_host = (int *)malloc(arraysize*sizeof(int));
    devresult_host = (int *)malloc(arraysize*sizeof(int));
    // CPU reference result computed alongside the inputs.
    for (int i = 0; i < arraysize; i++) {
        a_host[i] = i;
        b_host[i] = i;
        c_host[i] = a_host[i] + b_host[i];
    }

    // ---------------------------------------------
    printf("Allocating device memory\n");
    int *a_dev;
    int *b_dev;
    int *c_dev;
    cudaMalloc((void**) &a_dev, arraysize*sizeof(int));
    cudaMalloc((void**) &b_dev, arraysize*sizeof(int));
    cudaMalloc((void**) &c_dev, arraysize*sizeof(int));

    // ----------------------------------------------
    printf("Copy host data to device\n");
    cudaMemcpy(a_dev, a_host, arraysize*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_dev, b_host, arraysize*sizeof(int), cudaMemcpyHostToDevice);

    // ----------------------------------------------
    printf("Add in parallel\n");
    int blocksize = 512;
    int blocknum = ceil(arraysize/double(blocksize));   // ceil-div grid size
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(blocknum, 1, 1);
    add_in_parallel<<<dimGrid, dimBlock>>>(a_dev, b_dev, c_dev, arraysize);
    cudaDeviceSynchronize();   // cudaThreadSynchronize is deprecated

    // ----------------------------------------------
    printf("Copy dev data to host\n");
    cudaMemcpy(devresult_host, c_dev, arraysize*sizeof(int), cudaMemcpyDeviceToHost);

    // -------------------------------------------------
    printf("Verify result\n");
    int status = 0;
    for (int i = 0; i < arraysize; i++) {
        // printf("%d ", devresult_host[i]);
        // printf("%d ", c_host[i]);
        if (c_host[i]!=devresult_host[i]) {
            status = 1;
        }
    }
    if (status) {
        printf("Failed vervified.\n");
    } else {
        printf("Sucessdully verified.\n");
    }

    // ----------------------------------------------
    printf("Free dev memory\n");
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);

    // ----------------------------------------
    printf("Free host memory\n");
    free(a_host);
    free(b_host);
    free(c_host);
    free(devresult_host);   // was leaked before
    return status;          // 0 = verified OK, 1 = mismatch
}
8,084
#include <cuda.h>
#include <iostream>
#include <string.h>
#include <stdlib.h>

/*
1. stride = 0: all threads request same value, this is where broadcasting happened
2. stride = 1: all threads request different bank
3. stride = 2: 2-way bank conflict
4. stride = 4: 4-way bank conflict
5. stride = 16: 16-way bank conflict
6. stride = 32: 32-way bank conflict
*/

// Times 11 strided shared-memory writes (1 store + 10 read-modify-writes)
// from a single 32-thread warp, so the elapsed clock() delta exposes the
// degree of bank conflict for the chosen stride.
// NOTE(review): every thread stores its own delta to *time; the value kept is
// whichever thread's store lands last — fine here since deltas are similar.
__global__ void TestKernel(unsigned long long* time, int stride){
    __shared__ float shared_data[1024];
    int tid = threadIdx.x;

    unsigned long long startTime = clock();
    shared_data[tid * stride] = 4;
    for(int index = 0; index < 10; index ++){
        shared_data[tid * stride] += 4;
    }
    unsigned long long endTime = clock();

    *time = (endTime - startTime);
}

// Parses "-stride <k>", runs TestKernel 10 times with 1 warp, and prints the
// per-access cycle estimate each run (clock overhead of 14 subtracted,
// divided by 32... presumably per-thread normalization — TODO confirm).
int main(int argc, const char** argv){
    /* Bank_Conflict -s 2 */
    if(argc != 3){
        printf("this should be used like: ./Bank_Conflict -stride 2\n");
        return -1;
    }
    int stride = 0;   // default if the flag is absent (broadcast case)
    for(int index = 0; index < argc; index++){
        if(strcmp(argv[index], "-stride") == 0){
            stride = atoi(argv[index + 1]);
        }
    }
    if(stride > 32){
        printf("share_data is an array with 1024 elements by default, so the max valid stride value is 32 by default, you are setting stride larger than 32, make sure memory access in TestKernel is valid.");
    }
    std::cout << "stride set is "<<stride<<std::endl;

    unsigned long long time;
    unsigned long long * dtime;
    cudaMalloc((void**) &dtime, sizeof(unsigned long long));
    for(int index=0; index < 10; index++){
        TestKernel<<<1, 32>>>(dtime, stride);
        // blocking copy also synchronizes with the kernel
        cudaMemcpy(&time, dtime, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
        // 14 is overhead for calling clock
        std::cout <<"Time: "<<(time - 14) / 32 << std::endl;
        std::cout << std::endl;
    }
    cudaFree(dtime);
    cudaDeviceReset();
    return 0;
}
8,085
/*
 * María Fernanda Mora Alba, 103596
 * Arquitectura de computadoras - Maestría en Ciencias en Computación
 * Práctica de Introducción a los conceptos de CUDA
 * Multiplicación de matrices
 */

#include <stdio.h>
#include <stdlib.h>

void checkCUDAError(const char*);

// Kernel: C = A * B for DIM x DIM row-major int matrices.
// One thread per output element on a 2-D launch; row/col are derived from
// BOTH block and thread indices and bounds-checked.
//
// Fix vs. previous version: the kernel assumed a <<<N, N>>> launch
// (blockIdx.x = row, threadIdx.x = col) while the host launched a 2-D grid of
// 32x32 blocks — for N = 10 that read and wrote far out of bounds.
__global__ void matrix_mult(int *d_a, int *d_b, int *d_c, int DIM)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < DIM && col < DIM) {
        int sum = 0;
        for (int k = 0; k < DIM; k++) {
            sum += d_a[row*DIM + k] * d_b[k*DIM + col];
        }
        d_c[row*DIM + col] = sum;
    }
}

/* matrix dimensions */
#define N 10
#define NUM_BLOCKS N
#define THREADS_PER_BLOCK N
#define ARR_SIZE N*N

// Fills two N x N matrices with random bytes, multiplies them on the GPU,
// and reports the elapsed kernel+copy time measured with CUDA events.
int main(int argc, char *argv[])
{
    /* events for timing */
    cudaEvent_t start, stop;
    float time;

    /* host and device arrays */
    int *h_a, *h_b, *h_c;   /* host */
    int *d_a, *d_b, *d_c;   /* device */
    int i;

    /* host allocation */
    size_t sz = N * N * sizeof(int);
    h_a = (int *) malloc(sz);
    h_b = (int *) malloc(sz);
    h_c = (int *) malloc(sz);

    /* create timing events */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* device allocation */
    cudaMalloc((void**) &d_a, sz);
    cudaMalloc((void**) &d_b, sz);
    cudaMalloc((void**) &d_c, sz);

    /* initialize inputs with random values in [0, 254] */
    for (i = 0; i < ARR_SIZE; i++) {
        h_a[i] = rand()%255;
        h_b[i] = rand()%255;
        h_c[i] = 0;
    }

    /* copy inputs to the device */
    cudaMemcpy(d_a, h_a, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, sz, cudaMemcpyHostToDevice);

    /* 2-D launch: 32x32 threads per block, enough blocks to cover N x N */
    dim3 dimBlock(32,32);
    dim3 dimGrid((N+31)/32,(N+31)/32);

    cudaEventRecord(start,0);

    matrix_mult<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,N);

    cudaDeviceSynchronize();   /* cudaThreadSynchronize is deprecated */
    checkCUDAError("kernel invocation");

    /* copy the result back (inputs need not be copied back) */
    cudaMemcpy(h_c,d_c,sz,cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");

    /* stop timing */
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &time, start, stop );

    printf("\nTIEMPO DE EJECUCIÓN: %f mSeg\n\n", time);

    /* release device and host memory */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}

/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
8,086
#include "includes.h"

// Copies normals (and, when supplied, motion velocities) from each source
// particle to its periodic duplicate. listp[p] holds the source index in its
// low 31 bits; duplicates occupy the contiguous range starting at pini.
__global__ void KerPeriodicDuplicateNormals(unsigned n,unsigned pini,const unsigned *listp,float3 *normals,float3 *motionvel)
{
  const unsigned tid=blockIdx.x*blockDim.x+threadIdx.x; //-Number of particle.
  if(tid>=n)return;
  const unsigned pdup=pini+tid;                  // destination (duplicate) slot
  const unsigned psrc=(listp[tid]&0x7FFFFFFF);   // source index, top bit masked off
  normals[pdup]=normals[psrc];
  if(motionvel)motionvel[pdup]=motionvel[psrc];
}
8,087
#include <cstdio>
#include <iostream>
#include <chrono>

constexpr size_t SIZE = 16384 * 3; // 16384 * 3
constexpr size_t BLOCK_COUNT = 16384 * 3; // opt 1024 + 512 for shared alg 16384 * 3 for simple
constexpr size_t THREAD_PER_BLOCK = SIZE / BLOCK_COUNT;

// Sums each row of a SIZE x SIZE row-major matrix into result[row].
// One thread per row; the accumulator lives in global memory, so each row
// element access is an uncoalesced global read-modify-write.
template<typename T>
__global__ void sumMatrixRow(const float* matrix, T* result) {
    unsigned int idx = threadIdx.x;
    unsigned int block_idx = blockIdx.x;
    idx = idx + (SIZE/BLOCK_COUNT) * block_idx;   // global row index
    result[idx] = 0;
    for(size_t i=0; i < SIZE; i++) {
        result[idx] = result[idx] + matrix[idx * SIZE + i];
    }
}

// CPU reference implementation of the same row sums, for verification.
template <typename T>
void sumMatrixRowCPU(const float* matrix, T* result) {
    for(int idx = 0; idx < SIZE; idx++) {
        result[idx] = 0;
        for(size_t i=0; i < SIZE; i++) {
            result[idx] = result[idx] + matrix[idx * SIZE + i];
        }
    }
}

// Benchmarks the GPU row-sum against the CPU version and prints the first
// ten results from each for a visual check.
__host__ int main() {
    // Allocate host memory for the matrix and both result vectors
    auto* matrix = new float[SIZE * SIZE];
    auto* result = new float[SIZE];
    auto* result_1 = new float[SIZE];

    // Initialize values: every element of row r holds r, results zeroed
    // (the i%SIZE stores re-zero the results SIZE times each — harmless)
    for (int i = 0; i < SIZE * SIZE; i++) {
        matrix[i] = int(i/SIZE);
        result[i%SIZE] = 0;
        result_1[i%SIZE] = 0;
    }

    float* gpu_matrix;
    float* gpu_result;

    // Allocate device memory for the matrix and result
    cudaMalloc((void**)&gpu_matrix, sizeof(float) * SIZE * SIZE);
    cudaMalloc((void**)&gpu_result, sizeof(float) * SIZE);

    cudaMemcpy(gpu_matrix, matrix, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_result, result, sizeof(float) * SIZE, cudaMemcpyHostToDevice);

    dim3 gridSize = dim3(BLOCK_COUNT, 1, 1);        // grid size (one block per row here)
    dim3 blockSize = dim3(THREAD_PER_BLOCK, 1, 1);  // block size (1 thread per block)

    // Launch the kernel, bracketed by events for timing
    cudaEvent_t kernel_start;
    cudaEventCreate(&kernel_start);
    cudaEventRecord(kernel_start, nullptr);

    sumMatrixRow<<<gridSize, blockSize>>>(gpu_matrix, gpu_result);

    cudaEvent_t syncEvent;                 // event handle
    cudaEventCreate(&syncEvent);           // create the event
    cudaEventRecord(syncEvent, nullptr);   // record it after the kernel
    cudaEventSynchronize(syncEvent);       // wait for the kernel to finish

    float time;
    cudaEventElapsedTime(&time, kernel_start, syncEvent);

    cudaMemcpy(result, gpu_result, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);
    std::cout << "GPU Elapsed time " << time << std::endl;

    auto t1 = std::chrono::high_resolution_clock::now();
    sumMatrixRowCPU(matrix, result_1);
    auto t2 = std::chrono::high_resolution_clock::now();
    std::cout << "CPU Elapsed time " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() <<std::endl;

    // NOTE(review): "%1.f" below prints with zero decimals (field width 1);
    // "%.1f" was probably intended to match the first column.
    for (int i = 0; i < 10; i++) {
        printf("Element #%i: %.1f %1.f\n", i , result[i], result_1[i]);
    }

    // Release resources
    cudaEventDestroy(syncEvent);
    cudaFree(gpu_matrix);
    cudaFree(gpu_result);
    delete[] result;
    delete[] result_1;
    delete[] matrix;
}
8,088
#include <stdio.h>

// Minimal kernel that prints a marker from the device.
__global__ void cuda_hello()
{
    printf("testing");
    __syncthreads();
}

int main()
{
    cuda_hello<<<1, 1>>>();
    // BUG FIX: kernel launches are asynchronous — without a synchronize the
    // process can exit before the device printf buffer is flushed, so the
    // original printed nothing.
    cudaDeviceSynchronize();
    return 0;
}
8,089
// original code:
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
// Compile & run with `nvcc scanBlelloch.cu && ./a.out <x>`
// where `x` defines the lower and upper integral's limits
// i.e. [-x, x] (see the header of `main`)
#include <cmath>
#include <cstdio>
#include <cstdlib>

#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
// BUG FIX: '+' binds tighter than '>>', so the unparenthesized original
// expanded to (n) >> (LOG_NUM_BANKS + (n)) >> (2 * LOG_NUM_BANKS) and
// produced garbage padding offsets. The fully parenthesized form below
// matches the padding budget computed by smemSize().
#define CONFLICT_FREE_OFFSET(n) \
    (((n) >> LOG_NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)))

// Blelloch work-efficient exclusive scan of one 2*blockDim.x chunk per block.
// If SUMS is non-NULL, the chunk total is written to SUMS[blockIdx.x] so a
// second pass can scan the per-block sums.
// Dynamic shared memory: smemSize(2 * blockDim.x) bytes.
__global__ void blocksPrescan(float * g_odata, float * g_idata, float * SUMS)
{
    extern __shared__ float tmp[];

    uint N = 2 * blockDim.x;
    uint ai = threadIdx.x;
    uint bi = threadIdx.x + N / 2;

    g_idata += blockIdx.x * N;
    g_odata += blockIdx.x * N;

    // load input into shared memory (with bank-conflict padding)
    uint bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    uint bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    tmp[ai + bankOffsetA] = g_idata[ai];
    tmp[bi + bankOffsetB] = g_idata[bi];

    uint offset(1);

    // build sum in place up the tree (up-sweep)
    for (uint d = N >> 1; d > 0; d >>= 1) {
        __syncthreads();
        if (threadIdx.x < d) {
            uint ai = offset * (2 * threadIdx.x + 1) - 1;
            uint bi = offset * (2 * threadIdx.x + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            tmp[bi] += tmp[ai];
        }
        offset <<= 1;
    }

    // write the sum of the array chunk to 'SUMS'
    // and clear the last element
    float t;
    if (!threadIdx.x) {
        uint IDX = N - 1;
        IDX += CONFLICT_FREE_OFFSET(N - 1);
        if (SUMS) {
            t = tmp[IDX];
            SUMS[blockIdx.x] = t;
        }
        tmp[IDX] = 0;
    }

    // traverse down tree & build scan (down-sweep)
    for (uint d = 1; d < N; d *= 2) {
        offset >>= 1;
        __syncthreads();
        if (threadIdx.x < d) {
            uint ai = offset * (2 * threadIdx.x + 1) - 1;
            uint bi = offset * (2 * threadIdx.x + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            t = tmp[ai];
            tmp[ai] = tmp[bi];
            tmp[bi] += t;
        }
    }
    __syncthreads();

    // write results to device memory
    g_odata[ai] = tmp[ai + bankOffsetA];
    g_odata[bi] = tmp[bi + bankOffsetB];
}

// Adds the scanned per-block totals back onto each block's chunk.
__global__ void blocksShifter(float * g_odata, float * SUMS)
{
    g_odata += 2 * blockIdx.x * blockDim.x;
    g_odata[threadIdx.x] += SUMS[blockIdx.x];
    g_odata[threadIdx.x + blockDim.x] += SUMS[blockIdx.x];
}

// Shared-memory bytes needed for an n_el-element chunk, padding included.
size_t smemSize(int n_el)
{
    int extra_space = n_el / NUM_BANKS;
    extra_space += extra_space / NUM_BANKS;
    return sizeof(float) * (n_el + extra_space);
}

//////////////////////// MAIN //////////////////////////////
// Calculation of the integral \int_{-x}^x \exp(- t^2) dt //
////////////////////////////////////////////////////////////
int main(int argc, char ** argv)
{
    // Robustness: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <x>\n", argv[0]);
        return 1;
    }

    // set the integral's limits
    float x = atof(argv[1]);

    // discretization settings
    size_t n_blocks = 512;
    size_t block_size = 2048;
    size_t n_el = n_blocks * block_size;
    // BUG FIX: "%i" with a size_t argument is undefined behavior; use %zu.
    printf("Number of discretization points: %zu\n", n_el);

    float * idata, * odata, * sums;
    cudaMallocManaged(&idata, n_el * sizeof(float));
    cudaMallocManaged(&odata, n_el * sizeof(float));
    cudaMallocManaged(&sums, n_blocks * sizeof(float));

    // calculate integrand's values
    float t, dt;
    dt = 2 * x / n_el;
    for (uint i = 0; i < n_el; ++i) {
        t = - x + i * dt;
        idata[i] = exp(- t * t) * dt;
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // measure the execution time of the framed block
    cudaEventRecord(start);
    //------------------------------
    blocksPrescan<<<n_blocks, block_size / 2, smemSize(block_size)>>>(odata, idata, sums);
    blocksPrescan<<<1, n_blocks / 2, smemSize(n_blocks)>>>(sums, sums, NULL);
    blocksShifter<<<n_blocks, block_size / 2>>>(odata, sums);
    //------------------------------
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float elapsed_time;
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf("Elapsed time: %f ms\n", elapsed_time);

    printf("Results are written to 'output.bin'\n");
    FILE * cfout = fopen("output.bin", "wb");
    fwrite(odata, sizeof(float), n_el, cfout);
    fclose(cfout);

    cudaFree(idata);
    cudaFree(odata);
    cudaFree(sums);
}
8,090
#include <algorithm>
#include <chrono>
#include <cmath>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/fill.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include <thrust/tuple.h>
#include <thrust/equal.h>
#include <thrust/transform.h>

using namespace std;

// CPU-side AST node: type == 0 marks an inner (expression) node,
// type == 1 a leaf.
struct CPUNode {
    long long id;
    short type;
    vector<CPUNode> kids;
    CPUNode();
    CPUNode(int depth, int width, int& id);
    CPUNode(long long id, short type, vector<CPUNode> kids);
    friend ostream& operator<<(ostream& os, const CPUNode& node);
};

// Default: a leaf with id 0.
CPUNode::CPUNode() : id(0), type(1), kids(vector<CPUNode>()) {}

// Builds a full tree of the given depth/width, assigning ids in pre-order.
CPUNode::CPUNode(int depth, int width, int& id) : id(id++) {
    if (depth) {
        type = 0;
        kids = vector<CPUNode>(width);
        for (CPUNode& node : kids)
            node = CPUNode(depth - 1, width, id);
    } else {
        type = 1;
        kids = vector<CPUNode>();
    }
}

CPUNode::CPUNode(long long id, short type, vector<CPUNode> kids)
    : id(id), type(type), kids(kids) {}

ostream& operator<<(ostream& os, const CPUNode& node) {
    os << node.type << " " << node.id;
    return os;
}

// Indented pre-order dump of a CPU tree.
void print_cpu_tree(CPUNode& node, int depth) {
    for (int i = 0; i < depth; i++)
        cout << " ";
    cout << node << endl;
    for (CPUNode& kid : node.kids)
        print_cpu_tree(kid, depth + 1);
}

void help(void) {
    cout << "Benchmark <fnc|mut|gpu> <print|quiet> [<depth> <width>|<depth_start> <depth_end> <width_start> <width_end>]" << endl;
}

// Pure/functional flattening: returns a new root whose kids are copies of all
// inner nodes of the input, each with its children demoted to leaf copies.
CPUNode functional_flatten(CPUNode& node) {
    vector<CPUNode> nodes;
    if (!node.type) {
        vector<CPUNode> kids(node.kids.size());
        for (int i = 0; i < node.kids.size(); i++) {
            kids[i].id = node.kids[i].id;
            kids[i].type = 1;
            kids[i].kids = vector<CPUNode>();
        }
        nodes.push_back(CPUNode(node.id, node.type, kids));
        for (int i = 0; i < node.kids.size(); i++) {
            auto f = functional_flatten(node.kids[i]);
            for (auto k : f.kids)
                nodes.push_back(k);
        }
    }
    return CPUNode(-1, 0, nodes);
}

// Times functional_flatten over 5 runs and reports the average (ms).
void benchmark_functional(int depth, int width, bool print) {
    cout << "Benchmarking Functional algorithm (Depth: " << depth << " Width: " << width << ")" << endl;
    cout << "Creating AST...";
    int id = 0;
    CPUNode node(depth, width, id);
    cout << "done." << endl;
    if (print) {
        cout << endl << "Before: " << endl;
        print_cpu_tree(node, 1);
        cout << endl;
    }
    cout << "Flattening AST...";
    long long timing = 0;
    CPUNode temp;
    for (int i = 0; i < 5; i++) {
        auto start = chrono::high_resolution_clock::now();
        temp = functional_flatten(node);
        auto end = chrono::high_resolution_clock::now();
        timing += chrono::duration_cast<chrono::microseconds>(end - start).count();
    }
    node = temp;
    double average_timing = (double)timing / 5;
    cout << "took an average of " << average_timing / 1000 << " milliseconds." << endl;
    cout << "SET_TIMINGS 0 " << depth << " " << width << " " << average_timing / 1000 << endl;
    if (print) {
        cout << endl << "After: " << endl;
        print_cpu_tree(node, 1);
    }
}

// In-place flattening helper: lifts every inner node into `lifted`
// (children first, in reverse order) and demotes it to a leaf.
void mutation_flatten_helper(CPUNode& node, vector<CPUNode>& lifted) {
    if (!node.type) {
        for_each(node.kids.rbegin(), node.kids.rend(), [&lifted](CPUNode& kid) {
            mutation_flatten_helper(kid, lifted);
        });
        lifted.push_back(node);
        node.type = 1;
        node.kids = vector<CPUNode>();
    }
}

// Mutating flatten: replaces `node` with a new root over the lifted nodes.
void mutation_flatten(CPUNode& node) {
    vector<CPUNode> lifted;
    mutation_flatten_helper(node, lifted);
    node = CPUNode{ -1, 0, lifted };
}

// Times mutation_flatten over 5 runs (each on a fresh copy) and reports.
void benchmark_mutation(int depth, int width, bool print) {
    cout << "Benchmarking Mutation algorithm (Depth: " << depth << " Width: " << width << ")" << endl;
    cout << "Creating AST...";
    int id = 0;
    CPUNode node(depth, width, id);
    cout << "done." << endl;
    if (print) {
        cout << endl << "Before: " << endl;
        print_cpu_tree(node, 1);
        cout << endl;
    }
    cout << "Flattening AST...";
    long long timing = 0;
    CPUNode temp;
    for (int i = 0; i < 5; i++) {
        temp = node;
        auto start = chrono::high_resolution_clock::now();
        mutation_flatten(temp);
        auto end = chrono::high_resolution_clock::now();
        timing += chrono::duration_cast<chrono::microseconds>(end - start).count();
    }
    node = temp;
    double average_timing = (double)timing / 5;
    cout << "took an average of " << average_timing / 1000 << " milliseconds." << endl;
    cout << "SET_TIMINGS 1 " << depth << " " << width << " " << average_timing / 1000 << endl;
    if (print) {
        cout << endl << "After: " << endl;
        print_cpu_tree(node, 1);
    }
}

// Structure-of-arrays AST stored on the device: parallel vectors of node
// depth, node type, and per-node coordinate paths (depth entries each).
struct GPUNode {
    int depth;
    int width;
    long long count;
    thrust::device_vector<int> depths;
    thrust::device_vector<short> types;
    thrust::device_vector<long long> coords;
    GPUNode(int depth, int width);
};

// Builds the SoA representation of a full depth x width tree on the host,
// then uploads it.
GPUNode::GPUNode(int depth, int width) : depth(depth), width(width), count(0) {
    for (int i = 0; i < depth; i++)
        count += (long long)pow(width, i);
    thrust::host_vector<int> host_depths(count);
    thrust::host_vector<short> host_types(count);
    thrust::host_vector<long long> host_coords(count * depth, 0);
    vector<int> cur_width(depth, 0);
    vector<long long> cur_coord(depth, 0);
    int cur_depth = 0;
    for (int i = 0; i < count; i++) {
        // Backtrack when the current level is full.
        if (cur_width[cur_depth] >= width) {
            cur_coord[cur_depth - 1] += cur_coord[cur_depth];
            cur_coord[cur_depth] = 0;
            cur_depth--;
            i--;
            continue;
        }
        host_depths[i] = cur_depth;
        host_types[i] = cur_depth + 1 >= depth ? 1 : 0;
        cur_coord[cur_depth]++;
        cur_width[cur_depth]++;
        host_coords[i*depth + cur_depth] = cur_coord[0];
        for (int j = cur_depth - 1; j >= 0; j--)
            host_coords[i*depth + j] = host_coords[i*depth + j + 1] + cur_coord[cur_depth - j];
        if (cur_depth + 1 < depth) {
            cur_width[++cur_depth] = 0;
        }
    }
    depths = host_depths;
    types = host_types;
    coords = host_coords;
}

// Host-side functor that prints one node (indent, type, coordinate path).
struct print_gpu_node {
    int max_depth;
    thrust::host_vector<long long>& coords;
    template <typename Tuple>
    __host__ void operator()(Tuple t) {
        int depth = thrust::get<0>(t) + 1;
        for (int i = 0; i < depth; i++)
            cout << " ";
        cout << thrust::get<1>(t) << " ";
        long long i = thrust::get<2>(t);
        for (int j = 0; j < max_depth; j++) {
            long long c = coords[i*max_depth + j];
            if (c)
                cout << " " << c;
            else
                break;
        }
        cout << endl;
    }
};

// Copies the SoA tree back to the host and prints it row by row.
void print_gpu_tree(GPUNode& ast) {
    thrust::host_vector<int> host_depths = ast.depths;
    thrust::host_vector<short> host_types = ast.types;
    thrust::host_vector<long long> host_coords = ast.coords;
    thrust::counting_iterator<long long> row(0);
    thrust::for_each(thrust::host,
        thrust::make_zip_iterator(
            thrust::make_tuple(host_depths.begin(), host_types.begin(), row)),
        thrust::make_zip_iterator(
            thrust::make_tuple(host_depths.end(), host_types.end(), row + ast.count)),
        print_gpu_node{ ast.depth, host_coords });
}

// Maps a node index to the index of its nearest inner-node ancestor
// (0 when none is found along the coordinate path).
struct coord_parent_index : public thrust::unary_function<long long, long long> {
    thrust::device_ptr<long long> coords;
    thrust::device_ptr<int> depths;
    thrust::device_ptr<short> types;
    int max_depth;
    coord_parent_index(
        thrust::device_ptr<long long> cs,
        thrust::device_ptr<int> depths,
        thrust::device_ptr<short> types, int md)
        : max_depth(md), depths(depths), types(types), coords(cs) {}
    __host__ __device__ long long operator()(long long i) const {
        for (int j = 1; j <= depths[i]; j++) {
            auto parent = coords[i * max_depth + j] - 1;
            if (!types[parent])
                return parent;
        }
        return 0;
    }
};

// Copies one node's coordinate path from the old layout into the new one.
struct copy_coord {
    thrust::device_ptr<long long> new_coords;
    thrust::device_ptr<long long> old_coords;
    int max_depth;
    template <typename Tuple>
    __host__ __device__ void operator()(Tuple t) {
        long long ci = thrust::get<0>(t);
        long long ei = thrust::get<1>(t);
        for (int j = 0; j < max_depth; j++)
            new_coords[ci*max_depth + j] = old_coords[ei*max_depth + j];
    }
};

// Data-parallel flatten: gathers inner-node ids, computes each remaining
// node's parent key, and stable-sorts the whole node set by that key.
void gpu_flatten(GPUNode& ast) {
    thrust::device_vector<long long> eids(ast.count);
    thrust::sequence(eids.begin(), eids.end());
    auto eids_begin = eids.begin();
    // Keep only inner-node (type == 0) ids.
    auto eids_end = thrust::remove_if(eids_begin, eids.end(), ast.types.begin(),
                                      thrust::identity<short>());
    long long exp_count = eids_end - eids_begin;
    long long result_count = ast.count + exp_count - 1;
    thrust::device_vector<int> new_depths(result_count);
    thrust::device_vector<short> new_types(result_count);
    thrust::device_vector<long long> new_coords(result_count * ast.depth);
    thrust::device_vector<long long> refids(result_count);
    thrust::counting_iterator<long long> newids(0);
    thrust::device_vector<long long> keys(result_count);
    // First exp_count slots hold the lifted inner nodes, the rest leaves.
    thrust::fill(new_depths.begin(), new_depths.begin() + exp_count, 0);
    thrust::fill(new_depths.begin() + exp_count, new_depths.end(), 1);
    thrust::fill(new_types.begin(), new_types.begin() + exp_count, 0);
    thrust::fill(new_types.begin() + exp_count, new_types.end(), 1);
    auto keys_first = keys.begin() + exp_count;
    thrust::copy(eids_begin, eids_end, keys.begin());
    thrust::transform(newids + 1, newids + ast.count, keys_first,
        coord_parent_index(ast.coords.data(), ast.depths.data(),
                           ast.types.data(), ast.depth));
    thrust::copy(eids_begin, eids_end, refids.begin());
    thrust::sequence(refids.begin() + exp_count, refids.end(), 1);
    thrust::stable_sort_by_key(keys.begin(), keys.end(),
        thrust::make_zip_iterator(
            thrust::make_tuple(new_depths.begin(), new_types.begin(), refids.begin())));
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(newids, refids.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(newids + result_count, refids.end())),
        copy_coord{ new_coords.data(), ast.coords.data(), ast.depth });
    ast.count = result_count;
    ast.types = new_types;
    ast.coords = new_coords;
    ast.depths = new_depths;
}

// Times gpu_flatten over 5 runs (each on a fresh device copy) and reports.
void benchmark_gpu(int depth, int width, bool print) {
    // NOTE(review): hardcodes the second GPU; kept as-is — confirm the
    // intended device index for the target machine.
    cudaSetDevice(1);
    cout << "Benchmarking GPU algorithm (Depth: " << depth << " Width: " << width << ")..." << endl;
    cout << "Creating AST ";
    GPUNode ast(depth+1, width);
    cout << "done." << endl;
    if (print) {
        cout << endl << "Before: " << endl;
        print_gpu_tree(ast);
        cout << endl;
    }
    cout << "Flattening AST...";
    long long timing = 0;
    // BUG FIX: the original declared an outer `GPUNode temp(1, 1)` that was
    // immediately shadowed by the loop-local copy and never used — dead
    // construction removed.
    for (int i = 0; i < 5; i++) {
        GPUNode temp = ast;
        auto start = chrono::high_resolution_clock::now();
        gpu_flatten(temp);
        auto end = chrono::high_resolution_clock::now();
        timing += chrono::duration_cast<chrono::microseconds>(end - start).count();
    }
    double average_timing = (double)timing / 5;
    cout << "took an average of " << average_timing / 1000 << " milliseconds." << endl;
    cout << "SET_TIMINGS 2 " << depth << " " << width << " " << average_timing / 1000 << endl;
    if (print) {
        gpu_flatten(ast);
        cout << endl << "After: " << endl;
        print_gpu_tree(ast);
    }
}

// Prints the expected node count for a full depth x width tree.
void print_count(int depth, int width) {
    long long count = 0;
    for (int i = 0; i <= depth; i++)
        count += (long long)pow(width, i);
    cout << "ASTs should have " << count << " elements." << endl << endl;
}

int main(int argc, char *argv[]) {
    int depth = 3;
    int width = 2;
    int depth_end = 4;
    int width_end = 3;
    bool print = true;
    string print_str("print");
    string quiet_str("quiet");
    string bench_str("bench");
    string fnc_str("fnc");
    string mut_str("mut");
    string gpu_str("gpu");
    if (argc != 3 && argc != 5 && argc != 7) {
        help();
        return 1;
    }
    if (argc == 5) {
        depth = stoi(argv[3]);
        width = stoi(argv[4]);
        depth_end = depth + 1;
        width_end = width + 1;
    }
    if (argc == 7) {
        depth = stoi(argv[3]);
        depth_end = stoi(argv[4]);
        width = stoi(argv[5]);
        width_end = stoi(argv[6]);
    }
    if (print_str == argv[2])
        print = true;
    else if (quiet_str == argv[2])
        print = false;
    else {
        help();
        return 1;
    }
    if (fnc_str == argv[1]) {
        print_count(depth, width);
        for (int i = depth; i < depth_end; i++) {
            for (int j = width; j < width_end; j++) {
                benchmark_functional(i, j, print);
            }
        }
    } else if (mut_str == argv[1]) {
        print_count(depth, width);
        for (int i = depth; i < depth_end; i++) {
            for (int j = width; j < width_end; j++) {
                benchmark_mutation(i, j, print);
            }
        }
    } else if (gpu_str == argv[1]) {
        print_count(depth, width);
        for (int i = depth; i < depth_end; i++) {
            for (int j = width; j < width_end; j++) {
                benchmark_gpu(i, j, print);
            }
        }
    } else if (bench_str == argv[1]) {
        print_count(depth, width);
        for (int i = depth; i < depth_end; i++) {
            for (int j = width; j < width_end; j++) {
                benchmark_functional(i, j, print);
                cout << endl;
                benchmark_mutation(i, j, print);
                cout << endl;
                benchmark_gpu(i, j, print);
            }
        }
    } else {
        help();
        return 1;
    }
    return 0;
}
8,091
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>

#define MAXROW 1024
#define MAXCOL 1024

/* Wall-clock time in seconds. */
double when()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}

/* Sets up both grids: cold boundary on top/left/right, hot bottom row,
 * two fixed hot regions (row 400 cols <=330, cell (200,500)),
 * everything else 50.0. */
void initialize(float *oA, float *nA)
{
    int i, j;
    for (i = 0; i < MAXROW; i++) {
        for (j = 0; j < MAXCOL; j++) {
            if (i == 0 || j == 0 || j == (MAXCOL-1)) {
                nA[i*MAXCOL + j] = 0.0;
                oA[i*MAXCOL + j] = 0.0;
            } else if (i == MAXROW-1) {
                nA[i*MAXCOL + j] = 100.0;
                oA[i*MAXCOL + j] = 100.0;
            } else if (i == 400 && j <= 330) {
                nA[i*MAXCOL + j] = 100.0;
                oA[i*MAXCOL + j] = 100.0;
            } else if (i == 200 && j == 500) {
                nA[i*MAXCOL + j] = 100.0;
                oA[i*MAXCOL + j] = 100.0;
            } else {
                nA[i*MAXCOL + j] = 50.0;
                oA[i*MAXCOL + j] = 50.0;
            }
        }
    }
}

/* One weighted-Jacobi sweep: nA = (4 neighbours + 4*center) / 8.
 * One block per row, one thread per column; boundary rows/columns and the
 * fixed-temperature cells are skipped. */
__global__ void calculate_new_values(float *nA, float *oA)
{
    if (blockIdx.x == 0 || threadIdx.x == 0 ||
        blockIdx.x == MAXCOL-1 || threadIdx.x == MAXCOL-1 ||
        (blockIdx.x == 400 && threadIdx.x <= 330) ||
        (blockIdx.x == 200 && threadIdx.x == 500)) {
        /* fixed cell: leave untouched */
    } else {
        nA[blockIdx.x * MAXCOL + threadIdx.x] =
            (oA[(blockIdx.x+1) * MAXCOL + threadIdx.x] +
             oA[(blockIdx.x-1) * MAXCOL + threadIdx.x] +
             oA[blockIdx.x * MAXCOL + threadIdx.x+1] +
             oA[blockIdx.x * MAXCOL + threadIdx.x-1] +
             (4 * oA[blockIdx.x * MAXCOL + threadIdx.x])) / 8.0;
    }
}

int main(void)
{
    double start_time = when();
    float *nA, *oA;
    float *d_nA, *d_oA;
    float *tmp;
    int iter = 0;
    float convergence;
    int converged = 0;
    int size = MAXROW * MAXCOL * sizeof(float);

    nA = (float*)malloc(size);
    oA = (float*)malloc(size);

    cudaError_t err = cudaMalloc((void**)&d_nA, size);
    cudaError_t err1 = cudaMalloc((void**)&d_oA, size);
    /* BUG FIX: the error codes were captured but never inspected. */
    if (err != cudaSuccess || err1 != cudaSuccess) {
        printf("cudaMalloc failed\n");
        return 1;
    }

    initialize(oA, nA);
    cudaMemcpy(d_oA, oA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_nA, nA, size, cudaMemcpyHostToDevice);

    while (!converged) {
        calculate_new_values<<<1024, 1024>>>(d_nA, d_oA);
        cudaMemcpy(nA, d_nA, size, cudaMemcpyDeviceToHost);

        /* Converged when every interior, non-fixed cell is within 0.1 of
         * the average of its four neighbours. */
        converged = 1;
        for (int i = 1; i < MAXROW-1; i++) {
            for (int j = 1; j < MAXCOL-1; j++) {
                if ((i == 400 && j <= 330) || (i == 200 && j == 500)) {
                    /* skip the fixed-temperature cells */
                } else {
                    convergence = nA[i*MAXCOL + j] -
                        ((nA[(i+1)*MAXCOL + j] + nA[(i-1)*MAXCOL + j] +
                          nA[i*MAXCOL + j+1] + nA[i*MAXCOL + j-1]) / 4.0);
                    if (fabs(convergence) > 0.1) {
                        converged = 0;
                        break;
                    }
                }
            }
            if (converged == 0)
                break;
        }
        iter++;
        /* ping-pong the device buffers */
        tmp = d_nA;
        d_nA = d_oA;
        d_oA = tmp;
    }

    printf("iter = %d and execution time = %f\n", iter, when() - start_time);

    /* BUG FIX: after the final swap the newest grid lives in d_oA; the
     * original copied from d_nA and clobbered nA (which already held the
     * converged result) with the previous iteration's data. */
    cudaMemcpy(nA, d_oA, size, cudaMemcpyDeviceToHost);

    cudaFree(d_nA);
    cudaFree(d_oA);
    free(nA);
    free(oA);
    /* BUG FIX: returning 1 signals failure to the shell; success is 0. */
    return 0;
}
8,092
#include <cuda.h>
#include <cuda_runtime.h>

// Physical constants kept in device constant memory.
__constant__ float q = 1.60217646e-19;   // elementary charge [C]
__constant__ float m = 9.10938188e-31;   // electron mass [kg]
__constant__ float B0 = 1e-12;           // field magnitude scale
__constant__ float alpha = 250000;       // Gaussian field width parameter

// One explicit Euler step per particle in a z-directed Gaussian magnetic
// field: v += tau * (q/m) * (v x B), then x += tau * v.
__global__ void kernel(float* x, float* y, float* z,
                       float* vx, float* vy, float* vz, int count, float tau)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < count) {
        float r = sqrtf(x[i]*x[i] + y[i]*y[i] + z[i]*z[i]);
        float Bx = 0;
        float By = 0;
        float Bz = -B0 * expf(-r*r / alpha);
        // Read velocity once so the cross product uses consistent values.
        float vx1 = vx[i];
        float vy1 = vy[i];
        float vz1 = vz[i];
        vx[i] = vx1 + tau * q * (vy1 * Bz - vz1 * By) / m;
        vy[i] = vy1 + tau * q * (vz1 * Bx - vx1 * Bz) / m;
        vz[i] = vz1 + tau * q * (vx1 * By - vy1 * Bx) / m;
        x[i] += vx[i] * tau;
        y[i] += vy[i] * tau;
        z[i] += vz[i] * tau;
    }
}

// Device buffers cached across calls; reallocated only when `count` changes.
static float *d_x = NULL, *d_y = NULL, *d_z = NULL,
             *d_vx = NULL, *d_vy = NULL, *d_vz = NULL;
static size_t oldcount = 0;

// Advances `count` particles by one time step `tau` on the GPU.
// Host arrays are copied in, integrated, and copied back in place.
__host__ void process_particles(float* x, float* y, float* z,
                                float* vx, float* vy, float* vz,
                                size_t count, float tau)
{
    // BUG FIX: the byte count was an int, which overflows once
    // count * sizeof(float) exceeds ~2^31; use size_t like `count` itself.
    size_t size = count * sizeof(float);
    if (!d_x || oldcount != count) {
        cudaFree(d_x);  cudaMalloc(&d_x, size);
        cudaFree(d_y);  cudaMalloc(&d_y, size);
        cudaFree(d_z);  cudaMalloc(&d_z, size);
        cudaFree(d_vx); cudaMalloc(&d_vx, size);
        cudaFree(d_vy); cudaMalloc(&d_vy, size);
        cudaFree(d_vz); cudaMalloc(&d_vz, size);
        oldcount = count;
    }
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_z, z, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vx, vx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vy, vy, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vz, vz, size, cudaMemcpyHostToDevice);
    // count/256 + 1 blocks always covers count threads (kernel bounds-checks).
    kernel<<<count / 256 + 1, 256>>>(d_x, d_y, d_z, d_vx, d_vy, d_vz, count, tau);
    cudaMemcpy(x, d_x, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(z, d_z, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vx, d_vx, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vy, d_vy, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vz, d_vz, size, cudaMemcpyDeviceToHost);
}
8,093
#include <stdio.h>
#include <stdlib.h>

// Converts each value in a[] to its octal representation expressed as a
// decimal number (e.g. 8 -> 10, 15 -> 17), one element per thread.
__global__ void add(int *a, int *c)
{
    int tid = threadIdx.x;
    int n = a[tid];
    // BUG FIX: the original built the octal string by reversing the digits
    // twice, which drops trailing zeros (8 -> octal "10" came out as 1).
    // Build the value directly with a place-value multiplier instead.
    int result = 0;
    int place = 1;
    while (n != 0) {
        result += (n % 8) * place;
        place *= 10;
        n /= 8;
    }
    c[tid] = result;
}

int main(void)
{
    int n, a[20], c[20];
    printf("Enter value of N:");
    n = 5;
    printf("Enter array elements of array A\n");
    for (int i = 0; i < n; i++) {
        a[i] = i;
    }
    int *d_a, *d_c;
    int size = sizeof(int);
    cudaMalloc((void **)&d_a, size * n);
    cudaMalloc((void **)&d_c, size * n);
    cudaMemcpy(d_a, a, size * n, cudaMemcpyHostToDevice);
    add<<<1, n>>>(d_a, d_c);
    cudaMemcpy(c, d_c, size * n, cudaMemcpyDeviceToHost);
    printf("Octal array is :");
    for (int i = 0; i < n; i++) {
        printf("%d ", c[i]);
    }
    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}
8,094
#ifdef _WIN32
# define NOMINMAX
#endif

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#define SHAREDSIZE 32

// Tiled matrix multiply C = A * B for double matrices.
// wA = inner dimension (width of A), wB = width of B.
// Launch with blockDim = tileSize x tileSize, tileSize <= SHAREDSIZE,
// and all dimensions padded to a multiple of tileSize.
__global__ void MatrixMulKernel(double* A, double* B, double* C,
                                int wA, int wB, int tileSize)
{
    int BLOCK_SIZE = tileSize;
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // First / last sub-matrix of A processed by this block, and the strides
    // used to walk the tiles of A and B.
    int aBegin = wA * BLOCK_SIZE * by;
    int aEnd = aBegin + wA - 1;
    int aStep = BLOCK_SIZE;
    int bBegin = BLOCK_SIZE * bx;
    int bStep = BLOCK_SIZE * wB;

    // BUG FIX: the accumulator and shared tiles were float although the
    // inputs are double — every load silently truncated precision.
    double Csub = 0;
    // Shared-memory tiles (no `volatile` needed: __syncthreads() already
    // orders the accesses, and the redundant __threadfence_block was dropped).
    __shared__ double As[SHAREDSIZE][SHAREDSIZE];
    __shared__ double Bs[SHAREDSIZE][SHAREDSIZE];

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
        // Each thread loads one element of each tile.
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Make sure both tiles are fully loaded before using them.
        __syncthreads();
        // Thread (ty, tx) computes entry [ty][tx] of this tile.
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += As[ty][k] * Bs[k][tx];
        // Finish the computation before the tiles are overwritten.
        __syncthreads();
    }

    // Each thread writes one element of the result block.
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}

// Reads `len` doubles from a whitespace-separated text file.
// Caller owns the returned buffer (free()).
double* read_array(const char* filename, int len)
{
    double *x = (double*) malloc(len * sizeof(double));
    FILE *fp = fopen(filename, "r");
    // Robustness: the original dereferenced a NULL FILE* on a missing file.
    if (!fp) {
        fprintf(stderr, "cannot open %s\n", filename);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < len; i++) {
        if (fscanf(fp, "%lf", &x[i]) != 1)
            x[i] = 0.0;   // short file: zero-fill the remainder
    }
    fclose(fp);
    return x;
}

void computeOnDevice(double* hA, double* hB, double* hC, int nRows,
                     int nInnerDimension, int nCols, int tileSize, float* incTime);

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    if (argc != 5) {
        printf("Usage: ./problem3 i j k N\n");
        return 0;
    }
    int nRows = atoi(argv[1]);
    int nInnerDimension = atoi(argv[2]);
    int nCols = atoi(argv[3]);
    int num_elementsA = nRows*nInnerDimension;
    int num_elementsB = nInnerDimension*nCols;
    int num_elementsC = nRows*nCols;
    int tileSize = atoi(argv[4]);   // change this for scaling analysis
    float incTime = 0;              // inclusive GPU time (ms)

    double* hA = read_array("problem3.inp", num_elementsA);
    double* hB = read_array("problem3.inp", num_elementsB);
    double* hC = (double*) malloc(num_elementsC * sizeof(double));

    computeOnDevice(hA, hB, hC, nRows, nInnerDimension, nCols, tileSize, &incTime);

    printf("%f\n%f\n%d\n%d\n%d\n", hC[num_elementsC-1], incTime, tileSize, nRows, nCols);

    // cleanup memory
    free(hA);
    free(hB);
    free(hC);
    return 0;
}

// Zero-pads `unpadded` (row x col) into a paddedRow x paddedCol buffer so both
// dimensions divide evenly by the tile size; copies the data only when `copy`
// is non-zero (pass 0 to just allocate a zeroed buffer).
double * zeroPadMatrix(double *unpadded, int row, int col, int paddedRow,
                       int paddedCol, int TileSize, int copy)
{
    double *paddedMatrix = (double *)calloc(paddedRow*paddedCol, sizeof(double));
    if (copy) {
        for (int i = 0; i < row; i++) {
            memcpy(&paddedMatrix[i*paddedCol], &unpadded[i*col], col*sizeof(double));
        }
    }
    return paddedMatrix;
}

// Copies the top-left row x col region of `padded` back into `unpadded`.
void extractPaddedMaxtrix(double *unpadded, double *padded, int row, int col,
                          int paddedRow, int PaddedCol, int TileSize)
{
    for (int i = 0; i < row; i++) {
        memcpy(&unpadded[i*col], &padded[i*PaddedCol], col*sizeof(double));
    }
}

// for debug use
void printMatrix(double *matrix, int row, int col)
{
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < col; j++) {
            printf("%f ", matrix[i*col + j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Pads the operands, runs the tiled kernel, extracts the result into hC,
// and reports the inclusive (alloc + copy + kernel) time in *incTime (ms).
void computeOnDevice(double* hA, double* hB, double* hC, int nRows,
                     int nInnerDimension, int nCols, int TileSize, float* incTime)
{
    // Round every dimension up to a multiple of TileSize.
    int tempRow = (nRows-1)/TileSize + 1;
    int paddednRows = tempRow*TileSize;
    int tempnInnerDimension = (nInnerDimension-1)/TileSize + 1;
    int paddedtempnInnerDimension = tempnInnerDimension*TileSize;
    int tempCol = (nCols-1)/TileSize + 1;
    int paddednCols = tempCol*TileSize;

    // Zero padding; C is only allocated (copy == 0, source arg is ignored).
    double *paddedA = zeroPadMatrix(hA, nRows, nInnerDimension, paddednRows,
                                    paddedtempnInnerDimension, TileSize, 1);
    double *paddedB = zeroPadMatrix(hB, nInnerDimension, nCols,
                                    paddedtempnInnerDimension, paddednCols, TileSize, 1);
    double *paddedC = zeroPadMatrix(hC, nRows, nCols, paddednRows, paddednCols,
                                    TileSize, 0);

    // start inclusive timing
    cudaEvent_t startIn, stopIn;
    cudaEventCreate(&startIn);
    cudaEventCreate(&stopIn);
    cudaEventRecord(startIn, 0);

    // allocate the device memory
    double *dA, *dB, *dC;
    cudaMalloc((void **)&dA, sizeof(double)*paddednRows*paddedtempnInnerDimension);
    cudaMalloc((void **)&dB, sizeof(double)*paddedtempnInnerDimension*paddednCols);
    cudaMalloc((void **)&dC, sizeof(double)*paddednRows*paddednCols);

    // copy from host to device
    cudaMemcpy(dA, paddedA, sizeof(double)*paddednRows*paddedtempnInnerDimension,
               cudaMemcpyHostToDevice);
    cudaMemcpy(dB, paddedB, sizeof(double)*paddedtempnInnerDimension*paddednCols,
               cudaMemcpyHostToDevice);

    dim3 dimGrid(paddednCols/TileSize, paddednRows/TileSize);
    dim3 dimBlock(TileSize, TileSize);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(dA, dB, dC,
                                           paddedtempnInnerDimension,
                                           paddednCols, TileSize);

    cudaMemcpy(paddedC, dC, sizeof(double)*paddednRows*paddednCols,
               cudaMemcpyDeviceToHost);
    extractPaddedMaxtrix(hC, paddedC, nRows, nCols, paddednRows, paddednCols, TileSize);

    // stop inclusive timing
    cudaEventRecord(stopIn, 0);
    cudaEventSynchronize(stopIn);
    cudaEventElapsedTime(incTime, startIn, stopIn);
    cudaEventDestroy(startIn);
    cudaEventDestroy(stopIn);

    // BUG FIX: the original leaked all device buffers and padded host copies.
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    free(paddedA);
    free(paddedB);
    free(paddedC);
}
8,095
#include <stdio.h>

#define N 2048

// Element-wise vector sum: dc[i] = da[i] + db[i] for all i < N.
// Grid-stride loop, so any launch configuration covers the whole array.
__global__ void addvec(int *dc, const int *da, const int *db)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        dc[i] = da[i] + db[i];
}

int main()
{
    int a[N], b[N], c[N];
    int *da, *db, *dc;

    // Host test vectors: a[i] = -i, b[i] = i^2.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    const size_t bytes = N * sizeof(int);
    cudaMalloc((void**)&dc, bytes);
    cudaMalloc((void**)&da, bytes);
    cudaMalloc((void**)&db, bytes);

    cudaMemcpy(da, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, bytes, cudaMemcpyHostToDevice);

    // Ceil-divide so every element gets a thread even though N % 128 may be 0.
    addvec<<<(N + 127) / 128, 128>>>(dc, da, db);

    cudaMemcpy(c, dc, bytes, cudaMemcpyDeviceToHost);

    cudaFree(dc);
    cudaFree(da);
    cudaFree(db);
    return 0;
}
8,096
// Color helpers shared by host and device code for shading and blending.
// Colors are packed integers in #RRGGBB form.

// Clamp an integer into [lower, upper].
__device__ __host__ int clamp(int value, int lower, int upper)
{
    return value < lower ? lower : (value > upper ? upper : value);
}

// Pack R, G, B (each clamped to 0..255) into a single #RRGGBB int.
__device__ __host__ int color(int r, int g, int b)
{
    r = clamp(r, 0, 255);
    g = clamp(g, 0, 255);
    b = clamp(b, 0, 255);
    return (r << 16) + (g << 8) + b;
}

// Extract the red component.
__device__ __host__ int getr(int col)
{
    return (col >> 16) & 0xFF;
}

// Extract the green component.
__device__ __host__ int getg(int col)
{
    return (col >> 8) & 0xFF;
}

// Extract the blue component.
__device__ __host__ int getb(int col)
{
    return col & 0xFF;
}

// Scale every component by `shade` (used for lighting).
__device__ int colorShade(int col, float shade)
{
    int r = int(getr(col) * shade);
    int g = int(getg(col) * shade);
    int b = int(getb(col) * shade);
    return color(r, g, b);
}

// Component-wise average of two colors.
__device__ int averageColors(int col1, int col2)
{
    int r = (int)(getr(col1) / 2. + getr(col2) / 2.);
    int g = int(getg(col1) / 2. + getg(col2) / 2.);
    int b = int(getb(col1) / 2. + getb(col2) / 2.);
    return color(r, g, b);
}

// Weighted average: `weight` for col1, (1 - weight) for col2.
__device__ int averageColors(int col1, int col2, float weight)
{
    int r = (int)(getr(col1) * weight + getr(col2) * (1 - weight));
    int g = int(getg(col1) * weight + getg(col2) * (1 - weight));
    int b = int(getb(col1) * weight + getb(col2) * (1 - weight));
    return color(r, g, b);
}

// Component-wise sum of two colors; color() saturates each channel at 255.
__device__ int addColors(int col1, int col2)
{
    int r = (int)(getr(col1) + getr(col2));
    int g = (int)(getg(col1) + getg(col2));
    int b = (int)(getb(col1) + getb(col2));
    return color(r, g, b);
}
8,097
#include "celeba.hh"

#include <string>

#include "jpg.hh"

namespace celeba
{

    namespace
    {
        // Directory holding the pre-normalized CelebA crops.
        const char* DIR_PATH = "../celeba_norm/";
        // Bytes per image: 64 x 64 pixels, 3 channels.
        constexpr std::size_t IMG_SIZE = 64 * 64 * 3;
    }

    // Loads the images named by `idxs` into one contiguous buffer, mapping
    // each byte from [0, 255] to [-1, 1]. File names are the index
    // zero-padded to 6 digits (e.g. "000123.jpg") under DIR_PATH.
    // Caller owns the returned buffer (delete[]).
    dbl_t* load(const std::vector<std::size_t>& idxs)
    {
        dbl_t* res = new dbl_t[IMG_SIZE * idxs.size()];
        for (std::size_t i = 0; i < idxs.size(); ++i)
        {
            dbl_t* img = res + i * IMG_SIZE;
            // Zero-pad the index to 6 characters.
            auto idx = std::to_string(idxs[i]);
            while (idx.size() < 6)
                idx = std::string("0") + idx;
            std::string path = std::string(DIR_PATH) + idx + ".jpg";
            std::uint8_t* pixs = img::jpg_load(path, nullptr, nullptr, nullptr);
            // Normalize: 0 -> -1.0, 255 -> 1.0.
            // NOTE(review): assumes jpg_load returns exactly IMG_SIZE bytes
            // and that delete[] matches its allocation — confirm in jpg.hh.
            for (std::size_t j = 0; j < IMG_SIZE; ++j)
                img[j] = dbl_t(pixs[j]) / 127.5 - 1.;
            delete[] pixs;
        }
        return res;
    }

    // Convenience overload: loads the contiguous index range [idx_beg, idx_end).
    dbl_t* load(std::size_t idx_beg, std::size_t idx_end)
    {
        std::vector<std::size_t> idxs;
        for (std::size_t i = idx_beg; i < idx_end; ++i)
            idxs.push_back(i);
        return load(idxs);
    }

    // Tiles `width` x `height` normalized 64x64x3 images (row-major in `data`)
    // into one large JPEG at `path`, mapping values back from [-1, 1] to
    // [0, 255].
    void save_samples(const dbl_t* data, std::size_t width, std::size_t height,
                      const std::string& path)
    {
        std::uint8_t* pixs = new std::uint8_t[width * height * 64 * 64 * 3];
        for (std::size_t i = 0; i < width; ++i)
            for (std::size_t j = 0; j < height; ++j)
            {
                // Source image for grid cell (i, j).
                const dbl_t* img = data + (j * width + i) * (64 * 64 * 3);
                // Top-left corner of this cell in the output mosaic.
                std::size_t out_x = i * 64;
                std::size_t out_y = j * 64;
                for (std::size_t x = 0; x < 64; ++x)
                    for (std::size_t y = 0; y < 64; ++y)
                        for (std::size_t c = 0; c < 3; ++c)
                        {
                            const dbl_t* pin = img + y * 64 * 3 + x * 3 + c;
                            std::uint8_t* pout = pixs + (out_y + y) * (width * 64 * 3) + (out_x + x) * 3 + c;
                            // Denormalize back to a byte.
                            *pout = (*pin + 1.) * 127.5;
                        }
            }
        img::jpg_save(path, pixs, width * 64, height * 64);
        delete[] pixs;
    }
}
8,098
/*
 * Generates an array of random numbers
 * Reference: sortGPU.cu https://www.olcf.ornl.gov/tutorials/openacc-interoperability-ii/
 */
#include <stdio.h>
#include <stdlib.h>   /* BUG FIX: exit()/EXIT_FAILURE were used without this header */
#include <curand.h>

// Fill d_buffer with num random numbers
// If you only need to generate one set of numbers and fill the array once,
// use this function. If you want to fill the array over and over again,
// use the other functions given below.
//
// Note: status codes are OR-ed together — this loses which call failed but
// still detects any failure, since CURAND_STATUS_SUCCESS == 0.
extern "C" void fill_rand(float *d_buffer, int num, void *stream, unsigned long long seed)
{
    curandGenerator_t gen;
    int status = CURAND_STATUS_SUCCESS;

    // Create generator
    status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);

    // Set CUDA stream
    status |= curandSetStream(gen, (cudaStream_t)stream);

    // Set seed
    status |= curandSetPseudoRandomGeneratorSeed(gen, seed);

    // Generate num random numbers
    // From documentation:
    // The curandGenerateUniform() function is used to generate uniformly
    // distributed floating point values between 0.0 and 1.0,
    // where 0.0 is excluded and 1.0 is included.
    status |= curandGenerateUniform(gen, d_buffer, num);

    // Cleanup generator
    status |= curandDestroyGenerator(gen);

    if (status != CURAND_STATUS_SUCCESS) {
        printf("curand failure!\n");
        exit(EXIT_FAILURE);
    }
}

//
// Set up a CUDA random number generator bound to `stream`, seed it, and
// return it. Pair with gen_rand_nums() and rand_cleanup().
//
extern "C" curandGenerator_t setup_prng(void *stream, unsigned long long seed)
{
    curandGenerator_t gen;
    int status = CURAND_STATUS_SUCCESS;

    // Create generator
    status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);

    // Set CUDA stream
    status |= curandSetStream(gen, (cudaStream_t)stream);

    // Set seed
    status |= curandSetPseudoRandomGeneratorSeed(gen, seed);

    if (status != CURAND_STATUS_SUCCESS) {
        printf("curand failure!\n");
        exit(EXIT_FAILURE);
    }
    return gen;
}

//
// Place a set of random numbers between 0.0 and 1.0 in the array d_buffer.
// Designed so that with one generator (gen) this can be called repeatedly,
// e.g. once per simulation iteration. The `stream` parameter is unused here
// (the stream was already bound in setup_prng) but kept for ABI stability.
//
extern "C" void gen_rand_nums(curandGenerator_t gen, float *d_buffer, int num, void *stream)
{
    int status = CURAND_STATUS_SUCCESS;

    // Generate num random numbers
    // From documentation:
    // The curandGenerateUniform() function is used to generate uniformly
    // distributed floating point values between 0.0 and 1.0,
    // where 0.0 is excluded and 1.0 is included.
    status |= curandGenerateUniform(gen, d_buffer, num);

    if (status != CURAND_STATUS_SUCCESS) {
        printf("curand failure!\n");
        exit(EXIT_FAILURE);
    }
}

//
// Remove the CUDA random number generator when finished with it.
//
extern "C" void rand_cleanup(curandGenerator_t gen)
{
    int status = CURAND_STATUS_SUCCESS;

    // Cleanup generator
    status |= curandDestroyGenerator(gen);

    if (status != CURAND_STATUS_SUCCESS) {
        printf("curand failure!\n");
        exit(EXIT_FAILURE);
    }
}
8,099
#include "includes.h"

// Copies the first N elements of dW into dWcurr, one element per thread.
__global__ void kernel1(float *dW, float *dWcurr, int N)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;   // guard the grid tail
    dWcurr[idx] = dW[idx];
}
8,100
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Prints each thread's block coordinates plus the block/grid dimensions,
// one line per thread.
__global__ void print_threadIdx()
{
    printf("blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d, blockDim.x: %d, blockDim.y: %d, gridDim.x: %d, gridDim.y: %d\n", blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}

int main()
{
    // 16x16 logical domain split into 8x8 blocks -> a 2x2 grid.
    const int nx = 16;
    const int ny = 16;
    dim3 block(8, 8);
    dim3 grid(nx / block.x, ny / block.y);

    print_threadIdx<<<grid, block>>>();

    // Wait for the device printf output before tearing the context down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}