serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
1,901
#include "includes.h"

// Elementwise square: thread t writes d_out[t] = d_in[t]^2.
// Intended for a single-block launch; threadIdx.x selects the element.
__global__ void square(float * d_out, float * d_in)
{
    const int tid = threadIdx.x;
    const float value = d_in[tid];
    d_out[tid] = value * value;
}
1,902
#include <stdio.h>

// Print a greeting from the host side and exit successfully.
int main(int argc, char **argv)
{
    printf("Hello World from CPU!\n");
    return 0;
}
1,903
#include <iostream>
#include <fstream>
#include <cmath>
#include <cuda.h>

// GPU Jacobi solver for the 2-D Helmholtz-type equation
//   -Laplace(u) + K2 * u = f   on the unit square, u = 0 on the boundary,
// discretized on an (n+1) x (n+1) uniform grid with spacing h.
const int n = 1025;
const double h = 1.0 / (double)(n);
const double K2 = 100.0;                       // reaction coefficient
const double cft1 = 1.0 / (4.0 + h * h * K2);  // Jacobi weight of the 4 neighbours
const double cft3 = cft1 * h * h;              // Jacobi weight of the right-hand side
const double PI = 3.1415926535897932385;
const int MaxIter = 30000;                     // total sweeps, run as MaxIter/2 ping-pong pairs
using namespace std;

// Right-hand side chosen so that the exact solution is
// u(x, y) = x * (1 - x) * sin(PI * y)  (see exsol below).
__host__ double fRight(double x, double y) { return 2.0 * sin(PI * y) + K2 * (1.0 - x) * x * sin(PI * y) + PI * PI * (1.0 - x) * x * sin(PI * y); }

// One Jacobi sweep: reads the current iterate ym, writes the next iterate um.
// Thread (i, j) updates interior node (i+1, j+1); the launch grid must tile the
// (n-1) x (n-1) interior exactly (main launches blocks = (n-1)/blockDim).
__global__ void kernelJacobi(double* ym, double* um, double* fm) {
    /* earlier flat-index variant kept for reference:
    int alpha, beta; // block position
    int i, j; // thread position inside the block
    alpha = blockIdx.x; beta = blockIdx.y; i = threadIdx.x; j = threadIdx.y;
    int bnx, bny; // block dimensions
    bnx = blockDim.x; bny = blockDim.y;
    int row,col;// element position in the matrix
    col = alpha * bnx + i; row = beta * bny + j;
    int id = row * (n + 1) + col;
    //int k;
    //um[id] = 0.0;
    //if ((id <= n * (n + 1) - 1)&&(id >= n + 1)&&((id % (n + 1)) != 0)&&((id % (n + 1)) != n))
    um[id] = (cft1 * (ym[id - n - 1] + ym[id + n + 1] + ym[id - 1] + ym[id + 1]) + fm[id]) * cft3;*/
    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    um[(i + 1) * (n + 1) + (j + 1)] = 0.0;
    // Five-point stencil: weighted sum of the 4 neighbours plus the scaled source.
    um[(i + 1) * (n + 1) + (j + 1)] = cft1 * (ym[(i + 2) * (n + 1) + (j + 1)] + ym[(i + 1) * (n + 1) + j + 2] + ym[i * (n + 1) + (j + 1)] + ym[(i + 1) * (n + 1) + j]) + cft3 * fm[(i + 1) * (n + 1) + (j + 1)];
    return;
}

// Element-wise copy um -> ym (unused: main ping-pongs the two buffers instead).
__global__ void kernelSwap(double* ym, double* um) {
    int alpha, beta; // block position
    int i, j; // thread position inside the block
    alpha = blockIdx.x;
    beta = blockIdx.y;
    i = threadIdx.x;
    j = threadIdx.y;
    int bnx, bny; // block dimensions
    bnx = blockDim.x;
    bny = blockDim.y;
    int row,col;// element position in the matrix
    col = alpha * bnx + i;
    row = beta * bny + j;
    int id = row * (n + 1) + col;
    ym[id] = um[id];
    return;
}

// Exact analytic solution, used to measure the error of the final iterate.
double exsol(double x, double y) { return x * (1.0 - x) * sin(PI * y); }

int main(int argc, char* argv[]) {
    /*int n = 128; double h = 1.0 / n; const double cft1 = 1 / (h * h); const double cft2 = 4.0 * cft1 + K2; const double cft3 = 1 / cft2; const double PI = 3.141592653589793238462643; const double PI2 = PI*PI; const double K2 = 100.0; const int MaxIter = 10000;*/
    cout << "cft1 = " << cft1 << endl;
    //cout << "cft2 = " << cft2 << endl;
    cout << "cft3 = " << cft3 << endl;
    // Host buffers: y = current iterate, u = next iterate, f = right-hand side.
    double * y;
    double * u;
    double * f;
    y = new double [(n + 1) * (n + 1)];
    u = new double [(n + 1) * (n + 1)];
    f = new double [(n + 1) * (n + 1)];
    int nbytes = (n + 1) * (n + 1) * sizeof(double);
    int i, j;
    // initialization: zero initial guess and zero Dirichlet boundary values
    for (i = 1; i < n; i++) {
        y[i * (n + 1)] = 0.0;
        y[i * (n + 1) + n] = 0.0;
        for (j = 1; j < n; j++)
            y[i * (n + 1) + j] = 0.0; //0.5
    }
    for (i = 0; i <= n; i++) {
        y[i] = 0.0;
        y[n * (n + 1) + i] = 0.0;
        for (j = 0; j <= n; j++)
            u[i * (n + 1) + j] = 0.0;
    }
    // Sample the right-hand side at every grid node.
    for (i = 0; i <= n; i++)
        for (j = 0; j <= n; j++)
            f[i * (n + 1) + j] = fRight(i * h, j * h);
    //
    cudaError_t SD;
    SD = cudaSetDevice(0);
    if (SD != cudaSuccess) {
        cout << "CUDA set device error" << endl;
        return 1;
    }
    double * uDev = NULL;
    double * yDev = NULL;
    double * fDev = NULL;
    cudaMalloc ((void **)&uDev, nbytes);
    cudaMalloc ((void **)&yDev, nbytes);
    cudaMalloc ((void **)&fDev, nbytes);
    // Events bracket the transfer + iteration phase for timing.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    cudaEventSynchronize(start);
    const int blockDimx = 1; //n; //32; //8; //16; //64; //128;
    const int blockDimy = 1024;
    dim3 threads(blockDimx, blockDimy, 1);
    // NOTE(review): (n - 1) must divide evenly by the block dimensions
    // (here 1024 x 1 with n = 1025, so it does) or part of the interior
    // would never be updated — confirm if these constants change.
    dim3 blocks((n - 1) / blockDimx, (n - 1) / blockDimy, 1);
    cout << blocks.x << "\n";
    cudaMemcpy(fDev, f, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(uDev, u, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(yDev, y, nbytes, cudaMemcpyHostToDevice);
    //int iter = 1;
    // Ping-pong yDev/uDev: two sweeps per loop iteration avoids a copy kernel.
    for (int k = 0; k < MaxIter/2; k++) {
        kernelJacobi<<<blocks, threads>>>(yDev, uDev, fDev);
        kernelJacobi<<<blocks, threads>>>(uDev, yDev, fDev);
    }
    /*do { kernelJacobi<<<blocks, threads>>>(yDev, uDev, fDev); kernelJacobi<<<blocks, threads>>>(uDev, yDev, fDev); //cudaThreadSynchronize(); //kernelSwap<<<blocks, threads>>>(yDev, uDev); //cudaThreadSynchronize(); iter++; } while (iter <= MaxIter);*/
    //cudaThreadSynchronize();
    //test
    /*kernelJacobi<<<blocks, threads>>>(yDev, uDev, fDev); cudaMemcpy(u, uDev, nbytes, cudaMemcpyDeviceToHost);*/
    //
    cudaMemcpy(y, yDev, nbytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float dt;
    cudaEventElapsedTime(&dt,start,stop);
    // absolute error against the exact solution
    double max = 0.0;
    for (int i = 0; i <= n; i++) {
        //double x = i*h;
        for (int j = 0; j <= n; j++) {
            //double y = j*h;
            double val = fabs(y[i * (n + 1) + j] - exsol(i * h, j * h));
            if (max < val) {
                max = val;
            }
        }
    }
    cout << "Mistake: " << max << endl;
    //
    // write result to file
    /*ofstream outFile; outFile.open("res.dat"); for (int i = 0; i <= n; i++) for (int j = 0; j <= n; j++) outFile << i * h << " " << j * h << " " << y[i * (n + 1) + j] << endl; outFile.close();*/
    //
    cout << "Time = " << dt << " ms"<< endl;
    delete[] u;
    delete[] y;
    delete[] f;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(uDev);
    cudaFree(yDev);
    cudaFree(fDev);
    return 0;
}
1,904
#include "includes.h"

// Rosenbrock function f = (1 - x0)^2 + 100 * (x1 - x0^2)^2,
// evaluated on the device: reads d_x[0..1], writes the scalar *d_y.
__global__ void kernelF(const float *d_x, float *d_y)
{
    const float &x0 = d_x[0];
    const float &x1 = d_x[1];
    // f = (1-x0)^2 + 100 (x1-x0^2)^2
    // Use float literals throughout: the original `1.0` was a double
    // literal, silently promoting the subtraction to double precision.
    const float a = (1.0f - x0);
    const float b = (x1 - x0 * x0);
    *d_y = (a * a) + 100.0f * (b * b);
}
1,905
/*
 Code adapted from book "CUDA by Example: An Introduction to General-Purpose
 GPU Programming". Computes a visualization of the Julia set. Two-dimensional
 "bitmap" data which can be plotted is computed by the kernel and written to
 julia_gpu.dat (viewable with gnuplot). The Julia iteration is z = z**2 + C;
 if it stays bounded, the initial point z is in the Julia set.
*/
#include <stdio.h>
#include <cuda.h>

#define DIM 1000

// Result bitmap lives in device global memory as a module-scope symbol.
__device__ int d_arr[DIM*DIM];

// Returns 1 when pixel (x, y) is in the Julia set (iteration stays bounded
// for 200 steps), 0 otherwise.
__device__ int julia( int x, int y )
{
    const float scale = 1.5;
    float jx = scale * (float)(DIM/2 - x)/(DIM/2);
    float jy = scale * (float)(DIM/2 - y)/(DIM/2);
    float cr = -0.8f;
    float ci = 0.156f;
    float ar = jx;
    float ai = jy;
    float artmp;
    for (int i = 0; i < 200; i++) {
        artmp = ar;
        ar = (ar*ar - ai*ai) + cr;
        ai = 2.0f*artmp*ai + ci;
        if ((ar*ar + ai*ai) > 1000)
            return 0;
    }
    return 1;
}

// One block per pixel: blockIdx.x/.y address the pixel, n is the row stride.
__global__ void kernel_gpu(int n)
{
    int offset = blockIdx.x + n * (blockIdx.y);
    int juliaValue = julia(blockIdx.x, blockIdx.y);
    d_arr[offset] = juliaValue;
}

int main( void )
{
    // static: a 4 MB automatic array would risk overflowing the stack.
    static int h_arr[DIM*DIM];
    FILE *out;
    int n = DIM*DIM;
    size_t memsize = n * sizeof(int);

    dim3 nBlocks(DIM, DIM, 1);
    kernel_gpu<<<nBlocks, 1>>>(DIM);

    // BUG FIX: d_arr is a __device__ symbol, not a pointer value valid on
    // the host, so plain cudaMemcpy(h_arr, d_arr, ...) is invalid — the
    // copy must go through cudaMemcpyFromSymbol. The call also synchronizes
    // with the preceding kernel on the default stream.
    cudaMemcpyFromSymbol(h_arr, d_arr, memsize);

    out = fopen( "julia_gpu.dat", "w" );
    for (int y = 0; y < DIM; y++) {
        for (int x = 0; x < DIM; x++) {
            int offset = x + y * DIM;
            if (h_arr[offset] == 1) {
                fprintf(out,"%d %d \n",x,y);
            }
        }
    }
    fclose(out);
}
1,906
#include <stdio.h>

/*
 * Show DIMs & IDs for grid, block and thread
 */
__global__ void checkIndex(void)
{
    // Report only threads whose (x + y) index sum is a non-zero multiple of 5.
    const int indexSum = threadIdx.x + threadIdx.y;
    if (indexSum && (indexSum % 5 == 0)) {
        printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) "
               "blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
               threadIdx.x, threadIdx.y, threadIdx.z,
               blockIdx.x, blockIdx.y, blockIdx.z,
               blockDim.x, blockDim.y, blockDim.z,
               gridDim.x, gridDim.y, gridDim.z);
    }
}

int main(int argc, char **argv)
{
    // grid and block structure
    dim3 block(7, 6);
    dim3 grid(2, 2);

    // check for host
    printf("CHECK for host:\n");
    printf("grid.x = %d\t grid.y = %d\t grid.z = %d\n", grid.x, grid.y, grid.z);
    printf("block.x = %d\t block.y = %d\t block.z %d\n", block.x, block.y, block.z);

    // check for device
    printf("CHECK for device:\n");
    checkIndex<<<grid, block>>>();

    // reset device
    cudaDeviceReset();
    return (0);
}
1,907
#include "includes.h"

// Squares each element of d_in into d_out, one element per thread
// (single-block launch assumed: threadIdx.x is the element index).
__global__ void square(float * d_out, float * d_in)
{
    const int element = threadIdx.x;
    const float input = d_in[element];
    d_out[element] = input * input;
}
1,908
#include <iostream>
#include <time.h>
#include <random>
#include "kernels.cuh"

// Driver: shuffle the values 1..n, build a binary tree from them on the GPU
// (build_binary_tree from kernels.cuh), then print the shuffled input and
// the resulting child table.
int main()
{
    unsigned int n = 32;

    // Host/device buffers: values, tree root, and the child table
    // (two child slots per node, plus one extra pair).
    int *h_x;
    int *d_x;
    int *h_root;
    int *d_root;
    int *h_child;
    int *d_child;

    h_x = (int*)malloc(n * sizeof(int));
    h_root = (int*)malloc(sizeof(int));
    h_child = (int*)malloc(2 * (n + 1) * sizeof(int));
    cudaMalloc((void**)&d_root, sizeof(int));
    cudaMalloc((void**)&d_x, n * sizeof(int));
    cudaMalloc((void**)&d_child, 2 * (n + 1) * sizeof(int));
    // -1 marks "no child" in every slot.
    cudaMemset(d_child, -1, 2 * (n + 1) * sizeof(int));

    // Fill with 1..n, then Fisher-Yates shuffle in place.
    for (unsigned int k = 0; k < n; k++) {
        h_x[k] = k + 1;
    }
    for (unsigned int k = 0; k < n; k++) {
        unsigned int pick = random() % (n - k);
        int held = h_x[k];
        h_x[k] = h_x[k + pick];
        h_x[k + pick] = held;
    }
    *h_root = h_x[0];

    for (unsigned int k = 0; k < n; k++) {
        std::cout << h_x[k] << " ";
    }
    std::cout << "" << std::endl;

    // copy data to device
    cudaMemcpy(d_root, h_root, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_x, h_x, n * sizeof(int), cudaMemcpyHostToDevice);

    // kernel call
    dim3 gridSize = 8;
    dim3 blockSize = 8;
    build_binary_tree<<< gridSize, blockSize>>>(d_x, d_child, d_root, n);

    // copy from device back to host (synchronizes with the kernel)
    cudaMemcpy(h_child, d_child, 2 * (n + 1) * sizeof(int), cudaMemcpyDeviceToHost);

    // print tree
    for (unsigned int k = 0; k < 2 * (n + 1); k++) {
        std::cout << h_child[k] << " ";
    }
    std::cout << "" << std::endl;

    // free memory
    free(h_x);
    free(h_root);
    free(h_child);
    cudaFree(d_x);
    cudaFree(d_root);
    cudaFree(d_child);
}
1,909
// Device helper: elementwise square root, out[i] = sqrt(in[i]) for the
// flat global thread index i; slots at or beyond n do nothing.
extern "C" __device__ void loop_cuda(float *in, float *out, size_t n)
{
    // Cast before multiplying: blockDim.x * blockIdx.x is evaluated in
    // 32-bit unsigned arithmetic and can wrap for very large grids before
    // being widened to size_t.
    size_t i = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        // sqrtf keeps the computation explicitly in single precision.
        out[i] = sqrtf(in[i]);
    }
}
1,910
#include <iostream>
#include <math.h>

// Elementwise vector add: y[i] = x[i] + y[i] for i in [0, n).
// Grid-stride loop, so any launch configuration covers all n elements
// and produces the same result.
__global__ void add(unsigned long long int n, float *x, float *y)
{
    unsigned long long int index =
        (unsigned long long int)blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long int stride =
        (unsigned long long int)gridDim.x * blockDim.x;
    for (unsigned long long int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main(void)
{
    unsigned long long int N = 1ULL << 29;
    float *x, *y;

    // Unified memory: host init loop and device kernel share the buffers.
    // Check the allocations — 2 * 2 GiB can legitimately fail.
    if (cudaMallocManaged(&x, N * sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&y, N * sizeof(float)) != cudaSuccess) {
        std::cerr << "cudaMallocManaged failed" << std::endl;
        return 1;
    }

    for (unsigned long long int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch enough blocks to cover N (the original single-block launch
    // serialized 2^29 elements over 256 threads); the grid-stride loop
    // keeps the result identical for any configuration.
    const int blockSize = 256;
    const int numBlocks = (int)((N + blockSize - 1) / blockSize);
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Surface launch-config errors and asynchronous execution errors.
    if (cudaGetLastError() != cudaSuccess ||
        cudaDeviceSynchronize() != cudaSuccess) {
        std::cerr << "kernel launch or execution failed" << std::endl;
        return 1;
    }

    cudaFree(x);
    cudaFree(y);
    return 0;
}
1,911
#include <iostream>
#include <cstdio>  // printf: the original relied on <iostream> pulling this in transitively

// __global__ indicates that the function is to be run on device (GPU).
__global__ void kernel(void) {}

int main(void)
{
    // <<<1,1>>> is the launch configuration (1 block of 1 thread);
    // ordinary arguments to the device function go inside () as usual.
    kernel<<<1,1>>>();
    printf("Hello, World!\n");
    return 0;
}
1,912
// RUN: %clang --cuda-host-only -nocudainc -target i386-unknown-linux-gnu -x cuda -E -dM -o - /dev/null | FileCheck --check-prefix HOST %s // RUN: %clang --cuda-device-only -nocudainc -target i386-unknown-linux-gnu -x cuda -E -dM -o - /dev/null | FileCheck --check-prefix DEVICE-NOFAST %s // RUN: %clang -fcuda-approx-transcendentals --cuda-device-only -nocudainc -target i386-unknown-linux-gnu -x cuda -E -dM -o - /dev/null | FileCheck --check-prefix DEVICE-FAST %s // RUN: %clang -ffast-math --cuda-device-only -nocudainc -target i386-unknown-linux-gnu -x cuda -E -dM -o - /dev/null | FileCheck --check-prefix DEVICE-FAST %s // HOST-NOT: __CLANG_CUDA_APPROX_TRANSCENDENTALS__ // DEVICE-NOFAST-NOT: __CLANG_CUDA_APPROX_TRANSCENDENTALS__ // DEVICE-FAST: __CLANG_CUDA_APPROX_TRANSCENDENTALS__
1,913
#include <cstdint>
#include <cmath>
#include <fstream>
#include <iostream>
#include <vector>

// Threads per block for the diffusion kernel launches.
const int BLOCK_SIZE = 1024;

// 1-D point-source pollution diffusion simulation: a contaminant is injected
// at cell 0 of a cylinder and diffuses for a number of time steps.
class PointSourcePollution {
public:
    PointSourcePollution() = default;
    ~PointSourcePollution() = default;
    // Write the cylinder concentrations to "output.txt", space-separated.
    void end(const double* data, uint64_t cylinder_size);
};

void PointSourcePollution::end(const double* data, uint64_t cylinder_size) {
    std::ofstream payload;
    payload.open("output.txt");
    for (uint64_t i = 0; i < cylinder_size; ++i) {
        if (i != 0) {
            payload << " ";
        }
        payload << data[i];
    }
    payload.close();
}

// Central-difference update: the new cell value is the mean of its neighbours.
__device__ void central_difference_theorem( double left, double right, double& out ) {
    out = (left + right) / 2.0;
}

// One diffusion step over the cylinder; thread i updates cell i.
// NOTE(review): `right = cylinder[i + 1]` reads one element past the end
// when i == cylinder_size - 1 — out-of-bounds global read.
// NOTE(review): the temp/cylinder/copy_cylinder swap below only exchanges
// the kernel's local pointer copies, so it has no effect outside the
// kernel; the update is effectively done in place on `cylinder`, and
// copy_cylinder/temp are never really used.
__global__ void diffuse( double* cylinder, double* copy_cylinder, double* temp, uint64_t cylinder_size, uint64_t diffusion_time, uint64_t contaminant_concentration ) {
    double left, right, cdt_out;
    int i = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (i < cylinder_size) {
        if (i > 0)
            left = cylinder[i - 1];
        else
            left = cylinder[i];
        right = cylinder[i + 1];
        central_difference_theorem(left, right, cdt_out);
        cylinder[i] = cdt_out;
        temp = cylinder;
        cylinder = copy_cylinder;
        copy_cylinder = temp;
    }
}

int main(int argc, char** argv) {
    uint64_t cylinder_size, slice_location, diffusion_time, contaminant_concentration;
    if (argc < 5) {
        std::cerr << "usage: psp cylinder_size slice_location diffusion_time contaminant_concentration" << std::endl;
        return EXIT_FAILURE;
    }
    // Reject negative inputs.
    // NOTE(review): the loop starts at i = 0, so it also runs atoi on the
    // program name argv[0] — presumably harmless (atoi yields 0) but
    // probably meant to start at 1.
    for (int i = 0; i < argc; ++i) {
        if (atoi(argv[i]) < 0) {
            std::cerr << "All inputs must be greater than 0" << std::endl;
            return EXIT_FAILURE;
        }
    }
    PointSourcePollution psp;
    cylinder_size = atoi(argv[1]);
    slice_location = atoi(argv[2]);
    diffusion_time = atoi(argv[3]);
    contaminant_concentration = atoi(argv[4]);
    cudaError_t e;
    double *cylinder, *copy_cylinder, *temp;
    // Unified memory: initialized on host, updated by the kernel.
    cudaMallocManaged(&cylinder, cylinder_size * sizeof(double));
    cudaMallocManaged(&copy_cylinder, cylinder_size * sizeof(double));
    cudaMallocManaged(&temp, cylinder_size * sizeof(double));
    // init our arrays: contaminant at cell 0, zero everywhere else
    for (int i = 0; i < cylinder_size; ++i) {
        if (i == 0) {
            cylinder[i] = contaminant_concentration;
            copy_cylinder[i] = contaminant_concentration;
        } else {
            cylinder[i] = 0.0;
            copy_cylinder[i] = 0.0;
        }
    }
    std::cout << cylinder[0] << copy_cylinder[0] << std::endl;
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cerr << "Error: " << cudaGetErrorString(e) << std::endl;
        return EXIT_FAILURE;
    }
    // Enough blocks to cover the cylinder.
    const uint64_t GRID_SIZE = ceil(cylinder_size / static_cast<double>(BLOCK_SIZE));
    // One kernel launch per time step; launches on the same stream run in order.
    for (int i = 0; i < diffusion_time; ++i) {
        diffuse<<<GRID_SIZE, BLOCK_SIZE>>>( cylinder, copy_cylinder, temp, cylinder_size, diffusion_time, contaminant_concentration);
    }
    cudaDeviceSynchronize();
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cerr << "Error2: " << cudaGetErrorString(e) << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << "Answer at slice location: " << slice_location << " is " << cylinder[slice_location] << std::endl;
    std::cout << "Now visualizing results..." << std::endl;
    psp.end(cylinder, cylinder_size);
    // NOTE(review): temp is allocated above but never freed here.
    cudaFree(cylinder);
    cudaFree(copy_cylinder);
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cerr << cudaGetErrorString(e) << std::endl;
        return EXIT_FAILURE;
    }
    system("python plot.py");
    return EXIT_SUCCESS;
}
1,914
#include "conv2d.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
#include "conv2d-input-grad.hh"
#include "conv2d-kernel-grad.hh"
#include "ops-builder.hh"
#include <cassert>
#include <stdexcept>
#include <cmath>

namespace ops {

// 2-D convolution op with SAME-style padding: the output spatial size is
// ceil(input / stride), and the constructor computes the padding needed to
// achieve that (same formula TensorFlow documents for padding="SAME").
// Input shape: [batch, height, width, channels]; kernel shape:
// [filter_h, filter_w, in_channels, out_channels].
// NOTE(review): m_strides stores the raw `strides` pointer, not a copy —
// the caller's array must outlive this op; confirm against call sites.
Conv2D::Conv2D(Op* input, Op* kernel, const int strides[])
    : Op("conv2d", Shape({input->shape_get()[0], (int)std::ceil(static_cast<float>(input->shape_get()[1]) / (float)strides[0]), (int)std::ceil(static_cast<float>(input->shape_get()[2]) / (float)strides[1]), kernel->shape_get()[3]}), {input, kernel})
    , m_strides(strides)
    , m_input_shape(input->shape_get())
    , m_kernel_shape(kernel->shape_get())
{
    int in_height = input->shape_get()[1];
    int in_width = input->shape_get()[2];
    int filter_height = kernel->shape_get()[0];
    int filter_width = kernel->shape_get()[1];
    // Total padding per axis so that every input pixel is covered given the stride.
    int pad_along_height = 0;
    int pad_along_width = 0;
    if (in_height % strides[0] == 0)
        pad_along_height = std::max(filter_height - strides[0], 0);
    else
        pad_along_height = std::max(filter_height - (in_height % strides[0]), 0);
    if (in_width % strides[1] == 0)
        pad_along_width = std::max(filter_width - strides[1], 0);
    else
        pad_along_width = std::max(filter_width - (in_width % strides[1]), 0);
    // Split the padding: the top/left side gets the smaller half when odd.
    m_pad_top = pad_along_height / 2;
    m_pad_left = pad_along_width / 2;
    m_padded_size[0] = pad_along_height;
    m_padded_size[1] = pad_along_width;
}

// Compile this op into a runtime node: resolve predecessor shapes, allocate
// the output tensor, and register the conv2d node with the graph.
void Conv2D::compile()
{
    auto& g = Graph::instance();
    auto& cinput = g.compiled(preds()[0]);
    auto& ckernel = g.compiled(preds()[1]);
    // Output shape mirrors the formula used in the constructor's Shape.
    std::size_t b = cinput.out_shape[0];
    std::size_t i = (std::size_t)std::ceil( static_cast<float>(cinput.out_shape[1]) / (float)m_strides[0]);
    std::size_t j = (std::size_t)std::ceil( static_cast<float>(cinput.out_shape[2]) / (float)m_strides[1]);
    std::size_t k = ckernel.out_shape[3];
    Shape out_shape({int(b), int(i), int(j), int(k)});
    dbl_t* out_data = tensor_alloc(out_shape.total());
    int input_size[4] = { cinput.out_shape[0], cinput.out_shape[1], cinput.out_shape[2], cinput.out_shape[3]};
    int kernel_size[4] = { ckernel.out_shape[0], ckernel.out_shape[1], ckernel.out_shape[2], ckernel.out_shape[3]};
    auto out_node = rt::Node::op_conv2d(cinput.out_data, ckernel.out_data, m_strides, m_padded_size[0], m_padded_size[1], out_data, input_size, kernel_size, {cinput.out_node, ckernel.out_node});
    g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}

// Gradient w.r.t. child `index`: 0 = input, 1 = kernel.
// dout is the gradient flowing from downstream; it must exist because
// conv2d cannot be the last node of a gradient computation.
Op* Conv2D::child_grad(std::size_t index, Op* dout)
{
    assert(index < 2);
    if (dout == nullptr)
        throw std::runtime_error {"conv2d must not be the final node of the gradient"};
    auto& builder = OpsBuilder::instance();
    int input_size[4] = { m_input_shape[0], m_input_shape[1], m_input_shape[2], m_input_shape[3]};
    int kernel_size[4] = { m_kernel_shape[0], m_kernel_shape[1], m_kernel_shape[2], m_kernel_shape[3]};
    if (index == 0)
        return builder.conv2d_input_grad(dout , preds()[1], m_strides, input_size);
    else
        return builder.conv2d_kernel_grad(dout, preds()[0], m_strides, kernel_size, m_padded_size);
}

}
1,915
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include <cassert>

// Swap helpers for the in-shared-memory sorts below.
__device__ inline void swapf(float & a, float & b)
{
    float tmp = a;
    a = b;
    b = tmp;
}

__device__ inline void swap(int & a, int & b)
{
    int tmp = a;
    a = b;
    b = tmp;
}

// k-nearest-neighbours kernel: b batches of n points stored as xyz triplets
// (stride 3 per point; the first d coordinates are compared). For each point
// i, all n squared distances are sorted in shared memory and the k closest
// points — excluding the point itself, which sorts to rank 0 with distance
// 0 — are written to result / result_i.
// Grid: blockIdx.x tiles batches, blockIdx.y tiles points; threads in a
// block cooperate on one point's distance row. Requires n <= 4096 (shared
// memory capacity, asserted).
__global__ void KnnKernel(int b, const int n, const int d, const float * xyz,
                          const int k, float * result, int * result_i)
{
    const int size = 4096;
    __shared__ float dist[size];
    __shared__ int idx[size];
    assert( n <= size );
    for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x ) {
        for ( int i = blockIdx.y ; i < n ; i += gridDim.y ) {
            // Squared distance from point i to every point j.
            for ( int j = threadIdx.x ; j < n ; j += blockDim.x ) {
                if( i == j ){
                    dist[j] = 0;
                    idx[j] = j;
                    continue;
                }
                // BUG FIX: the original declared `float d = 0.0;` here,
                // shadowing the dimension parameter `d`, so the loop
                // condition `di < d` compared against 0.0 and the loop body
                // never ran — every distance came out 0. Accumulate into a
                // distinctly named variable instead.
                float sumsq = 0.0f;
                for ( int di = 0 ; di < d ; ++di ) {
                    float dif = xyz[(bi*n+i)*3+di] - xyz[(bi*n+j)*3+di];
                    sumsq += dif*dif;
                }
                dist[j] = sumsq;
                idx[j] = j;
            }
            __syncthreads();

            // Sort the n distances (and their indices) ascending. Both
            // branches are uniform across the block, so the barriers inside
            // are reached by all threads.
            int pownum = int(log2(float(n)));
            if ( n != pow(2, pownum) ){
                // n not a power of two: odd-even transposition sort.
                for ( int cnt = 0 ; cnt < ( n + 1 ) / 2 ; ++cnt ) {
                    for ( int j = 2*threadIdx.x + 1 ; j < n ; j += 2*blockDim.x ) {
                        if ( dist[j] < dist[ j - 1 ] ) {
                            swapf(dist[j], dist[j-1]);
                            swap(idx[j], idx[j-1]);
                        }
                    }
                    __syncthreads();
                    for ( int j = 2*threadIdx.x + 2 ; j < n ; j += 2*blockDim.x ) {
                        if ( dist[j] < dist[ j - 1 ] ) {
                            swapf(dist[j], dist[j-1]);
                            swap(idx[j], idx[j-1]);
                        }
                    }
                    __syncthreads();
                }
            }else{
                // n is a power of two: bitonic sort.
                for (unsigned int t = 2; t <= n ; t *= 2) {
                    // Bitonic merge:
                    for (unsigned int j = t / 2; j>0; j /= 2) {
                        for (unsigned int tid = threadIdx.x ; tid < n ; tid += blockDim.x ) {
                            unsigned int ixj = tid ^ j;
                            if (ixj > tid) {
                                if ((tid & t) == 0) {
                                    if (dist[tid] > dist[ixj]) {
                                        swapf(dist[tid], dist[ixj]);
                                        swap(idx[tid], idx[ixj]);
                                    }
                                } else {
                                    if (dist[tid] < dist[ixj]) {
                                        swapf(dist[tid], dist[ixj]);
                                        swap(idx[tid], idx[ixj]);
                                    }
                                }
                            }
                        }
                        __syncthreads();
                    }
                }
            }
            __syncthreads();

            // Copy the k nearest: rank 0 is the query point itself
            // (distance 0), so the output starts at rank 1.
            for ( int j = threadIdx.x ; j < k ; j += blockDim.x ) {
                result[(bi*n+i)*k+j] = dist[j+1];
                result_i[ ((bi*n+i)*k+j)*2+0 ] = bi;
                result_i[ ((bi*n+i)*k+j)*2+1 ] = idx[j+1];
            }
        }
    }
}

// Host-side launcher: b batch tiles x 16 point tiles, 512 threads per block.
void KnnKernelLauncher(int b, const int n, const int d, const float * xyz,
                       const int k, float * result, int * result_i)
{
    KnnKernel<<<dim3(b,16,1),512>>>(b,n,d,xyz,k,result,result_i);
}
#endif
1,916
#pragma once
#include <curand_kernel.h>
#include <stdio.h>

// Tolerance used by the debug consistency check in TriCircumcenter3d.
#define _tol 10E-6

typedef float real; //Change this between double or (float) single precision
//typedef float3 real3; //Change this between double or (float) single precision

// Minimal 3-component vector with indexed access (0 = x, 1 = y, 2 = z).
struct real3 {
    real x, y, z;
    real& operator [] (size_t index) {
        return *(&x + index);
    }
};

template <typename T>
__device__ __host__ __forceinline__ T cuDist(T x1, T y1, T z1, T x2, T y2, T z2){
    //square distance between point (x1,y1,z1) and (x2,y2,z2)
    T dx, dy, dz;
    dx = x1 - x2;
    dy = y1 - y2;
    dz = z1 - z2;
    dx *= dx;
    dy *= dy;
    dz *= dz;
    return dx + dy + dz;
}

__device__ __forceinline__ real generateRAND(curandState* globalState, int ind) {
    //generate random number (callable from the device)
    //stolen from https://nidclip.wordpress.com/2014/04/02/cuda-random-number-generation/
    //copy state to local mem
    // NOTE(review): states are shared modulo 1024, so threads with
    // ind >= 1024 reuse (and may race on) the same state slot — confirm
    // intended for the launch sizes used.
    ind = ind%1024;
    curandState localState = globalState[ind];
    //apply uniform distribution with calculated random
    real rndval = curand_uniform(&localState);
    //update state
    globalState[ind] = localState;
    //return value
    return rndval;
}

__device__ __forceinline__ void NormalizeVector(real&vector_x, real&vector_y, real&vector_z){
    //Normalize an input vector
    //real nn = rnorm3df(vector_x, vector_y, vector_z);//1/sqrt(vector_x^2 + vector_y^2 + vector_z^2)
    real nn = real(1)/sqrtf(vector_x*vector_x + vector_y*vector_y + vector_z*vector_z);
    vector_x *= nn;
    vector_y *= nn;
    vector_z *= nn;
}

__device__ __forceinline__ void CrossProdcut(const real xv1, const real yv1, const real zv1, //Input:Vector 1
                                             const real xv2, const real yv2, const real zv2, //Input:Vector 2
                                             real&xx, real&yy, real&zz){ //Output:Vector 3
    //Find the cross product between vector 1 and vectro 2
    xx = yv1*zv2 - zv1*yv2;
    yy = zv1*xv2 - xv1*zv2;
    zz = xv1*yv2 - yv1*xv2;
}

__device__ __forceinline__ real DotProdcut(const real xv1, const real yv1, const real zv1, //Input:Vector 1
                                           const real xv2, const real yv2, const real zv2){ //Input:Vector 2
    //Dot product of two vectors
    return xv1*xv2 + yv1*yv2 + zv1*zv2;
}

__device__ __forceinline__ void ProjectPointOntoPlane(const real point_x, const real point_y, const real point_z, //Input: Point to project
                                                      const real normal_dx, const real normal_dy, const real normal_dz, //Input: normal to the plan
                                                      const real orig_x, const real orig_y, const real orig_z, //Input: point on the plane
                                                      real&projected_x, real&projected_y, real&projected_z){//Output: projected point
    //return the ortho distance to the plane
    //http://stackoverflow.com/questions/9605556/how-to-project-a-3d-point-to-a-3d-plane
    real point_orig_x(point_x - orig_x), point_orig_y(point_y - orig_y), point_orig_z(point_z - orig_z);
    //NormalizeVector(point_orig_x, point_orig_y, point_orig_z);
    real dot1 = DotProdcut(point_orig_x, point_orig_y, point_orig_z, normal_dx, normal_dy, normal_dz);
    projected_x = point_x - dot1*normal_dx;
    projected_y = point_y - dot1*normal_dy;
    projected_z = point_z - dot1*normal_dz;
    //return sqrtf(cuDist(projected_x, projected_y, projected_z, point_x, point_y, point_z));
}

__device__ __forceinline__ void RandSpoke1D(const real x,const real y, const real z, //Input: starting point of the spoke
                                            const real xn1, const real yn1, const real zn1, //Input: normal to the plane 1
                                            const real xn2, const real yn2, const real zn2, //Input: normal to the plane 2
                                            real&xv, real&yv, real&zv, //Output: direction of the spoke (normalized)
                                            curandState* globalState, int randID){ //Input: global state for rand generate
    //Random spoke sampling along a 1D line defined by the intersection
    //of two planes (plane 1 and plane 2)
    //spoke starting point should be on the 1D line (not checked)
    //the two planes are defined by their normal vectors
    // The line direction is the cross product of the two plane normals.
    CrossProdcut(xn1, yn1, zn1, xn2, yn2, zn2, xv, yv, zv);
    //randomly alternative the direction to point in the opposite direction
    real randNum = generateRAND(globalState, randID);
    if(randNum < 0.5){
        xv *=-1;
        yv *=-1;
        zv *=-1;
    }
    NormalizeVector(xv, yv, zv);
    //testing
    /*real dot1 = DotProdcut(xv,yv,zv, xn1,yn1,zn1); real dot2 = DotProdcut(xv,yv,zv, xn2,yn2,zn2); printf("\n dot1= %f, dot2= %f", dot1, dot2);*/
}

__device__ __forceinline__ void RandSpoke2D(const real x,const real y, const real z, //Input: starting point of the spoke
                                            const real xn, const real yn, const real zn, //Input: normal to the plane
                                            real&xv, real&yv, real&zv, //Output: direction of the spoke (normalized)
                                            curandState* globalState, int randID){ //Input: global state for rand generate
    //Random spoke sampling in a 2D plane embedded in the 3D domain
    //spoke starting point should be on the 2D plane
    //The 2d plane is defined by its normal vector
    //Algorithm: throw random point in the space, then project it
    //to the plane, return the direction as the ray starting from (x,y,z)
    //and pointing to the projected point
    real Vx_rand = generateRAND(globalState, randID);
    real Vy_rand = generateRAND(globalState, randID);
    real Vz_rand = generateRAND(globalState, randID);
    ProjectPointOntoPlane(Vx_rand, Vy_rand, Vz_rand, xn, yn,zn, x,y,z, xv,yv,zv);
    xv -=x;
    yv -=y;
    zv -=z;
    NormalizeVector(xv, yv, zv);
    //testing
    //real dot = DotProdcut(xv,yv,zv, xn,yn,zn);
    //printf("\n RandSpoke2D() DOT= %f", dot);
    /*printf("\n Vx_rand= %f, Vy_rand= %f, Vz_rand= %f",Vx_rand, Vy_rand, Vz_rand); printf("\n \n xn= %f, yn= %f, zn= %f \n xv= %f, yv= %f, zv= %f", xn, yn, zn, xv, yv, zv); printf("\n dot =%f\n",dot ); printf("\n px =%f, py =%f, pz =%f\n", x+0.2*xn, y+0.2*yn, z+0.2*zn); printf("\n qx =%f, qy =%f, qz =%f\n", x+0.2*xv, y+0.2*yv, z+0.2*zv);*/
}

__device__ __forceinline__ void RandSpoke3D(const real x, const real y, const real z, //Input: starting point of the spoke
                                            real&xv, real&yv, real&zv, //Output: direction of the spoke (normalized)
                                            curandState* globalState, int randID){ //Input: global state for rand generate
    //Random spoke sampling in the 3d domain; there is no constraints at all
    // NOTE(review): curand_uniform yields values in (0, 1], so only the
    // positive octant of directions is sampled — confirm intended.
    xv = generateRAND(globalState, randID);
    yv = generateRAND(globalState, randID);
    zv = generateRAND(globalState, randID);
    NormalizeVector(xv, yv, zv);
    //printf("\n xv= %f, yv= %f, zv= %f\n", xv, yv, zv);
}

__device__ __forceinline__ bool SpokePlaneIntersect(const real pp_x, const real pp_y, const real pp_z, const real pv_x, const real pv_y, const real pv_z, //Input: plane (point, normal vector)
                                                    const real pt_x, const real pt_y, const real pt_z, const real sp_v_x, const real sp_v_y, const real sp_v_z, //Input: spoke (point and vector)
                                                    real&point_x, real&point_y, real&point_z){ //Output: point
    //Plane line intersection. Plane define by normal vector (pv_x,pv_y,pv_z) and point on it(pp_x,pp_y,pp_z)
    // and line between point ip1 and ip2
    real dot = DotProdcut(sp_v_x, sp_v_y, sp_v_z, pv_x, pv_y, pv_z);
    // Spoke parallel to the plane: no intersection.
    if (abs(dot) <= 0.0){
        return false;
    }
    real s = (DotProdcut(pv_x, pv_y, pv_z, pp_x - pt_x, pp_y - pt_y, pp_z - pt_z)) / (dot);
    // Intersection parameter must lie within the spoke segment (with slack).
    if (s<-1.0*10E-8 || s >1.0 + 10E-8){
        return false;
    }
    point_x = pt_x + s*sp_v_x;
    point_y = pt_y + s*sp_v_y;
    point_z = pt_z + s*sp_v_z;
    return true;
}

__device__ __forceinline__ bool SpokePlaneTrimming(const real pp_x, const real pp_y, const real pp_z, const real pv_x, const real pv_y, const real pv_z, //Input: plane (point, normal vector)
                                                   const real pt_x_st, const real pt_y_st, const real pt_z_st, real&pt_x_end, real&pt_y_end, real&pt_z_end){ //Input: spoke (starting and end point)
    //Trim the spoke by the plane,
    //Return the trimmed spoke; only the end point of the spoke is allowed to be change
    const real sp_v_x = pt_x_end - pt_x_st;
    const real sp_v_y = pt_y_end - pt_y_st;
    const real sp_v_z = pt_z_end - pt_z_st;
    real point_x(0), point_y(0), point_z(0);
    if(SpokePlaneIntersect(pp_x,pp_y, pp_z, pv_x, pv_y, pv_z, pt_x_st, pt_y_st, pt_z_st, sp_v_x, sp_v_y, sp_v_z, point_x, point_y, point_z)){
        pt_x_end = point_x;
        pt_y_end = point_y;
        pt_z_end = point_z;
        return true;
    }else{
        return false;
    }
}

__device__ __forceinline__ real TriCircumcenter3d(real xa, real ya, real za, real xb, real yb, real zb, real xc, real yc, real zc,
                                                  real&x_cir, real&y_cir, real&z_cir){
    //http://www.ics.uci.edu/~eppstein/junkyard/circumcenter.html
    //http://gamedev.stackexchange.com/questions/60630/how-do-i-find-the-circumcenter-of-a-triangle-in-3d
    // Computes the circumcenter of triangle (a, b, c) into (x_cir, y_cir, z_cir)
    // and returns the squared circumradius.
    real xba, yba, zba, xca, yca, zca;
    real balength, calength;
    real xcrossbc, ycrossbc, zcrossbc;
    real denominator;
    real xcirca, ycirca, zcirca;
    xba = xb - xa;
    yba = yb - ya;
    zba = zb - za;
    xca = xc - xa;
    yca = yc - ya;
    zca = zc - za;
    balength = xba * xba + yba * yba + zba * zba;
    calength = xca * xca + yca * yca + zca * zca;
    xcrossbc = yba * zca - yca * zba;
    ycrossbc = zba * xca - zca * xba;
    zcrossbc = xba * yca - xca * yba;
    // NOTE(review): degenerate (collinear) triangles make this cross product
    // zero and the division below undefined — not guarded here.
    denominator = real(0.5) / (xcrossbc * xcrossbc + ycrossbc * ycrossbc + zcrossbc * zcrossbc);
    xcirca = ((balength * yca - calength * yba) * zcrossbc - (balength * zca - calength * zba) * ycrossbc) * denominator;
    ycirca = ((balength * zca - calength * zba) * xcrossbc - (balength * xca - calength * xba) * zcrossbc) * denominator;
    zcirca = ((balength * xca - calength * xba) * ycrossbc - (balength * yca - calength * yba) * xcrossbc) * denominator;
    x_cir = xcirca + xa;
    y_cir = ycirca + ya;
    z_cir = zcirca + za;
    real len1, dx, dy, dz;
    dx = xa - x_cir;
    dy = ya - y_cir;
    dz = za - z_cir;
    len1 = dx*dx + dy*dy + dz*dz;
#ifdef debug
    // Debug check: all three vertices must be equidistant from the center.
    dx = xb - x_cir;
    dy = yb - y_cir;
    dz = zb - z_cir;
    real len2 = dx*dx + dy*dy + dz*dz;
    dx = xc - x_cir;
    dy = yc - y_cir;
    dz = zc - z_cir;
    real len3 = dx*dx + dy*dy + dz*dz;
    if (fabs(len1 - len2)>_tol || fabs(len3 - len2)>_tol || fabs(len1 - len3)>_tol){
        printf("\nError at TriCircumcenter3d()..!!\n");
    }
#endif
    return len1;
}
1,917
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <vector> #include <iostream> #include <chrono> using namespace std; // adds elements of array in place like this for a 11 element array: // [1][1][1][1][1][1][1][1][1][1][1][0][0][0][0][0] // ^+=^ ^+=^ ^+=^ ^+=^ ^+=^ ^+=^ ^+=^ ^+=^ // [2][1][2][1][2][1][2][1][2][1][1][0][0][0][0][0] // ^ += ^ ^ += ^ ^ += ^ ^ += ^ // [4][1][2][1][4][1][2][1][3][1][1][0][0][0][0][0] // ^ += ^ ^ += ^ // [8][1][2][1][4][1][2][1][3][1][1][0][0][0][0][0] // ^ += ^ // [11][1][2][1][4][1][2][1][3][1][1][0][0][0][0][0] // ^ this is the final total __global__ void addKernel(unsigned int *a, unsigned int interval, unsigned int xDim) { unsigned int xInd = blockIdx.x * blockDim.x + threadIdx.x; unsigned int yInd = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = interval * (xInd + xDim * yInd); a[i] = a[i] + a[i + interval / 2]; } // Helper function for using CUDA to add vectors in parallel. unsigned int addWithCuda(unsigned int* aHost, unsigned int size) { unsigned int* aDevice; // Choose which GPU to run on, change this on a multi-GPU system. cudaSetDevice(0); // Allocate GPU buffers for the vector, ensuring that this buffer is a multiple of 2 equal to or larger than the size of the input array. 
set any additional elements to 0 unsigned int multTwoSize = 2; while (multTwoSize < size) { multTwoSize = multTwoSize * 2; } cudaMalloc(&aDevice, multTwoSize * sizeof(unsigned int)); if (multTwoSize > size) { cudaMemset(&(aDevice[size]), 0, (multTwoSize - size) * sizeof(unsigned int)); } // Copy input vector from host memory to GPU buffer cudaMemcpy(aDevice, aHost, size * sizeof(unsigned int), cudaMemcpyHostToDevice); auto start = chrono::high_resolution_clock::now(); // Launch a kernel on the GPU with one thread first for every other element then every fourth and so on, synchronizing threads after each iteration unsigned int interval = 2; while (interval <= multTwoSize) { unsigned int numThreads = multTwoSize / interval; dim3 gridDim(1, 1); dim3 blockDim(1, 1); // max block dimension is 32x32 threads since the max threads per block is 1024 // max grid dimension is 2048x2048 blocks assuming each block is 32x32 threads. This stems from the max x and y dimensions of 65536x65536 if (numThreads > 32) { blockDim.x = 32; if ((numThreads / 32) > 32) { blockDim.y = 32; if ((numThreads / (32 * 32)) > 2048) { gridDim.x = 2048; if ((numThreads / (32 * 32 * 2048)) > 2048) { cout << "Array is too large" << endl; return 0; } else { gridDim.y = numThreads / (32 * 32 * 2048); } } else { gridDim.x = numThreads / (32 * 32); } } else { blockDim.y = numThreads / 32; } } else { blockDim.x = numThreads; } unsigned int xDim = gridDim.x * blockDim.x; addKernel <<< gridDim, blockDim >>> (aDevice, interval, xDim); cudaDeviceSynchronize(); interval = interval * 2; } auto end = chrono::high_resolution_clock::now(); auto duration = chrono::duration_cast<chrono::milliseconds>(end - start); cout << "Time: " << duration.count() << " ms" << endl; // check for errors during kernel creation cudaError status; status = cudaGetLastError(); if (status != cudaSuccess) { cout << cudaGetErrorString(status) << endl; } // Copy output vector from GPU buffer to host memory. 
cudaMemcpy(aHost, aDevice, sizeof(unsigned int), cudaMemcpyDeviceToHost); // free the memory cudaFree(aDevice); // return the total unsigned int total = aHost[0]; return total; } int main() { vector<unsigned int> a; // breaks at 536870913 since this is 2^29 + 1 so multTwo array length will be rounded up to 2^30 = 1073741824 integers times 4 bytes per integer is 4GB which is all of the available GPU memory for (unsigned int i = 0; i < 536870912; i++) { a.push_back(1); } // Add elements of the vector in parallel. unsigned int total = addWithCuda(&(a[0]), a.size()); cout << total << endl; // cudaDeviceReset must be called before exiting in order for profiling and tracing tools such as Nsight and Visual Profiler to show complete traces. cudaDeviceReset(); return 0; }
1,918
#include <stdio.h>

// Kernel: prints a greeting from the device.
// Device-side printf output is buffered on the GPU and only flushed to the
// host at a synchronization point.
__global__ void outputFromGPU()
{
    printf("Hello World!!! from GPU.\n");
}

int main(void)
{
    printf(":: Ex0 ::\n");
    outputFromGPU<<<1,1>>>();
    // BUG FIX: kernel launches are asynchronous; without a synchronization
    // the process can exit before the device printf buffer is flushed, so
    // the GPU message was never guaranteed to appear.
    cudaDeviceSynchronize();
    printf("Hello World!!! from CPU.\n");
    return 0;
}
1,919
#include "includes.h"

// One Jacobi-style diffusion step on an NX x NX grid stored row-major in
// data2, writing the relaxed field into data1.
// Launch layout: one block per row (blockIdx.x), one thread per column
// (threadIdx.x). Each cell accumulates the differences to its existing
// 4-neighbours (edges have fewer neighbours, tracked by c) and row 0 is
// clamped to 1.0f as a fixed boundary condition.
__global__ void testKernel4r(float *data1, float *data2)
{
    const int idx = NX * blockIdx.x + threadIdx.x;  // flat cell index

    float t = 0.0f;  // sum of neighbour differences
    float c = 0.0f;  // count of neighbours actually present

    if (blockIdx.x > 0) {            // row above
        t += data2[idx - NX] - data2[idx];
        c += 1.0f;
    }
    if (blockIdx.x < NX - 1) {       // row below
        t += data2[idx + NX] - data2[idx];
        c += 1.0f;
    }
    if (threadIdx.x > 0) {           // left neighbour
        t += data2[idx - 1] - data2[idx];
        c += 1.0f;
    }
    if (threadIdx.x < NX - 1) {      // right neighbour
        t += data2[idx + 1] - data2[idx];
        c += 1.0f;
    }

    if (blockIdx.x == 0)
        data1[idx] = 1.0f;  // BUG FIX: was the double literal 1.0 in a float kernel
    else
        data1[idx] = data2[idx] + t / c * DIFF_RATE;
    return;
}
1,920
// Element-wise increment: y[i] = x[i] + 1 for the first num_comp elements.
// Flat 1-D global index with a tail guard, so any launch configuration
// that covers at least num_comp threads is valid.
__global__ void plus_one_kernel(int num_comp, int *y, int *x){
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= num_comp) return;   // threads past the end do nothing
    y[gid] = x[gid] + 1;
}
1,921
#include "math.h"
#include "cuda.h"
#include <iostream>

const int ARRAY_SIZE = 1000;

using namespace std;

// Adds `val` to every element of aArray (length sz).
// Flat 1-D indexing with a tail guard, so any grid covering sz threads works.
__global__ void increment(double *aArray, double val, unsigned int sz)
{
    unsigned int indx = blockIdx.x * blockDim.x + threadIdx.x;
    if (indx < sz)
        aArray[indx] += val;
}

int main(int argc, char **argv)
{
    double *mA;
    cudaMallocManaged(&mA, ARRAY_SIZE * sizeof(double));
    for (int i = 0; i < ARRAY_SIZE; i++)
        mA[i] = 1. * i;

    double inc_val = 2.0;

    // BUG FIX: the launch was hard-coded as <<<2, 512>>>, which silently
    // stops covering the array if ARRAY_SIZE ever grows past 1024.
    // Derive the grid from the problem size instead (ceil-div).
    const int threads = 512;
    const int blocks = (ARRAY_SIZE + threads - 1) / threads;
    increment<<<blocks, threads>>>(mA, inc_val, ARRAY_SIZE);
    cudaDeviceSynchronize();  // managed memory: wait before reading on host

    // Host-side verification: each element should now be i + inc_val.
    double error = 0.;
    for (int i = 0; i < ARRAY_SIZE; i++)
        error += fabs(mA[i] - (i + inc_val));
    cout << "Test: " << (error < 1.E-9 ? "Passed" : "Failed") << endl;

    cudaFree(mA);
    return 0;
}
1,922
/******************************************************************************
* PROGRAM: copyStruture
* PURPOSE: This program is a test which test the ability to transfer multilevel
* C++ structured data from host to device, modify them and transfer back.
*
* NAME: Vuong Pham-Duy.
* College student.
* Faculty of Computer Science and Technology.
* Ho Chi Minh University of Technology, Viet Nam.
* vuongpd95@gmail.com
*
* DATE: 5/10/2017
******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", \
            cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Each block stages its 8 elements of `num` into shared memory.
// Global thread 8 (block 1, thread 0) then prints its block's view of both
// global and shared memory for comparison.
__global__ void func(int *num)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int s_num[8];
    s_num[threadIdx.x] = num[tid];
    __syncthreads();   // shared array fully populated before any reads
    if (tid == 8) {
        for (int i = 0; i < 8; i++) {
            printf("In global: num[%d] = %d | ", tid + i, num[tid + i]);
            printf("In shared: s_num[%d] = %d.\n", threadIdx.x + i, s_num[threadIdx.x + i]);
        }
    }
}

int main(int argc, char *argv[])
{
    // 8 blocks x 8 threads, one int per thread.
    int *num = (int*)malloc(8 * 8 * sizeof(int));
    for (int i = 0; i < 8 * 8; i++) num[i] = i;

    int *d_num;
    gpuErrchk(cudaMalloc(&d_num, 8 * 8 * sizeof(int)));
    gpuErrchk(cudaMemcpy(d_num, num, 8 * 8 * sizeof(int), \
        cudaMemcpyHostToDevice));

    dim3 thread_per_block(8);
    int num_block = 8;
    func<<<num_block, thread_per_block>>>(d_num);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    // BUG FIX: both the device buffer and the host buffer were leaked.
    gpuErrchk(cudaFree(d_num));
    free(num);
    return 0;
}
1,923
#include <iostream>
#include <cuda.h>

/* Publish topic GPU code: each thread writes its own index plus `param`
   into its slot of the topic array. */
__global__ void pub_topic(float *topic, float param)
{
    int i = threadIdx.x; /* find my index */
    topic[i] = i + param;
}

/* Subscribe topic CPU code: copy one topic value back to the host and
   print it. NOTE(review): the caller-supplied index is deliberately
   overwritten with 0, so only topic[0] is ever read — confirm intent. */
void sub_topic(float *topic, int i)
{
    int s = 1;
    i = 0;
    float f = 0.0; /* CPU copy of value */
    cudaMemcpy(&f, &topic[i], sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "subscriber " << s << ":topic[" << i << "] = " << f << "\n";
}

/* CPU code: memory movement, kernel launch and event-based timing. */
int main(int argc, char *argv[])
{
    int i = 0;
    int n = 20;   /* total number of floats */
    float *topic; /* device array of n values */

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    cudaMalloc((void**)&topic, n * sizeof(float)); // Allocate GPU space
    pub_topic<<<1, n>>>(topic, 0.1543); /* Initialize the space on the GPU */
    sub_topic(topic, i);                /* blocking memcpy also syncs the launch */

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "Elapsed time = " << milliseconds << " milliseconds\n";

    // BUG FIX: the device buffer and both timing events were leaked.
    cudaFree(topic);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
1,924
#include "InitializeComponents.cuh"

/**
 * Sets up a neural network with a specified amount of input and output
 * neurons with the same number of hidden nodes per layer and the same
 * activations for each neuron
 * Parameter layers: the amount of layers in the neural net
 * Parameter inputNeurons: the number of neurons for the input layer
 * Parameter hiddenNeurons: the number of neurons in each hidden layer
 * Parameter outputNeurons: the number of neurons for the output layer
 * Parameter fullActivations: the activations for the entire neural
 * network - this is the same for every neuron
 * Returns: a neural network with the specified attributes
 */
NeuralNet* setupMonotonicNeuralNet(int layers, int inputNeurons, int hiddenNeurons, int outputNeurons, activation fullActivations){
    int* neurons=(int*)calloc(layers, sizeof(int));
    activation** activations=(activation**)calloc(layers-1, sizeof(activation*));
    for(int layer=0; layer<layers; layer++){
        if(layer==0){
            // The input layer has a width but no incoming activation row
            neurons[layer]=inputNeurons;
        }
        else{
            // Hidden layers and the output layer differ only in width,
            // so the duplicated branches were merged.
            int width=(layer==layers-1) ? outputNeurons : hiddenNeurons;
            neurons[layer]=width;
            activations[layer-1]=(activation*)calloc(width, sizeof(activation));
            for(int neuron=0; neuron<width; neuron++){
                activations[layer-1][neuron]=fullActivations;
            }
        }
    }
    return createNeuralNet(layers, neurons, activations);
}

/**
 * Creates two fresh monotonic networks with identical shape and starts a
 * training match between them, persisting progress into file1/file2.
 */
void setupGame(int layers, int inputNeurons, int hiddenNeurons, int outputNeurons, activation activations, int playerColor, char* file1, char* file2){
    NeuralNet* nn1=setupMonotonicNeuralNet(layers, inputNeurons, hiddenNeurons, outputNeurons, activations);
    NeuralNet* nn2=setupMonotonicNeuralNet(layers, inputNeurons, hiddenNeurons, outputNeurons, activations);
    train(nn1, nn2, playerColor, file1, file2);
}

/**
 * Restores both networks from their serialized files and resumes training.
 */
void setupPausedGame(int playerColor, char* file1, char* file2){
    printf("Deserializing the neural networks\n");
    // BUG FIX: removed an 8-byte calloc'd buffer that was never used or freed
    NeuralNet* nn1=deserializeNeuralNet(file1);
    NeuralNet* nn2=deserializeNeuralNet(file2);
    train(nn1, nn2, playerColor, file1, file2);
}

/**
 * Interactive setup: asks whether to resume a saved game, whether the human
 * plays and as which color, then dispatches to setupPausedGame or setupGame.
 * playerColor stays -1 when no human plays, 0 for white, 1 for black.
 */
void getSetup(int layers, int inputNeurons, int hiddenNeurons, int outputNeurons, activation activations, char* file1, char* file2){
    int resume=0;
    int playerColor=-1;
    char* buffer=(char*)calloc(80, sizeof(char));
    do{
        printf("Do you want to resume a game?[y/n]\n");
        fgets(buffer, 78, stdin);
    } while(strcmp(buffer, "y\n\0")!=0 && strcmp(buffer, "n\n\0")!=0);
    if(strcmp(buffer, "y\n\0")==0){
        resume=1;
    }
    do{
        printf("Do you want to play?[y/n]\n");
        fgets(buffer, 78, stdin);
        // CONSISTENCY FIX: the second test relied on the implicit truthiness
        // of strcmp; made the !=0 explicit to match the other loops.
    } while(strcmp(buffer, "y\n\0")!=0 && strcmp(buffer, "n\n\0")!=0);
    if(strcmp(buffer, "y\n\0")==0){
        do{
            printf("What color do you want to play as? [w/b]\n");
            fgets(buffer, 78, stdin);
        }while(strcmp(buffer, "w\n\0")!=0 && strcmp(buffer, "b\n\0")!=0);
        if(strcmp(buffer, "w\n\0")==0){
            playerColor=0;
        }
        else{
            playerColor=1;
        }
    }
    free(buffer);
    if(resume){
        setupPausedGame(playerColor, file1, file2);
    }
    else{
        setupGame(layers, inputNeurons, hiddenNeurons, outputNeurons, activations, playerColor, file1, file2);
    }
}
1,925
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"

// Writes 2.4f at index 2*tid for every thread except thread 0, which
// writes at index 1 instead (so index 0 is never touched).
__global__ void foo(float* A) {
    unsigned int dst;
    if (threadIdx.x == 0) {
        dst = 1;
    } else {
        dst = 2 * threadIdx.x;
    }
    A[dst] = 2.4f;
}
1,926
#include "includes.h"

// One thread per CSR row: computes
//   out_vector[row] = prod_i (1 - M[row, col(i)] * in_vector[col(i)])
// over the row's nonzeros. cum_row_indexes holds the CSR row offsets
// (length outerdim + 1); rows past outerdim are guarded out.
__global__ void negative_prob_multiply_csr_matrix_vector_kernel(unsigned int* cum_row_indexes, unsigned int* column_indexes, float* matrix_data, float* in_vector, float* out_vector, unsigned int outerdim)
{
    unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < outerdim) {
        // BUG FIX: the 1.0 double literals forced every multiply through
        // double precision in a float kernel; use float literals. The loop
        // index is now unsigned to match the CSR offsets' type.
        float prob = 1.0f;
        unsigned int row_start = cum_row_indexes[row];
        unsigned int row_end = cum_row_indexes[row + 1];
        for (unsigned int i = row_start; i < row_end; i++) {
            prob *= 1.0f - (matrix_data[i] * in_vector[column_indexes[i]]);
        }
        out_vector[row] = prob;
    }
}
1,927
#include "includes.h"

// Fills the first num_vals entries of d_vals with `value`.
// One element per thread; threads past the end exit.
__global__ void arraySet_kernel(unsigned int* d_vals, unsigned int value, size_t num_vals)
{
    // BUG FIX: the flat index was computed in 32-bit unsigned arithmetic
    // while num_vals is size_t, so blockIdx.x * blockDim.x could wrap for
    // arrays past 2^32 elements. Compute the index in size_t.
    const size_t gIdx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (gIdx < num_vals)
        d_vals[gIdx] = value;
}
1,928
/*
Vinh Le
CSCI 440 - Parallel Computing
Homework 2.1 - count ones in matrix
Colorado School of Mines
2018
*/
#include <stdio.h>

// Counts the 1s in `in` using a block-shared counter.
// Launch contract: a single block whose thread count equals the element
// count (so this scheme handles at most 1024 elements).
__global__ void countones(int *in, int *out)
{
    __shared__ int temp;   // block-wide counter of ones
    unsigned int tid = threadIdx.x;

    // BUG FIX: shared memory is uninitialized; the original atomicAdd
    // accumulated onto garbage. Zero the counter, then barrier before use.
    if (tid == 0) temp = 0;
    __syncthreads();

    if (in[tid] == 1) {
        atomicAdd(&temp, 1);
    }
    __syncthreads();

    // BUG FIX: publish the result from one thread instead of every thread
    // racing to write *out.
    if (tid == 0) *out = temp;
}

int main(int argc, char *argv[]){
    FILE *file = fopen(argv[1], "r");
    if (file == NULL) {             // robustness: bad/missing input path
        fprintf(stderr, "Cannot open input file\n");
        return 1;
    }
    int row, col;
    fscanf(file, "%d", &row);
    fscanf(file, "%d", &col);
    int size = row * col * sizeof(int);
    int *in, *out; // host copies in and out
    in = (int *)malloc(size);
    out = (int *)malloc(sizeof(int));
    for (int i = 0; i < row*col; i++) {
        fscanf(file, "%d", &in[i]);
    }
    fclose(file);

    int *d_in, *d_out; // device copies
    cudaMalloc((void **)&d_in, size);
    cudaMalloc((void **)&d_out, sizeof(int));

    // Copy inputs to device
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);

    // NOTE(review): a single block caps the matrix at 1024 elements;
    // larger inputs need a multi-block reduction.
    countones <<<1, row*col>>> (d_in, d_out);

    // Copy result back to host (blocking copy also synchronizes the launch)
    cudaMemcpy(out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("There are %d ones.\n", *out);

    // Cleanup
    free(in);
    free(out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
1,929
#include <iostream>
#include <cmath>
#include <ctime>
using namespace std;

// Naive GEMM: C(m x k) = A(m x n) * B(n x k), row-major, one thread per
// output element, guarded against the grid overhanging the matrix edge.
__global__ void MatrixMulKernel(int m, int n, int k, float *A, float *B, float *C)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((Row < m) && (Col < k)) {
        float Cvalue = 0.0;
        for (int i = 0; i < n; ++i)
            Cvalue += A[Row * n + i] * B[Col + i * k];
        C[Row * k + Col] = Cvalue;
    }
}

#define TILE_WIDTH 16

int main()
{
    // Matrices stored row-major as flat 1-D arrays.
    int m = 4096, n = 4096, k = 4096;
    float *A = (float *)malloc(m * n * sizeof(float));
    float *B = (float *)malloc(n * k * sizeof(float));
    float *C = (float *)malloc(m * k * sizeof(float));
    float *result = (float *)malloc(m * k * sizeof(float));

    // BUG FIX: the original init loops indexed every matrix with i*m+j and
    // bounded both loops by m, which is only correct because m == n == k.
    // Index each matrix by its own dimensions so changing one size alone
    // stays correct. Values are unchanged for the square case.
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            A[i * n + j] = (i - 0.1 * j + 1) / (i + j + 1);
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < k; ++j)
            B[i * k + j] = (j - 0.2 * i + 1) * (i + j + 1) / (i * i + j * j + 1);
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < k; ++j)
            C[i * k + j] = 0.0;

    // Device buffers
    int size = sizeof(float);
    float *d_a;
    float *d_b;
    float *d_c;

    // GPU timing start (includes allocation and transfers, as before).
    cudaEvent_t start, stop;
    float elapsedTime = 0.0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    cudaMalloc((void **)&d_a, m * n * size);
    cudaMalloc((void **)&d_b, n * k * size);
    cudaMalloc((void **)&d_c, m * k * size);

    // Host -> device
    cudaMemcpy(d_a, A, size * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, size * n * k, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, C, size * m * k, cudaMemcpyHostToDevice);

    // Grid covers the k x m output with TILE_WIDTH x TILE_WIDTH blocks.
    dim3 dimGrid((k - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(m, n, k, d_a, d_b, d_c);

    // Device -> host (blocking copy also synchronizes the launch)
    cudaMemcpy(C, d_c, size * m * k, cudaMemcpyDeviceToHost);

    // GPU timing end
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "GPU time: " << elapsedTime << " ms" << endl;

    // CPU reference. BUG FIX: the inner loop variable was named `k`,
    // shadowing the matrix dimension k (harmless only because m == k);
    // renamed to t and bounded by n as the contraction dimension.
    clock_t begin = clock();
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            float sum = 0;
            for (int t = 0; t < n; ++t)
                sum += A[i * n + t] * B[t * k + j];
            result[i * k + j] = sum;
        }
    }
    clock_t end = clock();
    cout << "CPU time: " << (end - begin) * 1000 / CLOCKS_PER_SEC << " ms" << endl;

    // Compare. BUG FIX: bare abs() on a float difference can bind to the
    // integer overload and truncate; use fabs from <cmath>.
    bool flag = true;
    for (int i = 0; i < m * k; ++i) {
        if (fabs(result[i] - C[i]) > 0.001) {
            flag = false;
            cout << result[i] << "-" << C[i] << endl;
        }
    }
    if (flag) cout << "Check answer: Correct!" << endl;
    else cout << "Check answer: Error!" << endl;

    // Release device and host resources (events were leaked before).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(A);
    free(B);
    free(C);
    free(result);
    return 0;
}
1,930
//pass
//--blockDim=2 --gridDim=2 --no-inline
#include <cuda.h>

// 64-byte-aligned record holding the ids of the thread that wrote it.
typedef struct __align__(64) {
    unsigned int tid, bid;
} pair;

// Each thread records its own threadIdx/blockIdx into its slot of A.
__global__ void align_test (pair* A)
{
    const int t = threadIdx.x;
    const int b = blockIdx.x;
    pair* slot = &A[blockDim.x * b + t];
    slot->tid = t;
    slot->bid = b;
}
1,931
#include "includes.h"

/* Modified from https://github.com/zhxfl/CUDA-CNN */
// In-place transpose of a SQUARE matrix (requires rows == cols: for a
// rectangular matrix, A[j * cols + i] does not address element (j, i)).
// Launch layout: 2-D grid, x -> column, y -> row, guarded at the edges.
//
// BUG FIX: the original let both thread (i, j) and its mirror thread (j, i)
// perform the same unsynchronized swap concurrently — a data race that could
// exchange a pair twice or interleave the reads and writes. Restricting the
// swap to the strict upper triangle (i < j) exchanges each off-diagonal pair
// exactly once; diagonal elements need no work.
__global__ void matrixTransKernel(float *A, int rows, int cols)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (j >= cols || i >= rows)
        return;
    if (i >= j)
        return;  // mirror thread handles (j, i); diagonal is a no-op
    float tmp = A[i * cols + j];
    A[i * cols + j] = A[j * cols + i];
    A[j * cols + i] = tmp;
}
1,932
#include "FluidGPU.cuh" #include <cmath> #include <cuda_runtime.h> #include <iostream> #include <thrust/sort.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include <cuda.h> float kernel(float r) { if (r >= 0 && r <= cutoff) { return 1. / 3.14159 / (powf(cutoff, 3))*(1 - 3. / 2. * powf((r / cutoff), 2) + 3. / 4. * powf((r / cutoff), 3)); } else if (r > cutoff && r < (2 * cutoff)) { return 1. / 3.14159 / (powf(cutoff, 3)) * 1 / 4. * powf(2 - (r / cutoff), 3); } else { return 0; } } float kernel_test(float r) { if (r >= 0 && r <= cutoff) { return 1. / 3.14159 / (powf(cutoff, 4))*(1 - 3. * powf((r / cutoff), 1) + 9. / 4. * powf((r / cutoff), 2)); } else if (r > cutoff && r < (2 * cutoff)) { return -1. / 3.14159 / (powf(cutoff, 4)) * 1 / 2. * powf(2 - (r / cutoff), 2); } else { return 0; } } float kernel_derivative(float r) { if (r < cutoff) { return -45.0 / 3.14159 / powf(cutoff, 6)*powf((cutoff - r), 2); } else { return 0; } } //Dot product inline float dot_prod(float x1, float y1, float z1, float x2, float y2, float z2) { return x1*x2 + y1*y2 + z1*z2; } //Cross products inline float cross_prod_x(float x1, float y1, float z1, float x2, float y2, float z2) { return y1*z2 - z1*y2; } inline float cross_prod_y(float x1, float y1, float z1, float x2, float y2, float z2) { return -x1*z2 + z1*x2; } inline float cross_prod_z(float x1, float y1, float z1, float x2, float y2, float z2) { return x1*y2 - y1*x2; } __device__ int morton(unsigned int x, unsigned int y, unsigned int z) { //int x = (bidx / GRIDSIZE / GRIDSIZE); //int y = (bidx / GRIDSIZE % GRIDSIZE); //int z = (bidx % GRIDSIZE); x = (x | (x << 16)) & 0x030000FF; x = (x | (x << 8)) & 0x0300F00F; x = (x | (x << 4)) & 0x030C30C3; x = (x | (x << 2)) & 0x09249249; y = (y | (y << 16)) & 0x030000FF; y = (y | (y << 8)) & 0x0300F00F; y = (y | (y << 4)) & 0x030C30C3; y = (y | (y << 2)) & 0x09249249; z = (z | (z << 16)) & 0x030000FF; z = (z | (z << 8)) & 0x0300F00F; z = (z | 
(z << 4)) & 0x030C30C3; z = (z | (z << 2)) & 0x09249249; return x | (y << 1) | (z << 2); } __device__ inline int demorton(unsigned int x, int b) { //b should be 0 for x, 1 for y, 2 for z switch (b) { case 0: break; case 1: x = (x >> 1); break; case 2: x = (x >> 2); break; } x &= 0x09249249; // x = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0 x = (x | (x >> 2)) & 0x030c30c3; // x = ---- --98 ---- 76-- --54 ---- 32-- --10 x = (x | (x >> 4)) & 0x0300f00f; // x = ---- --98 ---- ---- 7654 ---- ---- 3210 x = (x | (x >> 8)) & 0xff0000ff; // x = ---- --98 ---- ---- ---- ---- 7654 3210 x = (x | (x >> 16)) & 0x000003ff; // x = ---- ---- ---- ---- ---- --98 7654 3210 return x; } __global__ void findneighbours(int *cell, int *start, int *end, int nspts) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < nspts) { if (cell[idx] != cell[idx - 1] || idx == 0) { start[cell[idx]] = idx; } if (cell[idx] != cell[idx + 1] || idx == nspts-1) { end[cell[idx]] = idx; } } } __global__ void mykernel(Particle *SPptr, int *cell, int *start, int *end, int nspts) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int bidx = blockIdx.x; int tidx = threadIdx.x; int nb[27] = { -GRIDSIZE*GRIDSIZE - GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE - GRIDSIZE,-GRIDSIZE*GRIDSIZE - GRIDSIZE + 1, -GRIDSIZE*GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE, -GRIDSIZE*GRIDSIZE + 1, -GRIDSIZE*GRIDSIZE + GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE + GRIDSIZE, -GRIDSIZE*GRIDSIZE + GRIDSIZE + 1, -GRIDSIZE - 1, -GRIDSIZE,-GRIDSIZE + 1, -1, 0, +1, GRIDSIZE - 1, GRIDSIZE, GRIDSIZE + 1, GRIDSIZE*GRIDSIZE - GRIDSIZE - 1, GRIDSIZE*GRIDSIZE - GRIDSIZE,GRIDSIZE*GRIDSIZE - GRIDSIZE + 1, GRIDSIZE*GRIDSIZE - 1, GRIDSIZE*GRIDSIZE, GRIDSIZE*GRIDSIZE + 1, GRIDSIZE*GRIDSIZE + GRIDSIZE - 1, GRIDSIZE*GRIDSIZE + GRIDSIZE, GRIDSIZE*GRIDSIZE + GRIDSIZE + 1 }; //__shared__ int nb[27]; //if (tidx < 27) { // int x = demorton(bidx, 0); // int y = demorton(bidx, 1); // int z = demorton(bidx, 2); // nb[tidx] = morton(x + tidx/9-1, y + (tidx/3)%3-1, z + tidx%3-1); //} 
//__syncthreads(); __shared__ short int p[27]; __shared__ short int pidx[27]; int __shared__ sum[64];// = 0; int __shared__ jj[64];// = 0; volatile __shared__ int total; volatile __shared__ int blockpop; if (idx < 64) { sum[idx] = 650; jj[idx] = 650; } __syncthreads(); //__shared__ short int sum[27]; //__shared__ short int j[27]; //if (idx <nspts) { printf("%d, %d \n", idx, SPptr[idx].cellnumber); } if (tidx < 27) { p[tidx] = 0; } if (start[bidx] >= 0) { //if (bidx == 0) { printf("%d\n", start[bidx]); } ///////////count and sort population of neighbour cells////////////// if (tidx < 27 && bidx+ nb[tidx] >= 0 && bidx + nb[tidx] < NUMCELLS && start[bidx + nb[tidx]] >= 0 && end[bidx + nb[tidx]] >= 0 && start[bidx + nb[tidx]] < nspts && 1 + end[bidx + nb[tidx]] - start[bidx + nb[tidx]] > 0 ) { p[tidx] = 1 + end[bidx + nb[tidx]] - start[bidx + nb[tidx]]; //count population of neighbour cells so we know how many threads to use pidx[tidx] = tidx; } if (tidx == 13) { blockpop = p[tidx]; } } else { if (tidx == 13) { blockpop = 0; } } __syncthreads(); //if (bidx == 21641 && tidx==0) { printf("%d %d %d \n", p[13], nb[13], start[nb[13]]); } if (start[bidx] >= 0) { if (tidx == 0) { total = 0; for (int i = 0; i < 27; i++) { if (p[i] < 64 && p[i]>0 && bidx + nb[i] >= 0 && bidx + nb[i] < NUMCELLS && start[bidx + nb[i]] >= 0 && end[bidx + nb[i]] >= 0 && start[bidx + nb[i]] < nspts) { total += p[i]; } } } } else { if (tidx == 0) {total = 0; } } __syncthreads(); if (start[bidx] >= 0) { if (tidx == 0) { int count = 0; for (int i = 0; i < 27; i++) { if (p[i] != 0) { p[count++] = p[i]; pidx[count - 1] = pidx[i]; //sort } } while (count < 27) { p[count++] = 0; //need to reset popidx in a future kernel pidx[count - 1] = 0; } } } __syncthreads(); if (start[bidx] >= 0) { if (tidx < total) { sum[tidx] = 0; jj[tidx] = 0; while (tidx + 1 > sum[tidx]) { sum[tidx] += p[jj[tidx]]; jj[tidx]++; } } } __syncthreads(); //if (bidx== 34624 && tidx < total) { printf("tidx: %d, cell#:%d, jj:%d, sum:%d 
\n", tidx, bidx + nb[pidx[jj[tidx] - 1]], jj[tidx],p[jj[tidx]]); } //__syncthreads(); // __shared__ float k[8]; // __shared__ float rabx[8]; // __shared__ float raby[8]; // __shared__ float rabz[8]; // __shared__ float vabx[8]; // __shared__ float vaby[8]; //__shared__ float vabz[8]; if (start[bidx] >= 0) { if (tidx < total && bidx + nb[pidx[jj[tidx] - 1]] >= 0 && bidx + nb[pidx[jj[tidx] - 1]] < NUMCELLS) { /////////////////////////////////////////////////////////////////// volatile int j = start[bidx + nb[pidx[jj[tidx] - 1]]] + sum[tidx] - (tidx + 1); if (start[bidx + nb[pidx[jj[tidx] - 1]]] >= 0 && j < nspts && j >= 0) { //int i = start[bidx] + tidx / total; for (volatile int i = start[bidx]; i <= end[bidx]; i++) { float ds = (SPptr[i]).distance((SPptr[j])); if (ds <= (2 * cutoff) && ds > 0) { volatile float k = kernel(ds); volatile float rabx = (SPptr[i]).rab_x((SPptr[j])); volatile float raby = (SPptr[i]).rab_y((SPptr[j])); volatile float rabz = (SPptr[i]).rab_z((SPptr[j])); volatile float vabx = (SPptr[i]).vab_x((SPptr[j])); volatile float vaby = (SPptr[i]).vab_y((SPptr[j])); volatile float vabz = (SPptr[i]).vab_z((SPptr[j])); volatile float dkx = kernel_derivative(ds)*rabx / ds; volatile float dky = kernel_derivative(ds)*raby / ds; volatile float dkz = kernel_derivative(ds)*rabz / ds; //float dkxtest = kernel_test(ds)*rabx / ds; //float dkytest = kernel_test(ds)*raby / ds; //float dkztest = kernel_test(ds)*rabz / ds; volatile float d = dot_prod(vabx, vaby, vabz, rabx, raby, rabz); volatile float d2 = powf(ds, 2); volatile float s = (ALPHA_FLUID * SOUND * (cutoff * (d / (d2 + 0.01*powf(cutoff, 2))) + 50 * 1.0 / SOUND*powf(cutoff * (d / (d2 + 0.01*powf(cutoff, 2))), 2)) / (((SPptr[i]).dens + (SPptr[j]).dens) / 2.0)) *(d < 0)*(1 + (!(SPptr[i]).boundary)*((SPptr[j]).boundary) * ALPHA_BOUNDARY); //float s2 = ALPHA_LAMINAR_FLUID * SOUND * cutoff / ((SPptr[i]).dens + (SPptr[j]).dens)*d*(d < 0) / (d2 + 0.01*pow(cutoff, 2))*(1 + 
(!(SPptr[i]).boundary)*((SPptr[j]).boundary) *ALPHA_LAMINAR_BOUNDARY); //laminar volatile float dpx = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dkx; volatile float dpy = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dky; volatile float dpz = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dkz; //(SPptr[i]).vel_grad[0][0] += -vabx*dkxtest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[0][1] += -vaby*dkxtest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[0][2] += -vabz*dkxtest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[1][0] += -vabx*dkytest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[1][1] += -vaby*dkytest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[1][2] += -vabz*dkytest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[2][0] += -vabx*dkztest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[2][1] += -vaby*dkztest / (SPptr[i]).dens; //(SPptr[i]).vel_grad[2][2] += -vabz*dkztest / (SPptr[i]).dens; ///(SPptr[i]).stress_accel[0] += ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2); ///(SPptr[i]).stress_accel[1] += ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2); ///(SPptr[i]).stress_accel[2] += ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + 
(SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2); atomicAdd(&(SPptr[i].newdens), k *(1 + float(!(SPptr[i]).boundary)*float((SPptr[j]).boundary)*BDENSFACTOR)); atomicAdd(&(SPptr[i].newdelpressx), dpx); atomicAdd(&(SPptr[i].newdelpressy), dpy); atomicAdd(&(SPptr[i].newdelpressz), dpz); __syncthreads(); } } } } } /* float tempdens = 0; float tempdelpressx = 0; float tempdelpressy = 0; float tempdelpressz = 0; //float tempdiffusionx = 0; //float tempdiffusiony = 0; //float tempdiffusionz = 0; if (idx<nspts){ for (int i = 0; i < nspts; i++) { //if (idx != i && SPptr[idx].cellnumber == SPptr[i].cellnumber) { printf("%d, %d, %d \n", SPptr[idx].cellnumber, SPptr[i].cellnumber,neighbours[SPptr[idx].cellnumber*nspts + i]); } if (neighbours[SPptr[idx].cellnumber*nspts + i]) { //printf("%d, %d \n", SPptr[idx].cellnumber, SPptr[i].cellnumber); float ds = (SPptr[idx]).distance((SPptr[i])); if (ds <= (2 * cutoff) && ds > 0) { float k = kernel(ds); float rabx = (SPptr[idx]).rab_x((SPptr[i])); float raby = (SPptr[idx]).rab_y((SPptr[i])); float rabz = (SPptr[idx]).rab_z((SPptr[i])); float vabx = (SPptr[idx]).vab_x((SPptr[i])); float vaby = (SPptr[idx]).vab_y((SPptr[i])); float vabz = (SPptr[idx]).vab_z((SPptr[i])); float dkx = kernel_derivative(ds)*rabx / ds; float dky = kernel_derivative(ds)*raby / ds; float dkz = kernel_derivative(ds)*rabz / ds; float dkxtest = kernel_test(ds)*rabx / ds; float dkytest = kernel_test(ds)*raby / ds; float dkztest = kernel_test(ds)*rabz / ds; float d = dot_prod(vabx, vaby, vabz, rabx, raby, rabz); float d2 = pow(ds, 2); float s = (ALPHA_FLUID * SOUND * (cutoff * (d / (d2 + 0.01*pow(cutoff, 2))) + 50 * 1.0 / SOUND*pow(cutoff * (d / (d2 + 0.01*pow(cutoff, 2))), 2)) / (((SPptr[idx]).dens + (SPptr[i]).dens) / 2.0)) *(d < 0)*(1 + (!(SPptr[idx]).boundary)*((SPptr[i]).boundary) * ALPHA_BOUNDARY); float s2 = ALPHA_LAMINAR_FLUID * SOUND * cutoff / ((SPptr[idx]).dens + (SPptr[i]).dens)*d*(d < 0) / (d2 + 0.01*pow(cutoff, 2))*(1 + 
(!(SPptr[idx]).boundary)*((SPptr[i]).boundary) *ALPHA_LAMINAR_BOUNDARY); //laminar float dpx = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dkx; float dpy = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dky; float dpz = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dkz; //(SPptr[index]).vel_grad[0][0] += -vabx*dkxtest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[0][1] += -vaby*dkxtest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[0][2] += -vabz*dkxtest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[1][0] += -vabx*dkytest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[1][1] += -vaby*dkytest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[1][2] += -vabz*dkytest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[2][0] += -vabx*dkztest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[2][1] += -vaby*dkztest / (SPptr[i]).dens; //(SPptr[index]).vel_grad[2][2] += -vabz*dkztest / (SPptr[i]).dens; ///(SPptr[index]).stress_accel[0] += ((SPptr[index]).stress_tensor[0][0] * dkxtest + (SPptr[index]).stress_tensor[0][1] * dkytest + (SPptr[index]).stress_tensor[0][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2); ///(SPptr[index]).stress_accel[1] += ((SPptr[index]).stress_tensor[1][0] * dkxtest + (SPptr[index]).stress_tensor[1][1] * dkytest + (SPptr[index]).stress_tensor[1][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2); ///(SPptr[index]).stress_accel[2] += ((SPptr[index]).stress_tensor[2][0] * dkxtest + (SPptr[index]).stress_tensor[2][1] * dkytest + (SPptr[index]).stress_tensor[2][2] * dkztest) / pow((SPptr[index]).dens, 2) + 
((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2); tempdens += k*(1 + float(!(SPptr[idx]).boundary)*float((SPptr[i]).boundary)*BDENSFACTOR); tempdelpressx += dpx; tempdelpressy += dpy; tempdelpressz += dpz; ///tempdiffusionx += 1 / (SPptr[i]).dens*dkx; ///tempdiffusiony += 1 / (SPptr[i]).dens*dky; ///tempdiffusionz += 1 / (SPptr[i]).dens*dkz; } } } (SPptr[idx]).newdens = (tempdens); (SPptr[idx]).newdelpressx = tempdelpressx; (SPptr[idx]).newdelpressy = tempdelpressy; (SPptr[idx]).newdelpressz = tempdelpressz; //(SPptr[idx]).diffusionx = tempdiffusionx; //(SPptr[idx]).diffusiony = tempdiffusiony; //(SPptr[idx]).diffusionz = tempdiffusionz; /*if ((SPptr[index]).solid) { float tr = 0; //trace of strain rate float tr2 = 0; //trace of stress tensor float tr3 = 0; //double dot of stress tensor float tr4 = 0; //trace of stress tensor times strain rate float tr5 = 0; //double dot of strain rate for (int p = 0; p < 3; p++) { for (int q = 0; q < 3; q++) { (SPptr[index]).strain_rate[p][q] = 0.5*((SPptr[index]).vel_grad[p][q] + (SPptr[index]).vel_grad[q][p]); (SPptr[index]).stress_tensor_squared[p][q] = pow((SPptr[index]).stress_tensor[p][q], 2); tr3 += 0.5*(SPptr[index]).stress_tensor_squared[p][q]; (SPptr[index]).strain_rate_squared[p][q] = pow((SPptr[index]).strain_rate[p][q], 2); tr5 += (SPptr[index]).strain_rate_squared[p][q]; tr4 += (SPptr[index]).stress_tensor[p][q] * (SPptr[index]).strain_rate[q][p]; } tr += (SPptr[index]).strain_rate[p][p]; tr2 += (SPptr[index]).stress_tensor[p][p]; } // std::cout << (SPptr[index]).press << "\n"; for (int p = 0; p < 3; p++) { for (int q = 0; q < 3; q++) { if (3 * tan(PHI) / (sqrt(9 + 12 * pow(tan(PHI), 2)))*(SPptr[index]).press + KC / (sqrt(9 + 12 * pow(tan(PHI), 2))) < tr3 && tr3 != 0) { (SPptr[index]).stress_tensor[p][q] *= (3 * tan(PHI) / (sqrt(9 + 12 * pow(tan(PHI), 2)))*(SPptr[index]).press + KC / (sqrt(9 + 12 * pow(tan(PHI), 
2)))) / tr3; } (SPptr[index]).stress_rate[p][q] = 3 * C1*((SPptr[index]).press)*((SPptr[index]).strain_rate[p][q] - 1. / 3.*tr*(p == q)) + C1*C2*(tr4 + tr*(SPptr[index]).press) / (pow((SPptr[index]).press, 2) + 1e8)*(SPptr[index]).stress_tensor[p][q] - C1*C3*sqrt(tr5)*(SPptr[index]).stress_tensor[p][q]; //std::cout << tr4 << ", " << tr*(SPptr[index]).press << "\n"; } } }*/ //} __syncthreads(); } __global__ void mykernel2(Particle *SPptr, int *cells, int *start, int *end, int nspts, float *spts, float *a3, float *b3) { int index = blockIdx.x * blockDim.x + threadIdx.x; int bidx = blockIdx.x; int tidx = threadIdx.x; if (index < nspts) { if (!(SPptr[index]).flag) { spts[(3 * index)] = (SPptr[index]).xcoord; spts[(3 * index) + 1] = (SPptr[index]).ycoord; spts[(3 * index) + 2] = (SPptr[index]).zcoord; a3[index] = ((SPptr[index])).dens; b3[index] = SPptr[index].cellnumber; } (SPptr[index]).update(); (SPptr[index]).cellnumber = int((SPptr[index].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[index].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[index].zcoord - ZMIN) / CELLSIZE); //SPptr[index].cellnumber = morton(int((SPptr[index].xcoord - XMIN) / CELLSIZE), int((SPptr[index].ycoord - YMIN) / CELLSIZE), int((SPptr[index].zcoord - ZMIN) / CELLSIZE)); cells[index] = SPptr[index].cellnumber; SPptr[index].newdens = 0; SPptr[index].newdelpressx = 0; SPptr[index].newdelpressy = 0; SPptr[index].newdelpressz = 0; } if (index < NUMCELLS) { start[index] = -1; end[index] = -1; } __syncthreads(); }
1,933
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCKSIZE 32
#define NUM char

// Naive GEMM: C = A * B for n x n matrices, one thread per output element.
// The grid may be padded: threads past the matrix edge exit early.
__global__ void MatMul(int n, NUM *a, NUM *b, int *c){
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    // fix: guard the grid tail so n need not be a multiple of BLOCKSIZE
    if (tidx >= n || tidy >= n) return;
    int sum = 0;
    for (int i = 0; i < n; i++)
        sum += a[tidy * n + i] * b[i * n + tidx];
    c[tidy * n + tidx] = sum;
}

// Tiled GEMM using shared memory.
// PRECONDITIONS: n % BLOCKSIZE == 0 (tile loads are not guarded) and c must be
// zero-initialised, because the result is accumulated into it.
// The char tiles use a 4-byte element stride to spread shared-memory bank use.
__global__ void SharedMatMul(int n, NUM *a, NUM *b, int *c){
    __shared__ NUM as[BLOCKSIZE*BLOCKSIZE*4];
    __shared__ NUM bs[BLOCKSIZE*BLOCKSIZE*4];
    __shared__ int cs[BLOCKSIZE*BLOCKSIZE];
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    int lx = threadIdx.x;
    int ly = threadIdx.y;
    cs[ly * BLOCKSIZE + lx] = 0;
    for (int i = 0; i < n; i += BLOCKSIZE) {
        as[4*(ly * BLOCKSIZE + lx)] = a[tidy * n + (lx + i)];
        bs[4*(ly * BLOCKSIZE + lx)] = b[(ly + i) * n + tidx];
        __syncthreads();                       // tiles loaded before use
        for (int j = 0; j < BLOCKSIZE; j++)
            cs[BLOCKSIZE * ly + lx] += as[4*(BLOCKSIZE * ly + j)] * bs[4*(BLOCKSIZE * j + lx)];
        __syncthreads();                       // tiles consumed before reload
    }
    c[tidy * n + tidx] += cs[BLOCKSIZE * ly + lx];
}

// Fill an n x n matrix with values in {-1, 0, 1}.
__host__ void fillMatrix(NUM *a, int n){
    for (int i = 0; i < n * n; i++)
        a[i] = rand() % 3 - 1;
}

// Zero an n x n int matrix.
__host__ void fillMatrixZeros(int *a, int n){
    for (int i = 0; i < n * n; i++)
        a[i] = 0;
}

// CPU reference GEMM (accumulates into c).
__host__ void hostMatMul(int n, NUM *a, NUM *b, int *c){
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            int val = c[i*n + j];
            for (int k = 0; k < n; ++k)
                val += a[i*n+k] * b[k*n + j];
            c[i*n + j] = val;
        }
    }
}

// Element-wise comparison of two n x n int matrices; reports mismatches.
__host__ int verify(int n, int *a, int *b){
    int cont = 0;
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            cont++;
            if (a[i*n+j] != b[i*n+j]) {
                printf("FALLO %d\t%d!=%d\n", cont, a[i*n+j], b[i*n+j]);
                //return 1;
            }
        }
    }
    return 0;
}

// Debug helper: print an n x n int matrix.
__host__ void printmat(int *a, int n, const char *name){
    printf("mat %s:\n", name);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j)
            printf("%i ", a[i*n + j]);
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char *argv[]){
    if (argc != 3) {
        // fix: both the size and the RNG seed are required
        printf("Ejecute como ./prog N seed\n");
        return EXIT_FAILURE;
    }
    int n = atoi(argv[1]);
    srand(atoi(argv[2]));

    NUM *a, *b, *a_d, *b_d;
    int *c, *d, *c_d, *d_d, *matmul_simple, *matmul_shared;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float ms = 0;

    a = (NUM*)malloc(sizeof(NUM)*n*n);
    b = (NUM*)malloc(sizeof(NUM)*n*n);
    c = (int*)malloc(sizeof(int)*n*n);
    d = (int*)malloc(sizeof(int)*n*n);
    matmul_simple = (int*)malloc(sizeof(int)*n*n);
    matmul_shared = (int*)malloc(sizeof(int)*n*n);
    fillMatrix(a, n);
    fillMatrix(b, n);

    cudaMalloc(&a_d, sizeof(NUM)*n*n);
    cudaMalloc(&b_d, sizeof(NUM)*n*n);
    cudaMalloc(&c_d, sizeof(int)*n*n);
    cudaMalloc(&d_d, sizeof(int)*n*n);
    cudaMemcpy(a_d, a, sizeof(NUM) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, sizeof(NUM) * n * n, cudaMemcpyHostToDevice);
    // fix: the originals copied *uninitialised* host buffers up; zero the
    // device results instead (SharedMatMul accumulates, so this matters)
    cudaMemset(c_d, 0, sizeof(int) * n * n);
    cudaMemset(d_d, 0, sizeof(int) * n * n);

    dim3 block(BLOCKSIZE, BLOCKSIZE, 1);
    // fix: ceil-division — the old n/BLOCKSIZE grid dropped the remainder rows
    dim3 grid((n + BLOCKSIZE - 1) / BLOCKSIZE, (n + BLOCKSIZE - 1) / BLOCKSIZE, 1);

    cudaEventRecord(start);
    MatMul<<<grid,block>>>(n, a_d, b_d, c_d);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    printf("%f\n", ms);

    /* The SharedMatMul timing run and the verify(matmul_simple, matmul_shared)
       cross-check from the original remain disabled; re-enable them to compare
       the tiled kernel against MatMul (note SharedMatMul still requires
       n % BLOCKSIZE == 0). */

    // fix: release all host and device resources (everything leaked before)
    free(a); free(b); free(c); free(d);
    free(matmul_simple); free(matmul_shared);
    cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaFree(d_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
1,934
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>  /* fix: memcpy was used below without being declared */
#include <time.h>

/* To index element (i,j) of a 2D array stored as 1D */
#define index(i, j, N) ((i)*(N)) + (j)

/*****************************************************************/
// Function declarations: Feel free to add any functions you want.
void seq_heat_dist(float *, unsigned int, unsigned int);
void gpu_heat_dist(float *, unsigned int, unsigned int);
/*****************************************************************/

/**** Do NOT CHANGE ANYTHING in main() function ******/
int main(int argc, char * argv[])
{
  unsigned int N;           /* Dimention of NxN matrix */
  int type_of_device = 0;   // CPU or GPU
  int iterations = 0;
  int i;

  /* The 2D array of points will be treated as 1D array of NxN elements */
  float * playground;

  // to measure time taken by a specific part of the code
  double time_taken;
  clock_t start, end;

  if (argc != 4)
  {
    fprintf(stderr, "usage: heatdist num iterations who\n");
    fprintf(stderr, "num = dimension of the square matrix (50 and up)\n");
    fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n");
    fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n");
    exit(1);
  }

  type_of_device = atoi(argv[3]);
  N = (unsigned int) atoi(argv[1]);
  iterations = (unsigned int) atoi(argv[2]);

  /* Dynamically allocate NxN array of floats */
  playground = (float *)calloc(N*N, sizeof(float));
  if (!playground)
  {
    fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N);
    exit(1);
  }

  /* Initialize it: calloc already initalized everything to 0 */
  // Edge elements to 70F
  for (i = 0; i < N; i++) playground[index(0,i,N)] = 70;
  for (i = 0; i < N; i++) playground[index(i,0,N)] = 70;
  for (i = 0; i < N; i++) playground[index(i,N-1, N)] = 70;
  for (i = 0; i < N; i++) playground[index(N-1,i,N)] = 70;

  // from (0,10) to (0,30) inclusive are 100F
  for (i = 10; i <= 30; i++) playground[index(0,i,N)] = 100;

  // from (n-1,10) to (n-1,30) inclusive are 150F
  for (i = 10; i <= 30; i++) playground[index(N-1,i,N)] = 150;

  if (!type_of_device) // The CPU sequential version
  {
    start = clock();
    seq_heat_dist(playground, N, iterations);
    end = clock();
  }
  else // The GPU version
  {
    start = clock();
    gpu_heat_dist(playground, N, iterations);
    end = clock();
  }

  time_taken = ((double)(end - start)) / CLOCKS_PER_SEC;
  printf("Time taken for %s is %lf\n", type_of_device == 0 ? "CPU" : "GPU", time_taken);

  free(playground);
  return 0;
}

/***************** The CPU sequential version (DO NOT CHANGE THIS) **************/
void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
  // Loop indices
  int i, j, k;
  int upper = N-1;

  // number of bytes to be copied between array temp and array playground
  unsigned int num_bytes = 0;

  float * temp;
  /* Dynamically allocate another array for temp values */
  temp = (float *)calloc(N*N, sizeof(float));
  if (!temp)
  {
    fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N);
    exit(1);
  }

  num_bytes = N*N*sizeof(float);

  /* Copy initial array in temp */
  memcpy((void *)temp, (void *)playground, num_bytes);

  for (k = 0; k < iterations; k++)
  {
    /* Calculate new values and store them in temp */
    for (i = 1; i < upper; i++)
      for (j = 1; j < upper; j++)
        temp[index(i,j,N)] = (playground[index(i-1,j,N)] +
                              playground[index(i+1,j,N)] +
                              playground[index(i,j-1,N)] +
                              playground[index(i,j+1,N)]) / 4.0;

    /* Move new values into old values */
    memcpy((void *)playground, (void *)temp, num_bytes);
  }
}

/***************** The GPU version: Write your code here *********************/
/* This function can call one or more kernels if you want ********************/

/* One Jacobi step: every interior point of `fresh` becomes the average of its
   four neighbours read from `current`.  2D grid-stride loops, so any launch
   geometry covers the whole interior; boundary rows/columns are never
   written. */
__global__ void spread_to_point(float * current, unsigned int N, unsigned int iter, float * fresh)
{
  unsigned int ind_i = blockDim.x * blockIdx.x + threadIdx.x; // starting i for this thread
  unsigned int ind_j = blockDim.y * blockIdx.y + threadIdx.y; // starting j for this thread
  int stride_i = blockDim.x * gridDim.x;
  int stride_j = blockDim.y * gridDim.y;

  for (int j = ind_j + 1; j < N - 1; j += stride_j)
    for (int i = ind_i + 1; i < N - 1; i += stride_i)
      // N is the row stride: the matrix is stored row-major as a 1D array
      fresh[i * N + j] = (current[(i-1) * N + j] +
                          current[(i+1) * N + j] +
                          current[i * N + (j-1)] +
                          current[i * N + (j+1)]) / 4;
}

/* Copy the freshly computed interior back into `current` for the next
   iteration.
   BUG FIX: the original computed a single flat index from the *thread*
   coordinates before the loops and wrote current[index] on every loop trip,
   so striding threads copied the wrong cell repeatedly and thread (0,0)
   clobbered the boundary at current[0].  The loop variables now form the
   index, mirroring spread_to_point. */
__global__ void overwrite_current_iteration(float * current, unsigned int N, unsigned int iter, float * fresh)
{
  unsigned int ind_i = blockDim.x * blockIdx.x + threadIdx.x;
  unsigned int ind_j = blockDim.y * blockIdx.y + threadIdx.y;
  int stride_i = blockDim.x * gridDim.x;
  int stride_j = blockDim.y * gridDim.y;

  for (int j = ind_j + 1; j < N - 1; j += stride_j)
    for (int i = ind_i + 1; i < N - 1; i += stride_i)
      current[i * N + j] = fresh[i * N + j];
}

// The two commented functions below were anticipating writing parallel code to initialize the mesh temperatures (parallelized instead of sequential), however that is already done sequentially in main(), so the two below are not needed, but may be conceptually useful.
// __global__ void initialize_edges_or_not(int N, float * grid, int iterations)
// { // Checks if a point is on the edge or not, setting to 70 and 0 respectively. Unused: init is done sequentially in main().
//   int i = blockDim.x * blockIdx.x + threadIdx.x;
//   int j = blockDim.y * blockIdx.y + threadIdx.y;
//   int index = i * N + j;
//   if ( index == 0 || index == N - 1 ) { h[index] = 70; }
//   else if ( index%N == 0 ) { h[index] = 70; }
//   else { h[index] = 0; }
// }
// __global__ void initialize_special_sections(int N, float * grid, int iterations)
// { // Would set special swaths of points (e.g. the 100F/150F strips). Unused for the same reason.
//   int i = blockDim.x * blockIdx.x + threadIdx.x;
//   int j = blockDim.y * blockIdx.y + threadIdx.y;
//   int index = i * N + j;
//   if (i==0 || i==N) { /* So on... */ }
// }

/* Ceil-division: number of blocks of `tpb` threads needed to cover Nm2
   elements.  Fix: the original added one extra block even when tpb divided
   Nm2 exactly. */
int calcBlocks(unsigned int Nm2, int tpb)
{
  return (Nm2 + tpb - 1) / tpb;
}

/* GPU driver: iterate spread_to_point / overwrite_current_iteration on
   device-managed copies of the playground, then copy the result back. */
void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
  float *current, *fresh;
  size_t bytes = (size_t)N * N * sizeof(float);

  cudaMallocManaged(&current, bytes);
  cudaMallocManaged(&fresh, bytes);

  cudaMemcpy(current, playground, bytes, cudaMemcpyHostToDevice);
  /* fix: was N*N*sizeof(int) — same byte count, but spelled with the wrong type */
  cudaMemset(fresh, 0, bytes);

  // Tiling setup - how many threads per block, and how many blocks in the grid.
  unsigned int blkcount = (N + 16 - 1) / 16;  /* ceil(N/16) blocks per axis */
  dim3 threadsPerBlock(16, 16);
  dim3 numBlocks(blkcount, blkcount);

  for (int i = 0; i < iterations; i++)
  {
    spread_to_point<<<numBlocks, threadsPerBlock>>>(current, N, iterations, fresh);
    cudaDeviceSynchronize();
    overwrite_current_iteration<<<numBlocks, threadsPerBlock>>>(current, N, iterations, fresh);
    cudaDeviceSynchronize();

    /* Debug trace kept from the original: print the first few interior
       values of row 1 each iteration. */
    int lessAmt = 10;
    for (int j = N; j < N + lessAmt; j++)
      printf("%.5f", current[j]);
    printf("\n");
  }

  /* BUG FIX: was N*sizeof(float) — only the first row was copied back, so the
     caller never saw the computed interior. */
  cudaMemcpy(playground, current, bytes, cudaMemcpyDeviceToHost);

  /* BUG FIX: was cudaFree(&current)/cudaFree(&fresh), which passed the
     address of the host pointer *variable* rather than the device
     allocation. */
  cudaFree(current);
  cudaFree(fresh);
}
1,935
#include "includes.h" __global__ void calculateIntermediates(int n, double *xs, int *cluster_index, int *intermediates0, double *intermediates1, double *intermediates2, int k, int d){ int blocksize = n / 450 + 1; int start = blockIdx.x * blocksize; int end1 = start + blocksize; int end; if (end1>n) end = n; else end = end1; if (end > n ) return; // loop for every K for (int clust = threadIdx.y; clust < k; clust+= blockDim.y){ // loop for every dimension(features) for (int dim = threadIdx.x; dim < d; dim+= blockDim.x) { // Calculate intermediate S0 // for counts we don't have dimensions if (dim ==0) { int count = 0; for(int z=start; z<end; z++) { if(cluster_index[z] == clust) { count ++; } } intermediates0[blockIdx.x*k+clust] = count; } // Calculate intermediate S1 and S2 double sum1 = 0.0; double sum2 = 0.0; int idx ; for (int z=start; z<end; z++) { if(cluster_index[z] == clust) { idx = z * d + dim; sum1 += xs[idx]; sum2 += xs[idx] * xs[idx]; } } int index = (blockIdx.x*k*d + clust*d + dim); intermediates1[index] = sum1; intermediates2[index] = sum2; } } }
1,936
#include "model.cuh" #include <vector> int main(int argc, char* argv[]) { try { Model nn (losses::MSE); nn += new Dense(10,300); nn += new Dense(300, 5); Matrix m(10); std::cout << nn.feed(m); std::vector<Matrix> train_data; std::vector<Matrix> ans; // generate some training data for(int i = 0; i < 10; ++i) { Matrix m(10); Matrix a(5, 1.0f); train_data.push_back(m); ans.push_back(a); } for (int i = 0; i < 20; ++i) std::cout << nn.train_batch(train_data.begin(), ans.begin(), 10, 0.01f) << "\n"; } catch (char const* s) { std::cout << s << "\n"; } }
1,937
// Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/group_points_gpu.cu
#include <stdio.h>
#include <stdlib.h>

#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// Backward pass of point grouping: scatter-add every grad_out element back to
// the source point it was gathered from.
//   grad_out:    (B, C, npoints, nsample)
//   idx:         (B, npoints, nsample)
//   grad_points: (B, C, N), accumulated into via atomicAdd
// Grid layout: x covers (point, sample) pairs, y = channel, z = batch.
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
                                         int nsample,
                                         const float *__restrict__ grad_out,
                                         const int *__restrict__ idx,
                                         float *__restrict__ grad_points) {
  const int batch   = blockIdx.z;
  const int channel = blockIdx.y;
  const int flat    = blockIdx.x * blockDim.x + threadIdx.x;
  const int point   = flat / nsample;
  const int sample  = flat % nsample;
  if (batch >= b || channel >= c || point >= npoints) return;

  const int src      = idx[(batch * npoints + point) * nsample + sample];
  const int grad_ofs = ((batch * c + channel) * npoints + point) * nsample + sample;
  atomicAdd(grad_points + (batch * c + channel) * n + src, grad_out[grad_ofs]);
}

// Host launcher for group_points_grad_kernel; aborts on launch failure.
void group_points_grad_kernel_launcher(int b, int c, int n, int npoints,
                                       int nsample, const float *grad_out,
                                       const int *idx, float *grad_points,
                                       cudaStream_t stream) {
  // One thread per (point, sample) pair; grid y/z carry channel and batch.
  const dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  const dim3 threads(THREADS_PER_BLOCK);

  group_points_grad_kernel<<<blocks, threads, 0, stream>>>(
      b, c, n, npoints, nsample, grad_out, idx, grad_points);

  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}

// Forward pass: gather the selected neighbours of each query point.
//   points: (B, C, N)
//   idx:    (B, npoints, nsample)
//   out:    (B, C, npoints, nsample)
// Grid layout matches the backward kernel above.
__global__ void group_points_kernel(int b, int c, int n, int npoints,
                                    int nsample,
                                    const float *__restrict__ points,
                                    const int *__restrict__ idx,
                                    float *__restrict__ out) {
  const int batch   = blockIdx.z;
  const int channel = blockIdx.y;
  const int flat    = blockIdx.x * blockDim.x + threadIdx.x;
  const int point   = flat / nsample;
  const int sample  = flat % nsample;
  if (batch >= b || channel >= c || point >= npoints) return;

  const int src     = idx[(batch * npoints + point) * nsample + sample];
  const int in_ofs  = (batch * c + channel) * n + src;
  const int out_ofs = ((batch * c + channel) * npoints + point) * nsample + sample;
  out[out_ofs] = points[in_ofs];
}

// Host launcher for group_points_kernel; aborts on launch failure.
void group_points_kernel_launcher(int b, int c, int n, int npoints, int nsample,
                                  const float *points, const int *idx,
                                  float *out, cudaStream_t stream) {
  const dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  const dim3 threads(THREADS_PER_BLOCK);

  group_points_kernel<<<blocks, threads, 0, stream>>>(b, c, n, npoints,
                                                      nsample, points, idx, out);

  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
1,938
#include "includes.h" // risky #define dfloat double #define p_eps 1e-6 #define p_Nsamples 1 // ratio of importance in sampling primary ray versus random rays #define p_primaryWeight 2.f #define p_intersectDelta 0.1f #define p_shadowDelta 0.15f #define p_projectDelta 1e-2 #define p_maxLevel 5 #define p_maxNrays (2<<p_maxLevel) #define p_apertureRadius 20.f #define NRANDOM 10000 cudaEvent_t startTimer, endTimer; __global__ void finishScanKernel(const int N, int *scanv, int *starts){ int j = threadIdx.x; int b = blockIdx.x; int n=j+b*BLOCKSIZE; if(n<N){ int start = starts[b]; scanv[n+1] += start; } }
1,939
#include "includes.h" __device__ unsigned int getGid3d3d(){ int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; return threadId; } __global__ void cMultPhi(double2* in1, double* in2, double2* out){ double2 result; unsigned int gid = getGid3d3d(); result.x = cos(in2[gid])*in1[gid].x - in1[gid].y*sin(in2[gid]); result.y = in1[gid].x*sin(in2[gid]) + in1[gid].y*cos(in2[gid]); out[gid] = result; }
1,940
#include "includes.h" __global__ void load(int size, const long *in) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < size) { } }
1,941
#include<cuda_runtime.h> #include<device_launch_parameters.h> #include<stdio.h> #include<stdlib.h> #include<string.h> __global__ void add(int* d_a,int* d_b,int* d_r,int *d_m) { int n = threadIdx.x; int size = gridDim.x; for(int i = 0;i<(2);i++) { d_r[i*(*d_m)+n] = d_a[i*(*d_m)+n] + d_b[i*(*d_m)+n]; } } int main(void) { int *a,*b,*r,m,n,i; int *d_a,*d_b,*d_r,*d_m; printf("Enter m,n : "); scanf("%d %d",&m,&n); a = (int*)malloc(m*n*sizeof(int)); b = (int*)malloc(m*n*sizeof(int)); r = (int*)malloc(m*n*sizeof(int)); printf("Enter matrix 1:\n"); for(i=0;i<m*n;i++) { scanf("%d",&a[i]); } printf("Enter matrix 2:\n"); for(i=0;i<m*n;i++) { scanf("%d",&b[i]); } cudaMalloc((void **)&d_a,(m*n)*sizeof(int)); cudaMalloc((void **)&d_b,(m*n)*sizeof(int)); cudaMalloc((void **)&d_r,(m*n)*sizeof(int)); cudaMalloc((void **)&d_m,sizeof(int)); cudaMemcpy(d_a,a,(m*n)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,(m*n)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_r,r,(m*n)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_m,&m,sizeof(int),cudaMemcpyHostToDevice); add<<<1,m>>>(d_a,d_b,d_r,d_m); cudaError_t error = cudaGetLastError(); if(error!= cudaSuccess) { printf("%s\n",cudaGetErrorString(error)); } cudaMemcpy(r,d_r,(m*n)*sizeof(int),cudaMemcpyDeviceToHost); printf("Result matrix :\n"); for(i=0;i<m*n;i++) { printf("%d ",r[i]); if((i+1)%m==0) printf("\n"); } }
1,942
#include "includes.h" namespace ann { // CUDA2 } __global__ void kernel_calc_gjL_2( int layer_id, int *l, int *s_ext, int *sw_ext, float *z_ext_arr, float *a_ext_arr, float *t_arr, float *gjl_ext, float *w_ext_arr ){ int idx = threadIdx.y + blockDim.y*blockIdx.y; int h = blockDim.x; int pidx = threadIdx.y; int lidx = threadIdx.x; extern __shared__ int sm[]; float *sm_g = (float*)&sm[0]; int neuron_count = l[layer_id]; int neuron_count_next = l[layer_id+1]; if(idx >= neuron_count-1) return; float sum = 0; for (int k = lidx; k < neuron_count_next-1; k+=h) { sum += w_ext_arr[sw_ext[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl_ext[s_ext[layer_id + 1] + k]; } sm_g[pidx*h + lidx] = sum; __syncthreads(); if(lidx == 0){ float z = z_ext_arr[s_ext[layer_id] + idx]; float tmp = 1 + expf(-z); float f_deriv = expf(-z) / (tmp*tmp); sum = 0; for(int i = 0; i < h; i++) sum += sm_g[pidx*h + i]; gjl_ext[s_ext[layer_id] + idx] = f_deriv*sum; } }
1,943
// Inner product of 2 vectors #include<iostream> #include<vector> __global__ void vecProd(float *a, float *b, float *c, int N){ int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < N){ c[i] = a[i]*b[i]; } } int main(){ std::vector<float> v1; std::vector<float> v2; std::vector<float> v3; for(auto i = 0; i < 10; i++){ v1.emplace_back(i); v2.emplace_back(2*i); v3.emplace_back(0); } size_t size = v1.size()*sizeof(float); float *d_v1, *d_v2, *d_v3; cudaMalloc(&d_v1, size); cudaMalloc(&d_v2, size); cudaMalloc(&d_v3, size); cudaMemcpy(d_v1, v1.data(), size, cudaMemcpyHostToDevice); cudaMemcpy(d_v2, v2.data(), size, cudaMemcpyHostToDevice); int threadsPerBlock = 256; int numBlocks = (v1.size() + threadsPerBlock - 1)/threadsPerBlock; vecProd<<<numBlocks, threadsPerBlock>>>(d_v1, d_v2, d_v3, v1.size()); cudaMemcpy(v3.data(), d_v3, size, cudaMemcpyDeviceToHost); for(auto& i:v3){ std::cout << i << std::endl; } cudaFree(d_v1); cudaFree(d_v2); cudaFree(d_v3); return 0; }
1,944
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// C = A * B, row-major.  A is hA x wA, B is hB x wB (hB == wA), C is hC x wC
// (hC == hA, wC == wB).  One thread per element of C; dimensions must be
// multiples of the 32 x 32 block size used below.
// BUG FIX: A was indexed with row stride wC and B with row stride hC, which
// is only correct when all matrices are square and equal-sized.  With the
// dimensions used in main() (wA=320, wC=640) the result never matched the
// CPU reference.  A's stride is wA; B's stride is wB, which equals wC here.
__global__ void Matmul(float *A, float *B, float *C, int wA, int wC, int hC){
    int i = blockDim.x*blockIdx.x+threadIdx.x;   /* column of C */
    int j = blockDim.y*blockIdx.y+threadIdx.y;   /* row of C */
    int k;
    float tmp = 0.0f;
    for (k = 0; k < wA; k++) {
        tmp += A[k + j*wA] * B[i + k*wC];        /* wC == wB is B's row stride */
    }
    C[i + j*wC] = tmp;
}

// Fill an hA x wA matrix with uniform random values in [0, 1].
void init(float *A, int wA, int hA)
{
    for (int h = 0; h < hA; h++)
        for (int w = 0; w < wA; w++)
            A[w + h*wA] = (float)rand() / (float)RAND_MAX;
}

// CPU reference GEMM for verification.
void compute(float *A, float *B, float *C, int wA, int hA, int wB)
{
    for (int h = 0; h < hA; h++) {
        for (int w = 0; w < wB; w++) {
            float temp = 0.0f;
            for (int i = 0; i < wA; i++)
                temp += A[i + h*wA] * B[w + i*wB];
            C[w + h*wB] = temp;
        }
    }
}

int main()
{
    float gpu_time;
    int iter, max_iter = 10;
    int wA = 320, hA = 320, wB = 640, hB = 320;
    int wC = wB, hC = hA;
    size_t sizeA = wA*hA*sizeof(float);
    size_t sizeB = wB*hB*sizeof(float);
    size_t sizeC = hA*wB*sizeof(float);

    float *A = (float*) malloc(sizeA);
    float *B = (float*) malloc(sizeB);
    float *C = (float*) malloc(sizeC);

    // seed random number generator and initialize inputs
    srand(time(NULL));
    init(A, wA, hA);
    init(B, wB, hB);

    float *dA = NULL, *dB = NULL, *dC = NULL;
    cudaMalloc((void**)&dA, sizeA);
    cudaMalloc((void**)&dB, sizeB);
    cudaMalloc((void**)&dC, sizeC);
    cudaMemcpy(dA, A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeB, cudaMemcpyHostToDevice);

    // execution configuration (wC and hC are multiples of 32)
    dim3 dimG(wC/32, hC/32);
    dim3 dimB(32, 32);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for (iter = 0; iter < max_iter; iter++) {
        Matmul<<<dimG,dimB>>>(dA, dB, dC, wA, wC, hC);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time, start, stop);
    /* fix: the label said "CPU time" but the events time the GPU kernels */
    printf("GPU time = %lf s\n", gpu_time*0.001/max_iter);

    cudaMemcpy(C, dC, sizeC, cudaMemcpyDeviceToHost);

    // result check against the CPU reference
    float *Check = (float*) malloc(sizeC);
    compute(A, B, Check, wA, hA, wB);
    float sum = 0.0f;
    for (int h = 0; h < hC; h++)
        for (int w = 0; w < wC; w++)
            sum += C[w + h*wC] - Check[w + h*wC];
    printf("Check result %f (should be zero)\n", sum/(hC*wC));
    free(Check);

    // free memory (events were previously leaked)
    free(A); free(B); free(C);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
1,945
#include <stdio.h>
#include <iostream>
#include <map>
#include <string>
#include <fstream>
#include <stdexcept>   /* fix: std::invalid_argument was used without this */
#include <vector>

using namespace std;

#define ll long long

const int GRID_SIZE = 1;   // threads per block (one thread per block here)

// Trial-division primality test; n < 2 is reported composite.
__device__ bool isPrime(ll n)
{
    if (n < 2) return false;
    for (ll i = 2; i*i <= n; i++)
        if (n % i == 0) return false;
    return true;
}

// Read whitespace-separated numbers from the given file into a vector.
std::vector<ll> readFile(char* arg)
{
    vector<ll> numbersFromFile;
    std::ifstream infile(arg);
    ll number;
    if (!infile.is_open()) {
        throw std::invalid_argument("Problem with file");
    }
    while (infile >> number) {
        numbersFromFile.push_back(number);
    }
    return numbersFromFile;
}

// Decide primality for each element of Arr.
// BUG FIX: results was accumulated with '+=' into memory that was never
// initialised; each slot is now simply assigned, and the simple path guards
// the grid tail.
__global__ void calculate(ll *Arr, bool *results, int sizeOfArray, int amountOfBlocks)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (amountOfBlocks >= sizeOfArray) {
        if (x < sizeOfArray)
            results[x] = isPrime(Arr[x]);
    }
    else {
        // Partitioned path: thread x handles a contiguous chunk plus at most
        // one leftover element.
        int sizeOfPart = sizeOfArray / amountOfBlocks;
        int restOfDivide = sizeOfArray % amountOfBlocks;
        int startPart = sizeOfPart * x;
        int endPart = sizeOfPart * (x + 1);
        if (endPart <= sizeOfArray) {
            int restStart = sizeOfPart * amountOfBlocks;
            for (int i = startPart; i < endPart; i++)
                results[i] = isPrime(Arr[i]);
            if (x < restOfDivide)
                results[restStart + x] = isPrime(Arr[restStart + x]);
        }
    }
}

int main(int argc, char** argv)
{
    float time;
    if (argc < 2) {
        printf("Pass file path\n");
        return -1;
    }

    vector<ll> numbersFromFile;
    try {
        numbersFromFile = readFile(argv[1]);
    }
    catch (const std::invalid_argument& ex) {
        cout << ex.what() << endl;
        return -1;
    }

    int sizeOfArray = numbersFromFile.size();
    int sizeToAllocateLongLong = sizeOfArray * sizeof(ll);
    int sizeToAllocateBool = sizeOfArray * sizeof(bool);

    bool* results = (bool *) malloc(sizeToAllocateBool);

    ll* c_arr;
    bool* c_results;
    cudaMalloc((void**) &c_arr, sizeToAllocateLongLong);
    cudaMalloc((void**) &c_results, sizeToAllocateBool);
    /* fix: upload straight from the vector's storage (the old copy went
       through a non-standard variable-length array) */
    cudaMemcpy(c_arr, numbersFromFile.data(), sizeToAllocateLongLong, cudaMemcpyHostToDevice);
    /* fix: zero the result buffer so stale device memory can't leak through */
    cudaMemset(c_results, 0, sizeToAllocateBool);

    // Start timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    int amountOfBlocks = sizeOfArray;
    calculate<<<amountOfBlocks, GRID_SIZE>>>(c_arr, c_results, sizeOfArray, amountOfBlocks);

    // End timer and put result into time variable
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Czas: %.4fms\n", time);

    if (cudaMemcpy(results, c_results, sizeToAllocateBool, cudaMemcpyDeviceToHost) != cudaSuccess) {
        cout << "GPU to CPU copy error\n";
    }

    cudaFree(c_arr);
    cudaFree(c_results);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    for (int j = 0; j < sizeOfArray; j++) {
        if (results[j]) {
            cout << numbersFromFile[j] << " prime" << endl;
        }
        else {
            cout << numbersFromFile[j] << " composite" << endl;
        }
    }

    free(results);
    return 0;
}
1,946
#include <iostream> #include <bits/stdc++.h> using namespace std; class Stack { private: int Size; int* arr; public: Stack(); ~Stack(); void Append(int x); void Pop(); void Destroy(); void Peek(); void Show(); }; Stack::Stack() : Size(1) { arr = new int[INT_MAX]; } Stack::~Stack() { arr = nullptr; } void Stack::Append(int x) { arr[Size-1] = x; Size++; } void Stack::Pop() { arr[Size-1] = -1; Size--; } void Stack::Destroy() { this->~Stack(); } void Stack::Peek() { cout << this->arr[Size-1] << endl; } void Stack::Show() { cout << '['; for(int i=Size-2;i>0;i--) { cout << this->arr[i] << ','; } cout << this->arr[0] << ']' << endl; }
1,947
#include "includes.h" /* * Compile: nvcc -o saxby saxby.cu * Run: ./saxby */ __global__ void daxbyAdd(const float *A, const float *B, float *C, float x,int numElements){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < numElements){ C[i] = A[i]* x + B[i]; } }
1,948
// Passing array of a Class and assigning elements at odd/even elements to another array. // @alpha74 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include "stdio.h" using namespace std; class Coord { int x; int y; public: Coord() { x = 0; y = 0; } void set(int a, int b) { x = a; y = b; } void print() { printf(" (%d,%d) ", x, y); } }; __global__ void foo( int lim, Coord C[], Coord *oddi, Coord *eveni) { int tid = blockIdx.x; if (tid < lim) { if (tid % 2 == 0) { eveni[tid / 2] = C[tid]; } else { oddi[tid / 2] = C[tid]; } } } int main() { const int N = 20; // Declare an array of size N Coord C[N]; Coord result_odd[ N/2], result_even[ N/2 ]; // Initialize the elements for (int i = 0; i < N; i++) { if (i % 2 == 0) { C[i].set(2, 2); } else C[i].set(-1, -1); } // Declare and allocate device memory Coord *dev_C; Coord *dev_odd, *dev_even; cudaMalloc((void**)&dev_C, N * sizeof(Coord)); cudaMalloc((void**)&dev_odd, N / 2 * sizeof(Coord)); cudaMalloc((void**)&dev_even, N / 2 * sizeof(Coord)); cudaMemcpy(dev_C, C, N * sizeof(Coord), cudaMemcpyHostToDevice); cudaMemcpy(dev_odd, C, N/2 * sizeof(Coord), cudaMemcpyHostToDevice); // Adding part of the array for comparison. cudaMemcpy(dev_even, C, N / 2 * sizeof(Coord), cudaMemcpyHostToDevice); // Adding part of the array for comparison. foo <<<N, 1 >>> ( N, dev_C, dev_odd, dev_even ); // Copying back the results cudaMemcpy(&result_even, dev_even, N/2 * sizeof(Coord), cudaMemcpyDeviceToHost); cudaMemcpy(&result_odd, dev_odd, N/2 * sizeof(Coord), cudaMemcpyDeviceToHost); cout << "\n At even pos: "; for (int i = 0 ; i < N / 2; i++) { result_even[i].print(); } cout << "\n At odd pos: "; for (int i = 0; i < N / 2; i++) { result_odd[i].print(); } cout << "\n "; // Freeing device memory cudaFree(dev_C); cudaFree(dev_odd); cudaFree(dev_even); return 0; }
1,949
// Sudoku solver: sequential Algorithm X on the exact-cover formulation.
// CUDA is used only for the event-based wall-clock timer around main.
#include<bits/stdc++.h>
#include<cuda.h>
#include <algorithm>
#include <chrono>
#include<iostream>
#include <fstream>
#include<stdio.h>
#include<stdlib.h>
#define f first
#define s second
using namespace std;
//using namespace std::chrono;

// Full 729x324 exact-cover matrix, kept globally so that a chosen row's
// original (undeleted) contents can be appended to the partial solution
// after columns have been erased from the working copy.
// Cell layout: .f = 0/1 matrix entry, .s.f = row label (1 + cell + 100*digit),
// .s.s = 1-based column label.
vector< vector < pair< int , pair <int , int> > > > g;

// Debug helper: print a 1-D int vector on one line.
void print1d(vector<int> v){
    for(int ii=0;ii<v.size();ii++){
        cout<<v[ii]<<" ";
    }
    cout<<endl;
}

// Debug helper: print a 2-D int matrix (used for the 9x9 sudoku grid).
void print2d(vector < vector < int> >v){
    for(int ii=0;ii<v.size();ii++){
        for(int jj=0;jj<v[ii].size();jj++){
            cout<<v[ii][jj]<<" ";
        }
        cout<<endl;
    }
}

// Debug helper: print a labelled exact-cover matrix — column labels first,
// then one row label followed by that row's 0/1 entries.
void print2(vector< vector < pair< int , pair <int , int> > > > v){
    if(v.size()==0)return;
    cout<<" ";
    for(int jj=0;jj<v[0].size();jj++){
        cout<<(v[0][jj].s).s<<" ";
    }
    cout<<endl;
    for(int ii=0;ii<v.size();ii++){
        cout<<(v[ii][0].s).f<<" ";
        for(int jj=0;jj<v[ii].size();jj++){
            cout<<v[ii][jj].f<<" ";
        }
        cout<<endl;
    }
    cout<<endl;
}

// Choose the column with the fewest 1s (Knuth's S heuristic).
// Returns -1 when some column contains no 1 at all (dead branch).
int find_col(vector< vector < pair< int , pair <int , int> > > > A , int rows , int cols){
    int minind = -1;
    int min = 1000;
    int sum = 0;
    for(int ii=0;ii<cols;ii++){
        sum = 0;
        for(int jj=0;jj<rows;jj++){
            if(A[jj][ii].f == 1 ){
                sum = sum + 1;
            }
        }
        if(sum<min){
            min = sum;
            minind = ii;
        }
    }
    if(min==0)return -1;
    return minind;
}

// Return a copy of v with column j (0-indexed) removed from every row.
vector< vector < pair< int , pair <int , int> > > > delcol( vector< vector < pair< int , pair <int , int> > > > v , int j ){
    // delete j indexed coloumn
    for(int ii=0;ii<v.size();ii++){
        v[ii].erase(v[ii].begin()+j);
    }
    return v;
}

/*
If the matrix A has no columns, the current partial solution is a valid solution; terminate successfully.
Otherwise choose a column c (deterministically).
Choose a row r such that Ar, c = 1 (nondeterministically).
Include row r in the partial solution.
For each column j such that Ar, j = 1,
    for each row i such that Ai, j = 1,
        delete row i from matrix A.
    delete column j from matrix A.
Repeat this algorithm recursively on the reduced matrix A.
*/

// "Cover" step of Algorithm X: given chosen row r, delete every column j
// with A[r][j] == 1 and every row intersecting any of those columns.
// Returns the reduced matrix.  (partsoln is accepted but not used here.)
vector< vector < pair< int , pair <int , int> > > > help_algox(vector< vector < pair< int , pair <int , int> > > > A , int rows , int cols , vector< vector < pair< int , pair <int , int> > > > partsoln , int r){
    vector<int> x;   // columns covered by row r
    for(int ii=0;ii<A[0].size();ii++){
        if(A[r][ii].f == 1){
            x.push_back(ii);
        }
    }
    // Walk the column list (and the rows) backwards so earlier erasures do
    // not shift the indexes still to be processed.
    for(int ii=x.size()-1;ii>=0;ii--){
        for(int kk=A.size()-1;kk>=0;kk--){
            if(A[kk][x[ii]].f == 1){
                // delete kk row
                A.erase(A.begin()+kk);
            }
        }
        // delete coloumn ii
        A = delcol(A,x[ii]);
    }
    return A;
}

// Algorithm X search.  Returns the accumulated partial solution when the
// matrix has been reduced to zero columns, or an empty vector when this
// branch cannot be completed.
vector< vector < pair< int , pair <int , int> > > > algX ( vector< vector < pair< int , pair <int , int> > > > A , int rows , int cols , vector< vector < pair< int , pair <int , int> > > > partsoln ){
    if(cols==0){
        // no columns left: partsoln is a complete cover
        return partsoln;
    }
    vector< vector < pair< int , pair <int , int> > > > soln2;   // stays empty: failure marker
    // choose the coloumn with min no of 1's in it
    int c = find_col(A,rows,cols);
    // c is our chosen coloumn index
    if(c==-1) return soln2;   // some column is uncoverable: dead end
    vector<int> r;   // candidate rows that cover column c
    for(int ii=0;ii<rows;ii++){
        if(A[ii][c].f == 1){
            r.push_back(ii);
        }
    }
    vector< vector < pair< int , pair <int , int> > > > temp1; // for copy of A
    vector< vector < pair< int , pair <int , int> > > > temp2 ; // for copy of partsoln
    vector< vector < pair< int , pair <int , int> > > > soln;
    vector< vector < pair< int , pair <int , int> > > > soln1;
    int useg,llpr,ttpr,rwno;
    for(int jj=0;jj<r.size();jj++){
        // call each branch
        temp1 = A;
        temp2 = partsoln;
        // Recover the original 729-matrix row from the row label so the
        // partial solution stores full rows: label = 1 + cell + 100*digit,
        // hence rwno = cell*9 + digit-1.
        useg = ((A[r[jj]][0]).s).f ;
        llpr = useg/100;
        ttpr = useg%100;
        rwno = (ttpr-1)*9 + llpr-1;
        temp2.push_back(g[rwno]);
        soln = help_algox(temp1,rows,cols,temp2,r[jj]);
        if(soln.size() == 0) {
            // matrix fully reduced: temp2 is the solution
            return temp2;
        }
        soln1 = algX(soln,soln.size() , soln[0].size() , temp2 );
        if(soln1.size() != 0) return soln1;
    }
    return soln2;   // every candidate row failed
}

int main(){
    // CUDA events serve purely as a timer around the whole solve.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);

    vector < pair< int , pair <int , int> > > temp;
    vector< vector < pair< int , pair <int , int> > > > exactcover ; // exact cover matrix of input sudoku
    for(int ii=0;ii<729;ii++){
        exactcover.push_back(temp);
    }
    // exact cover has 729 rows
    // Fill every row with 324 zero entries; the label .s.f encodes
    // (cell ii, digit jj) as 1+ii+100*jj, .s.s is the 1-based column number.
    for(int ii=0;ii<81;ii++){
        for(int jj=1;jj<=9;jj++){
            for(int kk=1;kk<=324;kk++){
                exactcover[9*ii+(jj-1)].push_back(make_pair( 0 , make_pair( 1+ii+100*jj , kk) ) );
            }
        }
    }
    for(int ii=0;ii<81;ii++){
        for(int jj=1;jj<=9;jj++){
            // through all 729 rows keep 4 1's in each row:
            // cell constraint, row-digit, column-digit, box-digit
            exactcover[9*ii+(jj-1)][ii].f = 1;
            exactcover[9*ii+(jj-1)][80+jj+(ii/9)*9].f = 1;
            exactcover[9*ii+(jj-1)][161+jj+(ii%9)*9].f = 1;
            exactcover[9*ii+(jj-1)][242+jj+(ii/27)*27 + ((ii/3)%3)*9].f = 1;
        }
    }
    g = exactcover;

    ofstream fout;
    ifstream fin;
    int cell;
    vector< vector <int> > sudoku;
    vector <int> p;
    for(int ii=0;ii<9;ii++){
        sudoku.push_back(p);
    }
    fin.open("input.txt");
    //fout.open("output.txt");
    int pos[82];
    int row[729];
    int col[324];
    for(int ii=0;ii<729;ii++){
        row[ii]=0;
    }
    for(int ii=0;ii<324;ii++){
        col[ii]=0;
    }
    for(int ii=0;ii<82;ii++){
        pos[ii]=0;
    }
    vector<int> delrows; // vector containing indexes of rows to delete
    vector<int> delcols; // vector containing indexes of cols to delete
    // Read the 9x9 grid (0 = empty cell); for every given clue mark the
    // four constraint columns it already satisfies.
    for(int ii=0;ii<9;ii++){
        for(int jj=0;jj<9;jj++){
            fin>>cell;
            sudoku[ii].push_back(cell);
            if(cell!=0){
                pos[ii*9 + jj + 1]=1;
                // ii above is ii*9+jj
                // jj above is cell
                col[ii*9 + jj]=1;
                col[80+cell+((ii*9+jj)/9)*9]=1;
                col[161+cell+((ii*9+jj)%9)*9]=1;
                col[242+cell+((ii*9+jj)/27)*27 + (((ii*9+jj)/3)%3)*9]=1;
            }
        }
    }
    for(int ii=0;ii<324;ii++){
        if(col[ii]==1){
            delcols.push_back(ii);
        }
    }
    // Any row intersecting a satisfied column conflicts with a given clue.
    for(int ii=0;ii<729;ii++){
        for(int jj=0;jj<delcols.size();jj++){
            if(exactcover[ii][delcols[jj]].f==1){
                // need to delete that row
                row[ii]=1;
            }
        }
    }
    for(int ii=0;ii<729;ii++){
        if(row[ii]==1){
            delrows.push_back(ii);
        }
    }
    cout<<delrows.size()<<" "<<delcols.size()<<endl;
    // Erase back-to-front so pending indexes remain valid.
    for(int ii=delrows.size()-1;ii>=0 ;ii--){
        exactcover.erase(exactcover.begin() + delrows[ii]);
    }
    for(int ii=delcols.size()-1;ii>=0;ii--){
        exactcover = delcol(exactcover , delcols[ii]);
    }
    print2d(sudoku);
    cout<<exactcover.size()<<endl;
    cout<<exactcover[0].size()<<endl;
    vector< vector < pair< int , pair <int , int> > > > empty;
    vector< vector < pair< int , pair <int , int> > > > soln = algX(exactcover , exactcover.size() , exactcover[0].size() , empty);
    cout<<"soln is"<<endl;
    cout<<soln.size()<<endl;
    // Decode every solution row's label back to (row, col, digit):
    // label-1 = digit*100 + cell, cell = 9*row + col.
    int sdk,bdk,cdk,edk,fdk;
    for(int ii=0;ii<soln.size();ii++){
        for(int jj=0;jj<81;jj++){
            if(soln[ii][jj].f == 1){
                // add an element
                sdk = (soln[ii][jj].s).f ; // row no as stored in exact cover matrix
                sdk = sdk-1;
                bdk = sdk/100;  // the no to put
                cdk = sdk%100;
                edk = cdk/9;    // row index of sudoku
                fdk = cdk%9;    // col index of sudoku
                sudoku[edk][fdk]=bdk;
                cout<<edk<<" "<<fdk<<" "<<bdk<<endl;
            }
        }
    }
    print2d(sudoku);
    fin.close();

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time taken by function to execute is: %.6f ms\n", milliseconds);
    return 0;
}
1,950
#include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/functional.h> int main(int argc, char *argv[]) { thrust::device_vector<int> data(8); data[0] = 6; data[1] = 3; data[2] = 7; data[3] = 5; data[4] = 9; data[5] = 0; data[6] = 8; data[7] = 1; thrust::sort(data.begin(), data.end()); std::cout << "ascending" << std::endl; for (int i = 0; i < data.size(); i++) { std::cout << data[i] << std::endl; } thrust::sort(data.begin(), data.end(), thrust::greater<int>()); std::cout << "descending" << std::endl; for (int i = 0; i < data.size(); i++) { std::cout << data[i] << std::endl; } return 0; }
1,951
#include "includes.h" __global__ void Solve_redblack2_Kernel(float* output, const float* input, int width, int height, int nChannels, int c, const float* weightx, const float* weighty, float lambda, float omega, bool redflag) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int x = bx*blockDim.x + tx; int y = by*blockDim.y + ty; if (x >= width || y >= height) return; if ((y + x) % 2 == redflag) return; int offset = y*width + x; int slice = width*nChannels; int offset_c = offset*nChannels + c; float coeff = 0, sigma = 0, weight = 0; if (y > 0) { weight = lambda*weighty[offset - width]; coeff += weight; sigma += weight * output[offset_c - slice]; } if (y < height - 1) { weight = lambda*weighty[offset]; coeff += weight; sigma += weight*output[offset_c + slice]; } if (x > 0) { weight = lambda*weightx[offset - 1]; coeff += weight; sigma += weight*output[offset_c - nChannels]; } if (x < width - 1) { weight = lambda*weightx[offset]; coeff += weight; sigma += weight*output[offset_c + nChannels]; } coeff += 1; sigma += input[offset_c]; if (coeff > 0) output[offset_c] = sigma / coeff*omega + output[offset_c] * (1 - omega); }
1,952
#include<stdio.h>
#include<cuda.h>

/* Producing twiddle factors (and the real/imaginary twiddle matrices) for
 * an N-point transform. */

#define NUM_OF_X_THREADS 10
#define NUM_OF_Y_THREADS 10

/* x[idx] += idx over the first N elements, using a 2-D launch. */
__global__ void inputKernel(float *x, int N)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    /* FIX: flatten with the full grid width (gridDim.x * blockDim.x), not
     * the per-block width NUM_OF_X_THREADS.  The old index collided for
     * every block past the first, so different threads raced on the same
     * element and most elements were never touched. */
    int idx = iy * (gridDim.x * blockDim.x) + ix;
    if (idx < N)
        x[idx] = x[idx] + (float)idx;
}

/* w holds N interleaved (cos, sin) pairs: w[2k] = cos(2*pi*k/N),
 * w[2k+1] = sin(2*pi*k/N) for k < N/2.  Twiddle factors are symmetric
 * along N/2 with a change of sign (180 degree phase change), so the upper
 * half is the negated lower half.  One thread per lower-half pair. */
__global__ void factorKernel(float *w, int N)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = ix * 2;          /* start of this thread's (cos, sin) pair */
    int izx = N + idx;         /* mirrored entry in the upper half */
    const float pi = 3.1415;
    float aw = (2.0 * pi) / (float)N;
    float arg = aw * (float)ix;
    if (idx < N) {
        w[idx] = cos(arg);
        w[idx + 1] = sin(arg);
        w[izx] = (-1) * w[idx];
        w[izx + 1] = (-1) * w[idx + 1];
    }
}

/* wr[row][i] = cos part of the twiddle factor w^(row*i mod N); row 0 is all 1s. */
__global__ void twiddleRealKernel(float *wr, float *w, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i = 0, index;
    if (idx < N) {
        if (idx == 0) {
            for (i = 0; i < N; i++)
                wr[idx * N + i] = 1;
        } else {
            wr[idx * N + 0] = 1;
            for (i = 1; i < N; i++) {
                index = (idx * i) % N;
                wr[idx * N + i] = w[index * 2];
            }
        }
    }
}

/* wi[row][i] = sin part of the twiddle factor w^(row*i mod N); row 0 is all 0s. */
__global__ void twiddleImgKernel(float *wi, float *w, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i, index;
    if (idx < N) {
        if (idx == 0) {
            for (i = 0; i < N; i++)
                wi[idx * N + i] = 0;
        } else {
            wi[idx * N + 0] = 0;
            for (i = 1; i < N; i++) {
                index = (idx * i) % N;
                wi[idx * N + i] = w[index * 2 + 1];
            }
        }
    }
}

int main(int agrc, char** argv)
{
    float *x, *w, *w_r, *w_i;
    float *d_x, *d_w, *dw_r, *dw_i;
    int N = 10000, n = N / 2;

    x   = (float *)malloc(N * sizeof(float));
    w   = (float *)malloc(2 * N * sizeof(float));
    w_r = (float *)malloc(N * N * sizeof(float));
    w_i = (float *)malloc(N * N * sizeof(float));

    /* 100 x 100 threads cover all N = 10000 elements. */
    dim3 numberOfThreads(NUM_OF_X_THREADS, NUM_OF_Y_THREADS);
    dim3 numberOfBlocks((100 + NUM_OF_X_THREADS - 1) / NUM_OF_X_THREADS,
                        (100 + NUM_OF_Y_THREADS - 1) / NUM_OF_Y_THREADS);

    cudaMalloc((void **)&d_x, N * sizeof(float));
    cudaMalloc((void **)&d_w, 2 * N * sizeof(float));
    cudaMalloc((void **)&dw_r, N * N * sizeof(float));
    cudaMalloc((void **)&dw_i, N * N * sizeof(float));

    cudaMemset(d_x, 0, N * sizeof(float));
    cudaMemset(d_w, 0, 2 * N * sizeof(float));
    cudaMemset(dw_r, 0, N * N * sizeof(float));
    cudaMemset(dw_i, 0, N * N * sizeof(float));

    inputKernel<<<numberOfBlocks, numberOfThreads>>>(d_x, N);
    cudaMemcpy(x, d_x, N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", x[100]);

    /* FIX: all three 1-D launches below used n/512 blocks (floor division).
     * That (a) left the tail of the factor table uncomputed (n = 5000 is
     * not a multiple of 512) and (b) gave the N-thread twiddle kernels far
     * fewer than N threads, so most matrix rows were never written.  Use
     * ceil-division sized to the element count each kernel actually needs.
     * The stray (float)N cast on factorKernel's int parameter is removed. */
    factorKernel<<<(n + 511) / 512, 512>>>(d_w, N);
    cudaMemcpy(w, d_w, 2 * N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f %f\n", w[5], w[10005]);

    // Calculating twiddle real matrix
    twiddleRealKernel<<<(N + 511) / 512, 512>>>(dw_r, d_w, N);
    cudaMemcpy(w_r, dw_r, N * N * sizeof(float), cudaMemcpyDeviceToHost);

    // Calculating twiddle imaginary matrix
    twiddleImgKernel<<<(N + 511) / 512, 512>>>(dw_i, d_w, N);
    cudaMemcpy(w_i, dw_i, N * N * sizeof(float), cudaMemcpyDeviceToHost);

    /* FIX: release device and host buffers (the original leaked all eight). */
    cudaFree(d_x);
    cudaFree(d_w);
    cudaFree(dw_r);
    cudaFree(dw_i);
    free(x);
    free(w);
    free(w_r);
    free(w_i);
    return 0;
}
1,953
#include <math.h>
#include <stdio.h>

#define TDIM 32   // tile edge length in elements
#define RDIM 8    // number of rows in a block (each thread covers TDIM/RDIM rows)

// Tiled transpose through shared memory.  Block indices are remapped
// diagonally ((blockIdx.x+blockIdx.y) % gridDim.x) — presumably to avoid
// partition camping — and the tile's inner dimension is padded (+1) to
// avoid shared-memory bank conflicts.
__global__ void fast_transpose(double* a, double* b, int N)
{
    // buffer
    __shared__ double buffer[TDIM][TDIM + 1];

    int blockIdx_y = blockIdx.x;
    int blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;

    int y = blockIdx_y * TDIM + threadIdx.y;
    int x = blockIdx_x * TDIM + threadIdx.x;

    // load the tile into shared memory
    for (int i = 0; i < TDIM; i += RDIM) {
        buffer[threadIdx.y + i][threadIdx.x] = a[y * N + x + i * N];
    }
    __syncthreads();   // tile fully staged before any thread reads it back

    // write the transposed tile back to global memory
    y = blockIdx_x * TDIM + threadIdx.y;
    x = blockIdx_y * TDIM + threadIdx.x;
    for (int i = 0; i < TDIM; i += RDIM) {
        b[y * N + x + i * N] = buffer[threadIdx.x][threadIdx.y + i];
    }
}

// naive transpose: one thread per element, strided writes
__global__ void transpose(double* a, double* b, int N)
{
    int row = (blockIdx.x * blockDim.x + threadIdx.x) / N;
    int col = (blockIdx.x * blockDim.x + threadIdx.x) % N;
    b[col * N + row] = a[row * N + col];
}

// just randomly fill the matrix
void random_fill(double* mat, int N)
{
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            mat[i * N + j] = (double)rand() / (double)RAND_MAX * 100.;
}

// Used for error-checking: reference CPU transpose
void transpose_cpu(double* a, double* b, int N)
{
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            b[j * N + i] = a[i * N + j];
}

// check if two matrices are equal (1 = equal, 0 = different)
int is_equal(double* a, double* b, int N)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            if (b[i * N + j] != a[i * N + j])
                return 0;
    }
    return 1;
}

void print_mat(double* a, int N)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%.1f ", a[i * N + j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char* argv[])
{
    double *a, *b, *c, *d;           // host copies
    const int N = 8192;
    double *dev_a, *dev_b, *dev_c;   // device copies
    // FIX: the byte count was held in an int; size_t is the correct type
    // for sizes and does not overflow when N grows (N = 16384 would
    // already overflow a 32-bit int here).
    const size_t size = (size_t)N * N * sizeof(double);
    const int Nblocks = (N * N) / 1024;

    dim3 grid, block;
    block.x = TDIM;
    block.y = RDIM;
    grid.x = N / TDIM;
    grid.y = N / TDIM;

    // allocate device copies of a, b
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    a = (double*)malloc(size);
    b = (double*)malloc(size);
    d = (double*)malloc(size);

    // fill the matrix with random numbers
    random_fill(a, N);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);

    // cuda events for timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    fast_transpose<<<grid, block>>>(dev_a, dev_b, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);

    transpose_cpu(a, d, N);
    int equal = is_equal(b, d, N);
    if (equal) printf("Correct fast\n");
    else       printf("Uncorrect fast\n");

    // Bandwidth for reading from matrix a + writing on matrix b.
    // FIX: compute the byte count in double; the old int expression
    // N*N*2*8 sat one bit below INT_MAX and overflows for any larger N.
    printf("Time fast= %f\n", milliseconds);
    printf("Bandwidth fast= %f\n", 2.0 * 8.0 * N * N / milliseconds / 1e6);

    free(b);
    cudaFree(dev_b);

    c = (double*)malloc(size);
    cudaMalloc((void**)&dev_c, size);

    cudaEventRecord(start);
    transpose<<<Nblocks, 1024>>>(dev_a, dev_c, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);

    equal = is_equal(c, d, N);
    if (equal) printf("Correct naive\n");
    else       printf("Uncorrect naive\n");
    printf("Time naive = %f\n", milliseconds);
    printf("Bandwidth naive= %f\n", 2.0 * 8.0 * N * N / milliseconds / 1e6);

    // FIX: destroy the timing events (the original leaked them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    free(a);
    free(c);
    free(d);
    cudaFree(dev_a);
    cudaFree(dev_c);
    return 0;
}
1,954
#include <stdio.h>
#include <stdlib.h>
#include <fstream>

/**
 * Computes the log of reaction rate.
 *
 * Accumulates, for every (set, cell) pair, the seven-coefficient sum
 *   lam += a0 + a6*ln(T9) + sum_{k=1..5} a_k * T9^((2k-5)/3)
 * where T9 = temp[cell] * 1e-9.  Contributions are added with atomicAdd
 * because several threads along the k launch dimension target the same
 * lam entry.  NOTE(review): atomicAdd on double requires SM60+.
 *
 * @param a: Pointer to coefficient matrix.
 * @param temp: Pointer to temperature array.
 * @param lam: Matrix to write the results to.
 * @param nsets: Number of sets / number of rows in coefficient matrix.
 * @param ncells: Number of cells / length of temperature array.
 * @param ncoeff: Number of coefficients / number of columns in coefficient matrix.
 */
template <class dtype>
__device__ void rates(dtype *a, dtype *temp, dtype *lam, int nsets, int ncells, int ncoeff)
{
    // Grid-stride start/step in all three launch dimensions.
    int istart = blockIdx.x * blockDim.x + threadIdx.x;
    int istep = blockDim.x * gridDim.x;
    int jstart = blockIdx.y * blockDim.y + threadIdx.y;
    int jstep = blockDim.y * gridDim.y;
    int kstart = blockIdx.z * blockDim.z + threadIdx.z;
    int kstep = blockDim.z * gridDim.z;
    for(int i = istart; i < nsets; i += istep) {
        for(int j = jstart; j < ncells; j += jstep) {
            dtype temp9 = temp[j] * 1e-9;   // temperature in units of 10^9 K
            for(int k = kstart; k < ncoeff; k += kstep) {
                switch(k) {
                    case 0:   // constant term
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k]);
                        break;
                    case 6:   // logarithmic term
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * log(temp9));
                        break;
                    default:  // power-law terms: exponents (2k-5)/3 for k = 1..5
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * pow(temp9, (2 * k - 5) / 3.0));
                        break;
                }
            }
        }
    }
}

// Single-precision specialization: identical structure, but uses the float
// math functions (logf/powf) and a float literal exponent divisor.
template <>
__device__ void rates<float>(float *a, float *temp, float *lam, int nsets, int ncells, int ncoeff)
{
    int istart = blockIdx.x * blockDim.x + threadIdx.x;
    int istep = blockDim.x * gridDim.x;
    int jstart = blockIdx.y * blockDim.y + threadIdx.y;
    int jstep = blockDim.y * gridDim.y;
    int kstart = blockIdx.z * blockDim.z + threadIdx.z;
    int kstep = blockDim.z * gridDim.z;
    for(int i = istart; i < nsets; i += istep) {
        for(int j = jstart; j < ncells; j += jstep) {
            float temp9 = temp[j] * 1e-9;
            for(int k = kstart; k < ncoeff; k += kstep) {
                switch(k) {
                    case 0:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k]);
                        break;
                    case 6:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * logf(temp9));
                        break;
                    default:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * powf(temp9, (2 * k - 5) / 3.0f));
                        break;
                }
            }
        }
    }
}

// Stages the 6x7 coefficient matrix and a log-spaced temperature grid in
// shared memory (initialized by the thread with flat index 0), then
// evaluates ln(lambda) for every (set, cell) pair into lam.
template <class dtype, int nsets, int ncells, int ncoeff>
__global__ void exec(dtype *lam)
{
    int xInd = blockIdx.x * blockDim.x + threadIdx.x;
    int yInd = blockIdx.y * blockDim.y + threadIdx.y;
    int ySize = blockDim.y * gridDim.y;
    int zInd = blockIdx.z * blockDim.z + threadIdx.z;
    int zSize = blockDim.z * gridDim.z;
    int ind = xInd * ySize * zSize + yInd * zSize + zInd;   // flat thread id

    // Tensors
    __shared__ dtype a[nsets * ncoeff];
    // These are all of the sets in reaclib with two nuclei as reactants
    // where one of them is carbon-12.
    if(ind == 0) {
        // c12 + c12 -> n + mg23
        a[0] = -12.8056; a[1] = -30.1498; a[2] = 0.0; a[3] = 11.4826;
        a[4] = 1.82849; a[5] = -0.34844; a[6] = 0.0;
        // c12 + c12 -> p + na23
        a[7] = 60.9649; a[8] = 0.0; a[9] = -84.165; a[10] = -1.4191;
        a[11] = -0.114619; a[12] = -0.070307; a[13] = -0.666667;
        // c12 + c12 -> he4 + ne20
        a[14] = 61.2863; a[15] = 0.0; a[16] = -84.165; a[17] = -1.56627;
        a[18] = -0.0736084; a[19] = -0.072797; a[20] = -0.666667;
        // he4 + he4 + he4 -> c12 (1)
        a[21] = -0.971052; a[22] = 0.0; a[23] = -37.06; a[24] = 29.3493;
        a[25] = -115.507; a[26] = -10.0; a[27] = -1.33333;
        // he4 + he4 + he4 -> c12 (2)
        a[28] = -11.7884; a[29] = -1.02446; a[30] = -23.57; a[31] = 20.4886;
        a[32] = -12.9882; a[33] = -20.0; a[34] = -2.16667;
        // he4 + he4 + he4 -> c12 (3)
        a[35] = -24.3505; a[36] = -4.12656; a[37] = -13.49; a[38] = 21.4259;
        a[39] = -1.34769; a[40] = 0.0879816; a[41] = -13.1653;
    }

    // Temperatures log-spaced starting at 1e7, spanning three decades.
    __shared__ dtype temp[ncells];
    if(ind == 0) {
        #pragma unroll
        for(int i = 0; i < ncells; i++) {
            temp[i] = pow(10.0, 7 + i * 3.0 / ncells);
        }
    }
    __syncthreads();   // publish a[] and temp[] before any thread reads them

    /*******************************************
     * Compute ln(lambda) for each set and cell *
     *******************************************/
    rates<dtype>(a, temp, lam, nsets, ncells, ncoeff);
}

int main()
{
    // Tensor dimensions
    const int nsets = 6, ncells = 10, ncoeff = 7;

    // Results matrix (managed memory: shared between host and device).
    double *lam;
    cudaError_t code = cudaMallocManaged(&lam, nsets * ncells * sizeof(double));
    if(code != cudaSuccess)
        return -1;
    for(int i = 0; i < nsets; i++) {
        for(int j = 0; j < ncells; j++) {
            // NOTE(review): float literal stored into a double; harmless for zero.
            lam[i * ncells + j] = 0.0f;
        }
        // NOTE(review): prints one blank line per set during init — looks
        // like a debugging leftover.
        printf("\n");
    }

    // Compute the rates: a single block, one thread per (set, cell, coeff).
    dim3 threadsPerBlock(nsets, ncells, ncoeff);
    dim3 numBlocks(1, 1, 1);
    exec<double, nsets, ncells, ncoeff><<<numBlocks, threadsPerBlock>>>(lam);

    // Write lambda to file (exp of the accumulated logs); echo ln(lambda)
    // to stdout.
    cudaDeviceSynchronize();   // required before the host touches managed lam
    std::ofstream file;
    file.open("double.dat");
    for(int i = 0; i < nsets; i++) {
        for(int j = 0; j < ncells; j++) {
            printf("%8.3f ", lam[i * ncells + j]);
            file << exp(lam[i * ncells + j]);
            if(j != ncells - 1)
                file << " ";
        }
        file << "\n";
        printf("\n");
    }
    file.close();
    return 0;
}
1,955
#include "includes.h" __global__ void matrixPolyderNewLayout(const float *coefImg, float *coefImgDer, const int w, const int h, const int m, size_t yOffset){ size_t x = threadIdx.x + blockDim.x * blockIdx.x; size_t y = threadIdx.y + blockDim.y * blockIdx.y; if(x >= w || y >= h) return; size_t xOffsetDer = m-1; size_t yOffsetDer = w*xOffsetDer; size_t xOffsetCoef = m; size_t yOffsetCoef = w*xOffsetCoef; for (int i = 0; i < m - 1; ++i) //if of degree d=2, we have n=3 coeffs ax'2 + bx +c { size_t idxDer = x*xOffsetDer + y*yOffsetDer + i; size_t idxCoef = x*xOffsetCoef + y*yOffsetCoef + i; coefImgDer[idxDer]=coefImg[idxCoef]*(m-i-1); } }
1,956
#include <stdio.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <cuda.h>

// Device global variables — filled from the command line by init() via
// cudaMemcpyToSymbol before the kernel launch.
__device__ double c_x_min;
__device__ double c_x_max;
__device__ double c_y_min;
__device__ double c_y_max;
__device__ double pixel_width;
__device__ double pixel_height;
__device__ int iteration_max = 200;   // escape-iteration cap
__device__ int image_size;
__device__ int image_buffer_size;     // image_size^2 (pixels)
__device__ int num_threads;
__device__ int th_per_block;
__device__ int pixels_per_thread;     // image_buffer_size / num_threads (floor)
__device__ int gradient_size = 16;
// 17-entry RGB palette; entry 16 (index gradient_size) colors points that
// never escape within iteration_max iterations.
__device__ int colors[17][3] = {
    {66, 30, 15}, {25, 7, 26}, {9, 1, 47}, {4, 4, 73}, {0, 7, 100},
    {12, 44, 138}, {24, 82, 177}, {57, 125, 209}, {134, 181, 229},
    {211, 236, 248}, {241, 233, 191}, {248, 201, 95}, {255, 170, 0},
    {204, 128, 0}, {153, 87, 0}, {106, 52, 3}, {16, 16, 16},
};

// Host global variables
dim3 num_blocks, threads_per_block;
int num_blocks_x, th_per_block_x;
int num_blocks_y, th_per_block_y;
int host_image_buffer_size;
unsigned char* image_buffer_host;
int i_x_max;
int i_y_max;

// Print `msg` plus the CUDA error string when err != cudaSuccess.
// Returns 1 on error, 0 otherwise.
int check (cudaError_t& err, const char* msg)
{
    if (err != cudaSuccess) {
        printf ("%s", msg);
        printf (" | Error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}

// Usage message for malformed command lines.
void print_bad_arguments ()
{
    printf("usage: ./mandelbrot_seq c_x_min c_x_max c_y_min c_y_max"
           " image_size NUM_BLOCKS TH_PER_BLOCK \n");
    printf("examples with image_size = 11500:\n");
    printf(" Full Picture: ./mandelbrot_cuda -2.5 1.5 -2.0 2.0 11500 4 64 \n");
    printf(" Seahorse Valley: ./mandelbrot_cuda -0.8 -0.7 0.05 0.15 11500 4 64 \n");
    printf(" Elephant Valley: ./mandelbrot_cuda 0.175 0.375 -0.1 0.1 11500 4 64 \n");
    printf(" Triple Spiral Valley: ./mandelbrot_cuda -0.188 -0.012 0.554 0.754 11500 4 64 \n");
}

// Get global variables from command line args and publish them (plus the
// derived pixel/thread geometry) to the device symbols above.
// Accepts 8 args (1-D grid and block) or 10 args (2-D grid and block).
void init (int argc, char* argv[])
{
    // host variables
    double host_c_x_min, host_c_x_max;
    double host_c_y_min, host_c_y_max;
    int host_image_size;
    if (argc < 8) {
        print_bad_arguments();
        exit(0);
    }
    else {
        num_blocks_y = th_per_block_y = 1;   // defaults for the 1-D form
        sscanf(argv[1], "%lf", &host_c_x_min);
        sscanf(argv[2], "%lf", &host_c_x_max);
        sscanf(argv[3], "%lf", &host_c_y_min);
        sscanf(argv[4], "%lf", &host_c_y_max);
        sscanf(argv[5], "%d", &host_image_size);
        sscanf(argv[6], "%d", &num_blocks_x);
        if (argc == 8) {
            sscanf(argv[7], "%d", &th_per_block_x);
        }
        else if (argc == 10) {
            sscanf(argv[7], "%d", &num_blocks_y);
            sscanf(argv[8], "%d", &th_per_block_x);
            sscanf(argv[9], "%d", &th_per_block_y);
        }
        else {
            print_bad_arguments();
            exit(0);
        }
        host_image_buffer_size = host_image_size * host_image_size;
        int host_th_per_block = th_per_block_x * th_per_block_y;
        int host_num_threads = host_th_per_block * num_blocks_x * num_blocks_y;
        int host_pixels_per_thread = host_image_buffer_size / host_num_threads;
        i_x_max = host_image_size;
        i_y_max = host_image_size;
        double host_pixel_width = (host_c_x_max - host_c_x_min) / i_x_max;
        double host_pixel_height = (host_c_y_max - host_c_y_min) / i_y_max;

        // copy host variables to device
        cudaError_t err = cudaSuccess;
        cudaMemcpyToSymbol(c_x_min, &host_c_x_min, sizeof(double));
        cudaMemcpyToSymbol(c_x_max, &host_c_x_max, sizeof(double));
        cudaMemcpyToSymbol(c_y_min, &host_c_y_min, sizeof(double));
        cudaMemcpyToSymbol(c_y_max, &host_c_y_max, sizeof(double));
        cudaMemcpyToSymbol(image_size, &host_image_size, sizeof(int));
        cudaMemcpyToSymbol(num_threads, &host_num_threads, sizeof(int));
        cudaMemcpyToSymbol(th_per_block, &host_th_per_block, sizeof(int));
        cudaMemcpyToSymbol(pixel_width, &host_pixel_width, sizeof(double));
        cudaMemcpyToSymbol(pixel_height, &host_pixel_height, sizeof(double));
        cudaMemcpyToSymbol(pixels_per_thread, &host_pixels_per_thread, sizeof(int));
        cudaMemcpyToSymbol(image_buffer_size, &host_image_buffer_size, sizeof(int));
        err = cudaGetLastError();   // one check covers all the copies above
        if (check(err, "Failed to copy command line args to device"))
            exit(EXIT_FAILURE);
    };
};

// Map an iteration count to an RGB triple and store it at pixel `pix` in
// the flat 3-bytes-per-pixel buffer.  Non-escaping points get the final
// palette entry; escaping points cycle through the 16-color gradient.
__device__ void update_rgb_buffer(unsigned char* image_buffer_device, int iteration, int pix)
{
    int color;
    if (iteration == iteration_max) {
        image_buffer_device[pix * 3 + 0] = colors[gradient_size][0];
        image_buffer_device[pix * 3 + 1] = colors[gradient_size][1];
        image_buffer_device[pix * 3 + 2] = colors[gradient_size][2];
    }
    else {
        color = iteration % gradient_size;
        image_buffer_device[pix * 3 + 0] = colors[color][0];
        image_buffer_device[pix * 3 + 1] = colors[color][1];
        image_buffer_device[pix * 3 + 2] = colors[color][2];
    };
};

// Each thread iterates the Mandelbrot map for a contiguous run of
// pixels_per_thread pixels, plus (possibly) one extra "remaining" pixel
// from the tail that the integer division left uncovered.
__global__ void compute_mandelbrot(unsigned char* image_buffer_device)
{
    double z_x;
    double z_y;
    double z_x_squared;
    double z_y_squared;
    double escape_radius_squared = 4;

    int iteration;
    int i_x;
    int i_y;
    double c_x;
    double c_y;

    // Calculates pixel where current thread will start its work
    int my_block = blockIdx.x + gridDim.x * blockIdx.y;
    int my_thread_in_block = threadIdx.x + blockDim.x * threadIdx.y;
    int my_thread = my_block * th_per_block + my_thread_in_block;

    /* what thread will process each pixel ?
     *
     * Example: image 5x5 -> buffer_size = 25
     * 3 blocks of 3 threads -> 9 threads
     *
     * 2 4 7 - -
     * 1 4 6 - -
     * 1 3 6 8 -
     * 0 3 5 8 -
     * 0 2 5 7 -
     *
     * and the remaining pixels we process separetedly,
     * each thread process its remaining pixel in the end
     *
     * 2 4 7 5 0
     * 1 4 6 6 1
     * 1 3 6 8 2
     * 0 3 5 8 3
     * 0 2 5 7 4
     */

    // Its easier to process by pixels instead of by row-collunm
    int pix = my_thread * pixels_per_thread;
    int end_pixel = pix + pixels_per_thread;
    int my_rem_pixel = image_buffer_size - my_thread - 1;   // this thread's tail pixel

    while (pix <= my_rem_pixel) {
        i_y = pix / image_size;
        i_x = pix % image_size;
        c_y = c_y_min + i_y * pixel_height;
        if (fabs(c_y) < pixel_height / 2) {
            c_y = 0.0;   // snap near-zero imaginary part to the real axis
        };
        c_x = c_x_min + i_x * pixel_width;
        z_x = 0.0;
        z_y = 0.0;
        z_x_squared = 0.0;
        z_y_squared = 0.0;
        // Standard escape-time iteration: z <- z^2 + c until |z|^2 >= 4.
        for (iteration = 0;
             iteration < iteration_max && \
             ((z_x_squared + z_y_squared) < escape_radius_squared);
             iteration++) {
            z_y = 2 * z_x * z_y + c_y;
            z_x = z_x_squared - z_y_squared + c_x;
            z_x_squared = z_x * z_x;
            z_y_squared = z_y * z_y;
        };
        update_rgb_buffer(image_buffer_device, iteration, pix);
        pix++;
        // Treat remaining pixel: after the contiguous run, jump to this
        // thread's tail pixel (if it has not been passed already).
        if (pix == end_pixel) {
            if (my_rem_pixel >= pix) pix = my_rem_pixel;
            else break;
        }
    }
}

// Allocate the host and device RGB buffers (`size` bytes each).
void allocate_image_buffer(unsigned char** image_buffer_device, size_t size)
{
    // Our buffer, instead of a matrix, will be a continuous array
    // Allocate host memory
    image_buffer_host = (unsigned char*)malloc(sizeof(unsigned char) * size);

    // Allocate device memory
    cudaError_t err = cudaSuccess;
    err = cudaMalloc((void**)(image_buffer_device), size);

    // Test alloc success
    if (image_buffer_host == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    if (check(err, "Failed to allocate device image buffer"))
        exit(EXIT_FAILURE);
};

// Dump the host RGB buffer as a binary PPM (P6) file named output.ppm.
void write_to_file()
{
    FILE* file;
    const char* filename = "output.ppm";
    const char* comment = "# ";
    int max_color_component_value = 255;

    file = fopen(filename, "wb");
    fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment, i_x_max, i_y_max,
            max_color_component_value);
    for (int i = 0; i < host_image_buffer_size; i++) {
        fwrite(image_buffer_host + 3*i, 1, 3, file);
    };
    fclose(file);
};

int main(int argc, char* argv[])
{
    init(argc, argv);
    cudaError_t err;

    int rgb_size = 3;   // bytes per pixel
    size_t size = host_image_buffer_size * rgb_size;
    unsigned char* image_buffer_device;
    allocate_image_buffer(&image_buffer_device, size);

    // Launch compute_mandelbrot CUDA Kernel
    num_blocks = dim3(num_blocks_x, num_blocks_y);
    threads_per_block = dim3(th_per_block_x, th_per_block_y);
    compute_mandelbrot<<<num_blocks, threads_per_block>>>(image_buffer_device);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (check(err, "Failed to launch compute_mandelbrot kernel"))
        exit(EXIT_FAILURE);

    // Copy the device result vector in device memory to the host result vector
    // in host memory.
    err = cudaMemcpy(image_buffer_host, image_buffer_device, size, cudaMemcpyDeviceToHost);
    if (check(err, "Failed to copy vector from device to host"))
        exit(EXIT_FAILURE);

    // Free device global memory
    err = cudaFree(image_buffer_device);
    if (check(err, "Failed to free device vector"))
        exit(EXIT_FAILURE);

    write_to_file();

    // Free host memory
    free(image_buffer_host);
    return 0;
}
1,957
#include<stdio.h> __global__ void vecAdd(int *c_d,int *a_d,int *b_d) { int idx=threadIdx.x; c_d[idx]=a_d[idx]+b_d[idx]; } int main() { const int N=12; int a_h[N],b_h[N],c_h[N]; for(int i=0;i<12;i++) { a_h[i]=i; b_h[i]=i*2; } //initialize gpu pointer int *a_d,*b_d,*c_d; const int size=N*sizeof(int); //allocate memory on gpu cudaMalloc((void **) &a_d,size); cudaMalloc((void **) &b_d,size); cudaMalloc((void **) &c_d,size); cudaMemcpy(a_d,a_h,size,cudaMemcpyHostToDevice); cudaMemcpy(b_d,b_h,size,cudaMemcpyHostToDevice); //call the kernal vecAdd<<<1,N>>>(c_d,a_d,b_d); cudaMemcpy(c_h,c_d,size,cudaMemcpyDeviceToHost); for(int i=0;i<N;i++) { printf("%d",c_h[i]); printf("\n"); } cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); } //output console /* 0 3 6 9 12 15 18 21 24 27 30 33 */
1,958
#include <stdlib.h> #include <cuda.h> #include <stdio.h> #include <malloc.h> __host__ void fill_vector(float *V, int len){ float aux = 5.0; for (int i = 0; i < len; i++) { V[i] = ((float)rand() / (float)(RAND_MAX)) * aux ; } } __host__ void print(float *V, int len){ for (int i = 0; i < len; i++) { printf("%.2f ", V[i]); } printf("\n"); } __global__ void MatrixKernel(float* d_M, float* d_R, int n){ //calculate row index of element int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) d_R[i] = 2 * d_M[i]; return; } int main(){ int n = 100; float size = n * sizeof(float); //Manejo de errores en cuda cudaError_t error = cudaSuccess; //CPU float *h_M, *h_R; h_M = (float*)malloc(size); h_R = (float*)malloc(size); //GPU float *d_M, *d_R; error = cudaMalloc((void**)&d_M, size); if (error != cudaSuccess){ printf("Error solicitando memoria en la GPU para d_M\n"); exit(-1); } error = cudaMalloc((void**)&d_R, size); if (error != cudaSuccess){ printf("Error solicitando memoria en la GPU para d_R\n"); exit(-1); } //Fill Matrix fill_vector(h_M, n); print(h_M, n); //Copy from CPU to GPU cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice); //Dimension kernel dim3 dimGrid(ceil(n/10.0), 1, 1); dim3 dimBlock(10,1,1); MatrixKernel<<<dimGrid, dimBlock>>>(d_M, d_R, n); cudaDeviceSynchronize(); cudaMemcpy(h_R, d_R, size, cudaMemcpyDeviceToHost); print(h_R, n); free(h_M); free(h_R); cudaFree(d_M); cudaFree(d_R); return 0; }
1,959
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> __global__ void kernel(void){ printf("hello world from block %d, thread %d\n", blockIdx.x, threadIdx.x); } int main(void){ kernel <<<10, 10>>> (); cudaDeviceSynchronize(); return 0; }
1,960
#include "includes.h"

const int  Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;

//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//
// Spatial filter: for the template selected by blockIdx.x, project every
// time sample of `data` onto that template's Nrank spatial components
// (rows of U), restricted to the NchanU channels listed for it in the
// iC/iW lookup tables.  Result:
//   dprod[t + NT*bid + k*NT*Nfilt] = sum_i sU[i,k] * data[t, iU[i]]
// Params carries the problem sizes: [0]=NT, [1]=Nfilt, [6]=Nrank,
// [9]=Nchan, [10]=NchanU.  Assumes NchanU <= 32 and NchanU*Nrank <= 96.
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void spaceFilter(const double *Params, const float *data, const float *U, const int *iC, const int *iW, float *dprod){

  volatile __shared__ float sU[32*NrankMax];   // staged spatial components
  volatile __shared__ int iU[32];              // staged channel indices
  float x;
  int tid, bid, i, k, Nrank, Nchan, NT, Nfilt, NchanU;

  tid = threadIdx.x;
  bid = blockIdx.x;

  NT     = (int) Params[0];
  Nfilt  = (int) Params[1];
  Nrank  = (int) Params[6];
  NchanU = (int) Params[10];
  Nchan  = (int) Params[9];

  // Stage this template's channel list, then its spatial components, in
  // shared memory.  Each load is published with a barrier before use.
  if (tid < NchanU)
      iU[tid] = iC[tid + NchanU * iW[bid]];
  __syncthreads();

  if (tid < NchanU*Nrank)
      sU[tid] = U[iU[tid%NchanU] + Nchan * bid + Nchan * Nfilt * (tid/NchanU)];
  __syncthreads();

  // Block-stride loop over time samples.
  // FIX: the original placed a __syncthreads() inside this loop.  When NT
  // is not a multiple of blockDim.x, threads leave the loop at different
  // iterations, so the barrier sat in divergent control flow — undefined
  // behavior (potential hang).  Nothing writes shared memory inside the
  // loop, so the barrier was unnecessary and has been removed.
  while (tid < NT){
      for (k = 0; k < Nrank; k++){
          x = 0.0f;
          for (i = 0; i < NchanU; i++)
              x += sU[i + NchanU*k] * data[tid + NT * iU[i]];
          dprod[tid + NT*bid + k*NT*Nfilt] = x;
      }
      tid += blockDim.x;
  }
}
1,961
// tdfc-cuda backend autocompiled body file // tdfc version 1.160 // Fri May 27 17:47:08 2011 #include <stdio.h> __global__ void tdfc_vadd(double cc_a,double* cc_x,double* cc_y,double* cc_z,int N ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<N) { { cc_z[idx] = (((cc_a*cc_x[idx])+cc_y[idx])); } } } //tdfc_vadd
1,962
#include <cuda_runtime.h> #include <stdio.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if (error != cudaSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code:%d, reason: %s", error, cudaGetErrorString(error));\ exit(-10 * error);\ }\ }\ void initHostMatrix(int *h_A, int nxy) { for (int i = 0; i < nxy; ++i) { h_A[i] = i; } } void printMatrix(int *h_A, int nx, int ny) { for (int i = 0; i < ny; ++i) { for (int j = 0; j < nx; ++j) { printf("%d\t", h_A[i * nx + j]); } printf("\n"); } } __global__ void printThreadIndex(int *d_A, const int nx, const int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; printf("thread_id (%d, %d), block_id(%d, %d), coordinate(%d, %d), " "global index %d value %d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, d_A[idx]); } int main(void) { //get device info int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("using device %d : %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); //set matrix dimension int nx = 8; int ny = 6; int nxy = nx * ny; int nBytes = nxy * sizeof(int); //malloc host memory int *h_A; h_A = (int*)malloc(nBytes); //init host matrix initHostMatrix(h_A, nxy); printMatrix(h_A, nx, ny); int *d_A; cudaMalloc((void **)&d_A, nBytes); //transfer data to device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); //set up execution configuration dim3 block(4, 2); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); //invoke the kernel printThreadIndex <<<grid, block>>>(d_A, nx, ny); cudaDeviceSynchronize(); //free host and device memory cudaFree(d_A); free(h_A); //reset device cudaDeviceReset(); return 0; }
1,963
extern "C" { __global__ void img_reverse(uchar3* d_idata, uchar3* d_odata, int width, int height){ int xIndex = threadIdx.x + blockIdx.x * blockDim.x; int yIndex = threadIdx.y + blockIdx.y * blockDim.y; int idx = yIndex * width + xIndex; if (xIndex < width && yIndex < height){ uchar3 rgb = d_idata[idx]; d_odata[idx].x = 255 - rgb.x; d_odata[idx].y = 255 - rgb.y; d_odata[idx].z = 255 - rgb.z; } } }
1,964
//TEST CASE PASS IN GPU_VERIFY. IT IS NOT VERIFY ARRAY BOUNDS VIOLATION #include <stdio.h> #include <cuda.h> #include <assert.h> #define N 2//64 __global__ void foo(int* p) { int* q; q = p + 1; p[threadIdx.x] = q[threadIdx.x]; }
1,965
#include "includes.h" __global__ void rfi_gpu_kernel(unsigned short *d_input, int nchans, int nsamp) { int c = blockIdx.x * blockDim.x + threadIdx.x; int count =0; float stdev = 1000000.0f; float mean = 0.0f; float sum = 0.0f; float sum_squares = 0.0f; float cutoff = (4.0f * stdev); for(int out=0; out<4; out++) { sum = 0.0f; sum_squares = 0.0f; count = 0; for(int t = 0; t < nsamp; t++) { float data=(float)d_input[c*nsamp + t]; if(data < (mean + cutoff) && data > (mean - cutoff) ) { sum+=data; sum_squares+=(data*data); count++; } } mean = (sum/(float)count); sum_squares = ((sum_squares / count) - (mean * mean)); stdev = sqrt(sum_squares); cutoff = (4.0f * stdev); } for(int t = 0; t < nsamp-4; t++) { float data=0.0f; for(int x = 0; x<4; x++) data+=(float)d_input[c*nsamp + t + x]; data=data*0.25f; //float data=(float)d_input[c*nsamp + t]; if(data > (mean + cutoff) || data < (mean - cutoff)) { d_input[c*nsamp + t]=(unsigned short)mean; } } }
1,966
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <math.h>

/* Jacobi-style heat-plate relaxation on an N x M grid with fixed hot/cold
   cells, iterated until MAXIT or until no cell changes by more than 0.05.
   Several GPU reduction variants decide when to stop. */
#define MAXIT 360
#define N 1024
#define M 1024

/* Pitched device buffers. NOTE(review): lkeepgoing is declared int* but is
   allocated with an M*sizeof(float) row width and is accessed through float*
   casts in doCheck/reduceSum; the values stored are float bit patterns read
   back into int arithmetic. int and float are both 4 bytes here, but confirm
   this type punning is intentional. */
int *lkeepgoing;
float *iplate;
float *oplate;
float *tmp;

/* Return the current time in seconds, using a double precision number. */
double When()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}

/* Debug kernel (unused by main): writes each cell's linear index into a
   pitched matrix. */
__global__ void kernel(float * d_matrix, size_t pitch)
{
    int colsPerThread = 1; //32 threads per block ,256 cells in block-> 256/32
    int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
    for (int j = rowstart; j < rowstart+colsPerThread; j ++) {
        float* row_d_matrix = (float*)((char*)d_matrix + j*pitch);
        int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
        for (int i = colstart; i < colstart + colsPerThread; i ++) {
            row_d_matrix[i] = j * M + i;
        }
    }
}

/* Initialize both plates: boundary cells 0.0, a set of fixed hot cells 100.0,
   everything else 50.0. One thread per cell (colsPerThread == 1). */
__global__ void fill(float * iplate, size_t ipitch,float * oplate, size_t opitch)
{
    int index;
    int colsPerThread = 1; //32 threads per block ,256 cells in block-> 256/32
    int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
    for (int j = rowstart ; j < rowstart+colsPerThread; j ++) {
        float* row_iplate = (float*)((char*)iplate + j*ipitch);
        float* row_oplate = (float*)((char*)oplate + j*opitch);
        int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
        for (int i = colstart; i < colstart + colsPerThread; i ++) {
            // row_iplate[i] = (j * M + i) + (j * M + i);
            index = j * M + i;
            /* NOTE(review): `index % (M-1)` as a right-edge test looks off by
               one (it also fires mid-row); verify against the intended
               boundary layout. */
            if (index <= M || (index % (M-1)) == 0){
                row_iplate[i] = 0.0;
                row_oplate[i] = 0.0;
            }
            else if (index >= 67100672 || index == (200 * M + 500) || (index < (400*M+331) && index > (400 * M +0))){// might be one off
                row_iplate[i] = 100.0;
                row_oplate[i] = 100.0;
            }
            else{
                row_iplate[i] = 50.0;
                row_oplate[i] = 50.0;
            }
        }
    }
}

/* One Jacobi sweep: new cell = (4 neighbors + 4*self) / 8, reading oplate and
   writing iplate. Boundary rows/columns and fixed hot cells are skipped or
   re-pinned. NOTE(review): j == N and i == M can never be true since j < N
   and i < M by construction -- presumably meant N-1 / M-1; confirm. */
__global__ void doCalc(float * iplate, size_t ipitch,float * oplate, size_t opitch)
{
    int index;
    int colsPerThread = 1; //32 threads per block ,256 cells in block-> 256/32
    float* n_row_oplate;
    float* s_row_oplate;
    int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
    for (int j = rowstart ; j < rowstart+colsPerThread; j ++) {
        float* row_iplate = (float*)((char*)iplate + j*ipitch);
        float* row_oplate = (float*)((char*)oplate + j*opitch);
        if (j == 0 || j == N){
            continue;
        } else {
            n_row_oplate = (float*)((char*)oplate + (j-1)*opitch);
            s_row_oplate = (float*)((char*)oplate + (j+1)*opitch);
        }
        int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
        for (int i = colstart; i < colstart + colsPerThread; i ++) {
            // row_iplate[i] = (j * M + i) + (j * M + i);
            index = j * M + i;
            if (i == 0 || i == M){
                continue;
            }
            if (index >= 67100672 || index == (200 * M + 500) || (index < (400*M+331) && index > (400 * M +0))){// might be one off
                row_iplate[i] = 100.0;
            }
            else{
                row_iplate[i] = (( s_row_oplate[i]//bottom
                    + n_row_oplate[i]//top
                    + row_oplate[i+1]//right
                    + row_oplate[i-1])//left
                    + (4.0 * row_oplate[i])) / 8.0;
            }
        }
    }
}

/* Mark each interior cell 1 if it moved by more than 0.05 this sweep, else 0.
   NOTE(review): writes float 1/0 through a float* view of the int lkeepgoing
   buffer -- see the note at the globals. */
__global__ void doCheck(float * iplate, size_t ipitch,float * oplate, size_t opitch,int *lkeepgoing,size_t lpitch)
{
    float delta = 0.0;
    //int index;
    int colsPerThread = 1; //32 threads per block ,256 cells in block-> 256/32
    int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
    for (int j = rowstart ; j < rowstart+colsPerThread; j ++) {
        float* row_iplate = (float*)((char*)iplate + j*ipitch);
        float* row_oplate = (float*)((char*)oplate + j*opitch);
        float* row_lplate = (float*)((char*)lkeepgoing + j*lpitch);
        if (j == 0 || j == N){
            continue;
        }
        int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
        for (int i = colstart; i < colstart + colsPerThread; i ++) {
            // index = j * M + i;
            if (i == 0 || i == M){
                continue;
            }
            delta = fabs((row_iplate[i] - row_oplate[i]));
            if (delta > 0.0500) {
                row_lplate[i] = 1; // keep going
            }else{
                row_lplate[i] = 0; // steady state
            }
        }
    }
}

/* Sum the keep-going flags of a block's tile into odata[blockIdx.x], done by
   thread 0 of each block. NOTE(review): blocks that share blockIdx.x but have
   different blockIdx.y all zero and accumulate odata[blockIdx.x] without
   atomics -- this races across the y-dimension of the grid; confirm whether
   an over- or under-count is tolerated by the convergence test. */
__global__ void reduceSum(int *lkeepgoing, size_t lpitch, int *odata)
{
    int colsPerThread = 1; //32 threads per block ,256 cells in block-> 256/32
    if(threadIdx.x == 0) {
        odata[blockIdx.x] = 0;
        int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
        for (int j = rowstart ; j < rowstart+blockDim.y; j ++) {
            float* row_lplate = (float*)((char*)lkeepgoing + j*lpitch);
            if (j == 0 || j == N){
                continue;
            }
            int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
            for (int i = colstart; i < colstart + blockDim.x; i ++) {
                odata[blockIdx.x] += row_lplate[i];
            }
        }
    }
}

/* Shared-memory block sum, interleaved addressing (tid % (2*s) == 0 variant;
   divergent warps, slowest of the three). */
__global__ void reduce1(int *g_idata, int *g_odata)
{
    extern __shared__ int sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        if (tid % (2*s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

/* Shared-memory block sum, strided-index variant (removes the modulo but
   introduces shared-memory bank conflicts). */
__global__ void reduce2(int *g_idata, int *g_odata)
{
    extern __shared__ int sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        int index = 2 * s * tid;
        if (index < blockDim.x) {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

/* Shared-memory block sum, sequential addressing (halving stride; conflict-
   free, the variant actually used by main). */
__global__ void reduce3(int *g_idata, int *g_odata)
{
    extern __shared__ int sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for (unsigned int s = (blockDim.x/2) ; s>0; s>>=1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

/* Driver: fill the plates, then iterate doCalc/doCheck/reduceSum + a final
   single-block reduce (timed over 10000 repeats) until converged or MAXIT.
   NOTE(review): lkeepgoing's pitch is requested with sizeof(float) even
   though it is an int buffer -- same byte width here, but see the globals
   note. */
int main()
{
    //h_matrix = (float *) malloc(M * N * sizeof(float));
    //dc_matrix = (float *) malloc(M * N * sizeof(float));
    double t0, tottime, start = When();
    size_t ipitch;
    size_t opitch;
    size_t lpitch;
    cudaMallocPitch(&iplate, &ipitch, M * sizeof(float), N);
    cudaMallocPitch(&oplate, &opitch, M * sizeof(float), N);
    cudaMallocPitch(&lkeepgoing, &lpitch, M * sizeof(float), N);
    // cudaMallocPitch(&d_matrix, &pitch, M * sizeof(float), N);
    dim3 threadsPerBlock(32, 32, 1); // number of threads per block
    dim3 numBlocks(N/threadsPerBlock.x,M/threadsPerBlock.y, 1); // number of blocks in grid 16x16
    fill<<<numBlocks, threadsPerBlock>>>(iplate, ipitch, oplate, opitch);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        printf("%s\n",cudaGetErrorString(error));
        return 0;
    }
    int *keepgoing_single;
    int *keepgoing_sums;
    int keepgoing;
    int iteration;
    cudaMalloc((void **)&keepgoing_single, 1 * sizeof(int));
    keepgoing = 1;
    cudaMalloc((void **)&keepgoing_sums, M * sizeof(int));
    float totaltime = 0.0;
    for (iteration = 0; (iteration < MAXIT) && keepgoing; iteration++) {
        doCalc<<< numBlocks, threadsPerBlock >>>(iplate, ipitch, oplate, opitch);
        cudaDeviceSynchronize();
        error = cudaGetLastError();
        if(error != cudaSuccess)
        {
            printf("%s\n",cudaGetErrorString(error));
            return 0;
        }
        doCheck<<< numBlocks, threadsPerBlock >>>(iplate, ipitch, oplate, opitch, lkeepgoing, lpitch);
        cudaDeviceSynchronize();
        error = cudaGetLastError();
        if(error != cudaSuccess)
        {
            printf("%s\n",cudaGetErrorString(error));
            return 0;
        }
        reduceSum<<< numBlocks, threadsPerBlock>>>(lkeepgoing,lpitch, keepgoing_sums );
        cudaDeviceSynchronize();
        error = cudaGetLastError();
        if(error != cudaSuccess)
        {
            printf("%s\n",cudaGetErrorString(error));
            return 0;
        }
        // Now we have the sum for each row in the first column,
        // reduce to one value
        t0 = When();
        int timeit;
        for(timeit = 0; timeit < 10000; timeit++){
            //reduce1<<<1, 1024, 1024*sizeof(int)>>>(keepgoing_sums, keepgoing_single);
            //reduce2<<<1, 1024, 1024*sizeof(int)>>>(keepgoing_sums, keepgoing_single);
            reduce3<<<1, 1024, 1024*sizeof(int)>>>(keepgoing_sums, keepgoing_single);
            cudaDeviceSynchronize();
            error = cudaGetLastError();
            if(error != cudaSuccess)
            {
                printf("%s\n",cudaGetErrorString(error));
                return 0;
            }
        }
        tottime = When()-t0;
        keepgoing = 0;
        cudaMemcpy(&keepgoing, keepgoing_single, 1 * sizeof(int), cudaMemcpyDeviceToHost);
        totaltime += (tottime/10000);
        /* swap the new value pointer with the old value pointer */
        tmp = oplate;
        oplate = iplate;
        iplate = tmp;
    }
    totaltime = totaltime/ iteration;
    cudaFree(iplate);
    cudaFree(oplate);
    cudaFree(keepgoing_single);
    cudaFree(keepgoing_sums);
    printf("Finished in %d iterations at %f, with reduce average time in %f.\n", iteration,When()-start,totaltime);
}
1,967
#include <cstdio>
#include <cstdlib>

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
    int width;
    int height;
    float* elements;
    bool cpu;   // true when elements lives in host memory
} Matrix;

// Allocate a w x h host matrix.
// BUGFIX: the original returned uninitialized malloc storage; calloc gives
// deterministic zeroed contents without changing the interface.
Matrix make_cpu(int w, int h){
    Matrix m;
    m.width = w;
    m.height = h;
    m.elements = static_cast<float*>(calloc(static_cast<size_t>(w) * h, sizeof(float)));
    m.cpu = true;
    return m;
}

// Allocate a w x h device matrix.
// BUGFIX: the original passed the element count (w*h) to cudaMalloc instead
// of the byte count, allocating 4x too little memory.
Matrix make_gpu(int w, int h){
    Matrix m;
    m.width = w;
    m.height = h;
    size_t bytes = static_cast<size_t>(w) * h * sizeof(float);
    cudaMalloc(&m.elements, bytes);
    m.cpu = false;
    return m;
}

// Copy a host matrix into a freshly allocated device matrix.
// BUGFIX: same missing sizeof(float) on the copy size.
Matrix make_gpu_from(Matrix m){
    Matrix gpu = make_gpu(m.width, m.height);
    cudaMemcpy(gpu.elements, m.elements,
               static_cast<size_t>(m.width) * m.height * sizeof(float),
               cudaMemcpyHostToDevice);
    return gpu;
}

// Thread block size
#define BLOCK_SIZE 16

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code: C = A * B.
// Copies A and B to the device, launches one thread per C element, and
// copies the result back. Grid uses ceil-div so dimensions need not be
// multiples of BLOCK_SIZE (the kernel guards the tail).
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Invoke kernel (ceil-div grid covers non-multiple dimensions)
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul().
// Each thread computes one element of C by accumulating results into Cvalue.
// BUGFIX: added a tail guard so the grid may overshoot the matrix.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.height || col >= B.width) return;
    float Cvalue = 0;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e]
                * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}

// Benchmark driver: times 10 batches of 10 full MatMul round trips.
int main(){
    std::size_t size = 2048;
    Matrix A = make_cpu(size, size);
    Matrix B = make_cpu(size, size);
    Matrix C = make_cpu(size, size);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float time = 0;
    for (int i = 0; i < 10; ++i){
        cudaEventRecord(start, 0);
        for (int j = 0; j < 10; ++j){
            MatMul(A, B, C);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float cuda_time = 0;
        cudaEventElapsedTime(&cuda_time, start, stop);
        time += cuda_time;
    }
    printf("%f \n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // BUGFIX: the original leaked all three host matrices.
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
1,968
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// XOR each message byte with the repeating key.
// BUGFIX: the host caps the grid at 65535 blocks, but the original kernel
// processed only one element per thread, silently leaving the tail of large
// messages unencrypted. A grid-stride loop covers any numElements.
__global__ void cuda_xor(char * encrypt, char * key, int numElements, size_t len_key){
    int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < numElements; i += stride){
        encrypt[i] = encrypt[i] ^ key[i % len_key];
    }
}

// In-place XOR "encryption" of h_m (o_m_len bytes) with key h_k on the GPU.
// Exits the process on any CUDA failure (messages kept in Portuguese as in
// the original).
void xor_encrypt(char h_m[], char h_k[], size_t o_m_len)
{
    cudaError_t err = cudaSuccess;
    int numElements_m = (int) o_m_len;
    int numElements_k = strlen(h_k);
    size_t size_m = numElements_m * sizeof(char);
    size_t size_k = numElements_k * sizeof(char);

    // Device buffers for message and key.
    char * d_m = NULL;
    err = cudaMalloc((void **)&d_m, size_m);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Falha ao alocar string do device (mensagem) (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    char * d_k = NULL;
    err = cudaMalloc((void **)&d_k, size_k);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Falha ao alocar string do device (chave) (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy message and key host -> device.
    printf("Copiando mensagem do host para o device\n");
    err = cudaMemcpy(d_m, h_m, size_m, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Falha ao copiar string mensagem do host para o device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Copiando chave do host para o device\n");
    err = cudaMemcpy(d_k, h_k, size_k, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Falha ao copiar string chave do host para o device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Block size chosen by cudaOccupancyMaxPotentialBlockSize; the grid is
    // capped at 65535 blocks (the kernel's stride loop covers the rest).
    int minGridSize, blockSize, gridSize;
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuda_xor, 0, numElements_m);
    gridSize = (numElements_m + blockSize - 1) / blockSize;
    if (gridSize > 65535)
        gridSize = 65535;
    printf("CUDA kernel executando com %d blocos de %d threads\n", gridSize, blockSize);
    cuda_xor<<<gridSize, blockSize>>>(d_m, d_k, numElements_m, numElements_k);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Erro ao rodar XOR kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the result back.
    printf("Copiando saida do device para o host\n");
    err = cudaMemcpy(h_m, d_m, size_m, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Erro ao copiar mensagem do device pro host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Release device memory.
    err = cudaFree(d_m);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Erro ao liberar mensagem do device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_k);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Erro ao liberar chave do device (error code %s )!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Reset the device and finish.
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Erro ao resetar o device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("\nDone\n");
}

// Round-trip test: encrypt twice with the same key and check the message is
// restored. Returns 1 on success.
int xor_test(char * file, char * key_file)
{
    char * o_message, * key, * message;
    int pass = 1, fsize;
    FILE *f;

    f = fopen(file, "r");
    if (f){
        fseek(f, 0, SEEK_END);
        fsize = ftell(f);
        rewind(f);
        // BUGFIX: allocate one extra byte and NUL-terminate. fread does not
        // terminate the buffer, so the original's strlen/strcmp/strcpy on
        // o_message were undefined behavior.
        o_message = (char *) malloc ((fsize + 1) * sizeof (char));
        fsize = (int) fread(o_message, 1, fsize, f);
        o_message[fsize] = '\0';
        fclose(f);
    }
    else{
        fprintf(stderr, "Erro ao abrir arquivo da mensagem!\n");
        exit(EXIT_FAILURE);
    }

    f = fopen(key_file, "r");
    if (f){
        fseek(f, 0, SEEK_END);
        fsize = ftell(f);
        rewind(f);
        // BUGFIX: same missing terminator for the key buffer.
        key = (char *) malloc ((fsize + 1) * sizeof (char));
        fsize = (int) fread(key, 1, fsize, f);
        key[fsize] = '\0';
        fclose(f);
    }
    else{
        fprintf(stderr, "Erro ao abrir arquivo da chave!\n");
        exit(EXIT_FAILURE);
    }

    // BUGFIX: +1 so strcpy's terminator fits.
    message = (char *) malloc ((strlen(o_message) + 1) * sizeof (char));
    strcpy(message, o_message);
    xor_encrypt(message, key, strlen(o_message));
    xor_encrypt(message, key, strlen(o_message));
    pass = pass && !strcmp(o_message, message);

    free(o_message);
    free(key);
    free(message);
    return(pass);
}

int main(int argc, char ** argv)
{
    // Robustness: validate the argument count before dereferencing argv.
    if (argc < 3) {
        fprintf(stderr, "Uso: %s <arquivo_mensagem> <arquivo_chave>\n", argv[0]);
        return(EXIT_FAILURE);
    }
    printf("CUDA XOR tests: %s\n", xor_test(argv[1], argv[2]) ? "SUCCEEDED" : "FAILED");
    return(0);
}
1,969
#include <stdio.h> #include <cuda.h> int main() { int dev_count; cudaDeviceProp dev_prop; cudaGetDeviceCount(&dev_count); printf("the number of cuda device is %d\n",dev_count); cudaGetDeviceProperties(&dev_prop,0); printf("the number of max threads per block is:%d\n",dev_prop.maxThreadsPerBlock); printf("the number of streaming multiprocessors(SM) is:%d\n",dev_prop.multiProcessorCount); return 0; }
1,970
#include <stdio.h> #include <iostream> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } __global__ void doubleElements(int *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < N + stride; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * Add error handling to this source code to learn what errors * exist, and then correct them. Googling error messages may be * of service if actions for resolving them are not clear to you. */ int N = 10000; int *a; size_t size = N * sizeof(int); //cudaError_t err; //err = cudaMallocManaged(&a, size); cudaMallocManaged(&a, size); /* if (err != cudaSuccess) { printf("Here is the error: %s", cudaGetErrorString(err)); } printf(err); */ init(a, N); size_t threads_per_block = 1024; //size_t threads_per_block = 2048; size_t number_of_blocks = 32; doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); // Catch errors for both the kernel launch above and any errors that occur during the asynchronous `doubleElements` kernel execution. cudaError_t syncError = cudaGetLastError(); cudaError_t asyncError = cudaDeviceSynchronize(); if (syncError != cudaSuccess) printf("Here is an error: %s\n", cudaGetErrorString(syncError)); // invalid configuration argument if (asyncError != cudaSuccess) printf("Here is an error: %s\n", cudaGetErrorString(asyncError)); // no error bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); cudaFree(a); }
1,971
#include "includes.h" __global__ void forwardPass1(float* in, float* syn1, float* layer1) { int l = blockDim.x*blockIdx.x + threadIdx.x; int j = blockDim.y*blockIdx.y + threadIdx.y; int Y = 128; atomicAdd(&layer1[l] , in[j] * syn1[j*Y + l]); layer1[l] = 1.0/(1.0 + exp(layer1[l])); }
1,972
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>

//#define BLOCK_SIZE 32
#define SIZE 1024*1024

// Write `matrix` (width x height, row-major) to fileName as tab-separated text.
__host__ void SaveMatrixToFile(char* fileName, int* matrix, int width, int height)
{
    FILE* file = fopen(fileName, "wt");
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            fprintf(file, "%d\t", matrix[y * width + x]);
        }
        fprintf(file, "\n");
    }
    fclose(file);
}

// Transpose inputMatrix (height x width) into outputMatrix (width x height).
// BUGFIX: in the original the loop variables shadowed the thread indices, so
// EVERY launched thread transposed the entire matrix -- gridDim*blockDim
// redundant full passes with racy duplicate writes. Each thread now handles
// exactly one element, with a bounds guard (launch config is 1D).
__global__ void transpose(int* inputMatrix, int* outputMatrix, int width, int height)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= width * height) return;
    int y = idx / width;   // source row
    int x = idx % width;   // source column
    outputMatrix[x * height + y] = inputMatrix[y * width + x];
}

// Driver: build a width x height index matrix, transpose it on the GPU, time
// the kernel with CUDA events, and dump both matrices to text files.
__host__ int main()
{
    int width;
    int height;
    printf("Input number of columns: ");
    scanf("%d", &width);
    printf("Input number of strings: ");
    scanf("%d", &height);
    int N = width*height;

    cudaEvent_t start, stop;
    float gpuTime = 0.0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int* A;
    A = (int *)malloc(sizeof(int) * N);
    int* A_t;
    A_t = (int *)malloc(sizeof(int) * N);
    for (int i = 0; i < N; i++)
    {
        A[i] = i + 1;
    }
    SaveMatrixToFile("matrix.txt", A, width, height);

    int* A_dev;
    int* A_t_dev;
    cudaMalloc((void**)&A_dev, sizeof(int) * N);
    cudaMalloc((void**)&A_t_dev, sizeof(int) * N);
    cudaMemcpy(A_dev, A, N * sizeof(int), cudaMemcpyHostToDevice);

    // BUGFIX: grid sized from the actual element count N (ceil-div) instead
    // of the unrelated constant SIZE.
    dim3 block(512);
    dim3 grid((N + block.x - 1) / block.x);
    cudaEventRecord(start, 0);
    transpose<<<grid, block>>>(A_dev, A_t_dev, width, height);
    cudaEvent_t syncEvent;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime, start, stop);
    printf("Time of transposing: %.2f milliseconds\n", gpuTime);

    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);
    cudaMemcpy(A_t, A_t_dev, N * sizeof(int), cudaMemcpyDeviceToHost);
    SaveMatrixToFile("matrix1.txt", A_t, height, width);

    cudaFree(A_dev);
    cudaFree(A_t_dev);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // BUGFIX: memory came from malloc, so it must be released with free()
    // (the original used delete[], which is undefined behavior).
    free(A);
    free(A_t);
    return 0;
}
1,973
#include "includes.h"

// Return the imaginary component of a complex value packed in a float2.
__device__ __forceinline__ float imag(const float2& val)
{
    return val.y;
}

// Forward-warp one source pixel along its optical-flow vector (u, v) scaled
// by time_scale, splatting its value into the four destination pixels covered
// by a 2x2 point-spread function with bilinear weights. The weights are also
// accumulated into normalization_factor so the caller can renormalize dst.
//
// Launch: one thread per source pixel over a w x h 2D grid. u/v use
// flow_stride; src/dst/normalization_factor use image_stride (elements).
// NOTE(review): _atomicAdd is an external helper -- presumably a float
// atomicAdd wrapper from includes.h; confirm its semantics there.
__global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;  // source column
    int i = threadIdx.y + blockDim.y * blockIdx.y;  // source row

    if (i >= h || j >= w) return;

    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;

    // bottom left corner of a target pixel (the +1.0f shifts into the 2x2
    // splat footprint)
    float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
    float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;

    // pixel containing bottom left corner: split into integer cell (px, py)
    // and fractional offsets (dx, dy) used as bilinear weights
    float px;
    float py;
    float dx = modff (cx, &px);
    float dy = modff (cy, &py);

    // target pixel integer coords
    int tx;
    int ty;
    tx = (int) px;
    ty = (int) py;

    float value = src[image_row_offset + j];
    float weight;

    // The four splats below walk tx/ty in place: start pixel, then left
    // (tx-1), then up (ty-1), then right again (tx+1). Each is clipped to
    // the image and accumulated atomically (threads may target the same
    // destination pixel).

    // fill pixel containing bottom right corner
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }

    // fill pixel containing bottom left corner
    tx -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }

    // fill pixel containing upper left corner
    ty -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }

    // fill pixel containing upper right corner
    tx += 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
}
1,974
/******************************************************************************* * serveral useful gpu functions will be defined in this file to facilitate * the set calculus toolbox scheme, i.e., to calculate gradients,normal vectors, * curvatures, Heaviside function and Dirac_Delta function ******************************************************************************/ __device__ inline double max2(double x, double y) { return (x<y) ? y : x; } __device__ inline double min2(double x, double y) { return (x<y) ? x : y; } __device__ inline double norm(double vx, double vy, double vz) { return max2(sqrt(vx*vx+vy*vy+vz*vz), 1e-14); } __device__ inline void cross_product(double & wx, double & wy, double & wz, double ux, double uy, double uz, double vx, double vy, double vz) { wx = uy * vz - uz * vy; wy = uz * vx - ux * vz; wz = ux * vy - uy * vx; } // convert subindex to linear index __device__ inline int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges) { int row_idxn = min2(rows-1, max2(0, row_idx)); int col_idxn = min2(cols-1, max2(0, col_idx)); int pge_idxn = min2(pges-1, max2(0, pge_idx)); int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn; return ind; } __global__ void set_calculus_toolbox(double * Fx, double * Fy, double * Fz, double * FGradMag, double * Nx, double * Ny, double * Nz, double * Fxx, double * Fyy, double * Fzz, double * Fxy, double * Fyz, double * Fzx, double * FLaplacian, double * MeanCurvature, double * GaussianCurvature, double * Heaviside, double * DiracDelta, double const * lsf, double const * HPrimal, int rows, int cols, int pges, double dx, double dy, double dz, double ds, int num_ele) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, 
col_idx+1, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); double fx = (lsf[right] - lsf[left]) / (2*dx); double fy = (lsf[front] - lsf[back]) / (2*dy); double fz = (lsf[up] - lsf[down]) / (2*dz); double fGradMag = sqrt(fx*fx + fy*fy + fz*fz); fGradMag = max2(fGradMag, 1e-14); // avoid signularity Fx[ind] = fx; Fy[ind] = fy; Fz[ind] = fz; FGradMag[ind] = fGradMag; Nx[ind] = fx / fGradMag; Ny[ind] = fy / fGradMag; Nz[ind] = fz / fGradMag; int front_right = sub2ind(row_idx+1, col_idx+1, pge_idx, rows, cols, pges); int back_left = sub2ind(row_idx-1, col_idx-1, pge_idx, rows, cols, pges); int back_right = sub2ind(row_idx-1, col_idx+1, pge_idx, rows, cols, pges); int front_left = sub2ind(row_idx+1, col_idx-1, pge_idx, rows, cols, pges); int front_up = sub2ind(row_idx+1, col_idx, pge_idx+1, rows, cols, pges); int back_down = sub2ind(row_idx-1, col_idx, pge_idx-1, rows, cols, pges); int front_down = sub2ind(row_idx+1, col_idx, pge_idx-1, rows, cols, pges); int back_up = sub2ind(row_idx-1, col_idx, pge_idx+1, rows, cols, pges); int right_up = sub2ind(row_idx, col_idx+1, pge_idx+1, rows, cols, pges); int left_down = sub2ind(row_idx, col_idx-1, pge_idx-1, rows, cols, pges); int right_down = sub2ind(row_idx, col_idx+1, pge_idx-1, rows, cols, pges); int left_up = sub2ind(row_idx, col_idx-1, pge_idx+1, rows, cols, pges); double fxx = (lsf[right] - 2*lsf[ind] + lsf[left]) / (dx*dx); double fyy = (lsf[front] - 2*lsf[ind] + lsf[back]) / (dy*dy); double fzz = (lsf[up] - 2*lsf[ind] + lsf[down]) / (dz*dz); double fxy = (lsf[front_right]+lsf[back_left]-lsf[front_left]-lsf[back_right]) / (4*ds*ds); double fyz = (lsf[front_up]+lsf[back_down]-lsf[front_down]-lsf[back_up]) / 
(4*ds*ds);
    double fzx = (lsf[right_up]+lsf[left_down]-lsf[right_down]-lsf[left_up]) / (4*ds*ds);
    double fLaplacian = fxx + fyy + fzz;
    Fxx[ind] = fxx;
    Fyy[ind] = fyy;
    Fzz[ind] = fzz;
    Fxy[ind] = fxy;
    Fyz[ind] = fyz;
    Fzx[ind] = fzx;
    FLaplacian[ind] = fLaplacian;
    // calculate mean curvature
    double col1 = fxx*fx + fxy*fy + fzx*fz;
    double col2 = fxy*fx + fyy*fy + fyz*fz;
    double col3 = fzx*fx + fyz*fy + fzz*fz;
    MeanCurvature[ind] = fLaplacian/fGradMag - (fx*col1+fy*col2+fz*col3)/pow(fGradMag,3);
    // calculate Gaussian curvature
    col1 = (fyy*fzz-fyz*fyz)*fx + (fzx*fyz-fxy*fzz)*fy + (fxy*fyz-fzx*fyy)*fz;
    col2 = (fyz*fzx-fxy*fzz)*fx + (fxx*fzz-fzx*fzx)*fy + (fzx*fxy-fxx*fyz)*fz;
    col3 = (fxy*fyz-fyy*fzx)*fx + (fzx*fxy-fxx*fyz)*fy + (fxx*fyy-fxy*fxy)*fz;
    GaussianCurvature[ind] = (fx*col1+fy*col2+fz*col3) / pow(fGradMag,4);
    // calculate Heaviside function
    double px = (HPrimal[right] - HPrimal[left]) / (2*dx);
    double py = (HPrimal[front] - HPrimal[back]) / (2*dy);
    double pz = (HPrimal[up] - HPrimal[down]) / (2*dz);
    double dot_DHPrimal_DF = px*fx + py*fy + pz*fz;
    Heaviside[ind] = dot_DHPrimal_DF / pow(fGradMag,2);
    // calculate DiraDelta function
    double pxx = (HPrimal[right] - 2*HPrimal[ind] +HPrimal[left]) / (dx*dx);
    double pyy = (HPrimal[front] - 2*HPrimal[ind] + HPrimal[back]) / (dy*dy);
    double pzz = (HPrimal[up] - 2*HPrimal[ind] + HPrimal[down]) / (dz*dz);
    double pLaplacian = pxx + pyy + pzz;
    DiracDelta[ind] = pLaplacian/pow(fGradMag,2) - dot_DHPrimal_DF*fLaplacian/pow(fGradMag,4);
}

// Per-voxel calculus toolbox for the auxiliary level-set function `lsf`
// (one thread per grid point; 3D launch, guarded against out-of-range
// indices).  Reads the precomputed derivatives of the primary level set
// (Fx..Fzx, FGradMag, Nx..Nz) and fills in, for the auxiliary field: its
// gradient, the F x A cross product and unit tangent, second derivatives,
// geodesic/normal curvature, geodesic torsion, and Heaviside/Dirac-delta
// approximations derived from AHPrimal.
// Central differences with spacings dx, dy, dz (ds for the mixed terms);
// relies on the file-local helpers sub2ind, norm and cross_product.
// NOTE(review): `num_ele` is unused here; boundary neighbours presumably
// wrap or are handled inside sub2ind — confirm against its definition.
__global__ void auxi_set_calculus_toolbox(double * Ax, double * Ay, double * Az, double * AGradMag,
    double * ACx, double * ACy, double * ACz, double * ANormCrossAF,
    double * Tx, double * Ty, double * Tz, double * Anx, double * Any, double * Anz,
    double * Axx, double * Ayy, double * Azz, double * Axy, double * Ayz, double * Azx,
    double * ALaplacian, double * GeodesicCurvature, double * NormalCurvature,
    double * GeodesicTorsion, double * BPerpendicular, double * AHeaviside, double * ADiracDelta,
    double const * lsf, double const * AHPrimal,
    double const * Fx, double const * Fy, double const * Fz, double const * FGradMag,
    double const * Nx, double const * Ny, double const * Nz,
    double const * Fxx, double const * Fyy, double const * Fzz,
    double const * Fxy, double const * Fyz, double const * Fzx,
    int rows, int cols, int pges, double dx, double dy, double dz, double ds, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;

    // Guard against the grid overshooting the volume.
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }

    // Linear index of this voxel and of its 6 face neighbours.
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);

    // First derivatives of the auxiliary level set (central differences).
    double ax = (lsf[right] - lsf[left]) / (2*dx);
    double ay = (lsf[front] - lsf[back]) / (2*dy);
    double az = (lsf[up] - lsf[down]) / (2*dz);
    double aGradMag = norm(ax, ay, az);
    Ax[ind] = ax;
    Ay[ind] = ay;
    Az[ind] = az;
    AGradMag[ind] = aGradMag;

    // Cross product of grad(F) with grad(A) and its magnitude.
    double fx = Fx[ind];
    double fy = Fy[ind];
    double fz = Fz[ind];
    double Cx, Cy, Cz;
    cross_product(Cx,Cy,Cz,fx,fy,fz,ax,ay,az);
    double NormCrossAF = norm(Cx,Cy,Cz);
    ACx[ind] = Cx;
    ACy[ind] = Cy;
    ACz[ind] = Cz;
    ANormCrossAF[ind] = NormCrossAF;

    // Unit tangent along the intersection curve of the two level sets.
    double tx = Cx / NormCrossAF;
    double ty = Cy / NormCrossAF;
    double tz = Cz / NormCrossAF;
    Tx[ind] = tx;
    Ty[ind] = ty;
    Tz[ind] = tz;

    // In-surface normal n = t x N (N is the unit normal of F).
    double fNx = Nx[ind];
    double fNy = Ny[ind];
    double fNz = Nz[ind];
    double nx, ny, nz;
    cross_product(nx,ny,nz,tx,ty,tz,fNx,fNy,fNz);
    Anx[ind] = nx;
    Any[ind] = ny;
    Anz[ind] = nz;

    // Diagonal neighbours needed for the mixed second derivatives.
    int front_right = sub2ind(row_idx+1, col_idx+1, pge_idx, rows, cols, pges);
    int back_left = sub2ind(row_idx-1, col_idx-1, pge_idx, rows, cols, pges);
    int back_right = sub2ind(row_idx-1, col_idx+1, pge_idx, rows, cols, pges);
    int front_left = sub2ind(row_idx+1, col_idx-1, pge_idx, rows, cols, pges);
    int front_up = sub2ind(row_idx+1, col_idx, pge_idx+1, rows, cols, pges);
    int back_down = sub2ind(row_idx-1, col_idx, pge_idx-1, rows, cols, pges);
    int front_down = sub2ind(row_idx+1, col_idx, pge_idx-1, rows, cols, pges);
    int back_up = sub2ind(row_idx-1, col_idx, pge_idx+1, rows, cols, pges);
    int right_up = sub2ind(row_idx, col_idx+1, pge_idx+1, rows, cols, pges);
    int left_down = sub2ind(row_idx, col_idx-1, pge_idx-1, rows, cols, pges);
    int right_down = sub2ind(row_idx, col_idx+1, pge_idx-1, rows, cols, pges);
    int left_up = sub2ind(row_idx, col_idx-1, pge_idx+1, rows, cols, pges);

    // Second derivatives of A and its Laplacian.
    double axx = (lsf[right] - 2*lsf[ind] + lsf[left]) / (dx*dx);
    double ayy = (lsf[front] - 2*lsf[ind] + lsf[back]) / (dy*dy);
    double azz = (lsf[up] - 2*lsf[ind] + lsf[down]) / (dz*dz);
    double axy = (lsf[front_right]+lsf[back_left]-lsf[front_left]-lsf[back_right]) / (4*ds*ds);
    double ayz = (lsf[front_up]+lsf[back_down]-lsf[front_down]-lsf[back_up]) / (4*ds*ds);
    double azx = (lsf[right_up]+lsf[left_down]-lsf[right_down]-lsf[left_up]) / (4*ds*ds);
    double aLaplacian = axx + ayy + azz;
    Axx[ind] = axx;
    Ayy[ind] = ayy;
    Azz[ind] = azz;
    Axy[ind] = axy;
    Ayz[ind] = ayz;
    Azx[ind] = azx;
    ALaplacian[ind] = aLaplacian;

    // geodesic curvature
    double fxx = Fxx[ind];
    double fyy = Fyy[ind];
    double fzz = Fzz[ind];
    double fxy = Fxy[ind];
    double fyz = Fyz[ind];
    double fzx = Fzx[ind];
    double fGradMag = FGradMag[ind];
    // v = Hessian(F) * t, then w1 = v x grad(A).
    double vx = tx*fxx + ty*fxy + tz*fzx;
    double vy = tx*fxy + ty*fyy + tz*fyz;
    double vz = tx*fzx + ty*fyz + tz*fzz;
    double w1x, w1y, w1z;
    cross_product(w1x,w1y,w1z,vx,vy,vz,ax,ay,az);
    // v = Hessian(A) * t, then w2 = grad(F) x v.
    vx = tx*axx + ty*axy + tz*azx;
    vy = tx*axy + ty*ayy + tz*ayz;
    vz = tx*azx + ty*ayz + tz*azz;
    double w2x, w2y, w2z;
    cross_product(w2x,w2y,w2z,fx,fy,fz,vx,vy,vz);
    GeodesicCurvature[ind] = ( nx*(w1x+w2x) + ny*(w1y+w2y) + nz*(w1z+w2z) ) / NormCrossAF;

    /* NormalCurvature, GeodesicTorsion, BPerpendicular */
    // Components of the Jacobian of the unit normal N = grad(F)/|grad(F)|.
    double Nxx = fxx / fGradMag - fx*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
    double Nyx = fxy / fGradMag - fy*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
    double Nzx = fzx / fGradMag - fz*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
    double Nxy = fxy / fGradMag - fx*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
    double Nyy = fyy / fGradMag - fy*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
    double Nzy = fyz / fGradMag - fz*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
    double Nxz = fzx / fGradMag - fx*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
    double Nyz = fyz / fGradMag - fy*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
    double Nzz = fzz / fGradMag - fz*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;

    // NormalCurvature.
    vx = Nxx * tx + Nxy * ty + Nxz * tz;
    vy = Nyx * tx + Nyy * ty + Nyz * tz;
    vz = Nzx * tx + Nzy * ty + Nzz * tz;
    NormalCurvature[ind] = - (tx*vx + ty*vy + tz*vz);

    // GeodesicTorsion, BPerpendicular
    vx = Nxx * nx + Nxy * ny + Nxz * nz;
    vy = Nyx * nx + Nyy * ny + Nyz * nz;
    vz = Nzx * nx + Nzy * ny + Nzz * nz;
    GeodesicTorsion[ind] = - (tx*vx + ty*vy + tz*vz);
    BPerpendicular[ind] = - (nx*vx + ny*vy + nz*vz);

    /*primal of Heaviside(A), Heaviside(A), DiracDelta(A)*/
    // calculate Heaviside function
    double px = (AHPrimal[right] - AHPrimal[left]) / (2*dx);
    double py = (AHPrimal[front] - AHPrimal[back]) / (2*dy);
    double pz = (AHPrimal[up] - AHPrimal[down]) / (2*dz);
    double dot_DAHPrimal_DF = px*ax + py*ay + pz*az;
    AHeaviside[ind] = dot_DAHPrimal_DF / pow(aGradMag,2);
    // calculate DiraDelta function
    double pxx = (AHPrimal[right] - 2*AHPrimal[ind] +AHPrimal[left]) / (dx*dx);
    double pyy = (AHPrimal[front] - 2*AHPrimal[ind] + AHPrimal[back]) / (dy*dy);
    double pzz = (AHPrimal[up] - 2*AHPrimal[ind] + AHPrimal[down]) / (dz*dz);
    double pLaplacian = pxx + pyy + pzz;
    ADiracDelta[ind] = pLaplacian/pow(aGradMag,2) - dot_DAHPrimal_DF*aLaplacian/pow(aGradMag,4);
}
1,975
#include <stdio.h>
#include <iostream>
#include <string>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cmath>
#include <cstring>
#include <fstream>
#include <sstream>

#define DIM 32

const float PI = 3.14159265358979f;

using namespace std;

/*************************************************/
// Minimal complex number type usable from both host and device code.
class Complex {
public:
    __host__ __device__ Complex() : real(0.0f), imag(0.0f) {}
    __host__ __device__ Complex(float r) : real(r), imag(0.0f) {}
    __host__ __device__ Complex(float r, float i) : real(r), imag(i) {}

    __host__ __device__ Complex operator+(const Complex &b) const {
        return Complex(real + b.real, imag + b.imag);
    }
    __host__ __device__ Complex operator-(const Complex &b) const {
        return Complex(real - b.real, imag - b.imag);
    }
    __host__ __device__ Complex operator*(const Complex &b) const {
        return Complex(real * b.real - imag * b.imag,
                       real * b.imag + imag * b.real);
    }
    // Magnitude, returned as a real-valued Complex.
    __host__ __device__ Complex mag() const {
        return Complex(sqrt(real * real + imag * imag));
    }
    // Phase angle in degrees, returned as a real-valued Complex.
    __host__ __device__ Complex angle() const {
        return Complex(atan(1.0 * imag / real) * 180 / PI);
    }
    __host__ __device__ Complex conj() const {
        return Complex(real, -1.0 * imag);
    }

    float real;
    float imag;
};

// Print a Complex: bare real part when imag is (numerically) zero,
// "(re,im)" otherwise.  Values below 1e-10 are clamped to 0 for display.
std::ostream& operator<< (std::ostream& os, const Complex& rhs) {
    Complex c(rhs);
    if (fabsf(rhs.imag) < 1e-10) c.imag = 0.0f;
    if (fabsf(rhs.real) < 1e-10) c.real = 0.0f;
    if (c.imag == 0) {
        os << c.real;
    } else {
        os << "(" << c.real << "," << c.imag << ")";
    }
    return os;
}

// Text image format: "w h" header, then w*h values, each either a bare real
// or a "(re,im)" pair.
class InputImage {
public:
    InputImage(const char* filename) {
        std::ifstream ifs(filename);
        if (!ifs) {
            std::cout << "Can't open image file " << filename << std::endl;
            exit(1);
        }
        ifs >> w >> h;
        data = new Complex[w * h];
        for (int r = 0; r < h; ++r) {
            for (int c = 0; c < w; ++c) {
                string word;
                ifs >> word;
                int found = word.find_first_not_of(" \t");
                if (word[found] == '(') {
                    // Complex entry of the form (re,im).
                    istringstream iss(word);
                    char temp;
                    float real, imag;
                    iss >> temp >> real >> temp >> imag >> temp;
                    data[r * w + c] = Complex(real, imag);
                } else {
                    // Plain real entry.
                    istringstream iss(word);
                    float real;
                    iss >> real;
                    data[r * w + c] = Complex(real);
                }
            }
        }
    }

    int get_width() const { return w; }
    int get_height() const { return h; }

    //returns a pointer to the image data.  Note the return is a 1D
    //array which represents a 2D image.  The data for row 1 is
    //immediately following the data for row 0 in the 1D array
    Complex* get_image_data() const { return data; }

    //use this to save output from forward DFT
    void save_image_data(const char* filename, Complex* d, int w, int h) {
        std::ofstream ofs(filename);
        if (!ofs) {
            std::cout << "Can't create output image " << filename << std::endl;
            return;
        }
        ofs << w << " " << h << std::endl;
        for (int r = 0; r < h; ++r) {
            for (int c = 0; c < w; ++c) {
                ofs << d[r * w + c] << " ";
            }
            ofs << std::endl;
        }
    }

    //use this to save output from reverse DFT
    void save_image_data_real(const char* filename, Complex* d, int w, int h) {
        std::ofstream ofs(filename);
        if (!ofs) {
            std::cout << "Can't create output image " << filename << std::endl;
            return;
        }
        ofs << w << " " << h << std::endl;
        for (int r = 0; r < h; ++r) {
            for (int c = 0; c < w; ++c) {
                ofs << d[r * w + c].real << " ";
            }
            ofs << std::endl;
        }
    }

private:
    int w;
    int h;
    Complex* data;
};

/*************************************************/
// Device buffers used by the transform pipeline.
struct DeviceData {
    Complex *d_data;   // input image
    Complex *d_temp;   // intermediate (after the row pass)
    Complex *d_res;    // final result (after the column pass)
};

void cleanup(DeviceData *d) {
    cudaFree(d->d_data);
    cudaFree(d->d_temp);
    cudaFree(d->d_res);
}

/*************** forward transform by row **********************/
// One thread per output element; naive O(width) DFT along each row.
// Bug fix: the original did `dst[...] += ...` into cudaMalloc'd memory,
// which is uninitialized — accumulate in registers and assign instead.
__global__ void transByRow(Complex* dst, Complex* src, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int index = x + y * width;
    if (x < width && y < height) {
        float sumRe = 0.0f, sumIm = 0.0f;
        for (int i = 0; i < width; i++) {
            float re = src[y * width + i].real;
            float im = src[y * width + i].imag;
            Complex w = Complex(cos(2 * PI * i * x / width), -sin(2 * PI * i * x / width));
            sumRe += re * w.real - im * w.imag;
            sumIm += re * w.imag + im * w.real;
        }
        dst[index].real = sumRe;
        dst[index].imag = sumIm;
    }
}

/*************** forward transform by column **********************/
// Same as transByRow but the DFT runs down each column.
__global__ void transByCol(Complex* dst, Complex* src, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int index = x + y * width;
    if (x < width && y < height) {
        float sumRe = 0.0f, sumIm = 0.0f;
        for (int i = 0; i < height; i++) {
            float re = src[x + i * width].real;
            float im = src[x + i * width].imag;
            Complex w = Complex(cos(2 * PI * i * y / height), -sin(2 * PI * i * y / height));
            sumRe += re * w.real - im * w.imag;
            sumIm += re * w.imag + im * w.real;
        }
        dst[index].real = sumRe;
        dst[index].imag = sumIm;
    }
}

/*************** reverse transform by row **********************/
// Inverse row DFT: positive twiddle sign and 1/width normalization.
__global__ void revByRow(Complex* dst, Complex* src, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int index = x + y * width;
    if (x < width && y < height) {
        float sumRe = 0.0f, sumIm = 0.0f;
        for (int i = 0; i < width; i++) {
            float re = src[y * width + i].real;
            float im = src[y * width + i].imag;
            Complex w = Complex(cos(2 * PI * i * x / width), sin(2 * PI * i * x / width));
            sumRe += (re * w.real - im * w.imag) / width;
            sumIm += (re * w.imag + im * w.real) / width;
        }
        dst[index].real = sumRe;
        dst[index].imag = sumIm;
    }
}

/*************** reverse transform by column **********************/
// Inverse column DFT: positive twiddle sign and 1/height normalization.
__global__ void revByCol(Complex* dst, Complex* src, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int index = x + y * width;
    if (x < width && y < height) {
        float sumRe = 0.0f, sumIm = 0.0f;
        for (int i = 0; i < height; i++) {
            float re = src[x + i * width].real;
            float im = src[x + i * width].imag;
            Complex w = Complex(cos(2 * PI * i * y / height), sin(2 * PI * i * y / height));
            sumRe += (re * w.real - im * w.imag) / height;
            sumIm += (re * w.imag + im * w.real) / height;
        }
        dst[index].real = sumRe;
        dst[index].imag = sumIm;
    }
}

// usage: <prog> forward|reverse <input-image> <output-image>
int main(int argc, char* argv[]) {
    if (argc < 4) {
        std::cerr << "usage: " << argv[0] << " forward|reverse infile outfile" << std::endl;
        return 1;
    }
    DeviceData devs;
    string str = "forward";
    bool forward = (strcmp(argv[1], str.c_str()) == 0);
    char* inFile = argv[2];
    char* outFile = argv[3];

    InputImage image(inFile);
    int height = image.get_height();
    int width = image.get_width();
    int N = height * width;
    // Heap allocation: N is a runtime value, so a stack VLA (the original
    // `Complex res[N]`) is non-standard and can overflow the stack.
    Complex* res = new Complex[N];
    Complex* data = image.get_image_data();

    cudaMalloc((void**)&devs.d_data, N * sizeof(Complex));
    cudaMalloc((void**)&devs.d_res, N * sizeof(Complex));
    cudaMalloc((void**)&devs.d_temp, N * sizeof(Complex));
    cudaMemcpy(devs.d_data, data, N * sizeof(Complex), cudaMemcpyHostToDevice);

    dim3 blocks((width + DIM - 1) / DIM, (height + DIM - 1) / DIM);
    dim3 threads(DIM, DIM);
    cout << width << ", " << height << forward << endl;

    // Separable 2D transform: row pass into d_temp, column pass into d_res.
    // The blocking cudaMemcpy below also synchronizes with the kernels.
    if (forward) {
        transByRow<<<blocks, threads>>>(devs.d_temp, devs.d_data, width, height);
        transByCol<<<blocks, threads>>>(devs.d_res, devs.d_temp, width, height);
        cudaMemcpy(res, devs.d_res, N * sizeof(Complex), cudaMemcpyDeviceToHost);
        image.save_image_data(outFile, res, width, height);
    } else {
        revByRow<<<blocks, threads>>>(devs.d_temp, devs.d_data, width, height);
        revByCol<<<blocks, threads>>>(devs.d_res, devs.d_temp, width, height);
        cudaMemcpy(res, devs.d_res, N * sizeof(Complex), cudaMemcpyDeviceToHost);
        image.save_image_data_real(outFile, res, width, height);
    }

    cleanup(&devs);
    delete[] res;
    return 0;
}
1,976
#include "includes.h" __device__ void check_existance_of_candidate_rows( short *deleted_rows, int *row_group, const int search_depth, int *token, int *selected_row_id, const int total_dl_matrix_row_num) { for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) { // std::cout<<deleted_rows[i]<<' '<<row_group[i]<<std::endl; if (deleted_rows[i] == 0 && row_group[i] == search_depth) { // std::cout<<"Candidate Row Found...."<<std::endl; // atomicExch(token, 1); *token = 1; atomicMin(selected_row_id, i); // If find a number can break; // break; } } } __global__ void check_existance_of_candidate_rows( int *deleted_rows, int *row_group, const int search_depth, int *token, int *selected_row_id, const int total_dl_matrix_row_num) { for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) { // std::cout<<deleted_rows[i]<<' '<<row_group[i]<<std::endl; if (deleted_rows[i] == 0 && row_group[i] == search_depth) { // std::cout<<"Candidate Row Found...."<<std::endl; atomicExch(token, 1); atomicMin(selected_row_id, i); } } __syncthreads(); }
1,977
#include <stdio.h> __device__ const char *STR = "HELLO WORLD!"; //__constant__ const char *STR = "HELLO WORLD!"; const char STR_LENGTH = 12; __global__ void hello() { printf("%c\n", STR[threadIdx.x % STR_LENGTH]); } int main(void) { int num_threads = STR_LENGTH; int num_blocks = 2; dim3 dimBlock (16,16); dim3 dimGrid(32,32); hello<<<dimGrid,dimBlock>>>(); cudaDeviceSynchronize(); return 0; }
1,978
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>

//1M
#define DATA_SIZE 1048576
#define THREAD_NUM 256
#define BLOCK_NUM 32
#define NUM_THREADS 256

// Kernel: one block computes one row of C = A * B (n x n matrices with
// leading dimensions lda/ldb/ldc).  The block first stages its row of A in
// dynamic shared memory, then thread tid accumulates output columns
// j = tid, tid + blockDim.x, ...  The inner loop is a compensated
// (Kahan-style) summation — `y` tracks the accumulated float round-off —
// which is why the statement order must not be changed.
// Launch requirement: n * sizeof(float) bytes of dynamic shared memory.
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
    extern __shared__ float data[];
    const int tid = threadIdx.x;
    const int row = blockIdx.x;
    int i, j;
    // Cooperative load of row `row` of A into shared memory.
    for(i = tid; i < n; i += blockDim.x) {
        data[i] = a[row * lda + i];
    }
    // All writes to shared memory must complete before any thread reads it.
    __syncthreads();
    for(j = tid; j < n; j += blockDim.x) {
        float t = 0;  // running compensated sum
        float y = 0;  // running rounding-error term
        for(i = 0; i < n; i++) {
            float r;
            y -= data[i] * b[i * ldb + j];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        c[row * ldc + j] = t;
    }
}

// Host wrapper: copies A and B to the device, launches one block per row,
// copies C back, and returns elapsed wall-clock time in clock ticks
// (includes allocation and transfer time).
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
    float *ac, *bc, *cc;
    clock_t start, end;
    start = clock();
    cudaMalloc((void**) &ac, sizeof(float) * n * n);
    cudaMalloc((void**) &bc, sizeof(float) * n * n);
    cudaMalloc((void**) &cc, sizeof(float) * n * n);
    cudaMemcpy(ac, a, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(bc, b, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    // NOTE(review): `blocks` is computed but unused — the launch below uses
    // n blocks (one per row) instead.
    int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
    matMultCUDA<<< n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
    cudaMemcpy(c, cc, sizeof(float) * n * n, cudaMemcpyDeviceToHost);
    cudaFree(ac);
    cudaFree(bc);
    cudaFree(cc);
    end = clock();
    return end - start;
}

// Compare two n x n matrices and print the maximum and average relative
// error (entries where b is exactly 0 are skipped).
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
    float max_err = 0;
    float average_err = 0;
    int i, j;
    for(i = 0; i < n; i++) {
        for(j = 0; j < n; j++) {
            if(b[i * ldb + j] != 0) {
                float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
                if(max_err < err) max_err = err;
                average_err += err;
            }
        }
    }
    printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}

// CPU reference multiply (double accumulator) used to validate the GPU result.
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
    int i, j, k;
    for(i = 0; i < n; i++) {
        for(j = 0; j < n; j++) {
            double t = 0;
            for(k = 0; k < n; k++) {
                t += a[i * lda + k] * b[k * ldb + j];
            }
            c[i * ldc + j] = t;
        }
    }
}

// Fill an n x n matrix with random values in roughly [0, 1).
void matgen(float* a, int lda, int n)
{
    int i, j;
    for(i = 0; i < n; i++) {
        for(j = 0; j < n; j++) {
            a[i * lda + j] = (float) rand() / RAND_MAX + (float) rand() / (RAND_MAX * RAND_MAX);
        }
    }
}

// Driver: multiply two random 1000x1000 matrices on GPU and CPU, compare
// the results, and report timing / GFLOPS.
int main()
{
    float *a, *b, *c, *d;
    int n = 1000;
    a = (float*) malloc(sizeof(float) * n * n);
    b = (float*) malloc(sizeof(float) * n * n);
    c = (float*) malloc(sizeof(float) * n * n);
    d = (float*) malloc(sizeof(float) * n * n);
    srand(0);
    matgen(a, n, n);
    matgen(b, n, n);
    clock_t time = matmultCUDA(a, n, b, n, c, n, n);
    matmult(a, n, b, n, d, n, n);
    compare_mat(c, n, d, n, n);
    double sec = (double) time / CLOCKS_PER_SEC;
    printf("Time used: %.2f (%.2lf GFLOPS)\n", sec, 2.0 * n * n * n / (sec * 1E9));
    return 0;
}
1,979
// Fill the domain with pseudo-random cell states in {0,1,2} using an LCG
// seeded from the thread/block position.
__global__ void init_kernel(int * domain, int domain_x)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    // Dummy initialization
    domain[ty * domain_x + tx] = (1664525ul * (blockIdx.x + threadIdx.y + threadIdx.x) + 1013904223ul) % 3;
}

// Reads a cell at (x+dx, y+dy)
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy, unsigned int domain_x, unsigned int domain_y)
{
    x = (unsigned int)(x + dx) % domain_x;  // Wrap around
    y = (unsigned int)(y + dy) % domain_y;
    return source_domain[y * domain_x + x];
}

// Compute kernel: one generation of a two-colour (red/blue) Game of Life.
// Each block stages its tile plus a 1-cell halo in shared memory, then every
// thread computes the next state of its own cell.
// NOTE(review): sdataDim is hard-coded to 10, so the launch must use 8x8
// blocks with at least 10*10*sizeof(int) dynamic shared memory.
__global__ void life_kernel(int * source_domain, int * dest_domain, int domain_x, int domain_y)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;

    // Copy from global to shared memory so the threads can reuse it below.
    // sdata -> block = 8*8 threads -> 100 reads (10*10) -> |sdata| = 100
    extern __shared__ int sdata[];
    int sdataDim = 10; // TODO: see whether this could be derived automatically

    // Each thread reads its own cell.
    int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y);

    /*
     * For a 3x3 block:
     * block: x^2 (square to optimize reads)
     * 0 1 2
     * 3 4 5
     * 6 7 8
     *
     * sdata: (x+2)^2 (sdataDim=5)
     *  0  1  2  3  4
     *  5| 6  7  8| 9
     * 10|11 12 13|14
     * 15|16 17 18|19
     * 20 21 22 23 24
     */
    // Thread position inside the shared tile (offset by 1 for the halo).
    int decY, decX, myloc;
    decY = threadIdx.y + 1;
    decX = threadIdx.x + 1;
    // Position inside sdata (halo included).
    myloc = decY * sdataDim + decX;
    sdata[myloc] = myself;

    /* Four flags per thread: top / bottom / left / right.
       Booleans telling whether the thread sits on the border of the tile. */
    int haut, bas, gauche, droite;
    haut = threadIdx.y == 0;
    bas = threadIdx.y == blockDim.y - 1;
    gauche = threadIdx.x == 0;
    droite = threadIdx.x == blockDim.x - 1;

    /* Halo reads, only where needed. */
    // Read the cell above.
    if (haut) {
        sdata[myloc-sdataDim] = read_cell(source_domain, tx, ty, 0, -1, domain_x, domain_y);
        // Top-left corner.
        if (gauche) {
            sdata[myloc-sdataDim-1] = read_cell(source_domain, tx, ty, -1, -1, domain_x, domain_y);
        }
        // Top-right corner.
        if (droite) {
            sdata[myloc-sdataDim+1] = read_cell(source_domain, tx, ty, 1, -1, domain_x, domain_y);
        }
    }
    // Read the cell below.
    if (bas) {
        sdata[myloc+sdataDim] = read_cell(source_domain, tx, ty, 0, 1, domain_x, domain_y);
        // Bottom-left corner.
        if (gauche) {
            sdata[myloc+sdataDim-1] = read_cell(source_domain, tx, ty, -1, 1, domain_x, domain_y);
        }
        // Bottom-right corner.
        if (droite) {
            sdata[myloc+sdataDim+1] = read_cell(source_domain, tx, ty, 1, 1, domain_x, domain_y);
        }
    }
    // Read the cell on the left.
    if (gauche) {
        sdata[myloc-1] = read_cell(source_domain, tx, ty, -1, 0, domain_x, domain_y);
    }
    // Read the cell on the right.
    if (droite) {
        sdata[myloc+1] = read_cell(source_domain, tx, ty, 1, 0, domain_x, domain_y);
    }
    // The whole tile (incl. halo) must be written before anyone reads it.
    __syncthreads();

    // Read the 8 neighbors and count number of blue and red
    int i;
    int count_blue = 0, count_red = 0, valTemp;
    // Visit the neighbours above and below.
    for (i=-1; i<2; i++){
        valTemp = read_cell(sdata, decX, decY, i, -1, sdataDim, sdataDim);
        // Replacing the switch with a conditional increment changes nothing (perf-wise).
        switch(valTemp){
            case 1: count_red++; break;
            case 2: count_blue++; break;
        }
        valTemp = read_cell(sdata, decX, decY, i, 1, sdataDim, sdataDim);
        switch(valTemp){
            case 1: count_red++; break;
            case 2: count_blue++; break;
        }
    }
    // Left neighbour.
    valTemp = read_cell(sdata, decX, decY, -1, 0, sdataDim, sdataDim);
    switch(valTemp){
        case 1: count_red++; break;
        case 2: count_blue++; break;
    }
    // Right neighbour.
    valTemp = read_cell(sdata, decX, decY, 1, 0, sdataDim, sdataDim);
    switch(valTemp){
        case 1: count_red++; break;
        case 2: count_blue++; break;
    }

    // Compute new value
    int new_cell=0;
    int num_nei = count_red + count_blue;
    switch (myself){
        case 0: // Empty cell: born with exactly 3 neighbours, taking the
                // majority colour (branchless via the comparison).
            if (num_nei == 3) {
                // Control flow reduction
                new_cell = 1 + (count_red < count_blue);
            }
            break;
        default: // Live cell survives when it has 2 or 3 neighbours.
            if (num_nei == 2 || num_nei == 3){
                new_cell = myself;
            }
            break;
    }
    dest_domain[ty * domain_x + tx] = new_cell;
}
1,980
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <tuple>

// Element-wise work = force * distance.
// Bug fix: the original wrote every product to *d_output (element 0 only),
// leaving the rest of the output buffer uninitialized; write to d_output[id].
__global__ void Run(unsigned int *d_force, unsigned int *d_distance, unsigned int *d_output, unsigned int n)
{
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        d_output[id] = d_force[id] * d_distance[id];
}

// usage: muscle vector_size threads_per_block
int main(int argc, char** argv)
{
    // Bug fix: both argv[1] and argv[2] are used, so require 3 arguments
    // (the old `argc < 2` check let argv[2] be read past the array).
    if (argc < 3) {
        std::cerr << "usage: muscle vector_size threads_per_block " << std::endl;
        return EXIT_FAILURE;
    }
    unsigned int vector_size = atoi(argv[1]);
    unsigned int threads_per_block = atoi(argv[2]);

    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        std::cerr << "Error! " << cudaGetErrorString(error) << std::endl;
        return EXIT_FAILURE;
    }

    // host vectors
    unsigned int* h_force;
    unsigned int* h_distance;
    // host output vector
    unsigned int* h_output;
    // device input vectors
    unsigned int* d_force;
    unsigned int* d_distance;
    // device output vector
    unsigned int* d_output;

    size_t bytes = vector_size * sizeof(unsigned int);
    h_force = (unsigned int*)malloc(bytes);
    h_distance = (unsigned int*)malloc(bytes);
    h_output = (unsigned int*)malloc(bytes);

    // Allocate cuda memory
    cudaMalloc(&d_force, bytes);
    cudaMalloc(&d_distance, bytes);
    cudaMalloc(&d_output, bytes);

    // Force ramps up 1..n/2 then back down; distance cycles 1..10.
    for (unsigned int i = 0; i < vector_size / 2; ++i) {
        h_force[i] = (i + 1);
    }
    int val = vector_size / 2;
    for (unsigned int i = vector_size / 2; i < vector_size; ++i) {
        h_force[i] = --val + 1;
    }
    for (unsigned int i = 0; i < vector_size; ++i) {
        h_distance[i] = ((i % 10) + 1);
    }
    // Debug dump — bounded by vector_size to avoid reading past the buffer
    // (the original always read 300 elements).
    for (unsigned int i = 0; i < vector_size && i < 300; ++i)
        std::cout << h_distance[i] << std::flush;

    cudaMemcpy(d_force, h_force, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_distance, h_distance, bytes, cudaMemcpyHostToDevice);

    int g = (int)ceil((float) vector_size / threads_per_block);
    Run <<< g, threads_per_block>>>(d_force, d_distance, d_output, vector_size);
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        std::cerr << "Error! " << cudaGetErrorString(error) << std::endl;
    }

    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_output, d_output, bytes, cudaMemcpyDeviceToHost);
    unsigned int sum = 0;
    for (unsigned int i = 0; i < vector_size; ++i) {
        std::cout << h_output[i] << std::endl;
        sum += h_output[i];
    }
    std::cout << "Final result: " << sum << std::endl;

    cudaFree(d_force);
    cudaFree(d_distance);
    cudaFree(d_output);
    free(h_force);
    free(h_distance);
    free(h_output);
    return EXIT_SUCCESS;
}
1,981
#include<stdio.h> //GPU CODE! __global__ void add(int *a, int *b, int *c){ *c = *a + *b; } //CPU CODE int main(void){ int a, b, c; //host variables int *d_a, *d_b, *d_c; //GPU copies of host variables a = 9; b = 32; int size = sizeof(int); //Allocate space from GPU for host copies cudaMalloc((void **) &d_a, size); cudaMalloc((void **) &d_b, size); cudaMalloc((void **) &d_c, size); //copy input to GPU cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice); //GPU kernel launcher add<<<1,1>>>(d_a, d_b, d_c); //Copy result from GPU cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost); printf("%d\n",c); //cleanup cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
1,982
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 1024          // number of elements in each vector
// Threads per block; keeping the launch configuration as a named constant
// makes the program easy to port and tune.
#define threadsPerBlock 512

/** Vector dot product (a reduction).
 *
 * Each thread accumulates a grid-strided partial sum in a register, the
 * block then reduces those partials in shared memory, and each block writes
 * one partial result to device_c[blockIdx.x]; the host adds those up.
 */
__global__ void gpu_vector_dot(float *device_a, float *device_b, float *device_c)
{
    // One shared slot per thread; every block has its own copy.
    __shared__ float partial_sum[threadsPerBlock];
    // Unique global thread ID, used as the starting element index.
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // Index of this thread's slot in shared memory.
    unsigned int index = threadIdx.x;

    float sum = 0;
    while (tid < N) {
        // Multiply the corresponding vector elements.
        sum += device_a[tid] * device_b[tid];
        // Advance by the total number of launched threads
        // (blocks in the grid * threads per block).
        // Bug fix: the original incremented tid TWICE per iteration, which
        // silently skips every other chunk whenever N exceeds the number of
        // launched threads (it only "worked" here because they were equal).
        tid += blockDim.x * gridDim.x;
    }

    // Publish this thread's partial sum to shared memory.
    partial_sum[index] = sum;

    // All writes to shared memory must complete before the reduction reads it.
    __syncthreads();

    // Tree reduction in shared memory: each step folds the upper half of the
    // active range onto the lower half, halving the range until one value
    // (the block's partial sum) remains in partial_sum[0].
    unsigned int i = blockDim.x / 2;
    while (i != 0) {
        if (index < i) {
            partial_sum[index] += partial_sum[index + i];
        }
        // Barrier outside the `if` — every thread of the block must reach it.
        __syncthreads();
        i /= 2;
    }

    // Thread 0 stores the block's result, indexed by block ID.
    if (index == 0) {
        device_c[blockIdx.x] = partial_sum[0];
    }
}

int main(int argc, char *argv[])
{
    // Host and device pointers (host_c is the scalar final result).
    float *host_a, *host_b, host_c, *partial_sum;
    float *device_a, *device_b, *device_partial_sum;

    // Number of blocks: enough to cover N, capped at 32.
    unsigned int block_calc = (N + threadsPerBlock - 1) / threadsPerBlock;
    unsigned int blocks_per_grid = (32 < block_calc ? 32 : block_calc);

    // Allocate memory on the CPU host.
    host_a = (float*)malloc(N * sizeof(float));
    host_b = (float*)malloc(N * sizeof(float));
    partial_sum = (float*)malloc(blocks_per_grid * sizeof(float));

    // Allocate memory on the GPU device.
    cudaMalloc((void**)&device_a, N * sizeof(float));
    cudaMalloc((void**)&device_b, N * sizeof(float));
    cudaMalloc((void**)&device_partial_sum, blocks_per_grid * sizeof(float));

    // a = 0..N-1 and b = 2 everywhere, so the expected dot product is
    // 2 * sum(0..N-1) = N * (N-1) — easy to verify on the CPU below.
    for (unsigned int i = 0; i < N; ++i) {
        host_a[i] = i;
        host_b[i] = 2;
    }

    // Copy the input vectors to the device.
    cudaMemcpy(device_a, host_a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, N * sizeof(float), cudaMemcpyHostToDevice);

    // Kernel call.
    gpu_vector_dot <<< blocks_per_grid, threadsPerBlock >>> (device_a, device_b, device_partial_sum);

    // Copy the per-block partial sums back to the host.
    cudaMemcpy(partial_sum, device_partial_sum, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost);

    // Final reduction of the per-block partials on the host.
    host_c = 0;
    for (unsigned int i = 0; i < blocks_per_grid; ++i) {
        host_c += partial_sum[i];
    }

    printf("The computed dot product is: %f\n", host_c);

    // Closed form of the expected result (arithmetic series times b == 2).
    #define cpu_sum(x) (x * (x+1))

    /* GPU and CPU float results should not be compared with `==`: the
     * parallel summation order differs, so the last bits may not match.
     * Compare the absolute difference against a small tolerance instead
     * (fix — the original comment said exactly this, then used `==`). */
    if (fabsf(host_c - cpu_sum((float)(N - 1))) < 1e-3f) {
        printf("The dot product computed by GPU is correct.\n");
    } else {
        printf("Error in dot product computation.");
    }

    // Free dynamically allocated memory on device and host.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_partial_sum);
    free(host_a);
    free(host_b);
    free(partial_sum);
    return 0;
}
1,983
#include <stdio.h>
#include <cstdlib>
#include "type.cuh"
#include "case.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// Forward declarations of the reporting helpers used by Report().
void ObjFuncStat(FILE *fp, IPTR pj, Population *p);
void RawStat(FILE *fp, IPTR pj, Population *p);
__host__ void PhenoPrint(FILE *fp, IPTR pj, Population *p);
void GooguStat(FILE *fp, IPTR pop, Population *p);

// Report per-generation statistics:
//  - appends raw progress stats to p->oFile,
//  - appends Googu-format stats to p->fitFile,
//  - on the best-so-far generation (p->bigGen == p->generation) appends
//    phenotype info to p->phenoFile and, when case saving is enabled,
//    saves the best individual to p->caseFileName,
//  - finally echoes the raw stats to stdout.
// Exits the process if any of the output files cannot be opened.
void Report(int gen, IPTR pop, Population *p)
{ /* report generations stats */
    FILE *fp;

    /* print Progress Statistics to file */
    if( (fp = fopen(p->oFile,"a")) == NULL){
        printf("error in opening file ofile: %s \n", p->oFile);
        exit(1);
    }else{
        RawStat(fp, pop, p);
        fclose(fp);
    }

    /* print Progress Statistics for Googu */
    if( (fp = fopen(p->fitFile,"a")) == NULL){
        printf("error in opening file fitFile in report: %s \n", p->fitFile);
        exit(1);
    }else{
        GooguStat(fp, pop, p);
        fclose(fp);
    }

    /* print Phenotype information for Googu*/
    if(p->bigGen == p->generation){
        if( (fp = fopen(p->phenoFile,"a")) == NULL){
            printf("error in opening file phenoFile %s \n", p->phenoFile);
            exit(1);
        }else{
            PhenoPrint(fp, pop, p);
            // Wait for GPU to finish before accessing on host
            //cudaDeviceSynchronize();
            fclose(fp);
        }
    }

    /* If improvement on current best, save new best individual to case-base */
    if(p->saveCases){
        if(p->bigGen == p->generation) {
            if( (fp = fopen(p->caseFileName, "a")) == NULL){
                printf("error in opening file casefile: %s \n", p->caseFileName);
                exit(1);
            }else{
                SaveCase(fp, &pop[p->maxi], p->generation, p);
                fflush(fp);
                fclose(fp);
            }
        }
    }

    /* Progress stats on stdout */
    RawStat(stdout, pop, p);
}

// Write one line: generation number and the individual's objective value.
void ObjFuncStat(FILE *fp, IPTR pj, Population *p)
{
    fprintf(fp,"%d %f\n", p->generation, pj->objfunc);
}

// Write one line of raw stats: generation, max/avg/min fitness, scaled
// max/min, best-so-far bookkeeping, and the objective of the current best.
void RawStat(FILE *fp, IPTR pop, Population *p)
{
    fprintf(fp," %3d %.3f %.3f %.3f %.3f %.3f",
        p->generation, p->max, p->avg, p->min, p->smax, p->smin);
    fprintf(fp," %3d %.3f %3d", p->bigGen, p->bigMax, p->bigInd);
    fprintf(fp," %.3f\n", pop[p->maxi].objfunc);
}

// Write one line in the Googu format: generation, max, avg, min fitness.
void GooguStat(FILE *fp, IPTR pop, Population *p)
{
    fprintf(fp," %3d %.3f %.3f %.3f\n",
        p->generation, p->max, p->avg, p->min);
}
1,984
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>

#define N 8388608        /* 2^23 elements — power of two, so every stage divides evenly */
#define BLOCK_DIM 256

//Kernel
/* Per-block tree reduction: each block sums BLOCK_DIM ints from `in`
 * into shared memory and writes its partial sum to out[blockIdx.x].
 * The interleaved (modulo) addressing causes warp divergence but is
 * correct as long as blockDim.x is a power of two.
 * No bounds guard: the host launches exactly one thread per element. */
__global__ void reduction(int * in, int * out){
    int globalid = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ int s_array[BLOCK_DIM];
    s_array[threadIdx.x] = in[globalid];
    __syncthreads();
    for (int i = 1; i < blockDim.x; i *= 2){
        if (threadIdx.x % (2*i) == 0){
            s_array[threadIdx.x] += s_array[threadIdx.x+i];
        }
        __syncthreads();   /* barrier outside the divergent if — all threads reach it */
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = s_array[0];
}

int main(){
    struct timeval t1, t2;
    int *hArray;
    int hReduction;
    int *dIn, *dOut; //Device Arrays

    /* Allocate host memory */
    hArray = (int*)malloc(N*sizeof(int));

    /* Initialize the vector with values in [-100, 100) */
    srand(time(NULL));
    for (int i = 0; i < N; i++){
        hArray[i] = ((float)rand()/RAND_MAX)*200 - 100;
    }

    /* Allocate device memory (dOut holds one partial per block) */
    cudaMalloc((void **)&dIn, N*sizeof(int));
    cudaMalloc((void **)&dOut, (N/BLOCK_DIM)*sizeof(int));

    /* Copy Host->Device */
    cudaMemcpy(dIn, hArray, N*sizeof(int), cudaMemcpyHostToDevice);

    int *aux;
    int block_dim_stage = BLOCK_DIM;
    int blocks;
    gettimeofday(&t1, 0);

    /* Multi-stage reduction: each pass shrinks the problem by a factor of
     * block_dim_stage, ping-ponging dIn/dOut, until one value remains.
     * Relies on N being a power of BLOCK_DIM-sized chunks (true for 2^23). */
    for(int left = N; left > 1; left /= block_dim_stage){
        if(left < block_dim_stage) block_dim_stage = left;   /* final, smaller stage */
        blocks = left / block_dim_stage;
        cudaDeviceSynchronize();
        reduction<<<blocks, block_dim_stage>>>(dIn, dOut);
        /* swap the ping-pong buffers: this stage's output feeds the next */
        aux = dIn;
        dIn = dOut;
        dOut = aux;
    }
    cudaDeviceSynchronize();
    gettimeofday(&t2, 0);

    /* Copy Device->Host: after the final swap the result sits in dIn[0] */
    cudaMemcpy(&hReduction, dIn, sizeof(int), cudaMemcpyDeviceToHost);

    /* Error check against a sequential CPU sum */
    int hReduction2 = 0;
    for(int i = 0; i < N; i++){
        hReduction2 += hArray[i];
    }
    if(hReduction != hReduction2) printf("Error\n");
    else printf("Correcto\n");

    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Tiempo: %f ms\n", time);
    printf("Reducción = %d\n", hReduction);

    /* Free host and device memory */
    free(hArray);
    cudaFree(dIn);
    cudaFree(dOut);
}
1,985
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>

// nvcc -arch=sm_70 -std=c++14 exemplo2.cu -o exemplo2 && ./exemplo2

/* Functor producing uniform random doubles in the distribution's range.
 * The input value x is ignored; each call advances the engine state.
 * NOTE(review): the engine is stored by value inside the functor. On the
 * host path used below this works (sequential calls mutate one copy), but
 * if this functor were run on a device_vector, per-thread copies would
 * presumably repeat values — verify before reusing on the device. */
struct random_ex{
    thrust::minstd_rand rng;
    thrust::uniform_real_distribution<double> dist;

    random_ex(thrust::uniform_real_distribution<double> dist, thrust::minstd_rand rng)
        : dist(dist), rng(rng) {}

    __host__ __device__ double operator()(const double& x){
        // default_random_engine is currently an alias for minstd_rand, and may change in a future version.
        return dist(rng);
    }
};

int main(){
    /* Seed is read from stdin so runs are reproducible. */
    int seed;
    std::cin >> seed;
    thrust::minstd_rand rng(seed);
    thrust::uniform_real_distribution<double> dist(25, 40);   /* values in [25, 40) */

    /* Fill a 10-element host vector with random draws. */
    thrust::host_vector<double> host(10, 0);
    thrust::transform(host.begin(), host.end(), host.begin(), random_ex(dist, rng));

    printf("Host vector: ");
    for (auto i = host.begin(); i != host.end(); i++) {
        std::cout << *i << " ";   // this access is fast -- CPU
    }
}
1,986
#include <stdio.h>
#include <stdlib.h>

/* Device-resident global read back on the host via cudaMemcpyFromSymbol. */
__device__ int d_value;

/* GPU entry point: every thread stores 1 into the shared device global
 * and reports its own thread index together with the value it sees. */
__global__ void test_Kernel()
{
    const int tid = threadIdx.x;
    d_value = 1;
    printf("threadID %-3d d_value%3d\n", tid, d_value);
}

/* Host entry point: launch one block of two threads, then pull the
 * device symbol back and print it from the CPU side. */
int main()
{
    int h_value = 0;

    // kernelName<<<#block_per_grid, #thread_per_block, shared_size, s>>>(param1, ...);
    test_Kernel<<<1, 2>>>();

    /* cudaMemcpyFromSymbol blocks until the kernel has finished,
     * so h_value reflects the store performed on the device. */
    cudaMemcpyFromSymbol(&h_value, d_value, sizeof(int), 0, cudaMemcpyDeviceToHost);
    printf("Output from host: %d\n", h_value);
    return 0;
}
1,987
//
// Created by root on 2020/12/3.
//
#include "thrust/device_vector.h"
#include "thrust/host_vector.h"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/transform.h"
#include "math.h"
#include "stdio.h"

#define N 64

/* Maps an index x in [0, n) onto [0, 1] and returns its absolute
 * distance from a fixed reference point mRef. */
struct DistanceFrom {
    float mRef;   /* reference point in [0, 1] */
    int mN;       /* number of sample points */

    DistanceFrom(float ref, int n) : mRef(ref), mN{n} {}

    __host__ __device__ float operator()(const float &x) {
        /* normalize the index to [0, 1], then take |scaled - ref| */
        const float t = x / (mN - 1);
        const float d = t - mRef;
        return std::sqrt(d * d);
    }
};

int main() {
    const float ref = 0.5f;

    /* Evaluate the functor over the index sequence 0..N-1 on the device. */
    thrust::device_vector<float> dvec_dist(N);
    thrust::transform(thrust::counting_iterator<float>(0),
                      thrust::counting_iterator<float>(N),
                      dvec_dist.begin(),
                      DistanceFrom(ref, N)); // instantiate function object DistanceFrom.

    /* Bring the results back and print index -> distance pairs. */
    thrust::host_vector<float> h_dist(N);
    h_dist = dvec_dist;
    for (int i = 0; i < N; i++) {
        printf("x = %.3f, dist = %.3f\n", 1.f * i / (N - 1), h_dist[i]);
    }
    return 0;
}
1,988
extern "C"
#define TILE_LENGTH 128

/*
 * Euclidean (L2) distance between two n-element float vectors.
 *
 * Launch contract: ONE block of TILE_LENGTH threads (<<<1, TILE_LENGTH>>>).
 * The input is consumed in tiles of TILE_LENGTH elements; every thread
 * stages one squared difference per tile into shared memory, thread 0
 * accumulates each tile, and finally writes sqrt of the total to result[0].
 *
 * Fixes vs. the previous version:
 *  - shared memory was indexed with the GLOBAL thread index
 *    (blockIdx.x * blockDim.x + threadIdx.x), which reads/writes out of
 *    bounds of ds_R[TILE_LENGTH] for any block other than block 0; the
 *    lane index threadIdx.x is now used, matching the single-block launch
 *    (identical behavior when gridDim.x == 1).
 *  - sqrt -> sqrtf so the final step stays in single precision.
 */
__global__ void l2(float *v1, float *v2, int n, float *result)
{
    __shared__ float ds_R[TILE_LENGTH];
    int tx = threadIdx.x;      /* lane within the (single) block */
    float ret = 0.0f;          /* running sum; only meaningful on thread 0 */

    /* ceil(n / TILE_LENGTH) tiles */
    for (int t = 0; t < (n - 1) / TILE_LENGTH + 1; t++) {
        int idx = t * TILE_LENGTH + tx;
        if (idx < n) {
            float d = v1[idx] - v2[idx];
            ds_R[tx] = d * d;
        } else {
            ds_R[tx] = 0;      /* zero-pad the final partial tile */
        }
        __syncthreads();       /* tile fully staged before thread 0 reads it */
        if (tx == 0) {
            for (int i = 0; i < TILE_LENGTH; i++) {
                ret += ds_R[i];
            }
        }
        __syncthreads();       /* keep thread 0's reads ahead of the next stores */
    }

    if (tx == 0) {
        result[0] = sqrtf(ret);
    }
}
1,989
/*
 * EzLeftUpdater.cpp
 *
 * Created on: 23 Jan 2016
 * Author: aleksandr
 */
#include "EzLeftUpdater.h"
#include "SmartIndex.h"

/*
 * indx must lie in the range [0, sizeY-1]
 *
 * Updates the Ez field at column 0 (the left edge of the grid) for row n
 * from interior values (columns 1 and 2) and the two-level history stored
 * in EzLeft, then shifts that history: level 1 <- level 0, level 0 <- the
 * freshly updated field. The coeff[0..2] weighting over current and past
 * boundary samples looks like a second-order absorbing boundary condition
 * (Mur-type) — NOTE(review): confirm against where coeff[] is defined.
 * Ez/EzLeft/coeff come from the headers above and are not visible here.
 */
__device__ void EzLeftUpdater::operator() (const int indx) {
    int n = indx;
    /* New boundary value from interior columns and the stored history. */
    Ez(0, n) = coeff[0]*(Ez(2, n) + EzLeft(0, 1, n))
             + coeff[1] * (EzLeft(0, 0, n) + EzLeft(2, 0, n) - Ez(1, n) - EzLeft(1, 1, n))
             + coeff[2] * EzLeft(1, 0, n)
             - EzLeft(2, 1, n);
    /* Shift the two-level history for columns 0..2 of this row. */
    for (int m = 0; m < 3; m++) {
        EzLeft(m, 1, n) = EzLeft(m, 0, n);
        EzLeft(m, 0, n) = Ez(m, n);
    }
}
1,990
#include "includes.h"

/*
 * Per-pixel, per-filter soft-threshold (shrinkage) step with dual update,
 * as used in ADMM-style solvers.
 *
 * For each of nFilts planes of an nRows x nCols image:
 *   x = X / (nRows*nCols)            (X appears to carry an FFT-style scale)
 *   y = x + u
 *   Y = sign(y) * max(|y| - lambda*L1Weight[k], 0)   (soft threshold)
 *   U = u + x - Y                                     (dual variable update)
 *
 * The threshold is realized branch-free: absxV1 = |y| - thresh;
 * signbit(-absxV1) is 1 when absxV1 > 0 (and 0 otherwise), so multiplying
 * by copysign(absxV1, y) yields the shrunk value or zero.
 * One thread handles one (row, col) pixel across all filters; 2-D launch,
 * guarded against ragged grid edges.
 */
__global__ void cuda_Shrink_CalU_Vector(float *Y, float *U, float *X, float lambda, float *L1Weight, int nRows, int nCols, int nFilts)
{
    unsigned int Tidx = threadIdx.x + blockIdx.x * blockDim.x;   /* column */
    unsigned int Tidy = threadIdx.y + blockIdx.y * blockDim.y, index;   /* row */
    float WLambda;
    float absxV1, X_temp, U_temp, Y_temp;

    if ((Tidx < nCols) && (Tidy < nRows)) {
        for (int k = 0; k < nFilts; k += 1) {
            /* plane-major layout: pixel (Tidy, Tidx) of filter plane k */
            index = Tidx + (Tidy + nRows * k) * nCols;
            X_temp = (X[index] / (nRows * nCols));   /* undo the nRows*nCols scaling */
            U_temp = U[index];
            WLambda = lambda * L1Weight[k];          /* per-filter threshold */
            Y_temp = X_temp + U_temp;
            absxV1 = fabs(Y_temp) - WLambda;
            /* branch-free soft threshold: zero when |y| <= WLambda */
            Y_temp = signbit(-absxV1) * copysign(absxV1, Y_temp);
            Y[index] = Y_temp;
            U[index] = U_temp + X_temp - Y_temp;     /* dual update */
        }
    }
}
1,991
#include "includes.h"

/* Derivative of the piecewise hard-mish approximation:
 * 1 for x > 0, x + 1 on (-2, 0], 0 for x <= -2. */
__device__ float hard_mish_yashas_grad(float x)
{
    if (x > 0)
        return 1;
    if (x > -2)
        return x + 1;
    return 0;
}

/* Piecewise hard approximation of mish:
 * x for x > 0, x*x/2 + x on (-2, 0], 0 for x <= -2. */
__device__ float hard_mish_yashas(float x)
{
    if (x > 0)
        return x;
    if (x > -2)
        return x * x / 2 + x;
    return 0;
}

/* Numerically-stable mish (x * tanh(softplus(x))) using fast-math
 * intrinsics (__expf, __fdividef — reduced precision by design).
 * The three regimes avoid overflow/cancellation at the tails. */
__device__ float mish_yashas(float x)
{
    float e = __expf(x);
    if (x <= -18.0f)
        return x * e;                       /* deep negative tail: mish(x) ~= x*e^x */
    float n = e * e + 2 * e;                /* n = e^{2x} + 2e^x */
    if (x <= -5.0f)
        return x * __fdividef(n, n + 2);
    return x - 2 * __fdividef(x, n + 2);    /* algebraically equal form, stable for larger x */
}

/* Backward pass for hard-mish: scales each incoming gradient in `delta`
 * by the activation derivative at the stored forward input.
 * Flattened 2-D grid; guarded for the ragged tail. */
__global__ void gradient_array_hard_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) {
        const float x = activation_input_gpu[i];
        delta[i] *= hard_mish_yashas_grad(x);
    }
}
1,992
#include <stdio.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <time.h>
#include<sys/time.h>
#include<unistd.h>

/* Abort with file/line and the CUDA error string when a runtime call fails. */
#define cudaCheck(e) do { \
    if (cudaSuccess != (e)) { \
        fprintf(stderr, "Cuda runtime error in line %d of file %s \
: %s \n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \
        exit(EXIT_FAILURE); \
    } \
} while(0);

/// res = A (row_A, col_A) * B (col_A, col_B)
/* Naive global-memory matmul: one thread per output element (x = column,
 * y = row). The flat-tid guard only rejects threads past the END of res;
 * it assumes gridDim.x*blockDim.x covers exactly col_B columns. */
template <typename DType>
__global__ void matmul(DType* A, const int row_A, const int col_A,
                       DType* B, const int col_B, DType* res)
{
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    size_t tid = xIndex + yIndex * col_B;
    const size_t numel = row_A * col_B;
    if(tid >= numel) return;
    DType tmp = 0;
    for(int i = 0; i < col_A; ++i) {
        tmp += A[yIndex * col_A + i] * B[i*col_B + xIndex];
    }
    res[tid] = tmp;
}

#define DIM 32      /* shared-memory tile edge; must equal the block edge */
#define IPAD 0      /* optional padding to break bank conflicts (disabled) */

/* Tiled matmul: DIM x DIM tiles of A and B are staged through shared
 * memory; the ragged tail (col_A % DIM columns) is handled with direct
 * global loads after the tiled loop. */
template <typename DType>
__global__ void matmul_smem(DType* A, const int row_A, const int col_A,
                            DType* B, const int col_B, DType* res)
{
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    size_t tid = xIndex + yIndex * col_B;
    if(tid >= row_A * col_B) return;
    DType tmp = 0;
    for(int i = 0; i < col_A / DIM; ++i) {
        __shared__ DType sA[DIM][DIM+IPAD];
        __shared__ DType sB[DIM][DIM+IPAD];
        sA[threadIdx.y][threadIdx.x] = A[yIndex * col_A + DIM * i + threadIdx.x];
        sB[threadIdx.y][threadIdx.x] = B[xIndex + (threadIdx.y + i*DIM) * col_B];
        __syncthreads();    /* tiles fully loaded before use */
        for(int j = 0; j < DIM; ++j) {
            tmp += sA[threadIdx.y][j] * sB[j][threadIdx.x];
        }
        __syncthreads();    /* all reads done before the next iteration overwrites */
    }
    /* tail: remaining col_A % DIM products straight from global memory */
    if(col_A % DIM) {
        for(int i = col_A % DIM ; i > 0; --i)
            tmp += A[yIndex * col_A + col_A - i] * B[xIndex + (col_A - i)*col_B];
    }
    res[tid] = tmp;
}

/* Sequential reference implementation used to validate the GPU results. */
template <typename DType>
void cpu_matmul(DType* A, const int row_A, const int col_A,
                DType* B, const int col_B, DType* res)
{
    for(int i = 0; i < row_A; ++i) {
        for(int j = 0 ; j < col_B; ++j) {
            DType tmp = 0;
            for(int k = 0; k < col_A; ++k)
                tmp += A[i*col_A + k] * B[k*col_B + j];
            res[i*col_B + j] = tmp;
        }
    }
}

/* Ceiling division. */
__inline__ int divUp(int a, int b) { return (a + b - 1) / b; }

/* Sum of absolute element-wise differences between two buffers. */
template <typename DType>
DType diff(DType* A, DType* B, size_t numel)
{
    DType res = 0;
    for(size_t i = 0; i < numel; ++i)
        res += fabs(A[i] - B[i]);
    return res;
}

int main(int argc, char* argv[])
{
    srand(time(NULL));
    cudaStream_t stream[2];
    cudaCheck(cudaSetDevice(0));
    //! CUDA Streams
    /* NOTE(review): the streams are created but never used or destroyed. */
    for(int i = 0; i < 2; ++i) cudaCheck(cudaStreamCreate(&stream[i]));
    cudaProfilerStart();

    /* C (M x K) = A (M x N) * B (N x K) */
    const int M = 1024, N = 1000, K = 1024;
    float * c_buffers[5];   /* [0]=A, [1]=B, [2]=CPU result, [3]/[4]=GPU results */
    void* g_buffers[3];
    cudaCheck(cudaMallocHost(&c_buffers[0], sizeof(float)*M*N));
    cudaCheck(cudaMallocHost(&c_buffers[1], sizeof(float)*N*K));
    cudaCheck(cudaMallocHost(&c_buffers[2], sizeof(float)*M*K));
    cudaCheck(cudaMallocHost(&c_buffers[3], sizeof(float)*M*K));
    cudaCheck(cudaMallocHost(&c_buffers[4], sizeof(float)*M*K));
    cudaCheck(cudaMalloc(&g_buffers[0], sizeof(float)*M*N));
    cudaCheck(cudaMalloc(&g_buffers[1], sizeof(float)*N*K));
    cudaCheck(cudaMalloc(&g_buffers[2], sizeof(float)*M*K));

    /* Small integer entries keep float sums exact, so diff can expect 0. */
    for(int i = 0; i < M; ++i) {
        for(int j = 0; j < N; ++j) {
            c_buffers[0][i * N + j] = rand() % 3; // / double(RAND_MAX);
        }
    }
    for(int i = 0; i < N; ++i) {
        for(int j = 0; j < K; ++j) {
            c_buffers[1][i * K + j] = rand() % 2; // / double(RAND_MAX);
        }
    }

    /// cpu matmul
    struct timeval start;
    struct timeval end;
    gettimeofday(&start, NULL);
    cpu_matmul(c_buffers[0], M, N, c_buffers[1], K, c_buffers[2]);
    gettimeofday(&end, NULL);
    double cost = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("cpu matmul cost time:%5f ms\n", cost);

    /// gpu matmul using global memory
    cudaCheck(cudaMemcpy(g_buffers[0], c_buffers[0], sizeof(float)*M*N, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(g_buffers[1], c_buffers[1], sizeof(float)*N*K, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemset(g_buffers[2], 0, sizeof(float)*M*K));
    dim3 block(32, 32);
    /* NOTE(review): grid.x should cover the output COLUMNS (K) and grid.y
     * the ROWS (M); this builds grid.x from M — only correct here because
     * M == K == 1024. */
    dim3 grid(divUp(M, 32), divUp(K, 32));
    cudaDeviceSynchronize();
    gettimeofday(&start, NULL);
    matmul<float><<<grid, block>>>((float*)g_buffers[0], M, N, (float*)g_buffers[1], K, (float*)g_buffers[2]);
    /* NOTE(review): this timing window includes the D2H copy, but the
     * shared-memory version below is timed without it — not comparable. */
    cudaCheck(cudaMemcpy(c_buffers[3], g_buffers[2], sizeof(float)*M*K, cudaMemcpyDeviceToHost));
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    cost = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("gpu matmul cost time:%5f ms\n", cost);
    float diff_val = diff<float>(c_buffers[2], c_buffers[3], M*K);
    printf("diff_val:%5f\n", diff_val);

    /// gpu matmul using shared memory
    cudaCheck(cudaMemset(g_buffers[2], 0, sizeof(float)*M*K));
    dim3 block2(DIM, DIM);
    dim3 grid2(divUp(M, DIM), divUp(K, DIM));
    cudaDeviceSynchronize();
    gettimeofday(&start, NULL);
    matmul_smem<float><<<grid2, block2>>>((float*)g_buffers[0], M, N, (float*)g_buffers[1], K, (float*)g_buffers[2]);
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    cost = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("gpu matmul cost time:%5f ms\n", cost);
    cudaCheck(cudaMemcpy(c_buffers[4], g_buffers[2], sizeof(float)*M*K, cudaMemcpyDeviceToHost));
    diff_val = diff<float>(c_buffers[2], c_buffers[4], M*K);
    printf("diff_val:%5f\n", diff_val);

    cudaProfilerStop();
    /* NOTE(review): only c_buffers[0..2] are freed; [3] and [4] leak. */
    for(int i = 0; i < 3; ++i) {
        cudaCheck(cudaFreeHost(c_buffers[i]));
        cudaCheck(cudaFree(g_buffers[i]));
    }
    return 0;
}
1,993
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 64
#define MAX_ERR 1e-6

/* Despite the name, this kernel ignores a and b and evaluates the
 * numerator of Binet's closed form for Fibonacci numbers:
 *   out[tid] = phi^power - (1 - phi)^power     (phi = golden ratio)
 * The host divides by sqrt(5) to obtain F(power).
 * NOTE(review): no tid < N guard — safe only because the launch below
 * uses a single thread; pow() is double precision in device code. */
__global__ void vector_add(float *out, float *a, float *b, int power)
{
    //int stride = 1;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // 0 * 256 + 1 = 1 | BLOCK0 |
    // 0 * 256 + 2 = 2
    // 1 * 256 + 1 = 257 | BLOCK1 |
    // 1 * 256 + 2 = 258
    //out[tid] = a[tid] + b[tid];
    float golden = 1.61803398875;
    float golden_to_power = pow(golden,power);
    float golden_minus_one_to_power = pow((1 - golden),power);
    out[tid] = golden_to_power - golden_minus_one_to_power;
}

int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    // Allocate host memory
    /* NOTE(review): a and b are allocated but never initialized or copied;
     * the kernel does not read them, so this is dead setup. */
    a   = (float*)malloc(sizeof(float) * N);
    b   = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);

    // Allocate device memory
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);

    // Executing kernel
    /* <<<1,1>>>: only out[0] is ever computed. */
    int power = 100;
    vector_add<<<1,1>>>(d_out, d_a, d_b, power);

    // Transfer data back to host memory
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    /* Divide by sqrt(5) to complete Binet's formula: F(100).
     * float precision cannot represent F(100) exactly — expect rounding. */
    float result = out[0]/2.23606797749979;
    printf("out[0] = %lF\n", result);
    //printf("PASSED\n");

    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);

    // Deallocate host memory
    free(a);
    free(b);
    free(out);
}
1,994
#include <stdlib.h>
#include <stdio.h>

#define L 100   /* max filename length */

/* One thread per matrix row: count that row's nonzeros into nonz[tid],
 * then compute rowptr via an inclusive prefix sum.
 * NOTE(review): each thread re-sums nonz[0..tid], making the prefix sum
 * O(size^2) overall; a scan would be O(size). Single-block launch only
 * (tid = threadIdx.x), so size is limited to the max block size (1024). */
__global__ void kernelcount_nonz(int size, double *A, int *nonz, int *rowptr)
{
    int tid = threadIdx.x, count = 0;
    for(int i = 0; i < size; i++) {
        if(A[tid*size + i] != 0.0) {
            count ++;
        }
    }
    nonz[tid] = count;
    __syncthreads();   /* all counts written before the prefix sums read them */
    count = 0;
    for(int i = 0; i <= tid; i++) {
        count += nonz[i];
    }
    rowptr[tid + 1] = count;
}

/* One thread per row: scatter that row's nonzero values and column
 * indices into the CSR value/colidx arrays, starting at rowptr[tid]. */
__global__ void kernelCSR(int size, double *A, double *value, int *colidx, int *rowptr)
{
    int tid = threadIdx.x;
    int idx = rowptr[tid], count = 0;
    for(int i = 0; i < size; i++) {
        if(A[tid*size + i] != 0) {
            value[idx + count] = A[tid*size + i];
            colidx[idx + count] = i;
            count ++;
        }
    }
}

/* Reads or builds a dense size x size matrix, converts it to CSR on the
 * GPU, times the conversion with CUDA events, and writes the three CSR
 * arrays to "CSR_gpu.txt". */
int main()
{
    int size;
    double *A, *d_A, *value, *d_value;
    float time;
    int *colidx, *d_colidx, *rowptr, *d_rowptr, *d_nonzero;
    char ans;
    FILE *fp;

    printf("Do you want to input the data from the file?\n");
    scanf("%c", &ans);
    if(ans == 'y' || ans == 'Y') {
        /* Path 1: read a dense matrix from a user-supplied text file. */
        char filename[L];
        printf("\nWhat's your file name?\n");
        scanf("%s", filename);
        printf("What is the size of the matrix A?\n");
        scanf("%d", &size);
        A = (double*) malloc (size*size*sizeof(*A));
        rowptr = (int*) malloc ((size+1)*sizeof(*rowptr));
        rowptr[0] = 0;
        fp = fopen(filename, "r");
        if(!fp) {
            printf("Fail to open the file!\n");
            return 0;
        }
        else printf("Success to open the file!\n\n");
        printf("Start to input the dense matrix...\n");
        for(int i = 0; i < size; i++) {
            for(int j = 0; j < size; j++) {
                fscanf( fp, "%lf", &A[i*size + j]);
            }
        }
        printf("Done!\n");
        fclose(fp);
        printf("Close the file!\n");
    }
    else {
        /* Path 2: synthesize a test matrix. */
        int choice;
        printf("Ok, let's start to construct the matrix stored in dense.\n");
        printf("1. 0-1 criss-crossing matrix.\n");
        printf("2. 1D Poisson differential matrix.\n");
        printf("What's your choice?\n");
        scanf("%d", &choice);
        printf("What's the size of this matrix?\n");
        scanf("%d", &size);
        A = (double*) malloc (size*size*sizeof(*A));
        rowptr = (int*) malloc ((size+1)*sizeof(*rowptr));
        rowptr[0] = 0;
        if(choice == 1) {
            /* checkerboard of 0s and 1s */
            for(int i = 0; i < size; i++) {
                for(int j = 0; j < size; j++) {
                    if((i + j)%2 == 0) A[i*size + j] = 0;
                    else A[i*size + j] = 1;
                }
            }
        }
        else if (choice == 2) {
            /* tridiagonal [-1, 2, -1] Poisson matrix */
            for(int i = 0; i < size; i++) {
                for(int j = 0; j < size; j++) {
                    A[i*size + j] = 0;
                }
            }
            /* NOTE(review): out-of-bounds writes here — at i == 0 this
             * stores to A[-1], and at i == size-1 the second store hits
             * A[size*size + size - 1], both outside the allocation. The
             * loop bounds should exclude those edge rows. */
            for(int i = 0; i < size; i++) {
                A[i*size + i] = 2;
                A[i*size + i - 1] = -1;
                A[(i+1)*size + i] = -1;
            }
            A[size*size - 1] = 2;
        }
        else {
            printf("No this choice!\n");
            return 0;
        }
        printf("Done\n");
    }

    printf("====================================\n\n");
    printf("Start to convert the dense matrix to sparse matrix with CSR!\n");
    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    cudaEventRecord(start, 0);

    cudaMalloc( (void**) &d_A, size*size*sizeof(double));
    cudaMalloc( (void**) &d_rowptr, (size+1)*sizeof(int));
    cudaMalloc( (void**) &d_nonzero, size*sizeof(int));
    cudaMemcpy( d_A, A, size*size*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy( d_rowptr, rowptr, (size+1)*sizeof(int), cudaMemcpyHostToDevice);

    /* single block of `size` threads — see the per-kernel notes above */
    dim3 block(size);
    dim3 grid(1);
    kernelcount_nonz<<< grid, block>>>(size, d_A, d_nonzero, d_rowptr);
    /* rowptr[size] (= total nonzeros) sizes the value/colidx arrays */
    cudaMemcpy( rowptr, d_rowptr, (size+1)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMalloc( (void**) &d_value, rowptr[size]*sizeof(double));
    cudaMalloc( (void**) &d_colidx, rowptr[size]*sizeof(int));
    kernelCSR<<< grid, block>>>(size, d_A, d_value, d_colidx, d_rowptr);
    value = (double*) malloc (rowptr[size]*sizeof(*value));
    colidx = (int*) malloc (rowptr[size]*sizeof(*colidx));
    cudaMemcpy( value, d_value, rowptr[size]*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy( colidx, d_colidx, rowptr[size]*sizeof(int), cudaMemcpyDeviceToHost);
    /* NOTE(review): cudaThreadSynchronize is deprecated; use
     * cudaDeviceSynchronize in new code. */
    cudaThreadSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf( "Global memory processing time: %f (ms)\n" , time);

    cudaFree(d_A);
    cudaFree(d_value);
    cudaFree(d_colidx);
    cudaFree(d_rowptr);
    cudaFree(d_nonzero);
    printf("Successfully free the cuda memory\n");
    printf("====================================\n\n");

    /* Write the CSR triplet to disk.
     * NOTE(review): the `if(i < rowptr[size])` / `if(i <= size)` tests are
     * always true inside their loop bounds, so the `else` branches (the
     * newline-terminated final entry) are dead code — every sequence is
     * comma-separated with no trailing newline. */
    char file[] = "CSR_gpu.txt";
    fp = fopen(file, "w");
    if(!fp) {
        printf("Fail to open the file!\n");
        return 0;
    }
    else printf("Start to write the file!\n");
    for(int i = 0; i < rowptr[size]; i++) {
        if(i < rowptr[size]) {
            fprintf( fp,"%f,", value[i]);
        }
        else {
            fprintf( fp, "%f\n", value[i]);
        }
    }
    for(int i = 0; i < rowptr[size]; i++) {
        if(i < rowptr[size]) {
            fprintf( fp, "%d,", colidx[i]);
        }
        else {
            fprintf( fp, "%d\n", colidx[i]);
        }
    }
    for(int i = 0; i < size + 1; i++) {
        if(i <= size) {
            fprintf( fp, "%d,", rowptr[i]);
        }
        else {
            fprintf( fp, "%d\n", rowptr[i]);
        }
    }
    fclose(fp);
    printf("Close the file!\n");
    /*
    free(A);
    free(value);
    free(colidx);
    free(rowptr);
    printf("Successfully free the memory\n\n");
    */
    return 0;
}
1,995
/*
The data set that we are using has 4 attributes but we are using only 2 attributes.
Those 2 attributes are 1)Study Time 2) Exam Performance.
These 2 attributes will be used to calculate the student's "KnowledgeLevel"
KnowledgeLevel can be High or Low in our program but in the data set
"KnowledgeLevel" has High, Low and Middle. We will represent High as 1 and low
as 0. For now we will consider Middle as High so it will be 1.

Fixes applied in this revision:
 - the kernel (and its prototype) declared the seed coordinates and
   seedDistance as `int`, so the float seeds (0.2, 0.78, ...) were truncated
   to 0 on every launch; they are now `float`.
 - `if(psvLX = svLX && ...)` assigned instead of compared; now `==`.
 - cluster_two was declared [250] while 260 structs are copied to/from the
   device (host buffer overflow); now [260].
 - the second averaging loop in remapSeedValues summed cluster_one instead
   of cluster_two.
 - the kernel's distance was a plain coordinate sum; it now matches the
   squared Euclidean metric used by calculateDistanceSerial.
 - `abs` on float (may resolve to the int overload and truncate) replaced
   with fabsf.
 - cluster results are copied back to the host before remapSeedValues,
   which reads the host-side cluster arrays.
 - the kernel now guards idx < N.
*/
#include<stdio.h>
#include<math.h>
#include<string.h>
#include<stdlib.h>
#include <cuda.h>

void readDataset();    /*<--- This function reads data set into below arrays.*/
void displayDataset(); /*<--- This function will display our data set*/
__global__ void startClustering(float*, float*, float*, int, float, float, float, float, float, int, int, int, int, struct cluster*, struct cluster*);
void remapSeedValues();
void displayClusters();

int highCluster = 1, lowCluster = 1;   /* 1 while the cluster's seed is still moving */
float study_time[260]      /*<--- attribute number: 1*/,
      examPerformance[260] /*<--- attribute number: 2*/,
      targetAnswers[260]   /*<---- real knowledge level: 3*/;
float ourAnswer[258]; /*<--- this will store our answer for knowledge level 1 = High, 0 = Low*/

/* sample seed values to create 2 clusters / knowledge levels;
 * they move toward the cluster centroids across iterations */
float svLX = 0.2, svLY = 0.2, svHX = 0.78, svHY = 0.78;
float psvLX, psvLY, psvHX, psvHY;      /* previous-iteration seed values */
float seedDistance = 0.0;              /* squared distance between the two seeds */

/* examPerformance is on Y-axis and study_time is on X-axis */
struct cluster {
    float study_time;
    float exam_performance;
    float target_value;
} cluster_one[260], cluster_two[260];  /* was [250]: 260 structs are copied in/out */
int cluster_one_index = 0, cluster_two_index = 0;

/* Assign point idx to the High cluster (cluster_one) or Low cluster
 * (cluster_two) when its squared distance to that cluster's seed is within
 * seedDistance and that cluster is still active. One thread per point. */
__global__ void startClustering(float *study_time, float *examPerformance, float *targetAnswers,
                                int N, float svHX, float svHY, float svLX, float svLY,
                                float seedDistance, int lowCluster, int highCluster,
                                int cluster_one_index, int cluster_two_index,
                                struct cluster *cluster_one, struct cluster *cluster_two)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;   /* guard the ragged tail */

    float dx, dy, distance_1, distance_2;
    /* squared Euclidean distance — same metric as calculateDistanceSerial */
    dx = svHX - study_time[idx];
    dy = svHY - examPerformance[idx];
    distance_1 = dx * dx + dy * dy;
    dx = svLX - study_time[idx];
    dy = svLY - examPerformance[idx];
    distance_2 = dx * dx + dy * dy;

    if((distance_1 <= seedDistance) && (highCluster==1)) {
        cluster_one[idx].study_time = study_time[idx];
        cluster_one[idx].exam_performance = examPerformance[idx];
        cluster_one[idx].target_value = targetAnswers[idx];
    }
    else if((distance_2 <= seedDistance) && (lowCluster==1)) {
        cluster_two[idx].study_time = study_time[idx];
        cluster_two[idx].exam_performance = examPerformance[idx];
        cluster_two[idx].target_value = targetAnswers[idx];
    }
}

/* Squared Euclidean distance between (x1,y1) and (x2,y2).
 * fabsf (not abs, which may truncate to int) — the sum of squares is
 * non-negative anyway, so this is purely defensive. */
float calculateDistanceSerial(float x1, float x2, float y1, float y2)
{
    return fabsf(((x2-x1)*(x2-x1))+((y2-y1)*(y2-y1)));
}

/* Copy the data set to the device, run the clustering kernel, recompute the
 * seeds from the resulting clusters, and rerun once if either seed moved. */
void startParallelProcessing()
{
    float *sT, *eP, *tA;
    struct cluster *c1, *c2;
    int N = 260;
    size_t size = N * sizeof(float);
    cudaMalloc((void **) &sT, size);
    cudaMalloc((void **) &eP, size);
    cudaMalloc((void **) &tA, size);
    cudaMemcpy(sT, study_time, size, cudaMemcpyHostToDevice);
    cudaMemcpy(eP, examPerformance, size, cudaMemcpyHostToDevice);
    cudaMemcpy(tA, targetAnswers, size, cudaMemcpyHostToDevice);
    size = N * sizeof(struct cluster);
    cudaMalloc((void **) &c1, size);
    cudaMalloc((void **) &c2, size);
    cudaMemcpy(c1, cluster_one, size, cudaMemcpyHostToDevice);
    cudaMemcpy(c2, cluster_two, size, cudaMemcpyHostToDevice);

    int block_size = 1;
    int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
    startClustering <<< n_blocks, block_size >>> (sT, eP, tA, N, svHX, svHY, svLX, svLY,
            seedDistance, lowCluster, highCluster, cluster_one_index, cluster_two_index, c1, c2);

    /* remapSeedValues reads the HOST cluster arrays — copy results back first */
    cudaMemcpy(cluster_one, c1, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(cluster_two, c2, size, cudaMemcpyDeviceToHost);
    remapSeedValues();

    if(psvHX == svHX && psvHY == svHY) {
        printf("\nHigh Knowledge Cluster Seed Value Achieved Successfully.\n");
        highCluster = 0;
    }
    if(psvLX == svLX && psvLY == svLY) {   /* was `psvLX = svLX` (assignment) */
        printf("\nLow Knowledge Cluster Seed Value Achieved Successfully.\n");
        lowCluster = 0;
    }
    if(lowCluster==1 || highCluster==1) {
        startClustering <<< n_blocks, block_size >>> (sT, eP, tA, N, svHX, svHY, svLX, svLY,
                seedDistance, lowCluster, highCluster, cluster_one_index, cluster_two_index, c1, c2);
    }
    cudaMemcpy(cluster_one, c1, sizeof(struct cluster)*N, cudaMemcpyDeviceToHost);
    cudaMemcpy(cluster_two, c2, sizeof(struct cluster)*N, cudaMemcpyDeviceToHost);
    cudaFree(sT);
    cudaFree(eP);
    cudaFree(tA);
    cudaFree(c1);
    cudaFree(c2);
}

int main()
{
    //freopen("output.txt","w",stdout);
    readDataset();
    psvLX = svLX;
    psvLY = svLY;
    psvHX = svHX;
    psvHY = svHY;
    seedDistance = calculateDistanceSerial(svLX,svHX,svLY,svHY);
    displayDataset();
    startParallelProcessing();
    displayClusters();
    return 0;
}

/* Move each seed to the centroid of the points currently assigned to its
 * cluster, remembering the previous seeds for the convergence test. */
void remapSeedValues()
{
    int counter, count = 0;
    float StudyTime = 0, ExamPerformance = 0;
    psvHX = svHX;
    psvHY = svHY;
    psvLX = svLX;
    psvLY = svLY;

    /* centroid of the High cluster */
    for(counter=2;counter<260;counter++) {
        if(cluster_one[counter].study_time==0 && cluster_one[counter].exam_performance==0) {
            break;   /* (0,0) marks the end of assigned entries */
        }
        count++;
        StudyTime = cluster_one[counter].study_time + StudyTime;
        ExamPerformance = cluster_one[counter].exam_performance + ExamPerformance;
    }
    svHX = StudyTime/count;
    svHY = ExamPerformance/count;

    /* centroid of the Low cluster (was summing cluster_one by mistake) */
    ExamPerformance = StudyTime = 0;
    count = 0;
    for(counter=2;counter<260;counter++) {
        if(cluster_two[counter].study_time==0 && cluster_two[counter].exam_performance==0) {
            break;
        }
        count++;
        StudyTime = cluster_two[counter].study_time + StudyTime;
        ExamPerformance = cluster_two[counter].exam_performance + ExamPerformance;
    }
    svLX = StudyTime/count;
    svLY = ExamPerformance/count;

    printf("\nPrevious Low X: %f\tNew Low X: %f\nPrevious Low Y: %f\tNew Low Y: %f\n",psvLX,svLX,psvLY,svLY);
    printf("\nPrevious High X: %f\tNew High X: %f\nPrevious High Y: %f\tNew High Y: %f\n",psvHX,svHX,psvHY,svHY);
    seedDistance = calculateDistanceSerial(svLX,svHX,svLY,svHY);
}

/* Parse the tab-separated "input.txt": column 0 -> study_time,
 * column 4 -> examPerformance, column 5 -> KnowledgeLevel mapped to
 * 1 (High/Middle) or 0 (Low). */
void readDataset()
{
    FILE *ptr_file;
    char buf[1000];
    ptr_file = fopen("input.txt","r");
    if (!ptr_file) {
        printf("Requested Input File Not Found :(");
        return;
    }
    int studyTimeIndex = 0, examPerformanceIndex = 0, targetAnswersIndex = 0;
    while (fgets(buf,1000, ptr_file)!=NULL) {
        int counter;
        char *p = strtok(buf,"\t");
        for(counter = 0;counter<=5 && p!=NULL;counter++) {
            if(p!=NULL) {
                switch(counter) {
                case 0:
                    study_time[studyTimeIndex++] = atof(p);
                    break;
                case 4:
                    examPerformance[examPerformanceIndex++] = atof(p);
                    break;
                case 5:
                    if(strcmp(p,"High")==0 || strcmp(p,"Middle")==0) {
                        targetAnswers[targetAnswersIndex++] = 1;
                    }
                    else {
                        targetAnswers[targetAnswersIndex++] = 0;
                    }
                    break;
                default:
                    break;
                }
            }
            p = strtok(NULL,"\t");
        }
    }
    fclose(ptr_file);
}

/* Dump the raw data set to stdout. */
void displayDataset()
{
    printf("\n\nDisplaying Dataset Entries\n\nStudy Time\tExam Performance\tKnowledge Level\n\n");
    int counter = 0;
    for(counter = 0;counter<259;counter++) {
        printf("%f\t%f\t\t%f\n",study_time[counter],examPerformance[counter],targetAnswers[counter]);
    }
    printf("\n\nEnd of Displaying Dataset\n\n");
}

/* Print both clusters with per-cluster tallies of true High/Low labels. */
void displayClusters()
{
    int counter, total_ones = 0, total_zeroes = 0, total = 0;
    printf("\n\nDisplaying Cluster of Users with High KnowledgeLevel: \nStudy Time\tExam Performance\tTarget Cluster (1: High, 0: Low)\n\n");
    for(counter = 2;counter<260;counter++) {
        if(cluster_one[counter].study_time==0 && cluster_one[counter].exam_performance==0) {
            break;
        }
        if(cluster_one[counter].target_value==1.0) {
            total_ones++;
        }
        if(cluster_one[counter].target_value==0.0) {
            total_zeroes++;
        }
        total++;
        printf("%f\t%f\t\t%f\n",cluster_one[counter].study_time, cluster_one[counter].exam_performance, cluster_one[counter].target_value);
    }
    printf("\n\nTotal Entries: %d\tLow Entried: %d\tHigh Entries: %d\n",total,total_zeroes,total_ones);
    printf("-------------------------------------------------------------------------------");
    printf("\n\nDisplaying Cluster of Users with Low KnowledgeLevel: \nStudy Time\tExam Performance\tTarget Cluster (1: High, 0: Low)\n\n");
    total = total_zeroes = total_ones = 0;
    for(counter = 0;counter<260;counter++) {
        if(cluster_two[counter].study_time==0 && cluster_two[counter].exam_performance==0) {
            break;
        }
        if(cluster_two[counter].target_value==1.0) {
            total_ones++;
        }
        if(cluster_two[counter].target_value==0.0) {
            total_zeroes++;
        }
        total++;
        printf("%f\t%f\t\t%f\n",cluster_two[counter].study_time, cluster_two[counter].exam_performance, cluster_two[counter].target_value);
    }
    printf("\n\nTotal Entries: %d\tLow Entried: %d\tHigh Entries: %d\n",total,total_zeroes,total_ones);
    printf("--------------------------------------------------------------------------------");
}
1,996
#include "includes.h"

/* Element-wise complex multiply: result = data_l * data_r, where all
 * buffers hold interleaved (re, im) float pairs. data_r holds a single
 * channel of channel_total complex values that is reused (tiled) across
 * every channel of data_l. One thread per complex pair; `total` is the
 * number of pairs in data_l/result. */
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
    const int pair = blockIdx.x * blockDim.x + threadIdx.x;  /* complex-pair index */
    if (pair >= total)
        return;

    const int idx = 2 * pair;                       /* float offset of (re, im) */
    const int one_ch_idx = idx % (2 * channel_total);  /* wrap into the single channel */

    const float a = data_l[idx];             /* Re(left)  */
    const float b = data_l[idx + 1];         /* Im(left)  */
    const float c = data_r[one_ch_idx];      /* Re(right) */
    const float d = data_r[one_ch_idx + 1];  /* Im(right) */

    /* (a + bi)(c + di) = (ac - bd) + (ad + bc)i */
    result[idx]     = a * c - b * d;
    result[idx + 1] = a * d + b * c;
}
1,997
#include <stdio.h>
#include <time.h>

#define N 10

/* Maximum of the first n entries of timer (assumes non-negative values). */
float max(float *timer, int n){
    int i = 0;
    float maxTimer=0.0;
    for( ; i < n ; i ++){
        if(timer[i] > maxTimer)maxTimer = timer[i];
    }
    return maxTimer;
}

/* Zero the first n entries of a. */
void flush(float *a, int n){
    int i = 0;
    for( ; i < n ; i++){
        a[i] = 0.0;
    }
}

/* Sum of the array.
 * NOTE(review): the loop bound is the macro N, not the parameter n —
 * the parameter is effectively ignored (harmless here since every call
 * passes N, but misleading). */
float sum(float *a, int n){
    float s = 0.0;
    int i = 0;
    for( ; i < N ; i++){
        s += a[i];
    }
    return s;
}

/* One phase of odd-even transposition sort (ascending), instrumented with
 * device clock() deltas:
 *   ts[threadId] — cycles spent computing the global thread id,
 *   tx[threadId] — cycles spent on the compare + the final store(s).
 * *signal > 0 selects the even phase (even threads own pair (t, t+1)),
 * *signal < 0 the odd phase. When the pair is already ordered (tmp <= 0)
 * the store a[t+1] = swap rewrites the same value — a deliberate no-op so
 * both branches perform a timed store. Pairs are disjoint within a phase,
 * so no synchronization is needed inside the kernel. */
__global__ void Sort(float *a, int n, float *ts, float *tx, int *signal){
    clock_t start0, finish0;
    clock_t start1, finish1;
    clock_t start2, finish2;
    int blockId = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    start0 = clock();
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
    finish0 = clock();
    ts[threadId] = finish0 - start0;
    float tmp = 0.0;
    float swap = 0.0;
    //even
    if(*signal > 0){
        if(threadId < n){
            if(threadId % 2 == 0){
                start1 = clock();
                tmp = a[threadId] - a[threadId + 1];
                finish1 = clock();
                swap = a[threadId + 1];
                if(tmp <= 0){
                    /* already ordered: timed no-op store */
                    start2 = clock();
                    a[threadId + 1] = swap;
                    finish2 = clock();
                }
                else{
                    /* out of order: exchange the pair */
                    a[threadId + 1] = a[threadId];
                    start2 = clock();
                    a[threadId] = swap;
                    finish2 = clock();
                }
                tx[threadId] = (finish2 - start2) + (finish1 - start1);
            }
        }
    }
    //odd
    if(*signal < 0){
        if(threadId + 1 < n){
            if(threadId % 2 == 1){
                start1 = clock();
                tmp = a[threadId] - a[threadId + 1];
                finish1 = clock();
                swap = a[threadId + 1];
                if(tmp <= 0){
                    start2 = clock();
                    a[threadId + 1] = swap;
                    finish2 = clock();
                }
                else{
                    a[threadId + 1] = a[threadId];
                    start2 = clock();
                    a[threadId] = swap;
                    finish2 = clock();
                }
                tx[threadId] = (finish2 - start2) + (finish1 - start1);
            }
        }
    }
}

/* Runs N alternating even/odd phases on an initially descending array,
 * collecting the per-phase maximum of both timers, and reports the summed
 * instrumentation overhead. */
int main(){
    float overhead = 0.0, TxSum = 0.0, TsSum = 0.0;
    int *signal = (int*)malloc(sizeof(int) * 1);   /* +1 = even phase, -1 = odd phase */
    float *a = (float*)malloc(sizeof(float) * N);
    float *Tx = (float*)malloc(sizeof(float) * N); /* per-phase max compare/store time */
    float *Ts = (float*)malloc(sizeof(float) * N); /* per-phase max id-computation time */
    float *ts = (float*)malloc(sizeof(float) * N);
    float *tx = (float*)malloc(sizeof(float) * N);
    signal[0] = 1;

    /* initialize array a (descending N-1..0) and zero Tx, Ts, tx, ts */
    int i = 0, x = N-1;
    for( ; i < N ; i++){
        a[i] = (float)x;
        x--;
        Tx[i] = 0.0;
        Ts[i] = 0.0;
        tx[i] = 0.0;
        ts[i] = 0.0;
    }

    float *d_a, *d_ts, *d_tx;
    int *d_signal;
    cudaMalloc(&d_a, sizeof(float) * N);
    cudaMalloc(&d_tx, sizeof(float) * N);
    cudaMalloc(&d_ts, sizeof(float) * N);
    cudaMalloc(&d_signal, sizeof(int) * 1);
    cudaMemcpy(d_a, a, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_tx, tx, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ts, ts, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_signal, signal, sizeof(int)*1, cudaMemcpyHostToDevice);
    dim3 grid(1,1,1);
    dim3 block(N,1,1);

    //GPU 'warm up'
    /* first (untimed-into-Tx/Ts) launch; inputs are re-uploaded afterwards */
    Sort<<<grid, block>>>(d_a, N, d_ts, d_tx, d_signal);
    cudaMemcpy(d_a, a, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_tx, tx, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ts, ts, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_signal, signal, sizeof(int) * 1, cudaMemcpyHostToDevice);

    /* N phases, recording max timer values from j = N-1 down to 0 and
     * flipping the even/odd signal between launches */
    i=0;
    int j = N - 1;
    for( ; i < N ; i++, j--){
        Sort<<<grid, block>>>(d_a, N, d_ts, d_tx, d_signal);
        cudaMemcpy(ts, d_ts, sizeof(float) * N, cudaMemcpyDeviceToHost);
        Ts[j] = max(ts, N);
        flush(ts, N);
        cudaMemcpy(d_ts, ts, sizeof(float) * N, cudaMemcpyHostToDevice);
        cudaMemcpy(tx, d_tx, sizeof(float) * N, cudaMemcpyDeviceToHost);
        Tx[j] = max(tx, N);
        flush(tx, N);
        cudaMemcpy(d_tx, tx, sizeof(float) * N, cudaMemcpyHostToDevice);
        *signal = *signal * (-1);
        cudaMemcpy(d_signal, signal, sizeof(int) * 1, cudaMemcpyHostToDevice);
    }
    cudaMemcpy(a, d_a, sizeof(float) * N, cudaMemcpyDeviceToHost);
    /*
    i = 0;
    for( ; i < N ; i ++){
        printf("a[%d] = %f ; ",i,a[i]);
        printf("\n");
    }
    i = 0;
    for( ; i < N ; i ++){
        printf("Tx[%d] = %f ; ",i,Tx[i]);
        printf("\n");
    }
    i = 0;
    for( ; i < N ; i ++){
        printf("Ts[%d] = %f ; ",i,Ts[i]);
        printf("\n");
    }
    */
    TxSum = sum(Tx, N);
    TsSum = sum(Ts, N);
    overhead = TxSum + TsSum;
    printf("TxSum = %f, TsSum = %f, overhead = %f \n", TxSum, TsSum, overhead);
    free(a);
    free(signal);
    free(Tx);
    free(Ts);
    free(ts);
    free(tx);
    cudaFree(d_a);
    cudaFree(d_ts);
    cudaFree(d_tx);
    cudaFree(d_signal);
    return 0;
}
1,998
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>

/*
 * In-place tree reduction of vec within a single thread block.
 * Preconditions: launched as <<<1, size>>>, size is a power of two,
 * and size <= 1024 (the per-block thread limit) -- enforced in main().
 * After the kernel completes, vec[0] holds the sum of all elements.
 */
__global__ void kreduce(unsigned int *vec, int size){
    int tid = threadIdx.x;
    // Halve the active range each step; each active thread adds its
    // partner from the upper half into the lower half.
    for(int offset = (size / 2); offset >= 1; offset /= 2){
        if(tid < offset){
            vec[tid] += vec[tid + offset];
        }
        // Barrier sits outside the divergent branch so every thread
        // in the block reaches it on every iteration.
        __syncthreads();
    }
}

/*
 * Host wrapper: copies vec to the device, reduces it with a single
 * single-block kernel launch, and copies the result (element 0) back
 * into *sum. The device buffer is freed before returning.
 */
void reduce(unsigned int *vec, unsigned int *sum, int size){
    unsigned int *d_vec;
    size_t bytes = (size_t)size * sizeof(unsigned int);
    if (cudaMalloc((void **)&d_vec, bytes) != cudaSuccess){
        fprintf(stderr, "cudaMalloc failed\n");
        exit(-1);
    }
    cudaMemcpy(d_vec, vec, bytes, cudaMemcpyHostToDevice);
    kreduce<<<1, size>>>(d_vec, size);
    cudaMemcpy(sum, d_vec, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(d_vec);
}

/*
 * Reads an exponent k, then 2^k unsigned values, from the file named in
 * argv[1]; reduces them on the GPU and prints the sum.
 */
int main(int argc, char **argv)
{
    if (argc < 2){
        printf("Usage: <filename>\n");
        exit(-1);
    }
    int size;
    unsigned int *vec;
    FILE *f = fopen(argv[1], "r");
    if (!f){
        printf("Usage: <filename>\n");
        exit(-1);
    }
    fscanf(f, "%d\n", &size);
    /* BUG FIX: the limit must match the kernel's single-block launch.
     * A block holds at most 1024 (2^10) threads, so the exponent may be
     * at most 10. The old check (size >= 20) let exponents 11..19 through,
     * launching up to 2^19 threads in one block -- a guaranteed launch
     * failure, exactly what the error message below already claims. */
    if (size > 10){
        printf("Size (%u) is too large: size is limited to 2^10\n", size);
        fclose(f);
        exit(-1);
    }
    size = 1 << size;
    vec = (unsigned int *) malloc(size * sizeof(unsigned int));
    assert(vec);
    for (int i = 0; i < size; i++){
        fscanf(f, "%u\n", &(vec[i]));
    }
    fclose(f);
    unsigned int sum;
    reduce(vec, &sum, size);
    printf("%u\n", sum);
    free(vec);
    return 0;
}
1,999
#include <stdio.h>

// Kernel: a single greeter. Only thread 0 of the block prints, even
// though several threads are launched.
__global__ void helloCUDA(float f)
{
    if (threadIdx.x != 0)
        return;
    printf("Hello thread %d, f=%f\n", threadIdx.x, f);
}

int main()
{
    // One block of five threads; the kernel itself filters down to thread 0.
    helloCUDA<<<1, 5>>>(1.2345f);
    // Block until the kernel finishes so the device-side printf is flushed
    // before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
2,000
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>

//#define N 1000000
#define SQRT_TWO_PI 2.506628274631000
#define BLOCK_D1 1024
#define BLOCK_D2 1
#define BLOCK_D3 1

// Note: Needs compute capability >= 2.0 for calculation with doubles, so compile with:
// nvcc kernelExample.cu -arch=compute_20 -code=sm_20,compute_20 -o kernelExample
// -use_fast_math doesn't seem to have any effect on speed

// CUDA kernel: dark-frame calibration, raws[i] -= darks[i] element-wise.
// Expects a 2D grid of 3D blocks (no third grid dimension); each thread
// handles one element, with an explicit idx < N bounds check since the
// grid generally over-covers N.
__global__ void calc_calib(short* raws, short* darks, int N)
{
    // note that this assumes no third dimension to the grid
    // id of the block
    int myblock = blockIdx.x + blockIdx.y * gridDim.x;
    // size of each block (within grid of blocks)
    int blocksize = blockDim.x * blockDim.y * blockDim.z;
    // id of thread in a given block
    int subthread = threadIdx.z * (blockDim.x * blockDim.y)
                  + threadIdx.y * blockDim.x + threadIdx.x;
    // assign overall id/index of the thread
    int idx = myblock * blocksize + subthread;
    if (idx < N) {
        raws[idx] -= darks[idx];
    }
}

// CPU analog for speed comparison (single-threaded reference).
int calc_calib_cpu(short* raws, short* darks, int N)
{
    for (int idx = 0; idx < N; idx++) {
        raws[idx] -= darks[idx];
    }
    return 0;
}

/* ---------------------- host code -----------------------------*/

// Fill all n elements of p with val (used to simulate input data).
void fill(short *p, int n, int val)
{
    for (int i = 0; i < n; i++) {
        p[i] = val;
    }
}

// Wall-clock time in seconds (microsecond resolution) for timing sections.
double read_timer()
{
    struct timeval end;
    gettimeofday(&end, NULL);
    return end.tv_sec + 1.e-6 * end.tv_usec;
}

int main(int argc, char *argv[])
{
    short* cpu_raws;
    short* gpu_raws;
    short* cpu_darks;
    short* gpu_darks;
    int N;
    cudaError_t cudaStat;

    printf("==========================================\n");
    for (N = 2296960; N <= 2296960; N += 2296960) {
        cpu_raws = (short*) malloc(sizeof(short) * N);
        cudaStat = cudaMalloc(&gpu_raws, sizeof(short) * N);
        if (cudaStat != cudaSuccess) {
            printf("device memory allocation failed on gpu_raws");
            return EXIT_FAILURE;
        }
        cpu_darks = (short*) malloc(sizeof(short) * N);
        cudaStat = cudaMalloc(&gpu_darks, sizeof(short) * N);
        if (cudaStat != cudaSuccess) {
            printf("device memory allocation failed on gpu_darks");
            return EXIT_FAILURE;
        }

        // fixed block dimensions (1024x1x1 threads)
        const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);

        // determine number of blocks we need for a given problem size.
        // FIX: compute ceil(N / threadsPerBlock) with integer arithmetic
        // BEFORE taking the square root. The old expression,
        //   ceil(pow(N/(BLOCK_D1*BLOCK_D2*BLOCK_D3), 0.5)),
        // truncated N/1024 with integer division first, which can undercount
        // blocks and leave the tail of the data unprocessed (the very case
        // the "NOT ENOUGH THREADS" warning below detects but did not prevent).
        int threadsPerBlock = BLOCK_D1 * BLOCK_D2 * BLOCK_D3;
        int blocksNeeded = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
        int tmp = (int) ceil(sqrt((double) blocksNeeded));
        printf("Grid dimension is %i x %i\n", tmp, tmp);
        dim3 gridSize(tmp, tmp, 1);

        int nthreads = BLOCK_D1 * BLOCK_D2 * BLOCK_D3 * tmp * tmp;
        if (nthreads < N) {
            // Should be unreachable with the ceil-div sizing above;
            // kept as a sanity check.
            printf("\n================ NOT ENOUGH THREADS TO COVER N=%d =======================\n\n", N);
        } else {
            printf("Launching %d threads (N=%d)\n", nthreads, N);
        }

        // simulate 'data'
        fill(cpu_raws, N, 3);
        fill(cpu_darks, N, 1);
        printf("Input values (raw): %d %d %d...\n", cpu_raws[0], cpu_raws[1], cpu_raws[2]);
        printf("Input values (dark): %d %d %d...\n", cpu_darks[0], cpu_darks[1], cpu_darks[2]);

        cudaDeviceSynchronize();
        double tInit = read_timer();

        // copy input data to the GPU
        cudaStat = cudaMemcpy(gpu_raws, cpu_raws, N * sizeof(short), cudaMemcpyHostToDevice);
        printf("Memory Copy from Host to Device (raw)");
        if (cudaStat) {
            printf("failed.\n");
        } else {
            printf("successful.\n");
        }
        cudaStat = cudaMemcpy(gpu_darks, cpu_darks, N * sizeof(short), cudaMemcpyHostToDevice);
        printf("Memory Copy from Host to Device (dark)");
        if (cudaStat) {
            printf("failed.\n");
        } else {
            printf("successful.\n");
        }
        cudaDeviceSynchronize();
        double tTransferToGPU = read_timer();

        // do the calculation
        calc_calib<<<gridSize, blockSize>>>(gpu_raws, gpu_darks, N);
        cudaDeviceSynchronize();
        double tCalc = read_timer();

        cudaStat = cudaMemcpy(cpu_raws, gpu_raws, N * sizeof(short), cudaMemcpyDeviceToHost);
        printf("Memory Copy from Device to Host (raw) ");
        if (cudaStat) {
            printf("failed.\n");
        } else {
            printf("successful.\n");
        }
        cudaDeviceSynchronize();
        double tTransferFromGPU = read_timer();

        printf("Output values: %d %d %d...%d %d %d\n", cpu_raws[0], cpu_raws[1], cpu_raws[2],
               cpu_raws[N-3], cpu_raws[N-2], cpu_raws[N-1]);

        // do calculation on CPU for comparison (unfair as this will only use one core)
        fill(cpu_raws, N, 3);
        fill(cpu_darks, N, 1);
        double tInit2 = read_timer();
        calc_calib_cpu(cpu_raws, cpu_darks, N);
        double tCalcCPU = read_timer();

        printf("Output values (CPU): %d %d %d...\n", cpu_raws[0], cpu_raws[1], cpu_raws[2]);
        printf("Timing results for n = %d\n", N);
        printf("Transfer to GPU time: %f\n", tTransferToGPU - tInit);
        printf("Calculation time (GPU): %f\n", tCalc - tTransferToGPU);
        printf("Calculation time (CPU): %f\n", tCalcCPU - tInit2);
        printf("Transfer from GPU time: %f\n", tTransferFromGPU - tCalc);

        printf("Freeing memory...\n");
        printf("==============================================\n");
        free(cpu_raws);
        free(cpu_darks);
        cudaFree(gpu_raws);
        cudaFree(gpu_darks);
    }
    printf("\n\nFinished.\n\n");
    return 0;
}