serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
11,101
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_reference.h>
#include <thrust/tuple.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>

#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048

// A sphere with a color (r,b,g), a radius and a center (x,y,z).
struct Sphere {
    float r, b, g;
    float radius;
    float x, y, z;

    // Orthographic ray/sphere hit test for a ray fired straight down z from
    // pixel (ox,oy). Returns the z of the hit point, or -INF on a miss.
    // *n receives dz/radius, used as a shading factor in [0,1].
    __host__ __device__ float hit( float ox, float oy, float *n ) {
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            float dz = sqrtf( radius*radius - dx*dx - dy*dy );
            *n = dz / sqrtf( radius * radius );
            return dz + z;
        }
        return -INF;
    }
};

// Thrust functor: maps a flat pixel index to an RGBA tuple by shading the
// frontmost (largest z) sphere hit by that pixel's ray.
struct CalculateBitmap {
    Sphere* s;                       // device pointer to SPHERES spheres
    CalculateBitmap(Sphere* sp) : s(sp) {}

    __host__ __device__
    thrust::tuple<unsigned char, unsigned char, unsigned char, unsigned char>
    operator()(const int& idx) const {
        int x = idx / DIM;
        int y = idx % DIM;
        float ox = (x - DIM/2);
        float oy = (y - DIM/2);
        float r = 0, g = 0, b = 0;
        float maxz = -INF;
        for (int i = 0; i < SPHERES; i++) {
            float n;
            float t = s[i].hit( ox, oy, &n );
            if (t > maxz) {          // keep the closest hit so far
                float fscale = n;
                r = s[i].r * fscale;
                g = s[i].g * fscale;
                b = s[i].b * fscale;
                maxz = t;
            }
        }
        return thrust::make_tuple((unsigned char)(r*255),
                                  (unsigned char)(g*255),
                                  (unsigned char)(b*255),
                                  (unsigned char)255);
    }
};

// Write bitmap (RGBA, 4 bytes/pixel, row-major x + y*xdim) as ASCII PPM (P3).
void ppm_write(unsigned char* bitmap, int xdim, int ydim, FILE* fp)
{
    int i, x, y;
    fprintf(fp, "P3\n");
    fprintf(fp, "%d %d\n", xdim, ydim);
    fprintf(fp, "255\n");
    for (y = 0; y < ydim; y++) {
        for (x = 0; x < xdim; x++) {
            i = x + y * xdim;
            fprintf(fp, "%d %d %d ", bitmap[4*i], bitmap[4*i+1], bitmap[4*i+2]);
        }
        fprintf(fp, "\n");
    }
}

int main(int argc, char* argv[])
{
    srand(time(NULL));
    if (argc != 2) {
        printf("> a.out [filename.ppm]\n");
        printf("for example, '> a.out result.ppm' means executing THRUST\n");
        exit(0);
    }
    FILE* fp = fopen(argv[1], "w");

    // Random scene: SPHERES spheres with random color, position and radius.
    Sphere* temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
    Sphere* dev_temp_s;
    cudaMalloc( (void**)&dev_temp_s, SPHERES*sizeof(Sphere));
    for (int i = 0; i < SPHERES; i++) {
        temp_s[i].r = rnd( 1.0f );
        temp_s[i].g = rnd( 1.0f );
        temp_s[i].b = rnd( 1.0f );
        temp_s[i].x = rnd( 2000.0f ) - 1000;
        temp_s[i].y = rnd( 2000.0f ) - 1000;
        temp_s[i].z = rnd( 2000.0f ) - 1000;
        temp_s[i].radius = rnd( 200.0f ) + 40;
    }
    cudaMemcpy(dev_temp_s, temp_s, SPHERES*sizeof(Sphere), cudaMemcpyHostToDevice);

    // One RGBA tuple per pixel, computed by a single thrust::transform over
    // the flat pixel indices 0 .. DIM*DIM-1.
    thrust::device_vector<thrust::tuple<unsigned char, unsigned char,
                                        unsigned char, unsigned char> > dev_bitm(DIM*DIM);
    thrust::device_vector<int> idx(DIM*DIM);
    thrust::sequence(idx.begin(), idx.end());

    unsigned char* bitmap = (unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4);

    clock_t start = clock();
    thrust::transform(idx.begin(), idx.end(), dev_bitm.begin(),
                      CalculateBitmap(dev_temp_s));
    clock_t end = clock();

    thrust::host_vector<thrust::tuple<unsigned char, unsigned char,
                                      unsigned char, unsigned char> > bitm = dev_bitm;

    // Unpack tuples into the flat RGBA buffer. The functor maps idx to pixel
    // (x=idx/DIM, y=idx%DIM), so bitm[i + j*DIM] is the pixel at x=j, y=i,
    // which lands at flat index i*DIM + j, matching ppm_write's layout.
    // Bug fixes vs. the original: the redundant inner k-loop (each pixel was
    // written 4 times) is gone, and the alpha byte is stored at offset +3 --
    // the old "+ 4" wrote into the NEXT pixel and one byte past the end of
    // the buffer on the last pixel.
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            thrust::tuple<unsigned char, unsigned char,
                          unsigned char, unsigned char> px = bitm[i + j*DIM];
            bitmap[(i*DIM + j)*4 + 0] = thrust::get<0>(px);
            bitmap[(i*DIM + j)*4 + 1] = thrust::get<1>(px);
            bitmap[(i*DIM + j)*4 + 2] = thrust::get<2>(px);
            bitmap[(i*DIM + j)*4 + 3] = thrust::get<3>(px);
        }
    }

    ppm_write(bitmap, DIM, DIM, fp);
    fclose(fp);

    // Release host and device resources (the original leaked all of these,
    // and also allocated an unused dev_bitmap which has been removed).
    free(bitmap);
    free(temp_s);
    cudaFree(dev_temp_s);

    printf("THRUST ray tracing: %1.6f sec\n", (end - start) / (float)CLOCKS_PER_SEC);
    printf("[%s] was generated.\n", argv[1]);
    return 0;
}
11,102
#include "includes.h" __global__ void kernel_updateweights_fl(int N, float *wt, float *x, float *q, float nu){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only M threads */ if (tid<N) { wt[tid]=((nu+1.0f)/(nu+x[tid]*x[tid])); q[tid]=wt[tid]-logf(wt[tid]); /* so that its +ve */ } }
11,103
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define N 512
#define TILE_WIDTH 16

__global__ void matrixMult (int *a, int *b, int *c, int width);
void matrixMultCPU (int a[N][N], int b[N][N], int c[N][N], int width);

// Wall-clock difference in seconds between two gettimeofday samples.
double myDiffTime(struct timeval &start, struct timeval &end)
{
    double d_start = (double)(start.tv_sec + start.tv_usec/1000000.0);
    double d_end   = (double)(end.tv_sec + end.tv_usec/1000000.0);
    return (d_end - d_start);
}

int main()
{
    // static: four N*N int matrices (~1 MB each) risk blowing the stack as
    // plain locals; static storage keeps the array-typed interface of
    // matrixMultCPU unchanged.
    static int a[N][N], b[N][N], c[N][N], g[N][N];
    timeval start, end;
    int *dev_a, *dev_b, *dev_c;
    int size = N * N * sizeof(int);

    // initialize matrices a and b with appropriate values
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i][j] = i*N + j;
            b[i][j] = i + j;
        }
    }

    cudaMalloc((void **) &dev_a, size);
    cudaMalloc((void **) &dev_b, size);
    cudaMalloc((void **) &dev_c, size);

    gettimeofday(&start, NULL);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);

    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    // BUG FIX: the original (int)ceil(N/dimBlock.x) performed INTEGER
    // division before ceil, silently dropping the tail tile whenever
    // N % TILE_WIDTH != 0. Use proper ceiling division instead.
    dim3 dimGrid((N + TILE_WIDTH - 1) / TILE_WIDTH,
                 (N + TILE_WIDTH - 1) / TILE_WIDTH);
    matrixMult<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
    cudaDeviceSynchronize();
    cudaMemcpy(g, dev_c, size, cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    printf("GPU Time for %i additions: %f\n", N, myDiffTime(start, end));

    gettimeofday(&start, NULL);
    matrixMultCPU(a, b, c, N);
    gettimeofday(&end, NULL);
    printf("CPU Time for %i additions: %f\n", N, myDiffTime(start, end));

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    // verify GPU result against the CPU reference
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (c[i][j] != g[i][j]) {
                printf("Results do not match! %i, %i, c=%i, g=%i\n",
                       i, j, c[i][j], g[i][j]);
                exit(1);
            }
        }
    }
}

// C = A * B for square width x width matrices, one output element per thread.
__global__ void matrixMult(int* A, int* B, int* C, int width)
{
    int k, sum = 0;
    int col = blockIdx.x*TILE_WIDTH + threadIdx.x;
    int row = blockIdx.y*TILE_WIDTH + threadIdx.y;
    if (col < width && row < width) {
        for (k = 0; k < width; k++)
            sum += A[row * width + k] * B[k * width + col];
        C[row * width + col] = sum;
    }
}

// CPU reference: classic triple-loop matrix multiply.
void matrixMultCPU (int a[N][N], int b[N][N], int c[N][N], int width)
{
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            int sum = 0;
            for (int k = 0; k < width; k++) {
                sum += a[i][k] * b[k][j];
            }
            c[i][j] = sum;
        }
    }
}
11,104
#include <stdio.h> #include <cuda_runtime.h> int main(int argc, char const *argv[]) { int nElem = 1024; // first init dim3 block(1024); dim3 grid((nElem + block.x - 1) / block.x); printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset fist time block = 512; grid= (nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset second time block = 256; grid = (nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset third time block = 256; grid = (nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset fourth time block = 128; grid = (nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset fivth time block = 64; grid = (nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset sixth time block = 32; grid =(nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); // reset seventh time block = 16; grid = (nElem + block.x - 1) / block.x; printf("grid.x : %d, block.x : %d\n", grid.x, block.x); return 0; }
11,105
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define NUM_THREADS 1024
#define EPSILON 0.0001
//#define EPSILON 0.00001 --> error is too small

// KERNEL: x*A = B (row vector times height x width matrix).
// One thread per matrix element; each contributes x[row]*A[row][col] to
// B[col] with an atomicAdd.
// PRECONDITION: B must be zeroed BEFORE launch (host cudaMemset). The
// original zeroed B from the first `width` threads and relied on
// __syncthreads(), but that barrier is block-scoped: threads in OTHER
// blocks could atomicAdd into B before/while block 0 zeroed it -- a race.
__global__ void MatMul(int* x, int* A, int* B, int height, int width)
{
    // flat index into the flattened weights matrix
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int row = i / width;   // index into the input vector
    int col = i % width;   // index into the output vector
    if ((i < height * width) && (row < height)) {
        atomicAdd(&B[col], x[row] * A[i]);
    }
}

// HOST
int main(int argc, char** argv)
{
    int *h_x, *h_A, *h_B, *d_x, *d_A, *d_B;
    int height = 256;
    int width = 100;

    // Allocate vectors and matrices in host memory and device memory
    h_x = (int*)malloc(height*sizeof(int));
    h_A = (int*)malloc(height*width*sizeof(int));
    h_B = (int*)malloc(width*sizeof(int));
    cudaMalloc((void**)&d_x, height*sizeof(int));
    cudaMalloc((void**)&d_A, height*width*sizeof(int));
    cudaMalloc((void**)&d_B, width*sizeof(int));

    // Initialize with small random ints. The original used
    // rand() / (int)RAND_MAX -- INTEGER division, which is 0 for every
    // rand() value except RAND_MAX itself, so the whole test was vacuous.
    for (int i = 0; i < height; ++i) {
        h_x[i] = rand() % 100;
    }
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            h_A[i*width + j] = rand() % 100;
        }
    }

    // Copy inputs to the device and zero the output (see kernel precondition)
    cudaMemcpy(d_x, h_x, height*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_A, h_A, height*width*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_B, 0, width*sizeof(int));

    // Launch: one thread per element of A, rounded up to whole blocks
    int blocks = (height*width) / NUM_THREADS;
    if ((height*width) % NUM_THREADS != 0) blocks++;
    MatMul <<< blocks, NUM_THREADS >>> (d_x, d_A, d_B, height, width);
    cudaDeviceSynchronize();

    // Copy result from device memory to host memory
    cudaMemcpy(h_B, d_B, width*sizeof(int), cudaMemcpyDeviceToHost);

    bool correct = true;
    // Calculate solution on the host and compare
    int* result = (int*)malloc(width*sizeof(int));
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            if (i == 0) result[j] = 0;
            result[j] += h_x[i] * h_A[i*width + j];
        }
    }
    for (int j = 0; j < width; j++) {
        if (h_B[j] != result[j]) {
            // BUG FIX: the original printed int arguments with %f (undefined
            // behavior); use %i for exact integer comparison output.
            printf("ERROR: expected h_B[%i] = %i but received %i\n",
                   j, result[j], h_B[j]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");

    // Free host and device memory
    cudaFree(d_x);
    cudaFree(d_A);
    cudaFree(d_B);
    free(h_x);
    free(h_A);
    free(h_B);
    free(result);
}
11,106
/****************************************************************************
 *
 * cuda-matsum.cu - Dense matrix-matrix addition with CUDA
 *
 * Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
 *
 * To the extent possible under law, the author(s) have dedicated all
 * copyright and related and neighboring rights to this software to the
 * public domain worldwide. This software is distributed without any warranty.
 *
 * You should have received a copy of the CC0 Public Domain Dedication
 * along with this software. If not, see
 * <http://creativecommons.org/publicdomain/zero/1.0/>.
 *
 * ---------------------------------------------------------------------------
 *
 * Simple implementation of dense square matrix-matrix addition with CUDA.
 *
 * Compile with:
 * nvcc cuda-matsum.cu -o cuda-matsum -lm
 *
 * Run with:
 * ./cuda-matsum
 *
 ****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLKSIZE 16

/* CPU reference: r = p + q, element-wise, for n x n matrices. */
void matsum( float *p, float *q, float *r, int n )
{
    int i, j;
    for (i=0; i<n; i++) {
        for (j=0; j<n; j++) {
            r[i*n + j] = p[i*n + j] + q[i*n + j];
        }
    }
}

/* Initialize square matrix p */
void fill( float *p, int n )
{
    int i;
    for (i=0; i<n*n; i++) {
        p[i] = i;
    }
}

/* Element-wise r = p + q. Launched as a 1D grid over n*n elements.
 * BUG FIX: the original computed a row from blockIdx.y (always 0 on a 1D
 * launch, so the "row > n/BLKSIZE" branch was dead code) and, critically,
 * had NO bounds guard: when n*n is not a multiple of the block size, the
 * tail threads wrote past the end of r. */
__global__ void cudaMatsum(float *p, float *q, float *r, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n * n) {
        r[i] = p[i] + q[i];
    }
}

int main( int argc, char* argv[] )
{
    float *p, *q, *r;
    int i, j, k, n = 1024;
    size_t size;

    if ( argc > 2 ) {
        printf("Usage: %s [n]\n", argv[0]);
        return -1;
    }
    if ( argc > 1 ) {
        n = atoi(argv[1]);
    }
    size = n*n*sizeof(*p);

    /* Allocate space for host copies of p, q, r */
    p = (float*)malloc(size); fill(p, n);
    q = (float*)malloc(size); fill(q, n);
    r = (float*)malloc(size);

    float *d_p, *d_q, *d_r;
    cudaMalloc((void**)&d_p, size);
    cudaMalloc((void**)&d_q, size);
    cudaMalloc((void**)&d_r, size);
    cudaMemcpy(d_p, p, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_q, q, size, cudaMemcpyHostToDevice);

    /* ceil-div so the last partial block is covered (guarded in-kernel) */
    int blocks = (n*n) % BLKSIZE == 0 ? (n*n)/BLKSIZE : (n*n)/BLKSIZE + 1;
    printf("blocks = %d\n", blocks);
    cudaMatsum<<<blocks, BLKSIZE>>>(d_p, d_q, d_r, n);
    cudaMemcpy(r, d_r, size, cudaMemcpyDeviceToHost);

    /* Check result: fill() makes p[i] = q[i] = i, so r[i] must be 2*i */
    k = 0;
    for (i=0; i<n; i++) {
        for (j=0; j<n; j++) {
            if (fabsf(r[i*n+j] - 2.0*k) > 1e-5) {
                printf("Check failed: r[%d][%d] = %f, expected %f\n",
                       i, j, r[i*n+j], 2.0*k);
                return -1;
            }
            k++;
        }
    }
    printf("Check OK\n");

    /* Cleanup (the original leaked all three device buffers) */
    cudaFree(d_p);
    cudaFree(d_q);
    cudaFree(d_r);
    free(p);
    free(q);
    free(r);
    return 0;
}
11,107
__global__ void f() {} void kernel() { f<<<1, 1>>>(); }
11,108
__global__ void linear_kernel(float* Y, const float* input_x, const float* input_w, int dim_xw, int dim_xh, int dim_ww, int dim_wh ){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; __shared__ float x_s[16]; // load if(tid < dim_xw){ x_s[tid] = input_x[tid]; } __syncthreads(); float sum = 0.0; // cal sum if(idx < dim_wh){ for(int i = 0; i < dim_ww; i++){ // sum += x_s[i] * input_w[idx][i]; sum += x_s[i] * input_w[idx * dim_ww + i]; } } // output if(idx < dim_wh){ Y[idx] = sum; } } void launch_linear(float* device_y, const float* input_x, const float* input_w, int input_dim_xw, int input_dim_xh, int input_dim_ww, int input_dim_wh ){ // first try batchsize = 1 // int TILE_WIDTH = 16; // int dimx = (int)(ceil)((float)input_dim_ww / TILE_WIDTH); // int dimy = (int)(ceil)((float)input_dim_xh / TILE_WIDTH); dim3 gridSize((input_dim_wh+1023)/1024); dim3 blockSize(1024); linear_kernel<<<gridSize, blockSize>>>(device_y, \ input_x, \ input_w, \ input_dim_xw, \ input_dim_xh, \ input_dim_ww, \ input_dim_wh ); }
11,109
#include <iostream>
#include <cstdlib>   // rand() -- the original relied on <iostream> pulling this in

#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)

// Averages each element with its two (wrapped) neighbours in a and b, then
// stores the mean of the two averages. One thread per element, guarded.
__global__ void kernel(int *a, int *b, int *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N) {
        int idx1 = (idx + 1) % 256;
        int idx2 = (idx + 2) % 256;
        float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
        float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
        c[idx] = (as + bs) / 2;
    }
}

// Two-stream pipeline: the full data set is processed in 2*N-element chunks,
// alternating H2D copy / kernel / D2H copy between stream0 and stream1 so
// transfers overlap with compute. Requires pinned host memory.
int main()
{
    cudaDeviceProp prop;
    int whichDevice;
    cudaGetDevice(&whichDevice);
    cudaGetDeviceProperties(&prop, whichDevice);
    if (!prop.deviceOverlap) {
        std::cout << "Device will not handle overlaps, so no speed up from streams" << std::endl;
        return 0;
    }

    cudaEvent_t start, stop;
    float elapsedTime;

    // start the timers
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // initialize the streams
    cudaStream_t stream0, stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);

    int *host_a, *host_b, *host_c;
    int *dev_a0, *dev_b0, *dev_c0;   // GPU buffers for stream0
    int *dev_a1, *dev_b1, *dev_c1;   // GPU buffers for stream1

    // allocate the memory on the GPU
    cudaMalloc((void**)&dev_a0, N * sizeof(int));
    cudaMalloc((void**)&dev_b0, N * sizeof(int));
    cudaMalloc((void**)&dev_c0, N * sizeof(int));
    cudaMalloc((void**)&dev_a1, N * sizeof(int));
    cudaMalloc((void**)&dev_b1, N * sizeof(int));
    cudaMalloc((void**)&dev_c1, N * sizeof(int));

    // allocate page-locked (pinned) host memory, required for async copies
    cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);

    for (int i = 0; i < FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }

    // now loop over full data, in bite-sized chunks
    for (int i = 0; i < FULL_DATA_SIZE; i += N*2) {
        // stream0 processes [i, i+N)
        cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
        kernel<<<N/256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
        cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);

        // stream1 processes [i+N, i+2N)
        cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        kernel<<<N/256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
        cudaMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1);
    }

    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    std::cout << "Time taken: " << elapsedTime << std::endl;

    // cleanup the streams and memory (the original leaked both events)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFreeHost(host_a);
    cudaFreeHost(host_b);
    cudaFreeHost(host_c);
    cudaFree(dev_a0);
    cudaFree(dev_b0);
    cudaFree(dev_c0);
    cudaFree(dev_a1);
    cudaFree(dev_b1);
    cudaFree(dev_c1);
    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
    return 0;
}
11,110
#include "includes.h" /* TODO: Your code here */ /* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */ // y = inputs[0], y_ = inputs[1] // np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True) __global__ void softmax_kernel(int64_t nrow, int64_t ncol, const float *input_data, float *output_data) { // two dimensional thread blocks. int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } // y_th row of input data input_data += y * ncol; output_data += y * ncol; // find max for a row. float maxval = *input_data; for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_data[x]); } // Deduct by max for a row, and raise to exp. // in case of too large of exp, and the result will not be affected float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_data[x] - maxval); } // Compute per-row softmax. for (int x = 0; x < ncol; ++x) { output_data[x] = exp(input_data[x] - maxval) / sum; } }
11,111
#include "includes.h" __global__ void Kogge_Stone_scan_kernel(float *X, float *Y, int InputSize) { __shared__ float XY[SECTION_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < InputSize && threadIdx.x != 0) { XY[threadIdx.x] = X[i - 1]; } else { XY[threadIdx.x] = 0; } if (threadIdx.x < InputSize) { // Perform iterative exclusive scan on XY for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { if (threadIdx.x >= stride) { __syncthreads(); XY[threadIdx.x] += XY[threadIdx.x - stride]; } } Y[i] = XY[threadIdx.x]; } }
11,112
#include "includes.h" __global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global) { int index = blockIdx.x * blockDim.x + threadIdx.x ; int sum = 0; int j=0; for(int i=index;i<numberOfElements;i = i+(numberOfBlocks*numberOfThreadsPerBlock)) { sum = sum + d_array[i]; j++; } d_global[index] = sum; }
11,113
/* * ===================================================================================== * * Filename: square_number.cu * * Description: * * Version: 1.0 * Created: 2016年09月30日 13时33分00秒 * Revision: none * Compiler: gcc * * Author: YOUR NAME (), * Organization: * * ===================================================================================== */ #include<stdio.h> __global__ void fun(int * d_out,int *d_in){ int idx = threadIdx.x; int num = d_in[idx]; d_out[idx] = num*num*num; } int main(){ const int ARRAY_SIZE = 96; const size_t ARRAY_BYTES = ARRAY_SIZE* sizeof(int); int h_in[ARRAY_SIZE]; int h_out[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i] = i; } int *d_in; int *d_out; cudaMalloc(&d_in,ARRAY_BYTES); cudaMalloc((void **)&d_out,ARRAY_BYTES); cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice); fun<<<1,ARRAY_SIZE>>>(d_out,d_in); cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost); printf("Output:\n"); for(int i=0;i<ARRAY_SIZE;i++){ printf("%d ",h_out[i]); } printf("\n"); cudaFree(d_in); cudaFree(d_out); }
11,114
#include <iostream> #include <stdio.h> #include <cuda.h> #define SIZEM 1000 __global__ void matrixXvecKernel(float* A, float* B, float* C, int n){ /* the input is A the output matrix, B matrix, c vector, n size */ //int size = n * sizeof(float); int j = threadIdx.x + blockDim.x * blockIdx.x; if( j < n ){ A[j] = 0; for (int i = 0; i < n; ++i){ A[j] += B[ ( j * n ) + i ] * C[i]; } } } __host__ void product(float* A, float* B, float* C, int n){ int size = n * sizeof(float); float* d_A; float* d_B; float* d_C; cudaMalloc((void **) &d_B, size * n); cudaMemcpy(d_B, B, size*n, cudaMemcpyHostToDevice); cudaMalloc((void **) &d_C, size); cudaMemcpy(d_C, C, size, cudaMemcpyHostToDevice); cudaMalloc((void **) &d_A, size); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); matrixXvecKernel<<<ceil(n/256.0), 256>>>(d_A, d_B, d_C, n); cudaMemcpy(A, d_A, size, cudaMemcpyDeviceToHost); // Free device memory for A, B, C cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } __host__ int main(int argc, char const *argv[]){ float* a; float* b; float* c; a = (float*) malloc(SIZEM*sizeof(float)); b = (float*) malloc(SIZEM*SIZEM*sizeof(float)); c = (float*) malloc(SIZEM*sizeof(float)); for (int i = 0; i < SIZEM; ++i){ a[i] = 0; } for (int i = 0; i < SIZEM*SIZEM; ++i){ b[i] = 1; } for (int i = 0; i < SIZEM; ++i){ c[i] = 1; } product(a, b, c, SIZEM); for (int i = 0; i < SIZEM; ++i){ if (i % SIZEM == 0){ std::cout << '\n'; } std::cout << a[i] << '\t'; } std::cout << '\n'; return 0; }
11,115
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Parallel reduction kernels */ #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #include <cooperative_groups.h> #include <stdio.h> namespace cg = cooperative_groups; // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template <class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template <> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n threads - only works for power-of-2 arrays */ /* This reduction interleaves which threads are active by using the modulo operator. This operator is very expensive on GPUs, and the interleaved inactivity means that no whole warps are active, which is also very inefficient */ template <class T> __global__ void reduce0(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; cg::sync(cta); // do reduction in shared mem for (unsigned int s = 1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2 * s)) == 0) { sdata[tid] += sdata[tid + s]; } cg::sync(cta); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses contiguous threads, but its interleaved addressing results in many shared memory bank conflicts. 
*/ template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; cg::sync(cta); // do reduction in shared mem for (unsigned int s = 1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } cg::sync(cta); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses sequential addressing -- no divergence or bank conflicts. */ template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; cg::sync(cta); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } cg::sync(cta); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses n/2 threads -- it performs the first level of reduction when reading from global memory. */ template <class T> __global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; T mySum = (i < n) ? 
g_idata[i] : 0; if (i + blockDim.x < n) mySum += g_idata[i + blockDim.x]; g_odata[i] = mySum; cg::sync(cta); // do reduction in global mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { g_odata[i] = mySum = mySum + g_odata[i + s]; } cg::sync(cta); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version uses the warp shuffle operation if available to reduce warp synchronization. When shuffle is not available the final warp's worth of work is unrolled to reduce looping overhead. See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ for additional information about using shuffle to perform a reduction within a warp. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; T mySum = (i < n) ? 
g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i + blockSize]; sdata[tid] = mySum; cg::sync(cta); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } cg::sync(cta); } cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); if (cta.thread_rank() < 32) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = tile32.size() / 2; offset > 0; offset /= 2) { mySum += tile32.shfl_down(mySum, offset); } } // write result for this block to global mem if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum; } /* This version is completely unrolled, unless warp shuffle is available, then shuffle is used within a loop. It uses a template parameter to achieve optimal code for any (power of 2) number of threads. This requires a switch statement in the host code to handle all the different thread block sizes at compile time. When shuffle is available, it is used to reduce warp synchronization. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockSize * 2) + threadIdx.x; T mySum = (i < n) ? 
g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i + blockSize]; sdata[tid] = mySum; cg::sync(cta); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } cg::sync(cta); if ((blockSize >= 256) && (tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } cg::sync(cta); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } cg::sync(cta); cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); if (cta.thread_rank() < 32) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = tile32.size() / 2; offset > 0; offset /= 2) { mySum += tile32.shfl_down(mySum, offset); } } // write result for this block to global mem if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum; } /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x; unsigned int gridSize = blockSize * 2 * gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 // sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; cg::sync(cta); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } cg::sync(cta); if ((blockSize >= 256) && (tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } cg::sync(cta); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } cg::sync(cta); cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); if (cta.thread_rank() < 32) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = tile32.size() / 2; offset > 0; offset /= 2) { mySum += tile32.shfl_down(mySum, offset); } } // write result for this block to global mem if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum; } extern "C" bool isPow2(unsigned int x); //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { case 0: reduce0<T><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 1: reduce1<T><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 2: reduce2<T><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 3: reduce3<T><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 4: switch (threads) { case 512: reduce4<T, 512> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 256: reduce4<T, 256> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 128: reduce4<T, 128> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 64: reduce4<T, 64> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 32: reduce4<T, 32> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 16: reduce4<T, 16> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 8: reduce4<T, 8> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 4: reduce4<T, 4> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 2: reduce4<T, 2> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 1: reduce4<T, 1> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; } break; case 5: switch (threads) { case 512: reduce5<T, 512> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 256: reduce5<T, 256> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 128: reduce5<T, 128> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 64: reduce5<T, 64> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 32: reduce5<T, 32> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 16: reduce5<T, 16> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 8: reduce5<T, 8> 
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 4: reduce5<T, 4> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 2: reduce5<T, 2> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 1: reduce5<T, 1> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; } break; case 6: default: if (isPow2(size)) { switch (threads) { case 512: reduce6<T, 512, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 256: reduce6<T, 256, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 128: reduce6<T, 128, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 64: reduce6<T, 64, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 32: reduce6<T, 32, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 16: reduce6<T, 16, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 8: reduce6<T, 8, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 4: reduce6<T, 4, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 2: reduce6<T, 2, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 1: reduce6<T, 1, true> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: reduce6<T, 512, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 256: reduce6<T, 256, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 128: reduce6<T, 128, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 64: reduce6<T, 64, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 32: reduce6<T, 32, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 16: reduce6<T, 16, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 8: reduce6<T, 8, false> 
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 4: reduce6<T, 4, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 2: reduce6<T, 2, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; case 1: reduce6<T, 1, false> <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size); break; } } break; } } // Instantiate the reduction function for 3 types template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata); template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata); template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata); #endif // #ifndef _REDUCE_KERNEL_H_
11,116
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>

#define N 10000
// Threads per block edge: 32x32 = 1024 threads, the per-block hardware limit.
#define BLOCK_DIM 32

/* Fill both N x N matrices with pseudo-random values in [0, 49]. */
void initializeMatrices(int a[N][N], int b[N][N])
{
    srand(time(NULL));
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i][j] = rand() % 50;
            b[i][j] = rand() % 50;
        }
    }
}

/* One thread per output element: c[row][col] = dot(row of a, column of b).
 * Matrices are square (width x width), row-major in linear device memory.
 * The guard handles the partially filled edge blocks. */
__global__ void matrixProduct(int *a, int *b, int *c, int width)
{
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (col < width && row < width) {
        int sum = 0;
        for (int k = 0; k < width; k++) {
            sum += a[row * width + k] * b[k * width + col];
        }
        c[row * width + col] = sum;
    }
}

/* Debug helper: print A, B and C as comma-separated rows. */
void showMatrices(int a[N][N], int b[N][N], int c[N][N])
{
    printf("***** MATRIX A *****\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            (j % N == N - 1) ? printf("%d \n", a[i][j]) : printf("%d,", a[i][j]);
        }
    }
    printf("***** MATRIX B *****\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            (j % N == N - 1) ? printf("%d \n", b[i][j]) : printf("%d,", b[i][j]);
        }
    }
    printf("***** MATRIX C *****\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            (j % N == N - 1) ? printf("%d \n", c[i][j]) : printf("%d,", c[i][j]);
        }
    }
}

int main()
{
    struct timeval t1, t2;
    gettimeofday(&t1, 0);

    // BUG FIX: the matrices used to be automatic (stack) arrays; three
    // 10000x10000 int arrays need ~1.2 GB, far beyond any thread stack.
    // Heap-allocate instead; int (*)[N] still matches the int[N][N]
    // parameters of the helpers above.
    int (*h_a)[N] = (int (*)[N])malloc((size_t)N * N * sizeof(int));
    int (*h_b)[N] = (int (*)[N])malloc((size_t)N * N * sizeof(int));
    int (*h_c)[N] = (int (*)[N])malloc((size_t)N * N * sizeof(int));
    if (!h_a || !h_b || !h_c) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    int *d_a, *d_b, *d_c;

    initializeMatrices(h_a, h_b);

    // BUG FIX: the byte count was typed double; use an integer size.
    size_t size = (size_t)N * N * sizeof(int);
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

    // BUG FIX: the old launch used dimBlock(N, N) = 10^8 threads per block,
    // far above the 1024-thread limit, so the kernel never launched.
    // Tile the matrix with 32x32 blocks and enough blocks to cover it.
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
    matrixProduct<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();  // surfaces launch/runtime errors
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    }

    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // showMatrices(h_a, h_b, h_c);

    cudaDeviceReset();

    gettimeofday(&t2, 0);
    // BUG FIX: the elapsed value was computed in seconds but labelled "ms".
    double time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("Time to calculate: %3.1f ms \n", time);

    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
11,117
#include "includes.h"

/* Intersection-over-union of two boxes a and b, each laid out as
 * [ymin, xmin, ymax, xmax, score].  eps avoids division by zero for
 * degenerate boxes. */
__device__ inline float devIoU(const float *a, const float *b)
{
    float w = max(0.0, min(a[2], b[2]) - max(a[0], b[0]));
    float h = max(0.0, min(a[3], b[3]) - max(a[1], b[1]));
    float intersect = w * h;
    float sa = (a[2] - a[0]) * (a[3] - a[1]);
    float sb = (b[2] - b[0]) * (b[3] - b[1]);
    float _union = sa + sb - intersect;
    float eps = 1e-4;
    return intersect * 1.0 / (_union + eps);
}

/* NMS bitmask kernel.  Block (bx, by) compares the boxes of row group by
 * against the boxes of column group bx; thread tx owns row box
 * by*THREADS + tx and writes a 64-bit suppression mask in which bit i marks
 * overlap (IoU >= threshold) with column box bx*THREADS + i.
 * Requires THREADS <= 64 so the mask fits in an unsigned long long. */
__global__ void nms_kernel(float *bbox_dev, unsigned long long *mask_dev, int num_boxes, int col_blocks, float threshold)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;

    // The last group in each direction may be partially filled.
    const int row_size = min(num_boxes - by * THREADS, THREADS);
    const int col_size = min(num_boxes - bx * THREADS, THREADS);

    // Stage this block's column boxes in shared memory, one box per thread.
    __shared__ float sh[THREADS * 5];
    if (tx < col_size) {
        int cols = tx + bx * THREADS;
#pragma unroll 5
        for (int j = 0; j < 5; j++) {
            sh[tx * 5 + j] = bbox_dev[cols * 5 + j];
        }
    }
    // BUG FIX: the barrier used to sit inside the `tx < col_size` branch;
    // __syncthreads() in divergent control flow is undefined behaviour.
    __syncthreads();

    // Each in-range thread tests its row box against every staged column box.
    if (tx < row_size) {
        const int cur_box_idx = (by * THREADS) + tx;
        float *cur_box = bbox_dev + cur_box_idx * 5;
        // On a diagonal block only compare against later boxes, so each
        // pair is tested once and a box never suppresses itself.
        int start = 0;
        if (bx == by) {
            start = tx + 1;
        }
        unsigned long long t = 0;
        for (int i = start; i < col_size; i++) {
            // BUG FIX: compare against column box i (not tx) and record the
            // result in bit i (not tx); the old code compared every row box
            // against a single column box and always set the same bit.
            if (devIoU(cur_box, sh + i * 5) >= threshold) {
                t |= (1ULL << i);
            }
        }
        const int mask_idx = cur_box_idx * col_blocks + bx;
        mask_dev[mask_idx] = t;
    }
}
11,118
#include "includes.h"

// Backward pass over one channel ("plane") of an (N, C, S) activation
// tensor; one block per channel, the block's threads stride over the S
// innermost elements.  The arithmetic reconstructs the normalized value
// from the stored output z itself, which matches the backward of an
// in-place activated batch-norm -- NOTE(review): inferred from the math,
// confirm against the corresponding forward kernel.
//   dz     : gradient w.r.t. the output, shape (N, C, S)
//   z      : forward output, shape (N, C, S)
//   var    : per-channel variance
//   weight : per-channel scale (gamma); may be null -> gamma treated as 1
//   bias   : per-channel shift (beta);  may be null -> beta treated as 0
//   edz    : per-channel reduction of dz (precomputed on device)
//   eydz   : per-channel reduction of y*dz (precomputed on device)
//   dx / dweight / dbias : outputs; each may be null to skip that gradient
__global__ void backward_kernel(const float *dz, const float *z, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, int N, int C, int S) {
    int plane = blockIdx.x;
    float _edz = edz[plane];
    float _eydz = eydz[plane];
    // abs(gamma) + eps keeps the effective scale strictly positive, so the
    // division by gamma below is always safe.
    float gamma = weight != 0 ? abs(weight[plane]) + eps : 1.f;
    float beta = bias != 0 ? bias[plane] : 0.f;
    if (dx != 0) {
        float _var = var[plane];
        float invStd = 0;
        // Guard the rsqrt against var == 0 with eps == 0.
        if (_var != 0.f || eps != 0.f) {
            invStd = 1 / sqrt(_var + eps);
        }
        float mul = gamma * invStd;
        for (int batch = 0; batch < N; ++batch) {
            // Threads of the block cooperatively cover the S elements.
            for (int n = threadIdx.x; n < S; n += blockDim.x) {
                float _dz = dz[(batch * C + plane) * S + n];
                // Recover the normalized value y from the stored output z.
                float _y = (z[(batch * C + plane) * S + n] - beta) / gamma;
                dx[(batch * C + plane) * S + n] = (_dz - _edz - _y * _eydz) * mul;
            }
        }
    }
    if (dweight != 0 || dbias != 0) {
        float norm = N * S;
        // Only thread 0 of the block accumulates the parameter gradients
        // (one value per channel).
        if (dweight != 0) {
            if (threadIdx.x == 0) {
                // The sign of the raw weight undoes the abs() taken above.
                if (weight[plane] > 0)
                    dweight[plane] += _eydz * norm;
                else if (weight[plane] < 0)
                    dweight[plane] -= _eydz * norm;
            }
        }
        if (dbias != 0) {
            if (threadIdx.x == 0) {
                dbias[plane] += _edz * norm;
            }
        }
    }
}
11,119
#include"stdio.h"
#include"stdlib.h"
#include<math.h>
#include <sys/time.h>

#define USECUDA 1
#if USECUDA==1
#include<cuda_runtime.h>
#include<curand.h>
#include<curand_kernel.h>
#endif

#define Dev_Loop 1024
#define BlockN 1024
#define addNum 16

/* Monte-Carlo estimation of the integral of f(x) = sqrt(x + sqrt(x))
 * over [l, r] (here [0, 1]) using CUDA + cuRAND. */

/* Wall-clock time in microseconds. */
long getCurrentTime()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000 + tv.tv_usec;
}

/* Uniform host-side random number in [0, 1]. */
double randomf()
{
    return ((double)rand()) / RAND_MAX;
}

#if USECUDA==1
/* Each thread draws k*Dev_Loop uniform samples in [l[i], r[i]] and stores
 * the average of f over them in res[i].  *time seeds the per-thread RNG. */
__global__ void inte_cell(double* l, double* r, double *res, long *time, curandState *state)
{
    int i = blockIdx.x * BlockN + threadIdx.x;
    // All threads start at the same instant, so derive distinct seeds by
    // adding the thread index to the launch timestamp.
    long seed = (*time) + i;
    // Sequences are fully independent, so offset 0 keeps init cheap.
    curand_init(seed, i, 0, &state[i]);
    double x, sum = 0;
    const double k = 8;
    for (int j = 0; j < k * Dev_Loop; j++) {
        x = (r[i] - l[i]) * curand_uniform_double(&state[i]);
        sum += sqrt(x + sqrt(x));
    }
    res[i] = sum / (Dev_Loop * k);
}

/* Tree-style partial summation: each thread averages addNum consecutive
 * entries of a into res.  Currently unused; kept as a building block for
 * very large (up to ~2^30-element) reductions. */
__global__ void big_plus(double *a, double *res, int *threadNum)
{
    int i = blockIdx.x * (*threadNum) + threadIdx.x;
    double sum = 0;
    int k = i * addNum;
    for (int j = 0; j < addNum; j++) {
        sum += a[k];
        k++;
    }
    res[i] = sum / addNum;
}

/* The integrand; change this (and the inlined copies above) to integrate a
 * different function. */
__device__ double func0(double x)
{
    return sqrt(x + sqrt(x));
}

/* Copy the current host time to a freshly allocated device long. */
long *getCurrentTimeForDev()
{
    long *time;
    cudaMalloc(&time, sizeof(long));
    long timenow = getCurrentTime();  // BUG FIX: was heap-allocated and leaked
    cudaMemcpy(time, &timenow, sizeof(long), cudaMemcpyHostToDevice);
    return time;
}

/* Allocate a device array of `len` doubles, all set to v. */
double *DevValueD(double v, int len)
{
    double *res;
    cudaMalloc(&res, sizeof(double) * len);
    double *val = new double[len];
    for (int i = 0; i < len; i++)
        val[i] = v;
    cudaMemcpy(res, val, sizeof(double) * len, cudaMemcpyHostToDevice);
    delete[] val;  // BUG FIX: host staging buffer used to leak
    return res;
}

/* Allocate a device array of `len` ints, all set to v. */
int *DevValueI(int v, int len)
{
    int *res;
    cudaMalloc(&res, sizeof(int) * len);
    int *val = new int[len];
    for (int i = 0; i < len; i++)
        val[i] = v;
    cudaMemcpy(res, val, sizeof(int) * len, cudaMemcpyHostToDevice);
    delete[] val;  // BUG FIX: host staging buffer used to leak
    return res;
}
#endif

/* CPU reference: average of f over Dev_Loop uniform samples in [l, r]. */
double inte_cell_cpu(double l, double r)
{
    double x;
    double res = 0;
    for (int i = 0; i < Dev_Loop; i++) {
        x = (r - l) * randomf();
        // BUG FIX: this used sqrt(x + sqrt(x*x)), a different integrand from
        // the GPU kernel; both sides now compute sqrt(x + sqrt(x)).
        res += sqrt(x + sqrt(x));
    }
    res /= Dev_Loop;
    return res;
}

/* Run the GPU estimate over [0, 1], average the per-thread results on the
 * host, and print the elapsed time and the estimate. */
int work()
{
    int threadPerBlock = BlockN;
    int numBlocks = 256;
    size_t size = BlockN * numBlocks * sizeof(double);
    long st = getCurrentTime();

    curandState *state;
    cudaMalloc(&state, sizeof(curandState) * 1024 * 1024);  // one RNG state per thread

    double *d_A, *add_tem0, *add_tem1, *res;
    cudaMalloc(&d_A, size);
    cudaMalloc(&add_tem0, size / 16);
    cudaMalloc(&add_tem1, size / 256);
    cudaMalloc(&res, sizeof(double));

    // Keep the helper allocations in named pointers so they can be released
    // afterwards (they used to be created inline in the launch and leaked).
    double *d_l = DevValueD(0.0, numBlocks * threadPerBlock);
    double *d_r = DevValueD(1.0, numBlocks * threadPerBlock);
    long *d_time = getCurrentTimeForDev();
    inte_cell<<<numBlocks, threadPerBlock>>>(d_l, d_r, d_A, d_time, state);

    // Average the per-thread partial results on the host.
    double *result = new double[numBlocks * threadPerBlock];
    double fin_res = 0;
    cudaMemcpy(result, d_A, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < numBlocks * threadPerBlock; i++) {
        fin_res += result[i];
    }
    fin_res /= (numBlocks * threadPerBlock);

    long ed = getCurrentTime();
    printf("GPU running Time:%ld\n", ed - st);
    printf("final:%16.14f\n", fin_res);

    delete[] result;   // BUG FIX: host result buffer used to leak
    cudaFree(d_l);
    cudaFree(d_r);
    cudaFree(d_time);
    cudaFree(d_A);     // BUG FIX: d_A used to be freed twice
    cudaFree(add_tem0);
    cudaFree(add_tem1);
    cudaFree(state);
    cudaFree(res);
    return 0;          // BUG FIX: non-void function fell off the end
}

int main()
{
    work();
}
11,120
#include <iostream>
#include <vector>
#include <random>
#include <functional>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <chrono>

/* Element-wise SAXPY: z[i] = a * x[i] + y[i], one thread per element. */
__global__ void saxpy_kernel(float a, float *x, float *y, float *z, size_t n)
{
    // Global thread index.
    size_t idx = threadIdx.x + blockIdx.x * (size_t)blockDim.x;
    // BUG FIX: guard against threads beyond n.  The old kernel had no bounds
    // check and the old grid used truncating division, so any n that was not
    // a multiple of the block size either lost a tail or wrote out of range.
    if (idx < n)
        z[idx] = a * x[idx] + y[idx];
}

/* Runs SAXPY on the GPU for host vectors x, y into z.
 * Returns 0 on success, 1 on any CUDA error (message goes to stdout).
 * NOTE(review): device buffers are leaked on the early error returns, as in
 * the original; acceptable since every error path exits the program. */
int saxpy_wrapper(std::vector<float> &x, std::vector<float> &y, std::vector<float> &z, float a)
{
    size_t n = x.size();
    auto n_bytes = n * sizeof(float);
    float *x_dev = nullptr, *y_dev = nullptr, *z_dev = nullptr;

    // Allocate GPU memory for x_dev.
    cudaError_t cuerr = cudaMalloc((void**)&x_dev, n_bytes);
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot allocate GPU memory for x_dev" << cudaGetErrorString(cuerr);
        return 1;
    }

    // Allocate GPU memory for y_dev.
    cuerr = cudaMalloc((void**)&y_dev, n_bytes);
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot allocate GPU memory for y_dev" << cudaGetErrorString(cuerr);
        return 1;
    }

    // Allocate GPU memory for z_dev.
    cuerr = cudaMalloc((void**)&z_dev, n_bytes);
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot allocate GPU memory for z_dev" << cudaGetErrorString(cuerr);
        return 1;
    }

    // Launch configuration.  BUG FIX: round the grid size up so the last,
    // partially filled block is launched when n % block_size != 0.
    int block_size = 1024;
    size_t grid_size = (n + block_size - 1) / block_size;

    // Copy the input data from CPU to GPU memory.
    cuerr = cudaMemcpy(x_dev, x.data(), n_bytes, cudaMemcpyHostToDevice);
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot copy data from x to x_dev" << cudaGetErrorString(cuerr);
        return 1;
    }
    cuerr = cudaMemcpy(y_dev, y.data(), n_bytes, cudaMemcpyHostToDevice);
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot copy data from y to y_dev" << cudaGetErrorString(cuerr);
        return 1;
    }

    // Events for timing the kernel.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Launch the kernel.
    saxpy_kernel<<<grid_size, block_size>>>(a, x_dev, y_dev, z_dev, n);
    cuerr = cudaGetLastError();
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot launch CUDA kernel " << cudaGetErrorString(cuerr);
        return 1;
    }

    // Wait for the kernel to finish.
    cuerr = cudaDeviceSynchronize();
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot synchronize CUDA kernel " << cudaGetErrorString(cuerr);
        return 1;
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    // Report the GPU time in milliseconds.
    float gpu_time = 0.0f;
    cudaEventElapsedTime(&gpu_time, start, stop);
    std::cout << "Elapsed time gpu: " << gpu_time << " ms." << std::endl;

    // Copy the result back to CPU memory.
    cuerr = cudaMemcpy(z.data(), z_dev, n_bytes, cudaMemcpyDeviceToHost);
    if (cuerr != cudaSuccess) {
        std::cout << "Cannot copy data from z_dev to z " << cudaGetErrorString(cuerr);
        return 1;
    }

    // Release GPU resources.
    cudaFree(x_dev);
    cudaFree(y_dev);
    cudaFree(z_dev);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}

int main(int argc, char *argv[])
{
    size_t n = 1 << 27;
    std::vector<float> x(n);
    std::vector<float> y(n);
    std::vector<float> z_cpu(n);
    std::vector<float> z_gpu(n);

    // Random inputs in [0, 1).
    std::uniform_real_distribution<> distribution(0.0, 1.0);
    std::mt19937 engine;
    auto generator = std::bind(distribution, engine);
    std::generate_n(x.begin(), n, generator);
    std::generate_n(y.begin(), n, generator);
    float a = distribution(engine);

    // CPU reference and timing.
    auto begin = std::chrono::steady_clock::now();
    for (size_t i = 0; i < n; ++i)
        z_cpu[i] = a * x[i] + y[i];
    auto end = std::chrono::steady_clock::now();
    std::cout << "Elapsed time cpu: "
              << std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count()
              << " ms." << std::endl;

    saxpy_wrapper(x, y, z_gpu, a);

    // Verify the GPU result against the CPU reference.
    for (size_t i = 0; i < n; ++i)
        if (fabs(z_cpu[i] - z_gpu[i]) > 1e-5) {
            std::cout << "Wrong calculation" << std::endl;
            return 1;
        }
    return 0;
}
11,121
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <sys/resource.h>

/* Packs text of a 63-symbol alphabet (6 bits each, 4 symbols -> 3 bytes) on
 * the GPU, and unpacks it again.
 * TODO: add support for when there's more sets than there are blocks */

/* One block per 4-symbol set; thread t (0..2) of the block produces output
 * byte t of the 3-byte packed set.  Blocks stride by 65535 because the host
 * caps the grid at 65535 blocks. */
__global__ void gCompress(uint8_t* fours, uint8_t* threes, size_t pitchfour, size_t pitchthree, size_t setcount)
{
    long a = blockIdx.x;
    uint8_t* row4;
    uint8_t* row3;
    do {
        row4 = fours + a * pitchfour;
        row3 = threes + a * pitchthree;
        // Byte t = low bits of symbol t shifted up, plus the high bits of
        // symbol t+1.
        row3[threadIdx.x] = (row4[threadIdx.x]) << (2 + (threadIdx.x * 2));
        row3[threadIdx.x] += (row4[threadIdx.x + 1]) >> (4 - (threadIdx.x * 2));
        a += 65535;
    } while (a < setcount);
}

/* Inverse of gCompress: thread t (0..3) reconstructs symbol t of the set
 * from the adjacent packed bytes. */
__global__ void gUncompress(uint8_t* fours, uint8_t* threes, size_t pitchfour, size_t pitchthree, size_t setcount)
{
    long a = blockIdx.x;
    uint8_t* row4;
    uint8_t* row3;
    do {
        row4 = fours + a * pitchfour;
        row3 = threes + a * pitchthree;
        row4[threadIdx.x] = 0;
        // Build the mask selecting this symbol's bits in the previous byte.
        int i, ander = 0;
        for (i = 0; i < threadIdx.x; i++)
            ander += (48 >> (i * 2));
        if (threadIdx.x != 0)
            row4[threadIdx.x] = ((row3[threadIdx.x - 1]) << (4 - (2 * (threadIdx.x - 1)))) & ander;
        if (threadIdx.x != 3)
            row4[threadIdx.x] += ((row3[threadIdx.x]) >> (2 + (threadIdx.x * 2)));
        a += 65535;
    } while (a < setcount);
}

/* Map a character into the 6-bit alphabet: digits 0-9, then a-z, then A-Z;
 * everything else (including newline) maps to 62. */
uint8_t getCharVal(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    else if (c >= 'a' && c <= 'z') return 10 + (c - 'a');
    else if (c >= 'A' && c <= 'Z') return 36 + (c - 'A');
    else return 62;
}

/* Inverse of getCharVal; 62 decodes as newline. */
char getOriginalVal(uint8_t t)
{
    if (t <= 9) return '0' + t;
    else if (t >= 10 && t <= 35) return 'a' + t - 10;
    else if (t >= 36 && t <= 61) return 'A' + t - 36;
    else return '\n';
}

/* True if the named file can be opened for reading. */
bool fileExists(char* name)
{
    FILE* tmp = fopen(name, "rb");
    bool exists = (tmp != NULL);
    if (tmp != NULL) fclose(tmp);
    return exists;
}

void compress(char* argv[]);
void uncompress(char* argv[]);

size_t setCount;   // number of 4-symbol sets in the current file
size_t overflow;   // symbols actually used in the last (partial) set, 0..3

int main(int argc, char* argv[])
{
    if (argc != 3 || (strcmp(argv[1], "-c") != 0 && strcmp(argv[1], "-u") != 0)) {
        fprintf(stderr, "Usage:\n%s -c filename ....... to compress\n%s -u filename ....... to uncompress\n", argv[0], argv[0]);
        exit(0);
    }
    else if (!fileExists(argv[2])) {
        fprintf(stderr, "File %s does not exist.\n", argv[2]);
        exit(0);
    }

    // Enlarge the stack; kept for safety even though the large staging
    // buffers now live on the heap.
    const rlim_t kStackSize = 64L * 1024L * 1024L;  // min stack size = 64 Mb
    struct rlimit rl;
    int result;
    result = getrlimit(RLIMIT_STACK, &rl);
    if (result == 0) {
        if (rl.rlim_cur < kStackSize) {
            rl.rlim_cur = kStackSize;
            result = setrlimit(RLIMIT_STACK, &rl);
            if (result != 0) {
                fprintf(stderr, "setrlimit returned result = %d\n", result);
                exit(0);
            }
        }
    }

    setCount = 0;
    if (strcmp(argv[1], "-c") == 0)
        compress(argv);
    else {
        uncompress(argv);
    }
    exit(0);
}

void compress(char* argv[])
{
    size_t i;
    char* filename = argv[2];
    // BUG FIX: size the name buffer from the input instead of a fixed 64
    // bytes, which overflowed for long paths (".bcg" + NUL needs 5 extra).
    char* outfilename = (char*)malloc(strlen(filename) + 8);
    sprintf(outfilename, "%s.bcg", filename);
    FILE* infile = fopen(filename, "r");
    FILE* outfile = fopen(outfilename, "w+");

    long filesize = 0;
    fseek(infile, 0, SEEK_END);
    filesize = ftell(infile);
    fseek(infile, 0, SEEK_SET);
    overflow = filesize % 4;
    setCount = filesize / 4;
    if (overflow > 0) setCount++;

    // BUG FIX: these were variable-length stack arrays, which blow even the
    // enlarged stack for big files; use the heap.  calloc zeroes the padding
    // symbols of the last partial set, which used to be uninitialized.
    uint8_t (*threebytes)[3] = (uint8_t (*)[3])malloc(setCount * 3);
    uint8_t (*fourbytes)[4] = (uint8_t (*)[4])calloc(setCount, 4);

    // BUG FIX: the old `while (!feof(...))` loop read one character past the
    // end (the EOF sentinel) and wrote one element out of bounds whenever
    // the size was an exact multiple of 4.
    for (i = 0; i < (size_t)filesize; i++) {
        fourbytes[i / 4][i % 4] = getCharVal(fgetc(infile));
    }
    fclose(infile);

    size_t pitch3, pitch4;
    uint8_t* garr3;
    uint8_t* garr4;
    cudaMallocPitch((void**)&garr3, &pitch3, (size_t)(3 * sizeof(uint8_t)), setCount);
    cudaMallocPitch((void**)&garr4, &pitch4, (size_t)(4 * sizeof(uint8_t)), setCount);
    cudaMemcpy2D((void*)garr4, pitch4, fourbytes, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), setCount, cudaMemcpyHostToDevice);
    // Grid capped at 65535 blocks; the kernel strides to cover the rest.
    if (setCount <= 65535)
        gCompress<<<setCount, 3>>>(garr4, garr3, pitch4, pitch3, setCount);
    else
        gCompress<<<65535, 3>>>(garr4, garr3, pitch4, pitch3, setCount);
    cudaMemcpy2D(threebytes, 3 * sizeof(uint8_t), garr3, pitch3, 3 * sizeof(uint8_t), setCount, cudaMemcpyDeviceToHost);
    cudaFree(garr3);
    cudaFree(garr4);

    for (i = 0; i < setCount; i++) {
        fprintf(outfile, "%c%c%c", threebytes[i][0], threebytes[i][1], threebytes[i][2]);
    }
    // Trailing digit records how many symbols the last set really held.
    // BUG FIX: %i with a size_t argument is undefined behaviour; cast.
    fprintf(outfile, "%i", (int)overflow);
    fclose(outfile);
    free(outfilename);
    free(threebytes);
    free(fourbytes);
}

void uncompress(char* argv[])
{
    size_t i;
    // Acquire and handle file overhead.
    char* filename = argv[2];
    // BUG FIX: size the name buffer from the input instead of a fixed 64 bytes.
    char* outfilename = (char*)malloc(strlen(filename) + 8);
    sprintf(outfilename, "%s.out", filename);
    FILE* infile = fopen(filename, "r");
    FILE* outfile = fopen(outfilename, "w+");

    // Determine payload size; the final byte is the overflow digit.
    long filesize = 0;
    fseek(infile, 0, SEEK_END);
    filesize = ftell(infile) - 1;  // don't count end delimiter
    fseek(infile, 0, SEEK_SET);
    setCount = filesize / 3;

    // BUG FIX: heap-allocate instead of variable-length stack arrays.
    uint8_t (*threebytes)[3] = (uint8_t (*)[3])malloc(setCount * 3);
    uint8_t (*fourbytes)[4] = (uint8_t (*)[4])malloc(setCount * 4);

    // Read the packed payload.
    i = 0;
    while (i < (size_t)filesize) {
        threebytes[i / 3][i % 3] = (uint8_t)(fgetc(infile));
        i++;
    }
    uint8_t delim = fgetc(infile) - '0';
    fclose(infile);

    // Begin GPU section.
    size_t pitch3, pitch4;
    uint8_t* garr3;
    uint8_t* garr4;
    cudaMallocPitch((void**)&garr3, &pitch3, (size_t)(3 * sizeof(uint8_t)), setCount);
    cudaMallocPitch((void**)&garr4, &pitch4, (size_t)(4 * sizeof(uint8_t)), setCount);
    cudaMemcpy2D((void*)garr3, pitch3, threebytes, 3 * sizeof(uint8_t), 3 * sizeof(uint8_t), setCount, cudaMemcpyHostToDevice);
    if (setCount <= 65535)
        gUncompress<<<setCount, 4>>>(garr4, garr3, pitch4, pitch3, setCount);
    else
        gUncompress<<<65535, 4>>>(garr4, garr3, pitch4, pitch3, setCount);
    cudaMemcpy2D(fourbytes, 4 * sizeof(uint8_t), garr4, pitch4, 4 * sizeof(uint8_t), setCount, cudaMemcpyDeviceToHost);
    cudaFree(garr3);
    cudaFree(garr4);

    // Emit all full sets, then only the `delim` real symbols of the last one.
    for (i = 0; i < setCount; i++) {
        if (delim == 0 || i != setCount - 1)
            fprintf(outfile, "%c%c%c%c", getOriginalVal(fourbytes[i][0]), getOriginalVal(fourbytes[i][1]), getOriginalVal(fourbytes[i][2]), getOriginalVal(fourbytes[i][3]));
        else {
            int k;
            for (k = 0; k < delim; k++)
                fprintf(outfile, "%c", getOriginalVal(fourbytes[i][k]));
        }
    }
    fclose(outfile);
    free(outfilename);
    free(threebytes);
    free(fourbytes);
}
11,122
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

/* Abort (via assert) with a message if a CUDA runtime call failed. */
inline cudaError_t checkCuda(cudaError_t result)
{
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
    return result;
}

/* Compare the CPU and GPU results element-wise.
 * BUG FIX: the old check computed max|A - B - 3| and declared success only
 * when that equalled exactly 3.0 -- an obfuscated and fragile way of testing
 * A == B.  Compare the matrices directly with a small tolerance instead. */
void checkResults(float *A, float *B, int width)
{
    float maxError = 0.0f;
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            int index = i * width + j;
            maxError = fmax(maxError, fabs(A[index] - B[index]));
        }
    }
    printf("Max error: %f \n", maxError);
    if (maxError > 1e-3f) {
        printf("Unsuccessful results\n");
    } else {
        printf("Successful results\n");
    }
}

/* Naive square matrix product P = M * N, one thread per output element.
 * Matrices are width x width, row-major. */
__global__ void dgemm(float *M, float *N, float *P, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((row < width) && (col < width)) {
        float pVal = 0;
        for (int i = 0; i < width; i++) {
            pVal = pVal + M[row * width + i] * N[col + i * width];
        }
        P[row * width + col] = pVal;
    }
}

/* Host reference implementation of the same product. */
void dgemm_cpu(float *M, float *N, float *P, int width)
{
    for (int row = 0; row < width; row++)
        for (int col = 0; col < width; col++) {
            float pVal = 0;
            for (int k = 0; k < width; k++) {
                pVal = pVal + M[row * width + k] * N[col + k * width];
            }
            P[row * width + col] = pVal;
        }
}

int main(int argc, char **argv)
{
    // Problem size (square matrices) and launch configuration.
    int width = (argc > 1) ? atoi(argv[1]) : 256;
    const int mem_size = width * width * sizeof(float);
    int blockSize = 32;
    int numBlocks = (width + blockSize - 1) / blockSize;
    dim3 dimBlock(blockSize, blockSize, 1);
    dim3 dimGrid(numBlocks, numBlocks, 1);

    float *A_h = (float*)malloc(mem_size);
    float *B_h = (float*)malloc(mem_size);
    float *C_h = (float*)malloc(mem_size);
    float *S_h = (float*)malloc(mem_size);   // intermediate S = A * B
    float *O_h = (float*)malloc(mem_size);   // final O = S * C

    // Constant inputs keep the expected product exact in float arithmetic.
    for (int j = 0; j < width; j++) {
        for (int i = 0; i < width; i++) {
            int index = j * width + i;
            A_h[index] = 2;
            B_h[index] = 2;
            C_h[index] = 2;
        }
    }

    // CPU reference: O = (A * B) * C.
    clock_t tStart = clock();
    dgemm_cpu(A_h, B_h, S_h, width);
    dgemm_cpu(S_h, C_h, O_h, width);
    printf("Time taken by Host: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);

    float *A_d;
    float *B_d;
    float *C_d;
    float *O_d;
    float *S_d;
    checkCuda( cudaMalloc(&A_d, mem_size) );
    checkCuda( cudaMalloc(&B_d, mem_size) );
    checkCuda( cudaMalloc(&C_d, mem_size) );
    checkCuda( cudaMalloc(&S_d, mem_size) );
    checkCuda( cudaMalloc(&O_d, mem_size) );
    checkCuda( cudaMemcpy(A_d, A_h, mem_size, cudaMemcpyHostToDevice) );
    checkCuda( cudaMemcpy(B_d, B_h, mem_size, cudaMemcpyHostToDevice) );

    // GPU chain: S = A * B, then O = S * C.
    tStart = clock();
    dgemm<<<dimGrid, dimBlock>>>(A_d, B_d, S_d, width);
    checkCuda( cudaGetLastError() );   // BUG FIX: launch errors went unchecked
    cudaDeviceSynchronize();
    checkCuda( cudaMemcpy(C_d, C_h, mem_size, cudaMemcpyHostToDevice) );
    dgemm<<<dimGrid, dimBlock>>>(S_d, C_d, O_d, width);
    checkCuda( cudaGetLastError() );
    cudaDeviceSynchronize();
    clock_t tEnd = clock();

    checkCuda( cudaMemcpy(S_h, O_d, mem_size, cudaMemcpyDeviceToHost) );
    printf("Time taken by GPU: %.6fs\n", (double)(tEnd - tStart) / CLOCKS_PER_SEC);
    checkResults(O_h, S_h, width);

    // BUG FIX: removed the unused `error_exit:` label and freed S_d, which
    // used to leak.
    checkCuda( cudaFree(A_d) );
    checkCuda( cudaFree(B_d) );
    checkCuda( cudaFree(C_d) );
    checkCuda( cudaFree(O_d) );
    checkCuda( cudaFree(S_d) );
    free(C_h);
    free(A_h);
    free(B_h);
    free(S_h);
    free(O_h);
    return 0;
}
11,123
#include "includes.h"

//#define __OUTPUT_PIX__

#define BLOCK_SIZE 32

__constant__ __device__ float lTable_const[1064];
__constant__ __device__ float mr_const[3];
__constant__ __device__ float mg_const[3];
__constant__ __device__ float mb_const[3];

// Converts an interleaved 8-bit RGB/BGR image to interleaved float LUV.
// One thread per pixel.  mr/mg/mb hold the color-to-XYZ matrix columns and
// lTable_const a precomputed L lookup; both are filled from the host with
// cudaMemcpyToSymbol -- NOTE(review): semantics of the table inferred from
// usage, confirm against the host-side setup code.
__global__ void convert_to_luv_gpu_kernel(unsigned char *in_img, float *out_img, int cols, int rows, bool use_rgb)
{
    unsigned int px = threadIdx.x + (blockDim.x * blockIdx.x);
    unsigned int py = threadIdx.y + (blockDim.y * blockIdx.y);

    // Guard clause: threads beyond the image do nothing.
    if (!((px < cols) && (py < rows)))
        return;

    unsigned int pix = (py * cols) + px;

    // Channel order depends on the source format.
    float r, g, b;
    if (use_rgb) {
        r = (float)in_img[(3 * pix)];
        g = (float)in_img[(3 * pix) + 1];
        b = (float)in_img[(3 * pix) + 2];
    } else {
        b = (float)in_img[(3 * pix)];
        g = (float)in_img[(3 * pix) + 1];
        r = (float)in_img[(3 * pix) + 2];
    }

    // Color -> XYZ via the constant-memory matrix columns.
    float x = (mr_const[0] * r) + (mg_const[0] * g) + (mb_const[0] * b);
    float y = (mr_const[1] * r) + (mg_const[1] * g) + (mb_const[1] * b);
    float z = (mr_const[2] * r) + (mg_const[2] * g) + (mb_const[2] * b);

    float maxi = 1.0f / 270;
    float minu = -88.0f * maxi;
    float minv = -134.0f * maxi;
    float un = 0.197833f;
    float vn = 0.468331f;

    // L comes straight from the lookup table indexed by scaled luminance.
    float lt = lTable_const[static_cast<int>((y * 1024))];
    float l = lt;

    // Reciprocal of the (tiny-regularized) XYZ normalizer, then U and V.
    z = 1 / (x + (15 * y) + (3 * z) + (float)1e-35);
    float u = lt * (13 * 4 * x * z - 13 * un) - minu;
    float v = lt * (13 * 9 * y * z - 13 * vn) - minv;

    out_img[(3 * pix)] = l;
    out_img[(3 * pix) + 1] = u;
    out_img[(3 * pix) + 2] = v;
}
11,124
/* Pairwise feature-vector distance between every node of graph A and every
 * node of graph B.  Thread gid handles the pair (idx_a, idx_b) and writes
 * the length-normalized distance into row-major
 * distance_matrix[idx_a * graph_b_size + idx_b].
 * Per-component distance is |a - b| / max(a, b, 1). */
__global__ void fsk(float *distance_matrix, float *graph_a, float *graph_b, int fv_length, int graph_a_size, int graph_b_size)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;

    // Make sure we don't overflow: the grid may exceed the number of pairs.
    if (gid < graph_a_size * graph_b_size) {
        int idx_a = gid / graph_b_size;
        int idx_b = gid % graph_b_size;

        int fva_start = idx_a * fv_length;
        int fvb_start = idx_b * fv_length;

        // Accumulate component-wise normalized differences.
        // BUG FIX: use float literals and float intrinsics throughout; the
        // old `0.0`/`1.0` literals and `abs` forced double-precision paths
        // in an otherwise all-float kernel.
        float distance = 0.0f;
        for (int k = 0, fv_a = fva_start; k < fv_length; k++, fv_a++) {
            int fv_b = fvb_start + k;
            // Normalizer: the larger of the two components, clamped to >= 1.
            float fv_max = fmaxf(fmaxf(graph_a[fv_a], graph_b[fv_b]), 1.0f);
            distance += fabsf(graph_a[fv_a] - graph_b[fv_b]) / fv_max;
        }

        // Normalize by the vector length and store.
        float curr_sim = distance / fv_length;
        distance_matrix[gid] = curr_sim;
    }
}
11,125
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>

#define BLOCK_SIZE 512

#define GPU_ERR_CHK(ans) { gpu_assert((ans), __FILE__, __LINE__); }
/* Print and (optionally) abort on a failed CUDA call. */
static void gpu_assert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) {
            exit(code);
        }
    }
}

/* Initialize one curand state per array element from a common seed. */
__global__ void cu_init(unsigned long long seed, curandState_t * states_d, size_t size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size) {
        curand_init(seed, idx, 0, &states_d[idx]);
    }
}

/* Fill array_d with pseudo-random integers derived from curand_uniform. */
__global__ void cugen_curand_array(curandState_t * states_d, int * array_d, size_t size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size) {
        // curand_uniform returns (0, 1]; scaling by 100 and truncating gives
        // values in 0..100 (100 only for an exact 1.0 draw) -- NOTE(review):
        // behavior kept as in the original.
        int r = curand_uniform(&states_d[idx]) * 100;
        array_d[idx] = r;
    }
}

/* Host wrapper: generate `size` random ints on the GPU into `array`. */
extern "C" void gen_curand_array(unsigned long long seed, int * array, size_t size)
{
    int blocks = ceil(size / ((float) BLOCK_SIZE));  // ceil-div grid coverage
    dim3 dimgrid (blocks);
    dim3 dimblock (BLOCK_SIZE);
    curandState_t * states_d;
    int * array_d;
    GPU_ERR_CHK(cudaMalloc((void **) &states_d, size * sizeof(curandState_t)));
    cu_init<<<dimgrid, dimblock>>>(seed, states_d, size);
    GPU_ERR_CHK(cudaMalloc((void **) &array_d, size * sizeof(int)));
    cugen_curand_array<<<dimgrid, dimblock>>>(states_d, array_d, size);
    // The blocking memcpy also synchronizes with the kernels above.
    GPU_ERR_CHK(cudaMemcpy(array, array_d, size * sizeof(int), cudaMemcpyDeviceToHost));
    GPU_ERR_CHK(cudaFree(states_d));
    GPU_ERR_CHK(cudaFree(array_d));
}

/* Block-level dot product: each block reduces its tile of a*b into
 * block_results_d[blockIdx.x].  block_size must equal blockDim.x, and the
 * dynamic shared memory must hold block_size ints. */
template <unsigned int block_size>
__global__ void cu_dot(int * a_d, int * b_d, int * block_results_d, size_t size)
{
    extern __shared__ int cache[];
    unsigned int tid = threadIdx.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    cache[tid] = 0;
    if (idx < size) {
        cache[tid] = a_d[idx] * b_d[idx];
    }
    __syncthreads();

    // Tree reduction in shared memory.  The adds are guarded but the
    // barriers are reached by the whole block.
    if (block_size >= 512) {
        if (tid < 256) { cache[tid] += cache[tid + 256]; }
        __syncthreads();
    }
    if (block_size >= 256) {
        if (tid < 128) { cache[tid] += cache[tid + 128]; }
        __syncthreads();
    }
    if (block_size >= 128) {
        if (tid < 64) { cache[tid] += cache[tid + 64]; }
        __syncthreads();
    }

    // Final warp.  BUG FIX: the old code called __syncthreads() inside
    // `if (tid < 32)` -- a divergent barrier is undefined behaviour and can
    // hang -- and otherwise relied on implicit warp lock-step, which no
    // longer holds on Volta+.  __syncwarp() is the correct warp-scope
    // barrier between the dependent steps.
    if (tid < 32) {
        if (block_size >= 64) cache[tid] += cache[tid + 32];
        __syncwarp();
        if (block_size >= 32) cache[tid] += cache[tid + 16];
        __syncwarp();
        if (block_size >= 16) cache[tid] += cache[tid + 8];
        __syncwarp();
        if (block_size >= 8) cache[tid] += cache[tid + 4];
        __syncwarp();
        if (block_size >= 4) cache[tid] += cache[tid + 2];
        __syncwarp();
        if (block_size >= 2) cache[tid] += cache[tid + 1];
    }

    // cache[0] was fully accumulated by thread 0 itself, so no further
    // barrier is needed before the write-out.
    if (tid == 0) {
        block_results_d[blockIdx.x] = cache[0];
    }
}

/* Host wrapper: accumulates dot(a, b) into *result (caller is expected to
 * have initialized *result; the original accumulated with +=). */
extern "C" void dot_product(long long * result, int * a, int * b, size_t size)
{
    int * a_d;
    int * b_d;
    // Enough blocks to cover `size`; at least one.
    int blocks = 1;
    if (size > BLOCK_SIZE) {
        blocks = ceil(((float)size) / BLOCK_SIZE);
    }
    int cache_size = BLOCK_SIZE * sizeof(int);
    int * block_results_d;
    int * block_results = (int *) malloc(blocks * sizeof(int));
    GPU_ERR_CHK(cudaMalloc((void **) &a_d, sizeof(int) * size));
    GPU_ERR_CHK(cudaMalloc((void **) &b_d, sizeof(int) * size));
    GPU_ERR_CHK(cudaMalloc((void **) &block_results_d, blocks * sizeof(int)));
    GPU_ERR_CHK(cudaMemcpy(a_d, a, sizeof(int) * size, cudaMemcpyHostToDevice));
    GPU_ERR_CHK(cudaMemcpy(b_d, b, sizeof(int) * size, cudaMemcpyHostToDevice));
    cu_dot<BLOCK_SIZE> <<<blocks, BLOCK_SIZE, cache_size>>>(a_d, b_d, block_results_d, size);
    GPU_ERR_CHK(cudaMemcpy(block_results, block_results_d, blocks * sizeof(int), cudaMemcpyDeviceToHost));
    // Accumulate the per-block partial sums on the host.
    for (int z = 0; z < blocks; z++) {
        *result += block_results[z];
    }
    free(block_results);  // BUG FIX: host buffer used to leak
    // BUG FIX: the trailing frees were previously unchecked.
    GPU_ERR_CHK(cudaFree(a_d));
    GPU_ERR_CHK(cudaFree(b_d));
    GPU_ERR_CHK(cudaFree(block_results_d));
}
11,126
#include "includes.h"

namespace ann {
    // CUDA2
}

// Backpropagation helper: computes the error term ("delta") gjl for every
// neuron of layer `layer_id` of a feed-forward network.
//
// Parameters (flattened device arrays):
//   l     - neuron counts per layer; l[layer_id] includes what appears to be
//           a bias slot (the last index is skipped) — TODO confirm.
//   s     - per-layer start offsets into the neuron-indexed arrays
//           (z_arr, a_arr, gjl).
//   sw    - per-layer start offsets into the weight array w_arr.
//   z_arr - pre-activation values z for each neuron.
//   a_arr - activations (unused here).
//   t_arr - targets (unused here; presumably used by the output-layer kernel).
//   gjl   - in: deltas of layer_id+1; out: deltas of layer_id.
//   w_arr - weights between layer_id and layer_id+1, laid out row-major as
//           [neuron idx in layer_id] x [l[layer_id+1]-1 next-layer neurons].
//
// One thread per non-bias neuron of the layer; excess threads return early.
__global__ void kernel_calc_gjL(
    int layer_id,
    int *l,
    int *s,
    int *sw,
    float *z_arr,
    float *a_arr,
    float *t_arr,
    float *gjl,
    float *w_arr
){
    volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;

    int neuron_count = l[layer_id];
    int neuron_count_next = l[layer_id+1];

    // Skip the last slot of the layer (bias) and any excess threads.
    if(idx >= neuron_count-1) return;

    // Sigmoid derivative f'(z) = e^-z / (1 + e^-z)^2, computed with one
    // expf and a multiply instead of the powf form kept below for reference.
    //float f_deriv=expf(-z_arr[s[layer_id] + idx]) / powf((1 + expf(-z_arr[s[layer_id] + idx])),2.0f);
    float z = z_arr[s[layer_id] + idx];
    float tmp = 1 + expf(-z);
    float f_deriv=expf(-z) / (tmp*tmp);

    // Weighted sum of the next layer's deltas through the outgoing weights.
    float sum = 0;
    for (int k = 0; k < neuron_count_next-1; k++) {
        sum += w_arr[sw[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl[s[layer_id + 1] + k];
    }

    // delta_j = f'(z_j) * sum_k w_jk * delta_k
    gjl[s[layer_id] + idx] = f_deriv*sum;

    // printf("Kernelis %d - %.20f\n", s[layer_id] + idx, gjl[s[layer_id] + idx]);
}
11,127
#include "includes.h"

// 3x3 Gaussian blur (kernel [1 2 1; 2 4 2; 1 2 1]) over separate B/G/R
// channel planes stored as flat row-major arrays of `width`-pixel rows.
// One thread per pixel; at image borders the kernel is renormalized by the
// sum of the weights that actually fall inside the image.
//
// This rewrite replaces the previous per-corner/per-edge special cases with
// one bounds-checked neighborhood loop. The mask is symmetric, so interior
// results are bit-identical to the old code; it also fixes three edge-case
// bugs of the old version: the bottom-right corner omitted the division by
// s, the left wall dropped the weight on the pixel above, and the top row
// weighted the pixel below twice instead of including the below-right one.
//
// Assumes numberOfPixels is a multiple of width (rectangular image) —
// TODO confirm with the caller.
__global__ void GaussianBlur(unsigned int *B, unsigned int *G, unsigned int *R, int numberOfPixels, unsigned int width, int *B_new, int *G_new, int *R_new)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numberOfPixels) {
        return;
    }

    // Row-major 3x3 Gaussian weights; total weight 16 for interior pixels.
    const int mask[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };

    const int w = (int)width;
    const int h = numberOfPixels / w;
    const int x = index % w;
    const int y = index / w;

    int s = 0;                                  // sum of in-bounds weights
    int sumB = 0, sumG = 0, sumR = 0;

    for (int dy = -1; dy <= 1; ++dy) {
        for (int dx = -1; dx <= 1; ++dx) {
            const int nx = x + dx;
            const int ny = y + dy;
            if (nx < 0 || nx >= w || ny < 0 || ny >= h) {
                continue;                        // neighbor outside the image
            }
            const int wgt = mask[(dy + 1) * 3 + (dx + 1)];
            const int n = ny * w + nx;
            sumB += (int)B[n] * wgt;
            sumG += (int)G[n] * wgt;
            sumR += (int)R[n] * wgt;
            s += wgt;
        }
    }

    // Integer division matches the original's (int)(sum / s) truncation.
    B_new[index] = sumB / s;
    G_new[index] = sumG / s;
    R_new[index] = sumR / s;
}
11,128
#include <stdio.h>

// Enumerate all visible CUDA devices and print name, memory clock, bus
// width, and the theoretical peak memory bandwidth (DDR: 2 transfers per
// clock, bus width in bytes, KHz -> GB/s via 1e6).
//
// Improvements: the CUDA API calls are now error-checked (previously their
// return codes were ignored, so a missing driver printed garbage), and main
// returns an explicit status.
int main() {
    int nDevices = 0;
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaGetDeviceProperties failed for device %d: %s\n",
                    i, cudaGetErrorString(err));
            continue;   // still report the remaining devices
        }
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
    return 0;
}
11,129
#include <iostream>
#include <chrono>
#include <ratio>
#include <unistd.h>
#include <cuda.h>

// Empty kernel: its launch cost is exactly what we want to measure.
__global__ void null_kernel() { };

// Measures the average kernel-launch latency on every visible GPU by
// launching an empty kernel 5000 times and timing the loop with a wall
// clock. Compile-time switch:
//   -DSYNCKERNEL=1  -> cudaDeviceSynchronize() after EVERY launch
//                      (measures launch + completion round trip);
//   otherwise       -> one synchronize after the loop
//                      (measures pure asynchronous enqueue cost).
// Output lines are prefixed with the host name so results from several
// nodes can be interleaved.
int main(int argc, char* argv[]) {
    char hostname[256];
    hostname[255]='\0';
    gethostname(hostname, 255);

    cudaError_t error;
    int gpu_count = 0;
    error = cudaGetDeviceCount(&gpu_count);
    if (error == cudaSuccess) {
        if (gpu_count <= 0) {
            std::cout << "[" << hostname << "] " << "Could not find any gpu\n";
            return 1;
        }
        std::cout << "[" << hostname << "] " << "Found " << gpu_count << " gpu(s)\n";
    }
    else{
        std::cout << "[" << hostname << "] " << "Error getting gpu count, exiting...\n";
        return 1;
    }

    for (int i = 0; i < gpu_count; i++) {
        cudaSetDevice(i);
        // Single kernel launch to initialize cuda runtime
        // (context creation would otherwise be billed to the first timed launch)
        null_kernel<<<1, 1>>>();

        auto t_start = std::chrono::system_clock::now();
        const int kernel_count = 5000;
        for (int j = 0; j < kernel_count; ++j) {
            null_kernel<<<1, 1>>>();
#if SYNCKERNEL == 1
            cudaDeviceSynchronize();
#endif
        }
#if SYNCKERNEL != 1
        cudaDeviceSynchronize();
#endif
        auto t_end = std::chrono::system_clock::now();

        // Average microseconds per launch.
        std::cout << "[" << hostname << "] "
                  << "[gpu " << i << "] "
                  << "Kernel launch latency: "
                  << std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(t_end - t_start).count() / kernel_count
                  << " us\n";
    }

    return 0;
}
11,130
#include "includes.h"

// Backward pass for a modulus (magnitude) layer over interleaved
// two-component values: input holds pairs (input[2i], input[2i+1]) whose
// magnitude is output[i]. Each gradient pair is the input pair scaled by
// gradOutput[i] / output[i], with output clamped below by eps to avoid
// division by zero. One thread per output element; n is the element count.
__global__ void modulus_updateGradInput_kernel(float* input, float* output, float* gradInput, float* gradOutput, int n)
{
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= n) {
        return;
    }

    const float eps = 0.0001;
    const float denom = max(output[idx], eps);     // guard against /0
    const float scale = gradOutput[idx] / denom;

    const int re = 2*idx;                          // first component index
    gradInput[re]     = input[re]     * scale;
    gradInput[re + 1] = input[re + 1] * scale;
}
11,131
#include "includes.h"

// Matrix transpose with diagonal block reordering and 4x column unrolling:
// each thread writes 4 elements of `out` (nx columns, ny rows implied by the
// write index iy*nx+ix) read column-wise from `in` (ny-stride). The diagonal
// remapping of blockIdx spreads blocks across partitions to reduce
// partition camping.
//
// Fix: the original guarded only the first of the four unrolled accesses
// (`ix < nx`), so the ix + k*blockDim.x accesses read and wrote out of
// bounds whenever nx was not a multiple of 4*blockDim.x. Each unrolled
// access is now individually bounds-checked; behavior for sizes that
// divide evenly is unchanged.
__global__ void transposeDiagonalColUnroll4(float *out, float *in, const int nx, const int ny)
{
    unsigned int blk_y = blockIdx.x;
    unsigned int blk_x = (blockIdx.x + blockIdx.y) % gridDim.x;   // diagonal remap

    unsigned int ix_stride = blockDim.x * blk_x;
    unsigned int ix = ix_stride * 4 + threadIdx.x;
    unsigned int iy = blockDim.y * blk_y + threadIdx.y;

    if (iy < ny) {
        #pragma unroll
        for (unsigned int u = 0; u < 4; ++u) {
            unsigned int col = ix + u * blockDim.x;
            if (col < nx) {                       // guard each unrolled lane
                out[iy * nx + col] = in[col * ny + iy];
            }
        }
    }
}
11,132
#include <stdio.h>
#include <cuda.h>
#include "pnpoly.cu"

#ifndef grid_size_x
#define grid_size_x 1
#endif
#ifndef grid_size_y
#define grid_size_y 1
#endif

/*
 * This function contains the host code for benchmarking the cn_pnpoly CUDA kernel
 * Including the time spent on data transfers between host and device memory
 *
 * This host code uses device mapped host memory to overlap communication
 * between host and device with kernel execution on the GPU. Because each input
 * is read only once and each output is written only once, this implementation
 * almost fully overlaps all communication and the kernel execution time dominates
 * the total execution time.
 *
 * The code has the option to precompute all polygon line slopes on the CPU and
 * reuse those results on the GPU, instead of recomputing them on the GPU all
 * the time. The time spent on precomputing these values on the CPU is also
 * taken into account by the time measurement in the code below.
 *
 * This code was written for use with the Kernel Tuner. See:
 * https://github.com/benvanwerkhoven/kernel_tuner
 *
 * Author: Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
 */
// NOTE(review): VERTICES, block_size_x/y/z, use_precomputed_slopes, and the
// d_vertices/d_slopes symbols are presumably defined in pnpoly.cu or injected
// by the Kernel Tuner as -D macros — confirm before building standalone.
// NOTE(review): strncmp is used below but <string.h> is not included here;
// presumably pulled in transitively — confirm.
extern "C" float cn_pnpoly_host(int* bitmap, float2* points, float2* vertices, int n) {
    cudaError_t err;

#if use_precomputed_slopes == 1
    // Mapped (zero-copy) host buffer for the precomputed slopes.
    float *h_slopes;
    err = cudaHostAlloc((void **)&h_slopes, VERTICES*sizeof(float), cudaHostAllocMapped);
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err));
    }
#endif

    //create CUDA streams and events
    cudaStream_t stream[1];
    err = cudaStreamCreate(&stream[0]);
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString(err));
    }
    cudaEvent_t start;
    err = cudaEventCreate(&start);
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err));
    }
    cudaEvent_t stop;
    err = cudaEventCreate(&stop);
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err));
    }

    //kernel parameters
    dim3 threads(block_size_x, block_size_y, block_size_z);
    dim3 grid(grid_size_x, grid_size_y);

    //start measuring time
    cudaDeviceSynchronize();
    cudaEventRecord(start, stream[0]);

    //transfer vertices to d_vertices
    err = cudaMemcpyToSymbolAsync(d_vertices, vertices, VERTICES*sizeof(float2), 0, cudaMemcpyHostToDevice, stream[0]);
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in cudaMemcpyToSymbolAsync: %s\n", cudaGetErrorString(err));
    }

#if use_precomputed_slopes == 1
    //precompute the slopes and transfer to symbol d_slopes
    // Element 0 wraps around: edge from the last vertex back to vertex 0.
    h_slopes[0] = (vertices[VERTICES-1].x - vertices[0].x) / (vertices[VERTICES-1].y - vertices[0].y);
    for (int i=1; i<VERTICES; i++) {
        h_slopes[i] = (vertices[i-1].x - vertices[i].x) / (vertices[i-1].y - vertices[i].y);
    }
    err = cudaMemcpyToSymbolAsync(d_slopes, h_slopes, VERTICES*sizeof(float), 0, cudaMemcpyHostToDevice, stream[0]);
    if (err != cudaSuccess) {
        fprintf(stderr, "Error in cudaMemcpyToSymbolAsync: %s\n", cudaGetErrorString(err));
    }
#endif

    //call the kernel
    cn_pnpoly<<<grid, threads, 0, stream[0]>>>(bitmap, points, n); //using mapped memory

    //stop time measurement
    cudaEventRecord(stop, stream[0]);
    cudaDeviceSynchronize();
    float time = 0.0;
    cudaEventElapsedTime(&time, start, stop);

    //cleanup
#if use_precomputed_slopes == 1
    cudaFreeHost(h_slopes);
#endif
    cudaStreamDestroy(stream[0]);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceSynchronize();

    err = cudaGetLastError();
    if (err != cudaSuccess) {
        const char *error_string = cudaGetErrorString(err);
        // Only the first 10 characters ("too many r") are compared, so this
        // matches any message with that prefix; -1.0 tells the tuner this
        // configuration exceeded the device's launch resources.
        if (strncmp("too many resources requested for launch", error_string, 10) == 0) {
            time = -1.0;
        } else {
            fprintf(stderr, "Error after CUDA kernel: %s\n", error_string);
            exit(1);
        }
    }

    return time; //ms
}
11,133
#include "Bitmap.cuh"

// Loads an uncompressed 8-bit (grayscale, BI_RGB) or 24-bit (BGR) BMP file
// into a HostMatrix of floats (color converted to luminance). Exits the
// process on any unsupported format or I/O failure.
//
// Fix vs. the previous revision: the stream is now opened with
// std::ios::binary — BMP is a binary format, and text-mode newline
// translation on Windows silently corrupted headers and pixel data.
// Multi-byte fields are read little-endian into a zeroed int; assumes a
// little-endian host — TODO confirm if portability matters.
HostMatrix Bitmap::loadFromFile(const char* fileName) {
    std::ifstream fin;
    fin.open(fileName, std::ios::binary);
    if (!fin.is_open()) {
        std::cerr << "Could not open " << fileName << "." << std::endl;
        exit(EXIT_FAILURE);
    }

    unsigned int buffer = 0;

    // BMP Header
    buffer = 0;
    fin.read((char*)&buffer, 2); // BM
    if (buffer != 19778) { // 19778 == 'MB' little-endian, i.e. the "BM" magic
        std::cerr << "The image format is not supported. "
                  << "(Offset = " << fin.tellg() << ", Value = " << buffer << ")" << std::endl;
        exit(EXIT_FAILURE);
    }
    fin.seekg(12, std::ios::cur); // skip file size, reserved, data offset

    // DIB Header
    buffer = 0;
    fin.read((char*)&buffer, 4); // DIB Header Size
    if (buffer != 40) { // only BITMAPINFOHEADER is supported
        std::cerr << "The image format is not supported. "
                  << "(Offset = " << fin.tellg() << ", Value = " << buffer << ")" << std::endl;
        exit(EXIT_FAILURE);
    }
    buffer = 0;
    fin.read((char*)&buffer, 4); // Width
    unsigned int width = buffer;
    buffer = 0;
    fin.read((char*)&buffer, 4); // Height
    unsigned int height = buffer;
    fin.seekg(2, std::ios::cur); // skip color planes
    buffer = 0;
    fin.read((char*)&buffer, 2); // Number of Bits per Pixel
    if (buffer != 8 && buffer != 24) {
        std::cerr << "The image format is not supported. "
                  << "(Offset = " << fin.tellg() << ", Value = " << buffer << ")" << std::endl;
        exit(EXIT_FAILURE);
    }
    bool grayScale = (buffer == 8);
    buffer = 0;
    fin.read((char*)&buffer, 4); // Compression
    if (buffer != 0) { // only uncompressed (BI_RGB) is supported
        std::cerr << "The image format is not supported. "
                  << "(Offset = " << fin.tellg() << ", Value = " << buffer << ")" << std::endl;
        exit(EXIT_FAILURE);
    }
    fin.seekg(20, std::ios::cur); // skip the rest of the DIB header

    // Color Palette (8-bit BMPs carry a 256-entry * 4-byte palette)
    if (grayScale) {
        fin.seekg(1024, std::ios::cur);
    }

    // Image Data: rows are padded to a multiple of 4 bytes.
    HostMatrix image(height, width);
    if (grayScale) {
        unsigned int padding = (width + 3) / 4 * 4 - width;
        for (unsigned int i = 0; i < height; ++i) {
            for (unsigned int j = 0; j < width; ++j) {
                buffer = 0;
                fin.read((char*)&buffer, 1);
                image.setElement(i, j, convertToFloat(buffer));
            }
            fin.seekg(padding, std::ios::cur);
        }
    }
    else {
        unsigned int padding = (3 * width + 3) / 4 * 4 - width * 3;
        for (unsigned int i = 0; i < height; ++i) {
            for (unsigned int j = 0; j < width; ++j) {
                // Pixels are stored B, G, R; combine with luma-style weights.
                float value = 0.0f;
                buffer = 0;
                fin.read((char*)&buffer, 1);
                value += convertToFloat(buffer) * 0.11f; // blue
                buffer = 0;
                fin.read((char*)&buffer, 1);
                value += convertToFloat(buffer) * 0.59f; // green
                buffer = 0;
                fin.read((char*)&buffer, 1);
                value += convertToFloat(buffer) * 0.30f; // red
                image.setElement(i, j, value);
            }
            fin.seekg(padding, std::ios::cur);
        }
    }

    fin.close();
    return image;
}

// Writes a HostMatrix as an 8-bit grayscale BMP with a linear 256-entry
// palette. Exits the process if the file cannot be opened.
//
// Fix vs. the previous revision: the stream is now opened with
// std::ios::binary for the same reason as in loadFromFile.
void Bitmap::saveToFile(const HostMatrix& image, const char* fileName) {
    std::ofstream fout;
    fout.open(fileName, std::ios::binary);
    if (!fout.is_open()) {
        std::cerr << "Could not open " << fileName << "." << std::endl;
        exit(EXIT_FAILURE);
    }

    unsigned int height = image.getHeight();
    unsigned int width = image.getWidth();
    unsigned int padding = (width + 3) / 4 * 4 - width; // rows padded to 4 bytes
    unsigned int headerSize = 14 + 40 + 1024;           // BMP + DIB + palette
    unsigned int imageSize = height * (width + padding);
    unsigned int fileSize = headerSize + imageSize;
    unsigned int buffer = 0;

    // BMP Header
    buffer = 19778;
    fout.write((char*)&buffer, 2); // BM
    buffer = fileSize;
    fout.write((char*)&buffer, 4); // File Size
    buffer = 0;
    fout.write((char*)&buffer, 2); // Reserved
    buffer = 0;
    fout.write((char*)&buffer, 2); // Reserved
    buffer = headerSize;
    fout.write((char*)&buffer, 4); // Offset

    // DIB Header
    buffer = 40;
    fout.write((char*)&buffer, 4); // DIB Header Size
    buffer = width;
    fout.write((char*)&buffer, 4); // Width
    buffer = height;
    fout.write((char*)&buffer, 4); // Height
    buffer = 1;
    fout.write((char*)&buffer, 2); // Number of Color Planes
    buffer = 8;
    fout.write((char*)&buffer, 2); // Number of Bits per Pixel
    buffer = 0;
    fout.write((char*)&buffer, 4); // Compression
    buffer = imageSize;
    fout.write((char*)&buffer, 4); // Image Data Size
    buffer = 0;
    fout.write((char*)&buffer, 4); // Horizontal Resolution
    buffer = 0;
    fout.write((char*)&buffer, 4); // Vertical Resolution
    buffer = 0;
    fout.write((char*)&buffer, 4); // Number of Colors in Color Palette
    buffer = 0;
    fout.write((char*)&buffer, 4); // Number of Important Colors

    // Color Palette: identity grayscale ramp (entry i = RGB(i, i, i)).
    for (unsigned int shade = 0; shade < 256; ++shade) {
        buffer = shade;
        fout.write((char*)&buffer, 1); // Red
        fout.write((char*)&buffer, 1); // Green
        fout.write((char*)&buffer, 1); // Blue
        buffer = 0;
        fout.write((char*)&buffer, 1); // Padding
    }

    // Image Data
    for (unsigned int i = 0; i < height; ++i) {
        for (unsigned int j = 0; j < width; ++j) {
            buffer = convertToByte(image.getElement(i, j));
            fout.write((char*)&buffer, 1);
        }
        buffer = 0;
        fout.write((char*)&buffer, padding); // Padding (padding <= 3 bytes of the zeroed int)
    }

    fout.close();
}
11,134
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <map>
#include <iterator>
#include <algorithm>
#include <assert.h>
//#include <time.h>

using namespace std;

// Graph dimensions are hard-coded for the chosen input file:
// N = vertex count, M = edge count.
const int N = 2394385;
const int M = 5021410;
const int BLOCK_SIZE = 256;
const unsigned MAX_NUM = 4294967295;   // sentinel: vertex removed / no pivot
const int NUM_BANKS = 16;
const int LOG_NUM_BANKS = 4;
//const string fn("test.txt");
const string fn("WikiTalk.txt");
//const string fn("soc-LiveJournal1.txt");

// Workspace for the recursive prefix-scan (one level per recursion depth);
// managed by preallocBlockSums/deallocBlockSums (defined outside this chunk).
unsigned **scanBlockSums;
unsigned numEltsAllocated = 0;
unsigned numLevelsAllocated = 0;

// Device-global state shared between kernels:
//   Mterminate       - set FALSE by any kernel that made progress this pass
//   numActiveThreads - number of entries in the compacted active mask
//   range            - per-vertex partition id (MAX_NUM = removed)
//   pivot            - per-partition chosen pivot vertex
__device__ unsigned Mterminate;
__device__ unsigned numActiveThreads;
__device__ unsigned *range;
__device__ unsigned *pivot;

#define FALSE 0u
#define TRUE 1u
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "Error: %s\nFile %s, line %d\n", cudaGetErrorString(code), file, line);
        exit(code);
    }
}

inline bool isPowerOfTwo(int n)
{
    return (n & (n - 1)) == 0;
}

// Largest power of two <= n (via the float exponent from frexp).
inline int floorPow2(int n)
{
    int exp;
    frexp((float)n, &exp);
    return 1 << (exp - 1);
}

// ---- Forward declarations: work-efficient prefix-scan building blocks
// (implementations are outside this chunk) and the SCC kernels/hosts below.
template <bool isNP2> __device__ void loadSharedChunkFromMem(unsigned *s_data, const unsigned *idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB);
template <bool isNP2> __device__ void storeSharedChunkToMem(unsigned* odata, const unsigned* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB);
template <bool storeSum> __device__ void clearLastElement(unsigned* s_data, unsigned *blockSums, int blockIndex);
__device__ unsigned int buildSum(unsigned *s_data);
__device__ void scanRootToLeaves(unsigned *s_data, unsigned int stride);
template <bool storeSum> __device__ void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums);
template <bool storeSum, bool isNP2> __global__ void prescan(unsigned *odata, const unsigned *idata, unsigned *blockSums, int n, int blockIndex, int baseIndex);
__global__ void uniformAdd(unsigned *data, unsigned *uniforms, int n, int blockOffset, int baseIndex);
__global__ void BFSKernel1( size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E, unsigned *F, unsigned *X,unsigned *Fu);
__global__ void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu);
__global__ void TRIMKernel(size_t graphSize,unsigned *VF,unsigned *EF,unsigned *VB,unsigned *EB);
/*__global__ void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask);*/
__global__ void compactSIMD(size_t N, unsigned *prefixSums, unsigned *activeMask, size_t blockSize);
__global__ void PIVOTS_SEL_Kernel(size_t graphSize);
__global__ void UpdateKernel(size_t graphSize, unsigned *visF,unsigned *visB);
__global__ void UpdateKernel1(size_t graphSize,unsigned *visF,unsigned *visB,unsigned *ac);
__global__ void RenumberKernel(size_t graphSize,unsigned *prefixsum);
__global__ void initpivot(size_t graphSize);
__global__ void ReadPivot(size_t graphSize, unsigned *pivot_);
__host__ void setUInt(unsigned *address, unsigned value);
__host__ void Graphpreproc(const string filename, vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB);
__host__ void BFS(vector<unsigned> &V, vector<unsigned> &E, unsigned index_s, unsigned index_e, vector<unsigned> &visited);
__host__ void TRIMMING(vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB);
__host__ void PIVOTS_SEL();
__host__ void Update(vector<unsigned> &visF,vector<unsigned> &visB,unsigned &termin);
__host__ void Update1(vector<unsigned> &visF,vector<unsigned> &visB,vector<unsigned> &prefixsum, unsigned &termin);
__host__ void preallocBlockSums(unsigned maxNumElements);
__host__ void deallocBlockSums();
__host__ void prescanArrayRecursive(unsigned *outArray, const unsigned *inArray, int numElements, int level);
__host__ void prescanArray(unsigned *outArray, unsigned *inArray, int numElements);

// Driver: iteratively trims trivial SCCs, picks pivots, runs forward and
// backward BFS from them, and re-partitions `range` until no partition
// changes (forward-backward SCC decomposition). Prints the elapsed GPU time
// and the number of outer iterations.
int main()
{
    vector<unsigned> VF, EF, VB, EB, visF, visB,range_,pivot_,prefixsum_;
    vector<unsigned>::iterator itr;
    /*ofstream out("out1.txt"); if(!out){ cout << "Unable to open outfile"; exit(1); // terminate with error }*/
    unsigned termin;
    unsigned *d_r,*d_p;
    unsigned *d_prefixsum;
    range_.resize(N);
    pivot_.resize(N);
    //prefixsum_.resize(N+1);
    VF.reserve(N + 1);
    EF.reserve(M);
    VB.reserve(N + 1);
    EB.reserve(M);
    // Build forward (VF/EF) and backward (VB/EB) CSR adjacency from the file.
    Graphpreproc(fn, VF, EF, VB, EB);
    unsigned index_s = 0;
    unsigned index_e = 0;   // highest partition id currently in use
    unsigned loop = 0;
    const size_t gridSizeK2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Allocate the device-global range/pivot arrays and publish the
    // pointers through the __device__ symbols.
    gpuErrchk(cudaMalloc(&d_r, N*sizeof(unsigned)));
    gpuErrchk(cudaMemset(d_r, FALSE, N*sizeof(unsigned)));
    //gpuErrchk(cudaMemcpyToSymbol(range, &d_r, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpyToSymbol(range, &d_r, sizeof(d_r)));
    gpuErrchk(cudaMalloc(&d_p, N*sizeof(unsigned)));
    gpuErrchk(cudaMemset(d_p, 255, N*sizeof(unsigned)));   // all bytes 0xFF == MAX_NUM
    //gpuErrchk(cudaMemcpyToSymbol(pivot, &d_p, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpyToSymbol(pivot, &d_p, sizeof(d_p)));
    gpuErrchk(cudaMalloc(&d_prefixsum, (N+1)*sizeof(unsigned)));
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    while(true){
        loop++;
        TRIMMING(VF,EF,VB,EB);
        PIVOTS_SEL();
        BFS(VF, EF, index_s, index_e, visF);    // forward reachability
        BFS(VB, EB, index_s, index_e ,visB);    // backward reachability
        if(true){
            // Renumbering path: compact the 3x-expanded partition ids.
            Update1(visF,visB,prefixsum_,termin);
            if(termin == FALSE){
                gpuErrchk(cudaMemcpy(d_prefixsum, prefixsum_.data(), (N+1)*sizeof(unsigned), cudaMemcpyHostToDevice));
                RenumberKernel<<<gridSizeK2,BLOCK_SIZE>>>(N,d_prefixsum);
                gpuErrchk(cudaDeviceSynchronize());
                index_e = prefixsum_.at(N) - 1;
            }
        }
        else{
            // Dead alternative: keep the raw 3x partition numbering.
            Update(visF,visB,termin);
            index_e = 3 * index_e + 2;
        }
        if(termin) break;
        initpivot<<<gridSizeK2,BLOCK_SIZE>>>(N);
        gpuErrchk(cudaDeviceSynchronize());
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float time;
    cudaEventElapsedTime(&time,start,stop);
    printf("time is %f\n",time);
    printf("%u\n", loop);
    gpuErrchk(cudaFree(d_r));
    gpuErrchk(cudaFree(d_p));
    gpuErrchk(cudaFree(d_prefixsum));
    //out.close();
    return 0;
}

// Write a single unsigned value into device memory.
__host__ void setUInt(unsigned *address, unsigned value)
{
    gpuErrchk(cudaMemcpy(address, &value, sizeof(unsigned), cudaMemcpyHostToDevice));
}

// Parse a tab-separated edge list ("src\tdst", '#' lines are comments) into
// forward CSR (VF/EF, edges sorted by source) and backward CSR (VB/EB,
// edges sorted by destination via the multimap).
__host__ void Graphpreproc(const string filename, vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB)
{
    ifstream in_f;
    vector<unsigned> t;
    vector<unsigned>::iterator itr;
    multimap<const unsigned, unsigned> m;
    multimap<const unsigned, unsigned>::iterator mitr;
    unsigned count = 0;
    in_f.open(filename.c_str(), ios::in);
    while (!in_f.eof()){
        string temp, s1, s2;
        stringstream ss1, ss2;
        unsigned t1, t2;
        getline(in_f, temp);
        if(temp.length() == 0) continue;
        if (*(temp.begin()) == '#') continue;
        s1 = string(temp, 0, temp.find_first_of('\t'));
        s2 = string(temp, temp.find_first_not_of('\t', temp.find_first_of('\t')), temp.find_last_not_of('\t'));
        ss1 << s1;
        ss1 >> t1;
        ss2 << s2;
        ss2 >> t2;
        t.push_back(t1);                 // edge sources, in file order
        m.insert(make_pair(t2, t1));     // reversed edges keyed by destination
        EF.push_back(t2);
    }
    // Build the forward row-pointer array from the (assumed sorted) sources.
    itr = t.begin();
    VF.push_back(0);
    for (int i = 0; i < N - 1; i++){
        while ((itr != t.end()) && (*itr == i)){
            count++;
            itr++;
        }
        count += VF.at(i);
        VF.push_back(count);
        count = 0;
    }
    VF.push_back(M);
    // Build the backward row-pointer array from the multimap ordering.
    mitr = m.begin();
    VB.push_back(0);
    for (int i = 0; i < N - 1; i++){
        while ((mitr != m.end()) && ((mitr->first) == i)){
            count++;
            mitr++;
        }
        count += VB.at(i);
        VB.push_back(count);
        count = 0;
    }
    VB.push_back(M);
    for (mitr = m.begin(); mitr != m.end(); mitr++){
        EB.push_back(mitr->second);
    }
}

// Frontier-expansion step: one thread per ACTIVE vertex (via activeMask);
// marks unvisited same-partition neighbors in the next frontier Fu.
__global__ void BFSKernel1( size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E, unsigned *F, unsigned *X,unsigned *Fu)
{
    unsigned activeMaskIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // If vertex is active at current iteration
    if (activeMaskIdx < numActiveThreads) {
        unsigned v = activeMask[activeMaskIdx];
        // Remove v from current frontier
        F[v] = FALSE;
        // Iterate over v's neighbors
        for (unsigned edge = V[v]; edge < V[v + 1]; ++edge) {
            unsigned neighbor = E[edge];
            // If neighbor wasn't visited
            // (BFS is confined to vertices in the same partition as v)
            if(range[v] == range[neighbor]){
                if (X[neighbor] == FALSE){
                    //C[neighbor] = C[v] + 1;
                    Fu[neighbor] = TRUE;
                }
            }
        }
    }
}

// Frontier-commit step: one thread per vertex; promotes Fu into F/X and
// clears Mterminate while any vertex joined the frontier.
__global__ void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // If vertex v exists and has recently joined the frontier
    if (v < graphSize && Fu[v]) {
        // Copy the new frontier into F
        F[v] = TRUE;
        // Set v as visited
        X[v] = TRUE;
        // Clean up the new frontier
        Fu[v] = FALSE;
        Mterminate = FALSE;
    }
}

// Trimming: a vertex with no in- or no out-neighbor inside its own
// partition is a trivial SCC; mark it removed (range = MAX_NUM).
__global__ void TRIMKernel(size_t graphSize,unsigned *VF,unsigned *EF,unsigned *VB,unsigned *EB)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    bool elim;
    if(v < graphSize){
        if(range[v] != MAX_NUM){
            elim = true;
            for (unsigned edge = VF[v]; edge < VF[v + 1]; ++edge) {
                unsigned neighbor = EF[edge];
                if(range[neighbor] == range[v]){
                    elim = false;
                    break;
                }
            }
            if(elim == false){
                elim = true;
                for (unsigned edge = VB[v]; edge < VB[v + 1]; ++edge) {
                    unsigned neighbor = EB[edge];
                    if(range[neighbor] == range[v]){
                        elim = false;
                        break;
                    }
                }
            }
            if(elim == true){
                range[v] = MAX_NUM;
                Mterminate = FALSE;
            }
        }
    }
}

// Pivot selection: every live vertex races to write itself as its
// partition's pivot; the last writer wins (any representative is fine).
__global__ void PIVOTS_SEL_Kernel(size_t graphSize)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM) pivot[range[v]] = v;
    }
}

// Partition update (no renumbering): split each partition by the
// (forward-reachable, backward-reachable) combination. F&&B = the pivot's
// SCC (removed); the other three cases map to 3r, 3r+1, 3r+2.
__global__ void UpdateKernel(size_t graphSize,unsigned *visF,unsigned *visB)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM){
            if(visF[v] == TRUE){
                if(visB[v] == TRUE){
                    range[v] = MAX_NUM;
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v];
                    Mterminate = FALSE;
                }
            }
            else if(visF[v] == FALSE){
                if(visB[v] == TRUE){
                    range[v] = 3*range[v] + 1;
                    Mterminate = FALSE;
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v] + 2;
                    Mterminate = FALSE;
                }
            }
        }
    }
}

// Same split as UpdateKernel, additionally flagging each still-live
// partition id in `ac` so the host can build a compaction prefix sum.
__global__ void UpdateKernel1(size_t graphSize,unsigned *visF,unsigned *visB,unsigned *ac)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM){
            if(visF[v] == TRUE){
                if(visB[v] == TRUE){
                    range[v] = MAX_NUM;
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v];
                    ac[range[v]] = TRUE;
                    Mterminate = FALSE;
                }
            }
            else if(visF[v] == FALSE){
                if(visB[v] == TRUE){
                    range[v] = 3*range[v] + 1;
                    ac[range[v]] = TRUE;
                    Mterminate = FALSE;
                }
                else if(visB[v] == FALSE){
                    range[v] = 3*range[v] + 2;
                    ac[range[v]] = TRUE;
                    Mterminate = FALSE;
                }
            }
        }
    }
}

// Map every live vertex's partition id through the compaction prefix sum.
__global__ void RenumberKernel(size_t graphSize,unsigned *prefixsum)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        if(range[v] != MAX_NUM) range[v] = prefixsum[range[v]];
    }
}

// Reset all pivots to the MAX_NUM sentinel before the next selection pass.
__global__ void initpivot(size_t graphSize)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        pivot[v] = MAX_NUM;
    }
}

// Copy the device-global pivot array into an ordinary device buffer so the
// host can cudaMemcpy it out.
__global__ void ReadPivot(size_t graphSize ,unsigned *pivot_)
{
    int v = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if(v < graphSize){
        pivot_[v] = pivot[v];
    }
}

// Very slow but correct "active mask" calculation; for debugging
/*__global__ void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask) { numActiveThreads = 0; for (int i = 0; i < graphSize; ++i) { if (F[i]) { activeMask[numActiveThreads] = i; ++numActiveThreads; } } }*/

// Stream compaction: scatter each frontier vertex (detected by a step in
// the exclusive prefix sum) into its slot of activeMask.
__global__ void compactSIMD(size_t N, unsigned *prefixSums, unsigned *activeMask, size_t blockSize)
{
    size_t v = blockIdx.x * blockSize + threadIdx.x;
    if (v < N) {
        // Can possibly be accelerated by using shared memory
        if (prefixSums[v + 1] != prefixSums[v]) {
            activeMask[prefixSums[v]] = v;
        }
    }
}

// Multi-source, partition-confined BFS from every partition's pivot
// (partitions index_s..index_e). On return, `visited` holds the per-vertex
// reached flags. Uses frontier compaction via prescanArray + compactSIMD.
__host__ void BFS(vector<unsigned> &V, vector<unsigned> &E, unsigned index_s, unsigned index_e, vector<unsigned> &visited)
{
    assert(sizeof(unsigned) == 4);
    unsigned size_sub = index_e - index_s + 1;   // number of partitions
    visited.clear();
    visited.resize(N);
    vector<unsigned> pivot_h;
    pivot_h.resize(size_sub);
    unsigned *d_V, *d_E, *d_p;
    unsigned *d_F, *d_X, *d_Fu;     // frontier, visited, next frontier
    unsigned *activeMask, *prefixSums;
    //unsigned **prefixSums;
    size_t memSize = (N + 1) * sizeof(unsigned);
    size_t memSizeE = M * sizeof(unsigned);
    gpuErrchk(cudaMalloc(&d_F, memSize));
    gpuErrchk(cudaMemset(d_F, FALSE, memSize));
    //setUInt(d_F + piv, TRUE); // add source to frontier
    gpuErrchk(cudaMalloc(&d_X, memSize));
    gpuErrchk(cudaMemset(d_X, FALSE, memSize));
    //setUInt(d_X + sourceVertex, TRUE); // set source as visited
    gpuErrchk(cudaMalloc(&d_Fu, memSize));
    gpuErrchk(cudaMemset(d_Fu, FALSE, memSize));
    gpuErrchk(cudaMalloc(&d_V, memSize));
    gpuErrchk(cudaMemcpy(d_V, V.data(), memSize, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&d_E, memSizeE));
    gpuErrchk(cudaMemcpy(d_E, E.data(), memSizeE, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc(&activeMask, memSize));
    //setUInt(activeMask + 0, sourceVertex); // set thread #source as active
    gpuErrchk(cudaMalloc(&d_p, size_sub*sizeof(unsigned)));
    //gpuErrchk(cudaMemset(d_p, FALSE, N*sizeof(unsigned)));
    gpuErrchk(cudaMalloc(&prefixSums, memSize));
    preallocBlockSums(N + 1);
    const size_t gridSizeK2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const size_t gridSizeK3 = (size_sub + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Pull the pivot of every partition back to the host and seed the
    // frontier/visited/active mask with each existing pivot.
    ReadPivot<<<gridSizeK3,BLOCK_SIZE>>>(size_sub,d_p);
    gpuErrchk(cudaDeviceSynchronize());
    cudaMemcpy(pivot_h.data(), d_p, size_sub*sizeof(unsigned), cudaMemcpyDeviceToHost);
    unsigned numActiveThreadsHost = 0;
    for(int i = index_s;i <= index_e;i++){
        if(pivot_h.at(i) == MAX_NUM) continue;
        setUInt(d_F + pivot_h.at(i), TRUE);
        setUInt(d_X + pivot_h.at(i), TRUE);
        setUInt(activeMask + numActiveThreadsHost, pivot_h.at(i));
        numActiveThreadsHost++;
    }
    gpuErrchk(cudaMemcpyToSymbol(numActiveThreads, &numActiveThreadsHost, sizeof(unsigned)));
    // Main loop
    /*const size_t prefixSumGridSize = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;*/
    while (true) {
        // Terminate <- TRUE
        unsigned terminateHost = TRUE;
        gpuErrchk(cudaMemcpyToSymbol(Mterminate, &terminateHost, sizeof(unsigned)));
        // Kernel 1: need to assign ACTIVE vertices to SIMD lanes (threads)
        //gpuErrchk(cudaMemcpyFromSymbol(&numActiveThreadsHost, numActiveThreads, sizeof(unsigned)));
        const size_t gridSizeK1 = (numActiveThreadsHost + BLOCK_SIZE - 1) / BLOCK_SIZE;
        // launch kernel 1
        BFSKernel1 <<<gridSizeK1, BLOCK_SIZE >>> (N,activeMask, d_V, d_E, d_F, d_X,d_Fu);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        // Kernel 2: need to assign ALL vertices to SIMD lanes
        // launch kernel 2
        BFSKernel2 <<<gridSizeK2, BLOCK_SIZE >>> (N, d_F, d_X, d_Fu);
        //gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        gpuErrchk(cudaMemcpyFromSymbol(&terminateHost, Mterminate, sizeof(unsigned)));
        if (terminateHost) {
            break;
        }
        else {
            // Get prefix sums of F
            prescanArray(prefixSums, d_F, N + 1);
            //cudaMemcpy(&numActiveThreads, prefixSums + N, sizeof(unsigned), cudaMemcpyDeviceToDevice);
            cudaMemcpy(&numActiveThreadsHost, prefixSums + N, sizeof(unsigned), cudaMemcpyDeviceToHost);
            gpuErrchk(cudaMemcpyToSymbol(numActiveThreads, &numActiveThreadsHost, sizeof(unsigned)));
            compactSIMD <<<gridSizeK2, BLOCK_SIZE>>> (N, prefixSums, activeMask, BLOCK_SIZE);
            //gpuErrchk(cudaPeekAtLastError());
            //getActiveMaskTemp<<<1,1>>>(N,d_F,activeMask);
            gpuErrchk(cudaDeviceSynchronize());
            //gpuErrchk(cudaPeekAtLastError());
        }
    }
    // Download result
    //gpuErrchk(cudaMemcpy(distances.data(), d_C, memSize - sizeof(unsigned), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(visited.data(), d_X, memSize - sizeof(unsigned), cudaMemcpyDeviceToHost));
    // Free memory
    gpuErrchk(cudaFree(d_F));
    gpuErrchk(cudaFree(d_X));
    gpuErrchk(cudaFree(d_Fu));
    gpuErrchk(cudaFree(d_V));
    gpuErrchk(cudaFree(d_E));
    gpuErrchk(cudaFree(d_p));
    gpuErrchk(cudaFree(activeMask));
    deallocBlockSums();
    gpuErrchk(cudaFree(prefixSums));
}

// Host wrapper: run TRIMKernel to a fixed point (the function body
// continues beyond this chunk).
__host__ void TRIMMING(vector<unsigned> &VF, vector<unsigned> &EF, vector<unsigned> &VB, vector<unsigned> &EB)
{
    unsigned *d_VF, *d_EF, *d_VB, *d_EB;
    size_t memSize = (N + 1) * sizeof(unsigned);
    size_t memSizeE = M * sizeof(unsigned);
    gpuErrchk(cudaMalloc(&d_VF, memSize));
gpuErrchk(cudaMemcpy(d_VF, VF.data(), memSize, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_EF, memSizeE)); gpuErrchk(cudaMemcpy(d_EF, EF.data(), memSizeE, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_VB, memSize)); gpuErrchk(cudaMemcpy(d_VB, VB.data(), memSize, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_EB, memSizeE)); gpuErrchk(cudaMemcpy(d_EB, EB.data(), memSizeE, cudaMemcpyHostToDevice)); const size_t gridSizeK2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; while (true) { // Terminate <- TRUE unsigned terminateHost = TRUE; gpuErrchk(cudaMemcpyToSymbol(Mterminate, &terminateHost, sizeof(unsigned))); // launch kernel 2 TRIMKernel <<<gridSizeK2, BLOCK_SIZE >>> (N, d_VF, d_EF, d_VB, d_EB); //gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpyFromSymbol(&terminateHost, Mterminate, sizeof(unsigned))); if (terminateHost) { break; } } gpuErrchk(cudaFree(d_VF)); gpuErrchk(cudaFree(d_EF)); gpuErrchk(cudaFree(d_VB)); gpuErrchk(cudaFree(d_EB)); } __host__ void PIVOTS_SEL() { const size_t gridSizeK2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; PIVOTS_SEL_Kernel<<<gridSizeK2, BLOCK_SIZE>>>(N); //gpuErrchk(cudaGetLastError()); gpuErrchk(cudaDeviceSynchronize()); } __host__ void Update(vector<unsigned> &visF,vector<unsigned> &visB,unsigned &termin) { unsigned *d_vf,*d_vb; size_t memSize = N * sizeof(unsigned); gpuErrchk(cudaMalloc(&d_vf, memSize)); gpuErrchk(cudaMemcpy(d_vf, visF.data(), memSize, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_vb, memSize)); gpuErrchk(cudaMemcpy(d_vb, visB.data(), memSize, cudaMemcpyHostToDevice)); // Terminate <- TRUE termin = TRUE; gpuErrchk(cudaMemcpyToSymbol(Mterminate, &termin, sizeof(unsigned))); const size_t gridSizeK2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; UpdateKernel<<<gridSizeK2, BLOCK_SIZE>>>(N,d_vf,d_vb); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpyFromSymbol(&termin, Mterminate, sizeof(unsigned))); gpuErrchk(cudaFree(d_vf)); gpuErrchk(cudaFree(d_vb)); } __host__ void 
Update1(vector<unsigned> &visF,vector<unsigned> &visB,vector<unsigned> &prefixsum, unsigned &termin) { prefixsum.clear(); prefixsum.resize(N+1); unsigned *d_vf,*d_vb,*d_ac; unsigned *prefixSums; size_t memSize = (N + 1) * sizeof(unsigned); gpuErrchk(cudaMalloc(&d_ac, memSize)); gpuErrchk(cudaMemset(d_ac, FALSE, memSize)); gpuErrchk(cudaMalloc(&d_vf, memSize)); gpuErrchk(cudaMemcpy(d_vf, visF.data(), memSize - sizeof(unsigned), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_vb, memSize)); gpuErrchk(cudaMemcpy(d_vb, visB.data(), memSize - sizeof(unsigned), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&prefixSums, memSize)); preallocBlockSums(N + 1); // Terminate <- TRUE termin = TRUE; gpuErrchk(cudaMemcpyToSymbol(Mterminate, &termin, sizeof(unsigned))); const size_t gridSizeK2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; UpdateKernel1<<<gridSizeK2, BLOCK_SIZE>>>(N,d_vf,d_vb,d_ac); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpyFromSymbol(&termin, Mterminate, sizeof(unsigned))); if(termin == FALSE){ prescanArray(prefixSums, d_ac, N + 1); //gpuErrchk(cudaMemcpyToSymbol(prefixsum, &prefixSums, sizeof(unsigned *),size_t(0), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(prefixsum.data(), prefixSums, memSize, cudaMemcpyDeviceToHost)); } gpuErrchk(cudaFree(d_vf)); gpuErrchk(cudaFree(d_vb)); gpuErrchk(cudaFree(d_ac)); deallocBlockSums(); gpuErrchk(cudaFree(prefixSums)); } __host__ void preallocBlockSums(unsigned maxNumElements) { numEltsAllocated = maxNumElements; unsigned blockSize = BLOCK_SIZE; unsigned numElts = maxNumElements; int level = 0; do { unsigned numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize))); if (numBlocks > 1) { level++; } numElts = numBlocks; } while (numElts > 1); scanBlockSums = (unsigned**)malloc(level * sizeof(unsigned*)); numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize))); if (numBlocks > 1) { 
gpuErrchk(cudaMalloc(&scanBlockSums[level++], numBlocks * sizeof(unsigned))); } numElts = numBlocks; } while (numElts > 1); } __host__ void deallocBlockSums() { for (unsigned i = 0; i < numLevelsAllocated; i++) { cudaFree(scanBlockSums[i]); } free(scanBlockSums); scanBlockSums = 0; numEltsAllocated = 0; numLevelsAllocated = 0; } __host__ void prescanArrayRecursive(unsigned *outArray, const unsigned *inArray, int numElements, int level) { unsigned blockSize = BLOCK_SIZE; unsigned numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize))); unsigned numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned numEltsPerBlock = numThreads * 2; unsigned numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock; unsigned numThreadsLastBlock = max(1u, numEltsLastBlock / 2); unsigned np2LastBlock = 0; unsigned sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if (!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(unsigned)* (2 * numThreadsLastBlock + extraSpace); } // Avoid shared memory bank conflicts unsigned extraSpace = numEltsPerBlock / NUM_BANKS; unsigned sharedMemSize = sizeof(unsigned)* (numEltsPerBlock + extraSpace); dim3 grid(max(1u, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // Main action if (numBlocks > 1) { prescan<true, false> <<< grid, threads, sharedMemSize >>> ( outArray, inArray, scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { prescan<true, true> <<< 1, numThreadsLastBlock, sharedMemLastBlock >>> ( outArray, inArray, scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } prescanArrayRecursive(scanBlockSums[level], scanBlockSums[level], numBlocks, level + 1); uniformAdd <<< grid, threads >>> ( 
outArray, scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { uniformAdd <<<1, numThreadsLastBlock >>> ( outArray, scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { prescan<false, false> <<<grid, threads, sharedMemSize >>> ( outArray, inArray, 0, numThreads * 2, 0, 0); } else { prescan<false, true> <<<grid, threads, sharedMemSize >>> ( outArray, inArray, 0, numElements, 0, 0); } } __host__ void prescanArray(unsigned *outArray, unsigned *inArray, int numElements) { prescanArrayRecursive(outArray, inArray, numElements, 0); } template <bool isNP2> __device__ void loadSharedChunkFromMem(unsigned *s_data, const unsigned *idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); s_data[ai + bankOffsetA] = idata[mem_ai]; if (isNP2) { s_data[bi + bankOffsetB] = (bi < n) ? 
idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(unsigned* odata, const unsigned* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) { if (bi < n) odata[mem_bi] = s_data[bi + bankOffsetB]; } else { odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(unsigned* s_data, unsigned *blockSums, int blockIndex) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) { blockSums[blockIndex] = s_data[index]; } s_data[index] = 0; } } __device__ unsigned int buildSum(unsigned *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } return stride; } __device__ void scanRootToLeaves(unsigned *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums) { int stride = buildSum(data); clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? 
blockIdx.x : blockIndex); scanRootToLeaves(data, stride); } template <bool storeSum, bool isNP2> __global__ void prescan(unsigned *odata, const unsigned *idata, unsigned *blockSums, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ unsigned s_data[]; loadSharedChunkFromMem<isNP2>(s_data, idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); prescanBlock<storeSum>(s_data, blockIndex, blockSums); storeSharedChunkToMem<isNP2>(odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } __global__ void uniformAdd(unsigned *data, unsigned *uniforms, int n, int blockOffset, int baseIndex) { __shared__ unsigned uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x; __syncthreads(); data[address] += uni; data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni; }
11,135
// cuda.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>

// Computes a += B*c for an m x n row-major matrix B (b[i*n+j]) and an
// n-vector c; one thread per output row.  `n` was added so the kernel no
// longer assumes a square matrix (the original indexed b[j+idx*m] and looped
// j<m, which is wrong whenever m != n).
__global__ void vector_multiply_row_device(float * a, float * b, float * c, int m, int n)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < m) {                       // guard: the grid is rounded up past m
        float sum = 0.0f;
        for (int j = 0; j < n; j++) {
            sum += b[idx*n + j] * c[j];  // row-major, matching the host-side fill
        }
        a[idx] += sum;
    }
}

int main(void)
{
    float *a_h, *b_h, *c_h; // pointers to host memory
    float *a_d, *b_d, *c_d; // pointers to device memory
    int i, j, m, n;

    printf("Please give m and n: ");
    if (scanf("%d %d", &m, &n) != 2 || m <= 0 || n <= 0) {
        fprintf(stderr, "invalid dimensions\n");
        return 1;
    }

    struct timeval start, end;
    gettimeofday(&start, NULL);

    // allocate arrays on host: a has m entries (one per row), B is m*n, c has n
    if ( (a_h=(float *)malloc(m*sizeof(float))) == NULL ) perror("memory allocation for a");
    if ( (b_h=(float *)malloc(m*n*sizeof(float))) == NULL ) perror("memory allocation for b");
    if ( (c_h=(float *)malloc(n*sizeof(float))) == NULL ) perror("memory allocation for c");

    // allocate arrays on device, mirroring the host sizes (the original mixed
    // m and n here, which mis-sized a_d and the c copy whenever m != n)
    cudaMalloc((void **) &a_d, m*sizeof(float));
    cudaMalloc((void **) &b_d, (size_t)m*n*sizeof(float));
    cudaMalloc((void **) &c_d, n*sizeof(float));

    // initialization of host data
    printf("Initializing matrix B and vector c\n");
    for (j=0; j<n; j++)
        c_h[j] = 2.0;
    for (i=0; i<m; i++)
        for (j=0; j<n; j++)
            b_h[i*n+j] = i;

    printf("Vector c:\n");
    for (j=0; j<n; j++)
        printf("c_h[%d] = %f\n", j, c_h[j]);
    printf("Matrix B:\n");
    for (i=0; i<m; i++)
        for (j=0; j<n; j++)
            printf("b_h[%d] = %f\n", i*n+j, b_h[i*n+j]);

    printf("Initializing a to 0\n");
    for(i=0; i<m; i++)    // was i<n: out-of-bounds write whenever n > m
        a_h[i] = 0.0;

    // copy data from host to device (sizes match the allocations above)
    cudaMemcpy(a_d, a_h, m*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, (size_t)m*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(c_d, c_h, n*sizeof(float), cudaMemcpyHostToDevice);

    // One thread per row.  The original launched <<<blockSize, numBlocks>>>,
    // i.e. with grid and block arguments swapped.
    int blockSize = 4;
    int numBlocks = (m + blockSize - 1) / blockSize;  // ceil-div
    vector_multiply_row_device <<< numBlocks, blockSize >>> (a_d, b_d, c_d, m, n);

    // only `a` is modified on the device; copying b and c back is unnecessary
    cudaMemcpy(a_h, a_d, m*sizeof(float), cudaMemcpyDeviceToHost);

    gettimeofday(&end, NULL);
    printf("Elapsed time: %ldus\n", ((end.tv_sec * 1000000 + end.tv_usec)
           - (start.tv_sec * 1000000 + start.tv_usec)));

    printf("Vector a_h:\n");
    for (i=0; i<m; i++)   // a has m entries, not n
        printf("a_h[%d] = %f\n", i, a_h[i]);

    // cleanup
    free(a_h); free(b_h); free(c_h);
    cudaFree(a_d); cudaFree(b_d); cudaFree(c_d);
}
11,136
#ifndef THREADS_PER_BLOCK #define THREADS_PER_BLOCK 1024 #endif __global__ void stream(float* dA, float* dB, float* dC, float alpha, int N) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < N) { dA[id] = dB[id] + alpha*dC[id]; } } extern "C" { void LaunchStream(float* dA, float *dB, float* dC, float alpha, int N) { stream<<<ceil(((float)N)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dA, dB, dC, alpha, N); } }
11,137
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>

// 24-bit BMP box-blur ("smooth filter") on the GPU.
// NOTE(review): the reader assumes each pixel row is exactly width*3 bytes;
// real BMP rows are padded to a 4-byte boundary, so files whose
// (width*3) % 4 != 0 will be misread -- confirm the expected inputs.

const size_t BMP_HEADER_SIZE = 54;
const size_t BMP_HEADER_WIDTH_OFFSET = 18;
const size_t BMP_HEADER_HEIGHT_OFFSET = 22;
const size_t PIXEL_REAL_SIZE = 3;    // bytes actually stored per pixel (BGR)

// Maximum threads per block reported by the given device.
size_t getThreadsPerBlock(int deviceNum)
{
    cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, deviceNum);
    return (size_t)properties.maxThreadsPerBlock;
}

// Image width read from the little-endian 32-bit field in the BMP header.
size_t getWidth(const unsigned char * header)
{
    const unsigned char * start_position = header + BMP_HEADER_WIDTH_OFFSET;
    unsigned int value = *((const unsigned int*)start_position);
    return (size_t)value;
}

// Image height read from the little-endian 32-bit field in the BMP header.
size_t getHeight(const unsigned char * header)
{
    const unsigned char * start_position = header + BMP_HEADER_HEIGHT_OFFSET;
    unsigned int value = *((const unsigned int*)start_position);
    return (size_t)value;
}

// One pixel in BMP file order (blue, green, red).
struct RgbPixel
{
    unsigned char b;
    unsigned char g;
    unsigned char r;
};

// Host-side pixel buffer of `size` pixels (or NULL on failure).
struct RgbPixel * allocateMemory(size_t size)
{
    struct RgbPixel * data = (struct RgbPixel *)malloc(sizeof(struct RgbPixel) * size);
    return data;
}

// Device-side pixel buffer of `size` pixels, or NULL on failure.
struct RgbPixel * cudaAllocateMemory(size_t size)
{
    struct RgbPixel * data = NULL;
    cudaError_t cudaError = cudaMalloc((void **)&data, sizeof(struct RgbPixel) * size);
    return cudaError == cudaSuccess ? data : NULL;
}

void deallocateMemory(struct RgbPixel ** data)
{
    free(*data);
    *data = NULL;
}

void cudaDeallocateMemory(struct RgbPixel ** data)
{
    cudaFree(*data);
    *data = NULL;
}

// Reads header + pixel data from `filename`.  On success *bmp owns a
// width*height pixel buffer; on any failure everything is released and
// false is returned.
bool readBmp(
    const char * filename,
    unsigned char * header,
    struct RgbPixel ** bmp
)
{
    size_t read;
    FILE * fileBmp = fopen(filename, "rb");
    if (!fileBmp)
        return false;
    read = fread(header, sizeof(unsigned char), BMP_HEADER_SIZE, fileBmp);
    if (read != BMP_HEADER_SIZE) {
        fclose(fileBmp);
        return false;
    }
    size_t width = getWidth(header), height = getHeight(header);
    size_t size = width * height;
    *bmp = allocateMemory(size);
    // Pixels are read one-by-one because RgbPixel may be padded to 4 bytes
    // while the file stores exactly 3 bytes per pixel.
    for (size_t i = 0; i < size; i++) {
        read = fread((*bmp) + i, sizeof(unsigned char), PIXEL_REAL_SIZE, fileBmp);
        if (read != PIXEL_REAL_SIZE) {
            deallocateMemory(bmp);
            fclose(fileBmp);
            return false;
        }
    }
    fclose(fileBmp);
    return true;
}

// Writes header + pixel data to `filename`; returns false if the file
// cannot be opened.
bool writeBmp(
    const char * filename,
    const unsigned char * header,
    struct RgbPixel * bmp
)
{
    FILE * bmpFile = fopen(filename, "wb");
    if (!bmpFile) {
        // Original called fclose(bmpFile) here on a NULL stream (undefined
        // behavior); there is nothing to close when fopen fails.
        return false;
    }
    fwrite(header, sizeof(unsigned char), BMP_HEADER_SIZE, bmpFile);
    size_t width = getWidth(header), height = getHeight(header);
    size_t size = width * height;
    for (size_t i = 0; i < size; i++)
        fwrite(bmp + i, sizeof(unsigned char), PIXEL_REAL_SIZE, bmpFile);
    fclose(bmpFile);
    return true;
}

// Box blur: each output pixel is the average of the (2*radius+1)^2 window
// clamped to the image borders.  One thread per pixel; launched with the
// grid rounded up, hence the explicit bounds guard.
__global__ void processImageSmoothFilter(struct RgbPixel * bmp, size_t width, size_t height, size_t radius, struct RgbPixel * result)
{
#define ROUND(x) (unsigned char)((x) + 0.5)
    size_t index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index >= width * height)
        return;  // guard added: the rounded-up grid previously wrote past the buffers
    size_t i = index / width, j = index % width;
    unsigned int r = 0, g = 0, b = 0;
    size_t starti = (i < radius ? 0 : i - radius),
           endi = (i + radius >= height ? height - 1 : i + radius),
           startj = (j < radius ? 0 : j - radius),
           endj = (j + radius >= width ? width - 1 : j + radius);
    size_t count = (endi - starti + 1) * (endj - startj + 1);
    for (size_t ii = starti; ii <= endi; ii++)
        for (size_t jj = startj; jj <= endj; jj++) {
            size_t position = ii * width + jj;
            r += bmp[position].r;
            g += bmp[position].g;
            b += bmp[position].b;
        }
    result[index].r = ROUND((float)r / count);
    result[index].g = ROUND((float)g / count);
    result[index].b = ROUND((float)b / count);
#undef ROUND
}

int main(int argc, char * argv[])
{
    if (argc != 4) {
        printf("%s\n", "Usage: <program name> <input bmp file name> <output bmp file name> <radius>");
        return 0;
    }
    const char * inputBmpFileName = argv[1];
    const char * outputBmpFileName = argv[2];
    size_t radius = (size_t)atoi(argv[3]);

    unsigned char header[BMP_HEADER_SIZE];
    struct RgbPixel * bmp = NULL;
    time_t totalBegin = time(NULL);
    bool readResult = readBmp(inputBmpFileName, header, &bmp);
    if (!readResult) {
        printf("Cannot read bmp file %s\n", inputBmpFileName);
        return 1;
    }
    size_t width = getWidth(header), height = getHeight(header);
    size_t size = width * height;
    struct RgbPixel * cudaBmp = cudaAllocateMemory(size);
    struct RgbPixel * cudaResultBmp = cudaAllocateMemory(size);
    // || not &&: either allocation failing makes the run impossible
    // (the original only bailed when BOTH failed, then dereferenced NULL).
    if (!cudaBmp || !cudaResultBmp) {
        printf("%s\n", "Cannot allocate memory in GPU");
        return 1;
    }
    cudaMemcpy(cudaBmp, bmp, size * sizeof(struct RgbPixel), cudaMemcpyHostToDevice);

    // Benchmark: 100 repetitions of the filter kernel (result is identical
    // each round; the loop exists purely for timing).
    time_t begin = time(NULL);
    size_t threadsPerBlock = getThreadsPerBlock(0);
    size_t blocks = (size + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    for (int i = 0; i < 100; i++) {
        processImageSmoothFilter<<<blocks, threadsPerBlock>>>(cudaBmp, width, height, radius, cudaResultBmp);
        cudaDeviceSynchronize();
    }
    time_t end = time(NULL);

    struct RgbPixel * resultBmp = allocateMemory(size);
    cudaMemcpy(resultBmp, cudaResultBmp, size * sizeof(struct RgbPixel), cudaMemcpyDeviceToHost);
    bool writeResult = writeBmp(outputBmpFileName, header, resultBmp);
    if (!writeResult) {
        printf("Cannot write bmp file %s\n", outputBmpFileName);
        return 1;
    }
    deallocateMemory(&bmp);
    deallocateMemory(&resultBmp);
    cudaDeallocateMemory(&cudaBmp);
    cudaDeallocateMemory(&cudaResultBmp);
    time_t totalEnd = time(NULL);
    // time() has one-second resolution; fine for a 100-iteration benchmark.
    printf("Algorithm time: %.2f sec.\n", (double)(end - begin));
    printf("Total time: %.2f sec.\n", (double)(totalEnd - totalBegin));
    return 0;
}
11,138
/*! * \brief Record the basic usage of Vector in Thrust. */ #include <iostream> #include "time.h" #include <list> // Thrust related #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/sequence.h> void VectorTest1() { // H has storage for 4 integers thrust::host_vector<int> H(4); // initialize individual elements for (int i = 0; i < H.size(); i++) H[i] = i * 10 + 1; // print contents of H for (int i = 0; i < H.size(); i++) std::cout << "H[" << i << "] = " << H[i] << std::endl; H.resize(2); // Copy host_vector H to device_vector D thrust::device_vector<int> D = H; // elements of D can be modified for (int i = 0; i < D.size(); i++) D[i] = i * 100 + 1; // print contents of D for (int i = 0; i < D.size(); i++) { std::cout << "D[" << i << "] = " << D[i] << std::endl; //printf("Dp[%d] = %d.\n", i, D[i]); //Can not print the right number in this way. } // H and D are automatically deleted when the function returns. } void VectorTest2() { // initialize all ten integers of a device_vector to 1 thrust::device_vector<int> D(10, 1); // set the first seven elements of a vector to 9 thrust::fill(D.begin(), D.begin() + 7, 9); // initialize a host_vector with the first five elements of D thrust::host_vector<int> H(D.begin(), D.begin() + 5); // set the elements of H to 0, 1, 2, 3, ... thrust::sequence(H.begin(), H.end()); // copy all of H back to the beginning of D thrust::copy(H.begin(), H.end(), D.begin()); for (int i = 0; i < D.size(); i++) std::cout << "D[" << i << "] = " << D[i] << std::endl; // H and D are automatically deleted when the function returns. } int main() { VectorTest1(); std::cout << "Finish Test1." << std::endl << std::endl; VectorTest2(); std::cout << "Finish Test2." << std::endl << std::endl; system("pause"); return 0; }
11,139
#include<stdio.h> #include<stdlib.h> // Matrix Multiplication __global__ void matrixMul(float *A, float *B, float *C, int N) { int i = threadIdx.y + blockIdx.y * blockDim.y; int j = threadIdx.x + blockIdx.x * blockDim.x; int k; if (i < N && j < N) { float temp = 0; for (k = 0; k < N; k++) temp += A[i*N + k] * B[k*N + j]; C[i*N + k] = temp; } } int main(int argc, char *argv[]) { int N = strtol(argv[1], (char **)NULL, 10); size_t size = N*N*sizeof(float); float *A, *B, *C; cudaMallocManaged(&A, size); cudaMallocManaged(&B, size); cudaMallocManaged(&C, size); // Random Initialization for (int i = 0; i < N*N; i++) { A[i] = rand() / 1000000.0; B[i] = rand() / 1000000.0; } // Grid Dimensions int threads = 10 * 10; int blocks = (N + threads - 1) / threads; // Kernel Launch Parameters dim3 THREADS (threads, threads); dim3 BLOCKS ( blocks, blocks); // Launch Kernel matrixMul<<<BLOCKS, THREADS>>>(A, B, C, N); cudaDeviceSynchronize(); // Print Command printf("Time taken for CUDA implementation with (N = \t%d) = ", N); return 0; }
11,140
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
#include <chrono>
#include <random>
using namespace std;

//=========================== function prototypes ===================================================
__global__ void vectorAddBaseLine(const int *__restrict a, const int *__restrict b, int *__restrict c, int N);
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c);

// Current wall-clock timestamp (steady enough for coarse kernel timing).
auto get_time() { return chrono::high_resolution_clock::now(); }

//=========================== main ===================================================
int main()
{
    constexpr int N = 1000 << 16;              // 1000 * 2^16 = 65,536,000 elements
    constexpr size_t bytes = sizeof(int) * N;

    int NUM_THREADS = 1 << 10;                 // 1024 threads per block
    int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;  // ceil-div

    // CPU-side buffers.
    vector<int> a; a.reserve(N);
    vector<int> b; b.reserve(N);
    // `c` must be SIZED, not just reserved: the original only reserve()d it,
    // leaving c.size() == 0 while cudaMemcpy wrote into c.data() and
    // verify_result read past the (empty) vector -- undefined behavior.
    vector<int> c(N);

    for (int i = 0; i < N; i++)   // initialize input vectors a, b
    {
        a.push_back(rand() % 100);
        b.push_back(rand() % 100);
    }

    // GPU buffers.
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    // CPU -----> GPU
    cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b.data(), bytes, cudaMemcpyHostToDevice);

    auto start = get_time();
    vectorAddBaseLine<<<NUM_BLOCKS, NUM_THREADS >>>(d_a, d_b, d_c, N); // kernel

    // GPU ---> CPU (the blocking copy also synchronizes with the kernel,
    // so the timing below covers kernel + transfer)
    cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
    auto finish = get_time();

    auto duration = chrono::duration_cast<std::chrono::milliseconds>(finish - start);
    cout << "temps écoulé en kernel = " << duration.count() << " ms\n";

    // verification
    verify_result(a, b, c);

    // free GPU memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cout << "terminé avec succès"<<endl;
    return 0;
}

// ======================= kernel ==================================
// Element-wise c[k] = a[k] + b[k].  The launch above is 1D, so blockIdx.y
// and blockDim.y are 1/0 and k reduces to the usual flat 1D index; the 2D
// formula merely keeps the kernel valid for 2D launches as well.
__global__ void vectorAddBaseLine(const int *__restrict a, const int *__restrict b, int *__restrict c, int N)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int k = j*gridDim.x * blockDim.x + i;
    if (k < N)
        c[k] = a[k] + b[k];
}

//========================= verification ==========================================
// Asserts every element of c equals the element-wise sum of a and b.
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c)
{
    for (size_t i = 0; i < a.size(); i++)
        assert(c[i] == a[i] + b[i]);
}
11,141
/*#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "VectorCuda.h" #include "CudaStuff.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include <curand.h> #include "RandomCuda.h" __global__ KERNEL _MonteCarloTest(double* result,size_t len){ size_t index = threadIdx.x + blockIdx.x*blockDim.x; if(index < len){ result[index] = exp(result[index]) + 1; } } void dGenerateRandomVector(double** target, size_t len){ double* temp; cudaMalloc(&temp,len*sizeof(double)); curandGenerator_t generator; curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator,time(0)); curandGenerateUniformDouble(generator,temp, len); curandDestroyGenerator(generator); size_t nBlocks = len/MAX_THREADS + 1; _MonteCarloTest<<<nBlocks,MAX_THREADS>>>(temp,len); cudaDeviceSynchronize(); cudaMemcpy(*target, temp, len*sizeof(double),cudaMemcpyDeviceToHost); cudaFree(temp); }*/
11,142
/** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> __global__ void mykernel(void) { printf("hello word from GPU \n"); } /** * Host main routine */ int main(void) { mykernel<<< 1,10 >>>(); cudaDeviceSynchronize(); printf("hello word \n"); return 0; }
11,143
#include "includes.h" __global__ void scatter(int *d_array , int *d_scanArray , int *d_predicateArrry,int * d_scatteredArray ,int d_numberOfElements,int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < d_numberOfElements) { if(d_predicateArrry[index] == 1) { d_scatteredArray[d_scanArray[index] - 1 +offset ] = d_array[index]; } } }
11,144
__device__ float bar(float a, float b) { return a + b; } __device__ void incrval(float *a) { *a += 3; } __global__ void somekernel1(float *a) { a[0] = a[1]; } //__global__ void foo(float *data, int N) { __global__ void foo(float *data) { // int tid = threadIdx.x; // if(tid < N) { // data[tid] += 3.0f; data[0] = 123.0f; // } }
11,145
#include "includes.h" __global__ void kernel_sigmoid_full_device(unsigned int size, int *x, int *out) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { out[i] = 1 / (1 + abs(x[i])); } }
11,146
#include "includes.h" __global__ void VecReduce(float* g_idata, float* g_odata, int N) { // shared memory size declared at kernel launch extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; // For thread ids greater than data space if (globalid < N) { sdata[tid] = g_idata[globalid]; } else { sdata[tid] = 0; // Case of extra threads above N } // each thread loads one element from global to shared mem __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) { if (tid < s) { sdata[tid] = sdata[tid] + sdata[tid+ s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; } }
11,147
//Add THREAD Vector Using GPU // NAMA : Galih Aji Pambudi // NIM : 3332180058 // MyGPU: NVIDIA GTX 650 #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> //#define N 1048576 #define N 50000000 #define MAX_ERR 1e-6 __global__ void vector_add(float *out, float *a, float *b, int n){ int index = threadIdx.x; int stride = blockDim.x; // int block = blockIdx.x; //int global = threadIdx.x + blockIdx.x * blockDim.x; //out[global] = a[global] + b[global]; //out[global] = global; //printf("Global = %d\n", global); //printf("BlockDim.x = %d\n", stride); //printf("threadIdx.x = %d\n", threadIdx.x); //printf("Block = %d\n", block); for (int i = index; i < n; i += stride){ out[i] = (a[i] / b[i]) / 78723; } } int main(){ float *a, *b, *out; float *d_a, *d_b, *d_out; //Alokasi Host Memori a = (float*)malloc(sizeof(float) * N); b = (float*)malloc(sizeof(float) * N); out = (float*)malloc(sizeof(float) * N); //Inisialisasi Array for (int i = 0; i < N; i++){ a[i] = 1.0f; //b[i] = 49237332.0f; b[i] = 1; } //Alokasi Device memori cudaMalloc((void**)&d_a, sizeof(float) * N); cudaMalloc((void**)&d_b, sizeof(float) * N); cudaMalloc((void**)&d_out, sizeof(float) * N); //Transfer Data dari Host memori ke Device memori cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice); //Eksekusi Kernel vector_add<<<857,857>>>(d_out, d_a, d_b, N); //<<<block,thread>>> //Transfer Data kembali ke Host Memori cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost); //Verification //for (int i = 0; i < N; i++){ // assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR); //} printf("out[0] = %f\n", out[0]); printf("PASSED\n"); //Dealokasi Device Memori cudaFree(d_a); cudaFree(d_b); cudaFree(d_out); //Dealokasi Host Memori free(a); free(b); free(out); return 0; }
11,148
//
#include <stdio.h>

#define BLOCKS_COUNT (2048 * 2048)
#define THREADS_PER_BLOCK 512

// Element-wise vector addition: addition[i] = numberOne[i] + numberTwo[i].
// One thread per element; the guard handles the grid tail.
__global__ void add(int *numberOne, int *numberTwo, int *addition, int count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= count)
        return;
    addition[i] = numberOne[i] + numberTwo[i];
}

// Fill dest[0..count) with pseudo-random integers from rand().
void random_ints(int* dest, int count)
{
    for (int k = 0; k < count; ++k)
        dest[k] = rand();
}

int main(void)
{
    // Bytes per buffer (BLOCKS_COUNT elements of int).
    const int size = BLOCKS_COUNT * sizeof(int);

    // Device buffers.
    int *device_numberOne, *device_numberTwo, *device_addition;
    cudaMalloc((void **)&device_numberOne, size);
    cudaMalloc((void **)&device_numberTwo, size);
    cudaMalloc((void **)&device_addition, size);

    // Host buffers, inputs filled with random data.
    int *host_numberOne = (int *)malloc(size);
    random_ints(host_numberOne, BLOCKS_COUNT);
    int *host_numberTwo = (int *)malloc(size);
    random_ints(host_numberTwo, BLOCKS_COUNT);
    int *host_addition = (int *)malloc(size);

    // Stage the inputs on the device.
    cudaMemcpy(device_numberOne, host_numberOne, size, cudaMemcpyHostToDevice);
    cudaMemcpy(device_numberTwo, host_numberTwo, size, cudaMemcpyHostToDevice);

    // Ceil-divide so the whole array is covered.
    const int grid = (BLOCKS_COUNT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    add<<<grid, THREADS_PER_BLOCK>>>(device_numberOne, device_numberTwo,
                                     device_addition, BLOCKS_COUNT);

    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(host_addition, device_addition, size, cudaMemcpyDeviceToHost);

    // Release host then device allocations.
    free(host_numberOne);
    free(host_numberTwo);
    free(host_addition);
    cudaFree(device_numberOne);
    cudaFree(device_numberTwo);
    cudaFree(device_addition);
    return 0;
}
11,149
#include<iostream>

// Prints a greeting to standard output (no trailing newline, as before).
int main()
{
    std::cout << "Hello Zoya!!";
    return 0;
}
11,150
#include<stdio.h>

// Each thread prints one element of the flattened device array as
// "<index>, <value>". Expects a single block with nx*ny threads.
__global__ void showCpy(float* d_a, int const nx, int const ny)
{
    unsigned int idx = threadIdx.x;
    if (idx < nx*ny)
        printf("%d, %5.2f \n", idx, d_a[idx]);
}

int main(){
    int const nx = 1<<4;
    int const ny = 1<<4;

    // Host matrix where element (i, j) encodes its own coordinates: i + j/100.
    float h_a[nx][ny];
    for (int i = 0; i < nx; i++)
        for (int j = 0; j < ny; j++)
            h_a[i][j] = (float)i + ((float)j)/100;

    // Copy the matrix to the device as one contiguous buffer.
    size_t mSize = nx*ny*sizeof(float);
    float* d_a;
    cudaMalloc((void**)&d_a, mSize);
    cudaMemcpy(d_a, h_a, mSize, cudaMemcpyHostToDevice);

    // One block, one thread per element.
    dim3 grid(1, 1);
    dim3 block(nx*ny, 1);
    showCpy<<<grid, block>>>(d_a, nx, ny);

    // Wait for the in-kernel printf output to flush before exiting.
    cudaDeviceSynchronize();
    return 0;
}
11,151
#include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#include <iostream>

const int NX = 2048;
const int NY = 2048;
const int DEFAULT_FFT_TRIALS = 10000;
const int DEFAULT_META_TRIALS = 10;
const int BATCH_SIZE = 1; // retained for the (disabled) 1D-plan variant

// Benchmarks repeated NX x NY complex-to-complex forward FFTs with cuFFT and
// reports the wall-clock time per batch of `fft_trials` transforms.
int main(int argc, char **argv) {
    int fft_trials = DEFAULT_FFT_TRIALS;
    int meta_trials = DEFAULT_META_TRIALS;
    printf("[INFO] META trials: %d\n", meta_trials);
    printf("[INFO] FFT trials: %d\n", fft_trials);

    long nx = NX;
    long ny = NY; // FIX: was `NX` (copy-paste); harmless only because NX == NY
    printf("[INFO] NX Length: %ld\n", nx);
    printf("[INFO] NY Length: %ld\n", ny);

    // Pinned host buffers (fast transfers) and device buffers.
    cufftComplex *h_original_signal, *h_applied_fft_signal;
    cudaMallocHost((void **) &h_original_signal, sizeof(cufftComplex) * NX * NY);
    cudaMallocHost((void **) &h_applied_fft_signal, sizeof(cufftComplex) * NX * NY);

    cufftComplex *d_original_signal, *d_applied_fft_signal;
    cudaMalloc((void **) &d_original_signal, sizeof(cufftComplex) * NX * NY);
    cudaMalloc((void **) &d_applied_fft_signal, sizeof(cufftComplex) * NX * NY);

    /*
     * generate random signal as original signal
     * (seeded so the input is deterministic; real part in 0..9, imag 0)
     */
    srand(0);
    for (int i = 0; i < NX*NY; i++) {
        h_original_signal[i].x = (float)((int)rand() % 10);
        h_original_signal[i].y = 0.0;
    }

    cudaMemcpy(d_original_signal, h_original_signal,
               sizeof(cufftComplex) * NX * NY, cudaMemcpyHostToDevice);

    // One reusable 2D C2C plan for all trials.
    cufftHandle fft_plan;
    cufftPlan2d(&fft_plan, NX, NY, CUFFT_C2C);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float sum_of_elapsed_times = 0.0;
    printf("[INFO] Run benchmark...\n");
    for (int i = 0; i < meta_trials; i++) {
        cudaEventRecord(start, 0);
        for (int j = 0; j < fft_trials; j++) {
            cufftExecC2C(fft_plan, d_original_signal, d_applied_fft_signal,
                         CUFFT_FORWARD);
        }
        cudaEventRecord(stop, 0);
        // Waiting on the stop event also drains the queued FFTs, so the
        // measured interval covers all fft_trials executions.
        cudaEventSynchronize(stop);
        float elapsed_time_ms;
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        float elapsed_time_sec = elapsed_time_ms / 1000.0;
        sum_of_elapsed_times += elapsed_time_sec;
        printf("%f sec\n", elapsed_time_sec);
    }

    cudaMemcpy(h_applied_fft_signal, d_applied_fft_signal,
               sizeof(cufftComplex) * NX * NY, cudaMemcpyDeviceToHost);

    printf("[INFO] Finished!\n");
    printf("[INFO] Average: %lf sec\n", sum_of_elapsed_times / meta_trials);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // FIX: the original leaked the plan, both device buffers, and both pinned
    // host buffers, and fell off the end of main without a return.
    cufftDestroy(fft_plan);
    cudaFree(d_original_signal);
    cudaFree(d_applied_fft_signal);
    cudaFreeHost(h_original_signal);
    cudaFreeHost(h_applied_fft_signal);
    return 0;
}
11,152
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cstdlib> #include <cstdio> #include <cassert> #include <iostream> __global__ void array_manipulation_kernel(int* a, int n) { unsigned int index; index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) a[index] *= 2; } class ArrayManipulation { public: int arrayLength; explicit ArrayManipulation(int arrayLength); void initArray(int* array) const; void run(int numGrids, int numThreads) const; void displayResult(int* array, int* resultArray) const; void checkResult(const int* array, const int* resultArray) const; }; ArrayManipulation::ArrayManipulation(int arrayLength) { this->arrayLength = arrayLength; } void ArrayManipulation::initArray(int *array) const { for(int i = 0; i < this->arrayLength; i++) array[i] = rand() % 100; } void ArrayManipulation::displayResult(int *array, int* resultArray) const { for(int i = 0; i < this->arrayLength; i++) printf("%d * 2 = %d\n", array[i], resultArray[i]); } void ArrayManipulation::checkResult(const int *array, const int* resultArray) const { for(int i = 0; i < this->arrayLength; i++) assert(resultArray[i] == array[i] * 2); printf("Program Executed Successfully"); } void ArrayManipulation::run(int numGrids, int numThreads) const { int deviceId = cudaGetDevice(&deviceId); printf("GPU Device ID: %d\n", deviceId); printf("CPU Device ID: %d\n\n", cudaCpuDeviceId); int * hostArray, * resultArray, * deviceArray; size_t arrayBytes = sizeof(int) * this->arrayLength; cudaMallocHost(&hostArray, arrayBytes); cudaMallocHost(&resultArray, arrayBytes); cudaMalloc(&deviceArray, arrayBytes); initArray(hostArray); cudaMemcpy(deviceArray, hostArray, arrayBytes, cudaMemcpyHostToDevice); array_manipulation_kernel<<<numGrids, numThreads>>>(deviceArray, arrayLength); cudaDeviceSynchronize(); cudaMemcpy(resultArray, deviceArray, arrayBytes, cudaMemcpyDeviceToHost); displayResult(hostArray, resultArray); checkResult(hostArray, resultArray); cudaFreeHost(hostArray); 
cudaFreeHost(resultArray); cudaFree(deviceArray); } int main() { ArrayManipulation program(16); program.run(1, 16); }
11,153
// This exercise is for student to learn about data sharing and synchronization between threads
#include <stdio.h>

#define N 4 // number of elements in vector

// Multiply-accumulate: *d_c = sum over i of d_a[i] * d_b[i].
// Must be launched as exactly ONE block of N threads: tmp[] is sized N and
// indexed by the global thread id, so extra blocks would write out of bounds.
__global__ void vector_mac(int *d_c, int *d_a, int *d_b, int n){
    __shared__ int tmp[N]; // per-block scratch holding the elementwise products
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    tmp[i] = d_a[i] * d_b[i];
    // This barrier IS required (the original comment claiming otherwise was
    // wrong): thread 0 below reads every tmp[j], so all threads' shared-memory
    // writes must be completed and visible before the summation starts.
    __syncthreads();
    if (i==0){ // use thread 0 to perform the summation
        int sum = 0;
        for (int j = 0; j < n; j++)
            sum = sum + tmp[j];
        *d_c = sum;
    }
}

int main(void){
    // Fixed test vectors; expected answer: 22*5 + 13*22 + 16*17 + 5*37 = 853.
    int a[N] = {22, 13, 16, 5};
    int b[N] = { 5, 22, 17, 37};
    int c[1];
    int *d_a, *d_b, *d_c;

    cudaMalloc((void**)&d_a, sizeof(int)*N);
    cudaMalloc((void**)&d_b, sizeof(int)*N);
    cudaMalloc((void**)&d_c, sizeof(int));

    cudaMemcpy(d_a, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(int)*N, cudaMemcpyHostToDevice);

    vector_mac<<<1,N>>>(d_c, d_a, d_b, N); // 1 thread block with N (4) threads

    // Blocking copy; also synchronizes with the kernel before reading c.
    cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    printf("A = [%2d %2d %2d %2d]\n", a[0], a[1], a[2], a[3]);
    printf("B = [%2d %2d %2d %2d]\n", b[0], b[1], b[2], b[3]);
    printf("Answer = %d\n", c[0]);
    return 0;
}
11,154
#include "includes.h" __global__ void MultiChannelsSplit(float* inputs, float* outputs, int outChannels, int offset, int row, int inChannels) { int batchId = blockIdx.x; float* input = inputs + batchId * inChannels * row * row + offset; int blockDo = outChannels * row * row; for(int i = 0; i < blockDo; i += blockDim.x) { int j = i + threadIdx.x; if(j < blockDo) { int pos = batchId * outChannels * row * row; outputs[pos + j] = input[j]; } } }
11,155
#include "includes.h" __global__ void axpy(float a, float* x, float* y) { y[threadIdx.x] = a * x[threadIdx.x]; }
11,156
#include "Tools.cuh" #include "defines.cuh" #include "function_defines.cuh" __host__ inline void sampler_PureRandom ( Sampler *sampler ); __host__ inline void sampler_Regular ( Sampler *sampler ); __host__ inline void sampler_Jittered ( Sampler *sampler ); __host__ inline void sampler_Nrooks ( Sampler *sampler ); __host__ inline void sampler_MultiJittered( Sampler *sampler ); __host__ inline void sampler_Hammersley ( Sampler *sampler ); __host__ inline void GenerateSample(Sampler *sampler){ switch( sampler->type ){ case SAMPLER_PURERANDOM : sampler_PureRandom(sampler); break; case SAMPLER_REGULAR : sampler_Regular(sampler); break; case SAMPLER_JITTERED : sampler_Jittered(sampler); break; case SAMPLER_NROOKS : sampler_Nrooks(sampler); break; case SAMPLER_MULTIJITTERED : sampler_MultiJittered(sampler); break; case SAMPLER_HAMMERSLEY : sampler_Hammersley(sampler); break; default: break; } } __device__ inline Point2D getSampleUnitSquare(Sampler *sampler){ return sampler->sample[ sampler->count++ % SAMPLE_POOL_SIZE ]; } __host__ inline void sampler_PureRandom ( Sampler *sampler ){ /*not yet..*/ for( int i = 0 ; i < sampler->numSamples ; i ++ ){ sampler->sample[i].x = 0; sampler->sample[i].y = 0; } } __host__ inline void sampler_Regular ( Sampler *sampler ){ /*not yet..*/ for( int i = 0 ; i < sampler->numSamples ; i ++ ){ sampler->sample[i].x = 0; sampler->sample[i].y = 0; } } __host__ inline void sampler_Jittered ( Sampler *sampler ){ for(int j = 0 ; j < SAMPLE_POOL_SIZE / sampler->numSamples ; j ++){ for( int i = 0 ; i < sampler->numSamples ; i ++ ){ int dim = sqrtf( float(sampler->numSamples ) ); int r = i / dim; int c = i % dim; float t = 1 / dim; sampler->sample[j * sampler->numSamples + i].x = float( c * t ) + floatRand()/dim; sampler->sample[j * sampler->numSamples + i].y = float( r * t ) + floatRand()/dim; } } } __host__ inline void sampler_Nrooks ( Sampler *sampler ){ /*not yet..*/ for( int i = 0 ; i < sampler->numSamples ; i ++ ){ sampler->sample[i].x = 0; 
sampler->sample[i].y = 0; } } __host__ inline void sampler_MultiJittered ( Sampler *sampler ){ /*not yet..*/ for( int i = 0 ; i < sampler->numSamples ; i ++ ){ sampler->sample[i].x = 0; sampler->sample[i].y = 0; } } __host__ inline void sampler_Hammersley ( Sampler *sampler ){ /*not yet..*/ for( int i = 0 ; i < sampler->numSamples ; i ++ ){ sampler->sample[i].x = 0; sampler->sample[i].y = 0; } } __device__ inline Point3D MapSquareToHemiSphere(Point2D point, float exp){ float cosPhi = cosf( point.x * 2.0 * PI ); float sinPhi = sinf( point.x * 2.0 * PI ); float cosTheta = powf( (1.0 - point.y ), 1.0 / ( exp + 1 )); float sinTheta = sqrtf( 1.0 - cosTheta * cosTheta ); float pu = sinTheta * cosTheta; float pv = sinTheta * sinTheta; float pw = cosTheta; return Point3D( pu , pv, pw ); } __device__ inline Point3D getSampleUnitHemiSphere(Sampler *sampler, float exp){ return MapSquareToHemiSphere( sampler->sample[ sampler->count++ % SAMPLE_POOL_SIZE ],exp ); }
11,157
#include "includes.h" __device__ inline void charAtomicAdd(char *address, char value) { int oldval, newval, readback; oldval = *address; newval = oldval + value; while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval) { oldval = readback; newval = oldval + value; } } __global__ void kernel_add(char* newB, char* first, char* second, int size_biggest, int diff, int * size_newB) { int tmp = 0; int i = threadIdx.x; #if __CUDA_ARCH__>=200 //printf("#threadIdx.x = %d\n", threadIdx.x); #endif if (i == 0) return; //for (int i = size_biggest - 1; i >= 0; i--) { if (i - 1 - diff >= 0 && (second[i - 1 - diff] != '+' && second[i - 1 - diff] != '-')) { tmp = second[i - 1 - diff] + first[i - 1]; } else if (first[i - 1] != '+' && first[i - 1] != '-') { tmp = first[i - 1]; } if (tmp >= 10) { //charAtomicAdd(&newB[i], 1); newB[i - 1]++; tmp = tmp % 10; } if (i != 0) newB[i] += tmp; //} }
11,158
#include <stdio.h>

#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512

// Element-wise vector sum: c[i] = a[i] + b[i]; one thread per element with a
// guard for the grid tail.
__global__ void add(int *a,int *b,int *c,int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i];
}

int main(void)
{
    const int bytes = N * sizeof(int);

    // Device buffers.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);

    // Host buffers with both inputs set to i + 1.
    int *h_a = (int *)malloc(bytes);
    int *h_b = (int *)malloc(bytes);
    int *h_c = (int *)malloc(bytes);
    for (int i = 0; i < N; ++i)
        h_b[i] = h_a[i] = i + 1;

    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // Ceil-divide the element count so the grid covers all of N.
    const int grid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    add<<<grid, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);

    // Blocking copy; synchronizes with the kernel before h_c is read.
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    printf("Hello world %d\n", h_c[100]);

    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
11,159
#include <stdio.h>
#include <cuda.h>

#define N 10

// Guarded element-wise sum: C[i] = A[i] + B[i].
__global__ void vectAdd(int *A, int *B, int *C)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < N)
        C[idx] = A[idx] + B[idx];
}

int main(int argc, char const *argv[])
{
    const int size = N * sizeof(int);
    int A[N], B[N], C[N];

    // Both inputs hold 0..N-1, so C[i] should come back as 2*i.
    for (int i = 0; i < N; ++i)
        B[i] = A[i] = i;

    // Allocate and stage each operand on the device.
    int *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_C, size);

    // Ceil-divide into 256-thread blocks (a single block for N = 10).
    vectAdd<<<(N+255)/256, 256>>>(d_A, d_B, d_C);

    // Blocking copy back, then print one result per line.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
        printf("%d\n", C[i]);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
11,160
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>

// min(a, round(b)) with mixed int/double operands; returns a when b >= a.
__device__ int cuda_mymin(int a, double b) {
    return !(b<a)?a:round(b);
}

// Floored fmod: numer - floor(numer/denom)*denom.
__device__ double cuda_fmod(double numer, double denom){
    double tquou = floor(numer / denom);
    return numer - tquou * denom;
}

// Cell index of 1D point p[0] on a grid of ncx cells over [0, 1], clamped.
__device__ int cuda_findcellidx_1D(const float* p, const int ncx) {
    // Floor value to find cell
    int idx = floor(p[0] * ncx);
    idx = max(0, min(idx, ncx-1));
    return idx;
}

// Triangle-cell index of 2D point p on an ncx x ncy grid where each square
// cell is split into 4 triangles (hence the *4). Out-of-bound points are
// assigned to the border triangle facing them.
__device__ int cuda_findcellidx_2D(const float* p, const int ncx, const int ncy) {
    // Copy point
    double point[2];
    point[0] = p[0];
    point[1] = p[1];

    // Cell size
    const float inc_x = 1.0 / ncx;
    const float inc_y = 1.0 / ncy;

    // Find initial row, col placement
    double p0 = min((ncx * inc_x - 0.000000001), max(0.0, point[0]));
    double p1 = min((ncy * inc_y - 0.000000001), max(0.0, point[1]));
    double xmod = cuda_fmod((double)p0, (double)inc_x);
    double ymod = cuda_fmod((double)p1, (double)inc_y);
    double x = xmod / inc_x;
    double y = ymod / inc_y;

    int cell_idx = cuda_mymin(ncx-1, (p0 - xmod) / inc_x) +
                   cuda_mymin(ncy-1, (p1 - ymod) / inc_y) * ncx;
    cell_idx *= 4;

    // Out of bound (left)
    if(point[0]<=0){
        if(point[1] <= 0 && point[1]/inc_y<point[0]/inc_x){
            // Nothing to do here
        } else if(point[1] >= ncy * inc_y && point[1]/inc_y-ncy > -point[0]/inc_x) {
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
        return cell_idx;
    }

    // Out of bound (right)
    if(point[0] >= ncx*inc_x){
        if(point[1]<=0 && -point[1]/inc_y > point[0]/inc_x - ncx){
            // Nothing to do here
        } else if(point[1] >= ncy*inc_y && point[1]/inc_y - ncy > point[0]/inc_x-ncx){
            cell_idx += 2;
        } else {
            cell_idx += 1;
        }
        return cell_idx;
    }

    // Out of bound (up)
    if(point[1] <= 0){
        return cell_idx;
    }

    // Out of bound (bottom)
    if(point[1] >= ncy*inc_y){
        cell_idx += 2;
        return cell_idx;
    }

    // OK, we are inbound: pick one of the 4 triangles by the diagonals.
    if(x<y){
        if(1-x<y){
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
    } else if(1-x<y) {
        cell_idx += 1;
    }
    return cell_idx;
}

// Tetrahedron-cell index of 3D point p on an nx x ny x nz grid where each box
// is split into 5 tetrahedra (hence the *5). Points outside the unit cube are
// first pushed back inside.
__device__ int cuda_findcellidx_3D(const float* p, const int nx, const int ny, const int nz) {
    // Cell size
    const float inc_x = 1.0 / nx;
    const float inc_y = 1.0 / ny;
    const float inc_z = 1.0 / nz;

    // Copy point
    float point[3];
    point[0] = p[0];
    point[1] = p[1];
    point[2] = p[2];

    // If point is outside [0, 1]x[0, 1] then we push it inside.
    // NOTE(review): this guard never tests point[2]; a point outside only in
    // z skips the push entirely — confirm whether that is intended.
    if(point[0] < 0.0 || point[0] > 1.0 || point[1] < 0.0 || point[1] > 1.0) {
        const float half = 0.5;
        point[0] -= half; point[1] -= half; point[2] -= half;
        const float abs_x = abs(point[0]);
        const float abs_y = abs(point[1]);
        const float abs_z = abs(point[2]);
        // FIX: the original tested `abs_x < abs_z` in push_y and
        // `abs_x < abs_y` in push_z (copy-paste from push_x); each push
        // applies to the strictly smallest coordinate, mirroring push_x.
        const float push_x = (abs_x < abs_y && abs_x < abs_z) ? half*inc_x : 0.0;
        const float push_y = (abs_y < abs_x && abs_y < abs_z) ? half*inc_y : 0.0;
        const float push_z = (abs_z < abs_x && abs_z < abs_y) ? half*inc_z : 0.0;
        if(abs_x > half){point[0] = copysign(half - push_x, point[0]);}
        if(abs_y > half){point[1] = copysign(half - push_y, point[1]);}
        if(abs_z > half){point[2] = copysign(half - push_z, point[2]);}
        point[0] += half;
        point[1] += half;
        point[2] += half;
    }

    float zero = 0.0;
    float p0 = min((float)(nx*inc_x-1e-8),max(zero, point[0]));
    float p1 = min((float)(ny*inc_y-1e-8),max(zero, point[1]));
    // FIX: was `nz*inc_x` (copy-paste); the z extent uses inc_z.
    float p2 = min((float)(nz*inc_z-1e-8),max(zero, point[2]));

    double xmod = cuda_fmod(p0,inc_x);
    double ymod = cuda_fmod(p1,inc_y);
    double zmod = cuda_fmod(p2,inc_z);

    int i = cuda_mymin(nx-1,((p0 - xmod)/inc_x));
    int j = cuda_mymin(ny-1,((p1 - ymod)/inc_y));
    int k = cuda_mymin(nz-1,((p2 - zmod)/inc_z));
    int cell_idx = 5*(i + j * nx + k * nx * ny);

    double x = xmod/inc_x;
    double y = ymod/inc_y;
    double z = zmod/inc_z;

    // Checkerboard orientation flip so adjacent boxes tessellate.
    bool tf = false;
    if (k%2==0){
        if ((i%2==0 && j%2==1) || (i%2==1 && j%2==0)){
            tf = true;
        }
    } else if((i%2==0 && j%2==0) || (i%2==1 && j%2==1)){
        tf = true;
    }
    if (tf){
        double tmp = x;
        x = y;
        y = 1-tmp;
    }

    // Pick one of the 5 tetrahedra by half-space tests.
    if (-x -y +z >= 0){
        cell_idx+=1;
    } else if (x+y+z - 2 >= 0){
        cell_idx+=2;
    } else if (-x+y-z >= 0){
        cell_idx+=3;
    } else if (x-y-z >= 0){
        cell_idx+=4;
    }
    return cell_idx;
}

// Affine maps x = A*b (A stored row-major with a trailing translation column).
__device__ void A_times_b_1D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1];
    return;
}

__device__ void A_times_b_2D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1] + A[2];
    x[1] = A[3]*b[0] + A[4]*b[1] + A[5];
    return;
}

__device__ void A_times_b_3D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1] + A[2]*b[2] + A[3];
    x[1] = A[4]*b[0] + A[5]*b[1] + A[6]*b[2] + A[7];
    x[2] = A[8]*b[0] + A[9]*b[1] + A[10]*b[2] + A[11];
    return;
}

// Linear parts only (translation column dropped), same storage layout.
__device__ void A_times_b_linear_1D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0];
    return;
}

__device__ void A_times_b_linear_2D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1];
    x[1] = A[3]*b[0] + A[4]*b[1];
    return;
}

__device__ void A_times_b_linear_3D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1] + A[2]*b[2];
    x[1] = A[4]*b[0] + A[5]*b[1] + A[6]*b[2];
    x[2] = A[8]*b[0] + A[9]*b[1] + A[10]*b[2];
    return;
}

// Forward CPAB integration (1D): x-threads index points, y-threads batches.
__global__ void cpab_cuda_kernel_forward_1D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point (broadcast == 0 reuses the same point set for all batches)
        float point[1];
        point[0] = points[broadcast*batch_index*nP*1+point_index];

        // Start index for this batch: batch * 2 params pr cell * cells in x
        int start_idx = batch_index * 2 * nc[0];

        // Fixed-step integration: look up the cell, apply its affine map.
        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            cellidx = cuda_findcellidx_1D(point, nc[0]);
            const float* Trels_idx = Trels + 2*cellidx + start_idx;
            float point_updated[1];
            A_times_b_1D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
        }
        newpoints[nP * batch_index + point_index] = point[0];
    }
    return;
}

// Forward CPAB integration (2D).
__global__ void cpab_cuda_kernel_forward_2D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point (coordinates stored plane-by-plane, nP apart)
        float point[2];
        point[0] = points[broadcast*batch_index*nP*2+point_index];
        point[1] = points[broadcast*batch_index*nP*2+point_index + nP];

        // batch * 6 params * 4 triangles pr cell * cells in x * cells in y
        int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];

        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            cellidx = cuda_findcellidx_2D(point, nc[0], nc[1]);
            const float* Trels_idx = Trels + 6*cellidx + start_idx;
            float point_updated[2];
            A_times_b_2D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            point[1] = point_updated[1];
        }
        newpoints[2 * nP * batch_index + point_index] = point[0];
        newpoints[2 * nP * batch_index + point_index + nP] = point[1];
    }
    return;
}

// Forward CPAB integration (3D).
__global__ void cpab_cuda_kernel_forward_3D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        float point[3];
        point[0] = points[broadcast*batch_index*nP*3+point_index];
        point[1] = points[broadcast*batch_index*nP*3+point_index + nP];
        point[2] = points[broadcast*batch_index*nP*3+point_index + 2*nP];

        // batch * 12 params * 5 tetrahedra pr cell * cells in x * y * z
        int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];

        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            cellidx = cuda_findcellidx_3D(point, nc[0], nc[1], nc[2]);
            const float* Trels_idx = Trels + 12*cellidx + start_idx;
            float point_updated[3];
            A_times_b_3D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            point[1] = point_updated[1];
            point[2] = point_updated[2];
        }
        newpoints[3 * nP * batch_index + point_index] = point[0];
        newpoints[3 * nP * batch_index + point_index + nP] = point[1];
        newpoints[3 * nP * batch_index + point_index + 2 * nP] = point[2];
    }
    return;
}

// Backward pass (1D): midpoint-rule co-integration of the point trajectory
// and its sensitivity q w.r.t. each of the d basis directions (z-threads).
__global__ void cpab_cuda_kernel_backward_1D(dim3 nthreads, const int n_theta, const int d,
                                             const int nP, const int nC, float* grad,
                                             const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc,
                                             const int broadcast) {
    // Allocate memory for computations
    float p[1], v[1], pMid[1], vMid[1], q[1], qMid[1];
    float B_times_T[1], A_times_dTdAlpha[1], u[1], uMid[1];
    float Alocal[2], Blocal[2];
    int cellidx;

    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;

    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = nP * batch_index + point_index;
        int boxsize = nP * n_theta;

        // batch * 2 params pr cell * cells in x
        int start_idx = batch_index * 2 * nc[0];

        // Get point
        p[0] = points[broadcast*batch_index*nP*1+point_index];

        // Step size for solver
        double h = (1.0 / nStepSolver[0]);

        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell and its local A
            cellidx = cuda_findcellidx_1D(p, nc[0]);
            int As_idx = 2*cellidx;
            for(int i = 0; i < 2; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }

            // Velocity at current location and at the midpoint
            A_times_b_1D(v, Alocal, p);
            pMid[0] = p[0] + h*v[0]/2.0;
            A_times_b_1D(vMid, Alocal, pMid);

            // Local B for this basis direction
            int Bs_idx = 2 * dim_index * nC + As_idx;
            for(int i = 0; i < 2; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }

            // Copy q
            q[0] = grad[dim_index*boxsize + index];

            // Step 1: RHS at the old location (term 1 + term 2)
            A_times_b_1D(B_times_T, Blocal, p);
            A_times_b_linear_1D(A_times_dTdAlpha, Alocal, q);
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];

            // Step 2: mid "point"; Step 3: RHS at the midpoint
            qMid[0] = q[0] + h * u[0]/2.0;
            A_times_b_1D(B_times_T, Blocal, pMid);
            A_times_b_linear_1D(A_times_dTdAlpha, Alocal, qMid);
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];

            // Update q, gradient, and p
            q[0] += uMid[0] * h;
            grad[dim_index * boxsize + index] = q[0];
            p[0] += vMid[0]*h;
        }
    }
    return;
}

// Backward pass (2D); same scheme as 1D with 6-parameter cell maps.
__global__ void cpab_cuda_kernel_backward_2D(dim3 nthreads, const int n_theta, const int d,
                                             const int nP, const int nC, float* grad,
                                             const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc,
                                             const int broadcast) {
    float p[2], v[2], pMid[2], vMid[2], q[2], qMid[2];
    float B_times_T[2], A_times_dTdAlpha[2], u[2], uMid[2];
    float Alocal[6], Blocal[6];
    int cellidx;

    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;

    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = 2 * nP * batch_index + point_index;
        int boxsize = 2 * nP * n_theta;

        // batch * 6 params * 4 triangles pr cell * cells in x * cells in y
        int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];

        p[0] = points[broadcast*batch_index*nP*2+point_index];
        p[1] = points[broadcast*batch_index*nP*2+point_index + nP];

        double h = (1.0 / nStepSolver[0]);

        for(int t=0; t<nStepSolver[0]; t++) {
            cellidx = cuda_findcellidx_2D(p, nc[0], nc[1]);
            int As_idx = 6*cellidx;
            for(int i = 0; i < 6; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }

            A_times_b_2D(v, Alocal, p);
            pMid[0] = p[0] + h*v[0]/2.0;
            pMid[1] = p[1] + h*v[1]/2.0;
            A_times_b_2D(vMid, Alocal, pMid);

            int Bs_idx = 6 * dim_index * nC + As_idx;
            for(int i = 0; i < 6; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }

            q[0] = grad[dim_index*boxsize + index];
            q[1] = grad[dim_index*boxsize + index + nP];

            A_times_b_2D(B_times_T, Blocal, p);
            A_times_b_linear_2D(A_times_dTdAlpha, Alocal, q);
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            u[1] = B_times_T[1] + A_times_dTdAlpha[1];

            qMid[0] = q[0] + h * u[0]/2.0;
            qMid[1] = q[1] + h * u[1]/2.0;
            A_times_b_2D(B_times_T, Blocal, pMid);
            A_times_b_linear_2D(A_times_dTdAlpha, Alocal, qMid);
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];

            q[0] += uMid[0] * h;
            q[1] += uMid[1] * h;
            grad[dim_index * boxsize + index] = q[0];
            grad[dim_index * boxsize + index + nP] = q[1];
            p[0] += vMid[0]*h;
            p[1] += vMid[1]*h;
        }
    }
    return;
}

// Backward pass (3D); same scheme with 12-parameter cell maps and 5
// tetrahedra per cell (the original comment said 6, the code uses 5).
__global__ void cpab_cuda_kernel_backward_3D(dim3 nthreads, const int n_theta, const int d,
                                             const int nP, const int nC, float* grad,
                                             const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc,
                                             const int broadcast) {
    float p[3], v[3], pMid[3], vMid[3], q[3], qMid[3];
    float B_times_T[3], A_times_dTdAlpha[3], u[3], uMid[3];
    float Alocal[12], Blocal[12];
    int cellidx;

    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;

    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = 3 * nP * batch_index + point_index;
        int boxsize = 3 * nP * n_theta;

        // batch * 12 params * 5 tetrahedra pr cell * cells in x * y * z
        int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];

        p[0] = points[broadcast*batch_index*nP*3+point_index];
        p[1] = points[broadcast*batch_index*nP*3+point_index + nP];
        p[2] = points[broadcast*batch_index*nP*3+point_index + 2 * nP];

        double h = (1.0 / nStepSolver[0]);

        for(int t=0; t<nStepSolver[0]; t++) {
            cellidx = cuda_findcellidx_3D(p, nc[0], nc[1], nc[2]);
            int As_idx = 12*cellidx;
            for(int i = 0; i < 12; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }

            A_times_b_3D(v, Alocal, p);
            pMid[0] = p[0] + h*v[0]/2.0;
            pMid[1] = p[1] + h*v[1]/2.0;
            pMid[2] = p[2] + h*v[2]/2.0;
            A_times_b_3D(vMid, Alocal, pMid);

            int Bs_idx = 12 * dim_index * nC + As_idx;
            for(int i = 0; i < 12; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }

            q[0] = grad[dim_index * boxsize + index];
            q[1] = grad[dim_index * boxsize + index + nP];
            q[2] = grad[dim_index * boxsize + index + 2*nP];

            A_times_b_3D(B_times_T, Blocal, p);
            A_times_b_linear_3D(A_times_dTdAlpha, Alocal, q);
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            u[1] = B_times_T[1] + A_times_dTdAlpha[1];
            u[2] = B_times_T[2] + A_times_dTdAlpha[2];

            qMid[0] = q[0] + h * u[0]/2.0;
            qMid[1] = q[1] + h * u[1]/2.0;
            qMid[2] = q[2] + h * u[2]/2.0;
            A_times_b_3D(B_times_T, Blocal, pMid);
            A_times_b_linear_3D(A_times_dTdAlpha, Alocal, qMid);
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
            uMid[2] = B_times_T[2] + A_times_dTdAlpha[2];

            q[0] += uMid[0] * h;
            q[1] += uMid[1] * h;
            q[2] += uMid[2] * h;
            grad[dim_index * boxsize + index] = q[0];
            grad[dim_index * boxsize + index + nP] = q[1];
            grad[dim_index * boxsize + index + 2 * nP] = q[2];
            p[0] += vMid[0]*h;
            p[1] += vMid[1]*h;
            p[2] += vMid[2]*h;
        }
    }
    return;
}
11,161
// MaskEventsIn is auto-generated from csv file
struct MaskEventsIn{
    int nev;                        // number of events
    uint *luminosityBlock;
    int *HLT_Ele32_WPTight_Gsf;
    int *HLT_IsoMu24;
    uint *nElectron;
    uint *nMuon;
};

// EventsIn is auto-generated from csv file
struct EventsIn{
    int nev;                        // number of events
    int *HLT_Ele32_WPTight_Gsf;
    int *HLT_IsoMu24;
    uint *nElectron;
    uint *nMuon;
    float *Electron_pt;
    float *Electron_eta;
    float *Electron_phi;
    float *Electron_mass;
    int *Electron_cutBased;
    float *Electron_pfRelIso03_all;
    int *Electron_pdgId;
    float *Muon_pt;
    float *Muon_eta;
    float *Muon_phi;
    float *Muon_mass;
    int *Muon_isGlobal;
    int *Muon_isPFcand;
    int *Muon_tightId;
    float *Muon_pfRelIso03_all;
    int *Muon_pdgId;
    uint *cumsum_nElectron;         // per-event offset into the flat Electron_* arrays
    uint *cumsum_nMuon;             // per-event offset into the flat Muon_* arrays
};

// EventsMid is auto-generated from csv file
struct EventsMid{
    int MAXNLEPTON;                 // max selected leptons stored per event
    int *iPassElectron;             // [nev * MAXNLEPTON] local indices of passing electrons
    uint *iPassMuon;                // [nev * MAXNLEPTON] local indices of passing muons
};

// EventsOut is auto-generated from csv file
struct EventsOut{
    int *channel;                   // -1 = rejected, 0 = ee, 1 = mumu
    int *nPassElectron;
    int *nPassMuon;
    float *lepton1Pt;
    float *lepton1Eta;
    float *lepton1Phi;
    float *lepton1M;
    float *lepton2Pt;
    float *lepton2Eta;
    float *lepton2Phi;
    float *lepton2M;
    float *dileptonPt;
    float *dileptonM;
    float *leptonsDeltaPhi;
    float *leptonsDeltaR;
    float *lepton1Pdgid;
    float *lepton2Pdgid;
    float *lepton1Reliso;
    float *lepton2Reliso;
};

// some handy lorentz vector and methods
struct P4_PtEtaPhiM{
    float pt;
    float eta;
    float phi;
    float m;
};

// Sum of two four-vectors given in (pt, eta, phi, m) representation.
__device__ P4_PtEtaPhiM lorentz_add( P4_PtEtaPhiM *p1, P4_PtEtaPhiM *p2){
    // convert both inputs to cartesian components (px, py, pz, E)
    float px1 = p1->pt*cos(p1->phi);
    float py1 = p1->pt*sin(p1->phi);
    float pz1 = p1->pt*sinh(p1->eta);
    float pe1 = sqrt(px1*px1 + py1*py1 + pz1*pz1 + p1->m*p1->m);
    float px2 = p2->pt*cos(p2->phi);
    float py2 = p2->pt*sin(p2->phi);
    float pz2 = p2->pt*sinh(p2->eta);
    float pe2 = sqrt(px2*px2 + py2*py2 + pz2*pz2 + p2->m*p2->m);
    float qx = px1+px2;
    float qy = py1+py2;
    float qz = pz1+pz2;
    float qe = pe1+pe2;
    float q_pt = sqrt(qx*qx + qy*qy);
    // BUG FIX: q_eta and q_phi were hard-coded to 0.0 and marked "FIX ME".
    // phi = atan2(py, px); pseudorapidity eta = asinh(pz / pt).
    float q_phi = atan2f(qy, qx);
    float q_eta = (q_pt > 0.0f) ? asinhf(qz/q_pt) : 0.0f;
    float q_m = sqrt(qe*qe - qx*qx - qy*qy - qz*qz);
    struct P4_PtEtaPhiM q = {q_pt, q_eta, q_phi, q_m};
    return q;
}

// root function return phi in [-pi,pi]
// https://root.cern.ch/doc/master/TVector2_8cxx_source.html#l00103
__device__ float phi_mpi_pi(float x){
    while(x>M_PI) x -= 2*M_PI;
    while(x<-M_PI) x += 2*M_PI;
    return x;
}

//////////////
//   mask   //
//////////////
// One thread per event: keep events that fired either trigger and have at
// least two electrons or two muons.
__global__ void knl_mask(MaskEventsIn *evsI, bool *mask)
{
    int iev = blockDim.x*blockIdx.x + threadIdx.x;
    if (iev < evsI->nev) {
        bool isPass = false;
        if ( (evsI->HLT_Ele32_WPTight_Gsf[iev] || evsI->HLT_IsoMu24[iev]) &&
             (evsI->nElectron[iev]>=2 || evsI->nMuon[iev]>=2) ){
            isPass = true;
        }
        mask[iev] = isPass;
    }
}

//////////////////
// obj-electron //
//////////////////
// One thread per event: record (up to MAXNLEPTON) electrons passing the
// pt / eta / cut-based ID selection, storing event-local indices.
__global__ void knl_objectSelection_electron(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO)
{
    int iev = blockDim.x*blockIdx.x + threadIdx.x;
    if (iev < evsI->nev) {
        const int cumsum_nObject = evsI->cumsum_nElectron[iev];
        const int nObject = evsI->nElectron[iev];
        int nPassObject = 0;
        // loop over all electrons in the event
        for( int i = cumsum_nObject; i < cumsum_nObject + nObject; i++){
            if (nPassObject >= evsM->MAXNLEPTON) break;
            if( evsI->Electron_pt[i] > 20 &&
                abs(evsI->Electron_eta[i]) < 2.5 &&
                evsI->Electron_cutBased[i] >= 3 ){
                evsM->iPassElectron[iev*evsM->MAXNLEPTON + nPassObject] = i-cumsum_nObject;
                nPassObject++;
            }
        } // end of loop
        evsO->nPassElectron[iev] = nPassObject;
    }
}

//////////////////
//   obj-muon   //
//////////////////
// One thread per event: record (up to MAXNLEPTON) muons passing the
// pt / eta / global-PF-tight ID selection, storing event-local indices.
__global__ void knl_objectSelection_muon(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO)
{
    int iev = blockDim.x*blockIdx.x + threadIdx.x;
    if (iev < evsI->nev) {
        const int cumsum_nObject = evsI->cumsum_nMuon[iev];
        const int nObject = evsI->nMuon[iev];
        int nPassObject = 0;
        // loop over all muons in the event
        for( int i = cumsum_nObject; i < cumsum_nObject + nObject; i++){
            if (nPassObject >= evsM->MAXNLEPTON) break;
            if( evsI->Muon_pt[i] > 10 &&
                abs(evsI->Muon_eta[i]) < 2.4 &&
                evsI->Muon_isGlobal[i] == 1 &&
                evsI->Muon_isPFcand[i] == 1 &&
                evsI->Muon_tightId[i] == 1 ){
                evsM->iPassMuon[iev*evsM->MAXNLEPTON + nPassObject] = i-cumsum_nObject;
                nPassObject++;
            }
        } // end of loop
        evsO->nPassMuon[iev] = nPassObject;
    }
}

//////////////////
// event selection
//////////////////
// One thread per event: classify into the ee (channel 0) or mumu (channel 1)
// final state, apply pt thresholds, opposite-sign and Z-window cuts, then
// fill the output lepton/dilepton kinematics.
__global__ void knl_eventSelection(EventsIn *evsI, EventsMid *evsM, EventsOut *evsO)
{
    int iev = blockDim.x*blockIdx.x + threadIdx.x;
    if (iev < evsI->nev) {
        int MAXNLEPTON = evsM->MAXNLEPTON;
        struct P4_PtEtaPhiM lep1, lep2, dilepton;
        evsO->channel[iev] = -1;    // default: event rejected
        if (evsI->HLT_Ele32_WPTight_Gsf[iev]==1 && evsO->nPassElectron[iev]>=2 && evsO->nPassMuon[iev]==0){
            // ee channel: global indices of the two leading selected electrons
            int l1 = evsM->iPassElectron[iev*MAXNLEPTON+0] + evsI->cumsum_nElectron[iev];
            int l2 = evsM->iPassElectron[iev*MAXNLEPTON+1] + evsI->cumsum_nElectron[iev];
            // pt threshold
            if (evsI->Electron_pt[l1]<32 || evsI->Electron_pt[l2]<20) return;
            // opposite sign
            if (evsI->Electron_pdgId[l1] * evsI->Electron_pdgId[l2] > 0) return;
            // dilepton mass veto
            lep1 = {evsI->Electron_pt[l1], evsI->Electron_eta[l1], evsI->Electron_phi[l1], evsI->Electron_mass[l1]};
            lep2 = {evsI->Electron_pt[l2], evsI->Electron_eta[l2], evsI->Electron_phi[l2], evsI->Electron_mass[l2]};
            dilepton = lorentz_add(&lep1, &lep2);
            if(dilepton.m<60 || dilepton.m>130) return;
            // fillout evsO
            evsO->channel[iev] = 0;
            evsO->lepton1Pdgid[iev] = evsI->Electron_pdgId[l1];
            evsO->lepton2Pdgid[iev] = evsI->Electron_pdgId[l2];
            evsO->lepton1Reliso[iev] = evsI->Electron_pfRelIso03_all[l1];
            evsO->lepton2Reliso[iev] = evsI->Electron_pfRelIso03_all[l2];
        }
        else if (evsI->HLT_IsoMu24[iev]==1 && evsO->nPassElectron[iev]==0 && evsO->nPassMuon[iev]>=2){
            // mumu channel: global indices of the two leading selected muons
            int l1 = evsM->iPassMuon[iev*MAXNLEPTON+0] + evsI->cumsum_nMuon[iev];
            int l2 = evsM->iPassMuon[iev*MAXNLEPTON+1] + evsI->cumsum_nMuon[iev];
            // pt threshold
            if (evsI->Muon_pt[l1]<27 || evsI->Muon_pt[l2]<10) return;
            // opposite sign
            if (evsI->Muon_pdgId[l1] * evsI->Muon_pdgId[l2] > 0) return;
            // dilepton mass veto
            lep1 = {evsI->Muon_pt[l1], evsI->Muon_eta[l1], evsI->Muon_phi[l1], evsI->Muon_mass[l1]};
            lep2 = {evsI->Muon_pt[l2], evsI->Muon_eta[l2], evsI->Muon_phi[l2], evsI->Muon_mass[l2]};
            dilepton = lorentz_add(&lep1, &lep2);
            if(dilepton.m<60 || dilepton.m>130) return;
            // fillout evsO
            evsO->channel[iev] = 1;
            evsO->lepton1Pdgid[iev] = evsI->Muon_pdgId[l1];
            evsO->lepton2Pdgid[iev] = evsI->Muon_pdgId[l2];
            evsO->lepton1Reliso[iev] = evsI->Muon_pfRelIso03_all[l1];
            evsO->lepton2Reliso[iev] = evsI->Muon_pfRelIso03_all[l2];
        }
        ///////////////////
        // fill leptons p4
        ///////////////////
        if (evsO->channel[iev] != -1){
            // lep1 p4
            evsO->lepton1Pt[iev]  = lep1.pt;
            evsO->lepton1Eta[iev] = lep1.eta;
            evsO->lepton1Phi[iev] = lep1.phi;
            evsO->lepton1M[iev]   = lep1.m;
            // lep2 p4
            evsO->lepton2Pt[iev]  = lep2.pt;
            evsO->lepton2Eta[iev] = lep2.eta;
            evsO->lepton2Phi[iev] = lep2.phi;
            evsO->lepton2M[iev]   = lep2.m;
            // dilepton p4
            evsO->dileptonPt[iev] = dilepton.pt;
            evsO->dileptonM[iev]  = dilepton.m;
            // lep1-lep2 delta
            float deltaPhi = phi_mpi_pi(lep1.phi-lep2.phi);
            evsO->leptonsDeltaR[iev] = sqrt((lep1.eta-lep2.eta)*(lep1.eta-lep2.eta) + deltaPhi*deltaPhi) ;
            evsO->leptonsDeltaPhi[iev] = deltaPhi;
        }
    }
}
11,162
#include <stdio.h>

#define BLOCK 16
#define NUM 32

// Per-block scratch buffer (file-scope __shared__: one instance per block).
__shared__ unsigned b[NUM];

// Branch-divergence test kernel: odd-numbered blocks take a nested branchy
// path, even-numbered blocks a straight-line path; finally each thread
// reports its parity.
__global__ void paratest(unsigned *a)
{
    unsigned blk = blockIdx.x;
    unsigned thr = threadIdx.x;

    b[thr] = a[thr];
    __syncthreads();

    for (unsigned round = 0; round < 4; ++round) {
        unsigned gidx = blk * blockDim.x + thr;
        if (blk % 2 != 0) {
            if (thr < 1024) {
                b[thr] = a[gidx] + 1;
                if (thr % 2 != 0) {
                    b[thr] = 2;
                } else if (thr > 0) {
                    b[thr] = b[thr - 1] + 1;
                }
            } else {
                b[thr] = b[thr - 1];
            }
        } else {
            b[thr] = a[gidx] + 1;
        }
    }
    __syncthreads();

    printf(thr % 2 == 0 ? "even number !\n" : "odd number !\n");
}

int main()
{
    unsigned *da;
    cudaMalloc((void **)&da, sizeof(unsigned) * BLOCK * NUM);
    paratest<<<BLOCK, NUM>>>(da);
    cudaFree(da);   // implicitly synchronizes with the launch
}
11,163
#include <cuda.h>
#include <stdio.h>
#include <math.h>

#define N 2048
#define T 1024 // maximum number of threads per block

// GPU kernel: element-wise vector addition dc = da + db.
__global__ void vecAdd (int *da, int *db, int *dc)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        dc[i] = da[i] + db[i];
    }
}

// Host: initialize the input vectors a and b (a[i] = N-i, b[i] = i,
// so every element of the sum equals N).
__host__ void initvet(int *host_a, int *host_b)
{
    for (int i=0; i < N; i++) {
        host_a[i] = N-i;
        host_b[i] = i;
    }
}

// Host: print every 100th element of a, b and c.
__host__ void printvetores (int *a, int *b, int *c)
{
    printf("\t [i] \t A\t B\t C\t \n");
    for (int i=0; i < N; i++) {
        if ((i % 100) == 0)
            printf("\t [%d] \t %d\t %d\t %d\n", i, a[i], b[i], c[i]);
    }
}

// Host entry point: allocates, copies, launches and prints the result.
int main(int argc, char const *argv[])
{
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = N * sizeof(int);

    // pinned host buffers for the inputs
    cudaMallocHost((void **) &a, size);
    cudaMallocHost((void **) &b, size);
    initvet(a,b);

    // device buffers for a, b and c
    cudaMalloc ((void **) &dev_a, size);
    cudaMalloc ((void **) &dev_b, size);
    cudaMalloc ((void **) &dev_c, size);

    // copy host-generated input to device memory
    cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice);

    // BUG FIX: (int)ceil(N/T) performed INTEGER division before ceil(), which
    // would under-count blocks whenever N is not a multiple of T. Use integer
    // ceiling division instead (same value, 2, for the current N and T).
    int blocks = (N + T - 1) / T;
    vecAdd<<<blocks, T>>>(dev_a, dev_b, dev_c);

    cudaMallocHost((void **) &c, size);
    cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost);

    printvetores (a, b, c);
    printf ("\n **** \n Nro Threads = %d\n Nro Max por Bloco = %d\n Nro de Blocos = %d\n", N, T, blocks);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
11,164
#include<stdio.h>
#include<time.h>
#include<math.h>
#include<iostream>

#define w 256
#define h 256
#define N w*h

using namespace std;

__global__ void reduce(int*, unsigned long long*, int*);

// Computes the standard deviation of 0..N-1 around a fixed mean (32767) with
// a GPU block reduction, then cross-checks against a sequential CPU sum.
int main(void)
{
    int* hostA = (int*)malloc(N*sizeof(int));
    // BUG FIX: the sum of squared deviations is ~2.3e13 for N = 65536 values
    // in [0, 65535] around mean 32767, which overflows a 32-bit int.
    // Accumulate in 64-bit instead.
    unsigned long long* hostB = (unsigned long long*)malloc(N*sizeof(unsigned long long));
    int* hostMean = (int*)malloc(sizeof(int));
    *hostMean = 32767;

    int* deviceA;
    unsigned long long* deviceB;
    int* deviceMean;
    cudaMalloc(&deviceA, sizeof(int)*N);
    cudaMalloc(&deviceB, sizeof(unsigned long long)*N);
    cudaMalloc(&deviceMean, sizeof(int));

    srand(time(0));
    // initialize host vector (deterministic ramp 0..N-1)
    for(int i=0;i<N;i++) {
        hostA[i] = i;
    }
    hostB[0] = 0;

    cudaMemcpy(deviceA, hostA, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, N*sizeof(unsigned long long), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceMean, hostMean, sizeof(int), cudaMemcpyHostToDevice);

    dim3 blocksize(256);
    dim3 gridsize(N/blocksize.x);

    float gpu_elapsed_time;
    cudaEvent_t gpu_start, gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start, 0);

    reduce<<<gridsize,blocksize>>>(deviceA, deviceB, deviceMean);
    cudaDeviceSynchronize();
    cudaMemcpy(hostB, deviceB, sizeof(unsigned long long), cudaMemcpyDeviceToHost);

    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);

    // BUG FIX: hostB[0]/N used integer division, truncating the variance.
    double std_dev = pow(double(hostB[0])/N, 0.5);
    cout<<"Reduced array standard deviation is = "<<std_dev<<endl;
    std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;

    // CPU reference computation
    clock_t cpu_start = clock();
    unsigned long long sum = 0;
    for(int i=0;i<N;i++){
        long long d = hostA[i] - (*hostMean);
        sum += (unsigned long long)(d*d);
    }
    double std_dev_actual = pow(double(sum)/N, 0.5);
    printf("Actual value of standard deviation should be: %f \n", std_dev_actual);
    clock_t cpu_stop = clock();
    clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
    std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceMean);   // BUG FIX: was never freed
    // BUG FIX: these buffers come from malloc(), so they must be released
    // with free(); the original used delete[], which is undefined behaviour.
    free(hostB);
    free(hostA);
    free(hostMean);
}

// Block-level tree reduction of sum((input[i] - *mean)^2); each block's
// partial sum is added atomically into output[0].
__global__ void reduce(int* input, unsigned long long* output, int* mean)
{
    __shared__ unsigned long long shared_data[256];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // squared deviation fits in 64 bits (|d| < 65536 here); integer multiply
    // replaces the original pow(double, 2.0) round trip
    long long d = input[i] - *mean;
    shared_data[threadIdx.x] = (unsigned long long)(d*d);
    __syncthreads();
    // interleaved-addressing tree reduction
    for(int s=1; s<blockDim.x; s*=2)
    {
        int index = 2 * s * threadIdx.x;
        if (index < blockDim.x)
        {
            shared_data[index] += shared_data[index + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(output, shared_data[0]);
}
11,165
/* @file diffusion.cu
 * @brief 2D Solver for heat diffusion equation with fixed initial conditions written to run on GPU.
 *
 * This is a solver for diffusion equation using the method of finite differences. The linear system
 * of the method is solved using conjugate gradients. It solves the equation du/dt - lambda*L^2u = 0
 * where L is the Laplacian operator, u is temperature and t is time. It solves this equation in
 * time and space. The time domain is defined by tfinal, that is [0,tfinal]. The spatial domain is
 * defined by step and offset, where step is the distance of the gridpoints in meters and offset is
 * the number of grid points in both directions ( the grid is a square ). The initial condition is
 * hardcoded into the procedure initializeMatrix. The boundary condition is hardcoded in the
 * procedure run. All of the input of this program is hardcoded - the intention was to demonstrate
 * the performance difference between C and CUDA code, not to make a general solver. The output of
 * this program is a set of files named matrix<t>.csv containing the 2D temperature function at time
 * t.
 * @author Richard Finger
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>

#define BLOCK_SIZE 512 // CUDA block size
#define offset 512     // determines dimension of spatial grid
#define step 0.02      // spatial discretization step

const int tfinal = 50;                        // simulation starts at t=0 and ends at tfinal
const float lambda = 0.001 / (step*step);     // lambda from the heat equation
const int dim = offset + 2;                   // dimension of spatial grid (incl. boundary)
const int length = offset * offset;           // length of the storage vector

__global__ void cudamv(float* res,float* y)
{
    /* @brief Multiply vector y by matrix A of the system of linear equations on GPU.
     *
     * The matrix A does not change during the simulation (it encodes the implicit
     * 5-point Laplacian stencil), so it is hardcoded here for performance. Each
     * block stages its BLOCK_SIZE elements plus an 'offset'-wide halo on each
     * side in shared memory.
     *
     * @param[in]  y   Pointer to input vector y
     * @param[out] res Pointer to output vector res
     * @return void
     */
    __shared__ float x[BLOCK_SIZE + 2*offset];
    int g_i = threadIdx.x + blockIdx.x * blockDim.x;
    int i = threadIdx.x + offset;
    x[i] = y[g_i];
    if(threadIdx.x < offset) {
        /* BUG FIX: the lower-halo guard was '(g_i - offset) > 0', which zeroed
         * the neighbour that maps to global index 0 (needed by element g_i ==
         * offset). The correct condition is >= 0, i.e. '> -1'. */
        x[threadIdx.x] = (g_i - offset) > -1 ? y[(g_i - offset)] : 0;
        x[threadIdx.x + BLOCK_SIZE + offset] = (g_i + BLOCK_SIZE) < length ? y[g_i + BLOCK_SIZE] : 0;
    }
    __syncthreads();

    float result = 0;
    int tmp = g_i % offset;   // column index, used to detect row boundaries
    result += (1 + 4*lambda) * x[i];
    result += (-lambda) * ((g_i+1 < length && tmp < offset -1) ? x[i+1]      : 0);
    result += (-lambda) * ((g_i+offset < length )              ? x[i+offset] : 0);
    result += (-lambda) * ((g_i-1 > -1 && tmp > 0 )            ? x[i-1]      : 0);
    result += (-lambda) * ((g_i-offset > -1)                   ? x[i-offset] : 0);
    res[g_i] = result;
}

__global__ void cudadot(float* res,float* a, float* b)
{
    /* @brief Calculate per-block partial dot products of two vectors on GPU.
     *
     * Launched with BLOCK_SIZE/2 threads per block; each thread multiplies two
     * strided elements, then a shared-memory tree reduction leaves the block's
     * partial sum in res[blockIdx.x]. The host finishes the sum in dot().
     *
     * @param[in]  a   Pointer to input vector a
     * @param[in]  b   Pointer to input vector b
     * @param[out] res Scratch/output: partial sums, one per block
     * @return void
     */
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    for(int i = 0; i < 2; i++) {
        res[index + i*length/2] = a[index + i*length/2] * b[index + i*length/2];
    }
    __shared__ volatile float sdata[BLOCK_SIZE];
    unsigned int t_id = threadIdx.x;
    sdata[t_id] = res[index] + res[index + length/2];
    __syncthreads();
    if(BLOCK_SIZE >= 256) {
        if(t_id < 128) { sdata[t_id] += sdata[t_id + 128]; }
        __syncthreads();
    }
    if(BLOCK_SIZE >= 128) {
        if(t_id < 64) { sdata[t_id] += sdata[t_id + 64]; }
        __syncthreads();
    }
    // final warp: relies on volatile shared memory for lockstep reduction
    // (legacy warp-synchronous idiom; NOTE(review): needs __syncwarp() on
    // Volta+ to be strictly safe)
    if(t_id < 32) {
        sdata[t_id] += sdata[t_id + 32];
        sdata[t_id] += sdata[t_id + 16];
        sdata[t_id] += sdata[t_id + 8];
        sdata[t_id] += sdata[t_id + 4];
        sdata[t_id] += sdata[t_id + 2];
        sdata[t_id] += sdata[t_id + 1];
    }
    if(t_id == 0)
        res[blockIdx.x] = sdata[0];
}

float dot(float* d_res,float* d_a,float* d_b)
{
    /* @brief Calculate the dot product of two device vectors.
     *
     * @param[in] d_a   Pointer to input vector d_a
     * @param[in] d_b   Pointer to input vector d_b
     * @param[in] d_res Pointer to intermediate result vector d_res
     * @return Dot product of d_a and d_b
     */
    cudadot<<<length/BLOCK_SIZE,BLOCK_SIZE/2>>>(d_res,d_a,d_b);
    float* res = (float*) malloc(sizeof(float)*length/BLOCK_SIZE);
    cudaMemcpy(res,d_res,sizeof(float)*length/BLOCK_SIZE,cudaMemcpyDeviceToHost);
    float result = 0;
    for(int i = 0; i < length/BLOCK_SIZE; i++) {
        result += res[i];
    }
    free(res);
    return result;
}

float norm(float* res, float* a)
{
    /* @brief Calculate the SQUARED Euclidean norm of a device vector.
     *
     * Note: no square root is taken, so the CG loop in matrixSolver compares
     * ||r||^2 (not ||r||) against epsilon.
     *
     * @param[in] a   Pointer to input vector a
     * @param[in] res Pointer to intermediate result vector res
     * @return Squared norm of vector a
     */
    return dot(res,a,a);
}

__global__ void cudaadd(float* res, float* a, float* b)
{
    /* @brief res = a + b (element-wise) on GPU. */
    unsigned int g_id = blockIdx.x*blockDim.x + threadIdx.x;
    res[g_id] = a[g_id] + b[g_id];
}

__global__ void cudasub(float* res, float* a, float* b)
{
    /* @brief res = a - b (element-wise) on GPU. */
    unsigned int g_id = blockIdx.x*blockDim.x + threadIdx.x;
    res[g_id] = a[g_id] - b[g_id];
}

__global__ void cudamul(float* res, float alpha, float* b)
{
    /* @brief res = alpha * b (scalar times vector) on GPU. */
    unsigned int g_id = blockIdx.x*blockDim.x + threadIdx.x;
    res[g_id] = alpha * b[g_id];
}

void matrixSolver(float* host_xnew,float* b)
{
    /* @brief Solve the linear system Ax=b using the conjugate gradients method.
     *
     * The matrix A is fixed (defined by the discretization) and hardcoded in
     * cudamv. Iterates until the squared residual norm drops below epsilon.
     *
     * @param[in]  b         Pointer to right hand side - array of size length
     * @param[out] host_xnew Solution of the linear system
     * @return void
     */
    float epsilon = 0.001;   // precision of the solution (on ||r||^2)
    int size = length * sizeof(float);
    float* xold; float* xnew;
    float* rold; float* rnew;
    float* pold; float* pnew;
    float* tmp;  float* mult; float* res;
    float* d_b;
    cudaMalloc((void**) &d_b , size);
    cudaMalloc((void**) &xold, size);
    cudaMalloc((void**) &xnew, size);
    cudaMalloc((void**) &rold, size);
    cudaMalloc((void**) &rnew, size);
    cudaMalloc((void**) &pold, size);
    cudaMalloc((void**) &pnew, size);
    cudaMalloc((void**) &tmp , size);
    cudaMalloc((void**) &mult, size);
    cudaMalloc((void**) &res , size);
    cudaMemset(d_b , 0, size);
    cudaMemset(xold, 0, size);
    cudaMemset(xnew, 0, size);
    cudaMemset(rold, 0, size);
    cudaMemset(rnew, 0, size);
    cudaMemset(pold, 0, size);
    cudaMemset(pnew, 0, size);
    cudaMemset(tmp , 0, size);
    cudaMemset(mult, 0, size);
    cudaMemset(res , 0, size);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);

    // r0 = b - A*x0 ; p0 = r0
    cudamv<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(tmp,xold);
    cudasub<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(rold,d_b,tmp);
    cudaMemcpy(pold,rold,size,cudaMemcpyDeviceToDevice);

    float alpha = 0;
    float beta = 0;
    while(norm(res,rold) > epsilon) {
        cudamv<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(mult,pold);            // mult = A*p
        alpha = dot(res,rold,rold)/dot(res,mult,pold);
        /* IMPROVEMENT: the original overwrote mult with alpha*p and then
         * launched cudamv a second time to recompute A*p. Using tmp for the
         * scaled vectors keeps mult = A*p alive and saves one matrix-vector
         * product per iteration; the arithmetic is unchanged. */
        cudamul<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(tmp,alpha,pold);      // tmp = alpha*p
        cudaadd<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(xnew,xold,tmp);       // x' = x + alpha*p
        cudamul<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(tmp,-alpha,mult);     // tmp = -alpha*A*p
        cudaadd<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(rnew,rold,tmp);       // r' = r - alpha*A*p
        beta = dot(res,rnew,rnew)/dot(res,rold,rold);
        cudamul<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(tmp,beta,pold);       // tmp = beta*p
        cudaadd<<<length/BLOCK_SIZE,BLOCK_SIZE>>>(pnew,rnew,tmp);       // p' = r' + beta*p
        cudaMemcpy(rold,rnew,size,cudaMemcpyDeviceToDevice);
        cudaMemcpy(pold,pnew,size,cudaMemcpyDeviceToDevice);
        cudaMemcpy(xold,xnew,size,cudaMemcpyDeviceToDevice);
    }
    cudaMemcpy(host_xnew,xnew,size,cudaMemcpyDeviceToHost);
    cudaFree(xold); cudaFree(xnew);
    cudaFree(pold); cudaFree(rold);
    cudaFree(pnew); cudaFree(rnew);
    cudaFree(tmp);  cudaFree(mult);
    cudaFree(res);  cudaFree(d_b);
    return ;
}

void printMatrix(int iFile, float uold[][dim])
{
    /* @brief Print matrix into a file called matrix<iFile>.csv .
     *
     * @param[in] iFile File number (1..999 fit into the buffer)
     * @param[in] uold  Matrix to print
     * @return void
     */
    char str[14];
    /* IMPROVEMENT: replaces the original hand-rolled digit formatting with
     * snprintf; produces identical filenames. */
    snprintf(str, sizeof(str), "matrix%d.csv", iFile);
    FILE* f = fopen(str,"w");
    for(int i = 0; i < dim; i++) {
        for(int j = 0; j < dim; j++) {
            fprintf(f, "%f, ",uold[i][j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
}

void initializeMatrix(float uold[][dim])
{
    /* @brief Initializes the solution matrix - the initial conditions are hard coded.
     *
     * Sets uold to -1 everywhere except in two ellipses centered at [10,10]
     * and [25,30] where the value is 10.
     *
     * @param[out] uold Solution matrix
     * @return void
     */
    for(int i = 0; i < dim; i++) {
        for(int j = 0; j < dim; j++) {
            if( ((i - 10)*(i - 10) < 10 && (j - 10)*(j - 10) < 17 ) ||
                ((i - 25)*(i - 25) < 17 && (j - 30)*(j - 30) < 10 ) ) {
                uold[i][j] = 10;
            } else {
                uold[i][j] = -1;
            }
        }
    }
}

void run()
{
    /* @brief Runs the algorithm to solve the 2D heat equation and prints the results.
     *
     * Drives the solver: initializes uold, per time step builds the right hand
     * side (folding in the fixed boundary value -1), solves the linear system,
     * re-applies the boundary condition and writes matrix<t>.csv.
     * @return void
     */
    /* ROBUSTNESS: ~4 MB of buffers; static keeps them off the stack
     * (behaviour unchanged - run() is called once, single-threaded). */
    static float uold[dim][dim];
    static float unew[dim][dim];
    static float b[length];
    static float x[length];
    int t = 0;
    int iFile = 1;
    initializeMatrix(uold);
    while(t <= tfinal) {
        printf("Calculating t = %d\n",t);
        // Initialize the right hand side b (boundary cells contribute -lambda)
        for(int k = 0; k < length; k++) {
            int j2 = k % (dim-2);
            int i2 = (k-j2) / (dim-2);
            b[k] = uold[i2+1][j2+1];
            if(i2 == 0 || i2 == dim-3) b[k]+= - lambda;
            if(j2 == 0 || j2 == dim-3) b[k]+= - lambda;
        }
        matrixSolver(x,b);
        // Translate 1D vector into 2D matrix
        for(int k = 0; k < length; k++) {
            int j2 = k % (dim-2);
            int i2 = (k-j2)/(dim-2);
            unew[i2+1][j2+1] = x[k];
        }
        // Set boundary condition and update solution
        for(int i = 0; i < dim; i++) {
            for(int j = 0; j < dim; j++) {
                if(i == 0 || i== dim-1 || j == 0 || j == dim-1) {
                    uold[i][j] = -1;
                } else {
                    uold[i][j] = unew[i][j];
                }
            }
        }
        t++;
        if(0 == t % 1 ) {
            printMatrix(iFile,uold);
            iFile++;
        }
    }
}

int main(int argc,char** argv)
{
    /* @brief Main entry point - runs the hardcoded simulation. */
    run();
    return 0;
}
11,166
#include <stdio.h>
#include <stdlib.h>

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
    int width;
    int height;
    float *elements;
} Matrix;

// Thread block size
#define BLOCK_SIZE 16

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Host driver: copies A and B to the device, launches the element-wise
// multiply kernel and copies the result back into C.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    cudaSetDevice(0);
    cudaDeviceSynchronize();
    size_t available, total;
    cudaMemGetInfo(&available, &total);
    // BUG FIX: labels and arguments were swapped (cudaMemGetInfo returns the
    // FREE byte count first); %zu is the correct format for size_t.
    printf("Mem total: %zu Bytes\nMem available: %zu Bytes\n", total, available);

    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width;
    size_t size = A.width * sizeof(float);
    printf("size of A: %zu\n", size);
    cudaMalloc(&d_A.elements, size);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: allocation A %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B;
    cudaMalloc(&d_B.elements, size);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: allocation B %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    cudaMalloc(&d_C.elements, size);
    // BUG FIX: the status was previously captured BEFORE the cudaMalloc above,
    // so an allocation failure for C was never detected.
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: allocation C %s\n", cudaGetErrorString(error));
        exit(-1);
    }

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, 1);
    dim3 dimGrid(B.width / BLOCK_SIZE, 1);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: calculation error %s\n", cudaGetErrorString(error));
        exit(-1);
    }

    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // BUG FIX: refresh the status here; the original re-tested the stale
    // value from before the copy.
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: copying C %s\n", cudaGetErrorString(error));
        exit(-1);
    }

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Kernel called by MatMul(): despite the name, this performs an ELEMENT-WISE
// product C[i] = A[i] * B[i], one thread per element.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    C.elements[id] = A.elements[id] * B.elements[id];
}

// Uniform-ish random digit in [0, 10].
int myrand()
{
    return rand() / (RAND_MAX / 10);
}

int main()
{
    // A x B (element-wise)
    srand(0);
    Matrix A, B, C;
    A.height = B.height = C.height = 1;
    A.width = B.width = C.width = 1 * BLOCK_SIZE;
    A.elements = (float *)malloc(A.width * sizeof(float));
    B.elements = (float *)malloc(B.width * sizeof(float));
    C.elements = (float *)malloc(C.width * sizeof(float));

    printf("Content of A: \n");
    for (int j = 0; j < A.width; j++)
    {
        A.elements[j] = myrand();
        printf("%2d", (int)A.elements[j]);
    }
    printf("\n");

    printf("\n\nContent of B: \n");
    for (int j = 0; j < B.width; j++)
    {
        B.elements[j] = myrand();
        printf("%2d", (int)B.elements[j]);
    }
    printf("\n");

    MatMul(A, B, C);

    printf("\n\nContent of C: \n");
    for (int j = 0; j < C.width; j++)
    {
        printf("%3d", (int)C.elements[j]);
    }
    printf("\n");
    return 0;
}
11,167
//
// Created by Chen on 11/8/2020.
//
#include <cstdio>

// Cached limit for device 0; filled in by cudaInit().
int maxThreadsPerblock;

// Query device 0 and cache its maxThreadsPerBlock limit.
// On failure the error is logged and the global is left untouched.
void cudaInit()
{
    cudaDeviceProp prop{};
    // BUG FIX: report the status returned by the failing call itself rather
    // than re-querying (and clearing) the global error state afterwards.
    cudaError_t status = cudaGetDeviceProperties(&prop, 0);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "CUDA init error: %s\n", cudaGetErrorString(status));
        return;
    }
    maxThreadsPerblock = prop.maxThreadsPerBlock;
}
11,168
#include <cmath>    /* log2() */
#include <cstdint>  /* int64_t, uint64_t */
#include <cstdlib>  /* srand(), rand() */
#include <ctime>    /* time() */
#include <iostream> /* std::cout, std::endl */

#include "../include/utils_device.cuh" //INCLUDE HEADER FILE

/**
 * Return vector with each element of the input at its bit-reversed position
 *
 * @param vec The vector to bit reverse
 * @param n   The length of the vector, must be a power of two
 * @return The bit reversed vector (device-heap allocated), or NULL if the
 *         device heap allocation failed
 */
__device__ uint64_t *bit_reverse_D(uint64_t *vec, uint64_t n){
    uint64_t num_bits = log2_D(n);
    uint64_t *result = (uint64_t *) malloc(n*sizeof(uint64_t));
    if(result == NULL){   // device-side malloc can fail (heap exhausted)
        return NULL;
    }
    uint64_t reverse_num;
    for(uint64_t i = 0; i < n; i++){
        reverse_num = 0;
        for(uint64_t j = 0; j < num_bits; j++){
            reverse_num = reverse_num << 1;
            /* BUG FIX: '1 << j' shifts a 32-bit int, which is undefined for
             * j >= 31 and silently wrong for 64-bit lengths; use 1ULL. */
            if(i & (1ULL << j)){
                reverse_num = reverse_num | 1;
            }
        }
        result[reverse_num] = vec[i];
    }
    return result;
}

/**
 * Perform the operation 'base^exp (mod m)' using the memory-efficient
 * square-and-multiply method
 *
 * @param base The base of the expression
 * @param exp  The exponent of the expression
 * @param m    The modulus of the expression
 * @return The result of the expression
 */
__device__ uint64_t modExp_D(uint64_t base, uint64_t exp, uint64_t m){
    uint64_t result = 1;
    while(exp > 0){
        if(exp % 2){
            result = modulo_D(result*base, m);
        }
        exp = exp >> 1;
        base = modulo_D(base*base,m);
    }
    return result;
}

/**
 * Perform the operation 'base (mod m)', returning a non-negative residue
 *
 * @param base The base of the expression
 * @param m    The modulus of the expression
 * @return The result of the expression
 */
__device__ uint64_t modulo_D(int64_t base, int64_t m){
    int64_t result = base % m;
    return (result >= 0) ? result : result + m;
}

__device__ uint64_t log2_D( uint64_t n){
    // NOTE(review): single-precision log2 is exact for the power-of-two
    // lengths used by bit_reverse_D, but loses precision for general
    // n > 2^24 — confirm before using elsewhere.
    return log2(float(n));
};

__device__ uint64_t pow_D( uint64_t base,uint64_t power){
    // NOTE(review): single-precision pow truncates for results >= 2^24 —
    // confirm callers only use small arguments.
    return pow(float(base),float(power));
};
11,169
#define NUMBER_OF_ELEMENTS 5
#define BLOCK_DIM 1024
#define OFFSET 0
// When MAX_PERM = 0, means find all permutations
#define MAX_PERM 0
// Each thread unranks one permutation, then derives this many more from it
// with next_permutation (so one thread produces NEXT_PERM_LOOP+1 rows).
#define NEXT_PERM_LOOP 1

// Precomputed unranking table in constant memory (broadcast-friendly since
// every thread reads the same entries in lockstep).
__constant__ long long arr[20][20] = { /*Not shown here to save space*/ };

// function to swap character
// a - the character to swap with b
// b - the character to swap with a
__device__ void swap( char* a, char* b)
{
    char tmp = *a;
    *a = *b;
    *b = tmp;
}

// function to reverse the array (sub array in array)
// first - 1st character in the array (sub-array in array)
// last - 1 character past the last character
__device__ void reverse( char* first, char* last)
{
    // close in from both ends, swapping, until the pointers meet
    for (; first != last && first != --last; ++first)
        swap(first, last);
}

// function to find the next permutation (sub array in array)
// first - 1st character in the array (sub-array in array)
// last - 1 character past the last character
__device__ void next_permutation( char* first, char* last)
{
    char* next = last;
    --next;
    // ranges of length 0 or 1 have no successor
    if(first == last || first == next)
        return;
    while(true)
    {
        char* next1 = next;
        --next;
        // found the rightmost ascent [next, next1)
        if(*next < *next1)
        {
            char* mid = last;
            --mid;
            // find rightmost element greater than *next
            for(; !(*next < *mid); --mid)
                ;
            swap(next, mid);
            // the suffix is descending; reversing yields the minimal tail
            reverse(next1, last);
            return;
        }
        // whole range descending: wrap around to the first permutation
        if(next == first)
        {
            reverse(first, last);
            return;
        }
    }
}

// Kernel: each thread unranks permutation number (thread index *
// (NEXT_PERM_LOOP+1) + *offset) of {0..NUMBER_OF_ELEMENTS-1} into arrDest,
// then generates NEXT_PERM_LOOP successors of it in the following rows.
// arrDest - output, NUMBER_OF_ELEMENTS chars per permutation, row-major
// offset  - global rank of the first permutation handled by thread 0
// Max     - total number of permutations to produce across the grid
__global__ void PermuteHybrid(char* arrDest, long long* offset, long long* Max)
{
    long long index = threadIdx.x + blockIdx.x * blockDim.x;
    // each thread covers NEXT_PERM_LOOP+1 permutations, so only
    // Max/(NEXT_PERM_LOOP+1) threads do work
    if(index >= (*Max/(NEXT_PERM_LOOP+1)))
        return;
    index *= NEXT_PERM_LOOP+1;
    long long tmpindex = index;   // local (output-row) rank of this thread's first permutation
    index += *offset;             // global rank to unrank
    char arrSrc[NUMBER_OF_ELEMENTS];    // the identity arrangement 0..N-1
    char arrTaken[NUMBER_OF_ELEMENTS];  // 1 = element already placed
    for(char i=0; i<NUMBER_OF_ELEMENTS; ++i)
    {
        arrSrc[i] = i;
        arrTaken[i] = 0;
    }
    char size = NUMBER_OF_ELEMENTS;   // elements still to place
    // unrank: for each output position pick the j-th remaining element,
    // where j is determined from the constant table arr (factorial-style
    // digits); index is reduced by the table entry as we go
    for(char i=NUMBER_OF_ELEMENTS-1; i>=0; --i)
    {
        for(char j=i; j>=0; --j)
        {
            if(index >= arr[i][j])
            {
                char foundcnt = 0;    // counts not-yet-taken elements seen
                index = index - arr[i][j];
                for(char k=0;k<NUMBER_OF_ELEMENTS; ++k)
                {
                    if(arrTaken[k]==0) // not taken
                    {
                        if(foundcnt==j)
                        {
                            arrTaken[k] = 1; // set to taken
                            arrDest[ (tmpindex*NUMBER_OF_ELEMENTS) + (NUMBER_OF_ELEMENTS-size) ] = arrSrc[k];
                            break;
                        }
                        foundcnt++;
                    }
                }
                break;
            }
        }
        --size;
    }
    // derive the remaining NEXT_PERM_LOOP rows: copy the previous row and
    // advance it in-place with next_permutation
    long long idx = tmpindex*NUMBER_OF_ELEMENTS;
    for(char a=1; a<NEXT_PERM_LOOP+1; ++a)
    {
        long long idx2 = a*NUMBER_OF_ELEMENTS;
        for(char i=0; i<NUMBER_OF_ELEMENTS; ++i)
        {
            arrDest[ idx + idx2 + i ] = arrDest[ idx + ((a-1)*NUMBER_OF_ELEMENTS) + i ];
        }
        next_permutation(arrDest + idx + idx2, arrDest+idx + idx2 + NUMBER_OF_ELEMENTS);
    }
}
11,170
//optimization homework #4 cs 677 Theodore Jagodits
#include <stdio.h>
#include <stdlib.h>
#include "string.h"
#include <math.h>
#include <iostream>

#define DEFAULT_SIZE 128

// Abort with a readable message when a CUDA runtime call fails.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error during %s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// One thread per row `id` of the size x size matrix inp2.
// For each column j the thread writes the running prefix sum of its row of
// inp2, then adds sum_k inp1[j]*inp1[k], into result[id*size + j].
__global__ void unknown_algo(float *inp1, float *inp2, float *result, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size) return;            // guard the grid tail
    float temp = 0.0f;                 // running prefix sum of row `id`
    for (int j = 0; j < size; j++) {
        temp += inp2[id * size + j];
        result[id * size + j] = temp;
        // NOTE: the inner sum is kept as-is to preserve the homework's
        // floating-point accumulation order.
        for (int k = 0; k < size; k++) {
            result[id * size + j] += inp1[j] * inp1[k];
        }
    }
}

int main( int argc, char **argv )
{
    int size = DEFAULT_SIZE;

    // Byte sizes for the vector input and the square matrices.
    int input1_bytes = size * sizeof(float);
    int num_bytes = size * size * sizeof(float);

    // Host buffers.
    float *h_input1 = (float *) malloc(input1_bytes);
    float *h_input2 = (float *) malloc(num_bytes);
    float *h_result = (float *) malloc(num_bytes);
    if (!h_input1 || !h_input2 || !h_result) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Device buffers.  BUG FIX: these pointers were previously also
    // malloc'd on the host and then overwritten by cudaMalloc, leaking the
    // host blocks; cudaMalloc alone is correct.
    float *d_input1 = NULL, *d_input2 = NULL, *d_result = NULL;
    checkCuda(cudaMalloc(&d_input1, input1_bytes), "cudaMalloc d_input1");
    checkCuda(cudaMalloc(&d_input2, num_bytes), "cudaMalloc d_input2");
    checkCuda(cudaMalloc(&d_result, num_bytes), "cudaMalloc d_result");

    // Fill inputs with ones.
    for (int o = 0; o < size; o++) {
        h_input1[o] = 1;
        for (int p = 0; p < size; p++) {
            h_input2[size * o + p] = 1;
        }
    }
    printf("here\n");

    checkCuda(cudaMemcpy(d_input1, h_input1, input1_bytes, cudaMemcpyHostToDevice), "copy input1");
    checkCuda(cudaMemcpy(d_input2, h_input2, num_bytes, cudaMemcpyHostToDevice), "copy input2");

    // One thread per row; ceil-divide in case size is not a multiple.
    int block_size = 128;
    int grid_size = (int)ceil((float)size / block_size);
    unknown_algo<<< grid_size, block_size >>>(d_input1, d_input2, d_result, size);
    checkCuda(cudaGetLastError(), "kernel launch");

    checkCuda(cudaMemcpy(h_result, d_result, num_bytes, cudaMemcpyDeviceToHost), "copy result");

    // Print the full result matrix.
    for (int o = 0; o < size; o++) {
        for (int p = 0; p < size; p++) {
            printf("%lf ", h_result[o * size + p]);
        }
        printf("\n");
    }

    free(h_input1);
    free(h_input2);
    free(h_result);
    cudaFree(d_input1);
    cudaFree(d_input2);
    cudaFree(d_result);
    return 0;
}
11,171
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> void addArray(int* a, int size, int* res) { int i = 0; for (i = 1; i < size; i++) { a[0] += a[i]; } *res = a[0]; } __global__ void kernel(int* a, int size, int* res) { int idx= blockDim.x*blockIdx.x + threadIdx.x; for (int i = 1; i <= size / 2; i *= 2) { if (idx % (2 * i) == 0) { printf("BEFORE [Thread %d]: %d\n", idx, a[idx]); a[idx] += a[idx + i]; printf("AFTER [Thread %d]: %d\n", idx, a[idx]); } else { printf("[Thread %d] returning\n", idx); return; } __syncthreads(); } *res = a[idx]; } int main() { int N = 8; int *h_arr, *d_arr, *h_res, *d_res; cudaEvent_t start, stop; float time; h_arr = (int*)malloc(N * sizeof(int)); h_res = (int*)malloc(sizeof(int)); cudaMalloc(&d_arr, N*sizeof(int)); cudaMalloc(&d_res, sizeof(int)); for (int i = 0; i < N; i++) { h_arr[i] = i; printf("[%d]", i); } cudaMemcpy(d_arr, h_arr, N * sizeof(int), cudaMemcpyHostToDevice); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); kernel<<<1, 8>>>(d_arr, N, d_res); cudaMemcpy(h_res, d_res, sizeof(int), cudaMemcpyDeviceToHost); //addArray(h_arr, 8, h_res); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Result: %d, Time: %f", *h_res, time); cudaEventDestroy(start); cudaEventDestroy(stop); getchar(); return 0; }
11,172
#include "cuda_MP6.cuh" //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// int cuda_MP6(int argc, char* argv[]) { int errorM = 0; float device_time; float host_time; int* size = NULL; //(int*)malloc(1 * sizeof(int)); unsigned int data2read = 1; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof(float) * num_elements; float* h_data = (float*)malloc(mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicate the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. 
switch (argc - 1) { case 2: // Determine size of array // cutReadFilei(argv[1], &size, &data2read, true); data2read = ReadFileData_MP6(size, argv[1], 1); if (data2read != 1) { printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof(float) * num_elements; h_data = (float*)malloc(mem_size); for (unsigned int i = 0; i < num_elements; ++i) { h_data[i] = (int)(rand() % MAX_RAND); } WriteFile_MP6(h_data, argv[2], num_elements); break; case 3: // Three Arguments // cutReadFilei(argv[1], &size, &data2read, true); data2read = ReadFileData_MP6(size, argv[1], 1); if (data2read != 1) { printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof(float) * num_elements; h_data = (float*)malloc(mem_size); // errorM = ReadFile(h_data, argv[2], size[0]); errorM = ReadFileData_MP6(h_data, argv[2], size[0]); if (errorM != 1) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof(float) * num_elements; h_data = (float*)malloc(mem_size); // initialize the input data on the host for (unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } clock_t timer; // compute reference solution float* reference = (float*)malloc(mem_size); timer = clock(); computeGold_MP6(reference, h_data, num_elements); printf("\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); host_time = clock() - timer; printf("Host CPU Processing time: %f (ms)\n", 1000*((float)host_time) / CLOCKS_PER_SEC); // allocate device memory input and output arrays float* 
d_idata = NULL; float* d_odata = NULL; cudaMalloc((void**)&d_idata, mem_size); cudaMalloc((void**)&d_odata, mem_size); // copy host memory to device input array cudaMemcpy(d_idata, h_data, mem_size, cudaMemcpyHostToDevice); // initialize all the other device arrays to be safe cudaMemcpy(d_odata, h_data, mem_size, cudaMemcpyHostToDevice); // **===-------- MP4.2 - Allocate data structure here -----------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray_v1(d_odata, d_idata, 16); // Run the prescan timer = clock(); // **===-------- MP4.2 - Modify the body of this function -----------===** prescanArray_v1(d_odata, d_idata, num_elements); // **===-----------------------------------------------------------===** cudaThreadSynchronize(); device_time = clock() - timer; printf("CUDA Processing time: %f (ms)\n", 1000 * ((float)device_time) / CLOCKS_PER_SEC); printf("Speedup: %fX\n", device_time / host_time); // **===-------- MP4.2 - Deallocate data structure here -----------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // copy result from device to host cudaMemcpy(h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost); if ((argc - 1) == 3) // Three Arguments, write result to file { WriteFile_MP6(h_data, argv[3], num_elements); } else if ((argc - 1) == 1) // One Argument, write result to file { WriteFile_MP6(h_data, argv[1], num_elements); } /* cout << "i, h_data, ref, cuda calculations: \n"; for (int i = 0; i < 514; i++) { cout << i << " " << reference[i] << " " << h_data[i] << endl; } */ // Check if the result is equivalent to the expected soluion // unsigned int result_regtest = cutComparef(reference, h_data, num_elements); bool result_regtest = compareGold_MP6(reference, h_data, num_elements); printf("Test %s\n", (1 == result_regtest) ? 
"PASSED" : "FAILED"); // cleanup memory free(h_data); free(reference); cudaFree(d_odata); cudaFree(d_idata); return 0; } int ReadFileData_MP6(float* M, char* file_name, int size) { unsigned int data_read = size; // cutReadFilef(file_name, &(M->elements), &data_read, true); ifstream iFile(file_name); unsigned i = 0; if (iFile) { float data; while (iFile >> data) { M[i++] = data; } } return (i != data_read); } int ReadFileData_MP6(int* M, char* file_name, int size) { unsigned int data_read = size; // cutReadFilef(file_name, &(M->elements), &data_read, true); ifstream iFile(file_name); unsigned i = 0; if (iFile) { int data; while (iFile >> data) { M[i++] = data; } } return (i != data_read); } void WriteFile_MP6(float* M, char* file_name, int size) { // cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); ofstream oFile(file_name); if (oFile) { for (int i = 0; i < size; i++) { oFile << M[i] << " "; } oFile.close(); } } //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set //! Each element is the sum of the elements before it in the array. //! @param reference reference data, computed but preallocated //! @param idata input data as provided to device //! @param len number of elements in reference / idata //////////////////////////////////////////////////////////////////////////////// void computeGold_MP6(float* reference, float* idata, const unsigned int len) { // inclusive scan /* double total_sum = 0; for (unsigned int i = 0; i < len; ++i) { total_sum += idata[i]; reference[i] = total_sum; } */ // exclusive scan reference[0] = 0; double total_sum = 0; for (unsigned int i = 1; i < len; i++) { total_sum += idata[i - 1]; reference[i] = idata[i - 1] + reference[i - 1]; } if (total_sum != reference[len - 1]) printf("Warning: exceeding single-precision accuracy. 
Scan will be inaccurate.\n"); } bool compareGold_MP6(float* ref, const float* C, const unsigned int N) { double precision = 0.00001f; for (int i = 0; i < N; i++) { if (abs(ref[i] - C[i]) > precision) { cout << i << ": " << ref[i] << ", " << C[i] << endl; return false; } } return true; } #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 // MP4.2 - You can use any other block size you wish. #define BLOCK_SIZE_MP6 256 // MP4.2 - Host Helper Functions (allocate your own data structure...) // MP4.2 - Device Functions // MP4.2 - Kernel Functions // **===-------- MP4.2 - Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArray_v1(float *outArray, float *inArray, int numElements) { int num_block = ceil((float)numElements / (2 * BLOCK_SIZE_MP6)); dim3 dimGrid, dimBlock; dimGrid.x = num_block; dimGrid.y = dimGrid.z = 1; dimBlock.x = BLOCK_SIZE_MP6; dimBlock.y = dimBlock.z = 1; float* sumArray = NULL; cudaMalloc((void**)&sumArray, sizeof(float) * num_block); reduction_kernel_MP6 << <dimGrid, dimBlock >> > (outArray, inArray, sumArray, numElements); cudaDeviceSynchronize(); dimGrid.x = 1; post_scan_kernel_MP6 << <dimGrid, dimBlock >> > (sumArray, num_block); cudaDeviceSynchronize(); dimGrid.x = num_block; post_process_kernel_MP6 << <dimGrid, dimBlock >> > (outArray, sumArray, numElements); cudaDeviceSynchronize(); cudaFree(sumArray); } // **===-----------------------------------------------------------===** __global__ void reduction_kernel_MP6(float *out_data, float *in_data, float *sum_data, int n) { __shared__ float ds_data[2 * BLOCK_SIZE_MP6]; unsigned int tx = threadIdx.x; unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; ds_data[2 * tx] = (2 * id < n ? in_data[2 * id] : 0); ds_data[2 * tx + 1] = (2 * id + 1 < n ? 
in_data[2 * id + 1] : 0); for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); unsigned int index = (tx + 1) * 2 * stride - 1; if (index < 2 * blockDim.x) ds_data[index] += ds_data[index - stride]; } __syncthreads(); if (threadIdx.x == 0) sum_data[blockIdx.x] = ds_data[2 * blockDim.x - 1]; for (unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2) { __syncthreads(); unsigned int index = (tx + 1) * 2 * stride - 1; if (index + stride < 2 * blockDim.x) ds_data[index + stride] += ds_data[index]; } __syncthreads(); if (2 * id < n) out_data[2 * id] = ds_data[2 * tx]; if (2 * id + 1 < n) out_data[2 * id + 1] = ds_data[2 * tx + 1]; } __global__ void post_scan_kernel_MP6(float *sumArray, int n) { __shared__ float ds_data[2 * BLOCK_SIZE_MP6]; unsigned int tx = threadIdx.x; unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; ds_data[2 * tx] = (2 * id < n ? sumArray[2 * id] : 0); ds_data[2 * tx + 1] = (2 * id + 1 < n ? sumArray[2 * id + 1] : 0); for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); unsigned int index = (tx + 1) * 2 * stride - 1; if (index < 2 * blockDim.x) ds_data[index] += ds_data[index - stride]; } for (unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2) { __syncthreads(); unsigned int index = (tx + 1) * 2 * stride - 1; if (index + stride < 2 * blockDim.x) ds_data[index + stride] += ds_data[index]; } __syncthreads(); if (2 * id < n) sumArray[2 * id] = ds_data[2 * tx]; if (2 * id + 1 < n) sumArray[2 * id + 1] = ds_data[2 * tx + 1]; } __global__ void post_process_kernel_MP6(float *outArray, float *sum_data, int n) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if (blockIdx.x > 0) { if (2 * id < n) outArray[2 * id] += sum_data[blockIdx.x - 1]; if (2 * id + 1 < n) outArray[2 * id + 1] += sum_data[blockIdx.x - 1]; } }
11,173
#include "includes.h" __global__ void cosineSimilarityCuda(float3* pDotProducts, size_t pSize, float* results) { int instance = blockIdx.x * blockDim.x + threadIdx.x; while (instance < pSize) { results[instance] = pDotProducts[instance].y / (sqrtf(pDotProducts[instance].x)* sqrtf(pDotProducts[instance].z)); instance += gridDim.x; } }
11,174
#include "includes.h" #define TIME 5. #define TIME_STEP .1 #define STEP 1. #define K TIME_STEP / SQUARE(STEP) #define SQUARE(x) (x * x) #define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__)) __global__ void Kernel(double * device, const uint size) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i == 0) { device[i] = .0; } else if (i == size - 1) { device[size - 1] = device[size - 2] + 5 * STEP; } else if (i < size) { device[i] = (device[i + 1] - 2 * device[i] + device[i - 1]) * K + device[i]; } }
11,175
#include <cuda.h>
#include <cstdio>

// Minimal allocate/free smoke test with error checking.
// BUG FIX: the original ignored both return codes, so failures were silent.
// Returns nonzero on any CUDA runtime failure.
int main()
{
    void *d = nullptr;
    cudaError_t err = cudaMalloc(&d, 1024);
    if (err != cudaSuccess) {
        std::fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaFree(d);
    if (err != cudaSuccess) {
        std::fprintf(stderr, "cudaFree failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
11,176
/*#include <host_defines.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include "caffe/util/device_alternate.hpp" #include "caffe/util/math_functions.hpp" #ifndef nullptr #define nullptr NULL #endif // TODO: check offloading communication control logic in gpu paper for synchronisation techniques. // see cuStream{Write,Wait}Value32 enum status {none, waiting, requested, served, used}; struct data_exchange { //TODO: volatile variables status status;// = status::none; int requested_size;// = 0; int actual_size;// = 0; void* ptr;// = nullptr; }; void empty_data_exchange(data_exchange *); __device__ void blockUntil(data_exchange* dataExchange, status until) { while (true) { //TODO: read, with no race condition //TODO: CAS if (dataExchange->status == until) { return; } } } // From HOST to have a way to invoke data serving __global__ void waitForDataRequest(data_exchange* dataExchange) { // TODO: CAS dataExchange->status = waiting; blockUntil(dataExchange, requested); } // From HOST when data are served to the GPU __global__ void dataResponse(data_exchange* requestMount) { // blockUntil(requestMount, status::waiting); // TODO: copy things requestMount->status = served; blockUntil(requestMount, used); return; } __device__ void requestData(data_exchange* requestMount) { if (threadIdx.x == 0) { //TODO: write, with no race condition cas requestMount->status = requested; } } __global__ void data_layer_gpu(data_exchange* requestMount, int* ptr, cudaStream_t stream) { //TODO: How many things to transfer //TODO: Can assume count == blockDim.x atomicAdd(&requestMount->requested_size, 1); __syncthreads(); //cuStreamCreate(&stream); //cuStreamWriteValue32(stream, (CUdeviceptr)ptr, 42, CU_STREAM_WAIT_VALUE_GEQ); requestData(requestMount); __syncthreads(); //TODO: add image on the ith position in the blob //TODO: pass on the blob / return? 
} void thread2(data_exchange* cudaPrototype) { //execute listener //1 per block bool running = true; while(true) { // Wait for signal waitForDataRequest<<<1, 1>>>(cudaPrototype); // Acquire data void ** data; //data = getData(); // Push data GPU // TODO: copy things to gpu // Send response dataResponse<<<1, 1>>>(cudaPrototype); } } int main() { // Init data_exchange* prototype = nullptr; data_exchange* cudaPrototype = nullptr; prototype = (data_exchange*) malloc(sizeof(data_exchange)); CUDA_CHECK(cudaMalloc((void **)&cudaPrototype, sizeof(data_exchange))); empty_data_exchange(prototype); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); CUDA_CHECK(cudaMemcpy(cudaPrototype, prototype, sizeof(prototype), cudaMemcpyDefault)); //CAFFE_GET_BLOCKS(2); //CAFFE_CUDA_NUM_THREADS; int num_of_warps; for (int i = 0; i < num_of_warps; ++i) { // Todo: spawn threads thread2(cudaPrototype); } data_layer_gpu<<<1, 32>>>(cudaPrototype, (int*)cudaPrototype, stream); } void empty_data_exchange(data_exchange* data_exchage) { data_exchage->requested_size = 0; data_exchage->actual_size = 0; data_exchage->status = none; data_exchage->ptr = nullptr; }*/
11,177
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
// #include <iomanip>
#include <cstdlib>
#include <iostream>

// Exclusive prefix-sum benchmark: fills a host vector of size argv[1] with
// 1.1, scans it on the device with thrust::exclusive_scan, and prints the
// last scanned element followed by the elapsed GPU time in ms.
int main(int argc, char *argv[])
{
    // BUG FIX: argv[1] was dereferenced unconditionally, and n <= 0 later
    // caused an out-of-range h_vec[n-1] access.  Validate first.
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <n>" << std::endl;
        return 1;
    }
    long n = atol(argv[1]);
    if (n <= 0) {
        std::cerr << "n must be a positive integer" << std::endl;
        return 1;
    }

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Host input: n copies of 1.1 (same values as the original fill loop).
    thrust::host_vector<float> h_vec(n, 1.1f);

    thrust::device_vector<float> d_vec(h_vec.size());
    thrust::device_vector<float> d_vec1(h_vec.size());
    thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

    float init = 0.0;
    cudaEventRecord(start);
    // Exclusive scan from d_vec into d_vec1; only the scan is timed.
    thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec1.begin(), init, thrust::plus<float>());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms;
    cudaEventElapsedTime(&ms, start, stop);

    thrust::copy(d_vec1.begin(), d_vec1.end(), h_vec.begin());
    std::cout << h_vec[n - 1] << std::endl;
    std::cout << ms << std::endl;

    // BUG FIX: events were previously leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
11,178
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define N 3
#define M 3

// Host-side storage: the two N x M inputs and the (N*N) x (M*M) Kronecker
// (tensor) product result.
int matrixA[N][M], matrixB[N][M], matrixC[N*N][M*M];

// Device kernel: thread (i, j) owns entry A[i][j] and fills the m x m
// sub-block of the result scaled by that entry:
//   C[i*n + k][j*m + l] = A[i][j] * B[k][l].
__global__ void tensorProduct(int n, int m, int *matrixA, int *matrixB, int *result)
{
    int i = threadIdx.x;
    int j = threadIdx.y;
    int scale = matrixA[i * m + j];   // this thread's A entry

    for (int k = 0; k < n; k++) {
        for (int l = 0; l < m; l++) {
            result[(j * m + l) + (i * n + k) * m * m] = scale * matrixB[k * m + l];
        }
    }
}

// Copies the operands to the GPU, launches a single n x m thread block, and
// retrieves the n*n x m*m result into c.
void tensorProductDevice(int n, int m, int *a, int *b, int *c)
{
    int *devA, *devB, *devC;
    const int inputBytes = n * m * sizeof(int);
    const int outputBytes = n * n * m * m * sizeof(int);
    dim3 gridShape(1, 1);
    dim3 blockShape(n, m);

    // Device allocations.
    cudaMalloc(&devA, inputBytes);
    cudaMalloc(&devB, inputBytes);
    cudaMalloc(&devC, outputBytes);

    // Select device 0 and upload the operands.
    cudaSetDevice(0);
    cudaMemcpy(devA, a, inputBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, inputBytes, cudaMemcpyHostToDevice);

    tensorProduct<<<gridShape, blockShape>>>(n, m, devA, devB, devC);

    // Download the result, then release device memory.
    cudaMemcpy(c, devC, outputBytes, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
}

// Prints a rows x cols row-major matrix, tab separated, under a title line,
// followed by a blank line (same layout as the original inline loops).
static void printMatrix(const char *title, const int *m, int rows, int cols)
{
    printf("%s\n", title);
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            printf("%d\t", m[r * cols + c]);
        }
        printf("\n");
    }
    printf("\n");
}

int main()
{
    srand(time(NULL));

    // Random inputs in [1, 5]; A and B are filled alternately per cell so
    // the rand() consumption order matches the original program.
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < M; c++) {
            matrixA[r][c] = 1 + rand() % 5;
            matrixB[r][c] = 1 + rand() % 5;
        }
    }

    // Compute the tensor product on the device.
    tensorProductDevice(N, M, (int *) matrixA, (int *) matrixB, (int *) matrixC);

    printMatrix("Elements of A:", (const int *) matrixA, N, M);
    printMatrix("Elements of B:", (const int *) matrixB, N, M);
    printMatrix("Result:", (const int *) matrixC, N * N, M * M);
}
11,179
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define R 4096
#define C 4096
#define BLOCK_W 32
#define BLOCK_H 32
// Retained for compatibility; no longer passed to the launch (see below).
#define SHARED_SIZE (BLOCK_W*BLOCK_H*4)

float *a, *b, *result;

// Tiled matrix multiply: res = A(RxC) * B(CxC), one output element per
// thread, staging BLOCK_W x BLOCK_H tiles of A and B through static shared
// memory with zero padding outside the matrices.
__global__ void multiple(float *A, float *B, float *res)
{
    __shared__ float shared_A[BLOCK_W * BLOCK_H];
    __shared__ float shared_B[BLOCK_W * BLOCK_H];

    int tx = threadIdx.x, ty = threadIdx.y;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float ret = 0;

    for (int k = 0; k < (C - 1) / BLOCK_W + 1; k++) {
        // Load one tile of A and one of B; pad with zeros at the edges.
        if (row < R && k * BLOCK_W + tx < C)
            shared_A[ty * BLOCK_W + tx] = A[row * C + k * BLOCK_W + tx];
        else
            shared_A[ty * BLOCK_W + tx] = 0;
        if (col < C && k * BLOCK_H + ty < R)
            shared_B[ty * BLOCK_W + tx] = B[(k * BLOCK_W + ty) * C + col];
        else
            shared_B[ty * BLOCK_W + tx] = 0;
        __syncthreads();   // tiles fully loaded before use

        for (int i = 0; i < BLOCK_W; i++)
            ret += shared_A[ty * BLOCK_W + i] * shared_B[i * BLOCK_W + tx];
        __syncthreads();   // done with tiles before the next load
    }

    // BUG FIX: row must be bounded by R (row count), not C (latent here
    // only because R == C).
    if (row < R && col < C)
        res[row * C + col] = ret;
}

int main()
{
    float *d_a, *d_b, *d_r;
    cudaEvent_t start, end;
    float etime;
    dim3 block(BLOCK_W, BLOCK_H);
    // ceil-divide so sizes that are not tile multiples are still covered
    dim3 grid((C + BLOCK_W - 1) / BLOCK_W, (R + BLOCK_H - 1) / BLOCK_H);

    a = (float*)malloc(sizeof(float) * R * C);
    b = (float*)malloc(sizeof(float) * R * C);
    result = (float*)malloc(sizeof(float) * R * C);

    cudaEventCreate(&start);
    cudaEventCreate(&end);

    srand(time(NULL));
    // random inputs in [0, 100)
    for (int i = 0; i < R; i++)
        for (int j = 0; j < C; j++) {
            a[i * C + j] = (rand() % 1000000) / (float)10000;
            b[i * C + j] = (rand() % 1000000) / (float)10000;
        }

    // device allocations
    cudaMalloc((void**)&d_a, sizeof(float) * R * C);
    cudaMalloc((void**)&d_b, sizeof(float) * R * C);
    cudaMalloc((void**)&d_r, sizeof(float) * R * C);

    // Time transfers + kernel together, as before.
    cudaEventRecord(start, 0);
    cudaMemcpy(d_a, a, sizeof(float) * R * C, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * R * C, cudaMemcpyHostToDevice);
    // BUG FIX: the kernel uses only static __shared__ arrays, so the former
    // dynamic request of 2 * SHARED_SIZE bytes per block was never used and
    // only reduced occupancy; launch without it.
    multiple<<<grid, block>>>(d_a, d_b, d_r);
    cudaMemcpy(result, d_r, sizeof(float) * R * C, cudaMemcpyDeviceToHost);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&etime, start, end);

    printf("EXEC TIME : %f ms\n", etime);

    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_r);
    free(a);       // previously leaked
    free(b);
    free(result);
    return 0;
}
11,180
/*
 * dotproduct.cu
 * includes setup function called from "driver" program
 * also includes kernel function 'kernel_dotproduct[2]()'
 * largely inspired in the pdf http://www.cuvilib.com/Reduction.pdf
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define BLOCK_SIZE 1024

struct timeval tp1, tp2;

// Round n up to the next power of two (returns n unchanged when n == 0).
__device__ static int nextPow2(int n)
{
    if (!n) return n;
    int x = 1;
    while (x < n) x <<= 1;
    return x;
}

// Basic interleaved reduction: each block writes the partial dot-product
// sum of its slice of force*distance to result_d[blockIdx.x].
// Dynamic shared memory required: blockDim.x * sizeof(long long).
__global__ void kernel_dotproduct(long long *force_d, long long *distance_d, long long *result_d, long long size)
{
    extern __shared__ long long sadata[];
    int nTotalThreads = nextPow2(blockDim.x);

    // each thread loads one product from global to shared mem
    unsigned int tid = threadIdx.x;
    long long i = blockIdx.x * nTotalThreads + threadIdx.x;
    sadata[tid] = 0;
    if (i < size) {
        sadata[tid] = force_d[i] * distance_d[i];
    }
    __syncthreads();

    // interleaved tree reduction in shared mem
    for (unsigned int s = 1; s < nTotalThreads; s *= 2) {
        if (tid % (2 * s) == 0) {
            sadata[tid] += sadata[tid + s];
        }
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) result_d[blockIdx.x] = sadata[0];
}

// Optimized reduction: first add performed during the load, final warp
// unrolled.  blockSize is kept for compatibility with the dispatch switch
// in cuda_dotproduct; the runtime value nTotalThreads drives the loops.
template <unsigned int blockSize>
__global__ void kernel_dotproduct2(long long *force_d, long long *distance_d, long long *result_d, long long size)
{
    extern __shared__ long long sdata[];
    int nTotalThreads = nextPow2(blockDim.x);

    unsigned int tid = threadIdx.x;
    long long i = blockIdx.x * (nTotalThreads * 2) + threadIdx.x;

    // each thread sums two products while loading
    sdata[tid] = 0;
    if ((i + nTotalThreads) < size) {
        sdata[tid] = force_d[i] * distance_d[i]
                   + force_d[i + nTotalThreads] * distance_d[i + nTotalThreads];
    } else if (i < size) {
        sdata[tid] = force_d[i] * distance_d[i];
    }
    __syncthreads();

    // tree reduction down to the last warp
    // NOTE(review): the `(tid + s) < size` term compares a shared-memory
    // offset against the global array length; kept as in the original --
    // confirm it is intentional (it is a no-op for large arrays).
    for (long long s = nTotalThreads / 2; s > 32 && (tid + s) < size; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    // final warp, unrolled.
    // BUG FIX: shared memory is now accessed through a volatile pointer with
    // __syncwarp() between steps (requires CUDA 9+); without this the
    // compiler may cache values in registers and, under independent thread
    // scheduling (Volta+), lanes may race, producing wrong sums.
    if (tid < 32) {
        volatile long long *vsmem = sdata;
        vsmem[tid] += vsmem[tid + 32]; __syncwarp();
        vsmem[tid] += vsmem[tid + 16]; __syncwarp();
        vsmem[tid] += vsmem[tid + 8];  __syncwarp();
        vsmem[tid] += vsmem[tid + 4];  __syncwarp();
        vsmem[tid] += vsmem[tid + 2];  __syncwarp();
        vsmem[tid] += vsmem[tid + 1];
    }

    // write result for this block to global mem
    if (tid == 0) result_d[blockIdx.x] = sdata[0];
}

// This function is called from the host computer.
// It manages memory and calls the function that is executed on the GPU.
// Per-block partial sums of force . distance are returned in result_array;
// the caller sums result_array[0..blocks-1].  *time_result receives the
// kernel wall time in seconds.
extern "C" void cuda_dotproduct(long long *force, long long *distance, long long arraySize,
                                long long *result_array, double *time_result)
{
    // device counterparts of the host arrays
    long long *force_d;
    long long *distance_d;
    long long *result_d;
    cudaError_t op_result;

    // Reset the device so an earlier sticky error cannot poison this run.
    op_result = cudaDeviceReset();
    if (op_result != cudaSuccess) {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(op_result));
        exit(EXIT_FAILURE);
    }

    // allocate space in the device
    op_result = cudaMalloc((void**)&force_d, sizeof(long long) * arraySize);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMalloc (force) failed."); exit(1); }
    op_result = cudaMalloc((void**)&distance_d, sizeof(long long) * arraySize);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMalloc (distance) failed."); exit(1); }
    op_result = cudaMalloc((void**)&result_d, sizeof(long long) * arraySize);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMalloc (result) failed."); exit(1); }

    // copy the arrays from host to the device
    op_result = cudaMemcpy(force_d, force, sizeof(long long) * arraySize, cudaMemcpyHostToDevice);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMemcpy host->dev (force) failed."); exit(1); }
    op_result = cudaMemcpy(distance_d, distance, sizeof(long long) * arraySize, cudaMemcpyHostToDevice);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMemcpy host->dev (distance) failed."); exit(1); }
    op_result = cudaMemcpy(result_d, result_array, sizeof(long long) * arraySize, cudaMemcpyHostToDevice);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMemcpy host->dev (result) failed."); exit(1); }

    // pick a power-of-two block size suited to the problem size
    int threads;
    if (arraySize < 128)       { threads = 64; }
    else if (arraySize < 256)  { threads = 128; }
    else if (arraySize < 512)  { threads = 256; }
    else if (arraySize < 1024) { threads = 512; }
    else                       { threads = BLOCK_SIZE; }

    long long block_size = threads;
    long long blocks = ceil(arraySize / ((float)block_size));

    // set execution configuration
    dim3 dimblock(block_size);
    dim3 dimgrid(blocks);
    int smemSize = dimblock.x * sizeof(long long);   // one long long per thread

    // actual computation: call the kernel (timed with gettimeofday)
    gettimeofday(&tp1, NULL);
    switch (threads) {
    case 64:
        kernel_dotproduct2<64><<<dimgrid, dimblock, smemSize>>>(force_d, distance_d, result_d, arraySize);
        break;
    case 128:
        kernel_dotproduct2<128><<<dimgrid, dimblock, smemSize>>>(force_d, distance_d, result_d, arraySize);
        break;
    case 256:
        kernel_dotproduct2<256><<<dimgrid, dimblock, smemSize>>>(force_d, distance_d, result_d, arraySize);
        break;
    case 512:
        // BUG FIX: this case previously instantiated kernel_dotproduct2<256>.
        kernel_dotproduct2<512><<<dimgrid, dimblock, smemSize>>>(force_d, distance_d, result_d, arraySize);
        break;
    default:
        kernel_dotproduct2<BLOCK_SIZE><<<dimgrid, dimblock, smemSize>>>(force_d, distance_d, result_d, arraySize);
        break;
    }
    gettimeofday(&tp2, NULL);
    *time_result = (double)(tp2.tv_usec - tp1.tv_usec) / 1000000 + (double)(tp2.tv_sec - tp1.tv_sec);

    op_result = cudaMemcpy(result_array, result_d, sizeof(long long) * arraySize, cudaMemcpyDeviceToHost);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaMemcpy host <- dev (result) failed."); exit(1); }

    // release the memory on the GPU
    op_result = cudaFree(force_d);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaFree (force) failed."); exit(1); }
    op_result = cudaFree(distance_d);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaFree (distance) failed."); exit(1); }
    op_result = cudaFree(result_d);
    if (op_result != cudaSuccess) { fprintf(stderr, "cudaFree (result) failed."); exit(1); }
}
11,181
#include <cuda_runtime.h> #include "device_launch_parameters.h" #include <curand_kernel.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <malloc.h> #define BOOL int #define TRUE 1 #define FALSE 0 #define populationSize 128 #define chromosomeSize 10 #define maxGeneration 500 #define crossRate 0.8 #define mutationRate 0.01 #define eliteCount 0.05*populationSize //typedef float float; float LB[10] = {0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5}; //lower bound float UB[10] = {5, 4, 5, 4, 5, 5, 5, 5, 5, 4}; //upper bound float *a; //Tzaihe float *aa; //yingliK float *aaa; //Tyingli int aRow; int aaaRow; float Dysum[9]; __device__ float c_LB[10]; //lower bound __device__ float c_UB[10]; //upper bound __device__ float *c_a; //Tzaihe __device__ float *c_aa; //yingliK __device__ float *c_aaa; //Tyingli __device__ int c_aRow; __device__ int c_aaaRow; __device__ float c_Dysum[9]; float bestFitnessOfGen; //每一代的最优适应度 int bestIndexOfGen; //每一代的最优适应度位置 float aveFitnessOfGen[maxGeneration]; //每一代的平均最优适应度 float fval; //最终最优适应度 int G; //取得最终最优适应度的迭代次数 //BOOL elitism = TRUE; //是否精英选择 float *createMatrix(int rows, int cols) { float *matrix = (float*)malloc(rows * cols * sizeof(float)); return matrix; } __global__ void initSeed(unsigned int seed, curandState_t* states) { int idx = threadIdx.x; curand_init(seed, idx, 0, &states[idx]); } //Get data from files BOOL getData(const char *fileName, float *x, int rows, int cols) { // open file to read data FILE *fp; fp = fopen(fileName, "r"); if (fp == NULL) { printf("Open file %s error!!\n", fileName); return FALSE; } // read data for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { fscanf(fp, "%f", (x + i * cols + j)); } } return TRUE; } // //sig2ext in rainflow __host__ __device__ float *sig2ext(float *sigy, float *dty, int rows, int *lenOfArray) { int n = 0, m = 0, k = 0, l = 0; //w=diff(sig); //w=logical([1;(w(1:end-1).*w(2:end))<=0;1]); float *w = 
(float*)malloc(rows * sizeof(float)); for (int i = 1; i < rows; i++) { w[i - 1] = sigy[i] - sigy[i - 1]; } for (int i = rows - 2; i > 0; i--) { float tmp = w[i] * w[i - 1]; if (tmp <= 0) { w[i] = 1; } else { w[i] = 0; n++; } } w[0] = w[rows - 1] = 1; //ext=sigy(w); exttime=dty(w); float *ext = (float*)malloc((rows - n) * sizeof(float)); float *exttime = (float*)malloc((rows - n) * sizeof(float)); for (int i = 0, j = 0; i < rows - n && j < rows;) { if (w[j] == 0) { j++; } else { ext[i] = sigy[j]; exttime[i] = dty[j]; i++; j++; } } //w=diff(ext); //w=~logical([0; w(1:end-1)==0 & w(2:end)==0; 0]); for (int i = 1; i < rows - n; i++) { w[i - 1] = ext[i] - ext[i - 1]; } for (int i = rows - n - 2; i > 0; i--) { if (w[i - 1] == 0 && w[i] == 0) { w[i] = 0; m++; } else { w[i] = 1; } } w[0] = w[rows - n - 1] = 1; //ext=ext(w); exttime=exttime(w); for (int i = 0, j = 0; i < rows - n - m && j < rows - n;) { if (w[j] == 0) { j++; } else { ext[i] = ext[j]; exttime[i] = exttime[j]; i++; j++; } } //w=~logical([0; ext(1:end-1)==ext(2:end)]); for (int i = 1; i < rows - n - m; i++) { if (ext[i - 1] == ext[i]) { w[i] = 0; k++; } else { w[i] = 1; } } w[0] = 1; //ext=ext(w); for (int i = 0, j = 0; i < rows - n - m - k && j < rows - n - m;) { if (w[j] == 0) { j++; } else { ext[i] = ext[j]; i++; j++; } } //w2=(exttime(2:end)-exttime(1:end-1))./2 //exttime=[exttime(1:end-1)+w2.*~w(2:end); exttime(end)]; //exttime=exttime(w); float *w2 = (float*)malloc((rows - n - m - 1) * sizeof(float)); for (int i = 1; i < rows - n - m; i++) { w2[i - 1] = (exttime[i] - exttime[i - 1]) / 2.00; } for (int i = 0, j = 1; i < rows - n - m - 1 && j < rows - n - m;) { if (w[j] == 0) { exttime[i] = w2[i] * 1.00 + exttime[i]; i++; j++; } else { exttime[i] = w2[i] * 0.00 + exttime[i]; i++; j++; } } for (int i = 0, j = 0; i < rows - n - m - k && j < rows - n - m;) { if (w[j] == 0) { j++; } else { exttime[i] = exttime[j]; i++; j++; } } //length(ext)>2, w=diff(ext); w=logical([1; w(1:end-1).*w(2:end)<0; 1]); 
//ext4=ext(w); exttime=exttime(w); float *ext4 = NULL; *lenOfArray = 0; if (rows - n - m - k > 2) { for (int i = 1; i < rows - n - m - k; i++) { w[i - 1] = ext[i] - ext[i - 1]; } for (int i = rows - n - m - k - 2; i > 0; i--) { if (w[i - 1] * w[i] < 0) { w[i] = 1; } else { w[i] = 0; l++; } } w[0] = 1; w[rows - n - m - k - 1] = 1; *lenOfArray = rows - n - m - k - l; ext4 = (float*)malloc(2 * (*lenOfArray) * sizeof(float)); for (int i = 0, j = 0; i < rows - n - m - k - l && j < rows - n - m - k;) { if (w[j] == 0) { j++; } else { ext4[i] = ext[j]; ext4[i + (*lenOfArray)] = exttime[j]; i++; j++; } } } free(w); free(w2); free(ext); free(exttime); return ext4; } // //rainFlow in rainflow __host__ __device__ float *rainFlow(float *ext, float *exttime, int lenOfSig2ext, int *lenOfRainflow) { float *rfy = NULL, *rfyResult = NULL; //function rfy5 float a[128], t[128], ampl, mean, period, atime; int cNr = 1; int j = -1; //create 2D rfy(5 * (lenOfSig2ext -1)) rfy = (float*)malloc(5 * (lenOfSig2ext - 1) * sizeof(float)); int columnId = 0; int pointId = 0; for (int i = 0; i < lenOfSig2ext; i++) { a[++j] = *(ext + pointId); t[j] = *(exttime + pointId); while ((j >= 2) && (fabs(a[j - 1] - a[j - 2]) <= fabs(a[j] - a[j - 1]))) { ampl = fabs((a[j - 1] - a[j - 2]) / 2); switch (j) { case 0: { break; } case 1: { break; } case 2: { mean = (a[0] + a[1]) / 2; period = (t[1] - t[0]) * 2; atime = t[0]; a[0] = a[1]; a[1] = a[2]; t[0] = t[1]; t[1] = t[2]; j = 1; if (ampl > 0) { *(rfy + columnId*5 + 0) = ampl; *(rfy + columnId*5 + 1) = mean; *(rfy + columnId*5 + 2) = 0.50; *(rfy + columnId*5 + 3) = atime; *(rfy + columnId*5 + 4) = period; columnId++; } break; } default: { mean = (a[j - 1] + a[j - 2]) / 2; period = (t[j - 1] - t[j - 2]) * 2; atime = t[j - 2]; a[j - 2] = a[j]; t[j - 2] = t[j]; j = j - 2; if (ampl > 0) { *(rfy + columnId*5 + 0) = ampl; *(rfy + columnId*5 + 1) = mean; *(rfy + columnId*5 + 2) = 1.00; *(rfy + columnId*5 + 3) = atime; *(rfy + columnId*5 + 4) = period; columnId++; 
cNr++; } break; } } } pointId++; } for (int i = 0; i < j; i++) { ampl = fabs(a[i] - a[i + 1]) / 2; mean = (a[i] + a[i + 1]) / 2; period = (t[i + 1] - t[i]) * 2; atime = t[i]; if (ampl > 0) { *(rfy + columnId*5 + 0) = ampl; *(rfy + columnId*5 + 1) = mean; *(rfy + columnId*5 + 2) = 0.50; *(rfy + columnId*5 + 3) = atime; *(rfy + columnId*5 + 4) = period; columnId++; } } //create 2D rfyResult(5 * (lenOfSig2ext - cNr)) rfyResult = (float*)malloc(5 * (lenOfSig2ext - cNr) * sizeof(float)); *lenOfRainflow = lenOfSig2ext - cNr; for (int i = 0; i < 5 * (lenOfSig2ext - cNr); i++) { rfyResult[i] = rfy[i]; } free(rfy); return rfyResult; } // //rfhist in rainflow __host__ __device__ float *rfhist(float *rfy, int lenOfRainflow, int *lenOfRfhist) { float *noy = NULL, *xoy = NULL; int x = 32; *lenOfRfhist = x; xoy = (float*)malloc(x * sizeof(float)); noy = (float*)malloc(x * sizeof(float)); memset(noy, 0, x * sizeof(float)); //halfc=find(rfy(3,:)==0.5) int halfcNum = 0; for (int i = 0; i < lenOfRainflow; i++) { if (rfy[i * 5 + 2] == 0.50) halfcNum++; } int *halfc = NULL; halfc = (int*)malloc(halfcNum * sizeof(int)); for (int i = 0, j = 0; i < lenOfRainflow && j < halfcNum;) { if (rfy[i * 5 + 2] == 0.50) { halfc[j] = i; j++; } i++; } float min = rfy[0], max = rfy[0]; for (int i = 0; i < lenOfRainflow; i++) { if (rfy[i * 5] >= max) { max = rfy[i * 5]; } else if (rfy[i * 5] <= min) { min = rfy[i * 5]; } } float wid = (max - min) / x; for (int i = 0; i < x; i++) { xoy[i] = min + (float) (i + 0.50) * wid; } for (int i = 0; i < lenOfRainflow; i++) { int j; j = (int) floor((rfy[i * 5] - min) / wid); if (j != 0 && fabs((rfy[i * 5] - min) - wid * j) < 1e-10) { noy[j - 1] += 1; } else { noy[j] += 1; } } //if ~isempty(halfc) { //[N2 x]=hist(rf(r,halfc),x) N2 = noy2, x = *xoy //N1=N1-0.5*N2 N1 = noy // } if (halfcNum != 0) { float *noy2 = (float*)malloc(x * sizeof(float)); memset(noy2, 0, x * sizeof(float)); float *rf = (float*)malloc(halfcNum * sizeof(float)); for (int i = 0; i < halfcNum; 
i++) { int j = halfc[i]; rf[i] = rfy[j * 5]; } for (int i = 0; i < halfcNum; i++) { int j; j = (int) floor((rf[i] - min) / wid); if (j != 0 && fabs((rf[i] - min) - wid * j) < 1e-10) { noy2[j - 1] += 1; } else { noy2[j] += 1; } } for (int i = 0; i < x; i++) { noy[i] -= noy2[i] * 0.5; } free(noy2); free(rf); } float *rfhist = (float*)malloc(2 * x * sizeof(float)); for (int i = 0; i < x; i++) { rfhist[i] = noy[i]; rfhist[i + x] = xoy[i]; } free(halfc); free(xoy); free(noy); return rfhist; } void testPreData() { for (int kk = 0; kk < 9; kk++) { float *sigy = (float*)malloc(aaaRow * sizeof(float)); float *dty = (float*)malloc(aaaRow * sizeof(float)); for (int i = 0; i < aaaRow; i++) { sigy[i] = aaa[i * 11 + kk + 2]; dty[i] = aaa[i * 11 + 1]; } float *ext = NULL, *exttime = NULL; int lenOfSig2ext; ext = sig2ext(sigy, dty, aaaRow, &lenOfSig2ext); exttime = ext + lenOfSig2ext; float *rfy = NULL; int lenOfRainflow; rfy = rainFlow(ext, exttime, lenOfSig2ext, &lenOfRainflow); float *noy = NULL, *xoy = NULL; int lenOfRfhist; noy = rfhist(rfy, lenOfRainflow, &lenOfRfhist); xoy = noy + lenOfRfhist; for (int i = 0; i < lenOfRfhist; i++) { Dysum[kk] += noy[i] * pow(xoy[i] * 0.21 / 70, 3.5); } //printf("%e\n", Dysum[kk]); free(sigy); free(dty); free(ext); free(rfy); free(noy); } } //fitness Function float HfitnessFcn(float *x) { //initial Dzsum in every generation float *Dzsum = (float*)malloc(9 * sizeof(float)); memset(Dzsum, 0, sizeof(float) * 9); float *Tzb = (float*)malloc(aRow * 9 * sizeof(float)); memset(Tzb, 0, aRow * 9 * sizeof(float)); for (int i = 0; i < 9; i++) { for (int j = 0; j < aRow; j++) { Tzb[j * 9 + i] = x[0] * aa[0 * 9 + i] * a[j * 16 + 2] + x[1] * aa[1 * 9 + i] * a[j * 16 + 3] + x[2] * aa[2 *0 + i] * a[j * 16 + 4] + x[3] * aa[3 * 9 + i] * a[j * 16 + 5] + x[4] * aa[4 * 9 + i] * a[j * 16 + 6] + x[5] * aa[5 * 9 + i] * a[j * 16 + 7] + x[6] * aa[6 * 9 + i] * a[j * 16 + 8] + x[7] * aa[7 * 9 + i] * a[j * 16 + 9] + x[8] * aa[8 * 9 + i] * a[j * 16 + 10] + x[9] * aa[9 * 
9 + i] * a[j * 16 + 11]; } } for (int k = 0; k < 9; k++) { float *sig = (float*)malloc(aRow * sizeof(float)); float *dt = (float*)malloc(aRow * sizeof(float)); for (int i = 0; i < aRow; i++) { sig[i] = Tzb[i * 9 + k]; dt[i] = a[i * 9 + 1]; } float *ext = NULL, *exttime = NULL; int lenOfSig2ext; ext = sig2ext(sig, dt, aRow, &lenOfSig2ext); exttime = ext + lenOfSig2ext; float *rf = NULL; int lenOfRainflow; rf = rainFlow(ext, exttime, lenOfSig2ext, &lenOfRainflow); float *no = NULL, *xo = NULL; int lenOfRfhist; no = rfhist(rf, lenOfRainflow, &lenOfRfhist); xo = no + lenOfRfhist; for (int i = 0; i < lenOfRfhist; i++) { Dzsum[k] += no[i] * pow(xo[i] * 0.21 / 70, 3.5); } //printf("%e\n", Dzsum[k]); free(sig); free(dt); free(ext); free(rf); free(no); } float y = 0; for (int i = 0; i < 9; i++) { //constraint : c =(Dysum[i]-Dzsum[i]) <= 0 float c = Dysum[i] - Dzsum[i]; if (c <= 0) { y += pow(c, 2); } else { y = 100; } } //printf("%e\n", y); free(Dzsum); free(Tzb); return y; } void initial(float *populationArray){ for (int i = 0; i < populationSize; i++) { float *x = (float*)malloc(chromosomeSize * sizeof(float)); for (int j = 0; j < chromosomeSize; j++) { int high_pos = rand(); int low_pos = (rand() & ((1 << 16) - 1)); high_pos = (high_pos & ((1 << 15) - 1)); int value = low_pos + (high_pos << 16); populationArray[i * chromosomeSize + j] = (UB[j] - LB[j]) * ((float) value / ((1U << 31) - 1)) + LB[j]; x[j] = populationArray[i * chromosomeSize + j]; } float tmp_fit = HfitnessFcn(x); if (tmp_fit > 99) { i--; } //printf("%e\n",tmp_fit); free(x); } } __device__ float DfitnessFcn(float *x) { //initial Dzsum in every generation float *Dzsum = (float*)malloc(9 * sizeof(float)); memset(Dzsum, 0, sizeof(float) * 9); float *Tzb = (float*)malloc(c_aRow * 9 * sizeof(float)); memset(Tzb, 0, c_aRow * 9 * sizeof(float)); for (int i = 0; i < 9; i++) { for (int j = 0; j < c_aRow; j++) { Tzb[j * 9 + i] = x[0] * c_aa[0 * 9 + i] * c_a[j * 16 + 2] + x[1] * c_aa[1 * 9 + i] * c_a[j * 16 + 3] + 
x[2] * c_aa[2 *0 + i] * c_a[j * 16 + 4] + x[3] * c_aa[3 * 9 + i] * c_a[j * 16 + 5] + x[4] * c_aa[4 * 9 + i] * c_a[j * 16 + 6] + x[5] * c_aa[5 * 9 + i] * c_a[j * 16 + 7] + x[6] * c_aa[6 * 9 + i] * c_a[j * 16 + 8] + x[7] * c_aa[7 * 9 + i] * c_a[j * 16 + 9] + x[8] * c_aa[8 * 9 + i] * c_a[j * 16 + 10] + x[9] * c_aa[9 * 9 + i] * c_a[j * 16 + 11]; } } for (int k = 0; k < 9; k++) { float *sig = (float*)malloc(c_aRow * sizeof(float)); float *dt = (float*)malloc(c_aRow * sizeof(float)); for (int i = 0; i < c_aRow; i++) { sig[i] = Tzb[i * 9 + k]; dt[i] = c_a[i * 9 + 1]; } float *ext = NULL, *exttime = NULL; int lenOfSig2ext; ext = sig2ext(sig, dt, c_aRow, &lenOfSig2ext); exttime = ext + lenOfSig2ext; float *rf = NULL; int lenOfRainflow; rf = rainFlow(ext, exttime, lenOfSig2ext, &lenOfRainflow); float *no = NULL, *xo = NULL; int lenOfRfhist; no = rfhist(rf, lenOfRainflow, &lenOfRfhist); xo = no + lenOfRfhist; for (int i = 0; i < lenOfRfhist; i++) { Dzsum[k] += no[i] * pow(xo[i] * 0.21 / 70, 3.5); } printf("%e\n", Dzsum[k]); free(sig); free(dt); free(ext); free(rf); free(no); } float y = 0; for (int i = 0; i < 9; i++) { //constraint : c =(Dysum[i]-Dzsum[i]) <= 0 float c = c_Dysum[i] - Dzsum[i]; if (c <= 0) { y += pow(c, 2); } else { y = 100; } } printf("%e\n", y); free(Dzsum); free(Tzb); return y; } __global__ void GfitnessFcn(float *populationArray, float *fitness){ int idx = blockIdx.x + threadIdx.x; float *x = (float*)malloc(chromosomeSize * sizeof(float)); memset(x, 0, chromosomeSize * sizeof(float)); for (int j = 0; j < chromosomeSize; j++) { *(x + j) = *(populationArray +idx * chromosomeSize + j); } fitness[idx] = DfitnessFcn(x); free(x); } //sum fitness __host__ __device__ float sum(float *x) { float sum = 0; for (int i = 0; i < populationSize; i++) { sum += x[i]; } return sum; } //best fitness position float *bestFitness(float *fitness) { //bestRes[bestFitness][bestIndex] float bestFitness = fitness[0]; int bestIndex = 0; float *bestRes = (float*)malloc(2 * 
sizeof(float)); for (int i = 0; i < populationSize; i++) { if (fitness[i] < bestFitness) { bestFitness = fitness[i]; bestIndex = i; } } bestRes[0] = bestFitness; bestRes[1] = bestIndex; return bestRes; } //select pre __global__ void selectPre(float *fitness, float *Fitness, float *tmpFitness, float *populationArray, float *tmpPopulationArray){ int idx = threadIdx.x; Fitness[idx] = 1 / fitness[idx]; __syncthreads(); tmpFitness[idx] = fitness[idx]; __syncthreads(); for(int i = 0; i < chromosomeSize; i++){ tmpPopulationArray[idx * chromosomeSize + i] = populationArray[idx * chromosomeSize + i]; } __syncthreads(); } //select function 轮盘选择 __global__ void selectFcn(float *populationArray, float *tmpPopulationArray, float *fitness, float *Fitness, float *tmpFitness, float sumFitness, float *populationPro, curandState_t *states) { //printf("selectFcn\n"); int idx = threadIdx.x; //每个个体被选择的概率 populationPro[idx] = Fitness[idx] / sumFitness; __syncthreads(); //轮盘选择 int index; curandState_t s; s = states[idx]; float ss = curand_uniform(&s); while (ss < 0.0001) ss = curand_uniform(&s); //printf("%e\n", ss); for (int j = 0; j < populationSize; j++) { ss -= populationPro[j]; if (ss <= 0) { index = j; //printf("%d\n", index); break; } } //产生新种群 for (int j = 0; j < chromosomeSize; j++) { populationArray[idx * chromosomeSize + j] = tmpPopulationArray[index * chromosomeSize + j]; } __syncthreads(); fitness[idx] = tmpFitness[index]; __syncthreads(); } //cross function 每两个个体做判断 __global__ void crossFcn(float *populationArray, curandState_t *states) { //printf("crossFcn\n"); int idx = threadIdx.x; curandState_t s = states[idx]; curandState_t t = states[idx]; float ss = curand_uniform(&s); int tt = curand(&t); //判断当前两个个体是否做交叉 if (ss < crossRate){ for (int j = 0; j < chromosomeSize; j++) { //判断两个个体中的染色体是否做交叉 if (tt % 2 != 0) { float tmp = populationArray[idx * chromosomeSize + j]; populationArray[idx * chromosomeSize + j] = populationArray[(idx + populationSize/2) * chromosomeSize + j]; 
populationArray[(idx + populationSize/2) * chromosomeSize + j] = tmp; } } } } //mutation function __global__ void mutationFcn(float *populationArray, curandState_t *states) { //printf("mutationFcn\n"); int idx = threadIdx.x; curandState_t s = states[idx]; curandState_t t = states[idx]; float ss = curand_uniform(&s); int tt = curand(&t); float scale = 0.5, shrink = 0.75; scale -= scale * shrink * idx / maxGeneration; //判断当前个体是否变异 if (ss < mutationRate){ for (int j = 0; j < chromosomeSize; j++) { //判断当前染色体是否变异 if (tt % 2 != 0) { float tmpChromosome; do { tmpChromosome = populationArray[idx * chromosomeSize + j] + scale * (c_UB[j] - c_LB[j]) * ss; //判断是否越界 } while (tmpChromosome > c_UB[j] || tmpChromosome < c_LB[j]); populationArray[idx * chromosomeSize + j] = tmpChromosome; } } } } //rank fitness int *rankForElitism(float *fitness) { // initialize rank array int *rank = (int *)malloc(populationSize * sizeof(int)); for (int i = 0; i < populationSize; i++) { rank[i] = i; } // rank fitness in increase order for (int i = populationSize - 1; i > 0; i--) { for (int j = 0; j < i; j++) { if (fitness[rank[j]] > fitness[rank[j + 1]]) { int tmp_rank = rank[j]; rank[j] = rank[j + 1]; rank[j + 1] = tmp_rank; } } } return rank; } //select function 轮盘选择 void selectFcn(float *populationArray, float *fitness, float *populationPro) { // float tmpPopulationArray[populationSize * chromosomeSize]; // float tmpFitness[populationSize]; float *tmpPopulationArray = (float*)malloc(populationSize * chromosomeSize * sizeof(float)); float *tmpFitness = (float*)malloc(populationSize * sizeof(float)); for (int i = 0; i < populationSize; i++) { for (int j = 0; j < chromosomeSize; j++) { tmpPopulationArray[i * chromosomeSize + j] = populationArray[i * chromosomeSize + j]; } tmpFitness[i] = fitness[i]; } //每个个体被选择的概率 float *Fitness = (float*)malloc(populationSize * sizeof(float)); float sumFitness = 0; for (int i = 0; i < populationSize; i++) { Fitness[i] = 1 / fitness[i]; } sumFitness = 
sum(Fitness); for (int i = 0; i < populationSize; i++) { populationPro[i] = Fitness[i] / sumFitness; } free(Fitness); //轮盘选择 int *index = (int*)malloc(populationSize * sizeof(int)); for (int i = 0; i < populationSize; i++) { float pick = ((float) rand()) / RAND_MAX; while (pick < 0.0001) pick = ((float) rand()) / RAND_MAX; for (int j = 0; j < populationSize; j++) { pick -= populationPro[j]; if (pick <= 0) { index[i] = j; //printf("%d\n", index[i]); break; } } } //产生新种群 for (int i = 0; i < populationSize; i++) { for (int j = 0; j < chromosomeSize; j++) { populationArray[i * chromosomeSize + j] = tmpPopulationArray[index[i] * chromosomeSize + j]; } fitness[i] = tmpFitness[index[i]]; } free(index); free(tmpPopulationArray); free(tmpFitness); } //cross function 每两个个体做判断 void crossFcn(float *populationArray) { //printf("crossFcn\n"); for (int i = 0; i < populationSize; i += 2) { //判断当前两个个体是否做交叉 float pick1 = ((float) rand()) / RAND_MAX; if (pick1 > crossRate) continue; for (int j = 0; j < chromosomeSize; j++) { //判断两个个体中的染色体是否做交叉 int pick2 = rand(); if (pick2 & 1) { float tmp = populationArray[i * chromosomeSize + j]; populationArray[i * chromosomeSize + j] = populationArray[(i+1) * chromosomeSize + j]; populationArray[(i+1) * chromosomeSize + j] = tmp; } } } } //mutation function void mutationFcn(float *populationArray) { //printf("mutationFcn\n"); float scale = 0.5, shrink = 0.75; for (int i = 0; i < populationSize; i++) { scale -= scale * shrink * i / maxGeneration; //判断当前个体是否变异 float pick1 = ((float) rand()) / RAND_MAX; if (pick1 > mutationRate) continue; for (int j = 0; j < chromosomeSize; j++) { //判断当前染色体是否变异 int pick2 = rand(); if (pick2 & 1) { float tmpChromosome; do { float pick3 = ((float) rand()) / RAND_MAX * 2 - 1; tmpChromosome = populationArray[i * chromosomeSize + j] + scale * (UB[j] - LB[j]) * pick3; //判断是否越界 } while (tmpChromosome > UB[j] || tmpChromosome < LB[j]); populationArray[i * chromosomeSize + j] = tmpChromosome; } } } } int main(int argc, char 
*argv[]){ time_t start = clock(); srand(time(NULL)); if(argc != 6){ printf("ERROR\n"); return 0; } BOOL success = TRUE; aRow = atoi(argv[2]); a = createMatrix(aRow, 16); success = getData(argv[1], a, aRow, 16); if (!success) { return 0; } aa = createMatrix(10, 9); success = getData(argv[3], aa, 10, 9); if (!success) { return 0; } aaaRow = atoi(argv[5]); aaa = createMatrix(aaaRow, 11); success = getData(argv[4], aaa, aaaRow, 11); if (!success) { return 0; } testPreData(); cudaMemcpyToSymbol(c_a, a, aRow * 16 * sizeof(float)); cudaMemcpyToSymbol(c_aa, aa, 10 * 9 * sizeof(float)); cudaMemcpyToSymbol(c_aaa, aaa, aaaRow * 11 * sizeof(float)); cudaMemcpyToSymbol(c_aRow, &aRow, sizeof(float)); cudaMemcpyToSymbol(c_aaaRow, &aaaRow, sizeof(float)); cudaMemcpyToSymbol(c_LB, LB, 10 * sizeof(float)); cudaMemcpyToSymbol(c_UB, UB, 10 * sizeof(float)); cudaMemcpyToSymbol(c_Dysum, Dysum, 9 * sizeof(float)); float *populationArray; float *fitness; float *populationPro; float *Fitness; float *tmpPopulationArray; float *tmpFitness; float *X_10; fval = 100; //BOOL elitism = TRUE; cudaMallocManaged(&populationArray, populationSize * chromosomeSize * sizeof(float)); cudaMallocManaged(&fitness, populationSize * sizeof(float)); cudaMallocManaged(&populationPro, populationSize * sizeof(float)); cudaMallocManaged(&Fitness, populationSize * sizeof(float)); cudaMallocManaged(&tmpPopulationArray, populationSize * chromosomeSize * sizeof(float)); cudaMallocManaged(&tmpFitness, populationSize * sizeof(float)); cudaMallocManaged(&X_10, 10 * sizeof(float)); cudaMemset(populationArray, 0, populationSize * chromosomeSize * sizeof(float)); cudaMemset(fitness, 0, populationSize * sizeof(float)); cudaMemset(populationPro, 0, populationSize * sizeof(float)); cudaMemset(Fitness, 0, populationSize * sizeof(float)); cudaMemset(tmpPopulationArray, 0, populationSize * chromosomeSize * sizeof(float)); cudaMemset(tmpFitness, 0, populationSize * sizeof(float)); cudaMemset(X_10, 0, 10 * sizeof(float)); 
curandState_t *states = NULL; cudaMalloc((void**)&states, populationSize * sizeof(curandState_t)); initSeed<<<1, populationSize>>>(time(NULL), states); cudaDeviceSynchronize(); //initial population initial(populationArray); for(int n = 0; n < maxGeneration; n++){ for (int i = 0; i < populationSize; i++) { float *x = (float*)malloc(chromosomeSize * sizeof(float)); for (int j = 0; j < chromosomeSize; j++) { x[j] = populationArray[i * chromosomeSize + j]; } fitness[i] = HfitnessFcn(x); free(x); } //每一代最优适应度及其位置 //bestRes[bestFitness][bestIndex] float *bestRes = bestFitness(fitness); bestFitnessOfGen = bestRes[0]; bestIndexOfGen = (int) bestRes[1]; if (bestFitnessOfGen < fval) { fval = bestFitnessOfGen; for (int k = 0; k < chromosomeSize; k++) { X_10[k] = populationArray[bestIndexOfGen * chromosomeSize + k]; } G = n + 1; } // printf("1bestFitness : %e\n", bestFitnessOfGen); // printf("1fval : %e\n", fval); //printf("%e\n", bestFitnessOfGen); //printf("%d\n", bestIndexOfGen); free(bestRes); if(G == maxGeneration - 1) break; selectPre<<<1, populationSize>>>(fitness, Fitness, tmpFitness, populationArray, tmpPopulationArray); cudaDeviceSynchronize(); float sumFitness = sum(Fitness); selectFcn<<<1, populationSize>>>(populationArray, tmpPopulationArray, fitness, Fitness, tmpFitness, sumFitness, populationPro, states); cudaDeviceSynchronize(); //selectFcn(populationArray, fitness, populationPro); crossFcn<<<1, populationSize/2>>>(populationArray, states); cudaDeviceSynchronize(); //crossFcn(populationArray); mutationFcn<<<1, populationSize>>>(populationArray, states); cudaDeviceSynchronize(); //mutationFcn(populationArray); } printf("fval:%e\n", fval); printf("X:%f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", X_10[0], X_10[1], X_10[2], X_10[3], X_10[4], X_10[5], X_10[6], X_10[7], X_10[8], X_10[9]); printf("Gen:%d\n", G); time_t stop = clock(); printf("time:%e\n", ((float) (stop - start)) / CLOCKS_PER_SEC); cudaFree(c_Dysum); cudaFree(c_LB); cudaFree(c_UB); 
cudaFree(populationArray); cudaFree(fitness); cudaFree(populationPro); cudaFree(Fitness); cudaFree(tmpPopulationArray); cudaFree(tmpFitness); cudaFree(X_10); cudaFree(states); free(a); free(aa); free(aaa); return 0; }
11,182
//compile with "nvcc -arch=sm_20 -lcudart" #include <stdio.h> __global__ void add (int *a, int *b, int *c) { *c = *a + *b; } int main( void ) { int a,b,c; int *a2,*b2,*c2; cudaMalloc((void**) &a2, sizeof(int)); cudaMalloc((void**) &b2, sizeof(int)); cudaMalloc((void**) &c2, sizeof(int)); a = 5; b = 42; cudaMemcpy(a2, &a , sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(b2, &b , sizeof(int), cudaMemcpyHostToDevice); add<<<1,1>>>(a2,b2,c2); cudaMemcpy(&c, c2, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(a2); cudaFree(b2); cudaFree(c2); printf("%i\n",a); printf("%i\n",b); printf("%i\n",c); }
11,183
#include <stdio.h> #include <math.h> #define T threadIdx #define B blockIdx #define T_W 2 //T_W => Tile Width that lowers the burden on GPU for computation /*matrix multiplication kernels*/ // shared __global__ void MatrixMulSh( float *Md , float *Nd , float *Pd , const int W ) { //These shared variables are present in shared memory that is common to all threads within a block. __shared__ float Mds [T_W][T_W] ; __shared__ float Nds [T_W][T_W] ; // calculate thread id unsigned int col = T_W*B.x + T.x; unsigned int row = T_W*B.y + T.y; //printf("---------COL OF [%d]{%d} is: %d ------- \n", B.x, T.x, col); //printf("---------ROW OF [%d]{%d} is: %d ------- \n", B.y, T.y, row); float Pvalue = 0; // m indicate number of phase for (int m = 0 ; m < W/T_W ; m++ ) { //printf("***** FOR M : %d ******\n", m); Mds[T.y][T.x] = Md[row*W + (m*T_W + T.x)]; //printf("\nMds[%d][%d] = Md [%d]\n", T.y, T.x, (row*W+(m*T_W + T.x))); Nds[T.y][T.x] = Nd[ col+( m*T_W + T.y) * W ] ; //printf("\nNds[%d][%d] = Nd [%d]\n", T.y, T.x, (col+(m*T_W + T.y)*W)); __syncthreads() ; //for synchronizing the threads for (int k = 0; k < T_W; ++k) { Pvalue += Mds[T.x][k] * Nds[k][T.y]; //printf("\nPvalue += Mds[%d][%d] * Nds[%d][%d]\n", T.x, k, k, T.y); } __syncthreads(); } Pd[row*W + col] = Pvalue; //printf("\n~~~Pd[%d] = %d~~~\n", row*W+col, Pvalue); } int main () { const int W = 6; float array1_h[W][W],array2_h[W][W],M_result_array_h[W][W]; float *array1_d,*array2_d ,*M_result_array_d ; // device array *result_array_d int i , j; //input in host array //hardcoding 1 in all slots of 1st array and 2 in all slots of 2nd array for ( i = 0 ; i<W ; i++ ) { for (j = 0 ; j<W ; j++ ) { array1_h[i][j] = 1; array2_h[i][j] = 2; } } //create device array cudaMalloc ( (void **)&array_name, sizeofmatrixinbytes) ; cudaMalloc((void **) &array1_d , W*W*sizeof (int) ) ; cudaMalloc((void **) &array2_d , W*W*sizeof (int) ) ; //copy host array to device array; cudaMemcpy ( dest , source , W , direction ) cudaMemcpy ( array1_d 
, array1_h , W*W*sizeof (int) , cudaMemcpyHostToDevice ) ; cudaMemcpy ( array2_d , array2_h , W*W*sizeof (int) , cudaMemcpyHostToDevice ) ; //allocating memory for resultent device array cudaMalloc((void **) &M_result_array_d , W*W*sizeof (int) ); //calling kernal dim3 dimBlock ( W/T_W , W/T_W ,1 ) ; dim3 dimThread ( T_W, T_W, 1 ) ; #if 1 MatrixMulSh<<<dimBlock,dimThread>>> ( array1_d , array2_d ,M_result_array_d , W) ; #endif // all gpu function blocked till kernel is working //copy back result_array_d to result_array_h cudaMemcpy(M_result_array_h , M_result_array_d , W*W*sizeof(int),cudaMemcpyDeviceToHost) ; cudaFree(array1_d); cudaFree(array2_d); cudaFree(M_result_array_d); //printf the result array for ( i = 0 ; i<W ; i++ ) { for ( j = 0 ; j < W ; j++ ) { printf ("%f ",M_result_array_h[i][j] ) ; } printf ("\n") ; } cudaFree(M_result_array_h); }
11,184
#include "includes.h" __global__ void CudaGetBitAndOfRows(unsigned int* table1D, unsigned int* row, int rowSize, int tableRowCount) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < tableRowCount * rowSize) { table1D[idx] = table1D[idx] & row[idx % rowSize]; } }
11,185
#include<stdio.h> __global__ void Matrix_mult(int *a, int *b, int *c, int *m, int *n, int *p) { int col = blockIdx.y * blockDim.y + threadIdx.y; int row = blockIdx.x * blockDim.x + threadIdx.x; int temp = 0, i; if(row<*m && col<*p) for(i=0; i<*n; i++) temp += a[row * (*n) + i] * b[i * (*p) + col]; c[row * (*p) + col] = temp; } int main() { int m = 3,n = 2,p = 2, i, j; int a[m*n], b[n*p], c[m*n]; int *cuda_a, *cuda_b, *cuda_c, *cuda_m, *cuda_n, *cuda_p; printf("Matrix A:\n"); for(i=0; i<m; i++) { for(j=0; j<n; j++) { a[i*n+j] = rand()%100; printf("%d ", a[i*n+j]); } printf("\n"); } printf("\nMatrix B:\n"); for(i=0; i<n; i++) { for(j=0; j<p; j++) { b[i*p+j] = rand()%100; printf("%d ",b[i*p+j]); } printf("\n"); } printf("\n"); cudaMalloc((void**)&cuda_a, m*n*sizeof(int)); cudaMalloc((void**)&cuda_b, n*p*sizeof(int)); cudaMalloc((void**)&cuda_c, m*p*sizeof(int)); cudaMalloc((void**)&cuda_m, sizeof(int)); cudaMalloc((void**)&cuda_n, sizeof(int)); cudaMalloc((void**)&cuda_p, sizeof(int)); cudaMemcpy(cuda_a, a, m*n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_b, b, n*p*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_m, &m, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_n, &n, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_p, &p, sizeof(int), cudaMemcpyHostToDevice); dim3 threadsPerBlock(m, p); dim3 blocksPerGrid(1, 1); Matrix_mult<<<blocksPerGrid,threadsPerBlock>>> (cuda_a, cuda_b, cuda_c, cuda_m, cuda_n, cuda_p); cudaMemcpy(c, cuda_c, m*p*sizeof(int), cudaMemcpyDeviceToHost); printf("Result Matrix:\n"); for(i=0; i<m; i++) { for(j=0; j<p; j++) { printf("%d ", c[i*p+j]); } printf("\n"); } cudaFree(cuda_a); cudaFree(cuda_b); cudaFree(cuda_c); cudaFree(cuda_m); cudaFree(cuda_n); cudaFree(cuda_p); return 0; }
11,186
#include "includes.h" __global__ void IntervalToBinaryVector(float input, float* outputs, int steps) { int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; if(id < steps) { float fraction = 1.0f / steps; outputs[id] = input >= fraction * id && input <= fraction * (id + 1); } }
11,187
/************************************************************************************\ * * * Copyright © 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. 
* * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "../graph_parser/parse.h" #include "../graph_parser/util.h" #include "kernel_maxmin.cu" #ifdef GEM5_FUSION #include <stdint.h> extern "C" { void m5_work_begin(uint64_t workid, uint64_t threadid); void m5_work_end(uint64_t workid, uint64_t threadid); } #endif #define RANGE 2048 void print_vector(int *vector, int num); int main(int argc, char **argv) { char *tmpchar; int num_nodes; int num_edges; int file_format = 1; bool directed = 0; cudaError_t err = cudaSuccess; if (argc == 3) { tmpchar = argv[1]; //graph inputfile file_format = atoi(argv[2]); //graph format } else { fprintf(stderr, "You did something wrong!\n"); exit(1); } srand(7); // Allocate the CSR structure csr_array *csr; // Parse graph file and store into a CSR format if (file_format == 1) csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed); else if (file_format == 0) csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed); else { printf("reserve for future"); exit(1); } // Allocate the vertex value array int *node_value = (int *)malloc(num_nodes * sizeof(int)); if (!node_value) fprintf(stderr, "node_value malloc failed\n"); // Allocate the color array int *color = (int *)malloc(num_nodes * sizeof(int)); if (!color) fprintf(stderr, "color malloc failed\n"); // Initialize all the colors to -1 // Randomize the value for each vertex for (int i = 0; i < num_nodes; i++) { color[i] = -1; node_value[i] = rand() % RANGE; } int *row_d; int *col_d; int *max_d; int *min_d; int *color_d; int *node_value_d; int *stop_d; // Create device-side buffers for the graph err = cudaMalloc(&row_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err)); return -1; } err = cudaMalloc(&col_d, num_edges * sizeof(int)); if (err != 
cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d): %s\n", num_edges , cudaGetErrorString(err)); return -1; } // Termination variable err = cudaMalloc(&stop_d, sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc stop_d (size:%d) => %s\n", 1 , cudaGetErrorString(err)); return -1; } // Create device-side buffers for color err = cudaMalloc(&color_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc color_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err)); return -1; } err = cudaMalloc(&node_value_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc node_value_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err)); return -1; } err = cudaMalloc(&max_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc max_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err)); return -1; } err = cudaMalloc(&min_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc min_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err)); return -1; } // Copy data to device-side buffers double timer1 = gettime(); #ifdef GEM5_FUSION m5_work_begin(0, 0); #endif err = cudaMemcpy(color_d, color, num_nodes * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy color_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMemcpy(node_value_d, node_value, num_nodes * sizeof(int), 
cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy node_value_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } int block_size = 256; int num_blocks = (num_nodes + block_size - 1) / block_size; // Set up kernel dimensions dim3 threads(block_size, 1, 1); dim3 grid(num_blocks, 1, 1); int stop = 1; int graph_color = 1; // Initialize arrays ini<<< grid, threads >>>(max_d, min_d, num_nodes); // Main computation loop double timer3 = gettime(); while (stop) { stop = 0; // Copy the termination variable to the device err = cudaMemcpy(stop_d, &stop, sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: write stop_d: %s\n", cudaGetErrorString(err)); } // Launch the color kernel 1 color1 <<< grid, threads >>>(row_d, col_d, node_value_d, color_d, stop_d, max_d, min_d, graph_color, num_nodes, num_edges); // Launch the color kernel 2 color2 <<< grid, threads >>>(node_value_d, color_d, max_d, min_d, graph_color, num_nodes, num_edges); err = cudaMemcpy(&stop, stop_d, sizeof(int), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "ERROR: read stop_d: %s\n", cudaGetErrorString(err)); } // Update the color label for the next iter graph_color = graph_color + 2; } cudaThreadSynchronize(); double timer4 = gettime(); // Copy back the color array err = cudaMemcpy(color, color_d, num_nodes * sizeof(int), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("ERROR: cudaMemcpy(): %s\n", cudaGetErrorString(err)); return -1; } #ifdef GEM5_FUSION m5_work_end(0, 0); #endif double timer2 = gettime(); // Print out color and timing statistics printf("total number of colors used: %d\n", graph_color); printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000); printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000); #if 1 // Dump the color array into an output file print_vector(color, num_nodes); #endif // Free host-side buffers free(node_value); free(color); csr->freeArrays(); 
free(csr); // Free CUDA buffers cudaFree(row_d); cudaFree(col_d); cudaFree(max_d); cudaFree(color_d); cudaFree(node_value_d); cudaFree(stop_d); return 0; } void print_vector(int *vector, int num) { FILE * fp = fopen("result.out", "w"); if (!fp) { printf("ERROR: unable to open result.txt\n"); } for (int i = 0; i < num; i++) fprintf(fp, "%d: %d\n", i + 1, vector[i]); fclose(fp); }
11,188
#include "adderKernel.cuh"
#include <cstdio>
#include <cstdlib>

// Abort with a file:line diagnostic if a CUDA runtime call fails.
// Kernel launches return no status directly; check cudaGetLastError()
// after the launch instead.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Adds the two integers pointed to by d_a and d_b and stores the sum in
// *d_c. All three pointers must be device memory; launched <<<1, 1>>>.
__global__ void gpuAdd(int* d_a, int* d_b, int* d_c)
{
    *d_c = *d_a + *d_b;
}

// Host wrapper: copies a and b to the device, runs the single-thread
// addition kernel, and returns the sum.
//
// @param a first addend
// @param b second addend
// @return  a + b, computed on the GPU
int gpuAdd(int const a, int const b)
{
    int h_c = 0;
    int *d_a = NULL, *d_b = NULL, *d_c = NULL;

    CUDA_CHECK(cudaMalloc((void**)&d_a, sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&d_b, sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&d_c, sizeof(int)));

    CUDA_CHECK(cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice));

    gpuAdd<<<1, 1>>>(d_a, d_b, d_c);
    CUDA_CHECK(cudaGetLastError());  // catch launch-configuration errors

    // The blocking device-to-host copy also synchronizes with the kernel,
    // so any in-kernel fault surfaces here as well.
    CUDA_CHECK(cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(d_a));
    CUDA_CHECK(cudaFree(d_b));
    CUDA_CHECK(cudaFree(d_c));
    return h_c;
}
11,189
#ifndef _UNIONFIND_CU
#define _UNIONFIND_CU

// Initializes `value` as a singleton set: the element becomes its own root.
template <class T>
__device__ __host__ void make_set(T *parent, T value)
{
    parent[value] = value;
}

// Returns the representative (root) of the set containing `index`.
// Applies path halving on the way up: each visited node is re-pointed at
// its grandparent, which flattens the tree incrementally without a
// second pass.
// TODO: proper path compression?
template <class T>
__device__ __host__ T find_root(T *parent, T index)
{
    T node = index;
    for (;;) {
        T up = parent[node];
        if (up == node) {
            return node;  // reached the root
        }
        parent[node] = parent[up];  // skip one level (path halving)
        node = up;
    }
}

// Unites the sets containing x and y. The numerically smaller root
// becomes the root of the merged set; when the roots coincide the
// elements are already joined and nothing changes.
template <class T>
__device__ __host__ void merge(T *parent, T x, T y)
{
    T rx = find_root(parent, x);
    T ry = find_root(parent, y);
    if (rx < ry) {
        parent[ry] = rx;
    } else if (ry < rx) {
        parent[rx] = ry;
    }
}

#endif // _UNIONFIND_CU
11,190
#include "includes.h"

// Logistic sigmoid, 1 / (1 + e^-x), using the fast (reduced-precision)
// __expf intrinsic. Currently unused by the kernel below.
__device__ float sigmoid(float x)
{
    return 1.0f / (1 + __expf(-x));
}

// Gradient-descent bias update: b[bid] -= learning_rate * mean(dZ row bid).
//
// Launch expectations (from the indexing below):
//  - one block per bias/row: bid = blockIdx.x selects row `bid` of dZ,
//    where dZ is laid out row-major with `cols` elements per row;
//  - dynamic shared memory of at least blockDim.x floats, since _sum is
//    indexed by threadIdx.x.
//
// NOTE(review): the `row` parameter is never referenced in this kernel —
// confirm with callers whether it is vestigial.
__global__ void updateBiasKernel_sigmoid(float* dZ, float* b, int cols, int row, float learning_rate){
    int bid = blockIdx.x;
    extern __shared__ float _share[];
    //float * _max = _share;
    float * _sum = _share;
    // Pointer to the start of this block's row of dZ.
    float* sp = dZ + cols * bid;
    _sum[threadIdx.x] = 0.0;
    // Each thread accumulates a strided partial sum of the row into its
    // own shared-memory slot (stride = blockDim.x, so loads coalesce).
    for(int id = threadIdx.x ; id < cols; id += blockDim.x){
        // int id = tid + threadIdx.x;
        //if(id < cols){
        _sum[threadIdx.x] += sp[id];
        //}
    }
    __syncthreads();
    // Shared-memory tree reduction over blockDim.x partial sums.
    // Handles odd lengths: with skip = ceil(len/2), the first floor(len/2)
    // threads fold in the upper half, and for odd len the middle element
    // (index len>>1) is simply carried into the next round untouched.
    int len = blockDim.x;
    while(len != 1)
    {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
        {
            _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
    }
    __syncthreads();
    // _sum[0] now holds the row total; apply the averaged gradient step.
    // (Every thread of the block writes the same value to b[bid].)
    b[bid] -= learning_rate * (_sum[0]/cols);
}
11,191
#include "includes.h"

// Windowed partial sum: each thread sums up to *width consecutive
// elements starting at its own global index and writes the partial sum
// back to that index.
//
// @param data  in/out array of at least *len ints in device memory
// @param len   pointer to the element count (single int, device memory)
// @param width pointer to the window length (single int, device memory)
//
// NOTE(review): threads read elements that neighbouring threads
// overwrite in the same launch (thread t reads data[t+1 .. t+*width-1]
// while those threads replace their own slots), so results for
// overlapping windows are order-dependent. Confirm the caller only
// consumes every (*width)-th result per pass, or add a separate output
// buffer.
__global__ void sum_reduc(int* data, int* len, int* width){
    // BUG FIX: the global thread index is blockIdx.x * blockDim.x
    // (threads per block), not blockIdx.x * gridDim.x (blocks per grid),
    // which both skipped elements and aliased indices between blocks.
    int indx = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the tail so extra threads never write past the array
    // (the original wrote data[indx] unconditionally).
    if (indx >= *len) return;
    int sum = 0;
    for (int i = indx; i < indx + *width; i++){
        if (i < *len)
            sum += data[i];
    }
    data[indx] = sum;
}
11,192
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

/* Number of function values to calculate */
#define RANGE_ELEMENTS_COUNT 20000000

/* Maximum possible CUDA blocks */
#define CUDA_BLOCKS_MAX 65535

/** Calculates function values by using GPU (on device).
 * Samples sin over one full period: output[i] = sin(2*pi * i / (N - 1)).
 * Each thread fills a contiguous slice of `elements_per_thread` values;
 * output must hold RANGE_ELEMENTS_COUNT doubles in device memory.
 * @param output - output buffer for calculated values;
 * @param elements_per_thread - elements to be calculated by one GPU thread.
 */
__global__ void device_sin(double* output, unsigned int elements_per_thread)
{
    /* 2*Pi constant (8 * atan(1) == 2*pi) */
    const double const_2pi = 8.0 * atan(1.0);

    /* Cycle counter */
    unsigned int i;

    /* Determine indices of the slice this thread computes */
    unsigned int thread_start = (blockIdx.x * blockDim.x + threadIdx.x) * elements_per_thread;
    unsigned int thread_end = thread_start + elements_per_thread;

    /* Check whenever the thread is not redundant (entirely past the range) */
    if (thread_start >= RANGE_ELEMENTS_COUNT) {
        return;
    }

    /* Fix index excess if any: clamp the last thread's slice to the range end */
    thread_end = (thread_end <= RANGE_ELEMENTS_COUNT)? thread_end: RANGE_ELEMENTS_COUNT;

    /* Calculate values */
    for (i = thread_start; i < thread_end; ++i) {
        output[i] = sin((const_2pi * i) / (RANGE_ELEMENTS_COUNT - 1));
    }
}

/** Calculates function values by using CPU (on host)
 * @param output - output buffer for calculated values.
*/ void host_sin(double* output) { /* Pi contant */ const double const_2pi = 8.0 * atan(1.0); /* Cycle counter */ unsigned int i; /* Calculate values */ for (i = 0; i < RANGE_ELEMENTS_COUNT; ++i) { output[i] = sin((const_2pi * i) / (RANGE_ELEMENTS_COUNT - 1)); } } int main(int argc, char* argv[]) { /* Whenever to use CUDA flag */ int use_cuda = 0; /* Properly calculated values flag */ int are_counted_properly = 1; /* Cycle counter */ unsigned int i; /* CUDA threads per blocks (user defined) */ unsigned int cuda_threads_per_block = 1; /* CUDA blocks count (to be calculated) */ unsigned int cuda_blocks_count; /* Elements to be calculated per each CUDA thread (to be calculated) */ unsigned int cuda_elements_per_thread = 0; /* Buffer for calculated function values on host (will be calculated by CPU, or copied from GPU memory) */ double* host_function_values = NULL; /* Buffer for calculated function values on host (for checking purposes, will be calculated by CPU) */ double* host_right_function_values = NULL; /* Buffer for calculated function values on device (will be calculated by GPU) */ double* device_function_values = NULL; /* Elapsed calculation time */ float elapsed_time = 0.0f; /* Necessary buffer size for calculated values */ const unsigned int fv_size = RANGE_ELEMENTS_COUNT * sizeof(double); /* CUDA events to count elapsed time */ cudaEvent_t cuda_event_start, cuda_event_stop; /* Variable for CUDA errors processing */ cudaError_t cuda_error = cudaSuccess; /* Check whenever there are proper arguments */ if ((argc > 1) && (argc < 3)) { fprintf(stderr, "ERROR: Bad arguments.\n Usage: cuda_sin [--cuda <threads_per_block>]\n"); return 1; } else if ((argc > 1) && (strncmp(argv[1], "--cuda", 7) != 0)) { fprintf(stderr, "ERROR: Bad arguments.\n Usage: cuda_sin [--cuda <threads_per_block>]\n"); return 1; } if (argc > 1) { /* Switch CUDA usage flag */ use_cuda = 1; /* Try to read the --cuda value */ if (sscanf(argv[2], "%u", &cuda_threads_per_block) != 1) { 
fprintf(stderr, "ERROR: Bad number of threads_per_block.\n"); return 1; } else if (cuda_threads_per_block == 0) { /* Bad threads count value */ fprintf(stderr, "ERROR: Bad number of threads_per_block.\n"); return 1; } } /* Calculate CUDA blocks count (must not exceed maximum possible value) and elements to be calculated per each thread */ do { ++cuda_elements_per_thread; cuda_blocks_count = (RANGE_ELEMENTS_COUNT / (cuda_elements_per_thread * cuda_threads_per_block)) + ((RANGE_ELEMENTS_COUNT % (cuda_elements_per_thread * cuda_threads_per_block) > 0)? 1: 0); } while (cuda_blocks_count > CUDA_BLOCKS_MAX); /* Create CUDA events */ cuda_error = cudaEventCreate(&cuda_event_start); if (cuda_error != cudaSuccess) { fprintf(stderr, "ERROR: Unable to create CUDA event.\n"); return 2; } cuda_error = cudaEventCreate(&cuda_event_stop); if (cuda_error != cudaSuccess) { cudaEventDestroy(cuda_event_start); fprintf(stderr, "ERROR: Unable to create CUDA event.\n"); return 2; } /* Allocate host memory for proper values */ host_right_function_values = (double*)malloc(fv_size); if (!host_right_function_values) { cudaEventDestroy(cuda_event_start); cudaEventDestroy(cuda_event_stop); fprintf(stderr, "ERROR: Unable to allocate host memory.\n"); return 3; } /* Allocate host memory for values to be checked */ host_function_values = (double*)malloc(fv_size); if (!host_function_values) { free(host_right_function_values); cudaEventDestroy(cuda_event_start); cudaEventDestroy(cuda_event_stop); fprintf(stderr, "ERROR: Unable to allocate host memory.\n"); return 3; } if (use_cuda) { /* Allocate GPU memory for values to be calculated */ cuda_error = cudaMalloc((void**)&device_function_values, fv_size); if (cuda_error == cudaSuccess) { /* Asynchronously note that calculation has started */ cudaEventRecord(cuda_event_start, 0); /* Asynchronously run GPU calculations */ device_sin<<<cuda_blocks_count, cuda_threads_per_block>>>(device_function_values, cuda_elements_per_thread); /* Asynchronously note 
that calculation has been completed */ cudaEventRecord(cuda_event_stop, 0); /* Wait for all the asynchronous calls to be executed and proceeded */ cudaEventSynchronize(cuda_event_stop); /* Copy GPU calculated values into the host memory*/ cudaMemcpy(host_function_values, device_function_values, fv_size, cudaMemcpyDeviceToHost); /* Deallocate GPU memory */ cudaFree((void*)device_function_values); } } else { /* Asynchronously note that calculation has started */ cudaEventRecord(cuda_event_start, 0); /* Wait for event to be proceeded */ cudaEventSynchronize(cuda_event_start); /* Run CPU calculations */ host_sin(host_function_values); /* Asynchronously note that calculation has been completed */ cudaEventRecord(cuda_event_stop, 0); /* Wait for event to be proceeded */ cudaEventSynchronize(cuda_event_stop); } /* Caclulate proper values on host */ host_sin(host_right_function_values); /* Check caclulated values to be proper ones */ for (i = 0; i < RANGE_ELEMENTS_COUNT; ++i) { if (abs(host_function_values[i] - host_right_function_values[i]) > 1e-10) { are_counted_properly = 0; break; } } if (cuda_error == cudaSuccess) { /* Everything was ok with CUDA - calculate elapsed time in ms */ cudaEventElapsedTime(&elapsed_time, cuda_event_start, cuda_event_stop); /* Print information about performed calculations */ fprintf( stdout, "Calculated %s %u values on %s. Time spent: %.02f ms.\n", (are_counted_properly)? "properly": "UNproperly", RANGE_ELEMENTS_COUNT, (use_cuda)? "device": "host", elapsed_time ); } else { /* Something went wrong - just notify the user, cleanup follows */ fprintf(stderr, "ERROR: Unable to allocate device memory.\n"); } /* Cleanup */ free(host_function_values); free(host_right_function_values); cudaEventDestroy(cuda_event_start); cudaEventDestroy(cuda_event_stop); /* If CUDA commands executed successfully - then everything is ok */ return (cuda_error == cudaSuccess)? 0: 3; }
11,193
#include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <errno.h> #include <unistd.h> /****************************************************************************** * This program takes an initial estimate of m and c and finds the associated * rms error. It is then as a base to generate and evaluate 8 new estimates, * which are steps in different directions in m-c space. The best estimate is * then used as the base for another iteration of "generate and evaluate". This * continues until none of the new estimates are better than the base. This is * a gradient search for a minimum in mc-space. * * To compile: * nvcc -o Linear_Regression_Cuda Linear_Regression_Cuda.cu -lm * * To run: * ./Linear_Regression_Cuda * *****************************************************************************/ typedef struct point_t { double a; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; //actual data point_t data[] = { {77.89,116.59},{79.43,135.16},{72.58,124.16},{65.87,84.94}, {77.91,105.07},{77.61,100.91},{65.19,87.28},{68.24,108.54}, {68.18,108.96},{65.14,87.12},{70.73,111.81},{73.74,114.69}, {82.49,132.76},{73.35,112.78},{56.83,82.52},{67.19,101.44}, {77.61,102.34},{27.05,44.84},{29.21,55.05},{25.30,48.07}, {65.59,93.37},{35.28,89.77},{31.64,50.85},{36.60,59.51}, {41.65,86.34},{98.78,128.01},{ 5.42,45.75},{49.43,73.67}, {36.43,61.97},{25.45,59.56},{62.07,107.62},{ 1.85, 1.08}, {22.47,57.37},{65.14,104.10},{81.68,124.48},{49.74,81.14}, {24.74,67.98},{81.19,114.82},{22.01,42.88},{21.87,66.80}, {24.32,51.31},{40.73,64.87},{85.39,117.39},{99.84,130.84}, {89.94,110.06},{11.34,37.57},{15.66,43.89},{21.86,43.02}, {56.84,92.01},{70.79,109.34},{ 9.56,41.21},{45.49,76.47}, {69.82,98.03},{ 0.68,30.24},{44.05,111.12},{20.73,62.02}, {58.79,87.31},{ 0.25,22.01},{66.61,111.75},{27.29,65.91}, {64.23,111.56},{35.20,62.31},{36.66,81.64},{32.06,76.79}, {11.70,44.02},{12.70,59.81},{30.92,63.30},{69.53,106.64}, 
{25.42,32.71},{75.51,109.91},{74.45,111.39},{83.26,125.38}, {51.09,81.88},{14.50,50.64},{46.19,85.17},{50.28,81.00}, {51.74,111.03},{19.15,47.39},{ 6.62,52.12},{83.28,116.55}, {24.57,64.07},{96.86,140.23},{36.35,99.61},{86.02,127.51}, { 0.05,41.28},{55.84,87.11},{73.81,111.87},{79.97,132.52}, {80.95,115.80},{25.94,53.39},{45.79,80.87},{12.62,54.74}, {67.76,119.85},{14.06,50.09},{58.02,93.00},{55.61,97.31}, {92.22,132.14},{88.79,133.96},{82.43,130.24},{96.46,130.65}, {41.90,61.25},{ 2.27,32.10},{36.70,67.17},{90.89,133.39}, {80.96,113.41},{46.55,68.11},{10.87,42.11},{51.80,92.48}, { 8.46,31.79},{52.76,77.57},{19.83,63.87},{46.78,87.26}, {10.15,14.95},{48.00,75.03},{85.88,125.50},{69.24,96.36}, {23.04,61.58},{79.99,109.31},{33.67,68.16},{ 4.70,34.15}, {61.83,101.19},{76.25,110.46},{83.68,112.22},{21.52,57.67}, {72.25,110.78},{51.48,86.33},{86.99,122.27},{12.42,33.53}, {76.19,107.34},{16.72,48.04},{49.31,59.84},{10.03,40.51}, {91.11,129.19},{70.49,111.54},{17.74,61.65},{78.79,117.17}, {16.81,55.70},{31.53,67.73},{43.22,88.99},{56.03,102.01}, {33.13,80.87},{19.83,56.99},{38.78,67.60},{33.66,80.33}, {47.47,69.57},{94.55,143.95},{99.35,142.97},{22.15,48.34}, {88.30,111.71},{66.22,130.98},{57.05,80.07},{ 0.71,20.71}, {74.40,128.40},{70.38,114.90},{31.95,61.31},{ 5.10,36.77}, { 4.21,35.84},{65.62,103.20},{ 7.67,51.22},{66.40,98.34}, {64.63,98.42},{53.82,96.89},{54.77,81.23},{60.46,97.21}, {42.49,96.46},{ 9.50,33.58},{ 8.11,44.04},{40.20,62.45}, {11.03,53.23},{96.17,129.36},{94.16,136.40},{82.10,113.67}, {42.86,67.55},{ 9.28,49.54},{67.46,83.96},{38.59,68.58}, {34.09,52.84},{79.80,106.27},{56.32,106.17},{20.61,47.76}, {22.06,64.25},{35.96,70.64},{12.49,30.40},{17.24,50.23}, {34.22,80.32},{17.78,41.95},{43.45,84.58},{76.12,101.20}, {38.00,66.47},{71.81,118.38},{68.21,111.27},{59.81,90.14}, {99.87,125.63},{74.76,95.95},{ 7.87,51.92},{83.25,115.00}, {12.50,57.32},{71.77,106.42},{67.97,110.25},{21.96,53.06}, {86.72,125.71},{37.90,81.30},{16.89,50.69},{43.52,90.65}, 
{33.71,70.99},{45.15,87.54},{97.23,129.50},{ 5.85,11.96}, {67.18,98.25},{62.84,108.96},{37.23,83.34},{93.22,134.68}, { 0.02,30.72},{20.85,56.61},{40.12,72.75},{76.88,91.67}, {73.80,109.78},{30.25,58.43},{17.66,57.04},{65.87,98.37}, {34.99,59.52},{73.02,115.45},{ 7.38,49.85},{22.52,46.14}, {90.51,114.60},{53.37,82.30},{36.40,60.14},{27.46,54.74}, { 8.68,20.17},{ 4.25,19.95},{26.96,63.71},{39.20,91.31}, {17.85,59.22},{92.75,117.65},{35.14,60.78},{69.97,98.10}, {59.31,90.93},{93.95,157.29},{48.64,86.90},{30.94,64.23}, {88.51,119.82},{23.59,52.46},{84.88,135.91},{54.30,94.62}, {16.90,35.86},{89.84,121.84},{35.61,75.77},{20.39,48.77}, { 8.06,41.27},{50.32,77.86},{47.44,89.91},{11.38,28.04}, { 5.54,36.67},{44.45,65.78},{61.46,88.28},{35.10,80.69}, {54.26,94.41},{31.56,77.15},{72.35,111.07},{42.50,79.59}, {37.91,86.60},{36.52,62.67},{49.28,96.70},{34.56,84.47}, {92.95,145.64},{49.25,83.05},{87.40,112.45},{43.03,96.44}, {19.10,48.86},{28.75,70.50},{36.80,81.74},{94.73,128.92}, {83.60,107.24},{ 4.39,39.96},{ 3.15,40.00},{30.18,34.12}, {98.43,137.12},{26.07,43.67},{79.79,132.04},{56.74,75.55}, {10.77,41.45},{33.87,76.15},{95.54,134.71},{97.26,143.74}, {66.13,88.73},{51.02,77.93},{52.22,87.21},{85.65,128.58}, { 4.46,47.72},{92.20,138.26},{46.51,78.46},{69.64,92.41}, {69.22,118.77},{82.09,118.86},{30.90,72.28},{22.69,56.93}, {60.51,93.07},{ 9.46,30.38},{59.35,79.46},{89.18,124.95}, { 2.24,23.63},{90.38,143.27},{70.52,136.81},{49.83,76.39}, {62.62,100.97},{15.10,46.18},{20.86,43.67},{71.03,115.72}, {10.42,23.53},{22.24,63.83},{64.14,104.10},{62.14,113.81}, {34.27,83.00},{30.78,70.31},{76.30,112.15},{62.48,101.74}, {18.70,52.65},{34.22,61.44},{59.99,106.34},{64.38,109.00}, {91.57,136.33},{ 8.88,33.29},{74.90,113.41},{85.26,118.22}, {45.86,81.27},{63.72,99.11},{58.01,88.11},{23.03,55.87}, {76.54,104.12},{49.94,81.37},{51.70,81.04},{10.60,41.01}, {96.24,136.95},{69.96,100.65},{94.14,130.05},{ 2.30,51.14}, { 9.04,38.79},{44.95,93.23},{37.11,70.49},{42.96,71.55}, 
{77.37,112.48},{26.35,61.63},{55.20,84.12},{78.18,122.58}, {87.76,132.66},{97.71,144.61},{32.94,79.64},{43.19,69.60}, {52.64,96.71},{93.11,136.27},{82.88,127.13},{ 9.76,34.52}, { 6.27,29.27},{93.42,128.51},{16.53,46.56},{46.63,84.07}, {89.24,129.61},{90.53,128.83},{30.39,57.73},{43.63,90.07}, {53.08,95.59},{16.05,60.09},{ 0.53,36.14},{65.39,118.78}, {89.72,136.78},{53.86,94.94},{25.68,50.80},{89.21,127.19}, { 5.12,40.08},{27.50,56.84},{40.56,66.21},{97.47,143.93}, {78.94,127.15},{19.28,55.64},{ 4.22,35.54},{98.28,148.04}, {46.04,81.66},{19.82,50.62},{99.40,138.49},{43.07,57.26}, {67.27,109.65},{23.99,62.49},{45.98,71.80},{89.84,133.27}, {44.35,83.08},{23.73,46.67},{ 3.78,57.80},{82.53,125.86}, {22.20,42.87},{80.17,130.25},{72.42,109.67},{98.11,127.35}, {64.13,108.07},{25.55,63.47},{34.50,55.33},{86.89,136.62}, { 2.55,23.55},{53.55,86.20},{82.61,111.69},{98.28,149.93}, {67.96,119.39},{55.44,90.01},{92.62,138.20},{63.05,123.03}, {47.80,83.73},{72.65,118.21},{99.76,121.95},{65.60,109.46}, {57.32,99.05},{ 2.91,25.35},{50.78,79.98},{30.51,60.33}, {66.72,90.11},{17.77,45.17},{41.06,85.22},{29.88,68.16}, { 2.40,22.79},{99.89,152.70},{58.07,91.46},{72.13,99.98}, {75.04,99.67},{68.16,105.94},{ 2.30, 9.85},{94.97,134.07}, {28.66,80.23},{ 4.52,41.33},{77.24,117.10},{36.24,64.77}, {30.33,67.71},{79.10,108.07},{66.87,117.90},{18.70,29.48}, {61.29,85.22},{39.21,75.00},{81.99,114.20},{75.72,99.86}, {82.18,108.55},{18.71,39.70},{78.85,131.66},{39.95,68.61}, {37.73,67.67},{83.18,133.05},{95.64,130.99},{36.23,72.36}, {56.04,87.42},{39.32,73.21},{88.45,132.44},{71.41,117.23}, {42.98,66.83},{96.92,141.27},{53.24,90.27},{34.06,57.84}, {86.69,119.91},{89.69,118.76},{99.40,135.59},{85.75,120.06}, {95.47,145.69},{12.19,59.44},{62.06,90.38},{35.49,66.18}, {39.90,56.52},{56.66,100.89},{82.00,125.16},{15.95,38.99}, { 5.38,31.87},{67.92,91.98},{98.98,151.72},{ 2.15,27.30}, {19.03,40.45},{94.90,142.41},{ 0.67,23.73},{98.15,138.25}, {50.71,70.52},{80.42,124.17},{30.03,76.11},{35.67,60.26}, 
{17.61,46.21},{ 9.02,32.92},{58.37,91.11},{ 3.44,40.79}, {14.39,18.83},{ 5.66,36.62},{14.27,60.82},{83.36,123.44}, { 4.00,27.01},{26.80,63.17},{72.30,96.68},{72.09,127.51}, {57.75,92.66},{ 6.50,43.22},{38.64,71.12},{95.82,121.53}, {50.91,78.79},{18.01,46.48},{14.56,37.77},{48.75,81.69}, {27.03,60.40},{46.10,81.08},{19.05,48.60},{14.04,47.91}, {83.08,125.66},{49.22,92.10},{89.06,135.78},{54.42,108.24}, {87.16,130.50},{60.90,110.81},{72.29,124.80},{79.96,106.71}, {68.41,118.46},{84.76,104.03},{96.40,135.76},{59.76,80.37}, {29.19,74.90},{80.01,99.83},{97.02,145.13},{87.86,124.36}, {46.43,86.94},{15.52,52.42},{61.92,102.38},{24.39,63.32}, {78.96,119.67},{56.16,105.14},{86.63,129.72},{67.21,80.26}, {21.06,47.66},{ 7.17,42.97},{83.01,126.81},{48.46,90.84}, {44.48,70.76},{65.19,98.26},{90.25,118.36},{38.07,80.24}, {37.94,74.99},{89.57,142.89},{ 0.90,60.27},{58.95,72.82}, { 7.55,58.33},{15.08,52.77},{16.42,46.33},{96.79,142.15}, { 1.20,37.18},{ 7.15,41.39},{88.10,122.37},{74.12,115.21}, {23.48,43.20},{66.45,101.69},{67.50,112.76},{40.79,67.20}, {97.89,143.10},{58.46,100.12},{86.84,141.21},{29.34,67.57}, { 4.35,45.85},{ 7.95,50.82},{45.97,89.31},{62.14,95.93}, {11.32,57.47},{36.53,56.51},{46.37,89.27},{44.20,80.22}, {23.27,64.75},{22.08,53.15},{69.36,89.48},{96.13,138.69}, { 6.58,37.97},{59.71,100.31},{70.33,101.83},{71.33,104.21}, {20.12,50.78},{59.47,86.83},{98.15,136.89},{ 2.50,35.96}, { 8.45,33.38},{59.35,110.74},{26.15,55.02},{70.79,127.16}, {32.64,88.59},{67.44,116.92},{20.82,64.57},{85.07,123.61}, {37.02,66.14},{16.67,41.49},{28.11,53.12},{71.62,102.00}, { 9.73,40.82},{62.60,94.55},{59.83,102.03},{88.10,109.45}, {57.33,92.74},{55.39,109.29},{92.38,117.08},{78.09,130.24}, {95.04,138.56},{23.79,47.33},{80.20,111.03},{17.85,52.49}, {73.59,106.15},{39.74,95.07},{94.84,128.27},{71.99,91.99}, {56.24,115.39},{99.41,118.85},{ 3.86,39.05},{70.93,118.63}, {54.59,96.03},{ 6.98,44.65},{15.49,60.38},{16.53,42.92}, {11.69,40.70},{76.34,108.40},{21.13,42.93},{36.87,87.47}, 
{13.18,54.59},{80.94,115.97},{21.66,59.48},{ 6.57,36.66}, {30.07,51.05},{15.70,57.10},{90.22,130.29},{63.06,97.59}, {38.32,89.03},{ 6.24,45.37},{28.74,72.37},{51.08,89.46}, {83.73,132.22},{89.47,128.66},{52.96,70.62},{64.21,102.32}, {79.77,106.59},{65.28,97.82},{68.02,96.98},{35.68,63.30}, {88.49,125.58},{87.00,117.82},{46.68,88.34},{80.27,130.06}, { 5.86,37.42},{91.43,140.75},{34.66,74.60},{11.64,38.73}, {89.50,120.61},{74.72,110.05},{64.76,126.70},{82.79,124.26}, {80.76,122.14},{10.03,24.46},{ 7.99,49.56},{71.82,97.91}, {66.08,123.61},{92.65,116.25},{ 9.84,34.96},{55.02,86.90}, { 4.90,36.88},{ 2.99,49.89},{18.37,45.69},{86.79,131.40}, {31.98,61.04},{11.25,39.39},{14.23,51.26},{ 7.00,49.52}, {12.79,50.64},{93.34,142.92},{14.56,55.30},{ 1.36,44.86}, {39.95,50.68},{46.62,76.76},{32.22,80.11},{11.84,49.74}, {20.67,56.73},{96.29,133.37},{67.82,96.63},{ 1.12,34.98}, {72.72,113.55},{51.74,79.73},{57.91,82.88},{70.49,87.78}, {30.42,75.80},{30.00,49.25},{46.33,81.17},{86.32,107.82}, { 8.94,37.41},{71.59,78.68},{95.60,142.61},{66.85,107.33}, {39.97,76.00},{78.91,109.51},{61.46,113.58},{ 7.78,55.60}, {58.43,94.30},{73.76,114.08},{62.44,107.76},{31.49,52.35}, {48.13,82.49},{79.76,138.97},{42.20,66.12},{23.96,42.34}, {99.34,132.51},{35.49,84.22},{51.36,94.43},{58.05,105.06}, {16.60,29.11},{53.17,95.70},{71.33,126.53},{84.70,115.20}, {28.80,76.47},{49.29,87.33},{ 2.18,37.63},{66.44,97.07}, {58.53,96.12},{65.98,105.79},{44.40,80.38},{59.24,107.15}, {13.62,45.20},{44.51,82.51},{77.03,130.94},{59.59,103.55}, {97.40,141.07},{58.99,94.94},{88.86,122.28},{12.20,45.50}, {51.71,80.04},{88.59,123.42},{69.88,113.17},{ 1.16,42.94}, {34.08,54.28},{ 9.32,26.82},{11.33,47.60},{93.69,140.01}, {93.51,126.66},{60.50,90.91},{61.97,96.68},{ 9.44,39.23}, {30.70,68.86},{31.11,60.61},{73.91,115.70},{87.67,136.37}, {58.40,82.14},{45.16,79.46},{71.18,80.79},{19.44,50.76}, {99.42,144.45},{67.64,106.86},{35.88,78.04},{68.90,136.79}, {24.91,54.57},{22.51,67.57},{49.85,80.31},{63.67,108.11}, 
{67.44,102.87},{10.25,62.81},{65.47,97.94},{30.56,50.89}, {41.56,64.02},{10.09,44.75},{92.91,127.59},{88.14,139.96}, { 2.33,37.08},{99.25,144.57},{26.23,57.89},{ 5.86,30.05}, {51.97,98.88},{63.41,95.68},{60.10,93.67},{95.34,150.97}, {55.30,103.24},{63.37,95.68},{79.53,105.33},{10.67,37.08}, {43.44,75.54},{30.69,55.63},{94.48,135.50},{33.07,67.47}, {19.59,62.40},{68.53,107.23},{15.59,42.99},{90.55,131.59}, {22.43,66.53},{41.50,72.21},{34.30,77.95},{54.27,113.13}, {31.89,81.30},{48.53,76.03},{46.03,79.07},{12.67,44.36}, {88.73,127.08},{57.63,94.21},{96.43,137.06},{24.18,66.87}, {94.17,134.92},{40.44,84.72},{ 6.88,36.82},{37.40,80.81}, {40.86,67.83},{ 8.02,52.88},{ 4.29,17.26},{79.35,132.97}, {83.66,117.99},{64.19,94.48},{ 4.95,23.02},{72.11,123.93}, {43.29,74.70},{48.62,74.92},{29.24,62.12},{93.94,151.96}, {75.15,126.68},{84.68,119.27},{ 0.00,40.86},{ 7.82,49.87}, {61.80,103.04},{38.81,62.37},{71.06,110.07},{48.24,80.77}, {28.58,76.85},{ 5.92,32.67},{21.48,45.33},{78.56,131.61}, {67.15,93.29},{ 6.82,49.66},{77.85,112.57},{88.00,132.33}, {22.69,39.34},{93.81,135.63},{67.53,104.05},{64.18,93.75}, {24.40,62.80},{77.88,106.70},{ 6.88,47.88},{80.95,132.76}, {80.01,102.53},{58.29,96.48},{58.44,93.06},{30.65,78.46}, {30.69,56.85},{90.58,143.37},{ 9.54,45.31},{19.14,48.46}, {11.11,30.65},{65.64,117.55},{87.68,139.32},{55.10,97.72}, {58.25,83.49},{98.54,129.12},{54.80,96.41},{75.74,112.95}, {81.75,115.13},{83.67,107.43},{80.51,118.77},{15.02,47.60}, {75.20,109.14},{95.42,131.44},{76.60,130.11},{27.66,60.66}, {89.43,137.46},{59.59,94.03},{68.03,107.63},{44.94,75.77}, {97.25,132.84},{46.26,65.32},{38.88,97.96},{57.18,97.30}, {18.93,62.65},{ 3.75,21.68},{49.56,77.42},{39.61,77.05}, {69.18,102.75},{58.27,94.45},{ 1.82,27.82},{52.54,81.93}, {49.54,66.81},{41.75,87.30},{92.52,122.21},{96.28,156.62}, {96.70,145.80},{ 6.84,48.97},{71.71,131.32},{84.63,117.96}, {26.22,47.12},{75.15,108.17},{ 2.88,25.42},{41.59,93.59}, {55.66,78.63},{27.12,71.20},{80.13,118.94},{59.52,95.82}, 
{52.65,93.72},{16.97,35.73},{ 9.43,48.24},{11.71,55.13}, { 9.30,46.23},{85.00,125.44},{12.00,41.45},{75.97,114.09}, {96.48,143.44},{30.98,47.06},{96.34,140.61},{52.30,86.84}, {36.45,54.15},{89.95,121.50},{99.68,146.29},{53.68,86.46}, {88.17,120.91},{20.95,60.61},{71.57,97.83},{70.60,108.36}, {20.98,49.15},{22.55,53.02},{42.64,82.82},{67.45,111.34}, { 3.91,40.60},{94.06,118.31},{ 4.39,39.14},{25.39,52.71}, {43.58,91.27},{63.65,93.31},{83.36,123.06},{30.96,65.58}, {51.71,90.85},{10.78,35.24},{30.17,56.11},{22.79,56.65}, {94.05,143.42},{74.41,116.96},{89.16,141.41},{86.30,129.48}, {33.46,62.67},{33.59,93.96},{50.41,107.05},{19.10,61.85}, {66.10,122.19},{21.92,74.51},{45.83,77.94},{84.48,116.26}, { 7.02,44.63},{ 6.24,33.73},{63.14,112.67},{12.72,54.66}, {77.56,118.55},{21.49,68.00},{43.79,101.03},{90.39,116.56}, {52.02,76.65},{75.59,127.31},{ 4.13,40.09},{38.67,76.41}, { 1.44,21.54},{89.87,137.76},{93.09,118.45},{69.08,116.83}, {39.02,85.56},{ 2.77,26.54},{98.26,130.85},{69.94,102.69}, {16.88,41.94},{23.36,51.67},{28.57,57.49},{30.32,68.20}, {55.56,90.01},{68.63,102.29},{77.93,118.49},{62.59,115.24}, {84.44,122.07},{28.85,59.61},{67.58,107.20},{94.78,137.13}, {42.41,58.06},{85.98,102.66},{69.50,108.50},{ 5.94,54.64}, {46.33,93.94},{97.01,134.77},{45.11,74.00},{ 2.88,30.02}, {71.87,105.24},{ 9.66,43.40},{95.74,126.35},{99.10,137.37}, {21.97,52.89},{65.30,94.66},{20.16,68.47},{ 8.03,28.56}, {25.42,55.87},{31.36,68.51},{82.24,127.48},{64.51,90.01}, {92.83,125.44},{20.66,42.55},{ 1.65,35.93},{56.49,102.53}, {68.91,104.59},{ 7.00,23.73},{17.92,52.30},{81.51,134.23}, {53.19,82.77},{77.14,114.74},{63.24,106.36},{67.63,110.29}, {98.66,132.52},{10.41,47.43},{16.23,49.49},{ 4.49,43.28} }; double residual_error(double a, double y, double m, double c) { double e = (m * a) + c - y; return e * e; } __device__ double d_residual_error(double a, double y, double m, double c) { double e = (m * a) + c - y; return e * e; } double RmsError(double m, double c) { int i; double mean; double 
error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].a, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __global__ void d_RmsError(double *m, double *c, double *error_sum_arr, point_t *d_data) { /* Calculate the current index by using: - The thread id - The block id - The number of threads per block */ int i = threadIdx.x + blockIdx.x * blockDim.x; //Work out the error sum 1000 times and store them in an array. error_sum_arr[i] = d_residual_error(d_data[i].a, d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; //Get the system time before we begin the linear regression. 
clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; //Device variables double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = RmsError(bm, bc); //Allocate memory for d_dm error = cudaMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_dc error = cudaMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_error_sum_arr error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_data error = cudaMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } //Copy memory for dm to d_dm error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for dc to d_dc error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for data to d_data error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error, cudaGetErrorString(error)); } for(i=0;i<8;i++) { //Host variable storing the array returned from the kernel function. double h_error_sum_arr[1000]; //Stores the total sum of the values from the error sum array. double error_sum_total; //Stores the mean of the total sum of the error sums. 
double error_sum_mean; //Call the RmsError function using 100 blocks and 10 threads. d_RmsError <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); cudaThreadSynchronize(); //Copy memory for d_error_sum_arr error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error, cudaGetErrorString(error)); } //Loop through the error sum array returned from the kernel function for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. error_sum_total += h_error_sum_arr[j]; } //Calculate the mean for the error sum. error_sum_mean = error_sum_total / n_data; //Calculate the square root for the error sum mean. e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } //printf("best m,c is %lf,%lf with error %lf in direction %d\n", //dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = cudaFree(d_dm); if(error){ fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_dc error = cudaFree(d_dc); if(error){ fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_data error = cudaFree(d_data); if(error){ fprintf(stderr, "cudaFree on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_error_sum_arr error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); //Get the system time after we have run the linear regression function. 
clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the time spent between the start time and end time. time_difference(&start, &finish, &time_elapsed); //Output the time spent running the program. printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
11,194
#include "cuda_runtime.h"
#include "kernel.cuh"
#include <stdio.h>

/**
 * Element-wise vector addition: C[i] = A[i] + B[i].
 * Expected launch: 1D grid of 1D blocks covering at least arraySize threads.
 */
__global__ void vectorAdditionKernel(double* A, double* B, double* C, int arraySize) {
    // Get thread ID (flat global index).
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;

    // Guard the grid tail: the grid rarely divides arraySize evenly.
    if (threadID < arraySize) {
        // Add a and b.
        C[threadID] = A[threadID] + B[threadID];
    }
}

/**
 * Wrapper function for the CUDA kernel function.
 * Copies A and B to the device, runs the addition, prints the effective
 * bandwidth of the run, and copies the result back into C.
 * @param A Array A.
 * @param B Array B.
 * @param C Sum of array elements A and B directly across.
 * @param arraySize Size of arrays A, B, and C.
 */
void kernel(double* A, double* B, double* C, int arraySize) {
    // Initialize device pointers.
    double *d_A, *d_B, *d_C;

    // Allocate device memory.
    cudaMalloc((void**)&d_A, arraySize * sizeof(double));
    cudaMalloc((void**)&d_B, arraySize * sizeof(double));
    cudaMalloc((void**)&d_C, arraySize * sizeof(double));

    // Transfer arrays a and b to device.
    cudaMemcpy(d_A, A, arraySize * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, arraySize * sizeof(double), cudaMemcpyHostToDevice);

    // Calculate blocksize and gridsize.
    // FIX: the grid size was "512 / arraySize + 1", which launches a single
    // block whenever arraySize > 512 and leaves most elements unprocessed.
    // Ceiling division gives every element a thread.
    dim3 blockSize(512, 1, 1);
    dim3 gridSize((arraySize + 512 - 1) / 512, 1);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    // Launch CUDA kernel.
    vectorAdditionKernel<<<gridSize, blockSize>>>(d_A, d_B, d_C, arraySize);
    // FIX: the stop event was never recorded, so cudaEventSynchronize and
    // cudaEventElapsedTime operated on an unrecorded event.
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Effective bandwidth: two input reads + one output write of arraySize
    // doubles, converted to GB/s (bytes / ms -> bytes/s / 1e9 == /1e6).
    // FIX: the previous formula (3 * 4 * 3 / ms) ignored both the array size
    // and the element width.
    double bytesMoved = 3.0 * arraySize * sizeof(double);
    printf("Effective Bandwidth (GB/s): %f", bytesMoved / milliseconds / 1.0e6);

    // Copy result array c back to host memory.
    cudaMemcpy(C, d_C, arraySize * sizeof(double), cudaMemcpyDeviceToHost);

    // FIX: release device buffers and timing events (previously leaked on
    // every call).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
11,195
#include <stdio.h>
// FIX: std::malloc/std::free were used without including <cstdlib>; the
// previous includes <driver_types.h> and <curand_mtgp32_kernel.h> were
// unused (the latter is an internal CUDA header not meant for inclusion).
#include <cstdlib>

// KERNEL: one thread per element; writes the CUBE of each input value.
// (Named "square" for historical reasons — an earlier f*f version is in the
// file history — but the active statement computes f*f*f; behavior is kept.)
__global__ void square(float *d_out, float *d_in, int n){
    int idx = threadIdx.x;
    // Guard in case the launch ever uses more threads than elements.
    if (idx < n) {
        float f = d_in[idx];
        d_out[idx] = f * f * f;   // cube, not square
    }
}

int main(){
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Input array on host: 0, 1, 2, ...
    float *h_in = (float*) std::malloc(ARRAY_BYTES);
    for (int i = 0; i < ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }
    float *h_out = (float*) std::malloc(ARRAY_BYTES);

    // GPU pointers.
    float *d_in;
    float *d_out;

    // Allocate GPU memory.
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);

    // Move input from CPU to GPU.
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // Launch: one block of ARRAY_SIZE threads, one element per thread.
    square<<<1, ARRAY_SIZE>>>(d_out, d_in, ARRAY_SIZE);

    // Copy back to CPU (blocking memcpy also synchronizes with the kernel).
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Print array, four values per row.
    for (int i = 0; i < ARRAY_SIZE; i++){
        printf("%.0f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);
    std::free(h_in);
    std::free(h_out);
    return 0;
}
11,196
#include "includes.h"

// Per-pixel YUV (planar Y/Cb/Cr) to RGB (planar R/G/B) conversion.
// One thread per pixel; n is the total pixel count.
// NOTE(review): the coefficients (1.14, -0.396, -0.581, 2.029) are applied
// without the usual -128 chroma centering — presumably the caller pre-centers
// Cb/Cr; confirm against the host code. Outputs are not clamped to [0, 255].
__global__ void yuv2rgbKernel(int *imgy, int *imgcb, int *imgcr, int *imgr, int *imgg, int *imgb, int n)
{
    int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    if (pixel >= n) {
        return;   // grid tail: nothing to convert
    }

    int y  = imgy[pixel];
    int cb = imgcb[pixel];
    int cr = imgcr[pixel];

    // Expressions kept verbatim from the original so the int/double
    // promotions and the final truncating casts are unchanged.
    imgr[pixel] = (int)( 1*y + 0*cb + 1.14*cr);
    imgg[pixel] = (int)( 1*y - 0.396*cb - 0.581*cr);
    imgb[pixel] = (int)( 1*y + 2.029*cb + 0*cr);
}
11,197
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLOCK_SIZE 16

#define MAX(i,j) ( (i)<(j) ? (j):(i) )
#define MIN(i,j) ( (i)<(j) ? (i):(j) )

// 2D accessors over the flat row-major buffers.
#define SubArrayA(x,y) subArrayA[(x)*BLOCK_SIZE+(y)]
#define SubArrayB(x,y) subArrayB[(x)*BLOCK_SIZE+(y)]
#define InputArrayA(x,y) inputArrayA[(x)*BLOCK_SIZE+(y)]
#define InputArrayB(x,y) inputArrayB[(x)*BLOCK_SIZE+(y)]
#define ImageOut(x,y) imageOut[(x)*imageSize+(y)]
#define ZiArray(x,y) ziArray[(x)*imageSize+(y)]

__device__ float gaussianDistance(float *inputArrayA, float *inputArrayB, int xj, int yj, int halfPatchWidth);
__device__ float weightingFunct(float *inputArrayA, float *inputArrayB, int xj, int yj, int halfPatchWidth, float sigma, float Zi);
__device__ float normFactor(float *inputArrayA, float *inputArrayB, int halfPatchWidth, float sigma);
__device__ float nonLocalMeans(float *inputArrayA, float *inputArrayB, float *imageOut, int xi, int yi, int halfPatchWidth, int imageSize, float sigma, float Zi);

/**
 * Non-local-means driver kernel (tile-vs-tile formulation).
 *
 * Launch expectation: a 3D grid of BLOCK_SIZE x BLOCK_SIZE blocks, where
 * (blockIdx.x, blockIdx.y) selects the tile owning pixel i = (xi, yi) and
 * blockIdx.z enumerates the neighbour tile j. Runs in two passes:
 *   flag == 1 : accumulate the normalization factor Z(i) into ziArray;
 *   flag != 1 : accumulate sum_j w(i,j)*f(j) into imageOut, using ziArray.
 *
 * NOTE(review): assumes imageSize is a multiple of BLOCK_SIZE (there is no
 * edge guard, matching the original commented-out bounds check) and that
 * imageOut/ziArray are zeroed by the host before the first launch — confirm.
 */
__global__ void mainGpuFunction(float const * const inputArray, float *imageOut, float *ziArray, int halfPatchWidth, int imageSize, float sigma, int flag)
{
    // Pixel owned by this thread.
    int xi = blockIdx.x * blockDim.x + threadIdx.x;
    int yi = blockIdx.y * blockDim.y + threadIdx.y;

    __shared__ float subArrayA[BLOCK_SIZE*BLOCK_SIZE];
    __shared__ float subArrayB[BLOCK_SIZE*BLOCK_SIZE];

    // Decode blockIdx.z = neighblockX*blockDim.x + neighblockY directly.
    // FIX: the original recovered (neighblockX, neighblockY) with a nested
    // O(blockDim.x * blockDim.y) search and left both UNINITIALIZED when
    // blockIdx.z fell outside the searched range; div/mod inverts the
    // mapping exactly (assumes square blocks, blockDim.x == blockDim.y).
    int neighblockX = blockIdx.z / blockDim.x;
    int neighblockY = blockIdx.z % blockDim.x;

    // Pixel of the neighbour tile handled by this thread.
    int xj = neighblockX * blockDim.x + threadIdx.x;
    int yj = neighblockY * blockDim.y + threadIdx.y;

    // flag is a uniform kernel argument, so every thread of the block takes
    // the same branch and the __syncthreads() below are block-uniform (safe).
    if (flag == 1) {
        // Pass 1: normalization factor Z(i).
        // Stage this block's tile and the neighbour tile in shared memory.
        SubArrayA(threadIdx.x, threadIdx.y) = inputArray[xi*imageSize+yi];
        __syncthreads();
        SubArrayB(threadIdx.x, threadIdx.y) = inputArray[xj*imageSize+yj];
        __syncthreads();
        float Zi = normFactor(subArrayA, subArrayB, halfPatchWidth, sigma);
        // Multiple z-blocks contribute to the same pixel: accumulate atomically.
        atomicAdd(&ZiArray(xi,yi), Zi);
        __syncthreads();
    }
    else {
        // Pass 2: weighted sum of neighbour intensities, w(i,j)*f(j).
        SubArrayA(threadIdx.x, threadIdx.y) = inputArray[xi*imageSize+yi];
        __syncthreads();
        SubArrayB(threadIdx.x, threadIdx.y) = inputArray[xj*imageSize+yj];
        __syncthreads();
        float SumWeight = nonLocalMeans(subArrayA, subArrayB, imageOut, xi, yi, halfPatchWidth, imageSize, sigma, ZiArray(xi,yi));
        atomicAdd(&ImageOut(xi,yi), SumWeight);
        __syncthreads();
    }
}

// Sum over every pixel j of the neighbour tile of w(i,j)*f(j).
__device__ float nonLocalMeans(float *inputArrayA, float *inputArrayB, float *imageOut, int xi, int yi, int halfPatchWidth, int imageSize, float sigma, float Zi){
    float ww = 0;
    for (int xj = 0; xj < BLOCK_SIZE; xj++)
    {
        for (int yj = 0; yj < BLOCK_SIZE; yj++)
        {
            // w(i,j) * f(j)
            ww += weightingFunct(inputArrayA, inputArrayB, xj, yj, halfPatchWidth, sigma, Zi) * InputArrayB(xj,yj);
        }
    }
    return (ww);
}

// w(i,j) = exp(-d(i,j)/sigma^2) / Z(i), for i = (threadIdx.x, threadIdx.y)
// in tile A and j = (xj, yj) in tile B.
__device__ float weightingFunct(float *inputArrayA, float *inputArrayB, int xj, int yj, int halfPatchWidth, float sigma, float Zi){
    float distance = gaussianDistance(inputArrayA, inputArrayB, xj, yj, halfPatchWidth);
    return ( ( expf(-(distance/(sigma*sigma))) )/Zi);
}

// Z(i) = sum over all j in the neighbour tile of exp(-d(i,j)/sigma^2).
__device__ float normFactor(float *inputArrayA, float *inputArrayB, int halfPatchWidth, float sigma){
    float square_sigma = sigma*sigma;
    float z = 0;
    for (int i = 0; i < BLOCK_SIZE; i++)
    {
        for (int j = 0; j < BLOCK_SIZE; j++)
        {
            float distance = gaussianDistance(inputArrayA, inputArrayB, i, j, halfPatchWidth);
            z += expf(-(distance/square_sigma));
        }
    }
    return (z);
}

// Weighted patch distance |f(Ni) - f(Nj)|: a 1/(|dx|+|dy|)-weighted mean of
// squared differences over the (2*halfPatchWidth+1)^2 patch, with
// out-of-tile offsets reflected back inside and the center pixel skipped.
__device__ float gaussianDistance(float *inputArrayA, float *inputArrayB, int xj, int yj, int halfPatchWidth){
    int xi = threadIdx.x;
    int yi = threadIdx.y;
    int ai;
    int bi;
    int aj;
    int bj;
    // FIX: weight/SumWeight were ints, so 1/(|dx|+|dy|) truncated to 0 for
    // every offset farther than one step — discarding almost the whole patch
    // and risking division by zero. Accumulate in float instead.
    float SumWeight = 0.0f;
    float distance = 0.0f;  // accumulated weighted squared difference
    float diff = 0.0f;      // per-pixel difference
    for (int i = -halfPatchWidth; i <= halfPatchWidth; i++)
    {
        for (int j = -halfPatchWidth; j <= halfPatchWidth; j++)
        {
            ai = xi+i;
            bi = yi+j;
            aj = xj+i;
            bj = yj+j;
            // Reflect offsets that leave the tile back inside it.
            if ((aj<0)||(aj>=BLOCK_SIZE)) aj = xj-i;
            if ((bj<0)||(bj>=BLOCK_SIZE)) bj = yj-j;
            if ((ai<0)||(ai>=BLOCK_SIZE)) ai = xi-i;
            if ((bi<0)||(bi>=BLOCK_SIZE)) bi = yi-j;
            if (ai != xi || bi != yi)  // skip the center pixel
            {
                float weight = 1.0f/(MAX(ai-xi,xi-ai)+MAX(bi-yi,yi-bi));
                SumWeight += weight;
                diff = InputArrayA(ai,bi)-InputArrayB(aj,bj);
                distance += diff*diff*weight;
            }
        }
    }
    // Guard: halfPatchWidth == 0 visits only the (skipped) center and
    // accumulates no weight.
    return (SumWeight > 0.0f) ? (distance/SumWeight) : 0.0f;
}
11,198
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <utility>   // std::swap
#include "GameOfLife.cuh"

namespace GameOfLifeCUDALibrary {

    // One Conway generation on a toroidal (wrap-around) world.
    // Layout is row-major: cell (x, y) lives at lifeData[y*worldWidth + x],
    // so yAbs = y*worldWidth and one row step is worldWidth elements.
    // Grid-stride loop: any 1D launch configuration covers the world.
    __global__ void GameOfLife(const unsigned char* lifeData, const int worldWidth, const int worldHeight, unsigned char* resultLifeData) {
        int worldSize = worldWidth * worldHeight;
        int index = blockIdx.x * blockDim.x + threadIdx.x;
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < worldSize; i += stride) {
            int x = i % worldWidth;
            int yAbs = i - x;   // y * worldWidth
            int xLeft = (x + worldWidth - 1) % worldWidth;
            int xRight = (x + 1) % worldWidth;
            // FIX: the row stride is worldWidth, not worldHeight; the
            // original offsets were only correct for square worlds.
            int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize;
            int yAbsDown = (yAbs + worldWidth) % worldSize;
            // Count alive cells among the 8 neighbours.
            int aliveCells = lifeData[xLeft + yAbsUp] + lifeData[x + yAbsUp] + lifeData[xRight + yAbsUp]
                + lifeData[xLeft + yAbs] + lifeData[xRight + yAbs]
                + lifeData[xLeft + yAbsDown] + lifeData[x + yAbsDown] + lifeData[xRight + yAbsDown];
            // Birth on 3, survival on 2 or 3.
            resultLifeData[x + yAbs] = aliveCells == 3 || (aliveCells == 2 && lifeData[x + yAbs]) ? 1 : 0;
        }
    }

    // Zero both world buffers.
    __global__ void InitWorld(unsigned char* lifeData, int worldWidth, int worldHeight, unsigned char* resultLifeData) {
        int worldSize = worldWidth * worldHeight;
        int index = blockIdx.x * blockDim.x + threadIdx.x;
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < worldSize; i += stride) {
            lifeData[i] = 0;
            resultLifeData[i] = 0;
        }
    }

    // Seed the world from a grayscale image: pixel value 255 -> alive.
    __global__ void InitWithImage(int imageWidth, int imageHeight, const unsigned char* imageBuffer, int worldWidth, int worldHeight, unsigned char* lifeData) {
        int imageSize = imageWidth * imageHeight;
        int index = blockIdx.x * blockDim.x + threadIdx.x;
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < imageSize; i += stride) {
            int xImage = i % imageWidth;
            int yImage = (i - xImage) / imageWidth;
            // FIX: guard against images larger than the world; previously an
            // out-of-bounds write into lifeData.
            if (xImage < worldWidth && yImage < worldHeight) {
                lifeData[yImage * worldWidth + xImage] = imageBuffer[xImage + yImage*imageWidth] == 255 ? 1 : 0;
            }
        }
    }

    // Render the world into a grayscale image: alive -> 255, dead -> 0.
    __global__ void WriteImage(int imageWidth, int imageHeight, unsigned char* imageBuffer, int worldWidth, int worldHeight, unsigned char* lifeData) {
        int imageSize = imageWidth * imageHeight;
        int index = blockIdx.x * blockDim.x + threadIdx.x;
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < imageSize; i += stride) {
            int xImage = i % imageWidth;
            int yImage = (i - xImage) / imageWidth;
            // FIX: guard against images larger than the world; previously an
            // out-of-bounds read from lifeData.
            if (xImage < worldWidth && yImage < worldHeight) {
                imageBuffer[xImage + yImage * imageWidth] = lifeData[yImage * worldWidth + xImage] == 1 ? 255 : 0;
            }
        }
    }

    // Render an RGB image (world-sized) and highlight blinker oscillators:
    // horizontal blinkers in red, vertical blinkers in... red as well (both
    // branches zero g and b); plain alive cells are white, dead cells black.
    __global__ void WriteRGBImage(unsigned char* lifeData, int worldWidth, int worldHeight, Pixel* image) {
        int imageSize = worldWidth * worldHeight;
        int index = blockIdx.x * blockDim.x + threadIdx.x;
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < imageSize; i += stride) {
            int x = i % worldWidth;
            int yAbs = i - x;
            if (lifeData[x + yAbs] == 1) {
                image[x + yAbs].r = 255;
                image[x + yAbs].b = 255;
                image[x + yAbs].g = 255;
                int xLeft = (x + worldWidth - 1) % worldWidth;
                int xRight = (x + 1) % worldWidth;
                int xLeft2 = (x + worldWidth - 2) % worldWidth;
                int xRight2 = (x + 2) % worldWidth;
                // FIX: row offsets must step by worldWidth, not worldHeight
                // (row-major layout); the original was wrong for non-square
                // worlds.
                int yAbsUp = (yAbs + imageSize - worldWidth) % imageSize;
                int yAbsDown = (yAbs + worldWidth) % imageSize;
                int yAbsUp2 = (yAbs + imageSize - 2 * worldWidth) % imageSize;
                int yAbsDown2 = (yAbs + 2 * worldWidth) % imageSize;
                // Horizontal blinker: a 3-cell row with an empty border.
                if (lifeData[x + yAbs] == 1 && lifeData[xLeft + yAbs] == 1 && lifeData[xRight + yAbs] == 1
                    && lifeData[xLeft + yAbsUp] == 0 && lifeData[x + yAbsUp] == 0 && lifeData[xRight + yAbsUp] == 0
                    && lifeData[xLeft + yAbsDown] == 0 && lifeData[x + yAbsDown] == 0 && lifeData[xRight + yAbsDown] == 0
                    && lifeData[xLeft2 + yAbs] == 0 && lifeData[xLeft2 + yAbsUp] == 0 && lifeData[xLeft2 + yAbsDown] == 0
                    && lifeData[xRight2 + yAbs] == 0 && lifeData[xRight2 + yAbsUp] == 0 && lifeData[xRight2 + yAbsDown] == 0) {
                    image[x + yAbs].b = 0;
                    image[xLeft + yAbs].b = 0;
                    image[xRight + yAbs].b = 0;
                    image[x + yAbs].g = 0;
                    image[xLeft + yAbs].g = 0;
                    image[xRight + yAbs].g = 0;
                }
                // Vertical blinker: a 3-cell column with an empty border.
                else if (lifeData[x + yAbs] == 1 && lifeData[x + yAbsUp] == 1 && lifeData[x + yAbsDown] == 1
                    && lifeData[xLeft + yAbs] == 0 && lifeData[xLeft + yAbsUp] == 0 && lifeData[xLeft + yAbsDown] == 0
                    && lifeData[xRight + yAbs] == 0 && lifeData[xRight + yAbsUp] == 0 && lifeData[xRight + yAbsDown] == 0
                    && lifeData[xLeft + yAbsUp2] == 0 && lifeData[xLeft + yAbsDown2] == 0
                    && lifeData[xRight + yAbsUp2] == 0 && lifeData[xRight + yAbsDown2] == 0) {
                    image[x + yAbs].g = 0;
                    image[x + yAbsUp].g = 0;
                    image[x + yAbsDown].g = 0;
                    image[x + yAbs].b = 0;
                    image[x + yAbsUp].b = 0;
                    image[x + yAbsDown].b = 0;
                }
            }
            else {
                image[x + yAbs].r = 0;
                image[x + yAbs].b = 0;
                image[x + yAbs].g = 0;
            }
        }
    }

    // Run `generations` steps, ping-ponging between the two buffers.
    // On return, lifeData points at the newest generation.
    void game_of_life_cuda(unsigned char*& lifeData, const int worldWidth, const int worldHeight, unsigned char*& resultLifeData, int generations) {
        int blockSize = 256;
        int numBlocks = (blockSize + worldWidth * worldHeight - 1) / blockSize;
        for (int i = 0; i < generations; i++) {
            GameOfLife <<<numBlocks, blockSize>>> (lifeData, worldWidth, worldHeight, resultLifeData);
            // Host-side pointer swap only; same-stream kernel launches are
            // already ordered, so no per-generation device sync is required.
            std::swap(lifeData, resultLifeData);
        }
        // FIX: synchronize once after the whole run instead of once per
        // generation (the per-iteration sync serialized host and device with
        // no correctness benefit).
        cudaDeviceSynchronize();
    }

    // Allocate and zero both device-side world buffers.
    void init_world_cuda(unsigned char*& lifeData, int worldWidth, int worldHeight, unsigned char*& resultLifeData) {
        if (cudaMalloc(&lifeData, (size_t)worldWidth * worldHeight * sizeof(unsigned char))
            || cudaMalloc(&resultLifeData, (size_t)worldWidth * worldHeight * sizeof(unsigned char))) {
            printf("Allocation failed!");
        }
        int blockSize = 256;
        int numBlocks = (blockSize + worldWidth * worldHeight - 1) / blockSize;
        InitWorld <<<numBlocks, blockSize >>> (lifeData, worldWidth, worldHeight, resultLifeData);
        cudaDeviceSynchronize();
    }

    // Render the current world into a device-side grayscale image buffer.
    void write_image_cuda(int imageWidth, int imageHeight, unsigned char* imageBuffer, int worldWidth, int worldHeight, unsigned char* lifeData) {
        int blockSize = 256;
        int numBlocks = (blockSize + imageWidth * imageHeight - 1) / blockSize;
        WriteImage <<<numBlocks, blockSize >>> (imageWidth, imageHeight, imageBuffer, worldWidth, worldHeight, lifeData);
        cudaDeviceSynchronize();
    }

    // Advance one generation and render an RGB frame with blinkers marked.
    void oscilator_detection(unsigned char*& lifeData, int worldWidth, int worldHeight, unsigned char*& resultLifeData, Pixel* image) {
        int blockSize = 256;
        int numBlocks = (blockSize + worldWidth * worldHeight - 1) / blockSize;
        GameOfLife <<<numBlocks, blockSize >>> (lifeData, worldWidth, worldHeight, resultLifeData);
        cudaDeviceSynchronize();
        std::swap(lifeData, resultLifeData);
        WriteRGBImage <<<numBlocks, blockSize >>> (lifeData, worldWidth, worldHeight, image);
        cudaDeviceSynchronize();
    }

    // Seed the world from a device-side grayscale image buffer.
    void init_world_with_image_cuda(int imageWidth, int imageHeight, unsigned char* imageBuffer, int worldWidth, int worldHeight, unsigned char* lifeData) {
        int blockSize = 256;
        int numBlocks = (blockSize + imageWidth * imageHeight - 1) / blockSize;
        InitWithImage <<<numBlocks, blockSize >>> (imageWidth, imageHeight, imageBuffer, worldWidth, worldHeight, lifeData);
        cudaDeviceSynchronize();
    }

    // True when (x, yAbs) is the center of a horizontal 3-cell blinker.
    __device__ bool isHorizontalBlinker(unsigned char* lifeData, int x, int yAbs, int xLeft, int xRight, int yAbsDown, int yAbsUp) {
        return lifeData[x + yAbs] == 1 && lifeData[xLeft + yAbs] == 1 && lifeData[xRight + yAbs] == 1
            && lifeData[xLeft + yAbsUp] == 0 && lifeData[x + yAbsUp] == 0 && lifeData[xRight + yAbsUp] == 0
            && lifeData[xLeft + yAbsDown] == 0 && lifeData[x + yAbsDown] == 0 && lifeData[xRight + yAbsDown] == 0;
    }

    // Unimplemented stub (always false); WriteRGBImage inlines its own check.
    __device__ bool isVerticalBlinker(unsigned char* lifeData, int x, int yAbs, int xLeft, int xRight, int yAbsDown, int yAbsUp) {
        return false;
    }
}
11,199
#include "includes.h"

// Naive global reduction: each in-range thread folds one element of gdata
// into *out with a single atomicAdd. N comes from includes.h; *out must be
// zeroed by the caller before launch.
// NOTE(review): all threads contend on one address, so the adds serialize;
// kept as-is since float atomic ordering is nondeterministic either way.
__global__ void atomic_red(const float *gdata, float *out)
{
    size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) {
        return;   // grid tail
    }
    atomicAdd(out, gdata[gid]);
}
11,200
#include "includes.h"

// Scatter-add grouped-point gradients back to per-point gradients.
// Layouts (row-major): grad_out is (b, m, nsample, c), idx is (b, m, nsample),
// grad_points is (b, n, c); idx values select the destination point in [0, n).
// NOTE(review): this kernel is fully serial — every launched thread executes
// the entire accumulation, so it is only correct when launched as <<<1,1>>>;
// any wider launch would both duplicate and race the += updates. Confirm the
// launch configuration at the call site.
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points)
{
    for (int batch = 0; batch < b; ++batch) {
        // Per-batch base pointers (replaces the original's pointer bumping).
        const int   *batchIdx  = idx + batch * m * nsample;
        const float *batchGrad = grad_out + batch * m * nsample * c;
        float       *batchOut  = grad_points + batch * n * c;

        for (int group = 0; group < m; ++group) {
            for (int s = 0; s < nsample; ++s) {
                int point = batchIdx[group * nsample + s];
                const float *src = batchGrad + (group * nsample + s) * c;
                float *dst = batchOut + point * c;
                for (int ch = 0; ch < c; ++ch) {
                    dst[ch] += src[ch];
                }
            }
        }
    }
}