serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
4,701
#include "includes.h" __global__ void kDot_m1T_m2(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns ){ /* Increments the output matrix with the product of two matrices: m1 transposed and m2. Inputs: m1: array, left matrix of size m1_rows x m1_columns (m1 transposed will be of size m1_columns x m1_rows) m2: array, right matrix of size m1_rows x m2_columns output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_columns x m2_columns m1_rows: int, number of rows in the left matrix m1 m1_columns: int, number of columns in the left matrix m1 m2_rows: int, number of rows in the left matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; int id_T; float t_output = 0.0; for( int k = 0; k < m1_rows; ++k ) { id_T = k * m1_columns + r; t_output += m1[ id_T ] * m2[ k * m2_columns + c ]; } output[i] += t_output; } }
4,702
#include <stdio.h>

#define N 2048 * 2048 // Number of elements in each vector

/*
 * Optimize this already-accelerated codebase. Work iteratively,
 * and use nsys to support your work.
 *
 * Aim to profile `saxpy` (without modifying `N`) running under
 * 20us.
 *
 * Some bugs have been placed in this codebase for your edification.
 */

// c[i] = 2*a[i] + b[i]; grid-stride loop, so any launch configuration is correct.
__global__ void saxpy(int * a, int * b, int * c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = tid; i < N; i += stride)
        c[i] = 2 * a[i] + b[i];
}

int main()   // BUG FIX: removed the stray '\' after '{' in the original
{
    int deviceId;
    int numberOfSMs;

    cudaGetDevice(&deviceId);
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, deviceId);
    numberOfSMs = props.multiProcessorCount;

    int *a, *b, *c;
    int size = N * sizeof (int); // The total number of bytes per vector

    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);

    // Initialize memory on the host
    for( int i = 0; i < N; ++i )
    {
        a[i] = 2;
        b[i] = 1;
        c[i] = 0;
    }

    // Prefetch all three buffers so the kernel does not page-fault.
    cudaMemPrefetchAsync(a, size, deviceId);
    cudaMemPrefetchAsync(b, size, deviceId);
    cudaMemPrefetchAsync(c, size, deviceId);

    int threads_per_block = 256;
    int number_of_blocks = 32 * numberOfSMs;

    saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );

    // Surface launch-configuration errors, then wait for completion.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Launch error: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();

    // Print out the first and last 5 values of c for a quality check
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");

    cudaFree( a );
    cudaFree( b );
    cudaFree( c );
}
4,703
#include <iostream>
using namespace std;

#define N 32   // total input elements
#define NT 16  // threads per block
#define NB 2   // blocks

// reduction with 2 blocks of 16 each
// Sums NT consecutive inputs per block into output[blockIdx.x] via an in-place
// shared-memory tree reduction. Assumes blockDim.x == NT == 16 (a power of two):
// each step halves the active range, so the hard-coded /2, /4, /8 divisors give
// strides 8, 4, 2 and the final step adds tmp[1] into tmp[0].
__global__ void reduction(int * input, int * output)
{
    __shared__ int tmp[NT];
    // Stage this block's slice of the input into shared memory.
    tmp[threadIdx.x] = input[threadIdx.x + blockIdx.x * blockDim.x];
    __syncthreads();
    // 16 -> 8
    if (threadIdx.x < blockDim.x/2) tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x/2];
    __syncthreads();
    // 8 -> 4
    if (threadIdx.x < blockDim.x/4) tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x/4];
    __syncthreads();
    // 4 -> 2
    if (threadIdx.x < blockDim.x/8) tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x/8];
    __syncthreads();
    // 2 -> 1: thread 0 publishes the block's partial sum.
    if (threadIdx.x == 0)
    {
        tmp[threadIdx.x] += tmp[threadIdx.x + 1];
        output[blockIdx.x] = tmp[threadIdx.x];
    }
}

int main()
{
    int h_input[N], h_output[NB];
    int * d_input, * d_output;
    // All-ones input, so each block's expected partial sum is NT (16).
    for (int i=0; i<N; i++) h_input[i] = 1;
    cudaMalloc( (void**)&d_input, N*sizeof(int) );
    cudaMalloc( (void**)&d_output, NB*sizeof(int) );
    cudaMemcpy( d_input, h_input, N*sizeof(int), cudaMemcpyHostToDevice );
    reduction<<< NB,NT >>>(d_input, d_output);
    // Blocking copy also synchronizes with the kernel before we read results.
    cudaMemcpy( h_output, d_output, NB*sizeof(int), cudaMemcpyDeviceToHost );
    cout << "Result0 is " << h_output[0] << endl;
    cout << "Result1 is " << h_output[1] << endl;
    return 0;
}
4,704
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define warp_size 32
#define Hwarp_size 16
#define A 0
#define B 15

void checkCUDAError(const char* msg);

// Integrand.
__host__ __device__ inline double f(double x) { return exp(x)*sin(x); }

// 1-based position of the lowest set bit of n (n must be non-zero).
__host__ __device__ inline unsigned int getFirstSetBitPos(int n) { return log2((float)(n&-n))+1; }

// Romberg integration of f over [a,b]; each block handles one equal sub-interval.
// Dynamic shared memory: row_size * blockDim.x doubles.
// row_size <= 25 (preferably 14) — local_col has 25 slots.
__global__ void romberg(double a, double b, int row_size, double *result)
{
    extern __shared__ double local_array[];
    double diff = (b-a)/gridDim.x, step;
    int max_eval = (1<<(row_size-1)), k;
    // Narrow [a,b] to this block's sub-interval.
    b = a + (blockIdx.x+1)*diff;
    a += blockIdx.x*diff;
    step = (b-a)/max_eval;
    double local_col[25];
    for (int i = 0; i < row_size; i++) local_col[i] = 0.0;
    if (!threadIdx.x)
    {
        k = blockDim.x;
        local_col[0] = f(a) + f(b);   // interval endpoints seed the coarsest row
    }
    else
        k = threadIdx.x;
    // Each interior sample contributes 2*f(.) to the trapezoid row determined by
    // the lowest set bit of k (the refinement level that first evaluates it).
    for (; k < max_eval; k += blockDim.x)
        local_col[row_size - getFirstSetBitPos(k)] += 2.0*f(a + step*k);
    for (int i = 0; i < row_size; i++)
        local_array[row_size*threadIdx.x + i] = local_col[i];
    __syncthreads();
    // Reduce each row across all threads of the block.
    if (threadIdx.x < row_size)
    {
        double sum = 0.0;
        for (int i = threadIdx.x; i < blockDim.x*row_size; i += row_size)
            sum += local_array[i];
        local_array[threadIdx.x] = sum;
    }
    // Thread 0 assembles the Romberg table and performs Richardson extrapolation.
    if (!threadIdx.x)
    {
        double *romberg_table = local_col;   // reuse the per-thread buffer
        romberg_table[0] = local_array[0];
        for (int k = 1; k < row_size; k++)
            romberg_table[k] = romberg_table[k-1] + local_array[k];
        for (int k = 0; k < row_size; k++)
            romberg_table[k] *= (b-a)/(1<<(k+1));
        for (int col = 0; col < row_size-1; col++)
        {
            for (int row = row_size-1; row > col; row--)
            {
                romberg_table[row] = romberg_table[row] +
                    (romberg_table[row] - romberg_table[row-1])/((1<<(2*col+1))-1);
            }
        }
        result[blockIdx.x] = romberg_table[row_size-1];
    }
}

int main( int argc, char** argv)
{
    double *d_result, *h_result, sum = 0.0;
    int numBlocks = 128, numThreadsPerBlock = 64, row_size = 13;
    cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
    cudaMalloc( (void **) &d_result, numBlocks*sizeof(double) );
    h_result = new double[numBlocks];
    timeval t;
    double t1, t2;
    gettimeofday(&t, NULL);
    t1 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
    romberg<<< numBlocks, numThreadsPerBlock, row_size*numThreadsPerBlock*sizeof(double) >>>(A, B, row_size, d_result);
    cudaDeviceSynchronize();   // FIX: cudaThreadSynchronize() is deprecated/removed
    gettimeofday(&t, NULL);
    t2 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
    checkCUDAError("kernel invocation");
    cudaMemcpy( h_result, d_result, numBlocks*sizeof(double), cudaMemcpyDeviceToHost );
    checkCUDAError("memcpy");
    for (int k = 0; k < numBlocks; k++)
        sum += h_result[k];
    printf("TIME : %lf ms with ans = %lf\n\n\n", t2-t1, sum);
    cudaFree(d_result);   // FIX: the original leaked both buffers
    delete[] h_result;
    return 0;
}

void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
4,705
/*
Simple example on using the Unified Memory
https://devblogs.nvidia.com/parallelforall/
To compile
nvcc managedMemoryAdd.cu -o managedMemoryAdd
To profile
nvprof ./managedMemoryAdd
*/
#include <iostream>
#include <stdio.h>
#include <math.h>

// y[i] += x[i]; one thread walks the entire array.
__global__ void addSingleThread(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
        y[i] = x[i] + y[i];
}

// y[i] += x[i]; one block, each thread strides by blockDim.x.
__global__ void addMoreThreads(int n, float *x, float *y)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

// y[i] += x[i]; full grid-stride loop.
__global__ void addGridThreads(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

// Reset the managed buffers to x=1, y=2 so each kernel variant starts from the
// same state and the final "all values should be 3.0f" check stays valid.
static void initArrays(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
}

int main(void)
{
    // N will be 1 million (1048576)
    int N = 1<<20;
    float *x, *y;

    // Allocate Unified Memory - accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // BUG FIX: the original ran all three kernels on the same y without
    // resetting it, so y ended at 5.0f and the 3.0f check reported error 2.
    // Re-initialize (with a sync first) before each launch.
    initArrays(N, x, y);
    addSingleThread<<<1, 1>>>(N, x, y);
    cudaDeviceSynchronize();

    initArrays(N, x, y);
    addMoreThreads<<<1, 256>>>(N, x, y);
    cudaDeviceSynchronize();

    initArrays(N, x, y);
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    printf("GridSize(in blocks):%d BlockSize(in threads):%d\n",numBlocks,blockSize);
    addGridThreads<<<numBlocks, blockSize>>>(N, x, y);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
4,706
#include <stdlib.h> #include <math.h> void distance(float *dist,int dim,int *m,int *n) { int i,j,k,h=0; float x[3],dm[3]; float voxel_side = 2.0f; h = 0; dm[0] = (float)m[0]; dm[1] = (float)m[1]; dm[2] = (float)m[2]; for (i=0;i<m[0];i++) { x[0] = (float)i; if (dm[0]/2. < x[0] && x[0] < dm[0]) x[0] -= dm[0]; else if (dm[0]/2 < -x[0] && -x[0] < dm[0]) x[0] += dm[0]; x[0] /= (float)n[0]; for (j=0;j<m[1];j++) { x[1] = (float)j; if (dm[1]/2. < x[1] && x[1] < dm[1]) x[1] -= m[1]; else if (dm[1]/2. < -x[1] && -x[1] < dm[1]) x[1] += dm[1]; x[1] /= (float)n[1]; for (k=0;k<m[2];k++) { x[2] = (float)k; if (dm[2]/2. < x[2] && x[2] < dm[2]) x[2] -= dm[2]; else if (dm[2]/2. < -x[2] && -x[2] < dm[2]) x[2] += dm[2]; x[2] /= (float)n[2]; dist[h] = sqrt( pow(voxel_side*n[0]*x[0],2)+pow(voxel_side*n[1]*x[1],2)+pow(voxel_side*n[2]*x[2],2) ); h++; } } } }
4,707
// fermi
// Avoid mangling of function names
extern "C"
{
    __global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b);
}

// C (n x m) += A (n x p) * B (p x m), all row-major.
// Expected launch layout: blockDim.x = 32 (threads), blockDim.y = warps per block,
// grid.x covers columns in tiles of 1024, grid.y = one row of C per block.
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b)
{
    const int ttj = threadIdx.x;
    const int wtj = threadIdx.y;
    const int bj  = blockIdx.x;
    const int i   = blockIdx.y;                    // output row
    const int j   = 1024 * bj + (32 * wtj + ttj);  // output column

    // FIX: the original wrote c[j + i*m] unconditionally, which is out of
    // bounds whenever m is not a multiple of the 1024-wide column tile
    // (or the grid overshoots n rows).
    if (i >= n || j >= m)
        return;

    float sum = 0.0;
    for (int k = 0; k < p; k++)
    {
        sum = sum + a[k + i * p] * b[j + k * m];
    }
    c[j + i * m] += sum;
}
4,708
//============================================================================
// Name        : AddingVectors.cu
// Author      : Rashi Goyal
// Version     :
// Copyright   : Your copyright notice
// Description : Vector Addition using CUDA & C++
//============================================================================
#include <iostream>
#include <ctime>    // clock(), time(), CLOCKS_PER_SEC
#include <cstdlib>  // rand(), srand(), malloc(), free()
using namespace std;

// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
__global__ void add_vectors( int *gpu_vector_a, int *gpu_vector_b, int *gpu_vector_c, int vector_length)
{
    // computing index from generic pattern
    int index = (blockIdx.x*blockDim.x)+threadIdx.x;
    // avoid out-of-bound access for threads beyond the array size
    if (index < vector_length)
        gpu_vector_c[index] = gpu_vector_a[index] + gpu_vector_b[index];
}

int main()
{
    cout << "!!!Program 0:Adding Vectors!!!" <<endl;

    int user_choice=0;
    int invalid_selected=1;

    // Host vectors
    int *vector_a;
    int *vector_b;
    int *vector_c;

    // Device vectors
    int *gpu_vector_a;
    int *gpu_vector_b;
    int *gpu_vector_c;

    int grid_n_blocks;   // blocks per grid
    int grid_n_threads;  // threads per block

    clock_t start;
    clock_t end;
    float time_elapsed;

    int vector_length=1;
    int vector_size=1;

    // Menu of vector sizes
    cout<<endl;
    cout<<"1. 2^8 = 256 elements "<<endl;
    cout<<"2. 2^9 = 512 elements"<<endl;
    cout<<"3. 2^12 = 4096 elements"<<endl;
    cout<<"4. 2^15 = 32768 elements"<<endl;
    cout<<"5. Exit"<<endl;
    while(invalid_selected==1){
        cout<<"please select a valid vector size: "<<endl;
        cin>>user_choice;
        if(user_choice==1){
            vector_length=256;  grid_n_threads=256; invalid_selected=0;
        }else if(user_choice==2){
            vector_length=512;  grid_n_threads=256; invalid_selected=0;
        }else if(user_choice==3){
            vector_length=4096; grid_n_threads=512; invalid_selected=0;
        }else if(user_choice==4){
            vector_length=32768; grid_n_threads=512; invalid_selected=0;
        }else if(user_choice==5){
            terminate();
        }
    }

    vector_size = vector_length*sizeof(int);

    // Allocate host memory
    vector_a = (int*)malloc(vector_size);
    vector_b = (int*)malloc(vector_size);
    vector_c = (int*)malloc(vector_size);

    // Allocate device memory
    cudaMalloc(&gpu_vector_a,vector_size);
    cudaMalloc(&gpu_vector_b,vector_size);
    cudaMalloc(&gpu_vector_c,vector_size);

    // Load host vectors with random values
    srand(time(0));
    for(int i = 0; i<vector_length; i++){
        vector_a[i]=rand()%10+1;
        vector_b[i]=rand()%20+10;
    }

    cout<<endl<<endl<<"Vector A :[";
    for(int i = 0; i<vector_length; i++){ cout<<vector_a[i]<<","; }
    cout<<"]"<<endl;

    cout<<endl<<endl<<"Vector B :[";
    for(int i = 0; i<vector_length; i++){ cout<<vector_b[i]<<","; }
    cout<<"]"<<endl;

    // Copy inputs to the device
    cudaMemcpy(gpu_vector_a,vector_a,vector_size,cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_vector_b,vector_b,vector_size,cudaMemcpyHostToDevice);

    // BUG FIX: ceil(vector_length/grid_n_threads) performed integer division
    // before ceil; use integer ceil-division instead.
    grid_n_blocks = (vector_length + grid_n_threads - 1) / grid_n_threads;

    start=clock();
    add_vectors<<<grid_n_blocks,grid_n_threads>>>( gpu_vector_a, gpu_vector_b, gpu_vector_c ,vector_length);
    // BUG FIX: kernel launches are asynchronous — synchronize before stopping
    // the clock, otherwise only the launch overhead is measured.
    cudaDeviceSynchronize();
    end=clock();
    // BUG FIX: convert to milliseconds via CLOCKS_PER_SEC (the original
    // divided raw clock ticks by 1000).
    time_elapsed = (float)(end - start) * 1000.0f / CLOCKS_PER_SEC;

    // Copy the result back to the host
    cudaMemcpy(vector_c,gpu_vector_c,vector_size,cudaMemcpyDeviceToHost);

    cout<<endl<<endl<<"Vector C :[";
    for(int i = 0; i<vector_length; i++){ cout<<vector_c[i]<<","; }
    cout<<"]"<<endl;

    cout<<"Number of Blocks in each Grid : "<<grid_n_blocks<<endl;
    cout<<"Number of Threads in each Block : "<<grid_n_threads<<endl;
    cout<<"Total time taken is : "<<time_elapsed<<" ms"<<endl;

    // Release device memory
    cudaFree( gpu_vector_a );
    cudaFree( gpu_vector_b );
    cudaFree( gpu_vector_c );
    // FIX: release host memory too (the original leaked it)
    free(vector_a);
    free(vector_b);
    free(vector_c);
    return 0;
}
4,709
#include<cmath>
/*
// make sure function are inlined to avoid multiple definition
#ifndef __CUDA_ARCH__
#undef __global__
#define __global__ inline __attribute__((always_inline))
#undef __forceinline__
#define __forceinline__ inline __attribute__((always_inline))
#endif
*/
namespace edm {
  // fma that is fused only when hardware/fast FMA is available (or compiling
  // device code); otherwise falls back to the doubly-rounded a*b+c.
  template<typename X, typename Y, typename Z>
  __device__ __host__ __forceinline__ auto fma(X a, Y b, Z c) {
#if defined(EDM_FORCE_FMA) || defined(__FMA__) || defined(FP_FAST_FMA) || defined(__CUDA_ARCH__)
    return std::fma(a,b,c);
#else
    return a*b+c;
#endif
  }
}

// sqrt(a*a - b*b) computed naively — demonstrates catastrophic cancellation
// when a is close to b (the default input uses a == b).
template<typename T> __device__ __host__ T foo(T a, T b) { return std::sqrt(a*a-b*b); }
// The difference a*a - b*b without the sqrt, for inspecting the intermediate.
template<typename T> __device__ __host__ T foo2(T a, T b) { return (a*a-b*b); }
// Like foo but forces a fused multiply-add for a*a - b*b (single rounding).
__device__ __host__ inline double bar(double a, double b) { return std::sqrt(std::fma(a,a,-b*b)); }
// Like bar but via edm::fma, i.e. fused only when supported.
__device__ __host__ inline double edmbar(double a, double b) { return std::sqrt(edm::fma(a,a,-b*b)); }

#include<iostream>
#include <cstdlib>
#include<cstdio>

// Prints all four variants as %a hex-floats from the device so their roundings
// can be compared bit-for-bit against the host results printed in main().
__global__ void doit(double x, double y) {
  auto s = foo(x,y);
  printf("on device:\n%a\n%a\n%a\n%a\n",s,foo2(x,y),bar(x,y),edmbar(x,y));
}

int main(int argc, char** argv) {
#ifdef EDM_FORCE_FMA
  std::cout << "force use of fma" << std::endl;
#endif
#ifdef __FMA__
  std::cout << "hardware fma supported" << std::endl;
#endif
#ifdef FP_FAST_FMA
  std::cout << "fast fma supported" << std::endl;
#endif
  // double x = 0x1.3333333333333p+0;
  double x = 884279719003555.0; // 1.2;
  double y=x;
  // Optional command-line overrides for the two operands.
  if (argc>1) x=atof(argv[1]);
  if (argc>2) y=atof(argv[2]);
  auto s = foo(x,y);
  std::cout << std::hexfloat << s << std::endl;
  std::cout << std::hexfloat << foo2(x,y) << std::endl;
  std::cout << bar(x,y) << std::endl;
  std::cout << edmbar(x,y) << std::endl;
  doit<<<1,1,0,0>>>(x,y);
  cudaDeviceSynchronize();
}
4,710
#include <cstdio>
#include <cstdlib>
#include <cmath>

static const int DIM = 128;

// Normalizes one DIM-long strided column of `data` to unit L1 norm.
// Thread t of block b handles the column starting at data[t + b*blockDim.x],
// with consecutive column elements N apart.
__global__ void Normalize128(float *data, const int N)
{
    float tmp[DIM];       // per-thread copy of the column
    float norm1 = 0;
    float *start = data + threadIdx.x + blockIdx.x*blockDim.x;
#pragma unroll
    for (int i = 0; i < DIM; ++i)
    {
        tmp[i] = *start;
        // FIX: was abs(); use fabsf so we never fall back to the integer
        // abs(int) overload (which would truncate fractional magnitudes).
        norm1 += fabsf(tmp[i]);
        start += N;
    }
    float norm1_inv = 1.0f / norm1;
    start = data + threadIdx.x + blockIdx.x*blockDim.x;
#pragma unroll
    for (int i = 0; i < DIM; ++i)
    {
        *start = (tmp[i]) * norm1_inv;
        start += N;
    }
}

int main(){
    float *h_ran, *d_ran;
    h_ran = (float *)malloc(sizeof(float)*128);
    for(int i = 0 ; i < 128 ; i ++){
        h_ran[i] = 2;
    }
    cudaMalloc(&d_ran, sizeof(float)*128);
    cudaMemcpy(d_ran, h_ran, sizeof(float)*128, cudaMemcpyHostToDevice);
    // One thread, column stride 1: normalizes all 128 values (each 2/256).
    Normalize128<<<1, 1>>>(d_ran, 1);
    cudaMemcpy(h_ran, d_ran, sizeof(float)*128, cudaMemcpyDeviceToHost);
    printf("0: %f \n", h_ran[0]);   // expect 0.007812
    // free memory
    free(h_ran);
    cudaFree(d_ran);
    return 0;
}
4,711
#include "includes.h" __global__ void cunnx_WindowGate2_updateOutput_kernel( float *output, float *centroids, float *normalizedCentroids, float *inputIndice, float *outputIndice, const float *input, const float *noise, int inputSize, int outputSize, int inputWindowSize, int outputWindowSize, int windowStride, int train) { __shared__ float buffer[WINDOWGATE2_THREADS+1]; unsigned int tx = threadIdx.x; unsigned int k = blockIdx.x; const float *input_k = input + inputSize*k; float *output_k = output + outputWindowSize*k; // get coordinate of centoid buffer[tx] = 0; for (unsigned int i=tx; i<inputSize; i+=blockDim.x) buffer[tx] += input_k[i]*(float)(i+1); // add (reduce) for (unsigned int stride = WINDOWGATE2_THREADS >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } if (tx == 0) { float centroid = buffer[0]; // make centroid a number between 0 and 1 centroid /= (float)(inputSize); normalizedCentroids[k] = centroid; if ( train ) { centroid += noise[k]; centroid = fminf(fmaxf(0,centroid),1); } // align centroid to output centroid *= (float)(outputSize); float inputIdx = centroid/(float)(inputSize) - 0.5*(float)inputWindowSize; float outputIdx = centroid - 0.5*(float)outputWindowSize; // clip indices inputIdx = fminf(inputIdx, inputSize-inputWindowSize+1); inputIdx = fmaxf(inputIdx, 1); outputIdx = fminf(outputIdx, outputSize-outputWindowSize+1); outputIdx = fmaxf(outputIdx, 1); inputIdx = ceilf(inputIdx); outputIdx = ceilf(outputIdx); // align centroid to outputWindow centroid -= (outputIdx-1); inputIndice[k] = (int)inputIdx; outputIndice[k] = (int)outputIdx; centroids[k] = centroid; buffer[WINDOWGATE2_THREADS] = inputIdx; } __syncthreads(); float inputIdx = buffer[WINDOWGATE2_THREADS]; const float *inputWindow = input_k + (int)inputIdx; for (int i=tx; i<outputWindowSize; i+=blockDim.x) { output_k[i] = inputWindow[(int)floorf(((float)i)/windowStride)]; } }
4,712
#include "includes.h" __global__ void force_calc_EMA ( float *Force, double *Force_old, int num_atom, int num_q, float *f_ptxc, float *f_ptyc, float *f_ptzc, int num_atom2, int num_q2, int *Ele, double EMA_norm, float force_ramp) { // Do column tree sum of f_ptxc for f_ptx for every atom, then assign threadIdx.x == 0 (3 * num_atoms) to Force. Force is num_atom * 3. if (blockIdx.x >= num_atom) return; for (int ii = blockIdx.x; ii < num_atom; ii += gridDim.x) { for (int stride = num_q2 / 2; stride > 0; stride >>= 1) { __syncthreads(); for(int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x) { f_ptxc[ii + iAccum * num_atom2] += f_ptxc[ii + iAccum * num_atom2 + stride * num_atom2]; f_ptyc[ii + iAccum * num_atom2] += f_ptyc[ii + iAccum * num_atom2 + stride * num_atom2]; f_ptzc[ii + iAccum * num_atom2] += f_ptzc[ii + iAccum * num_atom2 + stride * num_atom2]; } } __syncthreads(); if (threadIdx.x == 0) { if (Ele[ii]) { Force_old[ii*3 ] *= (EMA_norm - 1.0); Force_old[ii*3 ] -= (double)f_ptxc[ii]; Force_old[ii*3 ] /= EMA_norm; Force_old[ii*3 + 1] *= (EMA_norm - 1.0); Force_old[ii*3 + 1] -= (double)f_ptyc[ii]; Force_old[ii*3 + 1] /= EMA_norm; Force_old[ii*3 + 2] *= (EMA_norm - 1.0); Force_old[ii*3 + 2] -= (double)f_ptzc[ii]; Force_old[ii*3 + 2] /= EMA_norm; Force[ii*3 ] = (float)Force_old[ii*3 ] * force_ramp; Force[ii*3 + 1] = (float)Force_old[ii*3 + 1] * force_ramp; Force[ii*3 + 2] = (float)Force_old[ii*3 + 2] * force_ramp; } } __syncthreads(); } }
4,713
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;

#define CEIL(a,b) ((a-1)/b+1)
#define N 1024

// d_c = d_a + d_b for an r x c row-major matrix; x indexes columns, y rows.
__global__ void Sum(float* d_a, float* d_b, float* d_c, int r, int c)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;   // column
    int y = blockIdx.y*blockDim.y + threadIdx.y;   // row
    int index = c*y + x;
    if (x < c && y < r)
        d_c[index] = d_a[index] + d_b[index];
}

int main()
{
    int r, c;
    cout << "enter row and column : ";
    cin >> r >> c;
    // VLAs on the stack: fine for modest sizes entered interactively.
    float h_a[r][c], h_b[r][c], h_c[r][c];
    int bytes = r*c*sizeof(float);
    for (int i = 0; i < r; i++)
    {
        for (int j = 0; j < c; j++)
        {
            h_a[i][j] = rand()%1000;
            h_b[i][j] = rand()%1000;
        }
    }
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_c, bytes);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    dim3 block(32, 32, 1);
    // BUG FIX: grid.x must cover the columns (c) and grid.y the rows (r);
    // the original had them swapped, under-launching whenever r != c.
    dim3 grid(CEIL(c, 32), CEIL(r, 32), 1);
    Sum<<<grid, block>>>(d_a, d_b, d_c, r, c);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    bool verify = true;
    for (int i = 0; i < r; i++)
        for (int j = 0; j < c; j++)
            if (h_c[i][j] != h_a[i][j] + h_b[i][j])
                verify = false;
    if (verify) cout << "Result is Correct";
    else cout << "Incorrect Result";
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
4,714
#include <iostream>
using namespace std;

// Writes d_out[i] = d_in[i]^3; one thread per element, single block.
__global__ void cube(float * d_out, float * d_in){
    int idx = threadIdx.x;
    float v = d_in[idx];
    d_out[idx] = v * v * v;
}

int main(){
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Host buffers: input ramp 0..95, output to be filled by the kernel.
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for(int k = 0; k < ARRAY_SIZE; ++k)
        h_in[k] = static_cast<float>(k);

    // Device buffers.
    float *d_in = nullptr;
    float *d_out = nullptr;
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // Upload, launch one block of ARRAY_SIZE threads, download.
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Show every cubed value.
    for(int k = 0; k < ARRAY_SIZE; ++k)
        cout << h_out[k] << endl;

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
4,715
#include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "definitions.cuh" #include <time.h> #include "stdio.h" //Number of elements on which to perform CFD unsigned int Ni = 512; // Y elements unsigned int Nj = 512; // X elements unsigned int nIterations = 10000; // No Of Iterations unsigned int kernelVersion =2; // Decides which GPU kernel version to call (Set it to 1 or 2) int main(int argc, char** argv) { //Variables for Timing float cpuTime, gpuTime; // CPU and GPU Pointers ( d_XX : refers to pointer pointing to GPU memory. This is just a convention) float *t = NULL, *t_prev = NULL; float *d_t = NULL,*d_t_prev= NULL; parseCommandLineArguments(argc, (char **)argv); printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations); unsigned int size = Ni * Nj * sizeof(float); if(!initializeCPU(&t, &t_prev) ) { printf("\n Error in allocating memory on CPU!!!"); unInitializeCPU(&t, &t_prev); getchar(); return 0; } if (!initializeGPU(&d_t, &d_t_prev)) { printf("\n Error in allocating memory on GPU!!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); return 0; } //Perform CFD on CPU performCPUCFD(t,t_prev, &cpuTime); // To temporarily store CPU data. 
This is just for comparing with GPU output float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float)); memcpy(tempBuffer, t_prev, size); //Perform CFD on GPU if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime)) { printf("\n GPU Kernel failed !!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); return 0; } printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t)); printf("\n Speedup = %fx", (float)(cpuTime/gpuTime)); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); printf("\n Finished Processing!!!"); getchar(); } void parseCommandLineArguments(int argc, char**argv) { if (argc >= 1) { for (int i=1; i < argc; i++) { int bFirstArgIsParam = false; int string_start = 0; while (argv[i][string_start] == '-') string_start++; char *string_argv = &argv[i][string_start]; if (!STRNCASECMP(string_argv, "Ni=", 3)) { bFirstArgIsParam = true; Ni = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "Nj=", 3)) { bFirstArgIsParam = true; Nj = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "iterations=", 11)) { bFirstArgIsParam = true; nIterations = atoi(&string_argv[11]); continue; } if (!STRNCASECMP(string_argv, "kernel=", 7)) { bFirstArgIsParam = true; kernelVersion = atoi(&string_argv[7]); continue; } if (!bFirstArgIsParam) { printf("Invalid arguments\n"); for (int n=0; n < argc; n++) { printf("argv[%d] = %s\n", n, argv[n]); } printf("\n"); exit(0); } } } if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0)) { fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!"); getchar(); exit(0); } } int initializeCPU(float **t, float **t_prev) { *t = (float*) calloc(Ni*Nj, sizeof(float)); *t_prev = (float*) calloc(Ni*Nj, sizeof(float)); if((*t)==NULL || (*t_prev) == NULL) return 0; else return 1; } void unInitializeCPU(float **t, float **t_prev) { if((*t) !=NULL) free(*t); if((*t_prev) != NULL) 
free(*t_prev); } int initializeGPU(float **d_t, float **d_t_prev) { unsigned int size = Ni * Nj * sizeof(float); // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); getchar(); return 0; } // Allocate GPU buffers. cudaStatus = cudaMalloc((void**)&(*d_t), size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); return 0; } // Allocate GPU buffers . cudaStatus = cudaMalloc((void**)&(*d_t_prev), size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = cudaMemset((*d_t),0, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = cudaMemset((*d_t_prev),0, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); getchar(); return 0; } return 1; } void unInitializeGPU(float **d_t, float **d_t_prev) { cudaError_t cudaStatus; if((*d_t)!=NULL) cudaStatus = cudaFree((*d_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return; } if((*d_t_prev)!=NULL) cudaStatus = cudaFree((*d_t_prev)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return; } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); getchar(); return; } } void performCPUCFD(float *t, float *t_prev, float *cpuTime) { float h,x,y; h = 1.0f/(Ni-1); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } float elapsedTimeInMs = 0.0f; clock_t start = clock(); for(unsigned int k=0;k<nIterations;k++) { for(unsigned int j=1;j<(Nj-1);j++) { for(unsigned int i=1;i<(Ni-1);i++) { t[i*Nj+j] = 
0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] + t_prev[i*Nj+(j+1)] - 4*h*h); } } float* pingPong = t_prev; t_prev = t; t = pingPong; } clock_t end = clock(); elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC); printf("\n CPU Time:: %f ms", elapsedTimeInMs); *cpuTime = elapsedTimeInMs; } int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime) { float h,x,y; const char *str = (char*) malloc(1024); // To store error string //Decide how many blocks per thread and how many blocks per grid dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y); dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y); h = 1.0f/(Ni-1); memset(t_prev, 0, sizeof(float) * Ni * Nj); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } //Copy data to device cudaMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , cudaMemcpyHostToDevice); //Insert event to calculate time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //This calls Version 1 of kernel which uses Global memory if(kernelVersion ==1) { cudaEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. calculateCFD_V1<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } //This calls Version 2 of kernel which uses optimization by copying data to shared memory else if(kernelVersion ==2) { cudaEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. 
calculateCFD_V2<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("\n GPU Time:: %f ms", elapsedTime); *gpuTime = elapsedTime; cudaError_t cudaStatus = cudaMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); str = cudaGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } return 1; } int checkHostEqualsDevice(float* o_host, float* o_device) { int flag =1; float tolerance = 0.0001f; //Compare the results for(unsigned int j=0;j<Nj;j++) { for(unsigned int i=0;i<Ni;i++) { if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance) { printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j); flag =0; //getchar(); } } } return flag; }
4,716
#include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void touchMemory(float* memory) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; }
4,717
#include <iostream> #include <cuda.h> #include <stdlib.h> #include <ctime> #include <cmath> #include <limits> __global__ void sum_vectors(double *a, double *b, double *c, int size){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { c[idx] = a[idx] + b[idx]; } } int check(double *a, double *b, double *c, int size) { for (int i = 0; i < size; ++i) { if (std::fabs(c[i] - (a[i] + b[i])) > std::numeric_limits<double>::epsilon()) { return 0; } } return 1; } int main(int argc, char **argv){ int n = atoi(argv[1]); int n_device = (n - 1)/2 + 1; int bytes_device = n_device * sizeof(double); double *h_a, *h_b, *h_c; size_t bytes = n * sizeof(double); cudaMallocHost(&h_a, bytes); cudaMallocHost(&h_b, bytes); cudaMallocHost(&h_c, bytes); cudaHostRegister(h_a, bytes, 0); cudaHostRegister(h_b, bytes, 0); cudaHostRegister(h_c, bytes, 0); for (int i = 0; i < n; i++){ h_a[i] = i; h_b[i] = 3 * i; } double *d_a1, *d_b1, *d_c1; double *d_a2, *d_b2, *d_c2; cudaMalloc(&d_a1, bytes_device); cudaMalloc(&d_b1, bytes_device); cudaMalloc(&d_c1, bytes_device); cudaSetDevice(1); cudaMalloc(&d_a2, bytes_device); cudaMalloc(&d_b2, bytes_device); cudaMalloc(&d_c2, bytes_device); cudaSetDevice(0); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int blockSize, gridSize; blockSize = 1024; gridSize = (n_device - 1) / 1024 + 1; cudaSetDevice(0); cudaMemcpyAsync(d_a1, &h_a[0], bytes_device, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_b1, &h_b[0], bytes_device, cudaMemcpyHostToDevice); sum_vectors<<<gridSize, blockSize>>>(d_a1, d_b1, d_c1, n_device); cudaMemcpyAsync(&h_c[0], d_c1, bytes_device, cudaMemcpyDeviceToHost); cudaSetDevice(1); cudaMemcpyAsync(d_a2, &h_a[n_device], bytes_device, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_b2, &h_b[n_device], bytes_device, cudaMemcpyHostToDevice); sum_vectors<<<gridSize, blockSize>>>(d_a2, d_b2, d_c2, n_device); cudaMemcpyAsync(&h_c[n_device], d_c2, bytes_device, cudaMemcpyDeviceToHost); 
cudaDeviceSynchronize(); cudaSetDevice(0); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << "Gpu time: " << milliseconds << " milliseconds" << std::endl; int res = check(h_a, h_b, h_c, n); if (res) { std::cout << "Correct result" << std::endl; } else { std::cout << "Not correct result" << std::endl; } cudaFree(d_a1); cudaFree(d_b1); cudaFree(d_c1); cudaFree(d_a2); cudaFree(d_b2); cudaFree(d_c2); cudaHostUnregister(h_a); cudaHostUnregister(h_b); cudaHostUnregister(h_c); return 0; }
4,718
#include "includes.h" extern "C" extern "C" __global__ void dropoutTrain( const float* arguments, float* dropoutMask, float* results, const float dropoutFraction, const long size ) { const int X = gridDim.x; const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x; if(index < size) { const float mask = dropoutFraction < dropoutMask[index]; dropoutMask[index] = mask; results[index] = mask * arguments[index]; } }
4,719
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #ifndef _SCAN_BEST_KERNEL_CU_ #define _SCAN_BEST_KERNEL_CU_ #endif // #ifndef _SCAN_BEST_KERNEL_CU_
4,720
/******************************************************************* * * DESCRIPTION: This program performs the multiplication between * a matrix and a vector in a GPU * * AUTHOR: Eduardo Gutarra Velez * * DATE: 02/01/2010 * *******************************************************************/ #include <iostream> #include <ctime> using namespace std; /** * Number of threads per block */ const int blocksize = 32; time_t seconds; /******************************************************************* * * Kernel Name: mult_matrix_by_vector * * Parameters: inputs are: matrix a (size NxN ), array b (size N), * value of N. ouputs are: * * Description: This is the kernel to perform the multiplication * between a matrix and a vector. Each thread performs the dot * product between each row and the vector. * *******************************************************************/ __global__ void mult_matrix_by_vector( float* a, float *b, float *c, int N ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float Cvalue = 0; int index; /* * Each thread will perform the dot product between the row of the matrix * and the vector that is being multiplied. */ if ( i < N ) { for ( int e = 0; e < N; ++e ) { index = e + i*N; Cvalue += a[index]*b[e]; } c[i] = Cvalue; } } int main ( int argc, char *argv[] ) { /** * Command line arguments must be 1 which is the number of rows * and columns for a matrix and the size of the vector. 
*/ if ( argc != 2 ) { cout<<"usage: "<< argv[0] <<" <size n>\n"; return EXIT_FAILURE; } int N = atoi(argv[1]); float *a = new float[N*N]; float *b = new float[N]; float *c = new float[N]; seconds = time (NULL); srand(seconds); for ( int i = 0; i < N*N; ++i ) { // calculate a random number between 0 and 1000 // a[i] = (float) (rand()%RAND_MAX); a[i] = (float) i; } for ( int i = 0; i < N; ++i ) { // calculate a random number between 0 and 1000 // b[i] = (float) (rand()%RAND_MAX); b[i] = (float) i; c[i] = (float) 0; } float *ad, *bd, *cd; const int sizeVec = N*sizeof(float); const int sizeMat = N*sizeVec; cudaMalloc( (void**)&ad, sizeMat ); cudaMalloc( (void**)&bd, sizeVec ); cudaMalloc( (void**)&cd, sizeVec ); cudaMemcpy( ad, a, sizeMat, cudaMemcpyHostToDevice ); cudaMemcpy( bd, b, sizeVec, cudaMemcpyHostToDevice ); dim3 dimBlock(blocksize); dim3 dimGrid(ceil(N/(float)blocksize)); /** * CPU-only version of the program. */ // int idx; // for ( int i = 0; i < N; ++i ) // { // for ( int j = 0; j < N; ++j ) // { // idx = j + i*N; // c[i] += a[idx]*b[j]; // } // } // // cout << "c[:" << 0 << "]=" << c[0] << endl; // cout << "c[:" << 1 << "]=" << c[1] << endl; // cout << "c[:" << 2 << "]=" << c[2] << endl; // cout << "c[:" << 3 << "]=" << c[3] << endl; // cout << "c[:" << 4 << "]=" << c[4] << endl; // // cout << endl; mult_matrix_by_vector<<<dimGrid, dimBlock>>>( ad, bd, cd, N ); cudaMemcpy( c, cd, sizeVec, cudaMemcpyDeviceToHost ); /** * GPU Output. */ cout << "c[" << 0 << "]=" << c[0] << endl; cout << "c[" << 1 << "]=" << c[1] << endl; cout << "c[" << 2 << "]=" << c[2] << endl; cout << "c[" << 3 << "]=" << c[3] << endl; // cout << "c[" << 4 << "]=" << c[4] << endl; cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); delete[] a; delete[] b; delete[] c; return 0; }
4,721
#define t_max 1 #define t 1 /* (ux[0][0][0][0][1]=(alpha*(u[1][0][0][0][0]+u[-1][0][0][0][0]))) (uy[0][0][0][0][2]=(beta*(u[0][1][0][0][0]+u[0][-1][0][0][0]))) (uz[0][0][0][0][3]=(gamma*(u[0][0][1][0][0]+u[0][0][-1][0][0]))) */ __global__ void gradient(float * * ux_1_0_out, float * * uy_2_0_out, float * * uz_3_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max) { /* float * const u__u_0[16] = { u_0_0 } ; float * const u__ux_1[16] = { ux_1_0 } ; float * const u__uy_2[16] = { uy_2_0 } ; float * const u__uz_3[16] = { uz_3_0 } ; */ int _idx0; int _idx1; int _idx2; int _idx3; int _idx4; int _idx5; int _idx6; int idx_1_2; int p_idx_x; int p_idx_x_max; int p_idx_y; int p_idx_y_max; int p_idx_z; int p_idx_z_max; int size_1_1; int size_1_2; //int t; int tmp; /* Initializations */ size_1_1=(y_max/blockDim.y); size_1_2=(z_max/blockDim.z); idx_1_2=(blockIdx.y/size_1_2); tmp=(blockIdx.y-(idx_1_2*size_1_2)); p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x)); p_idx_x_max=(p_idx_x+1); p_idx_y=(threadIdx.y+(tmp*blockDim.y)); p_idx_y_max=(p_idx_y+1); p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z)); p_idx_z_max=(p_idx_z+1); /* Implementation */ /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... 
} */ //for (t=1; t<=t_max; t+=1) { /* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */ /* u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0]) */ /* _idx0 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+2) */ _idx0=(((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+2); /* _idx1 = ((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x) */ _idx1=(_idx0-2); /* _idx2 = ((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */ _idx2=((((_idx1+(((((-2*p_idx_z)-2)*t)-x_max)*y_max))+(((((-2*p_idx_z)-2)*t)-1)*x_max))+(((-4*p_idx_z)-4)*(t*t)))+(((-2*p_idx_y)-2)*t)); ux_1_0[_idx2]=(alpha*(u_0_0[_idx0]+u_0_0[_idx1])); /* _idx3 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+2)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+4)*t))+p_idx_x)+1) */ _idx3=(((_idx1+x_max)+(2*t))+1); /* _idx4 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+(((((2*p_idx_z)+2)*t)+p_idx_y)*x_max))+(((4*p_idx_z)+4)*(t*t)))+((2*p_idx_y)*t))+p_idx_x)+1) */ _idx4=(((_idx1-x_max)-(2*t))+1); uy_2_0[_idx2]=(beta*(u_0_0[_idx3]+u_0_0[_idx4])); /* _idx5 = (((((((((p_idx_z+2)*x_max)+(((2*p_idx_z)+4)*t))*y_max)+((((((2*p_idx_z)+4)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+8)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */ _idx5=((((_idx4+((x_max+(2*t))*y_max))+(((2*t)+1)*x_max))+(4*(t*t)))+(2*t)); /* _idx6 = ((((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(((((2*p_idx_z)*t)+p_idx_y)+1)*x_max))+((4*p_idx_z)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */ _idx6=((((_idx1+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1); uz_3_0[_idx2]=(gamma*(u_0_0[_idx5]+u_0_0[_idx6])); } } __global__ void initialize(float * u_0_0, float * ux_1_0, float * 
uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max) { float * const u__u_0[16] = { u_0_0 } ; float * const u__ux_1[16] = { ux_1_0 } ; float * const u__uy_2[16] = { uy_2_0 } ; float * const u__uz_3[16] = { uz_3_0 } ; int _idx0; int _idx1; int _idx2; int _idx3; int _idx4; int _idx5; int _idx6; int idx_1_2; int p_idx_x; int p_idx_x_max; int p_idx_y; int p_idx_y_max; int p_idx_z; int p_idx_z_max; int size_1_1; int size_1_2; //int t; int tmp; /* Initializations */ size_1_1=(y_max/blockDim.y); size_1_2=(z_max/blockDim.z); idx_1_2=(blockIdx.y/size_1_2); tmp=(blockIdx.y-(idx_1_2*size_1_2)); p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x)); p_idx_x_max=(p_idx_x+1); p_idx_y=(threadIdx.y+(tmp*blockDim.y)); p_idx_y_max=(p_idx_y+1); p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z)); p_idx_z_max=(p_idx_z+1); /* Implementation */ /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */ //for (t=1; t<=t_max; t+=1) { /* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */ /* u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0]) */ /* _idx0 = ((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x) */ _idx0=((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x); u_0_0[_idx0]=0.1; /* _idx1 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+2) */ _idx1=(_idx0+2); u_0_0[_idx1]=0.1; /* _idx2 = ((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */ _idx2=((((_idx0+(((((-2*p_idx_z)-2)*t)-x_max)*y_max))+(((((-2*p_idx_z)-2)*t)-1)*x_max))+(((-4*p_idx_z)-4)*(t*t)))+(((-2*p_idx_y)-2)*t)); u__ux_1[(t-1)][_idx2]=0.2; /* _idx3 = 
(((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+(((((2*p_idx_z)+2)*t)+p_idx_y)*x_max))+(((4*p_idx_z)+4)*(t*t)))+((2*p_idx_y)*t))+p_idx_x)+1) */ _idx3=(((_idx0-x_max)-(2*t))+1); u_0_0[_idx3]=0.1; /* _idx4 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+2)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+4)*t))+p_idx_x)+1) */ _idx4=((_idx3+(2*x_max))+(4*t)); u_0_0[_idx4]=0.1; u__uy_2[(t-1)][_idx2]=0.30000000000000004; /* _idx5 = ((((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(((((2*p_idx_z)*t)+p_idx_y)+1)*x_max))+((4*p_idx_z)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */ _idx5=((((_idx0+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1); u_0_0[_idx5]=0.1; /* _idx6 = (((((((((p_idx_z+2)*x_max)+(((2*p_idx_z)+4)*t))*y_max)+((((((2*p_idx_z)+4)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+8)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */ _idx6=((((_idx3+((x_max+(2*t))*y_max))+(((2*t)+1)*x_max))+(4*(t*t)))+(2*t)); u_0_0[_idx6]=0.1; u__uz_3[(t-1)][_idx2]=0.4; } }
4,722
#include<stdio.h> #include<iostream> #include <thrust/device_vector.h> #include <thrust/host_vector.h> int main(int argc, char **argv) { FILE *fptr; int ch=1; int inputLength; /* parse the input arguments */ //@@ Insert code here // Import host input data //@@ Read data from the raw files here //@@ Insert code here char *filename = "input0.raw"; fptr = fopen(filename, "r"); if (fptr == NULL) { perror("Cannot open file input0\n"); exit(0); } int i=0; fscanf(fptr,"%d",&inputLength); printf("Input Length=%d",inputLength); float *hostInput1 = (float *)malloc(sizeof(float)*inputLength); float *hostInput2 = (float *)malloc(sizeof(float)*inputLength); float *hostOutput; while (i!=inputLength) { fscanf(fptr, "%f" ,&ch); *(hostInput1+i)=ch; // if(i==0) // printf("hostinput[0]=%f ",*(hostInput1+i)); ++i; } fclose(fptr); char filename2[11] = "input1.raw"; fptr = fopen(filename2, "r"); if (fptr == NULL) { perror("Cannot open file input1\n"); exit(0); } i=0; ch=1; fscanf(fptr,"%f",&ch); while (i!=inputLength) { fscanf(fptr, "%f" ,&ch); *(hostInput2+i)=ch; ++i; } fclose(fptr); // Declare and allocate host output //@@ Insert code here hostOutput = (float *)malloc(sizeof(float)*inputLength); // Declare and allocate thrust device input and output vectors //@@ Insert code here thrust::device_vector<float> x(inputLength); thrust::device_vector<float> y(inputLength); thrust::device_vector<float> z(inputLength); // Copy to device //@@ Insert code here thrust::copy (hostInput1,hostInput1+inputLength,x.begin()); thrust::copy (hostInput2,hostInput2+inputLength,y.begin()); // Execute vector addition //@@ Insert Code here thrust::transform(x.begin(), x.end(), y.begin(), z.begin(), thrust::plus<float>()); ///////////////////////////////////////////////////////// // Copy data back to host //@@ Insert code here thrust::copy(z.begin(),z.end(),hostOutput); //Check if output is correct char filename3[11] = "output.raw"; fptr = fopen(filename3, "r"); if (fptr == NULL) { printf("Cannot open file 
output\n"); exit(0); } i=0; ch=1; fscanf(fptr,"%d",&ch); float f; while (i!=inputLength) { fscanf(fptr, "%f" ,&f); if(i<10) printf("\n%f\n",f); //if(f!=*(hostOutput+i)) //printf("Wrong Answer i=%d\nhostOp[i]=%d\nch=%d\n",i,*(hostOutput+i),f); ++i; } fclose(fptr); free(hostInput1); free(hostInput2); free(hostOutput); return 0; }
4,723
template<typename T> __device__ void getRow(const T* matrix, const int row, T* result, const int rows, const int cols) { int bx = blockIdx.x; int tx = threadIdx.x; int index = bx * blockDim.x + tx; result[index] = matrix[row * cols + index]; } template<typename T> __device__ void matrixAddRow(const T* matrix, const T* row, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = matrix[rowIndex * cols + colIndex] + row[colIndex]; result[rowIndex * cols + colIndex] = value; } } template<typename T> __device__ void rowAddMatrix(const T* row, const T* matrix, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = matrix[rowIndex * cols + colIndex] + row[colIndex]; result[rowIndex * cols + colIndex] = value; } } template<typename T> __device__ void matrixSubRow(const T* matrix, const T* row, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = matrix[rowIndex * cols + colIndex] - row[colIndex]; result[rowIndex * cols + colIndex] = value; } } template<typename T> __device__ void rowSubMatrix(const T* row, const T* matrix, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = row[colIndex] - matrix[rowIndex * cols + colIndex]; result[rowIndex * cols + colIndex] = value; } } 
template<typename T> __device__ void matrixTimesRow(const T* matrix, const T* row, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = matrix[rowIndex * cols + colIndex] * row[colIndex]; result[rowIndex * cols + colIndex] = value; } } template<typename T> __device__ void rowTimesMatrix(const T* row, const T* matrix, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = matrix[rowIndex * cols + colIndex] * row[colIndex]; result[rowIndex * cols + colIndex] = value; } } template<typename T> __device__ void matrixDivRow(const T* matrix, const T* row, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = matrix[rowIndex * cols + colIndex] / row[colIndex]; result[rowIndex * cols + colIndex] = value; } } template<typename T> __device__ void rowDivMatrix(const T* row, const T* matrix, T* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int rowIndex = by * blockDim.y + ty; int colIndex = bx * blockDim.x + tx; if (rowIndex < rows && colIndex < cols) { T value = row[colIndex] / matrix[rowIndex * cols + colIndex]; result[rowIndex * cols + colIndex] = value; } }
4,724
#include "device_launch_parameters.h" #include <stdio.h> /* enum Type { NOT_NODE = 0, AND_NODE, IMPLIES_NODE, UNIVERSAL_NODE, EXISTENTIAL_NODE, BFUNC_NODE, EMPTY_NODE, SAME, SZ_SPD_CLOSE, SZ_LOC_CLOSE, SZ_LOC_DIST, SZ_LOC_DIST_NEQ , SZ_LOC_RANGE, OR_NODE };*/ #define NOT_NODE 0 #define AND_NODE 1 #define IMPLIES_NODE 2 #define UNIVERSAL_NODE 3 #define EXISTENTIAL_NODE 4 #define BFUNC_NODE 5 #define EMPTY_NODE 6 #define SAME 7 #define SZ_SPD_CLOSE 8 #define SZ_LOC_CLOSE 9 #define SZ_LOC_DIST 10 #define SZ_LOC_DIST_NEQ 11 #define SZ_LOC_RANGE 12 #define OR_NODE 13 #define MAX_PARAM_NUM 2 #define MAX_CCT_SIZE 3000000 #define MAX_LINK_SIZE 5000 #define DEBUG struct Context{ int id; double latitude; double longitude; double speed; int plateNumber; }; struct Node { Node *next; Node *tail; int params[MAX_PARAM_NUM]; }; __device__ bool truth_values[MAX_CCT_SIZE]; __device__ Node links[MAX_CCT_SIZE]; extern "C" __device__ bool same(Context c1, Context c2){ return (c1.plateNumber == c2.plateNumber); } extern "C" __device__ bool sz_spd_close(Context c1, Context c2){ return ((c1.speed - c2.speed) >= -50.0 && (c1.speed - c2.speed) <= 50.0); } extern "C" __device__ bool sz_loc_close(Context c1, Context c2){ return ((c1.latitude - c2.latitude) * (c1.latitude - c2.latitude) + (c1.longitude - c2.longitude) * (c1.longitude - c2.longitude)) <= 0.000001; } extern "C" __device__ bool sz_loc_dist(Context c1, Context c2){ return ((c1.latitude - c2.latitude) * (c1.latitude - c2.latitude) + (c1.longitude - c2.longitude) * (c1.longitude - c2.longitude)) <= 0.000625; } extern "C" __device__ bool sz_loc_dist_neq(Context c1, Context c2){ double dist = ((c1.latitude - c2.latitude) * (c1.latitude - c2.latitude) + (c1.longitude - c2.longitude) * (c1.longitude - c2.longitude)); bool result = true; if (dist > 0.000625 || dist == 0) { result = false; } return result; //return (dist <= 0.000625) && (dist != 0); } extern "C" __device__ bool sz_loc_range(Context c){ return c.longitude >= 112.0 && 
c.longitude <= 116.0 && c.latitude >=20.0 && c.latitude <= 24.0; } extern "C" __device__ void init_node(Node *n){ n->next = NULL; n->tail = n; for (int i = 0; i < MAX_PARAM_NUM; i++) { n->params[i] = -1; } } extern "C" __device__ bool is_null_node(Node *n){ bool res = true; for (int i = 0; i < MAX_PARAM_NUM; i++) { res = res && (n->params[i] == -1); } return res; } extern "C" __device__ void linkHelper(Node *link1, Node *link2) { //inital and assumpt that link1 != null, links != null if (is_null_node(link1)) { for (int i = 0; i < MAX_PARAM_NUM; i++) { link1->params[i] = link2->params[i]; } link1->next = NULL; link1->tail = link1; if(link2->next != NULL) { link2->next->tail = link2->tail; } link2 = link2->next; } if (link2 == NULL) { return; } link1->tail->next = link2; link1->tail = link2->tail; } extern "C" __device__ int calc_offset( int node, int tid, Context *params, int *parent, int *left_child, int *right_child, int *node_type, int *pattern_idx, int *pattern_begin, int *pattern_length, int *pattern, double *longitude, double *latitude, double *speed, int *plateNumber, // contexts int *branch_size) { int offset = branch_size[node]; int current_node = node; int index = 0, tmp = tid; while (parent[current_node] != -1) { int type = node_type[parent[current_node]]; if (type == EXISTENTIAL_NODE || type == UNIVERSAL_NODE) { int len = pattern_length[pattern_idx[parent[current_node]]]; int branch_idx = tmp % len; tmp /= len; params[index].id = pattern[pattern_begin[pattern_idx[parent[current_node]]] + branch_idx];//(pattern + pattern_idx[parent[current_node]] * MAX_PATTERN_SIZE)[(branch_idx + pattern_begin[pattern_idx[parent[current_node]]]) % MAX_PATTERN_SIZE]; params[index].latitude = latitude[params[index].id]; params[index].longitude = longitude[params[index].id]; params[index].speed = speed[params[index].id]; params[index].plateNumber = plateNumber[params[index].id]; offset += branch_idx * branch_size[current_node] ; // printf("branch_idx = %d, branch_size = 
%d\n", branch_idx, branch_size[current_node]); index++; } else if (type == AND_NODE || type == IMPLIES_NODE || type == OR_NODE) { if (right_child[parent[current_node]] == current_node) { offset += branch_size[left_child[parent[current_node]]]; } } else { offset += 0; } current_node = parent[current_node]; } return offset - 1; } extern "C" __global__ void evaluation(int *parent, int *left_child, int *right_child, int *node_type, int *pattern_idx, //constraint rule int *branch_size, int cunit_begin, int cunit_end,//cunit_end is the root of cunit int *pattern_begin, int *pattern_length, int *pattern, //patterns double *longitude, double *latitude, double *speed,int *plateNumber,// contexts short *truth_value_result, int *link_result, int *link_num, int *cur_link_size, int last_cunit_root, int ccopy_num) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if(tid < ccopy_num) { Context params[MAX_PARAM_NUM]; for (int i = 0; i < MAX_PARAM_NUM; i++) { params[i].id = -1; } int ccopy_root_offset = calc_offset(cunit_end, tid, params, parent, left_child, right_child, node_type, pattern_idx, pattern_begin, pattern_length, pattern, longitude, latitude, speed, plateNumber, branch_size); //#ifdef DEBUG // printf("root = %d, ccopynum = %d, offset = %d\n",cunit_end, ccopy_num, ccopy_root_offset); //#endif for (int node = cunit_begin; node <= cunit_end; node++) { int offset = ccopy_root_offset - (cunit_end - node); int type = node_type[node]; bool value; Node* cur_links = &links[offset]; init_node(cur_links); switch(type) { case UNIVERSAL_NODE: { int step = branch_size[left_child[node]]; value = true; bool first = true; for (int i = 0; i < pattern_length[pattern_idx[node]]; i++) { value = value && truth_values[offset - (i * step + 1)]; if(!truth_values[offset - (i * step + 1)]) { if(first) { init_node(cur_links); first = false; } linkHelper(cur_links, &(links[offset - (i * step + 1)])); } else if(value) { linkHelper(cur_links, &(links[offset - (i * step + 1)])); } } break; } case 
EXISTENTIAL_NODE: { int step = branch_size[left_child[node]]; value = false; bool first = true; for (int i = 0; i < pattern_length[pattern_idx[node]]; i++) { value = value || truth_values[offset - (i * step + 1)]; if(truth_values[offset - (i * step + 1)]) { if(first) { init_node(cur_links); first = false; } linkHelper(cur_links, &(links[offset - (i * step + 1)])); } else if(!value) { linkHelper(cur_links, &(links[offset - (i * step + 1)])); } } break; } case AND_NODE: { //right && left value = truth_values[offset - 1] && truth_values[offset - (branch_size[right_child[node]] + 1)]; if (truth_values[offset - 1] == value) { linkHelper(cur_links, &(links[offset - 1])); } if (truth_values[offset - (branch_size[right_child[node]] + 1)] == value) { linkHelper(cur_links, &(links[offset - (branch_size[right_child[node]] + 1)])); } break; } case OR_NODE: { //right || left value = truth_values[offset - 1] || truth_values[offset - (branch_size[right_child[node]] + 1)]; if (truth_values[offset - 1] == value) { linkHelper(cur_links, &(links[offset - 1])); } if (truth_values[offset - (branch_size[right_child[node]] + 1)] == value) { linkHelper(cur_links, &(links[offset - (branch_size[right_child[node]] + 1)])); } break; } case IMPLIES_NODE: { //!left || right value = !truth_values[offset - (branch_size[right_child[node]] + 1)] || truth_values[offset - 1]; if(value) { linkHelper(cur_links, &(links[offset - 1])); linkHelper(cur_links, &(links[offset - (branch_size[right_child[node]] + 1)])); } else { linkHelper(cur_links, &(links[offset - 1])); } break; } case NOT_NODE: { value = !truth_values[offset - 1]; linkHelper(cur_links, &(links[offset - 1])); break; } default : { //BFUNC switch(type) { case SAME: { value = same(params[0], params[1]); break; } case SZ_SPD_CLOSE: { value = sz_spd_close(params[0], params[1]); break; } case SZ_LOC_CLOSE: { value = sz_loc_close(params[0], params[1]); break; } case SZ_LOC_DIST: { value = sz_loc_dist(params[0], params[1]); break; } case 
SZ_LOC_DIST_NEQ: { value = sz_loc_dist_neq(params[0], params[1]); break; } case SZ_LOC_RANGE: { value = sz_loc_range(params[0]); break; } } for (int i = 0; i < MAX_PARAM_NUM; i++) { cur_links->params[i] = params[i].id; } break; } } truth_values[offset] = value; } if (last_cunit_root == cunit_end ) { *truth_value_result = truth_values[ccopy_root_offset]; if(!truth_values[ccopy_root_offset]) { int len = 0; for(Node *head = &links[ccopy_root_offset]; head != NULL; head = head ->next) { if(len < MAX_LINK_SIZE) { for(int j = 0; j < MAX_PARAM_NUM; j++) { link_result[MAX_PARAM_NUM * len + j] = head->params[j]; } } len++; } *cur_link_size = len; *link_num = len > MAX_LINK_SIZE ? MAX_LINK_SIZE : len; } } } }
4,725
#include "includes.h" __global__ void calc(float *result, float *b, float *a, int size){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size){ float temp; for (int j = 0; j < size; j++){ temp = *(a + j + (idx * size)) * (*(b + j)); atomicAdd(&result[idx], temp); } } }
4,726
#include "includes.h" __global__ void dot_cmp_kernal_reduce(float *g_idata1, float *g_idata2, float *g_odata) { extern __shared__ float sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; sdata[tid] = g_idata1[i]*g_idata2[i] + g_idata1[i+blockDim.x]*g_idata2[i+blockDim.x]; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
4,727
#include <iostream> #include <fstream> #include <vector> #include <stdio.h> #include <algorithm> #include <time.h> using namespace std; void readInt(int &n, int &m) { ifstream fin_n("data/nums.txt"); fin_n >> n >> m; } void readGraph(unsigned long long *neib, int n, int m) { ifstream fin_g("data/graph.txt"); vector<vector<int> > vert; vert.resize(n); for (int i = 0; i < m; ++i) { int u, v; fin_g >> u >> v; u--, v--; neib[i] = ((unsigned long long)u << 32) + v; } } __global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < e_num) { unsigned long long temp = edge_list[tid]; int u, v; u = temp & 0xffffffff; v = temp >> 32; if (parent[u] != parent[v]) { parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])]; *flag = 1; } else { mark[tid] = 1; } } } __global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < e_num) { unsigned long long temp = edge_list[tid]; unsigned int u, v; u = temp & 0xffffffff; v = (temp >> 32) & 0xffffffff; if (parent[u] != parent[v]) { parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])]; *flag = 1; } else { mark[tid] = 1; } } } __global__ void jump(int *parent, int v_num, int *flag) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < v_num) { int p = parent[tid]; int p_p = parent[p]; if (p != p_p) { parent[tid] = p_p; (*flag) = 1; } } } int main() { int n, m; readInt(n, m); unsigned long long *h_edge_list, *d_edge_list; h_edge_list = (unsigned long long*)malloc(m * sizeof(unsigned long long)); readGraph(h_edge_list, n, m); int h_parent[n], *d_parent; int h_mark[m], *d_mark; for (int i = 0; i < n; ++i) { h_parent[i] = i; } for (int i = 0; i < m; ++i) { h_mark[i] = 0; } int flag[1], *d_flag; int count = 0; clock_t beg = clock(); do { flag[0] = 0; cudaMalloc(&d_parent, n * sizeof(int)); 
cudaMalloc(&d_edge_list, m * sizeof(unsigned long long)); cudaMalloc(&d_mark, m * sizeof(int)); cudaMalloc(&d_flag, sizeof(int)); cudaMemcpy(d_parent, h_parent, n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_edge_list, h_edge_list, m * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(d_mark, h_mark, m * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice); if (count) { select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m); } else { select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m); } cudaThreadSynchronize(); cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_parent, d_parent, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(&d_parent); cudaFree(&d_edge_list); cudaFree(&d_mark); cudaFree(&d_flag); if (!flag[0]) { break; } count ^= 1; do { flag[0] = 0; cudaMalloc(&d_flag, sizeof(int)); cudaMalloc(&d_parent, n * sizeof(int)); cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_parent, h_parent, n * sizeof(int), cudaMemcpyHostToDevice); jump<<<256, 256>>>(d_parent, n, d_flag); cudaThreadSynchronize(); cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_parent, d_parent, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(&d_flag); cudaFree(&d_parent); } while(flag[0]); } while(flag); cout << float(clock() - beg) / CLOCKS_PER_SEC << endl; sort(h_parent, h_parent + n); cout << unique(h_parent, h_parent + n) - h_parent; }
4,728
#include <vector>
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <climits>
#include <cmath>

const int DIMENSION = 3;                         // matrices are 3x3
const int BLOCK_SIZE = 1024;                     // threads per block
const int BLOCK_ITEMS = 2048;                    // elements reduced per block (2 per thread)
const int EXPONENT = 11;                         // log2(BLOCK_ITEMS)
const int DEVICE_ID = 0;
const int ELEMENTS = DIMENSION * DIMENSION;

// Reads number_of_matrices 3x3 matrices (each preceded by a label token)
// into a structure-of-arrays layout: all element-0 values contiguous, then
// all element-1 values, etc. Padding slots beyond number_of_matrices stay
// INT_MAX, the neutral value for a min-reduction.
void read_file(std::ifstream& file, const int number_of_matrices, const int number_of_matrices_std, std::vector<int>& v)
{
    int first, second, third;
    std::string dump;
    for (int k = 0; k < number_of_matrices; k++) {
        file >> dump;   // skip the matrix label
        for (int i = 0; i < ELEMENTS; i += DIMENSION) {
            file >> first >> second >> third;
            v[(number_of_matrices_std * i) + k] = first;
            v[(number_of_matrices_std * (i + 1)) + k] = second;
            v[(number_of_matrices_std * (i + 2)) + k] = third;
        }
    }
}

// Loads the input file, rounding the matrix count up to a multiple of
// BLOCK_ITEMS so every reduction block is full.
std::vector<int> set_host_v(char* filename)
{
    int number_of_matrices;
    std::ifstream file(filename);
    if (file.is_open())
        file >> number_of_matrices;
    int number_of_matrices_std =
        ceil(number_of_matrices / (double) BLOCK_ITEMS) * BLOCK_ITEMS;
    std::vector<int> v(ELEMENTS * number_of_matrices_std, INT_MAX);
    read_file(file, number_of_matrices, number_of_matrices_std, v);
    file.close();
    return v;
}

// Copies the host vector to a freshly allocated device buffer.
void set_device_v(int number_of_matrices, std::vector<int>& host_v, void*& device_v)
{
    int block_size = ELEMENTS * number_of_matrices * sizeof(int);
    cudaMalloc(&device_v, block_size);
    cudaMemcpy(device_v, host_v.data(), block_size, cudaMemcpyHostToDevice);
}

// Min-reduction of BLOCK_ITEMS consecutive ints; the block minimum is
// written back to the first element of the block's range.
//
// Fixed two synchronization bugs in the original:
//  - inactive threads `return`ed out of the loop BEFORE __syncthreads(),
//    leaving the barrier inside divergent control flow (undefined behavior);
//  - there was no barrier between the shared-memory loads and the first
//    cross-thread read.
__global__ void reduce_block(void* m)
{
    int* m_int = (int*) m;
    int index = (BLOCK_ITEMS * blockIdx.x) + threadIdx.x;
    __shared__ int m_shared[BLOCK_ITEMS];
    // Each of the BLOCK_SIZE threads stages two elements.
    m_shared[threadIdx.x] = m_int[index];
    m_shared[BLOCK_SIZE + threadIdx.x] = m_int[BLOCK_SIZE + index];
    __syncthreads();
    // Tree reduction: halve the active range each step; every thread reaches
    // the barrier on every iteration.
    for (int active = BLOCK_ITEMS >> 1; active >= 1; active >>= 1) {
        if (threadIdx.x < active)
            m_shared[threadIdx.x] =
                min(m_shared[threadIdx.x], m_shared[threadIdx.x + active]);
        __syncthreads();
    }
    // Thread 0's `index` is the start of this block's range.
    if (threadIdx.x == 0)
        m_int[index] = m_shared[0];
}

// Gathers one partial result per reduced block (spaced BLOCK_ITEMS apart)
// into a contiguous prefix, one matrix element per grid block.
__global__ void compress_block(void* v, int number_of_matrices, int offset)
{
    int* v_int = (int*) v;
    int index_compressed = (blockIdx.x * number_of_matrices) + threadIdx.x
                         + (offset * BLOCK_SIZE);
    int index_expanded = (blockIdx.x * number_of_matrices)
                       + threadIdx.x * BLOCK_ITEMS
                       + (offset * BLOCK_ITEMS * BLOCK_SIZE);
    v_int[index_compressed] = v_int[index_expanded];
    // Each thread touches disjoint slots; no barrier needed.
}

// Re-packs one matrix-element line for the shrunken matrix count so the
// next reduction pass sees contiguous data.
__global__ void compress_line(void* v, int line, int number_of_blocks, int next_number_of_blocks)
{
    int index_expanded_1 = line * number_of_blocks * BLOCK_ITEMS + threadIdx.x;
    int index_compressed_1 = line * next_number_of_blocks * BLOCK_ITEMS + threadIdx.x;
    int index_expanded_2 = line * number_of_blocks * BLOCK_ITEMS + BLOCK_SIZE + threadIdx.x;
    int index_compressed_2 = line * next_number_of_blocks * BLOCK_ITEMS + BLOCK_SIZE + threadIdx.x;
    int* v_int = (int*) v;
    v_int[index_compressed_1] = v_int[index_expanded_1];
    v_int[index_compressed_2] = v_int[index_expanded_2];
}

// Single-thread pass placing the 9 final minima contiguously at the front.
// number_of_matrices is unused but kept for interface compatibility.
__global__ void final_compress(void* v, int number_of_matrices)
{
    int* v_int = (int*) v;
    for (int i = 0; i < ELEMENTS; i++) {
        v_int[i] = v_int[i * BLOCK_ITEMS];
    }
}

// Copies the 3x3 result back to the host and prints it row by row.
void print_result_matrix(void* device_v)
{
    int result[ELEMENTS];
    cudaMemcpy((void*)result, device_v, ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < ELEMENTS; i++) {
        std::cout << result[i];
        if ((i + 1) % DIMENSION)
            std::cout << " ";
        else
            std::cout << std::endl;
    }
}

int main(int argc, char* argv[])
{
    void* device_v;
    cudaSetDevice(DEVICE_ID);
    std::vector<int> host_v = set_host_v(argv[1]);
    int number_of_matrices = host_v.size() / ELEMENTS;
    set_device_v(number_of_matrices, host_v, device_v);

    // Repeatedly reduce by a factor of BLOCK_ITEMS, then compact, until a
    // single matrix remains.
    for (int k = number_of_matrices; k > 1; k >>= EXPONENT) {
        k = ceil(k / (double) BLOCK_ITEMS) * BLOCK_ITEMS;
        // Reduction
        int number_of_blocks = k >> EXPONENT;
        reduce_block<<< number_of_blocks * ELEMENTS, BLOCK_SIZE >>>(device_v);
        int next_number_of_blocks = ceil(number_of_blocks / (double) BLOCK_ITEMS);
        // Compression
        int threads_per_block =
            (number_of_blocks < BLOCK_SIZE) ? number_of_blocks : BLOCK_SIZE;
        for (int i = 0; i < number_of_blocks >> 10; i++)
            compress_block<<< ELEMENTS, threads_per_block >>>(device_v, k, i);
        for (int i = 1; i < ELEMENTS && number_of_blocks > 1; i++)
            compress_line<<< 1, BLOCK_SIZE >>>(device_v, i, number_of_blocks,
                                               next_number_of_blocks);
    }
    final_compress<<<1,1>>>(device_v, number_of_matrices);
    print_result_matrix(device_v);
    cudaDeviceReset();
    return(0);
}
4,729
#ifndef THREADS_PER_BLOCK #define THREADS_PER_BLOCK 1024 #endif __global__ void vc(float *dA, float *dB, int N) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < N) { dA[id] = dB[id]; } } extern "C" { void vcGPU(float* A, float *B, int start, int end, int GPUN) { float *dA, *dB; cudaMalloc(&dA, sizeof(float) * GPUN); cudaMalloc(&dB, sizeof(float) * GPUN); cudaMemcpy(dB, B + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice); vc<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dA, dB, GPUN); cudaDeviceSynchronize(); cudaMemcpy(A + start, dA, sizeof(float) * GPUN, cudaMemcpyDeviceToHost); cudaFree(dA); cudaFree(dB); } }
4,730
/*
 * This is a CUDA version of bellman_ford algorithm
 * Compile: nvcc -std=c++11 -arch=sm_52 -o cuda_bellman_ford cuda_bellman_ford.cu
 * Run: ./cuda_bellman_ford <input file> <number of blocks per grid> <number of threads per block>, you will find the output file 'output.txt'
 */

#include <string>
#include <cassert>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <iomanip>
#include <cstring>
#include <sys/time.h>

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

using std::string;
using std::cout;
using std::endl;

#define INF 1000000

/*
 * This is a CHECK function to check CUDA calls
 */
#define CHECK(call)                                                            \
{                                                                              \
    const cudaError_t error = call;                                            \
    if (error != cudaSuccess)                                                  \
    {                                                                          \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);                 \
        fprintf(stderr, "code: %d, reason: %s\n", error,                       \
                cudaGetErrorString(error));                                    \
        exit(1);                                                               \
    }                                                                          \
}

/**
 * utils is a namespace for utility functions
 * including I/O (read input file and print results) and matrix dimension convert(2D->1D) function
 */
namespace utils {
    int N;      // number of vertices
    int *mat;   // the adjacency matrix (row-major, N*N, INF = no edge)

    void abort_with_error_message(string msg) {
        std::cerr << msg << endl;
        abort();
    }

    // translate 2-dimension coordinate to 1-dimension
    int convert_dimension_2D_1D(int x, int y, int n) {
        return x * n + y;
    }

    // Reads N and the N*N adjacency matrix from `filename`; returns 0 on success.
    int read_file(string filename) {
        std::ifstream inputf(filename, std::ifstream::in);
        if (!inputf.good()) {
            abort_with_error_message("ERROR OCCURRED WHILE READING INPUT FILE");
        }
        inputf >> N;
        // input matrix should be smaller than 20MB * 20MB (400MB, we don't
        // have too much memory for multi-processors)
        assert(N < (1024 * 1024 * 20));
        mat = (int *) malloc(N * N * sizeof(int));
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++) {
                inputf >> mat[convert_dimension_2D_1D(i, j, N)];
            }
        return 0;
    }

    // Writes distances (clamped to INF) or the negative-cycle message to output.txt.
    int print_result(bool has_negative_cycle, int *dist) {
        std::ofstream outputf("output.txt", std::ofstream::out);
        if (!has_negative_cycle) {
            for (int i = 0; i < N; i++) {
                if (dist[i] > INF)
                    dist[i] = INF;
                outputf << dist[i] << '\n';
            }
            outputf.flush();
        } else {
            outputf << "FOUND NEGATIVE CYCLE!" << endl;
        }
        outputf.close();
        return 0;
    }
}//namespace utils

// One full relaxation pass: every thread grid-strides over destination
// vertices v and relaxes all edges (u, v). Sets *d_has_next when any
// distance shrinks.
__global__ void bellman_ford_one_iter(int n, int *d_mat, int *d_dist, bool *d_has_next, int iter_num){
    int global_tid = blockDim.x * blockIdx.x + threadIdx.x;
    int elementSkip = blockDim.x * gridDim.x;
    if(global_tid >= n) return;
    for(int u = 0 ; u < n ; u ++){
        for(int v = global_tid; v < n; v += elementSkip){
            int weight = d_mat[u * n + v];
            if(weight < INF){
                int new_dist = d_dist[u] + weight;
                if(new_dist < d_dist[v]){
                    d_dist[v] = new_dist;
                    *d_has_next = true;
                }
            }
        }
    }
}

/**
 * Bellman-Ford algorithm. Find the shortest path from vertex 0 to other vertices.
 * @param blocksPerGrid number of blocks per grid
 * @param threadsPerBlock number of threads per block
 * @param n input size
 * @param *mat input adjacency matrix
 * @param *dist distance array
 * @param *has_negative_cycle a bool variable to recode if there are negative cycles
 */
void bellman_ford(int blocksPerGrid, int threadsPerBlock, int n, int *mat, int *dist, bool *has_negative_cycle) {
    dim3 blocks(blocksPerGrid);
    dim3 threads(threadsPerBlock);

    int iter_num = 0;
    int *d_mat, *d_dist;
    bool *d_has_next, h_has_next;

    CHECK(cudaMalloc(&d_mat, sizeof(int) * n * n));
    CHECK(cudaMalloc(&d_dist, sizeof(int) * n));
    CHECK(cudaMalloc(&d_has_next, sizeof(bool)));

    *has_negative_cycle = false;
    for(int i = 0 ; i < n; i ++){
        dist[i] = INF;
    }
    dist[0] = 0;
    CHECK(cudaMemcpy(d_mat, mat, sizeof(int) * n * n, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_dist, dist, sizeof(int) * n, cudaMemcpyHostToDevice));

    for(;;){
        h_has_next = false;
        CHECK(cudaMemcpy(d_has_next, &h_has_next, sizeof(bool), cudaMemcpyHostToDevice));

        bellman_ford_one_iter<<<blocks, threads>>>(n, d_mat, d_dist, d_has_next, iter_num);
        CHECK(cudaGetLastError());
        CHECK(cudaDeviceSynchronize());

        CHECK(cudaMemcpy(&h_has_next, d_has_next, sizeof(bool), cudaMemcpyDeviceToHost));
        iter_num++;
        // Converged: no edge relaxed during this pass. Must be tested BEFORE
        // the cycle check — the original declared a negative cycle as soon
        // as iter_num reached n-1 even when the graph had already converged.
        if(!h_has_next){
            break;
        }
        // n-1 passes always suffice without a negative cycle, so an update
        // during the n-th pass proves one exists.
        if(iter_num >= n){
            *has_negative_cycle = true;
            break;
        }
    }

    if(! *has_negative_cycle){
        CHECK(cudaMemcpy(dist, d_dist, sizeof(int) * n, cudaMemcpyDeviceToHost));
    }

    CHECK(cudaFree(d_mat));
    CHECK(cudaFree(d_dist));
    CHECK(cudaFree(d_has_next));
}

int main(int argc, char **argv) {
    if (argc <= 1) {
        utils::abort_with_error_message("INPUT FILE WAS NOT FOUND!");
    }
    if (argc <= 3) {
        utils::abort_with_error_message("blocksPerGrid or threadsPerBlock WAS NOT FOUND!");
    }

    string filename = argv[1];
    int blockPerGrid = atoi(argv[2]);
    int threadsPerBlock = atoi(argv[3]);

    int *dist;
    bool has_negative_cycle = false;

    assert(utils::read_file(filename) == 0);
    dist = (int *) calloc(sizeof(int), utils::N);

    // time counter
    timeval start_wall_time_t, end_wall_time_t;
    float ms_wall;
    cudaDeviceReset();

    // start timer
    gettimeofday(&start_wall_time_t, nullptr);
    // bellman-ford algorithm
    bellman_ford(blockPerGrid, threadsPerBlock, utils::N, utils::mat, dist, &has_negative_cycle);
    CHECK(cudaDeviceSynchronize());
    // end timer
    gettimeofday(&end_wall_time_t, nullptr);
    ms_wall = ((end_wall_time_t.tv_sec - start_wall_time_t.tv_sec) * 1000 * 1000
               + end_wall_time_t.tv_usec - start_wall_time_t.tv_usec) / 1000.0;

    std::cerr.setf(std::ios::fixed);
    std::cerr << std::setprecision(6) << "Time(s): " << (ms_wall / 1000.0) << endl;
    utils::print_result(has_negative_cycle, dist);

    free(dist);
    free(utils::mat);
    return 0;
}
4,731
__global__ void rgb2gray(double * result, double * * I, int rows, int cols) { *result = rows; }
4,732
#include "includes.h" __global__ void computeCost(const double *Params, const float *uproj, const float *mu, const float *W, const bool *match, const int *iC, const int *call, float *cmax){ int NrankPC,j, NchanNear, tid, bid, Nspikes, Nthreads, k, my_chan, this_chan, Nchan; float xsum = 0.0f, Ci, lam; Nspikes = (int) Params[0]; NrankPC = (int) Params[1]; Nthreads = blockDim.x; lam = (float) Params[5]; NchanNear = (int) Params[6]; Nchan = (int) Params[7]; tid = threadIdx.x; bid = blockIdx.x; while(tid<Nspikes){ my_chan = call[tid]; if (match[my_chan + bid * Nchan]){ xsum = 0.0f; for (k=0;k<NchanNear;k++) for(j=0;j<NrankPC;j++){ this_chan = iC[k + my_chan * NchanNear]; xsum += uproj[j + NrankPC * k + NrankPC*NchanNear * tid] * W[j + NrankPC * this_chan + NrankPC*Nchan * bid]; } Ci = max(0.0f, xsum) + lam/mu[bid]; cmax[tid + bid*Nspikes] = Ci * Ci / (1.0f + lam/(mu[bid] * mu[bid])) - lam; } tid+= Nthreads; } }
4,733
#include "includes.h" __global__ void ReferenceGemm_kernel( int M, int N, int K, float alpha, float const *A, int lda, float const *B, int ldb, float beta, float *C, int ldc) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < M && j < N) { float accumulator = 0; for (int k = 0; k < K; ++k) { accumulator += A[i + k * lda] * B[k + j * ldb]; } C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc]; } }
4,734
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

#include <stdio.h>

#define TILE_SIZE 16

__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {

    /********************************************************************
     *
     * Compute C = A x B
     *   where A is a (m x k) matrix
     *   where B is a (k x n) matrix
     *   where C is a (m x n) matrix
     * All matrices are row-major.
     *
     * Use shared memory for tiling
     *
     ********************************************************************/

    __shared__ float ds_A[TILE_SIZE][TILE_SIZE];
    __shared__ float ds_B[TILE_SIZE][TILE_SIZE];

    // declare and initialize ids
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // calculate row/column of the C element this thread owns
    int row = blockDim.y * by + ty;
    int col = blockDim.x * bx + tx;

    // accumulator for the dot product
    float pValue = 0.0;

    // march the tiles across the k dimension
    for (int p = 0; p < ((k-1)/TILE_SIZE+1); p++) {
        // load A and B tiles into shared memory,
        // substituting 0 when the tile hangs past a matrix edge
        if (row < m && (p*TILE_SIZE + tx) < k) {
            ds_A[ty][tx] = A[row*k + p*TILE_SIZE + tx];
        } else {
            ds_A[ty][tx] = 0.0;
        }
        if (col < n && (p*TILE_SIZE + ty) < k) {
            // B is (k x n) row-major, so its row stride is n.
            // BUG FIX: the original indexed B[(p*TILE_SIZE+ty)*k + col],
            // which is wrong whenever k != n.
            ds_B[ty][tx] = B[(p*TILE_SIZE+ty)*n + col];
        } else {
            ds_B[ty][tx] = 0.0;
        }
        __syncthreads();

        // partial dot product over this tile; zero-padded loads make it
        // safe for every thread to run the full loop
        for (int j = 0; j < TILE_SIZE; j++) {
            pValue += ds_A[ty][j] * ds_B[j][tx];
        }
        __syncthreads();
    }

    // write the result if this thread maps to a real C element
    if (row < m && col < n) {
        C[row*n+col] = pValue;
    }
}

// Host wrapper: validates the (fixed) BLAS-style parameters, then launches
// mysgemm with a TILE_SIZE x TILE_SIZE block grid covering C.
// Only transa/transb == 'N', alpha == 1, beta == 0 are supported.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if ((transa != 'N') && (transa != 'n')) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if ((transb != 'N') && (transb != 'n')) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }

    // Initialize thread block and kernel grid dimensions ---------------------
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    dim3 dimGrid(((n-1)/TILE_SIZE)+1, ((m-1)/TILE_SIZE)+1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);

    // Invoke CUDA kernel -----------------------------------------------------
    mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
}
4,735
#include <cstdio> #include <cstdlib> #include <iostream> #include <ctime> #include <cuda_runtime.h> using namespace std; int main(int argc, char** argv){ int count = 0; cudaGetDeviceCount(&count); if (count == 0) { cerr << "There is no device" << endl; system("pause"); return 0; } int i; for (int i = 0; i < count; ++i) { cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { cerr << "no cida 1.x" << endl; } cudaSetDevice(i); system("pause"); return 0; }
4,736
// CUDACast #2 #include <stdio.h> #define SIZE 1024 // Enable to launch on GPU // tells compiler that function is going to be executed // on the gpu and callable on the host __global__ void VectorAdd(int* a, int* b, int* c, int n) { // Select the thread index to figure out how to index into vector int i = threadIdx.x; if (i < n) c[i] = a[i] + b[i]; } int main() { int* a, * b, * c; int* d_a, * d_b, * d_c; a = (int*)malloc(SIZE * sizeof(int)); b = (int*)malloc(SIZE * sizeof(int)); c = (int*)malloc(SIZE * sizeof(int)); // Must allocate memory on GPU cudaMalloc(&d_a, SIZE * sizeof(int)); cudaMalloc(&d_b, SIZE * sizeof(int)); cudaMalloc(&d_c, SIZE * sizeof(int)); for (int i = 0; i < SIZE; i++) { a[i] = i; b[i] = i; c[i] = 0; } // Copies values to GPU cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, SIZE * sizeof(int), cudaMemcpyHostToDevice); // Launch VectorAdd kernel on GPU // <<<BLOCKS, #Threads>>> VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE); // Copy from GPU back to CPU cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost); // Check results for (int i = 0; i < 10; i++) printf("c[%d] = %d\n", i, c[i]); // Free from CPU free(a); free(b); free(c); // Free from cuda cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
4,737
#include "cuda_runtime.h" #include <stdio.h> #include <time.h> const int M = 1024; const int N = 512; //2维网格1维线程块 __global__ void VectorAdd(float* a, float* b, float* c) { int thread_id = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; c[thread_id] = a[thread_id] + b[thread_id]; } int main() { int start = clock(); float a[M * N] = { 0.0 }; float b[M * N] = { 0.0 }; float c[M * N] = { 0.0 }; float* device_a = NULL; float* device_b = NULL; float* device_c = NULL; //分配显存 cudaMalloc((void**)&device_a, sizeof(float) * M * N); cudaMalloc((void**)&device_b, sizeof(float) * M * N); cudaMalloc((void**)&device_c, sizeof(float) * M * N); for (int i = 0; i < M * N; i++) { a[i] = i; b[i] = i; } //将内存中a和b数组的值复制到GPU中显存中 cudaMemcpy(device_a, a, sizeof(float) * M * N, cudaMemcpyHostToDevice); cudaMemcpy(device_b, b, sizeof(float) * M * N, cudaMemcpyHostToDevice); //一个kernel函数由一个gpu的一个grid执行 //调用核函数 cpu调用 gpu运行 dim3 dim_grid(32, 32); //一个grid网格包含n / 512个线程块blocks(为了充分利用sm blocks尽可能多) dim3 dim_block(N); //一个线程块block包含 512个线程threads(最多不超过512个) VectorAdd<<<dim_grid, dim_block>>>(device_a, device_b, device_c); //GPU计算任务完成后 将数据传输回CPU cudaMemcpy(c, device_c, sizeof(float) * M * N, cudaMemcpyDeviceToHost); for (int i = 0; i < M * N; i++) printf("%.0f + %.0f = %.0f\t", a[i], b[i], c[i]); int end = clock(); printf("\n程序耗时:%dms\n", (end - start) / 1000); //释放gpu显存 cudaFree(device_a); cudaFree(device_b); cudaFree(device_c); return 0; }
4,738
#include "includes.h" __global__ void triad(float* A, float* B, float* C, float s) { int gid = threadIdx.x + (blockIdx.x * blockDim.x); C[gid] = A[gid] + s*B[gid]; }
4,739
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <cublas_v2.h>
#include "utils.cuh"
#include "gpchange.cuh"

/* Minimize the marginal or conditional log likelihood of given data
 * as a function of the hyper-parameters using conjugate gradient descent.
 *
 * The current values of the hyper-parameters are chosen as the starting point.
 * The "length" gives the length of the run: if it is positive, it gives the
 * maximum number of line searches, if negative its absolute gives the max
 * allowed number of function evaluations. The "red" parameter indicates the
 * reduction in function value to be expected in the first line-search.
 *
 * The function returns when either its length is up, or if no further progress
 * can be made (ie, we are at a (local) minimum, or so close that due to
 * numerical problems, we cannot get any closer). The function sets the final
 * solution as the updated log hyper-parameters for the covariance function.
 *
 * The Polack-Ribiere flavour of conjugate gradients is used to compute search
 * directions, and a line search using quadratic and cubic polynomial
 * approximations and the Wolfe-Powell stopping criteria is used together with
 * the slope ratio method for guessing initial step sizes. Additionally a bunch
 * of checks are made to make sure that exploration is taking place and that
 * extrapolation will not be unboundedly large.
 */

// Device helper: *out = dot(a, b) over nh elements via the cuBLAS device
// API. Returns 0 on success, nonzero on any cuBLAS failure. Replaces the
// original's repeated create/dot/destroy boilerplate.
static __device__ int dot_device(int nh, const double *a, const double *b, double *out)
{
    cublasHandle_t hdl;
    if (cublasCreate_v2(&hdl) != CUBLAS_STATUS_SUCCESS) {
        printf("cublasCreate failed\n");
        return 1;
    }
    cublasStatus_t st = cublasDdot(hdl, nh, a, 1, b, 1, out);
    if (cublasDestroy_v2(hdl) != CUBLAS_STATUS_SUCCESS) {
        printf("cublasDestroy failed\n");
        return 1;
    }
    return (st == CUBLAS_STATUS_SUCCESS) ? 0 : 1;
}

// numthreads and n are unused but kept for interface compatibility.
// NOTE: on allocation or cuBLAS failure the routine aborts the search and
// returns with lh0 unchanged (error paths do not attempt full cleanup,
// matching the original's behavior).
__device__ void minimize(double *X, int nX, double *Y, int nY, int length, int red, double *lh0, int nh, int numthreads, int method, int n)
{
    if ((method != 1) && (method != 2)) {
        return;   // only marginal (1) / conditional (2) likelihood supported
    }

    char order = 'C';
    double f0, d3, x2, f2, d2, f3, x4 = 0, f4 = 0, d4 = 0, A, B;
    double *df0, *df3, *L0, *l, *l1;
    int i = 0, ls_failed = 0, j;

    // Working copy of the hyper-parameters.
    l = (double *) malloc(sizeof(double) * nh);
    if (l == nullptr) { printf("could not allocate memory\n"); return; }
    for (j = 0; j < nh; j++) l[j] = lh0[j];

    // Line-search constants (Wolfe-Powell / slope-ratio scheme).
    double int1 = 0.1, ext = 3.0, ratio = 10, sig = 0.1;
    double rho = sig / 2;
    int mx = 20;                 // max evaluations per line search
    if (red == -1) red = 1;

    df0 = (double *) malloc(sizeof(double) * nh);
    if (df0 == nullptr) { printf("could not allocate memory\n"); return; }
    df3 = (double *) malloc(sizeof(double) * nh);
    if (df3 == nullptr) { printf("could not allocate memory\n"); return; }
    L0 = (double *) malloc(sizeof(double) * nh);
    if (L0 == nullptr) { printf("could not allocate memory\n"); return; }

    /* compute loglikelihood and derivatives at the starting point */
    if (method == 1) {
        computeMLLChol(&f0, df0, X, Y, nX, nY, lh0, nh, order);
    }

    // Initial search direction: steepest descent.
    double *s = (double *) malloc(sizeof(double) * nh);
    if (s == nullptr) { printf("could not allocate memory\n"); return; }
    for (j = 0; j < nh; j++) s[j] = -1 * df0[j];

    // Count the initial function evaluation when length counts evaluations.
    // (Original tested `fabsf(length) < 0`, which is always false.)
    if (length < 0) i = i + 1;

    double ret;
    if (dot_device(nh, s, s, &ret)) return;
    double d0 = -1 * ret;                    // initial slope
    double x3 = red / (1 - d0);              // initial step size

    // Original condition `i < length` never ran for negative length;
    // |length| bounds the loop in both counting modes.
    while (i < abs(length)) {
        if (length > 0) i = i + 1;           // count this line search

        // Snapshot of the best point so far.
        for (j = 0; j < nh; j++) L0[j] = l[j];
        double F0 = f0;
        double *dF0 = (double *) malloc(sizeof(double) * nh);
        if (dF0 == nullptr) { printf("could not allocate memory\n"); return; }
        for (j = 0; j < nh; j++) dF0[j] = df0[j];

        int m;
        if (length > 0) m = mx;
        else m = minu(mx, -1 * (length + i));

        // Trial-point buffer, allocated once per line search. (The original
        // re-malloc'd it on every interpolation iteration without freeing —
        // a leak.)
        l1 = (double *) malloc(sizeof(double) * nh);
        if (l1 == nullptr) { printf("could not allocate memory\n"); return; }

        /* ---------------- extrapolation phase ---------------- */
        while (1) {
            x2 = 0; f2 = f0; d2 = d0; f3 = f0;
            for (j = 0; j < nh; j++) df3[j] = df0[j];

            int success = 0;
            while ((!success) && (m > 0)) {
                m -= 1;
                if (length < 0) i = i + 1;
                for (j = 0; j < nh; j++) l1[j] = l[j] + s[j] * x3;
                /* compute loglikelihood and derivatives */
                if (method == 1) {
                    computeMLLChol(&f3, df3, X, Y, nX, nY, l1, nh, order);
                }
                // Reject non-finite evaluations and bisect back.
                int s1 = 0;
                if (isnan(f3) || isinf(f3)) s1 = 1;
                for (j = 0; j < nh; j++) {
                    if (isnan(df3[j]) || isinf(df3[j])) s1 = 1;
                }
                if (!s1) success = 1;
                else x3 = (x2 + x3) / 2;
            }
            // Keep the best values seen.
            if (f3 < F0) {
                for (j = 0; j < nh; j++) L0[j] = l[j] + s[j] * x3;
                F0 = f3;
                for (j = 0; j < nh; j++) dF0[j] = df3[j];
            }
            if (dot_device(nh, df3, s, &d3)) return;   // new slope
            // Done extrapolating?
            if ((d3 > sig * d0) || (f3 > f0 + x3 * rho * d0) || (m == 0)) break;

            // Cubic extrapolation from the last two points.
            double x1 = x2, f1 = f2, d1 = d2;
            x2 = x3; f2 = f3; d2 = d3;
            A = 6 * (f1 - f2) + 3 * (d2 + d1) * (x2 - x1);
            B = 3 * (f2 - f1) - (2 * d1 + d2) * (x2 - x1);
            x3 = x1 - d1 * pow(x2 - x1, 2) / (B + sqrt(B * B - A * d1 * (x2 - x1)));
            if (isnan(x3) || isinf(x3) || x3 < 0) x3 = x2 * ext;
            else if (x3 > x2 * ext) x3 = x2 * ext;
            else if (x3 < x2 + int1 * (x2 - x1)) x3 = x2 + int1 * (x2 - x1);
        }

        /* ---------------- interpolation phase ---------------- */
        // (fabs, not fabsf: d3/d0 are doubles)
        while (((fabs(d3) > -sig * d0) || (f3 > f0 + x3 * rho * d0)) && m > 0) {
            if (d3 > 0 || f3 > f0 + x3 * rho * d0) {   // bracket from above
                x4 = x3; f4 = f3; d4 = d3;
            } else {                                   // bracket from below
                x2 = x3; f2 = f3; d2 = d3;
            }
            if (f4 > f0) {
                // quadratic interpolation
                x3 = x2 - (0.5 * d2 * pow(x4 - x2, 2) / (f4 - f2 - d2 * (x4 - x2)));
            } else {
                // cubic interpolation
                A = 6 * (f2 - f4) / (x4 - x2) + 3 * (d4 + d2);
                B = 3 * (f4 - f2) - (2 * d2 + d4) * (x4 - x2);
                x3 = x2 + (sqrt(B * B - A * d2 * pow(x4 - x2, 2)) - B) / A;
            }
            if (isnan(x3) || isinf(x3)) {
                x3 = (x2 + x4) / 2;   // bisect on numerical trouble
            }
            // Keep x3 strictly inside the bracket.
            x3 = maxu(minu(x3, x4 - int1 * (x4 - x2)), x2 + int1 * (x4 - x2));
            for (j = 0; j < nh; j++) l1[j] = l[j] + s[j] * x3;
            /* compute loglikelihood and derivatives */
            if (method == 1) {
                computeMLLChol(&f3, df3, X, Y, nX, nY, l1, nh, order);
            }
            if (f3 < F0) {
                for (j = 0; j < nh; j++) {
                    L0[j] = l[j] + s[j] * x3;
                    dF0[j] = df3[j];
                }
                F0 = f3;
            }
            m--;
            if (length < 0) i = i + 1;
            if (dot_device(nh, df3, s, &d3)) return;
        }
        free(l1);

        if ((fabs(d3) < -sig * d0) && (f3 < f0 + x3 * rho * d0)) {
            /* line search succeeded: accept the point */
            for (j = 0; j < nh; j++) l[j] += s[j] * x3;
            f0 = f3;
            /* Polack-Ribiere update of the search direction */
            double ret1, ret2, ret3;
            if (dot_device(nh, df3, df3, &ret1)) return;
            if (dot_device(nh, df0, df3, &ret2)) return;
            if (dot_device(nh, df0, df0, &ret3)) return;
            double _int = (ret1 - ret2) / ret3;
            for (j = 0; j < nh; j++) s[j] = s[j] * _int - df3[j];
            for (j = 0; j < nh; j++) df0[j] = df3[j];
            d3 = d0;
            if (dot_device(nh, df0, s, &ret)) return;
            d0 = ret;
            if (d0 > 0) {
                // new direction is not a descent direction: restart with
                // steepest descent
                for (j = 0; j < nh; j++) s[j] = -1 * df0[j];
                if (dot_device(nh, s, s, &ret)) return;
                d0 = -1 * ret;
            }
            // slope-ratio guess for the next step, capped at `ratio`
            x3 = x3 * minu(ratio, d3 / (d0 - FLT_MIN));
            ls_failed = 0;
        } else {
            /* line search failed: restore the best point */
            for (j = 0; j < nh; j++) {
                l[j] = L0[j];
                df0[j] = dF0[j];
                dF0[j] = df3[j];
            }
            f0 = F0;
            // two consecutive failures, or out of budget: give up
            // (free dF0 here — the original's `break` leaked it)
            if (ls_failed == 1 || i > abs(length)) { free(dF0); break; }
            for (j = 0; j < nh; j++) s[j] = -1 * df0[j];   // steepest descent restart
            if (dot_device(nh, s, s, &ret)) return;
            d0 = -1 * ret;
            x3 = 1 / (1 - d0);
            ls_failed = 1;
        }
        free(dF0);
    }

    // Publish the solution and release the work buffers.
    // (The original freed df3 twice here — a double free.)
    for (j = 0; j < nh; j++) lh0[j] = l[j];
    free(l);
    free(df0);
    free(df3);
    free(L0);
    free(s);
}
4,740
/* This is a automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Generated single-thread kernel exercising float math corner cases
// (denormals, signed zeros, division by +0.0f). The final value of `comp`
// is printed with full precision for cross-platform comparison.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
   if (comp < (var_1 / (var_2 + var_3))) {
      if (comp == ldexpf(powf(var_4 + (-0.0f - (-1.5613E-41f * var_5)), (var_6 / (+1.1736E-2f / var_7 / cosf((-1.8611E-27f + -1.1853E-44f - var_8))))), 2)) {
         comp += floorf(+1.2703E-4f / sinf((-1.1071E-35f / acosf((+1.8437E36f / +0.0f * +1.2212E14f)))));
         if (comp == (+1.7207E-42f - +1.3187E-43f / var_9 + var_10 * sqrtf(var_11 + (-1.2206E-35f / var_12)))) {
            comp += -1.2395E-42f * +1.1947E29f - var_13;
            float tmp_1 = var_14 - var_15;
            comp = tmp_1 * (-1.7753E-24f - cosf((+1.1999E-44f - +1.0404E35f - -0.0f)));
         }
         if (comp >= powf((-1.0831E-44f - (-1.6197E-37f * (var_16 + var_17))), var_18 - var_19)) {
            comp += +1.8563E-37f / (+1.1762E-36f / (var_20 - (+1.3362E-44f / expf(-0.0f - -0.0f))));
         }
      }
   }
   printf("%.17g\n", comp);
}

// Helper producing a 10-element array filled with v.
// Not referenced by main() below; presumably emitted unconditionally by the
// test generator. The returned buffer is never freed.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
/* Program variables */
  // NOTE(review): argv[1]..argv[21] are read without checking argc — the
  // generated harness assumes exactly 21 arguments are supplied.
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);

  // Single-thread launch; synchronize so the device printf is flushed
  // before the process exits.
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
  cudaDeviceSynchronize();

  return 0;
}
4,741
#include "rgb2yuv.cuh"
#include <stdint.h>
#include <stdio.h>   // printf (host logging + CHECK macro)
#include <math.h>    // sin/cos in cudaNV12SetupColorspace
#include <cuda_runtime.h>

// Abort-on-error check for CUDA runtime calls.
#define CHECK(res) { if(res != cudaSuccess){printf("Error :%s:%d , ", __FILE__,__LINE__); \
    printf("code : %d , reason : %s \n", res,cudaGetErrorString(res));exit(-1);}}
#define CUDA(x) cudaCheckError((x), #x, __FILE__, __LINE__)
#define CUDA_FAILED(x) (CUDA(x) != cudaSuccess)

#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff

#define LOG_CUDA "[cuda] "

// Alpha value and hue color-space matrix, set once from the host via
// cudaMemcpyToSymbol in cudaNV12SetupColorspace().
__constant__ uint32_t constAlpha;
__constant__ float constHueColorSpaceMat[9];

/**
 * iDivUp — ceiling integer division, used for grid sizing.
 */
inline __device__ __host__ int iDivUp( int a, int b )
{
    return (a % b != 0) ? (a / b + 1) : (a / b);
}

/**
 * cudaCheckError — logs the call text and, on failure, the error string and
 * location.  Returns the original status so it can be chained (see CUDA_FAILED).
 */
inline cudaError_t cudaCheckError(cudaError_t retval, const char* txt, const char* file, int line )
{
    printf(LOG_CUDA "%s\n", txt);

    if( retval != cudaSuccess )
    {
        printf(LOG_CUDA "   %s (error %u) (hex 0x%02X)\n", cudaGetErrorString(retval), retval, retval);
        printf(LOG_CUDA "   %s:%i\n", file, line);
    }

    return retval;
}

// Integer BT.601-style luma approximation: Y = 0.30R + 0.59G + 0.11B.
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
    y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}

// Full YUV conversion; U/V are biased by +128 (the 12800/100 term).
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
    rgb_to_y(r, g, b, y);
    u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
    v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}

/**
 * RGB_to_YV12 — packed RGB to planar YV12 (or I420 when formatYV12 == false).
 *
 * Each thread converts one 2x2 pixel quad: four luma samples plus one chroma
 * pair taken from the bottom-right pixel of the quad (no averaging).
 * Launch with a 2D grid covering iDivUp(width/2) x iDivUp(height/2) threads.
 * Assumes width and height are even.
 *
 * Fix: removed the per-pixel printf("[%d,%d]\n", x, y) debug trace — it
 * serialized every thread and flooded stdout.
 */
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
    const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
    const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;

    const int x1 = x + 1;
    const int y1 = y + 1;

    if( x1 >= width || y1 >= height )
        return;

    const int planeSize = height * dstPitch;

    uint8_t* y_plane = dst;
    uint8_t* u_plane;
    uint8_t* v_plane;

    if( formatYV12 )
    {
        u_plane = y_plane + planeSize;
        v_plane = u_plane + (planeSize / 4);    // size of U & V planes is 25% of Y plane
    }
    else
    {
        v_plane = y_plane + planeSize;          // in I420, order of U & V planes is reversed
        u_plane = v_plane + (planeSize / 4);
    }

    T px;
    uint8_t y_val, u_val, v_val;

    px = src[y * srcAlignedWidth + x];
    rgb_to_y(px.x, px.y, px.z, y_val);
    y_plane[y * dstPitch + x] = y_val;

    px = src[y * srcAlignedWidth + x1];
    rgb_to_y(px.x, px.y, px.z, y_val);
    y_plane[y * dstPitch + x1] = y_val;

    px = src[y1 * srcAlignedWidth + x];
    rgb_to_y(px.x, px.y, px.z, y_val);
    y_plane[y1 * dstPitch + x] = y_val;

    // Bottom-right pixel supplies the chroma sample for the whole quad.
    px = src[y1 * srcAlignedWidth + x1];
    rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
    y_plane[y1 * dstPitch + x1] = y_val;

    const int uvPitch = dstPitch / 2;
    const int uvIndex = (y / 2) * uvPitch + (x / 2);

    u_plane[uvIndex] = u_val;
    v_plane[uvIndex] = v_val;
}

/**
 * RGB_to_NV12 — packed RGB to semi-planar NV12 (interleaved VU when
 * formatNV12 == true, UV otherwise — note the template flag swaps the order).
 * Same 2x2-quad scheme and even-size assumption as RGB_to_YV12.
 *
 * Fix: removed the per-pixel printf debug trace.
 */
template <typename T, bool formatNV12>
__global__ void RGB_to_NV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
    const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
    const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;

    const int x1 = x + 1;
    const int y1 = y + 1;

    if( x1 >= width || y1 >= height )
        return;

    const int planeSize = height * dstPitch;

    uint8_t* y_plane = dst;
    uint8_t* u_plane = y_plane + planeSize;   // interleaved chroma plane

    T px;
    uint8_t y_val, u_val, v_val;

    px = src[y * srcAlignedWidth + x];
    rgb_to_y(px.x, px.y, px.z, y_val);
    y_plane[y * dstPitch + x] = y_val;

    px = src[y * srcAlignedWidth + x1];
    rgb_to_y(px.x, px.y, px.z, y_val);
    y_plane[y * dstPitch + x1] = y_val;

    px = src[y1 * srcAlignedWidth + x];
    rgb_to_y(px.x, px.y, px.z, y_val);
    y_plane[y1 * dstPitch + x] = y_val;

    px = src[y1 * srcAlignedWidth + x1];
    rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
    y_plane[y1 * dstPitch + x1] = y_val;

    const int uvPitch = dstPitch / 2;
    const int uvIndex = (y / 2) * uvPitch + (x / 2);

    if(formatNV12){
        u_plane[uvIndex*2+1] = u_val;   // VU order
        u_plane[uvIndex*2]   = v_val;
    }else{
        u_plane[uvIndex*2]   = u_val;   // UV order
        u_plane[uvIndex*2+1] = v_val;
    }
}

/**
 * YUV2RGB — converts one 10-bit-per-component YUV pixel (U/V biased by 512)
 * to RGB using fixed BT.601-ish coefficients.  The commented-out block is the
 * original hue-matrix path kept for reference.
 */
__device__ void YUV2RGB(uint32_t *yuvi, float *red, float *green, float *blue)
{
    /* Hue-matrix variant (disabled):
    float luma, chromaCb, chromaCr;
    luma     = (float)yuvi[0];
    chromaCb = (float)((int)yuvi[1] - 512.0f);
    chromaCr = (float)((int)yuvi[2] - 512.0f);
    *red  = MUL(luma, constHueColorSpaceMat[0]) + MUL(chromaCb, constHueColorSpaceMat[1]) + MUL(chromaCr, constHueColorSpaceMat[2]);
    *green= MUL(luma, constHueColorSpaceMat[3]) + MUL(chromaCb, constHueColorSpaceMat[4]) + MUL(chromaCr, constHueColorSpaceMat[5]);
    *blue = MUL(luma, constHueColorSpaceMat[6]) + MUL(chromaCb, constHueColorSpaceMat[7]) + MUL(chromaCr, constHueColorSpaceMat[8]); */

    const float luma = float(yuvi[0]);
    const float u    = float(yuvi[1]) - 512.0f;
    const float v    = float(yuvi[2]) - 512.0f;

    /* R = Y + 1.140V
       G = Y - 0.395U - 0.581V
       B = Y + 2.032U */
    *red   = luma + 1.140f * v;
    *green = luma - 0.395f * u - 0.581f * v;
    *blue  = luma + 2.032f * u;
}

/**
 * RGBAPACK_8bit — clamps 8-bit-range floats and packs R|G|B|alpha into a
 * big-endian-style word (R in the high byte).
 */
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
    uint32_t ARGBpixel = 0;

    red   = min(max(red,   0.0f), 255.0f);
    green = min(max(green, 0.0f), 255.0f);
    blue  = min(max(blue,  0.0f), 255.0f);

    ARGBpixel = ((((uint32_t)red)   << 24) |
                 (((uint32_t)green) << 16) |
                 (((uint32_t)blue)  <<  8) | (uint32_t)alpha);

    return ARGBpixel;
}

/**
 * RGBAPACK_10bit — clamps 10-bit-range floats, down-shifts to 8 bits and
 * packs the bytes little-endian as R,G,B,A (R in the lowest-address byte).
 *
 * Fixes: removed the per-pixel debug printf and the dead `intRed` local;
 * the `alpha` parameter was previously ignored, leaving the alpha byte 0
 * (fully transparent) — it is now stored in byte 3 as intended (constAlpha
 * is initialized to 0xff by cudaNV12SetupColorspace).
 */
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
    uint32_t ARGBpixel = 0;

    // Clamp final 10-bit results.
    red   = min(max(red,   0.0f), 1023.f);
    green = min(max(green, 0.0f), 1023.f);
    blue  = min(max(blue,  0.0f), 1023.f);

    // Byte-wise little-endian pack: R,G,B,A at offsets 0..3.
    uint8_t * pRed = (uint8_t *) &ARGBpixel;
    *pRed     = (uint8_t)((uint32_t)red   >> 2);
    *(pRed+1) = (uint8_t)((uint32_t)green >> 2);
    *(pRed+2) = (uint8_t)((uint32_t)blue  >> 2);
    *(pRed+3) = (uint8_t)alpha;   // was never written before -> alpha was always 0

    return ARGBpixel;
}

/**
 * NV12ToARGB — converts NV12 (Y plane + interleaved CbCr plane) to 32-bit
 * RGBA.  Each thread processes 2 horizontally adjacent pixels; odd scanlines
 * vertically interpolate chroma.  nSourcePitch is in bytes, nDestPitch in
 * bytes (divided by 4 to index the uint32_t destination).
 */
__global__ void NV12ToARGB(uint32_t *srcImage, size_t nSourcePitch,
                           uint32_t *dstImage, size_t nDestPitch,
                           uint32_t width, uint32_t height)
{
    int x, y;
    uint32_t yuv101010Pel[2];
    // The caller supplies the real pitch; the historical 64-byte round-up is gone.
    uint32_t processingPitch = nSourcePitch;
    uint32_t dstImagePitch   = nDestPitch >> 2;
    uint8_t *srcImageU8      = (uint8_t *)srcImage;

    // Two pixels per thread in x.
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y *  blockDim.y       +  threadIdx.y;

    if (x >= width)
        return;
    if (y >= height)
        return;

    // Read 2 luma components at a time since CbCr is decimated 2:1.
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x    ]) << 2;
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;

    uint32_t chromaOffset = processingPitch * height;
    int y_chroma = y >> 1;

    if (y & 1)  // odd scanline: interpolate chroma vertically
    {
        uint32_t chromaCb;
        uint32_t chromaCr;

        chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x    ];
        chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];

        if (y_chroma < ((height >> 1) - 1))
        {
            chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x    ] + 1) >> 1;
            chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
        }

        yuv101010Pel[0] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    else
    {
        yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << ( COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
        yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << ( COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }

    // Unpack and color-convert both pixels.
    uint32_t yuvi[6];
    float red[2], green[2], blue[2];

    yuvi[0] = ( yuv101010Pel[0]                                   & COLOR_COMPONENT_MASK);
    yuvi[1] = ((yuv101010Pel[0] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
    yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
    yuvi[3] = ( yuv101010Pel[1]                                   & COLOR_COMPONENT_MASK);
    yuvi[4] = ((yuv101010Pel[1] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
    yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

    YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
    YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);

    dstImage[y * dstImagePitch + x    ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
}

/**
 * rgb2yuv — host wrapper: copies packed RGB (uchar3) to the device, runs
 * RGB_to_YV12 and copies the YV12 result (width*height*3/2 bytes) back.
 * Assumes a tightly-packed source (pitch == width) and even dimensions.
 *
 * Fixes: every CUDA call is now checked; the synchronize/error check happens
 * right after the kernel launch (previously it ran after cudaFree, so kernel
 * failures were attributed to the wrong call).
 */
void rgb2yuv(const char *src, uint8_t *dest, int width, int height){
    printf("rgb2yuv width %d,height %d\n", width, height);

    const dim3 block(32, 8);
    const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));

    uchar3 * pChar3 = (uchar3 *) src;

    uchar3 *nvPChar2 = NULL;
    CHECK(cudaMalloc((void **)&nvPChar2, width*height*sizeof(uchar3)));

    uint8_t *nvYuv = NULL;
    CHECK(cudaMalloc((void **)&nvYuv, width*height*sizeof(uint8_t)*3/2));

    printf("Copy input data from the host memory to the CUDA device\n");
    CHECK(cudaMemcpy(nvPChar2, pChar3, width*height*sizeof(uchar3), cudaMemcpyHostToDevice));

    RGB_to_YV12<uchar3, true><<<grid, block>>>(nvPChar2, width, nvYuv, width, width, height);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());

    CHECK(cudaMemcpy(dest, nvYuv, width*height*3/2, cudaMemcpyDeviceToHost));

    CHECK(cudaFree(nvPChar2));
    CHECK(cudaFree(nvYuv));
}

/**
 * rgb2NV12 — host wrapper for RGB_to_NV12; same contract and fixes as
 * rgb2yuv above.
 */
void rgb2NV12(const char *src, uint8_t *dest, int width, int height){
    printf("rgb2yuv width %d,height %d\n", width, height);

    const dim3 block(32, 8);
    const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));

    uchar3 * pChar3 = (uchar3 *) src;

    uchar3 *nvPChar2 = NULL;
    CHECK(cudaMalloc((void **)&nvPChar2, width*height*sizeof(uchar3)));

    uint8_t *nvYuv = NULL;
    CHECK(cudaMalloc((void **)&nvYuv, width*height*sizeof(uint8_t)*3/2));

    printf("Copy input data from the host memory to the CUDA device\n");
    CHECK(cudaMemcpy(nvPChar2, pChar3, width*height*sizeof(uchar3), cudaMemcpyHostToDevice));

    RGB_to_NV12<uchar3, true><<<grid, block>>>(nvPChar2, width, nvYuv, width, width, height);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());

    CHECK(cudaMemcpy(dest, nvYuv, width*height*3/2, cudaMemcpyDeviceToHost));

    CHECK(cudaFree(nvPChar2));
    CHECK(cudaFree(nvYuv));
}

// One-shot lazy init guard for the constant-memory color-space parameters.
bool nv12ColorspaceSetup = false;

/**
 * cudaNV12SetupColorspace — uploads the hue rotation matrix (ITU-R 709
 * coefficients; the 601 branch is compiled out) and an opaque alpha of 0xff
 * to constant memory.
 */
cudaError_t cudaNV12SetupColorspace( float hue = 0.0f )
{
    const float hueSin = sin(hue);
    const float hueCos = cos(hue);

    float hueCSC[9];

    const bool itu601 = false;

    if( itu601 /*CSC == ITU601*/)
    {
        //CCIR 601
        hueCSC[0] = 1.1644f;
        hueCSC[1] = hueSin * 1.5960f;
        hueCSC[2] = hueCos * 1.5960f;
        hueCSC[3] = 1.1644f;
        hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
        hueCSC[5] = (hueSin *  0.3918f) - (hueCos * 0.8130f);
        hueCSC[6] = 1.1644f;
        hueCSC[7] = hueCos *  2.0172f;
        hueCSC[8] = hueSin * -2.0172f;
    }
    else /*if(CSC == ITU709)*/
    {
        //CCIR 709
        hueCSC[0] = 1.0f;
        hueCSC[1] = hueSin * 1.57480f;
        hueCSC[2] = hueCos * 1.57480f;
        hueCSC[3] = 1.0;
        hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
        hueCSC[5] = (hueSin *  0.18732f) - (hueCos * 0.46812f);
        hueCSC[6] = 1.0f;
        hueCSC[7] = hueCos *  1.85560f;
        hueCSC[8] = hueSin * -1.85560f;
    }

    if( CUDA_FAILED(cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
        return cudaErrorInvalidSymbol;

    uint32_t cudaAlpha = ((uint32_t)0xff);

    if( CUDA_FAILED(cudaMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
        return cudaErrorInvalidSymbol;

    nv12ColorspaceSetup = true;
    return cudaSuccess;
}

/**
 * cudaNV12ToRGBA — host wrapper: stages the NV12 frame on the device, runs
 * NV12ToARGB and copies the RGBA result back.
 *
 * NOTE(review): despite the "Dev" parameter names, srcDev/destDev are treated
 * as HOST pointers (they are cudaMemcpy'd to/from freshly allocated device
 * buffers) — confirm against callers.  srcPitch is forwarded to the kernel,
 * so it must equal `width` for the staging copy (which assumes a packed
 * width*height*3/2 layout) to be consistent.
 *
 * Fixes: CUDA calls checked; synchronize/error check moved to directly after
 * the kernel launch instead of after the frees.
 */
void cudaNV12ToRGBA( char* srcDev, size_t srcPitch, char* destDev, size_t destPitch, size_t width, size_t height )
{
    if( !nv12ColorspaceSetup )
        cudaNV12SetupColorspace(0.0f);

    const dim3 blockDim(32,16,1);
    const dim3 gridDim((width+(2*blockDim.x-1))/(2*blockDim.x), (height+(blockDim.y-1))/blockDim.y, 1);

    char *nvRGBA = NULL;
    CHECK(cudaMalloc((void **)&nvRGBA, width*height*sizeof(char)*4));

    char *nvNV12 = NULL;
    CHECK(cudaMalloc((void **)&nvNV12, width*height*sizeof(char)*3/2));

    printf("Copy input data from the host memory to the CUDA device\n");
    CHECK(cudaMemcpy(nvNV12, srcDev, width*height*sizeof(char)*3/2, cudaMemcpyHostToDevice));

    NV12ToARGB<<<gridDim, blockDim>>>( (uint32_t*)nvNV12, srcPitch, (uint32_t*)nvRGBA, destPitch, width, height );
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());

    CHECK(cudaMemcpy(destDev, nvRGBA, width*height*4, cudaMemcpyDeviceToHost));

    CHECK(cudaFree(nvRGBA));
    CHECK(cudaFree(nvNV12));
}
4,742
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <random>
#include <unistd.h>
#include <math.h>
#include <assert.h>
#include <string.h>
#include <chrono>

/* we need these includes for CUDA's random number stuff */
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>

#define THREADS_P_BLOCK 1024
#define MAX_WEIGHT 10

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* this GPU kernel function is used to initialize the random states;
   launched with one thread per block, one state per block index */
__global__ void rand_init(unsigned int seed, curandState_t* states) {
    curand_init(seed,        /* same seed for every core */
                blockIdx.x,  /* distinct sequence number per block */
                0,           /* no offset */
                &states[blockIdx.x]);
}

// Fill a float array on the device with `val` (grid-stride loop).
__global__ void reset_float(float *v, long int N, float val){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < N; i += stride)
        v[i] = val;
}

// Fill an int array on the device with `val` (grid-stride loop).
__global__ void reset_int(int *v, int N, int val){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < N; i += stride){
        v[i] = val;
    }
}

// Evaporate the pheromone matrix: t *= (1-p), clamped below at 1.
__global__ void evaporate(float *t, float p, int N){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < N; i += stride){
        t[i] = t[i] * (1-p);
        if(t[i] < 1)
            t[i] = 1;
    }
}

// Print a device matrix without copying it to the host.
// Use <<<1,1>>> (more blocks/threads would interleave the output).
__global__ void printmat(float *t, int N){
    printf("\n");
    for (int i = 0; i < N; i += 1){
        for (int j = 0; j < N; j += 1){
            printf("%.2f ", t[i*N + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* Deposit pheromone along every ant's solution path.
   Fix: different ants can traverse the same edge concurrently, so the
   read-modify-write on t[from*N+to] raced; atomicAdd makes the deposit safe
   (final sums are identical up to float addition order). */
__global__ void update_trail(float *t, int N, int N_ANTS, int N_EDGES, int* d_sol, int* sum){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < N_ANTS; i += stride){
        int ant_id = i;
        int *sol = &d_sol[ant_id*N_EDGES]; // Find solution from ant I

        // For each edge in the solution, deposit pheromone.
        for(int idx_sol = 1; idx_sol < N_EDGES; idx_sol++){
            if(sol[idx_sol] == -1){
                break;
            }
            int from = sol[idx_sol-1];
            int to = sol[idx_sol];
            atomicAdd(&t[from*N + to], (float)sum[ant_id]);
        }
    }
}

/* Sample an index in [0, N) with probabilities given in `prob` (must sum to
   ~1).  Falls back to N-1 to absorb floating-point rounding. */
__device__ int randChoice(curandState_t *state, float *prob, int N){
    float c = curand_uniform(state);
    float cum = 0;
    for(int i = 0; i < N; i++){
        if(c <= prob[i] + cum)
            return i;
        cum += prob[i];
    }
    return N-1;
}

/* Run the construction routine for one or more ants, depending on the number
   of threads and blocks available.  Each ant builds a path greedily-randomly
   weighted by pheromone^alpha * (edge weight / MAX_WEIGHT)^beta. */
__global__ void ant(curandState_t* states, float *t, int *g, int N, int N_ANTS, int N_EDGES,
                    int *d_sol, int *d_sum, int *d_visited, int alpha, int beta){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int ant_id = index; ant_id < N_ANTS; ant_id += stride){
        // Per-ant views into the global work arrays.
        int *visited = &(d_visited[ant_id*N]);
        int *sol     = &(d_sol[ant_id*N_EDGES]);

        // Random initial node.
        // Fix: curand_uniform returns a value in (0, 1], so c*N could equal N
        // and index out of bounds; clamp to N-1.
        float c = curand_uniform(&states[blockIdx.x]);
        int now_node = (int)(c * N);
        if (now_node >= N)
            now_node = N - 1;

        int end = 0;
        int sol_idx = 0;

        // Per-ant scratch array for next-node probabilities (device heap).
        float *probs = new float[N];

        sol[now_node >= 0 ? sol_idx : 0] = now_node;
        visited[now_node] = 1;
        d_sum[ant_id] = 0;

        while(end == 0){
            sol_idx++;

            // Compute unnormalized neighbor probabilities.
            end = 1;
            float norm = 0;
            for(int neigh = 0; neigh < N; neigh ++){
                probs[neigh] = 0;
                if(g[now_node*N + neigh] > 0 && visited[neigh] == 0){
                    float Tij = t[now_node*N + neigh];
                    float Nij = (float)g[now_node*N + neigh] / (float)MAX_WEIGHT;
                    float res = pow(Tij, alpha) * pow(Nij, beta);
                    probs[neigh] = res;
                    norm += res;
                    end = 0;
                }
            }

            // No unvisited neighbor reachable: the path is complete.
            if(end) break;

            // Normalize probabilities to sum to 1.
            for(int neigh = 0; neigh < N; neigh ++){
                probs[neigh] = (probs[neigh] / norm);
            }

            // Choose the next node and extend the solution.
            int target = randChoice(&states[blockIdx.x], probs, N);
            assert(target >= 0 && target < N);

            d_sum[ant_id] += g[now_node*N + target];
            sol[sol_idx] = target;
            visited[target] = 1;

            now_node = target;
        }

        free(probs);
    }
}

void printHelp(){
    std::cout << std::endl;
    std::cout << "Usage: ./ACO <input database> <N_ITER> <N_ANTS> <EVAPORATION RATE> <ALPHA> <BETA>" << std::endl;
    exit(0);
}

int main(int argc, char* argv[]) {
    // Fix: the program reads argv[1]..argv[6], so 7 argv entries are required
    // (the old `argc < 6` guard let `atoi(argv[6])` read past argv).
    if( argc < 7) printHelp();

    std::string database(argv[1]);
    std::ifstream infile(database);
    std::vector<std::vector<int>> adjList;

    int N = 0;
    int N_EDGES = 0;
    int N_ITER = atoi(argv[2]);
    int N_ANTS = atoi(argv[3]);
    float EVAP = atof(argv[4]);
    int alpha = atoi(argv[5]);
    int beta = atoi(argv[6]);

    int METRICS = 0;
    std::string exp_id;
    if( argc > 7){
        exp_id = std::string(argv[7]);
        METRICS = 1;
    }

    // Read edge list "from to weight"; node ids are 1-based in the file.
    int n1, n2, w;
    while (infile >> n1 >> n2 >> w) {
        N_EDGES++;
        if(n1 > N) N = n1;
        if(n2 > N) N = n2;
        adjList.push_back(std::vector<int>({n1-1, n2-1, w}));
    }
    infile.close();

    std::cout << "--------------- Config ---------------" << std::endl;
    std::cout << "Database: " << database << std::endl;
    std::cout << "N Vertex: " << N << std::endl;
    std::cout << "N Edges: " << N_EDGES << std::endl;
    std::cout << "N Ants: " << N_ANTS << std::endl;
    std::cout << "Max Iterations: " << N_ITER << std::endl;
    std::cout << "Evaportation: " << EVAP << std::endl;
    std::cout << "alpha: " << alpha << std::endl;
    std::cout << "beta: " << beta << std::endl;
    std::cout << "Exp: " << exp_id << std::endl;
    std::cout << "--------------------------------------" << std::endl << std::endl;

    std::ofstream outfile;
    if(METRICS){
        outfile.open("results/" + exp_id + ".txt");
        outfile << "DATABASE " << database << std::endl;
        outfile << "N " << N << std::endl;
        outfile << "N_EDGES " << N_EDGES << std::endl;
        outfile << "N_ANTS " << N_ANTS << std::endl;
        outfile << "N_ITER " << N_ITER << std::endl;
        outfile << "EVAP " << EVAP << std::endl;
        outfile << "alpha " << alpha << std::endl;
        outfile << "beta " << beta << std::endl;
    }

    // Pointers
    float *d_t;
    float *t;
    int *sol, *sum;
    int *d_sol, *d_sum;
    int *d_visited;
    int *d_g, *g;
    int *best_sol;
    int best_sum = 0;

    // Host arrays
    g = (int *)malloc(N * N * sizeof(int));
    t = (float *)malloc(N * N * sizeof(float));
    sol = (int *)malloc(N_EDGES * N_ANTS * sizeof(int));
    sum = (int *)malloc(N_ANTS * sizeof(int));
    best_sol = (int *)malloc(N_EDGES * sizeof(int));

    // Device arrays
    gpuErrchk(cudaMalloc(&d_t, N * N * sizeof(float)));
    gpuErrchk(cudaMalloc(&d_g, N * N * sizeof(int)));
    gpuErrchk(cudaMalloc(&d_sol, N_EDGES * N_ANTS * sizeof(int)));      // solutions
    gpuErrchk(cudaMalloc(&d_visited, N * N_ANTS * sizeof(int)));        // per-ant visited flags
    gpuErrchk(cudaMalloc(&d_sum, N_ANTS * sizeof(int)));                // per-ant path sums

    // Populate dense adjacency matrix (0 == no edge).
    for(int i = 0; i < N*N; i++)
        g[i] = 0;
    for(auto it = std::begin(adjList); it != std::end(adjList); ++it) {
        int i = (*it)[0];
        int j = (*it)[1];
        int w = (*it)[2];
        g[(i*N)+j] = w;
    }
    gpuErrchk(cudaMemcpy(d_g, g, N*N*sizeof(int), cudaMemcpyHostToDevice));

    int nnBlocks = ((N*N) / THREADS_P_BLOCK) + 1;
    int nBlocks = (N / THREADS_P_BLOCK) + 1;

    // Setup random number generator: one state per block index.
    curandState_t* states;
    gpuErrchk(cudaMalloc((void**) &states, nnBlocks * sizeof(curandState_t)));
    rand_init<<<nnBlocks, 1>>>(time(0), states);
    gpuErrchk( cudaGetLastError() );

    int initial_node = 0;

    reset_float<<<nnBlocks, THREADS_P_BLOCK>>>(d_t, N*N, 1.0);
    gpuErrchk( cudaDeviceSynchronize() );

    for(int iter = 0; iter < N_ITER; iter++){
        std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();

        // Reset solutions, visited lists and sums.
        reset_int<<<nnBlocks, THREADS_P_BLOCK>>>(d_sol, N_EDGES * N_ANTS, -1);
        reset_int<<<nnBlocks, THREADS_P_BLOCK>>>(d_visited, N * N_ANTS, 0);
        reset_int<<<nBlocks, THREADS_P_BLOCK>>>(d_sum, N_ANTS, 0);
        gpuErrchk( cudaDeviceSynchronize() );

        // Run ants.
        ant<<<nBlocks, THREADS_P_BLOCK>>>(states, d_t, d_g, N, N_ANTS, N_EDGES, d_sol, d_sum, d_visited, alpha, beta);
        gpuErrchk( cudaDeviceSynchronize() );

        // Evaporate trail.
        evaporate<<<nnBlocks, THREADS_P_BLOCK>>>(d_t, EVAP, N*N);
        gpuErrchk( cudaDeviceSynchronize() );

        // Update trail.
        update_trail<<<nBlocks, THREADS_P_BLOCK>>>(d_t, N, N_ANTS, N_EDGES, d_sol, d_sum);

        // //Print Trail
        // printmat<<<1, 1>>>(d_t, N);
        // gpuErrchk( cudaDeviceSynchronize() );

        // Pull solutions.
        gpuErrchk(cudaMemcpy(sol, d_sol, N_ANTS*N_EDGES*sizeof(int), cudaMemcpyDeviceToHost));
        gpuErrchk(cudaMemcpy(sum, d_sum, N_ANTS*sizeof(int), cudaMemcpyDeviceToHost));
        gpuErrchk( cudaDeviceSynchronize() );

        float mean_phero;
        if(METRICS){
            gpuErrchk(cudaMemcpy(t, d_t, N*N*sizeof(float), cudaMemcpyDeviceToHost));
            mean_phero = 0;
            for(int i = 0; i < N*N; i++){
                mean_phero += t[i];
            }
            // Fix: `/ (float)N*N` parsed as (mean/N)*N, i.e. a no-op; divide
            // by the full element count.
            mean_phero = mean_phero / (float)(N*N);
        }

        if(METRICS){
            outfile << "START_NODE " << initial_node << " ITER " << iter << " MEAN_PHERO " << mean_phero << " : ";
        }

        // Save best solution across all ants and iterations.
        for(int i = 0; i < N_ANTS; i++){
            if(sum[i] > best_sum){
                best_sum = sum[i];
                memcpy(best_sol, &sol[i*N_EDGES], N_EDGES*sizeof(int));
            }
            if(METRICS){
                outfile << sum[i] << " ";
            }
        }
        if(METRICS){
            outfile << std::endl;
        }

        std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
        std::cout << "[" << iter << "] " << "Best sum: " << best_sum
                  << " - Time difference = "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count()
                  << "[ms]" << std::endl;
    }

    printf("Best Sol: %i\n", best_sum);
    printf("------------------------------------------------\n[");
    for(int idx_sol = 0; idx_sol < N_EDGES; idx_sol++){
        if(best_sol[idx_sol] == -1)
            break;
        printf("%i, ", best_sol[idx_sol]+1);
    }
    printf("]\n");
    printf("------------------------------------------------\n");
    printf("%i\n", best_sum);

    if(METRICS){
        for(int idx_sol = 0; idx_sol < N_EDGES; idx_sol++){
            if(best_sol[idx_sol] == -1)
                break;
            outfile << best_sol[idx_sol]+1 << " ";
        }
        outfile << std::endl;
        outfile << best_sum << std::endl;
    }

    // Release device and host resources (states and the host arrays leaked
    // in the original).
    cudaFree(d_sol);
    cudaFree(d_sum);
    cudaFree(d_t);
    cudaFree(d_visited);
    cudaFree(d_g);
    cudaFree(states);

    free(g);
    free(t);
    free(sol);
    free(sum);
    free(best_sol);

    if(METRICS)
        outfile.close();

    return 0;
}
4,743
//=============================================================================================================
/**
 * @file     rapmusic_kernel.cu
 * @author   Christoph Dinh <christoph.dinh@live.de>;
 * @version  1.0
 * @date     March, 2011
 *
 * @section  LICENSE
 *
 * Copyright (C) 2011 Christoph Dinh. All rights reserved.
 *
 * No part of this program may be photocopied, reproduced,
 * or translated to another program language without the
 * prior written consent of the author.
 *
 * @brief    RAP-MUSIC subspace-correlation kernels: pair-index generation,
 *           signal-subspace (U_B) extraction, and the subcorr scan over all
 *           dipole-pair lead-field combinations.
 */
//*************************************************************************************************************

//=============================================================================================================
// CUDA INCLUDES
//=============================================================================================================

#include "../include/rapmusic_kernel.cuh"
#include "../include/cusvd.cuh"
#include "../include/cuhelpers.cuh"

//*************************************************************************************************************

//=============================================================================================================
// STL INCLUDES
//=============================================================================================================

#define SHDEBUG

//*************************************************************************************************************

//=============================================================================================================
// DEFINE NAMESPACE HPCLib
//=============================================================================================================

namespace HPCLib {

//*************************************************************************************************************

//=============================================================================================================
// Kernels and Device Functions
//=============================================================================================================

// Maps a linear combination index onto the (idx1, idx2) pair it represents
// in the upper-triangular enumeration of all point pairs (closed-form
// inverse of the triangular-number layout; uses double sqrt for exactness).
__device__ void cuGetPointPair( const int p_iPoints, const int p_iCurIdx, int *p_pIdx1, int *p_pIdx2)
{
    int ii = p_iPoints*(p_iPoints+1)/2-1-p_iCurIdx;
    int K = (int)floor((sqrt((double)(8*ii+1))-1)/2);

    *p_pIdx1 = p_iPoints-1-K;
    *p_pIdx2 = (p_iCurIdx-p_iPoints*(p_iPoints+1)/2 + (K+1)*(K+2)/2)+(*p_pIdx1);
}

//*************************************************************************************************************

// Fills p_pPairIdxCombinations with all (idx1, idx2) pairs, two ints per
// combination, using a grid-stride loop over combination indices.
// NOTE(review): the per-thread `new int` scratch allocations are unnecessary
// (plain locals would do) and hit the slow device heap — candidate cleanup.
__global__ void cuCalcPairCombinations( int p_iNumPoints, int p_iNumCombinations, int* p_pPairIdxCombinations)
{
    int t_iCombIdx = threadIdx.x + blockIdx.x * blockDim.x;

    int *t_pIdx1 = new int;
    int *t_pIdx2 = new int;

    while (t_iCombIdx < p_iNumCombinations)
    {
        cuGetPointPair(p_iNumPoints, t_iCombIdx, t_pIdx1, t_pIdx2);

        p_pPairIdxCombinations[2*t_iCombIdx] = *t_pIdx1;
        p_pPairIdxCombinations[2*t_iCombIdx+1] = *t_pIdx2;

        t_iCombIdx += gridDim.x*blockDim.x;
    }

    delete t_pIdx2;
    delete t_pIdx1;
}

//*************************************************************************************************************

// Computes the signal subspace U_B of the projected measurement matrix:
// copies Proj_Phi_s into dynamic shared memory, runs an in-place SVD
// (cuSVD_UW_shared, declared in cusvd.cuh — exact contract not visible here),
// estimates the numeric rank (singular values >= 1e-5), sorts the nonzero
// singular values descending and writes the corresponding left singular
// vectors to p_pMatU_B, ordered by decreasing singular value.
// Single-block kernel: threadIdx.y strides rows, threadIdx.z gated for copies.
// NOTE(review): vecOrdKey/vecOrdVal are `new`-allocated but never deleted
// (device-heap leak), and the rank/sort section runs redundantly in every
// thread — TODO confirm whether single-thread gating was intended.
__global__ void cuCalcU_B( float* p_pMatProj_Phi_s, int p_iProj_Phi_sRows, int p_iProj_Phi_sCols, float* p_pMatU_B, int* p_pRank)
{
    int t_iSizeProj_Phi_sMat = p_iProj_Phi_sRows * p_iProj_Phi_sCols;

    // Dynamic shared memory layout: [U_B | W | YZ cache | SVD cache].
    extern __shared__ float t_pSharedMem[];

    float* t_pU_B = t_pSharedMem;
    float* t_pW = t_pU_B + t_iSizeProj_Phi_sMat;
    float* t_pYZCache = t_pW + p_iProj_Phi_sCols;
    float* t_pSVDCache_all = t_pYZCache + blockDim.y*blockDim.z;

    if(threadIdx.z == 0)//To prevent other z threads performing memory access
    {
        //Copy first Lead Field point
        int i = threadIdx.y;//combination index
        while(i < t_iSizeProj_Phi_sMat)
        {
            t_pU_B[i] = p_pMatProj_Phi_s[i];
            i += blockDim.y;
        }
    }

    cuSVD_UW_shared( t_pU_B, /* [m x n ]*/
                     p_iProj_Phi_sRows, /* rows */
                     p_iProj_Phi_sCols, /* columns */
                     t_pW, /* [nx1]*/
                     t_pSVDCache_all, /* [nx1] */
                     t_pYZCache );

    // Rank estimate + index of the largest singular value.
    *p_pRank = 0;
    int t_iKey = 0;
    float t_vMax = 0;
    for(int n = 0; n < p_iProj_Phi_sCols; ++n)
    {
        if (t_pW[n] >= 0.00001f)
        {
            if(t_vMax < t_pW[n])
            {
                t_iKey = n;
                t_vMax = t_pW[n];
            }
            ++(*p_pRank);
        }
    }

    //order nonzero Singular values
    int* vecOrdKey = new int[*p_pRank];
    float* vecOrdVal = new float[*p_pRank];

    vecOrdKey[0] = t_iKey;
    vecOrdVal[0] = t_vMax;

    //very uneffective key-value-sorting (O(rank * cols) selection sort)
    for(int k = 1; k < *p_pRank; ++k)
    {
        vecOrdVal[k] = 0;
        for(int n = 0; n < p_iProj_Phi_sCols; ++n)
        {
            if( t_pW[n] <= vecOrdVal[k-1] && n != vecOrdKey[k-1] && t_pW[n] > vecOrdVal[k] )
            {
                vecOrdKey[k] = n;
                vecOrdVal[k] = t_pW[n];
            }
        }
    }

    // Emit the rank leading left singular vectors in sorted order.
    int c = 0;
    for(int n = 0; n < *p_pRank; ++n)//ToDo Parallel
    {
        int m = threadIdx.y;
        while(m < p_iProj_Phi_sRows)
        {
            p_pMatU_B[c*p_iProj_Phi_sRows + m] = t_pU_B[vecOrdKey[n]*p_iProj_Phi_sRows + m];
            m += blockDim.y;
        }
        ++c;
    }

    __syncthreads();
}

//*************************************************************************************************************

// Copies the two 3-column lead-field blocks for dipoles idx1/idx2 into the
// 6-column pair matrix p_pMatProj_G (shared memory).  threadIdx.y strides
// elements; threadIdx.z != 0 threads skip the copy entirely.
__device__ void cuGetLeadFieldPair( float* p_pMatProjLeadField, //Input
                                    int p_iLeadFieldRows,
                                    float* p_pMatProj_G,
                                    int p_iIdx1,
                                    int p_iIdx2)
{
    if(threadIdx.z == 0)//To prevent other z threads performing memory access
    {
        int iidx1 = p_iIdx1 * p_iLeadFieldRows*3;//index with offset (idx1 * p_iLeadFieldRows)
        int iidx2 = p_iIdx2 * p_iLeadFieldRows*3;//index with offset (idx2 * p_iLeadFieldRows)

        int t_iSizePointMat = p_iLeadFieldRows*3;

        //Copy first Lead Field point
        int i = threadIdx.y;//combination index
        while(i < t_iSizePointMat)
        {
            p_pMatProj_G[i] = p_pMatProjLeadField[iidx1 + i];
            i += blockDim.y;
        }

        //Copy second Lead Field point
        i = threadIdx.y;//combination index
        while(i < t_iSizePointMat)
        {
            p_pMatProj_G[t_iSizePointMat+i] = p_pMatProjLeadField[iidx2 + i];
            i += blockDim.y;
        }
    }
}

//*************************************************************************************************************

// Main subcorr scan: for every dipole-pair combination, builds the 6-column
// pair lead field, SVDs it for an orthonormal basis U_A, correlates U_A with
// the signal subspace U_B (C = U_B^T * U_A or its transpose, whichever keeps
// rows >= cols), SVDs C and stores the largest singular value (the subspace
// correlation) in p_pRoh[combination].
// Thread layout: x = one combination per x-lane, y/z cooperate within a
// combination.  Dynamic shared memory holds per-x-lane G, W, Cor and caches —
// required size is (t_iSizePairMat + t_iPairCols + t_iSizeCorMat +
// blockDim.y*blockDim.z + t_iSizeSVDCache) * blockDim.x floats.
__global__ void RapMusicSubcorr( float* p_pMatProjLeadField, //Input
                                 int p_iLeadFieldRows,
                                 int p_iLeadFieldCols,
                                 int* p_pPairIdxCombinations, //Combination
                                 int p_iNumOfCombinations,
                                 float* p_pMatU_B, //[rowsA x colsB] //from kernel part2
                                 int p_iColsB,
                                 float* p_pRoh )
{
    const int t_iSizePairMat = p_iLeadFieldRows * 6;
    const int t_iPairCols = 6;
    const int iColsA = 6;
    const int t_iSizeSVDCache = t_iPairCols+1+1;
    const int t_iSizeCorMat = iColsA * p_iColsB;

    //Create all Pair Mats in shared mem
    extern __shared__ float t_pSharedMem[];

    //Split shared Memory
    float* t_pMatProj_G_all = t_pSharedMem;//size = t_iSizePairMat*blockDim.x
    float* t_pW_all = t_pMatProj_G_all + t_iSizePairMat*blockDim.x;//size = t_iPairCols*blockDim.x
    float* t_pCor_all = t_pW_all + t_iPairCols*blockDim.x;//size = t_iSizeCorMat*blockDim.x
    float* t_pCacheYZ_all = t_pCor_all + t_iSizeCorMat*blockDim.x;//size = blockDim.y*blockDim.z*blockDim.x
    float* t_pSVDCache_all = t_pCacheYZ_all + blockDim.y*blockDim.z*blockDim.x;//size = t_iSizeSVDCache*blockDim.x

    //Split YZ Cache
    float* t_pCacheYZ = t_pCacheYZ_all +(threadIdx.x*blockDim.y*blockDim.z);
    float* t_pCacheY = t_pCacheYZ + threadIdx.z*blockDim.y;

    int t_iNumPairMatsPerBlock = blockDim.x;//Number of threads in x
    int t_iCurrentBlock = blockIdx.x;

    int t_iCombIdx = threadIdx.x + t_iCurrentBlock * t_iNumPairMatsPerBlock;

    while (t_iCombIdx < p_iNumOfCombinations)
    {
        int idx1 = p_pPairIdxCombinations[2*t_iCombIdx];//*3, 3x more cols -> x y z
        int idx2 = p_pPairIdxCombinations[2*t_iCombIdx+1];//*3, 3x more cols -> x y z

        float* t_pMatProj_G = t_pMatProj_G_all + t_iSizePairMat*threadIdx.x;

        cuGetLeadFieldPair( p_pMatProjLeadField, //Input
                            p_iLeadFieldRows,
                            t_pMatProj_G,
                            idx1, idx2);
        __syncthreads();

        //Part 1
        //fuehre svd auf paarmat aus (run SVD on the pair matrix)
        float* w = t_pW_all + t_iPairCols*threadIdx.x;
        float* t_pSVDCache = t_pSVDCache_all + t_iSizeSVDCache*threadIdx.x;

        cuSVD_UW_shared( t_pMatProj_G, p_iLeadFieldRows, t_iPairCols, w, t_pSVDCache, t_pCacheYZ);
//        cuSVD_UW( t_pMatProj_G, p_iLeadFieldRows, t_iPairCols, w);
        __syncthreads();

        float* t_pMatU_A_full = t_pMatProj_G;

        //if once a singularvalue is smaller than epsilon = 10^-5 the following values are also smaller
        // -> because Singular values are ordered
        int t_iRank = threadIdx.z;
        while(t_iRank < t_iPairCols)
        {
            if (w[t_iRank] < 0.00001f)//set Eigenvectors with corresponding 0 eigenvalue to 0
            {
                int c = threadIdx.y;
                while(c < p_iLeadFieldRows)
                {
                    t_pMatU_A_full[t_iRank*p_iLeadFieldRows + c] = 0.0f;
                    c += blockDim.y;
                }
            }
            t_iRank += blockDim.z;
        }
        __syncthreads();

        //Part 2
        float* U_B = p_pMatU_B;
        float* Cor = t_pCor_all + t_iSizeCorMat*threadIdx.x;//new float[t_iSizeCorMat];//p_pMatCor+(t_iCombIdx * t_iSizeCorMat);

        //lt. Mosher 1998: C = U_A^T * U_B
        //Cor.cols() >= Cor.rows() == U_B.cols > U_A.cols
        if(p_iColsB >= iColsA) //Bug ID 1 - fixed! changed from > to >=
        {
            //C = U_B^T * U_A
            for( int cA = 0; cA < iColsA; ++cA )
            {
                float* t_pMatU_A_full_cur = t_pMatU_A_full + (cA*p_iLeadFieldRows);

                int cB = threadIdx.z;
                while(cB < p_iColsB)
                {
                    float* U_B_cur = U_B + (cB*p_iLeadFieldRows);
                    float* t_pCor_cur = Cor + (cA*p_iColsB + cB);
                    *t_pCor_cur = 0;

                    cuScalarProductY_shared( U_B_cur, t_pMatU_A_full_cur, p_iLeadFieldRows, t_pCacheY, t_pCor_cur );
                    __syncthreads();

//                    for( int rAB = 0; rAB < p_iLeadFieldRows; ++rAB)
//                        *t_pCor_cur += U_B_cur[rAB]*t_pMatU_A_full_cur[rAB];

                    cB += blockDim.z;
                }
                __syncthreads();
            }
        }
        else//ToDo to debug
        {
            //C = U_A^T * U_B
            for( int cB = 0; cB < p_iColsB; ++cB )
            {
                float* U_B_cur = U_B + (cB*p_iLeadFieldRows);

                int cA = threadIdx.z;
                while(cA < iColsA)
                {
                    float* t_pMatU_A_full_cur = t_pMatU_A_full + (cA*p_iLeadFieldRows);
                    float* t_pCor_cur = Cor + (cB*iColsA+cA);
                    *t_pCor_cur = 0;

                    cuScalarProductY_shared( t_pMatU_A_full_cur, U_B_cur, p_iLeadFieldRows, t_pCacheY, t_pCor_cur );
                    __syncthreads();

//                    for( int rAB = 0; rAB < p_iLeadFieldRows; ++rAB)
//                        *t_pCor_cur += t_pMatU_A_full_cur[rAB]*U_B_cur[rAB];

                    cA += blockDim.z;
                }
                __syncthreads();
            }
        }

        //Part 3
        int rows = p_iColsB;
        int cols = iColsA;
        if (p_iColsB < iColsA)
        {
            rows = iColsA;
            cols = p_iColsB;
        }

        //cols are maximal iColsA = 6.
        //That's why we can use w and shared cache again. they are 6 width
        cuSVD_W_shared( Cor, rows, cols, w, t_pSVDCache, t_pCacheYZ);
//        cuSVD_W( Cor, rows, cols, w);
        __syncthreads();

        // Subspace correlation = largest singular value of Cor.
        p_pRoh[t_iCombIdx] = 0;
        if(threadIdx.y == 0 && threadIdx.z == 0)
            for(int i = 0; i < cols; ++i)
                if (p_pRoh[t_iCombIdx] < w[i])
                    p_pRoh[t_iCombIdx] = w[i];
        __syncthreads();

        t_iCombIdx += gridDim.x*t_iNumPairMatsPerBlock;
    }

    __syncthreads();
}

// Empty stub — overload resolution placeholder; body intentionally blank.
__device__ void cuPowellOffset( float* p_pMatProjLeadField, //Input
                                int p_iLeadFieldRows,
                                float* p_pMatProj_G,
                                int p_iIdx1,
                                int p_iIdx2)
{
}

//*************************************************************************************************************

// Offset of row p_iRow in the triangular pair enumeration.
__device__ int cuPowellOffset(int p_iRow, int p_iNumPoints)
{
    return p_iRow*p_iNumPoints - (( (p_iRow-1)*p_iRow) / 2); //triangular series 1 3 6 10 ... = (num_pairs*(num_pairs+1))/2
}

//*************************************************************************************************************

// Builds the vector of combination indices that contain dipole p_iRow:
// first the "column" entries (one per preceding row), then the contiguous
// "row" run starting at the row's triangular offset.
__global__ void cuPowellIdxVec(int p_iRow, int p_iNumPoints, int* p_pVecElements)
{
//    if(p_pVecElements != NULL)
//        delete[] p_pVecElements;
//
//    p_pVecElements = new int(p_iNumPoints);

    //col combination index
    int t_iIdx = threadIdx.x + blockIdx.x * blockDim.x;
    while (t_iIdx < p_iRow)
    {
        p_pVecElements[t_iIdx] = cuPowellOffset(t_iIdx+1,p_iNumPoints)-(p_iNumPoints-p_iRow);
        t_iIdx += gridDim.x*blockDim.x;
    }

    //row combination index
    int off = cuPowellOffset(p_iRow,p_iNumPoints);
    int length = p_iNumPoints - p_iRow;
    t_iIdx = threadIdx.x + blockIdx.x * blockDim.x;
    while (t_iIdx+p_iRow < p_iRow+length)
    {
        p_pVecElements[t_iIdx+p_iRow] = off+t_iIdx;
        t_iIdx += gridDim.x*blockDim.x;
    }
}

//*************************************************************************************************************

// NOTE(review): definition continues beyond this chunk of the file.
__global__ void PowellRapMusicSubcorr( float* p_pMatProjLeadField, //Input
                                       int p_iLeadFieldRows,
                                       int p_iLeadFieldCols,
                                       int* p_pPairIdxCombinations, //Combination
                                       int* p_pRowIndezes,
                                       int p_iNumOfDipoles,
                                       float* p_pMatU_B, //[rowsA x colsB] //from kernel part2
                                       int p_iColsB,
                                       float*
p_pRoh ) { const int t_iSizePairMat = p_iLeadFieldRows * 6; const int t_iPairCols = 6; const int iColsA = 6; const int t_iSizeSVDCache = t_iPairCols+1+1; const int t_iSizeCorMat = iColsA * p_iColsB; //Create all Pair Mats in shared mem extern __shared__ float t_pSharedMem[]; //Split shared Memory float* t_pMatProj_G_all = t_pSharedMem;//size = t_iSizePairMat*blockDim.x float* t_pW_all = t_pMatProj_G_all + t_iSizePairMat*blockDim.x;//size = t_iPairCols*blockDim.x float* t_pCor_all = t_pW_all + t_iPairCols*blockDim.x;//size = t_iSizeCorMat*blockDim.x float* t_pCacheYZ_all = t_pCor_all + t_iSizeCorMat*blockDim.x;//size = blockDim.y*blockDim.z*blockDim.x float* t_pSVDCache_all = t_pCacheYZ_all + blockDim.y*blockDim.z*blockDim.x;//size = t_iSizeSVDCache*blockDim.x //Split YZ Cache float* t_pCacheYZ = t_pCacheYZ_all +(threadIdx.x*blockDim.y*blockDim.z); float* t_pCacheY = t_pCacheYZ + threadIdx.z*blockDim.y; int t_iNumPairMatsPerBlock = blockDim.x;//Number of threads in x int t_iCurrentBlock = blockIdx.x; int t_iCombIdx = threadIdx.x + t_iCurrentBlock * t_iNumPairMatsPerBlock; while (t_iCombIdx < p_iNumOfDipoles) { int t_iCurrentIdx = p_pRowIndezes[t_iCombIdx]; int idx1 = p_pPairIdxCombinations[2*t_iCurrentIdx];//*3, 3x more cols -> x y z int idx2 = p_pPairIdxCombinations[2*t_iCurrentIdx+1];//*3, 3x more cols -> x y z float* t_pMatProj_G = t_pMatProj_G_all + t_iSizePairMat*threadIdx.x; cuGetLeadFieldPair( p_pMatProjLeadField, //Input p_iLeadFieldRows, t_pMatProj_G, idx1, idx2); __syncthreads(); //Part 1 //fhre svd auf paarmat aus float* w = t_pW_all + t_iPairCols*threadIdx.x; float* t_pSVDCache = t_pSVDCache_all + t_iSizeSVDCache*threadIdx.x; cuSVD_UW_shared( t_pMatProj_G, p_iLeadFieldRows, t_iPairCols, w, t_pSVDCache, t_pCacheYZ); // cuSVD_UW( t_pMatProj_G, p_iLeadFieldRows, t_iPairCols, w); __syncthreads(); float* t_pMatU_A_full = t_pMatProj_G; //if once a singularvalue is smaller than epsilon = 10^-5 the following values are also smaller // -> because Singular values 
are ordered int t_iRank = threadIdx.z; while(t_iRank < t_iPairCols) { if (w[t_iRank] < 0.00001f)//set Eigenvectors with corresponding 0 eigenvalue to 0 { int c = threadIdx.y; while(c < p_iLeadFieldRows) { t_pMatU_A_full[t_iRank*p_iLeadFieldRows + c] = 0.0f; c += blockDim.y; } } t_iRank += blockDim.z; } __syncthreads(); //Part 2 float* U_B = p_pMatU_B; float* Cor = t_pCor_all + t_iSizeCorMat*threadIdx.x;//new float[t_iSizeCorMat];//p_pMatCor+(t_iCombIdx * t_iSizeCorMat); //lt. Mosher 1998: C = U_A^T * U_B //Cor.cols() >= Cor.rows() == U_B.cols > U_A.cols if(p_iColsB >= iColsA) //Bug ID 1 - fixed! changed from > to >= { //C = U_B^T * U_A for( int cA = 0; cA < iColsA; ++cA ) { float* t_pMatU_A_full_cur = t_pMatU_A_full + (cA*p_iLeadFieldRows); int cB = threadIdx.z; while(cB < p_iColsB) { float* U_B_cur = U_B + (cB*p_iLeadFieldRows); float* t_pCor_cur = Cor + (cA*p_iColsB + cB); *t_pCor_cur = 0; cuScalarProductY_shared( U_B_cur, t_pMatU_A_full_cur, p_iLeadFieldRows, t_pCacheY, t_pCor_cur ); __syncthreads(); // for( int rAB = 0; rAB < p_iLeadFieldRows; ++rAB) // *t_pCor_cur += U_B_cur[rAB]*t_pMatU_A_full_cur[rAB]; cB += blockDim.z; } __syncthreads(); } } else//ToDo to debug { //C = U_A^T * U_B for( int cB = 0; cB < p_iColsB; ++cB ) { float* U_B_cur = U_B + (cB*p_iLeadFieldRows); int cA = threadIdx.z; while(cA < iColsA) { float* t_pMatU_A_full_cur = t_pMatU_A_full + (cA*p_iLeadFieldRows); float* t_pCor_cur = Cor + (cB*iColsA+cA); *t_pCor_cur = 0; cuScalarProductY_shared( t_pMatU_A_full_cur, U_B_cur, p_iLeadFieldRows, t_pCacheY, t_pCor_cur ); __syncthreads(); // for( int rAB = 0; rAB < p_iLeadFieldRows; ++rAB) // *t_pCor_cur += t_pMatU_A_full_cur[rAB]*U_B_cur[rAB]; cA += blockDim.z; } __syncthreads(); } } //Part 3 int rows = p_iColsB; int cols = iColsA; if (p_iColsB < iColsA) { rows = iColsA; cols = p_iColsB; } //cols are maximal iColsA = 6. //That's why we can use w and shared cache again. 
they are 6 width cuSVD_W_shared( Cor, rows, cols, w, t_pSVDCache, t_pCacheYZ); // cuSVD_W( Cor, rows, cols, w); __syncthreads(); p_pRoh[t_iCurrentIdx] = 0; if(threadIdx.y == 0 && threadIdx.z == 0) for(int i = 0; i < cols; ++i) if (p_pRoh[t_iCurrentIdx] < w[i]) p_pRoh[t_iCurrentIdx] = w[i]; __syncthreads(); t_iCombIdx += gridDim.x*t_iNumPairMatsPerBlock; } __syncthreads(); } }//Namespace
4,744
/**
 * The program must read in an NxN matrix with floating point numbers. The program must determine
 * the index of columns, which contains one element that is equal to the average of the values in
 * the same column. The outputs are the indices of the columns.
 */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"

#define BLOCK_SIZE 256

/* Reads size*size doubles from `filename` into a newly malloc'd flat
 * (row-major) array. Exits with an error message if the file cannot be
 * opened. Caller owns and must free() the returned buffer. */
double* readMatrix(int size, char *filename)
{
    double *vector;
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        printf("A fajl nem talalhato!");
        exit(1);
    }
    vector = (double *) malloc((size_t)size * size * sizeof(double));
    int i = 0;
    /* Fixed: bound the read loop so a file with more than size*size values
     * cannot overflow the buffer. */
    while (i < size * size && fscanf(fp, "%lf ", &vector[i]) != EOF) {
        i++;
    }
    fclose(fp);
    return vector;
}

/* One thread per column. Computes the column average, then writes the column
 * index into indices[col] when some element equals the average, -1 otherwise.
 * The comparison is exact floating-point equality, as the task specification
 * asks for an element *equal* to the average. */
__global__ void findIndicesKernel(int size, double *vector, int *indices)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < size) {
        double sum = 0.0;
        for (int row = 0; row < size; row++) {
            sum += vector[col + row * size];
        }
        double avg = sum / size;
        indices[col] = -1;
        for (int row = 0; row < size; row++) {
            if (vector[col + row * size] == avg) {
                indices[col] = col;
                break;
            }
        }
    }
}

/* Writes the measured kernel time (seconds) to "time.txt". */
void printMeasuredTime(int size, double time)
{
    FILE *fp = fopen("time.txt", "w");
    fprintf(fp, "%dx%d matrix: %.8lf s", size, size, time);
    fclose(fp);
}

/* Uploads the matrix, launches the kernel timed with CUDA events, and
 * returns the host-side indices array (caller frees it). */
int* findIndices(int size, double *vector)
{
    int *indices, *device_indices;
    double *device_vector;
    size_t vector_size = (size_t)size * size * sizeof(double);
    size_t indices_size = size * sizeof(int);

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    indices = (int *)malloc(size * sizeof(int));
    cudaMalloc((void **)&device_vector, vector_size);
    cudaMalloc((void **)&device_indices, indices_size);
    cudaMemcpy(device_vector, vector, vector_size, cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    /* Fixed: the grid size needs integer ceiling division. The original
     * ceil(size / BLOCK_SIZE) truncated *before* ceil(), launching 0 blocks
     * for size < BLOCK_SIZE and too few blocks otherwise. */
    int blocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    findIndicesKernel<<<blocks, BLOCK_SIZE>>>(size, device_vector, device_indices);
    cudaEventRecord(end);

    /* Blocking memcpy on the default stream also synchronizes with the kernel. */
    cudaMemcpy(indices, device_indices, indices_size, cudaMemcpyDeviceToHost);
    cudaFree(device_vector);
    cudaFree(device_indices);

    cudaEventSynchronize(end);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, end);
    printMeasuredTime(size, milliseconds / 1000);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    return indices;
}

/* Writes the found column indices (entries != -1) to "output.txt". */
void printResults(int size, int *indices)
{
    FILE *fp = fopen("output.txt", "w");
    for (int i = 0; i < size; i++) {
        if (indices[i] != -1) {
            fprintf(fp, "%d ", indices[i]);
        }
    }
    fclose(fp);
}

int main(int argc, char **argv)
{
    /* Fixed: argv[2] (the file path) is also required, so demand argc >= 3. */
    if (argc < 3) {
        printf("Kerem, adja meg a matrix meretet\n es az azt tartalmazo fajl\neleresi utvonalat!\nPelda: hf1 5 matrix.txt");
        return 1;
    }
    /* Fixed: the size is an integer; parse it with strtol instead of strtod. */
    int size = (int)strtol(argv[1], NULL, 10);
    double *vector = readMatrix(size, argv[2]);
    int *indices = findIndices(size, vector);
    printResults(size, indices);
    free(vector);
    free(indices);
    cudaDeviceReset();
    return 0;
}
4,745
//n<=4096, m<=1024
// Computes an approximate soft matching (transport plan) between two batched
// point sets xyz1 (b x n x 3) and xyz2 (b x m x 3), writing the n x m weight
// matrix per batch into `match`. The assignment is refined over 10 annealing
// rounds with an increasingly sharp Gaussian kernel (level = -4^j), while
// remainL/remainR track unassigned capacity on each side.
// Launch: one block per batch item (grid-stride over b); shared-memory
// capacity bounds require n<=4096 and m<=1024.
// NOTE(review): multiR=n/m and multiL=m/n are integer divisions -- presumably
// the sizes are intended to divide evenly; confirm, otherwise capacity is
// truncated.
__global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match){
	const int MaxN=4096,MaxM=1024;
	// Per-block scratch: remaining capacity, per-point assignment ratios,
	// and the compacted list of right-side points that still have capacity.
	__shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN];
	__shared__ int listR[MaxM],lc;
	float multiL,multiR;
	if (n>=m){
		multiL=1;
		multiR=n/m;
	}else{
		multiL=m/n;
		multiR=1;
	}
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		// Reset the output plan and both capacity vectors for this batch item.
		for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
			match[i*n*m+j]=0;
		for (int j=threadIdx.x;j<n;j+=blockDim.x)
			remainL[j]=multiL;
		for (int j=threadIdx.x;j<m;j+=blockDim.x)
			remainR[j]=multiR;
		__syncthreads();
		// Annealing loop: kernel sharpness 4^7 down to 4^-1, final pass exact (level=0).
		for (int j=7;j>=-2;j--){
			float level=-powf(4.0f,j);
			if (j==-2){
				level=0;
			}
			// Thread 0 compacts the indices of right points with remaining capacity.
			if (threadIdx.x==0){
				lc=0;
				for (int k=0;k<m;k++)
					if (remainR[k]>0)
						listR[lc++]=k;
			}
			__syncthreads();
			int _lc=lc;
			// Pass 1: per left point, normalization factor over active right points.
			for (int k=threadIdx.x;k<n;k+=blockDim.x){
				float suml=1e-9f;
				float x1=xyz1[(i*n+k)*3+0];
				float y1=xyz1[(i*n+k)*3+1];
				float z1=xyz1[(i*n+k)*3+2];
				//for (int l=0;l<m;l++){
				for (int _l=0;_l<_lc;_l++){
					int l=listR[_l];
					float x2=xyz2[(i*m+l)*3+0]-x1;
					float y2=xyz2[(i*m+l)*3+1]-y1;
					float z2=xyz2[(i*m+l)*3+2]-z1;
					float w=expf(level*(x2*x2+y2*y2+z2*z2))*remainR[l];
					suml+=w;
				}
				ratioL[k]=remainL[k]/suml;
			}
			__syncthreads();
			// Pass 2: per active right point, cap demand by remaining capacity.
			//for (int k=threadIdx.x;k<m;k+=blockDim.x){
			for (int _k=threadIdx.x;_k<lc;_k+=blockDim.x){
				int k=listR[_k];
				float sumr=0;
				float x2=xyz2[(i*m+k)*3+0];
				float y2=xyz2[(i*m+k)*3+1];
				float z2=xyz2[(i*m+k)*3+2];
				for (int l=0;l<n;l++){
					float x1=xyz1[(i*n+l)*3+0]-x2;
					float y1=xyz1[(i*n+l)*3+1]-y2;
					float z1=xyz1[(i*n+l)*3+2]-z2;
					float w=expf(level*(x1*x1+y1*y1+z1*z1))*ratioL[l];
					sumr+=w;
				}
				sumr*=remainR[k];
				float consumption=fminf(remainR[k]/(sumr+1e-9f),1.0f);
				ratioR[k]=consumption*remainR[k];
				remainR[k]=fmaxf(0.0f,remainR[k]-sumr);
			}
			__syncthreads();
			// Pass 3: accumulate the plan and consume left capacity.
			for (int k=threadIdx.x;k<n;k+=blockDim.x){
				float suml=0;
				float x1=xyz1[(i*n+k)*3+0];
				float y1=xyz1[(i*n+k)*3+1];
				float z1=xyz1[(i*n+k)*3+2];
				for (int _l=0;_l<_lc;_l++){
					int l=listR[_l];
					float x2=xyz2[(i*m+l)*3+0]-x1;
					float y2=xyz2[(i*m+l)*3+1]-y1;
					float z2=xyz2[(i*m+l)*3+2]-z1;
					float w=expf(level*(x2*x2+y2*y2+z2*z2))*ratioL[k]*ratioR[l];
					match[i*n*m+l*n+k]+=w;
					suml+=w;
				}
				remainL[k]=fmaxf(0.0f,remainL[k]-suml);
			}
			__syncthreads();
		}
	}
}
// Host wrapper: fixed launch configuration of 32 blocks x 512 threads.
void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match){
	approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match);
}
// Computes the total matching cost per batch item:
// out[i] = sum over (j,k) of match[j,k] * ||xyz1[j] - xyz2[k]||.
// xyz2 is tiled through shared memory in chunks of Block points; the final
// per-thread partial sums are combined with a butterfly reduction in allsum.
__global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
	__shared__ float allsum[512];
	const int Block=256;
	__shared__ float buf[Block*3];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		float subsum=0;
		for (int k0=0;k0<m;k0+=Block){
			int endk=min(m,k0+Block);
			// Stage this chunk of xyz2 coordinates in shared memory.
			for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){
				buf[k]=xyz2[i*m*3+k0*3+k];
			}
			__syncthreads();
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x1=xyz1[(i*n+j)*3+0];
				float y1=xyz1[(i*n+j)*3+1];
				float z1=xyz1[(i*n+j)*3+2];
				for (int k=0;k<endk-k0;k++){
					//float x2=xyz2[(i*m+k)*3+0]-x1;
					//float y2=xyz2[(i*m+k)*3+1]-y1;
					//float z2=xyz2[(i*m+k)*3+2]-z1;
					float x2=buf[k*3+0]-x1;
					float y2=buf[k*3+1]-y1;
					float z2=buf[k*3+2]-z1;
					float d=sqrtf(x2*x2+y2*y2+z2*z2);
					subsum+=match[i*n*m+(k0+k)*n+j]*d;
				}
			}
			__syncthreads();
		}
		// Block-wide butterfly reduction of the per-thread partial sums.
		allsum[threadIdx.x]=subsum;
		for (int j=1;j<blockDim.x;j<<=1){
			__syncthreads();
			if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
				allsum[threadIdx.x]+=allsum[threadIdx.x+j];
			}
		}
		if (threadIdx.x==0)
			out[i]=allsum[0];
		__syncthreads();
	}
}
// Host wrapper: fixed launch configuration of 32 blocks x 512 threads.
void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
	matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
}
// Gradient of the matching cost with respect to xyz2:
// grad2[k] = sum_j match[j,k] * (xyz2[k]-xyz1[j]) / max(||xyz2[k]-xyz1[j]||, 1e-20).
// blockIdx.y strips the k range across the grid's y dimension; threads share
// the inner j loop and reduce their partials through sum_grad.
// Requires blockDim.x <= 256 (sum_grad capacity).
__global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){
	__shared__ float sum_grad[256*3];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		int kbeg=m*blockIdx.y/gridDim.y;
		int kend=m*(blockIdx.y+1)/gridDim.y;
		for (int k=kbeg;k<kend;k++){
			float x2=xyz2[(i*m+k)*3+0];
			float y2=xyz2[(i*m+k)*3+1];
			float z2=xyz2[(i*m+k)*3+2];
			float subsumx=0,subsumy=0,subsumz=0;
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x1=x2-xyz1[(i*n+j)*3+0];
				float y1=y2-xyz1[(i*n+j)*3+1];
				float z1=z2-xyz1[(i*n+j)*3+2];
				// 1e-20 floor avoids division by zero for coincident points.
				float d=match[i*n*m+k*n+j]/fmaxf(sqrtf(x1*x1+y1*y1+z1*z1),1e-20f);
				subsumx+=x1*d;
				subsumy+=y1*d;
				subsumz+=z1*d;
			}
			sum_grad[threadIdx.x*3+0]=subsumx;
			sum_grad[threadIdx.x*3+1]=subsumy;
			sum_grad[threadIdx.x*3+2]=subsumz;
			// Butterfly reduction over the block's partial gradients.
			for (int j=1;j<blockDim.x;j<<=1){
				__syncthreads();
				int j1=threadIdx.x;
				int j2=threadIdx.x+j;
				if ((j1&j)==0 && j2<blockDim.x){
					sum_grad[j1*3+0]+=sum_grad[j2*3+0];
					sum_grad[j1*3+1]+=sum_grad[j2*3+1];
					sum_grad[j1*3+2]+=sum_grad[j2*3+2];
				}
			}
			if (threadIdx.x==0){
				grad2[(i*m+k)*3+0]=sum_grad[0];
				grad2[(i*m+k)*3+1]=sum_grad[1];
				grad2[(i*m+k)*3+2]=sum_grad[2];
			}
			__syncthreads();
		}
	}
}
// Host wrapper: 32x32 grid of 256-thread blocks (y dimension strips k).
void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){
	matchcostgrad<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
}
4,746
#include <stdio.h> __global__ void helloCUDA(float f) { printf("Hello thread %d, f=%f\n", threadIdx.x, f); } int main() { helloCUDA<<<1, 1>>>(1.2345f); //cudaDeviceSynchronize(); return 0; }
4,747
// Compile with "nvcc -o contacts --std=c++11 contacts.cu"
// Kernel (plus a skeletal demo app) that histograms ligand/receptor atom-pair
// distances into per-type-pair distance bins ("contact features").
// Intended to be incorporated into a pipeline rather than used standalone.
#include <unordered_map>
#include <string>
#include <vector>
#include <fstream>
#include <iostream>
#include <assert.h>
#include <stdio.h>
#include <cstdint>
using namespace std;

static const int LIGAND_ATOM_TYPES = 9;
static const int RECEPTOR_ATOM_TYPES = 4;
static const int MAX_LIGAND_ATOMS = 128;   // shared-memory capacity of the kernel
static const float cutoff = 12.0f;         // contact distance cutoff (Angstrom)
static const float binSize = 2.0f;         // width of each distance bin
static const float cutoff2 = cutoff * cutoff;
static const int BINS = 6;                 // cutoff / binSize
static const int FEATURES = BINS * LIGAND_ATOM_TYPES * RECEPTOR_ATOM_TYPES;

struct Atom
{
    float _x;
    float _y;
    float _z;
    int _type;  // index into the type tables built in main()
};

#define RTERROR(status, s) \
    if (status != cudaSuccess) { \
        printf("%s %s\n", s, cudaGetErrorString(status)); \
        assert(0); \
        cudaDeviceReset(); \
        exit(-1); \
    }

// Fixed: the original macro called getGpu().Shutdown(), which does not exist
// in this file and would have failed to compile had the macro ever been used.
#define LAUNCHERROR(s) \
    { \
        cudaError_t status = cudaGetLastError(); \
        if (status != cudaSuccess) { \
            printf("Error: %s launching kernel %s\n", cudaGetErrorString(status), s); \
            assert(0); \
            cudaDeviceReset(); \
            exit(-1); \
        } \
    }

// Parses ATOM records of a PDBQT file. Atoms whose type string (column 77+)
// appears in `map` are appended to vMolecule with coordinates (columns 30-53)
// and the mapped type id.
bool ReadPDBQT(string fname, unordered_map<string, int>& map, vector<Atom>& vMolecule)
{
    vMolecule.resize(0);
    ifstream input(fname);
    Atom a;
    for (std::string line; getline(input, line); )
    {
        if (line.rfind("ATOM", 0) == 0)
        {
            // Fixed: guard against truncated ATOM lines before reading the
            // fixed-column fields (buff[77] would read out of bounds).
            if (line.size() < 78)
                continue;
            char type[16];
            const char* buff = line.c_str();
            sscanf(&buff[77], "%s", type);
            std::unordered_map<std::string, int>::const_iterator got = map.find(type);
            if (got != map.end())
            {
                sscanf(&buff[30], "%8f%8f%8f", &a._x, &a._y, &a._z);
                a._type = got->second;
                //printf("%8.3f %8.3f %8.3f %3s %d\n", a._x, a._y, a._z, type, a._type);
                vMolecule.push_back(a);
            }
        }
    }
    return true;
}

// Allocates device memory for the molecule and copies it up.
// Returns the device pointer (caller frees with cudaFree).
Atom* UploadPDBQT(vector<Atom>& vMolecule)
{
    Atom* pdMolecule;
    cudaError_t status = cudaMalloc((void**)&pdMolecule, vMolecule.size() * sizeof(Atom));
    RTERROR(status, "UploadPDBQT: Failed to allocate memory for molecule.\n");
    status = cudaMemcpy(pdMolecule, vMolecule.data(), vMolecule.size() * sizeof(Atom), cudaMemcpyDefault);
    RTERROR(status, "UploadPDBQT: Failed to upload molecule.\n");
    return pdMolecule;
}

// One thread per receptor atom (grid covers receptorAtoms). The ligand is
// staged in shared memory, each block accumulates its feature histogram in
// shared memory, then merges it into the global histogram with atomics.
// Precondition (enforced by the host): ligandAtoms <= MAX_LIGAND_ATOMS.
__global__ void CalculateContacts(const Atom* pdLigand, const size_t ligandAtoms, const Atom* pdReceptor, const size_t receptorAtoms, uint32_t* pdFeature)
{
    __shared__ uint32_t sFeature[FEATURES];
    __shared__ float3 sLigandPos[MAX_LIGAND_ATOMS];
    __shared__ int sLigandOffset[MAX_LIGAND_ATOMS];

    // Zero the per-block feature histogram
    for (size_t i = threadIdx.x; i < FEATURES; i += blockDim.x)
    {
        sFeature[i] = 0;
    }

    // Stage ligand positions and precomputed feature offsets in shared memory
    for (size_t i = threadIdx.x; i < ligandAtoms; i += blockDim.x)
    {
        Atom a = pdLigand[i];
        sLigandPos[i].x = a._x;
        sLigandPos[i].y = a._y;
        sLigandPos[i].z = a._z;
        sLigandOffset[i] = a._type * (RECEPTOR_ATOM_TYPES * BINS);
    }
    __threadfence();
    __syncthreads();

    // Read this thread's receptor atom
    size_t pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos < receptorAtoms)
    {
        Atom a = pdReceptor[pos];
        a._type *= BINS;

        // Count contacts against every ligand atom
        for (size_t i = 0; i < ligandAtoms; i++)
        {
            float dx = a._x - sLigandPos[i].x;
            float dy = a._y - sLigandPos[i].y;
            float dz = a._z - sLigandPos[i].z;
            float r2 = dx * dx + dy * dy + dz * dz;
            if (r2 < cutoff2)
            {
                // Fixed: sqrtf keeps the computation in single precision
                // (sqrt promoted to double).
                float r = sqrtf(r2);
                int bin = r / binSize;
                atomicAdd(&sFeature[a._type + sLigandOffset[i] + bin], 1);
            }
        }
    }
    __threadfence();
    __syncthreads();

    // Merge the block's histogram into the global result
    for (size_t i = threadIdx.x; i < FEATURES; i += blockDim.x)
    {
        if (sFeature[i] > 0)
        {
            atomicAdd(&pdFeature[i], sFeature[i]);
        }
    }
}

int main(int argc, char** argv)
{
    // Initialize atom data
    //                       C  N  O  F   P   S   Cl  Br  I
    //                       0, 1, 2, 3,  4,  5,  6   7,  8
    //ligand_atomic_nums  = [6, 7, 8, 9, 15, 16, 17, 35, 53]
    //                       0, 1, 2,  3
    //protein_atomic_nums = [6, 7, 8, 16]  C N O S
    unordered_map<string, int> ligandMap;
    ligandMap["A"] = 0;
    ligandMap["C"] = 0;
    ligandMap["N"] = 1;
    ligandMap["NA"] = 1;
    ligandMap["O"] = 2;
    ligandMap["OA"] = 2;
    ligandMap["F"] = 3;
    ligandMap["P"] = 4;
    ligandMap["CL"] = 5;
    ligandMap["S"] = 6;
    ligandMap["SA"] = 6;
    ligandMap["BR"] = 7;
    ligandMap["I"] = 8;
    unordered_map<string, int> proteinMap;
    proteinMap["A"] = 0;
    proteinMap["C"] = 0;
    proteinMap["N"] = 1;
    proteinMap["NA"] = 1;
    proteinMap["O"] = 2;
    proteinMap["OA"] = 2;
    proteinMap["S"] = 3;
    proteinMap["SA"] = 3;
    cudaFree(0);  // force CUDA context creation up front

    // Read ligand
    vector<Atom> vLigand;
    ReadPDBQT("test.pdbqt", ligandMap, vLigand);
    // Fixed: the kernel stages the ligand in fixed-size shared arrays; an
    // oversized ligand would silently corrupt shared memory.
    if (vLigand.size() > MAX_LIGAND_ATOMS)
    {
        fprintf(stderr, "main: ligand has %zu atoms, kernel supports at most %d.\n", vLigand.size(), MAX_LIGAND_ATOMS);
        exit(-1);
    }
    Atom* pdLigand = UploadPDBQT(vLigand);

    // Read receptor
    vector<Atom> vReceptor;
    ReadPDBQT("final_Mpro_5R84_gast.pdbqt", proteinMap, vReceptor);
    Atom* pdReceptor = UploadPDBQT(vReceptor);
    cout << vLigand.size() << " " << vReceptor.size() << endl;

    // Allocate feature vector
    uint32_t* pdFeature;
    cudaError_t status = cudaMalloc((void**)&pdFeature, FEATURES * sizeof(uint32_t));
    RTERROR(status, "main: Failed to allocate memory for feature vector.\n");
    status = cudaMemset(pdFeature, 0, FEATURES * sizeof(uint32_t));
    RTERROR(status, "main: Failed to zero feature vector.\n");

    // Calculate contacts
    uint32_t blockSize = 128;
    uint32_t blocks = (uint32_t)((vReceptor.size() + blockSize - 1) / blockSize);
    CalculateContacts<<<blocks, blockSize>>>(pdLigand, vLigand.size(), pdReceptor, vReceptor.size(), pdFeature);
    // Fixed: surface launch-configuration errors instead of ignoring them.
    LAUNCHERROR("CalculateContacts")

    // Download contacts (blocking copy also synchronizes with the kernel)
    vector<uint32_t> vFeature(FEATURES);
    status = cudaMemcpy(vFeature.data(), pdFeature, FEATURES * sizeof(uint32_t), cudaMemcpyDefault);
    // Fixed: this status was assigned but never checked.
    RTERROR(status, "main: Failed to download feature vector.\n");

    // Print result
    for (size_t i = 0; i < FEATURES; i++)
        printf("%3lu %6u\n", i, vFeature[i]);

    status = cudaFree(pdFeature);
    RTERROR(status, "main: Failed to deallocate memory for feature vector.\n");
    status = cudaFree(pdLigand);
    RTERROR(status, "main: Failed to deallocate memory for ligand.\n");
    status = cudaFree(pdReceptor);
    RTERROR(status, "main: Failed to deallocate memory for receptor.\n");
    return 0;
}
4,748
#include "includes.h" __global__ void gpu_mull2(float* a, float* b, float* c, int n, int m,int p) { int i = blockIdx.x * 32 + threadIdx.x; int j = blockIdx.y; float sum = 0.0f; for (int k = 0; k < p; ++k) { sum += b[i + n * k] * c[k + p * j]; } a[i + n * j] = sum; }
4,749
#include "includes.h" __global__ void vanrossum_get_indices_to_apply_stdp (int* d_postsyns, float* d_last_spike_time_of_each_neuron, float* d_time_of_last_spike_to_reach_synapse, int* d_index_of_last_afferent_synapse_to_spike, bool* d_isindexed_ltd_synapse_spike, int* d_index_of_first_synapse_spiked_after_postneuron, float currtime, int* d_plastic_synapse_indices, size_t total_number_of_plastic_synapses){ int indx = threadIdx.x + blockIdx.x * blockDim.x; // Running through all synapses: while (indx < total_number_of_plastic_synapses){ int idx = d_plastic_synapse_indices[indx]; int postsynaptic_neuron = d_postsyns[idx]; // Check whether a synapse reached a neuron this timestep if (d_time_of_last_spike_to_reach_synapse[idx] == currtime){ // Atomic Exchange the new synapse index atomicExch(&d_index_of_last_afferent_synapse_to_spike[postsynaptic_neuron], idx); } // Check (if we need to) whether a synapse has fired if (!d_isindexed_ltd_synapse_spike[postsynaptic_neuron]){ if (d_time_of_last_spike_to_reach_synapse[idx] == currtime){ d_isindexed_ltd_synapse_spike[postsynaptic_neuron] = true; atomicExch(&d_index_of_first_synapse_spiked_after_postneuron[postsynaptic_neuron], idx); } } // Increment index indx += blockDim.x * gridDim.x; } }
4,750
#include "includes.h" __global__ void set_valid_pos(int32_t* pos_buff, int32_t* count_buff, const int32_t entry_count) { const int32_t start = threadIdx.x + blockDim.x * blockIdx.x; const int32_t step = blockDim.x * gridDim.x; for (int32_t i = start; i < entry_count; i += step) { if (VALID_POS_FLAG == pos_buff[i]) { pos_buff[i] = !i ? 0 : count_buff[i - 1]; } } }
4,751
#include "includes.h" __global__ void Add(float *A, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) A[i] = A[i]+ A[i]; }
4,752
#include "includes.h" __global__ void relu_grad(float *pre_grad, float *output, int rows, int cols) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (j >= cols || i >= rows) return; if (output[i * cols + j] <= 0) pre_grad[i * cols + j] = 0; }
4,753
// TESTING
4,754
#include <stdio.h> #include <cuda_runtime.h> #include <cuda.h> #include <chrono> #include <iostream> // Compile with // nvcc -O2 -std=c++11 cuda.cu __global__ void empty() { } int main(int argc, char **argv) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; int threadsPerBlock = 256; int blocksPerGrid = 16; //Warmup empty<<<blocksPerGrid, threadsPerBlock>>>(); empty<<<blocksPerGrid, threadsPerBlock>>>(); empty<<<blocksPerGrid, threadsPerBlock>>>(); int nRuns = 1000; std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now(); for(int i=0; i<nRuns; i++) { empty<<<blocksPerGrid, threadsPerBlock>>>(); } std::chrono::steady_clock::time_point end= std::chrono::steady_clock::now(); std::cout << (float)std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()/nRuns << " microseconds per call" <<std::endl; err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch empty kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
4,755
/**
 * Learning algorithms and data structures with CUDA:
 * step-by-step optimization of the N-Queens problem.
 * Kyodo News, Information Technology Bureau,
 * Iichiro Suzuki (suzuki.iichiro@kyodonews.jp)
 *
 * Build and run:
 *   $ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g)
 *   -c: CPU iterative   -r: CPU recursive   -g: GPU
 *
 * Step 3: backtracking.
 * Occupancy flags are kept for every column and both diagonal directions;
 * as soon as a partial placement violates a constraint, that subtree is
 * pruned instead of being enumerated, so the search never descends past a
 * position that cannot lead to a solution.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
// Globals
long Unique = 0;         // GPU unique-solution counter (not computed in this step)
int down[2 * MAX - 1];   // column occupancy flags
int left[2 * MAX - 1];   // diagonal (one direction) occupancy flags
int right[2 * MAX - 1];  // diagonal (other direction) occupancy flags
long TOTAL = 0;          // CPU/CPUR: total number of solutions
long UNIQUE = 0;         // CPU/CPUR: number of unique solutions
int aBoard[MAX];         // aBoard[row] = column of the queen in that row (-1 = empty)
// GPU prototypes
__global__ void nqueen_cuda(int *d_aBoard, int *d_down, int *d_right, int *d_left, long *d_results, long TOTAL, int row, int size);
void solve_nqueen_cuda(int si, long results[2], int steps);
bool InitCUDA();
// CPU prototypes
void TimeFormat(clock_t utime, char *form);
void NQueen(int row, int size);
void NQueenR(int row, int size);
//
// Formats a clock() delta as [dd] hh:mm:ss.ms.
void TimeFormat(clock_t utime, char *form)
{
  int dd, hh, mm;
  float ftime, ss;
  ftime = (float)utime / CLOCKS_PER_SEC;
  mm = (int)ftime / 60;
  ss = ftime - (int)(mm * 60);
  dd = mm / (24 * 60);
  mm = mm % (24 * 60);
  hh = mm / 60;
  mm = mm % 60;
  if (dd)
    sprintf(form, "%4d %02d:%02d:%05.2f", dd, hh, mm, ss);
  else if (hh)
    sprintf(form, " %2d:%02d:%05.2f", hh, mm, ss);
  else if (mm)
    sprintf(form, " %2d:%05.2f", mm, ss);
  else
    sprintf(form, " %5.2f", ss);
}
// GPU kernel: iterative backtracking executed by a single thread (<<<1,1>>>).
// d_aBoard[row] holds the column of the queen in that row (-1 = none yet);
// d_down/d_right/d_left are the column/diagonal occupancy flags and must be
// zeroed by the host before launch. The solution count is written to
// d_results[0].
__global__ void nqueen_cuda(int *d_aBoard, int *d_down, int *d_right, int *d_left, long *d_results, long TOTAL, int row, int size)
{
  int sizeE = size - 1;
  bool matched;
  while (row >= 0) {
    matched = false;
    // First visit to a row: d_aBoard[row] is -1, so col starts at 0.
    // On re-entry after a backtrack, continue from the next column.
    for (int col = d_aBoard[row] + 1; col < size; col++) {
      if (d_down[col] == 0 && d_right[col - row + sizeE] == 0 && d_left[col + row] == 0) { // square not attacked
        if (d_aBoard[row] != -1) { // a queen is already placed in this row
          // clear the flags of the queen being replaced
          d_down[d_aBoard[row]]
            = d_right[d_aBoard[row] - row + sizeE]
            = d_left[d_aBoard[row] + row] = 0;
        }
        d_aBoard[row] = col; // place the queen
        d_down[col]
          = d_right[col - row + sizeE]
          = d_left[col + row] = 1; // mark its attack lines
        matched = true;
        break;
      }
    }
    if (matched) { // a queen was placed in this row
      row++;
      if (row == size) { // full placement found
        TOTAL++;
        row--;
      }
    } else {
      if (d_aBoard[row] != -1) {
        int col = d_aBoard[row]; // stand-in for col
        d_aBoard[row] = -1;
        d_down[col]
          = d_right[col - row + sizeE]
          = d_left[col + row] = 0;
      }
      row--; // backtrack
    }
  }
  d_results[0] = TOTAL;
}
// Host wrapper: allocates pinned host and device buffers, runs the kernel,
// and fills results[0] = total solutions, results[1] = unique solutions
// (always 0 -- unique counting is not implemented in this step).
void solve_nqueen_cuda(int si, long results[2], int steps)
{
  // host buffers (pinned)
  int *h_aBoard;
  int *h_down;
  int *h_right;
  int *h_left;
  long *h_results;
  // Fixed: sizeof(int)*2*MAX-1 parsed as (sizeof(int)*2*MAX)-1; the intended
  // element count is (2*MAX-1), so parenthesize it.
  cudaMallocHost((void**)&h_aBoard, sizeof(int) * MAX);
  cudaMallocHost((void**)&h_down, sizeof(int) * (2 * MAX - 1));
  cudaMallocHost((void**)&h_right, sizeof(int) * (2 * MAX - 1));
  cudaMallocHost((void**)&h_left, sizeof(int) * (2 * MAX - 1));
  cudaMallocHost((void**)&h_results, sizeof(long) * steps);
  int *d_aBoard;
  int *d_down;
  int *d_right;
  int *d_left;
  long *d_results;
  cudaMalloc((void**)&d_aBoard, sizeof(int) * MAX);
  cudaMalloc((void**)&d_down, sizeof(int) * (2 * MAX - 1));
  cudaMalloc((void**)&d_right, sizeof(int) * (2 * MAX - 1));
  cudaMalloc((void**)&d_left, sizeof(int) * (2 * MAX - 1));
  cudaMalloc((void**)&d_results, sizeof(long) * steps);
  // initialization
  for (int i = 0; i < si; i++) {
    h_aBoard[i] = -1;
  }
  // Fixed: cudaMallocHost does not zero memory. The occupancy flag arrays
  // must start at 0, otherwise the kernel prunes on garbage flags and the
  // solution counts are wrong.
  for (int i = 0; i < 2 * MAX - 1; i++) {
    h_down[i] = 0;
    h_right[i] = 0;
    h_left[i] = 0;
  }
  for (int i = 0; i < steps; i++) {
    h_results[i] = 0;
  }
  // host to device
  cudaMemcpy(d_aBoard, h_aBoard, sizeof(int) * MAX, cudaMemcpyHostToDevice);
  cudaMemcpy(d_down, h_down, sizeof(int) * (2 * MAX - 1), cudaMemcpyHostToDevice);
  cudaMemcpy(d_right, h_right, sizeof(int) * (2 * MAX - 1), cudaMemcpyHostToDevice);
  cudaMemcpy(d_left, h_left, sizeof(int) * (2 * MAX - 1), cudaMemcpyHostToDevice);
  cudaMemcpy(d_results, h_results, sizeof(long) * steps, cudaMemcpyHostToDevice);
  // run (this step still uses a single-threaded kernel)
  nqueen_cuda<<<1, 1>>>(d_aBoard, d_down, d_right, d_left, d_results, 0, 0, si);
  // device to host
  cudaMemcpy(h_results, d_results, sizeof(long) * steps, cudaMemcpyDeviceToHost);
  // for the caller
  results[0] = h_results[0];
  // Fixed: results[1] was never written, so the caller read uninitialized
  // memory as the unique count. Report 0 explicitly.
  results[1] = 0;
  // release
  cudaFreeHost(h_aBoard);
  cudaFreeHost(h_down);
  cudaFreeHost(h_right);
  cudaFreeHost(h_left);
  cudaFreeHost(h_results);
  cudaFree(d_aBoard);
  cudaFree(d_down);
  cudaFree(d_left);
  cudaFree(d_right);
  cudaFree(d_results);
}
/** CUDA initialization: picks the first device with compute capability >= 1.x. */
bool InitCUDA()
{
  int count;
  cudaGetDeviceCount(&count);
  if (count == 0) {
    fprintf(stderr, "There is no device.\n");
    return false;
  }
  int i;
  for (i = 0; i < count; i++) {
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
      if (prop.major >= 1) {
        break;
      }
    }
  }
  if (i == count) {
    fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
    return false;
  }
  cudaSetDevice(i);
  return true;
}
// CPU iterative backtracking (same algorithm as the kernel, on the host
// globals aBoard/down/right/left; counts into TOTAL).
void NQueen(int row, int size)
{
  int sizeE = size - 1;
  bool matched;
  while (row >= 0) {
    matched = false;
    // First visit: aBoard[row] is -1, so col starts at 0.
    // After a backtrack, continue scanning to the right.
    for (int col = aBoard[row] + 1; col < size; col++) {
      if (down[col] == 0 && right[col - row + sizeE] == 0 && left[col + row] == 0) { // square not attacked
        if (aBoard[row] != -1) { // a queen is already placed in this row
          // clear the flags of the queen being replaced
          down[aBoard[row]]
            = right[aBoard[row] - row + sizeE]
            = left[aBoard[row] + row] = 0;
        }
        aBoard[row] = col; // place the queen
        down[col]
          = right[col - row + sizeE]
          = left[col + row] = 1; // mark its attack lines
        matched = true;
        break;
      }
    }
    if (matched) { // a queen was placed
      row++;
      if (row == size) { // full placement found
        TOTAL++;
        row--;
      }
    } else {
      if (aBoard[row] != -1) {
        int col = aBoard[row]; // stand-in for col
        aBoard[row] = -1;
        down[col]
          = right[col - row + sizeE]
          = left[col + row] = 0;
      }
      row--; // backtrack
    }
  }
}
// CPU recursive backtracking; counts into TOTAL.
void NQueenR(int row, int size)
{
  int sizeE = size - 1;
  if (row == size) {
    TOTAL++;
  } else {
    for (int col = 0; col < size; col++) {
      aBoard[row] = col;
      if (down[col] == 0 && right[row - col + sizeE] == 0 && left[row + col] == 0) {
        down[col]
          = right[row - col + sizeE]
          = left[row + col] = 1;
        NQueenR(row + 1, size);
        down[col]
          = right[row - col + sizeE]
          = left[row + col] = 0;
      }
    }
  }
}
// Entry point: parses -c/-r/-g/-s and runs the chosen implementation for a
// range of board sizes, printing counts and timings.
int main(int argc, char** argv)
{
  bool cpu = false, cpur = false, gpu = false, sgpu = false;
  int argstart = 1, steps = 24576;
  /** argument handling */
  if (argc >= 2 && argv[1][0] == '-') {
    if (argv[1][1] == 'c' || argv[1][1] == 'C') { cpu = true; }
    else if (argv[1][1] == 'r' || argv[1][1] == 'R') { cpur = true; }
    else if (argv[1][1] == 'g' || argv[1][1] == 'G') { gpu = true; }
    else if (argv[1][1] == 's' || argv[1][1] == 'S') { sgpu = true; }
    else cpur = true;
    argstart = 2;
  }
  if (argc < argstart) {
    printf("Usage: %s [-c|-g|-r|-s]\n", argv[0]);
    printf(" -c: CPU only\n");
    printf(" -r: CPUR only\n");
    printf(" -g: GPU only\n");
    printf(" -s: SGPU only\n");
    printf("Default CPUR to 8 queen\n");
  }
  /** banner */
  if (cpu) {
    printf("\n\n3.CPU 非再帰 バックトラック\n");
  } else if (cpur) {
    printf("\n\n3.CPUR 再帰 バックトラック\n");
  } else if (gpu) {
    printf("\n\n3.GPU 非再帰 バックトラック\n");
  } else if (sgpu) {
    printf("\n\n3.SGPU 非再帰 バックトラック\n");
  }
  if (cpu || cpur) {
    printf("%s\n", " N: Total Unique hh:mm:ss.ms");
    clock_t st;  // for timing
    char t[20];  // holds hh:mm:ss.ms
    int min = 4;
    int targetN = 17;
    // reset counters and clear the board (-1 = empty) for each size
    for (int i = min; i <= targetN; i++) {
      TOTAL = 0;
      UNIQUE = 0;
      for (int j = 0; j <= targetN; j++) {
        aBoard[j] = -1;
      }
      st = clock();
      if (cpu) {
        NQueen(0, i);
      }
      if (cpur) {
        NQueenR(0, i);
      }
      TimeFormat(clock() - st, t);
      printf("%2d:%13ld%16ld%s\n", i, TOTAL, UNIQUE, t);
    }
  }
  /** GPU */
  if (gpu || sgpu) {
    if (!InitCUDA()) { return 0; }
    int min = 4;
    int targetN = 18;
    struct timeval t0;
    struct timeval t1;
    int ss;
    int ms;
    int dd;
    long TOTAL;
    long results[2];  // result slots: [0]=total, [1]=unique
    printf("%s\n", " N: Total Unique dd:hh:mm:ss.ms");
    for (int i = min; i <= targetN; i++) {
      gettimeofday(&t0, NULL);  // start timing
      if (gpu) {
        solve_nqueen_cuda(i, results, steps);
        TOTAL = results[0];
        UNIQUE = results[1];
      }
      gettimeofday(&t1, NULL);  // stop timing
      if (t1.tv_usec < t0.tv_usec) {
        dd = (int)(t1.tv_sec - t0.tv_sec - 1) / 86400;
        ss = (t1.tv_sec - t0.tv_sec - 1) % 86400;
        ms = (1000000 + t1.tv_usec - t0.tv_usec + 500) / 10000;
      } else {
        dd = (int)(t1.tv_sec - t0.tv_sec) / 86400;
        ss = (t1.tv_sec - t0.tv_sec) % 86400;
        ms = (t1.tv_usec - t0.tv_usec + 500) / 10000;
      }
      int hh = ss / 3600;
      int mm = (ss - hh * 3600) / 60;
      ss %= 60;
      // Fixed: print the UNIQUE value returned by the solver; the original
      // printed the global `Unique`, which is never updated.
      printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", i, TOTAL, UNIQUE, dd, hh, mm, ss, ms);
    }
  }
  return 0;
}
4,756
#include <stdio.h>
#include <stdlib.h>  // atoi (the original relied on an implicit declaration)

// Spin kernel: loops forever so the launched blocks keep their SMs busy.
// Intended to be run under MPS so several processes can share one GPU.
__global__ void k() {
  int i = 0;
  while (true) {
    i++;
  }
}

// Usage: <prog> <num_blocks>
// Launches <num_blocks> blocks of 1024 spinning threads, requesting the
// device's full per-block shared-memory budget so each block monopolizes
// an SM, then waits for Enter before exiting.
int main(int argc, char *argv[]) {
  // BUG FIX: the original dereferenced argv[1] unconditionally (segfault
  // when run with no argument).
  if (argc < 2) {
    fprintf(stderr, "usage: %s <num_blocks>\n", argv[0]);
    return 1;
  }

  cudaFree(0);  // force context creation so cudaMemGetInfo reports real numbers

  size_t avail, total;
  cudaMemGetInfo(&avail, &total);
  // %zu is the portable format for size_t (the original used %ld).
  printf("total available memory: %zu\n", avail / 1024 / 1024);

  cudaDeviceProp props;
  cudaGetDeviceProperties(&props, 0);

  // Dynamic shared memory = the whole static per-block budget.
  k<<<atoi(argv[1]), 1024, props.sharedMemPerBlock>>>();

  // The launch is asynchronous; surface configuration errors explicitly
  // (e.g. too many blocks, or a shared-memory request the device rejects).
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1;
  }

  printf("Press Enter key to continue...");
  fgetc(stdin);
  return 0;
}
4,757
#include <stdio.h>
#include "cuda.h"

#define DIM 20
#define TURNS 1

// One thread per cell: average the 3x3 neighborhood of (x, y) from `grid`
// into `tmp_grid`.
// NOTE(review): border cells still divide by 9 even though they have fewer
// than 9 in-bounds neighbors, which pulls edge values toward zero — confirm
// this boundary handling is intended before changing it.
__global__ void gridmean(float grid[DIM][DIM], float tmp_grid[DIM][DIM]) {
    // Include threadIdx so the kernel stays correct if it is ever launched
    // with more than one thread per block (the original ignored threadIdx,
    // which only works for the <<<numBlocks, 1>>> launch below).
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= DIM || y >= DIM) return;

    float tmp = 0;
    for (int i = x - 1; i <= x + 1; i++) {
        for (int j = y - 1; j <= y + 1; j++) {
            if (!(i < 0 || j < 0 || i >= DIM || j >= DIM)) {
                tmp += grid[i][j];
            }
        }
    }
    tmp_grid[x][y] = tmp / 9;
}

// Copy the smoothed values back into grid so the next TURN reads them.
__global__ void copy(float grid[DIM][DIM], float tmp_grid[DIM][DIM]) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < DIM && y < DIM) {
        grid[x][y] = tmp_grid[x][y];
    }
}

int main() {
    float grid[DIM][DIM];
    float (*grid_d)[DIM];
    float (*tmp_grid_d)[DIM];
    int size = DIM * DIM * sizeof(float);

    cudaMalloc((void**)&grid_d, size);
    cudaMalloc((void**)&tmp_grid_d, size);

    // Initialize the grid with row-major cell indices.
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            grid[i][j] = (float)(i * DIM + j);
        }
    }

    cudaMemcpy(grid_d, grid, size, cudaMemcpyHostToDevice);
    // (No upload of tmp_grid is needed: gridmean overwrites every element of
    // tmp_grid_d before copy reads it — the original copied an uninitialized
    // host array for nothing.)

    dim3 numBlocks(DIM, DIM);
    for (int k = 0; k < TURNS; k++) {
        gridmean<<<numBlocks, 1>>>(grid_d, tmp_grid_d);
        copy<<<numBlocks, 1>>>(grid_d, tmp_grid_d);
    }

    cudaMemcpy(grid, grid_d, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            printf("%f ", grid[i][j]);
        }
        printf("\n");
    }

    // BUG FIX: free BOTH device buffers (the original leaked tmp_grid_d).
    cudaFree(grid_d);
    cudaFree(tmp_grid_d);
    return 0;
}
4,758
#include <stdexcept>
#include <algorithm>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>

// RAII wrapper around a device allocation of T[size].
template <class T>
class dev_array {
public:
    explicit dev_array() : start_(0), end_(0) {}

    // Allocate `size` elements on the device (throws on failure).
    explicit dev_array(size_t size) { allocate(size); }

    ~dev_array() { free(); }

    // Drop the current allocation and allocate `size` elements.
    void resize(size_t size) {
        free();
        allocate(size);
    }

    // Number of elements currently allocated.
    size_t getSize() const { return end_ - start_; }

    const T* getData() const { return start_; }
    T* getData() { return start_; }

    // Host -> device copy of min(size, getSize()) elements.
    void set(const T* src, size_t size) {
        size_t min = std::min(size, getSize());
        cudaError_t result =
            cudaMemcpy(start_, src, min * sizeof(T), cudaMemcpyHostToDevice);
        if (result != cudaSuccess) {
            throw std::runtime_error("failed to copy to device memory");
        }
    }

    // Device -> host copy of min(size, getSize()) elements.
    void get(T* dest, size_t size) {
        size_t min = std::min(size, getSize());
        cudaError_t result =
            cudaMemcpy(dest, start_, min * sizeof(T), cudaMemcpyDeviceToHost);
        if (result != cudaSuccess) {
            throw std::runtime_error("failed to copy to host memory");
        }
    }

private:
    void allocate(size_t size) {
        cudaError_t result = cudaMalloc((void**)&start_, size * sizeof(T));
        if (result != cudaSuccess) {
            start_ = end_ = 0;
            throw std::runtime_error("failed to allocate device memory");
        }
        end_ = start_ + size;
    }

    void free() {
        if (start_ != 0) {
            cudaFree(start_);
            start_ = end_ = 0;
        }
    }

    T* start_;
    T* end_;
};

// C = A * B for N x N row-major matrices; one thread per output element.
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
    int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    int COL = blockIdx.x * blockDim.x + threadIdx.x;

    if (ROW < N && COL < N) {
        float tmpSum = 0;
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // BUG FIX: the original stored C[ROW*N+COL] OUTSIDE this bounds
        // check, so threads past the matrix edge wrote out of bounds.
        C[ROW * N + COL] = tmpSum;
    }
}

// Launch helper: picks a launch configuration covering an N x N output.
void matrixMultiplication(float *A, float *B, float *C, int N) {
    dim3 threadsPerBlock(N, N);
    dim3 blocksPerGrid(1, 1);
    if (N * N > 512) {
        // BUG FIX: the original set threadsPerBlock to (512, 512) here —
        // 262144 threads per block, far over the 1024-thread hardware limit,
        // so the kernel never launched for large N. Use 16x16 tiles and a
        // ceil-divided grid instead.
        threadsPerBlock.x = 16;
        threadsPerBlock.y = 16;
        blocksPerGrid.x = (N + threadsPerBlock.x - 1) / threadsPerBlock.x;
        blocksPerGrid.y = (N + threadsPerBlock.y - 1) / threadsPerBlock.y;
    }

    matrixMultiplicationKernel<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
}

// Print an N x N row-major matrix, one row per line.
void printMatrix(std::vector<float> mat, int N) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            std::cout << mat[i * N + j] << ", ";
        }
        std::cout << std::endl;
    }
}

int main() {
    // Perform matrix multiplication C = A*B where A, B, C are NxN matrices.
    int N = 4;
    int SIZE = N * N;

    // Host-side matrices.
    std::vector<float> h_A(SIZE);
    std::vector<float> h_B(SIZE);
    std::vector<float> h_C(SIZE);

    // A[i][j] = sin(i), B[i][j] = cos(j): cheap, reproducible test data.
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            h_A[i * N + j] = sin(i);
            h_B[i * N + j] = cos(j);
        }
    }

    std::cout << "\n> Matrix A" << std::endl;
    printMatrix(h_A, N);
    std::cout << "\n> Matrix B" << std::endl;
    printMatrix(h_B, N);

    // Device-side buffers (freed automatically by dev_array's destructor).
    dev_array<float> d_A(SIZE);
    dev_array<float> d_B(SIZE);
    dev_array<float> d_C(SIZE);

    d_A.set(&h_A[0], SIZE);
    d_B.set(&h_B[0], SIZE);

    matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N);
    cudaDeviceSynchronize();

    d_C.get(&h_C[0], SIZE);
    cudaDeviceSynchronize();

    std::cout << "\n> Matrix C" << std::endl;
    printMatrix(h_C, N);

    // CPU reference multiplication for verification.
    float *cpu_C = new float[SIZE];
    float sum;
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            sum = 0.f;
            for (int n = 0; n < N; n++) {
                sum += h_A[row * N + n] * h_B[n * N + col];
            }
            cpu_C[row * N + col] = sum;
        }
    }

    // Accumulated signed difference between CPU and GPU results.
    double err = 0;
    for (int ROW = 0; ROW < N; ROW++) {
        for (int COL = 0; COL < N; COL++) {
            err += cpu_C[ROW * N + COL] - h_C[ROW * N + COL];
        }
    }

    std::cout << "\n> Error (CPU vs GPU): " << err << std::endl;

    delete[] cpu_C;  // BUG FIX: the original leaked this allocation
    return 0;
}
4,759
// Kernel executed on the CUDA device: computes one 32-wide strip of the
// product r = b * a, staging a 32-element tile of `a` in shared memory.
// Launch expectations: blockDim.x == 32, grid = (N/32, N); N must be a
// multiple of 32 (there is no tail handling).
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N)
{
    int tx = threadIdx.x;
    int i = blockIdx.x * 32 + tx;   // output column handled by this thread
    int j = blockIdx.y;             // output row handled by this block

    __shared__ float cb[32];

    float sum = 0.0f;
    for (int ks = 0; ks < N; ks += 32)
    {
        // Each thread stages one element of a's row j into shared memory.
        cb[tx] = a_d[ks + tx + N * j];

        // BUG FIX: barrier before the reads — cb[k-ks] below is written by
        // OTHER threads of the block; the original (active "VERSION 2.0")
        // omitted both barriers, a shared-memory race. (The commented-out
        // VERSION 3.0 in the original had them.)
        __syncthreads();

        for (int k = ks; k < ks + 32; ++k)
            sum += b_d[i + N * k] * cb[k - ks];

        // Barrier before the next iteration overwrites cb while slower
        // threads may still be reading the current tile.
        __syncthreads();
    }
    r_d[i + N * j] = sum;
}

// Host-callable launcher; returns the launch status.
extern "C" cudaError_t launch_actiune_thread(float* a_d, float* b_d, float *r_d,
                                             int N, dim3 DIM_GRID, dim3 DIM_BLOCK)
{
    actiune_thread<<<DIM_GRID, DIM_BLOCK>>>(a_d, b_d, r_d, N);
    return cudaGetLastError();
}
4,760
// In-place tanh backprop step: X[i][j] becomes the incoming gradient
// Y[i][j+1] scaled by the tanh derivative 1 - tanh(X[i][j])^2.
// One thread per (i, j) element of an A x B grid; out-of-range threads do
// nothing, so any 2D launch covering A x B is valid.
// NOTE(review): Y is read at column j+1 while j runs up to B-1, so Y's rows
// must hold at least B+1 elements (e.g. a bias/offset column) — confirm
// against the allocation site.
// NOTE(review): X and Y are arrays of row pointers dereferenced on the
// device, so the row-pointer arrays AND every row they point to must live in
// device memory.
__global__ void gradient_func(double** X, double** Y, long A, long B)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i < A && j < B){
        // pow/tanh resolve to the double overloads, matching the double data.
        X[i][j] = Y[i][j+1]*(1 - pow (tanh (X[i][j]), 2));
    }
}
4,761
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/*
 * CPU power-method benchmark: iterates W = A*V, V = W/||W||, and
 * lamda = V.W until lamda stops changing (|delta| < EPS) or max_iteration
 * is reached, then reports the wall-clock time.
 */

// Input matrix (GlobalSize x GlobalSize, row-major)
float* h_MatA = NULL;
// Current eigenvector estimate V and product vector W = A*V
float* h_VecV = NULL;
float* h_VecW = NULL;
float* h_NormW = NULL;   // allocated but unused by the CPU path; kept for parity

// Variables to change
int GlobalSize = 5000;          // matrix dimension (GlobalSize x GlobalSize)
const float EPS = 0.000005;     // convergence tolerance on lamda
int max_iteration = 100;        // maximum iteration count

// Functions
void Cleanup(void);
void InitOne(float*, int);
void UploadArray(float*, int);
void Arguments(int, char**);

// W = A * V (dense matrix-vector product).
void CPU_AvProduct()
{
	int N = GlobalSize;
	int matIndex = 0;
	for(int i=0;i<N;i++)
	{
		h_VecW[i] = 0;
		for(int j=0;j<N;j++)
		{
			matIndex = i*N + j;
			h_VecW[i] += h_MatA[matIndex] * h_VecV[j];
		}
	}
}

// V = W / ||W|| (Euclidean normalization).
void CPU_NormalizeW()
{
	int N = GlobalSize;
	float normW = 0;
	for(int i=0;i<N;i++)
		normW += h_VecW[i] * h_VecW[i];

	normW = sqrt(normW);
	for(int i=0;i<N;i++)
		h_VecV[i] = h_VecW[i]/normW;
}

// Rayleigh-quotient style estimate: lamda = V . W.
float CPU_ComputeLamda()
{
	int N = GlobalSize;
	float lamda = 0;
	for(int i=0;i<N;i++)
		lamda += h_VecV[i] * h_VecW[i];

	return lamda;
}

void RunCPUPowerMethod()
{
	printf("*************************************\n");
	float oldLamda = 0;
	float lamda = 0;

	// Initial product before entering the power loop.
	CPU_AvProduct();

	// Power loop: normalize, multiply, estimate, test convergence.
	for (int i=0;i<max_iteration;i++)
	{
		CPU_NormalizeW();
		CPU_AvProduct();
		lamda = CPU_ComputeLamda();
		printf("CPU lamda at %d: %f \n", i, lamda);

		// BUG FIX: the original called integer abs() on a float difference,
		// truncating |oldLamda - lamda| toward zero — any change smaller
		// than 1.0 looked converged and the loop exited far too early.
		if(fabs(oldLamda - lamda) < EPS)
			break;
		oldLamda = lamda;
	}
	printf("*************************************\n");
}

// Host code
int main(int argc, char** argv)
{
	struct timespec t_start, t_end;
	double runtime;
	Arguments(argc, argv);

	int N = GlobalSize;
	printf("Matrix size %d X %d \n", N, N);
	size_t vec_size = N * sizeof(float);
	size_t mat_size = N * N * sizeof(float);
	size_t norm_size = sizeof(float);

	// Allocate normalized value in host memory
	h_NormW = (float*)malloc(norm_size);
	// Allocate input matrix in host memory
	h_MatA = (float*)malloc(mat_size);
	// Allocate initial vector V in host memory
	h_VecV = (float*)malloc(vec_size);
	// Allocate W vector for computations
	h_VecW = (float*)malloc(vec_size);

	// Initialize input matrix and starting vector.
	UploadArray(h_MatA, N);
	InitOne(h_VecV, N);

	printf("Power method in CPU starts\n");
	clock_gettime(CLOCK_REALTIME, &t_start);
	RunCPUPowerMethod();   // the lamda is already solved here
	clock_gettime(CLOCK_REALTIME, &t_end);
	runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
	printf("CPU: run time = %f secs.\n", runtime);
	printf("Power method in CPU is finished\n");

	Cleanup();
}

void Cleanup(void)
{
	// Free host memory
	if (h_MatA) free(h_MatA);
	if (h_VecV) free(h_VecV);
	if (h_VecW) free(h_VecW);
	if (h_NormW) free(h_NormW);
	exit(0);
}

// Initializes data to the unit basis vector e_0: data[0]=1, rest 0.
// (The original comment claimed "zero value", which was misleading.)
void InitOne(float* data, int n)
{
	for (int i = 0; i < n; i++)
		data[i] = 0;
	data[0] = 1;
}

// Fill the n*n matrix with pseudo-random values in [0, 100].
void UploadArray(float* data, int n)
{
	int total = n*n;
	int value = 1;
	for (int i = 0; i < total; i++)
	{
		data[i] = (int)(rand() % (int)(101));
		value++;
		if (value > n) value = 1;
	}
}

// Obtain program arguments: --size <n>, --max_iteration <k>.
void Arguments(int argc, char** argv)
{
	for (int i = 0; i < argc; ++i)
	{
		if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
		{
			GlobalSize = atoi(argv[i+1]);
			i = i + 1;
		}
		if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0)
		{
			max_iteration = atoi(argv[i+1]);
			i = i + 1;
		}
	}
}
4,762
// Name: U.H. Anuji de Silva
// Student Id: 1432292
#include <stdio.h>
// stdlib.h retained from the original (used there for malloc-based variants)
#include <stdlib.h>

#define N 4

int main()
{
    /* Two fixed input matrices and the result matrix. */
    const int A[N][N] = { {1, 5, 6, 7},
                          {4, 4, 8, 0},
                          {2, 3, 4, 5},
                          {2, 3, 4, 5} };
    const int B[N][N] = { {1, 5, 6, 7},
                          {4, 4, 8, 0},
                          {2, 3, 4, 5},
                          {2, 3, 4, 5} };
    int C[N][N];

    /* Element-wise sum: C = A + B. */
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++) {
            C[r][c] = A[r][c] + B[r][c];
        }
    }

    /* Print the result, one row per line. */
    printf("Sum of entered matrices:-\n");
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++) {
            printf("%d ", C[r][c]);
        }
        printf("\n");
    }

    return 0;
}
4,763
#include "includes.h"

// Busy-spin kernel: loops forever, keeping its SM occupied. Useful for
// watchdog/timeout or scheduling experiments; it never returns on its own,
// so the host must rely on a launch timeout or kill the context.
__global__ void kernel(void)
{
    while(1);
}
4,764
// Block-wide reduction of the squared L2 norm of `weight` (sum of w*w),
// apparently intended for an L2/weight-decay cost term.
// Launch expectation: blockDim.x <= 32 — `_sum` has only 32 slots and is
// indexed directly by threadIdx.x, so larger blocks write out of bounds.
// NOTE(review): the reduced value ends up in _sum[0] but is never stored to
// `cost`, and `lambda` is unused, so this kernel as written has no observable
// effect. Presumably a final
//   if (threadIdx.x == 0) cost[0] = _sum[0] * lambda / 2;
// (or similar) was intended — confirm against the calling code.
__global__ void g_getCost_3(float* cost, float* weight, float lambda, int wlen)
{
	__shared__ float _sum[32];
	// Each thread zeroes its own partial-sum slot before accumulating.
	_sum[threadIdx.x] = 0;
	__syncthreads();
	// Strided accumulation: thread t sums weight[t], weight[t+blockDim.x], ...
	for(int i = 0; i < wlen; i += blockDim.x)
	{
		int id = i + threadIdx.x;
		if(id < wlen)
		{
			_sum[threadIdx.x] += weight[id] * weight[id];
		}
	}
	// Tree reduction over the `len` live slots; skip = ceil(len/2) handles
	// non-power-of-two block widths.
	int len = blockDim.x;
	while(len != 1)
	{
		__syncthreads();
		int skip = (len + 1) >> 1;
		if(threadIdx.x < skip && (threadIdx.x + skip) < len)
		{
			_sum[threadIdx.x] += _sum[threadIdx.x + skip];
		}
		len = skip;
	}
}
4,765
#include "includes.h"

using namespace std;

/* Small smoke-test configuration, kept for reference:
const int sizePoint = 5;
const int sizeIndividum = 5;
const int mathValueMutation = 5;
const float dispersionMutation = 5.0f;
const int powCount = 3;
const float randMaxCount = 20.0f;
*/
const int sizePoint = 500;        // number of sample points
const int sizeIndividum = 1000;   // population size
const int mathValueMutation = 5;
const float dispersionMutation = 5.0f;
const int powCount = 3;           // polynomial coefficients per individual
const float randMaxCount = 20.0f;
const int maxPokoleney = 30;      // generation cap

// Fitness kernel: one thread per individual (only threadIdx.x is used, so a
// single-block launch is assumed). Evaluates the individual's polynomial at
// x = 0..sizePoint-1 and accumulates the absolute error against points[].
// Precondition: errors[] must be zero-initialized — the kernel uses +=.
__global__ void errorsKernel(float *points, float *individs, float *errors, int powCount, int sizePoint)
{
    int id = threadIdx.x;
    float ans = 0;
    for (int i = 0; i < sizePoint; i++)
    {
        for (int j = 0; j < powCount; j++)
        {
            // term = coefficient_j * i^j, built with an integer power loop.
            // BUG FIX: the original accumulated the term in an `int`, so the
            // float coefficient was truncated to an integer on the final
            // multiply; use a float accumulator.
            float term = individs[id*powCount + j];
            for (int k = 0; k < j; k++)
            {
                term *= i;
            }
            ans += term;
        }
        ans = points[i] - ans;
        // sqrt(ans*ans) in the original is just |ans|; fabsf is equivalent
        // and avoids a float->double sqrt round-trip.
        errors[id] += fabsf(ans);
        ans = 0;
    }
}
4,766
/************************************************************************************\ * * * Copyright � 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. 
* * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR"�) (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ /** * @brief color kernel 1 * @param row CSR pointer array * @param col CSR column array * @param node_value Vertex value array * @param color_array Color value array * @param stop Termination variable * @param max_d Max array * @param color Current color label * @param num_nodes Number of vertices * @param num_edges Number of edges */ __global__ void color1(int *row, int *col, int *node_value, int *color_array, int *stop, int *max_d, const int color, const int num_nodes, const int num_edges) { // Get my thread workitem id int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { // If the vertex is still not colored if (color_array[tid] == -1) { // Get the start and end pointer of the neighbor list int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; int maximum = -1; // Navigate the neighbor list for (int edge = start; edge < end; edge++) { // Determine if the vertex value is the maximum in the neighborhood if (color_array[col[edge]] == -1 && start != end - 1) { *stop = 1; if (node_value[col[edge]] > maximum) maximum = node_value[col[edge]]; } } // Assign maximum the max array max_d[tid] = maximum; } } } /** * @brief color kernel 2 * @param node_value Vertex value array * @param color_array Color value array * @param max_d Max array * @param color Current color label * @param num_nodes Number of vertices * @param num_edges Number of edges */ __global__ void color2(int *node_value, int *color_array, int *max_d, const int color, const int num_nodes, const int num_edges) { // Get my workitem id int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { // If the vertex is still not colored if (color_array[tid] == -1) { if (node_value[tid] >= max_d[tid]) // Assign a color color_array[tid] = color; } } }
4,767
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Matrix transpose: from (OUT_WIDTH x IN_WIDTH) to (IN_WIDTH x OUT_WIDTH)
#define IN_WIDTH 1000
#define OUT_WIDTH 100
#define N IN_WIDTH * OUT_WIDTH
#define BLOCK_SIZE 16
#define MAX_ERR 1e-6

// d_out[c][r] = d_in[r][c].
// rid (launch x) indexes rows of the input, 0..d_out_width;
// cid (launch y) indexes columns of the input, 0..d_in_width.
// Out-of-range threads do nothing, so any covering grid is valid.
__global__ void matrix_transpose(float *d_out, float *d_in,
                                 int d_in_width, int d_out_width) {
    int cid = blockIdx.y * blockDim.y + threadIdx.y;
    int rid = blockIdx.x * blockDim.x + threadIdx.x;
    if (cid < d_in_width && rid < d_out_width) {
        d_out[cid * d_out_width + rid] = d_in[rid * d_in_width + cid];
    }
}

int main() {
    float *h_in, *h_out;
    float *d_in, *d_out;

    // Allocate host memory
    h_in = (float*)malloc(sizeof(float) * N);
    h_out = (float*)malloc(sizeof(float) * N);

    // Initialize host array with its linear index.
    for (int i = 0; i < N; i++) {
        h_in[i] = (float)i;
    }

    // Allocate device memory
    cudaMalloc((void**)&d_in, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);

    // Transfer data from host to device memory
    cudaMemcpy(d_in, h_in, sizeof(float) * N, cudaMemcpyHostToDevice);

    // Executing kernel. x dim (rid) is mapped to OUT_WIDTH rows.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-division grid. The original used W / BLOCK_SIZE + 1, which
    // launches a full extra block row/column whenever W is an exact multiple
    // of BLOCK_SIZE.
    dim3 dimGrid((OUT_WIDTH + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (IN_WIDTH + BLOCK_SIZE - 1) / BLOCK_SIZE);
    matrix_transpose<<<dimGrid, dimBlock>>>(d_out, d_in, IN_WIDTH, OUT_WIDTH);

    // Transfer data back to host memory
    cudaMemcpy(h_out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    // Verification: out[j][i] must equal in[i][j] for every element.
    for (int i = 0; i < OUT_WIDTH; i++) {
        for (int j = 0; j < IN_WIDTH; j++) {
            assert(fabs(h_in[i * IN_WIDTH + j] - h_out[j * OUT_WIDTH + i]) < MAX_ERR);
        }
    }
    printf("PASSED\n");

    // Deallocate device memory
    cudaFree(d_in);
    cudaFree(d_out);

    // Deallocate host memory
    free(h_in);
    free(h_out);
}
4,768
#include <iostream>
#include <chrono>
#include <cstdlib>  // atoi

// Evaluate the polynomial with coefficients poly[0..degree] at each array[i],
// in place, using Horner's rule (O(degree) per element):
//   array[i] = poly[0] + poly[1]*x + ... + poly[degree]*x^degree, x = array[i].
// NOTE(review): the original body was an empty stub (assignment template);
// this implements the computation the surrounding timing harness measures.
void polynomial_expansion (float* poly, int degree, int n, float* array) {
  for (int i = 0; i < n; ++i) {
    float x = array[i];
    float acc = poly[degree];
    for (int d = degree - 1; d >= 0; --d)
      acc = acc * x + poly[d];
    array[i] = acc;
  }
}

int main (int argc, char* argv[]) {
  // Usage: <prog> n degree
  if (argc < 3) {
    std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
    return -1;
  }

  int n = atoi(argv[1]);       // NOTE: atoi gives 0 on malformed input
  int degree = atoi(argv[2]);
  int nbiter = 1;              // number of timed repetitions

  float* array = new float[n];
  float* poly = new float[degree+1];
  for (int i=0; i<n; ++i)
    array[i] = 1.;

  for (int i=0; i<degree+1; ++i)
    poly[i] = 1.;

  // Time nbiter applications (note: the expansion is in-place, so repeated
  // iterations compound; nbiter is 1 here).
  std::chrono::time_point<std::chrono::system_clock> begin, end;
  begin = std::chrono::system_clock::now();

  for (int iter = 0; iter<nbiter; ++iter)
    polynomial_expansion (poly, degree, n, array);

  end = std::chrono::system_clock::now();
  std::chrono::duration<double> totaltime = (end-begin)/nbiter;

  std::cerr<<array[0]<<std::endl;
  std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;

  delete[] array;
  delete[] poly;

  return 0;
}
4,769
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda.h>

#define NUM_THREADS 1024
#define NUM_BLOCKS 32768
#define NUM_VALS NUM_THREADS*NUM_BLOCKS
#define SHARED_SIZE_LIMIT 1024

// Debug helper: print `length` ints on one line.
void array_print(int *arr, int length)
{
  for (int i = 0; i < length; ++i)
    printf("%d ", arr[i]);
  printf("\n");
}

// Fill v[0..NUM_VALS) with pseudo-random ints (unseeded: reproducible runs).
void array_fill(int *v)
{
  for (int i = 0; i < NUM_VALS; i++)
    v[i] = rand();
}

// Verify v is in non-increasing order, printing each comparison.
// NOTE(review): bitonicSortShared below only sorts each 1024-element tile,
// in alternating directions per block; without the inter-block merge phases
// of a full bitonic sort this global check is expected to fail for more than
// one block — confirm whether the merge kernels were dropped intentionally.
void test(int *v)
{
  int val = v[0];
  for (int i = 1; i < NUM_VALS; ++i) {
    if (val < v[i]) {
      printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
      printf("TEST FAIL\n\n");
      return;
    } else {
      printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
      val = v[i];
    }
  }
  printf("TEST OK\n\n");
}

// Swap A and B when their current order disagrees with `dir`.
__device__ inline void comparator(int &A, int &B, uint dir)
{
  if ((A <= B) == dir) {
    int temp = A;
    A = B;
    B = temp;
  }
}

// Sort each SHARED_SIZE_LIMIT-element tile of dev_values in shared memory.
// Launch expectation: SHARED_SIZE_LIMIT/2 threads per block — each thread
// owns two elements. Even-indexed blocks sort ascending, odd descending
// (the classic per-tile stage of a bitonic sort).
__global__ void bitonicSortShared(int *dev_values)
{
  int tx = threadIdx.x;
  int bx = blockIdx.x;
  int index = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
  __shared__ int sh_values[SHARED_SIZE_LIMIT];

  // Each thread stages its two elements (lower and upper half of the tile).
  sh_values[tx] = dev_values[index];
  sh_values[tx + (SHARED_SIZE_LIMIT/2)] = dev_values[index + (SHARED_SIZE_LIMIT/2)];

  for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1)
  {
    uint ddd = (tx & (size / 2)) == 0; // direction: ascending or descending
    for (uint stride = size/2; stride > 0; stride >>= 1)
    {
      __syncthreads();
      uint pos = 2 * tx - (tx & (stride - 1));
      comparator(sh_values[pos], sh_values[pos + stride], ddd);
    }
  }

  // Final merge of the two sorted halves; direction alternates per block.
  uint ddd = ((bx & 1) == 0);
  for (uint stride = SHARED_SIZE_LIMIT/2; stride > 0; stride >>= 1)
  {
    __syncthreads();
    uint pos = 2 * tx - (tx & (stride - 1));
    comparator(sh_values[pos + 0], sh_values[pos + stride], ddd);
  }

  __syncthreads();
  dev_values[index] = sh_values[tx];
  dev_values[index + (SHARED_SIZE_LIMIT/2)] = sh_values[tx + (SHARED_SIZE_LIMIT/2)];
}

// Copy the host array to the device, run the tile sort, and copy it back.
void bitonic_sort(int *values)
{
  int *dev_values;
  size_t size = NUM_VALS * sizeof(int);

  cudaMalloc((void**)&dev_values, size);
  cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);

  uint blockCount = NUM_VALS / SHARED_SIZE_LIMIT;
  uint threadCount = SHARED_SIZE_LIMIT / 2;
  printf("blockCount=%d, threadCount=%d, SHARED_SIZE_LIMIT=%d\n",
         blockCount, threadCount, SHARED_SIZE_LIMIT);

  bitonicSortShared<<<blockCount, threadCount>>>(dev_values);
  cudaDeviceSynchronize();

  cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
  cudaFree(dev_values);
}

int main(void)
{
  int *host_values;
  cudaMallocHost(&host_values, NUM_VALS * sizeof(int));

  float TiempoKernel;
  cudaEvent_t E1, E2;
  cudaEventCreate(&E1);
  cudaEventCreate(&E2);

  array_fill(host_values);

  cudaEventRecord(E1, 0);
  cudaEventSynchronize(E1);

  cudaFuncSetCacheConfig(bitonicSortShared, cudaFuncCachePreferL1);
  bitonic_sort(host_values);

  // BUG FIX: the original recorded E2 AFTER the printf-heavy host-side
  // test(), so the "kernel" time mostly measured console output. Stop the
  // clock right after the sort, then verify.
  cudaEventRecord(E2, 0);
  cudaEventSynchronize(E2);
  cudaEventElapsedTime(&TiempoKernel, E1, E2);
  printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);

  test(host_values);

  // BUG FIX: memory from cudaMallocHost must be released with cudaFreeHost
  // (the original passed this pinned host pointer to cudaFree, which expects
  // a device pointer).
  cudaFreeHost(host_values);
  cudaEventDestroy(E1);
  cudaEventDestroy(E2);
  return 0;
}
4,770
#include <stdio.h>

// __global__: callable from host code, executed on the device.
__global__ void helloFromGPU(void)
{
    printf("Hello world from GPU!\n");
}

int main(void)
{
    printf("Hello world from CPU!\n");

    // <<<1, 10>>> launches one block of 10 threads; every thread runs the
    // same kernel body, so the GPU line is printed ten times.
    helloFromGPU <<<1, 10>>>();

    // Tear down all device resources for this process (this also flushes
    // the device-side printf buffer before the program exits).
    cudaDeviceReset();
    return 0;
}
4,771
// Note this file isn't configured to automatically compile
#include <device_functions.h>
#include <device_launch_parameters.h>

// Build:
// nvcc -l cuda -o microbench microbench.cpp
// nvcc -arch sm_50 -cubin microbench.cu
// Inspect a cubin (use nvdisasm from cuda 6.5 for best results):
// maxas.pl -e microbench.cubin
// Insert new sass into cubin
// maxas.pl -i microbench.sass microbench.cubin
// run it:
// ./microbench

// Use extern C so C++ doesn't mangle our kernel name
// Microbenchmark kernel: times a single global->shared load with the SM
// clock counter and packs the (16-bit-truncated) start/end timestamps into
// one word of `clocks`.
extern "C" __global__ void microbench(int *out, int *clocks, int *in)
{
    __shared__ int share[1024];

    int tid = threadIdx.x;
    int bx = blockIdx.x;
    int by = blockIdx.y;

    int start = clock();

    share[tid] = in[by * 65535 + bx];

    //tid + blkDimX + blkDimY + blkDimZ + grdDimX + grdDimY + grdDimZ

    __syncthreads();

    int end = clock();

    // High half of `end` | low-half-shifted `start`; the commented
    // alternative stored the plain difference instead.
    clocks[tid] = (start >> 16) | (end & 0xffff0000); //end - start;

    // Reading the NEIGHBOR lane's slot (tid ^ 1) forces a real shared-memory
    // round trip so the load above cannot be optimized away.
    out[tid] = share[tid ^ 1];
}

// A note about using the Cuda Runtime.
// If that's your preference over the driver API then here's what you'd do:
// In your project properties in the Cuda C/C++ panel:
//    -Set the "Keep Processed Files" (-keep) option
//    -Add a -v manually to the command line
// If compiling on command line just add -keep -v options to nvcc.
// Rebuild your solution and look in the log for these lines that follow the ptxas step:
// #$ fatbinary --create="Release/kernel.fatbin" -32 --key="a7bce87544c2a492" --ident="C:/Users/Scott/Documents/sgemm6/sgemm6/kernel.cu" --cmdline="-v  --opt-level 4 --generate-line-info " "--image=profile=sm_50,file=Release/kernel.sm_50.cubin" "--image=profile=compute_50,file=Release/kernel.ptx" --embedded-fatbin="Release/kernel.fatbin.c" --cuda
// #$ cl.exe @Release/kernel.cu.cpp.ii.res > "Release/kernel.cu.cpp.ii"
// #$ cl.exe @Release/kernel.cu.obj.res -Fo"Release/kernel.cu.obj"
// You just need to manually run these 3 commands (or add them to a build script)
// after you've modified the cubin generated from the preceeding ptxas command.
// That will give you a new .cu.obj file which will automatically be linked in for you next time you // build your project (or you could manually run the linker step as well). // Having done that you can call your kernel normally using the <<< >>> syntax. // Debugging will have to be with the sass syntax but that's what you'll want to see anyway. // With fatbin you can also keep non-maxwell optimized versions of your code. // I just discovered this also works as a shortcut to the above: // nvcc -lib -arch sm_52 -m 32 -use-cubin code=sm_52,cubin=microbench.cubin -o microbench.lib microbench.cu // The cu kernel definitions above need to have empty bodies. // And, the cu file must be compiled to a lib seperately before linking.
4,772
#include<bits/stdc++.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
using namespace std;

#define N 2048

// Fill v[0..n) with pseudo-random values in [0, 1000).
void initialise(int* v, int n){
    for(int i = 0; i < n; i++){
        v[i] = rand() % 1000;
    }
}

// res = mat * v for an n x n row-major matrix; one thread per output row.
// BUG FIX: the original indexed with bare threadIdx.x and was launched as
// <<<1, N>>> with N = 2048 — above the 1024 threads-per-block hardware
// limit, so the launch failed silently and res was never written. Use a
// global index with a bounds guard so any covering grid works.
__global__ void mat_vec_mult(int* mat, int* v, int* res, int n){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    // Accumulate in a register instead of read-modify-writing global memory.
    int acc = 0;
    for(int i = 0; i < n; i++){
        acc += mat[tid*n + i] * v[i];
    }
    res[tid] = acc;
}

int main(){
    int *h_mat, *h_v, *h_r;   // host matrix, vector, GPU result
    int *d_mat, *d_v, *d_r;   // device copies
    int *s_r;                 // CPU reference result
    float elapsed_cpu, elapsed_gpu;
    clock_t t1, t2;

    h_mat = (int*)malloc(N*N*sizeof(int));
    h_v = (int*)malloc(N*sizeof(int));
    h_r = (int*)malloc(N*sizeof(int));
    s_r = (int*)malloc(N*sizeof(int));

    cudaMalloc(&d_mat, N*N*sizeof(int));
    cudaMalloc(&d_v, N*sizeof(int));
    cudaMalloc(&d_r, N*sizeof(int));

    initialise(h_mat, N*N);
    initialise(h_v, N);

    // Serial CPU reference.
    t1 = clock();
    for(int i = 0; i < N; i++){
        s_r[i] = 0;
        for(int j = 0; j < N; j++){
            s_r[i] = s_r[i] + h_mat[i*N + j]*h_v[j];
        }
    }
    t2 = clock();

    // GPU path: timing covers the transfers plus the kernel, as before.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    cudaMemcpy(d_mat, h_mat, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_v, h_v, N*sizeof(int), cudaMemcpyHostToDevice);

    // BUG FIX: launch enough 256-thread blocks to cover N rows instead of
    // one oversized (invalid) block.
    int threads = 256;
    int blocks = (N + threads - 1) / threads;
    mat_vec_mult<<<blocks, threads>>>(d_mat, d_v, d_r, N);

    cudaMemcpy(h_r, d_r, N*sizeof(int), cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_gpu, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; // CPU time in ms
    cout << elapsed_cpu << endl;
    cout << elapsed_gpu << endl;
    cout << "speedup " << elapsed_cpu/elapsed_gpu << endl;

    for(int i = 0; i < N; i++){
        if(s_r[i] != h_r[i]){
            cout << "failed";
            break;
        }
    }

    // BUG FIX: the original leaked every allocation.
    cudaFree(d_mat);
    cudaFree(d_v);
    cudaFree(d_r);
    free(h_mat);
    free(h_v);
    free(h_r);
    free(s_r);
    return 0;
}
4,773
/***************************************************
 * Module for PIR
 *
 * To be compiled with nvcc -ptx pir.cu
 * Debug: nvcc -arch=sm_20 -ptx pir.cu
 * Note: CUDA may not support all versions of gcc;
 * See
 * https://groups.google.com/forum/#!topic/torch7/WaNmWZqMnzw
 **************************************************/
//#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif

typedef char int8_cu;
typedef unsigned char uint8_cu;
// NOTE(review): on LP64 platforms `long int` is 64-bit, so int32_cu and
// uint32_cu are misleadingly named — confirm the intended width at the
// call sites before relying on these.
typedef long int int32_cu;
typedef unsigned long int uint32_cu;
typedef long long int int64_cu;
typedef unsigned long long int uint64_cu;
#define DATA_TYPE uint64_cu

// CUDA Kernel
// Private Information Retrieval accumulate: each thread owns one DATA_TYPE
// word of the database (db laid out as numBuckets buckets of bucketSize
// words). For each request in the batch whose bitmask selects this thread's
// bucket, the thread XORs its word into that request's output slice
// (output[i*bucketSize + depthOffset]) via atomicXor.
// reqs holds batchSize bitmasks of reqLength bytes each (bit b of byte
// bucketId/8 selects bucket bucketId). numBuckets is currently unused.
// NOTE(review): atomicXor on a 64-bit operand requires compute capability
// 3.5+, which conflicts with the sm_20 debug build line above — confirm the
// minimum target architecture.
__global__ void pir(DATA_TYPE* db, uint8_cu* reqs, DATA_TYPE* output, int batchSize, int reqLength, int numBuckets, int bucketSize, int globalSize){
  //int localIndex = threadIdx.x;
  //int groupIndex = blockIdx.x;
  int globalIndex = threadIdx.x + (blockIdx.x * blockDim.x);
  if (globalIndex >= globalSize) {
    return;
  }

  // NOTE(review): this barrier sits after an early return, so if globalSize
  // is not a multiple of blockDim.x the tail block reaches it divergently
  // (undefined behavior). Since the kernel uses no shared memory, the
  // barrier also appears to serve no purpose — confirm it can be removed.
  __syncthreads();

  // Iterate over requests in a batch, atomic_xor my data into output
  int bucketId = globalIndex / bucketSize;
  int depthOffset = globalIndex % bucketSize;
  DATA_TYPE data = db[globalIndex];
  DATA_TYPE* addr;
  uint8_cu reqBit;
  for (int i = 0; i < batchSize; i++) {
    // Test this request's selection bit for my bucket.
    reqBit = reqs[(i*reqLength) + (bucketId/8)] & (1 << (bucketId%8));
    if (reqBit > 0) {
      addr = &output[(i*bucketSize)+depthOffset];
      atomicXor(addr, data);
    }
  }
}

#ifdef __cplusplus
}
#endif
4,774
#include "includes.h"

// 1-D stencil of radius RADIUS over in[0..dim): out[i] = sum of
// in[i-RADIUS .. i+RADIUS], zero-padded outside [0, dim).
// Each block stages a tile of BLOCK_SIZE elements plus halos in shared memory.
__global__ void stencil_1d(int *in, int *out, int dim) {
    __shared__ int temp[BLOCK_SIZE + 2 * RADIUS];

    int lindex = threadIdx.x + RADIUS;
    int stride = gridDim.x * blockDim.x;

    // BUG FIX: the original looped `while (gindex < dim + blockDim.x)`, a
    // per-thread condition. When dim is not a multiple of blockDim.x, threads
    // of the same block can disagree on it, so part of the block exits while
    // the rest waits at __syncthreads() — undefined behavior. The condition
    // below depends only on `base`, which is identical for every thread in
    // the block, so all threads run the same number of iterations and always
    // reach both barriers together.
    for (int base = blockDim.x * blockIdx.x; base < dim; base += stride) {
        int gindex = base + threadIdx.x;

        // Stage this tile in shared memory (zero where we run off the end).
        temp[lindex] = (gindex < dim) ? in[gindex] : 0;

        // First RADIUS threads also populate the left/right halos,
        // zeroed when they fall outside [0, dim).
        if (threadIdx.x < RADIUS) {
            int left = gindex - RADIUS;
            temp[lindex - RADIUS] = (left >= 0) ? in[left] : 0;
            int right = gindex + blockDim.x;
            temp[lindex + blockDim.x] = (right < dim) ? in[right] : 0;
        }

        // All staging must finish before any thread reads the tile.
        __syncthreads();

        // Apply the stencil.
        int result = 0;
        for (int offset = -RADIUS; offset <= RADIUS; offset++) {
            result += temp[lindex + offset];
        }

        if (gindex < dim)
            out[gindex] = result;

        // Everyone must be done reading temp before the next iteration
        // overwrites it.
        __syncthreads();
    }
}
4,775
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N 1000 //Job size = 1K, 10K, 100K, 1M and 10M

// add kernel: c[i] = a[i] + b[i], one thread per element.
// BUG FIX: the original indexed by blockIdx.x alone but was launched as
// <<<1, N>>> (one block!), so every thread had blockIdx.x == 0 and only c[0]
// was ever written. The kernel now uses the flat global thread index with a
// bounds guard, and the launch below supplies enough blocks to cover N.
__global__ void add(int *a, int *b, int *c) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N) {
        c[tid] = a[tid] + b[tid];
    }
}

// Fill x with `size` pseudo-random values in [0, N).
void random_ints(int* x, int size) {
    int i;
    for (i = 0; i < size; i++) {
        x[i] = rand() % N;
    }
}

int main(void) {
    int *a, *b, *c;             // host copies of a, b, c
    int *d_a, *d_b, *d_c;       // device copies of a, b, c
    int size = N * sizeof(int);

    // GPU timing via events (covers copies + kernel).
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Allocate device memory
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Allocate CPU memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    cudaEventRecord(start, 0);

    // Copy CPU memory to GPU memory
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() with enough 256-thread blocks to cover all N elements.
    int threads = 256;
    add<<<(N + threads - 1) / threads, threads>>>(d_a, d_b, d_c);

    // Copy from device to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    printf("GPU Execution Time = %f\n", time);

    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
4,776
/* Toy verification examples carrying ACSL-style contracts (Frama-C syntax). */

/* Increments its pass-by-value parameter.
   NOTE(review): in ACSL function contracts, a formal parameter named in an
   `ensures` clause denotes its pre-state value (a caller cannot observe writes
   to a by-value argument), so `ensures x == 11` alongside `requires x == 10`
   looks unprovable as written — TODO confirm against the intended verifier. */
void modify_param(int x) { /*@ requires x == 10; ensures x == 11; */ x++; }

/* Halves an unsigned value.
   NOTE(review): `ensures s >= 0` is trivially true for an unsigned type
   (and, as above, refers to the pre-state value of s). */
void test1(unsigned int s) { /*@ requires s > 0; ensures s >= 0; */ s /= 2; }
4,777
#include "includes.h"

// Permuted, row-reversed copy of matrix A into matrix B (both row-major with
// `cols` columns). Slot r of B receives the A row named by new_idxs[r], and
// the slot order itself is reversed: slot r lands in row (max_rows - 1 - r).
// One thread per element; threads whose flat id reaches `size` do nothing.
__global__ void Copy_matA_to_matB_withShuffleIdx(float * A, float * B, int size, int cols, float * new_idxs, int max_rows) {
    int flat = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
    if (flat >= size)
        return;

    int slot = flat / cols;
    int col = flat % cols;

    int dst_row = max_rows - 1 - slot;   // order was ascending; emit it reversed
    int src_row = (int)new_idxs[slot];   // row indices arrive stored as floats

    B[dst_row * cols + col] = A[src_row * cols + col];
}
4,778
#include "includes.h"

// Shifts a packed set of 3-D coordinates by half the volume extents.
// `coords` is laid out as three contiguous planes of `total` floats each;
// the first plane is offset by dim_z/2, the second by dim_y/2, the third by
// dim_x/2 (component order assumed z, y, x — TODO confirm with callers).
// One thread per point; threads past `total` do nothing.
__global__ void recenter_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x) {
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t total = dim_x * dim_y * dim_z;
    if (index < total) {
        coords[index] += (float)dim_z / 2.0;
        coords[index + total] += (float)dim_y / 2.0;
        coords[index + 2 * total] += (float)dim_x / 2.0;
    }
    // FIX: the original ended with a __syncthreads() here. It synchronized
    // nothing — this kernel uses no shared memory and no work follows the
    // barrier — so it has been removed.
}
4,779
#include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define BLOCK_SIZE 512 #define MAX_POINTS 100000000 // 100M points #define MAX_MEANS 1000 #define MAX_ITER 30 // CUDA prefers struct-of-arrays style here (for cache purposes) typedef struct { double *x, *y; int *membership; } points; typedef struct { double *x, *y; } centroids; typedef struct { double *x_sum, *y_sum; int *size; } temp_centroids; // algorithm termination flag __managed__ int assignment_changed = 1; // reads n data points from input file __host__ void read_data(int n, char *file_name, points P) { unsigned int i = 0; double x, y; FILE *file = fopen(file_name, "r"); assert(file != NULL); while (!feof(file) && i < n) { if (fscanf(file, "%lf %lf", &x, &y) != 2) break; P.x[i] = x; P.y[i] = y; P.membership[i++] = -1; } } // selects k centers at random from n points __host__ void init_centers(int n, int k, points P, centroids C) { srand(time(NULL)); for (int i = 0; i < k; ++i) { // not actually uniform random sampling, but very close int rand_idx = rand() % n; C.x[i] = P.x[rand_idx]; C.y[i] = P.y[rand_idx]; } } // computes ||p-c||^2 for a point p and center c __device__ inline double norm_2D_sqr(double x1, double y1, double x2, double y2) { // sqrt is monotonic, so we may omit it in the distance calculation // i.e. 
application of sqrt does not change the order of distances return (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2); } // assign each point to the cluster given by the closest centroid // NVIDIA suggests const and restrict here to improve compiler optimization __global__ void assign_clusters(int n, int k, const double *__restrict__ Px, const double *__restrict__ Py, int *__restrict__ Pmembership, double *__restrict__ Cx, double *__restrict__ Cy, double *__restrict__ Ox_sum, double *__restrict__ Oy_sum, int *__restrict__ Osize) { int index = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; // thread-local values that will be reduced __shared__ double x_sum[BLOCK_SIZE]; __shared__ double y_sum[BLOCK_SIZE]; __shared__ int size[BLOCK_SIZE]; int membership = -1; if (index < n) { double min_dist = INFINITY; for (int i = 0; i < k; ++i) { double current_dist = norm_2D_sqr(Px[index], Py[index], Cx[i], Cy[i]); if (current_dist < min_dist) { min_dist = current_dist; membership = i; } } // arbitrary concurrent write is valid since all // threads write the same value if (membership != Pmembership[index]) assignment_changed = 1; Pmembership[index] = membership; } __syncthreads(); // k reductions (one per centroid) for (int c = 0; c < k; ++c) { x_sum[tid] = (membership == c) ? Px[index] : 0; y_sum[tid] = (membership == c) ? Py[index] : 0; size[tid] = (membership == c) ? 
1 : 0; __syncthreads(); // reduce block's sums into one value (in thread 0) for (int offset = BLOCK_SIZE >> 1; offset > 0; offset >>= 1) { if (tid < offset) { x_sum[tid] += x_sum[tid + offset]; y_sum[tid] += y_sum[tid + offset]; size[tid] += size[tid + offset]; } __syncthreads(); } // save block's sums to output arrays if (tid == 0) { Ox_sum[blockIdx.x * k + c] = x_sum[tid]; Oy_sum[blockIdx.x * k + c] = y_sum[tid]; Osize[blockIdx.x * k + c] = size[tid]; } __syncthreads(); } } // reduce temporary cluster sizes and centroid x/y sums to smaller arrays __global__ void reduce_temp_clusters(int n, int k, const double *__restrict__ Ix_sum, const double *__restrict__ Iy_sum, const int *__restrict__ Isize, double *__restrict__ Ox_sum, double *__restrict__ Oy_sum, int *__restrict__ Osize) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int tid = threadIdx.x; // thread-local values that will be reduced __shared__ double x_sum[BLOCK_SIZE]; __shared__ double y_sum[BLOCK_SIZE]; __shared__ int size[BLOCK_SIZE]; for (int c = 0; c < k; ++c) { x_sum[tid] = 0; y_sum[tid] = 0; size[tid] = 0; // if necessary, sum multiple items per thread for (int b = index; b < n; b += stride) { x_sum[tid] += Ix_sum[b * k + c]; y_sum[tid] += Iy_sum[b * k + c]; size[tid] += Isize[b * k + c]; } __syncthreads(); // reduce block's sums into one value (in thread 0) for (int offset = BLOCK_SIZE >> 1; offset > 0; offset >>= 1) { if (tid < offset) { x_sum[tid] += x_sum[tid + offset]; y_sum[tid] += y_sum[tid + offset]; size[tid] += size[tid + offset]; } __syncthreads(); } // save block's sums to output arrays if (tid == 0) { Ox_sum[blockIdx.x * k + c] = x_sum[tid]; Oy_sum[blockIdx.x * k + c] = y_sum[tid]; Osize[blockIdx.x * k + c] = size[tid]; } __syncthreads(); } } // update cluster centroid positions __global__ void update_clusters(int n, int k, double *__restrict__ Cx, double *__restrict__ Cy, const double *__restrict__ Ix_sum, const double *__restrict__ Iy_sum, 
const int *__restrict__ Isize) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < k && Isize[index]) { Cx[index] = Ix_sum[index] / Isize[index]; Cy[index] = Iy_sum[index] / Isize[index]; } } /* * prints results and performance where * k = number of clusters (means) * n = number of points (in 2D) * h = number of iterations until convergence * t = elapsed time (in seconds) * * P contains the input points * C contains the final cluster centroids * T contains (in part) the final cluster sizes */ __host__ void print_results(int k, int n, int h, double t, points P, centroids C, temp_centroids T) { printf("performed %d iterations in %.2f s, perf: %.2f billion\n", h, t, (double)k * n * h / t * 1e-9); double *xs = (double *)malloc(sizeof(double) * n); double *ys = (double *)malloc(sizeof(double) * n); int offsets[k + 1]; offsets[0] = 0; for (int i = 0; i < k; ++i) { offsets[i + 1] = offsets[i] + T.size[i]; } // pack permutation of input points into clusters in a single pass by using // prefix-sum on the cluster sizes as offsets into our output arrays for (int i = 0; i < n; ++i) { int m = P.membership[i]; xs[offsets[m]] = P.x[i]; ys[offsets[m]++] = P.y[i]; } for (int c = 0; c < k; ++c) { printf("=====cluster %d centered at %lf %lf has size %d=====\n", c, C.x[c], C.y[c], T.size[c]); for (int i = offsets[c] - T.size[c]; i < offsets[c]; ++i) { printf("%lf %lf\n", xs[i], ys[i]); } } free(xs); free(ys); } int main(int argc, char **argv) { int k, n, h; char *file_name; points P; centroids C; temp_centroids T1; temp_centroids T2; cudaEvent_t start, stop; float time; // read in number of points and means assert(argc >= 4); n = atoi(argv[1]); k = atoi(argv[2]); file_name = argv[3]; assert(n <= MAX_POINTS && k <= MAX_MEANS); int blockSize = BLOCK_SIZE; int numBlocks = (n + blockSize - 1) / blockSize; int reductionBlockSize = BLOCK_SIZE; int reductionNumBlocks = (numBlocks + reductionBlockSize - 1) / reductionBlockSize; // make sure that we can support the number of 
points with our two block // reductions. with BLOCK_SIZE = 512, this limit is ~250M points assert(reductionNumBlocks <= 1024); // malloc memory and set up GPU timers cudaMallocManaged(&P.x, sizeof(double) * n); cudaMallocManaged(&P.y, sizeof(double) * n); cudaMallocManaged(&P.membership, sizeof(int) * n); cudaMallocManaged(&C.x, sizeof(double) * k); cudaMallocManaged(&C.y, sizeof(double) * k); cudaMallocManaged(&T1.x_sum, sizeof(double) * numBlocks * k); cudaMallocManaged(&T1.y_sum, sizeof(double) * numBlocks * k); cudaMallocManaged(&T1.size, sizeof(int) * numBlocks * k); cudaMallocManaged(&T2.x_sum, sizeof(double) * reductionNumBlocks * k); cudaMallocManaged(&T2.y_sum, sizeof(double) * reductionNumBlocks * k); cudaMallocManaged(&T2.size, sizeof(int) * reductionNumBlocks * k); cudaEventCreate(&start); cudaEventCreate(&stop); read_data(n, file_name, P); init_centers(n, k, P, C); cudaEventRecord(start, 0); for (h = 0; assignment_changed && h < MAX_ITER; ++h) { // assign points to nearest clusters assignment_changed = 0; assign_clusters<<<numBlocks, blockSize>>> (n, k, P.x, P.y, P.membership, C.x, C.y, T1.x_sum, T1.y_sum, T1.size); cudaDeviceSynchronize(); // two block reductions of cluster sizes and centroid x/y sums reduce_temp_clusters<<<reductionNumBlocks, reductionBlockSize>>> (numBlocks, k, T1.x_sum, T1.y_sum, T1.size, // input values to reduce T2.x_sum, T2.y_sum, T2.size); // reduced output values cudaDeviceSynchronize(); reduce_temp_clusters<<<1, reductionBlockSize>>> (reductionNumBlocks, k, T2.x_sum, T2.y_sum, T2.size, // reduce values from T2 T1.x_sum, T1.y_sum, T1.size); // back into T1 cudaDeviceSynchronize(); // update centroid positions update_clusters<<<1, k>>> (n, k, C.x, C.y, T1.x_sum, T1.y_sum, T1.size); cudaDeviceSynchronize(); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); print_results(k, n, h, time * 1e-3, P, C, T1); // CUDA automatically frees and 
resets device on program exit }
4,780
#include "includes.h"

#define WARP_SIZE 32 // # of threads that are executed together (constant valid on most hardware)

/*
  Simple CUDA example showing:
    1) how to sum the values of an array in parallel
    2) how to add a scalar to values of an array in parallel
    3) how to query GPU hardware
  Compile with a minimum architecture specification of 30, e.g.:
      nvcc example.cu -o example -arch=sm_30
  Author: Jordan Bonilla
*/

// Timers shared by the host-side helpers in this example.
clock_t start, end;

/* Add "scalar" to every element of the input array in parallel. */
// Grid-stride loop: any launch geometry covers all n elements in place.
__global__ void _cuda_add_scalar(int *in, int scalar, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        in[i] += scalar;
    }
}
4,781
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <cuda.h>

// Reset matching state: nobody engaged, every man proposes to his first
// choice next, and every woman's spinlock is released.
void initialize(int *menacc, int *womenacc, int *menpre, int *womenlock, int n) {
    int i;
    for (i = 0; i <= n; i++) {
        menacc[i] = -1;
        womenacc[i] = -1;
        menpre[i] = 1;
        womenlock[i] = 0;
    }
}

// Parallel Gale-Shapley-style stable matching, one thread per man (1-based).
// d_men[j][p] is man j's p-th preference; d_women[w][m] is woman w's rank of
// man m (lower = preferred). d_womenlock holds per-woman spinlocks driven by
// atomicCAS. d_matched / d_matched_ implement the "any proposal happened this
// round?" termination handshake, managed by thread j == 1.
// NOTE(review): the logic below is kept identical to the original, including
// non-volatile reads of *d_matched_ inside the loop — confirm the compiler
// does not cache that load across iterations on the target architecture.
__global__ void stable_matching(int n, int *d_men, int *d_women, int *d_menacc, int *d_womenacc, int *d_menpre, int *d_matched, int *d_matched_, int *d_womenlock) {
    int j = threadIdx.x + 1, idx, ct = 0;
    while (1) {
        __syncthreads();
        if (*d_matched_ == 0) break;
        if (*d_matched_ == 1 && j <= n && d_menacc[j] == -1) {
            idx = d_men[j * (n + 1) + d_menpre[j]];
            *d_matched = 0;
            // locking mechanism: spin until we win woman idx's lock
            bool isSet = false;
            do {
                if (isSet = atomicCAS(&d_womenlock[idx], 0, 1) == 0) {
                    if (d_womenacc[idx] == -1) {
                        // She is free: accept this proposal.
                        d_womenacc[idx] = j;
                        d_menacc[j] = idx;
                    } else if (d_women[idx * (n + 1) + d_womenacc[idx]] > d_women[idx * (n + 1) + j]) {
                        // She prefers j to her current partner: swap.
                        d_menacc[d_womenacc[idx]] = -1;
                        d_menacc[j] = idx;
                        d_womenacc[idx] = j;
                    }
                }
                if (isSet) {
                    atomicCAS(&d_womenlock[idx], 1, 0); // release the lock
                }
            } while (!isSet);
            d_menpre[j]++;
        }
        __syncthreads();
        // Thread 1 drives the termination handshake for the whole block.
        if (j == 1 && *d_matched == 1) {
            *d_matched_ = 0;
        } else if (j == 1 && *d_matched == 0) {
            *d_matched = 1;
        }
        ct++;
    }
    __syncthreads();
}

int main() {
    int n, i, j, k;
    int *d_matched, *d_matched_;
    int *men, *women;
    int *menacc, *womenacc, *menpre, *womenlock;
    int *d_men, *d_women;
    int *d_menacc, *d_womenacc, *d_menpre, *d_womenlock;
    clock_t beg, end;
    double time_taken;

    scanf("%d", &n);

    men = (int *)malloc((n + 1) * (n + 1) * sizeof(int));
    women = (int *)malloc((n + 1) * (n + 1) * sizeof(int));
    menacc = (int *)malloc((n + 1) * sizeof(int));
    womenacc = (int *)malloc((n + 1) * sizeof(int));
    womenlock = (int *)malloc((n + 1) * sizeof(int));
    menpre = (int *)malloc((n + 1) * sizeof(int));

    cudaMalloc(&d_men, (n + 1) * (n + 1) * sizeof(int));
    cudaMalloc(&d_women, (n + 1) * (n + 1) * sizeof(int));
    cudaMalloc(&d_menacc, (n + 1) * sizeof(int));
    cudaMalloc(&d_womenacc, (n + 1) * sizeof(int));
    cudaMalloc(&d_womenlock, (n + 1) * sizeof(int));
    cudaMalloc(&d_menpre, (n + 1) * sizeof(int));
    cudaMalloc(&d_matched, sizeof(int));
    cudaMalloc(&d_matched_, sizeof(int));

    initialize(menacc, womenacc, menpre, womenlock, n);

    beg = clock();
    for (i = 1; i <= n; i++) {
        for (j = 0; j <= n; j++) {
            scanf("%d", &men[i * (n + 1) + j]);
        }
    }
    // Women's input is a preference list; invert it into a rank table so the
    // kernel can compare two suitors in O(1).
    for (i = 1; i <= n; i++) {
        for (j = 0; j <= n; j++) {
            scanf("%d", &k);
            women[i * (n + 1) + k] = j;
        }
    }
    end = clock();
    time_taken = ((double)(end - beg) * 1000000) / CLOCKS_PER_SEC;
    printf("read time : %f us, ", time_taken);

    cudaMemcpy(d_men, men, (n + 1) * (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_women, women, (n + 1) * (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_menacc, menacc, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_womenlock, womenlock, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_womenacc, womenacc, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_menpre, menpre, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);

    int matched = 1;
    cudaMemcpy(d_matched, &matched, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matched_, &matched, sizeof(int), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start, 0);
    stable_matching<<<1, n>>>(n, d_men, d_women, d_menacc, d_womenacc, d_menpre, d_matched, d_matched_, d_womenlock);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);

    cudaMemcpy(menacc, d_menacc, (n + 1) * sizeof(int), cudaMemcpyDeviceToHost);
    printf("compute time : %f us\n", milliseconds * 1000);
    for (j = 1; j <= n; j++) printf("%d %d\n", j, menacc[j]);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    free(men);
    free(women);
    free(menacc);
    free(womenacc);
    free(menpre);
    free(womenlock);
    // BUG FIX: the original passed &d_ptr — the address of the *host pointer
    // variable* — to cudaFree instead of the device pointer itself, so no
    // device memory was ever released (and the calls were invalid).
    cudaFree(d_men);
    cudaFree(d_women);
    cudaFree(d_matched);
    cudaFree(d_matched_);
    cudaFree(d_menacc);
    cudaFree(d_womenacc);
    cudaFree(d_menpre);
    cudaFree(d_womenlock);
    return 0;
}
4,782
#include <stdio.h>
#include <cuda.h>
#include "mytime.h"

// 32 threads store with stride 1: each lane writes a different word,
// so accesses spread across distinct shared-memory banks.
__global__ void bankcheck() {
    __shared__ unsigned s[1024];
    s[1 * threadIdx.x] = threadIdx.x;
}

// 32 threads store with stride 32: with 32 banks of 4-byte words, every lane
// targets the same bank, serializing the warp's accesses.
__global__ void bankcheck2() {
    __shared__ unsigned s[1024];
    s[32 * threadIdx.x] = threadIdx.x;
}

int main() {
    double start, end;

    // Warm-up launch so the first timed run does not pay initialization cost.
    bankcheck<<<1, 32>>>();
    cudaDeviceSynchronize();

    start = rtclock();
    for (int it = 0; it < 1000; ++it) {
        bankcheck<<<1, 32>>>();
        cudaDeviceSynchronize();
    }
    end = rtclock();
    printtime("bank consecutive: ", start, end);

    start = rtclock();
    for (int it = 0; it < 1000; ++it) {
        bankcheck2<<<1, 32>>>();
        cudaDeviceSynchronize();
    }
    end = rtclock();
    printtime("bank strided: ", start, end);

    return 0;
}
4,783
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Each thread announces its own index within the (single) block.
__global__ void addThreadId() {
    printf("Hello world! My threadId is %d\n", threadIdx.x);
}

int main() {
    const int threads = 256;
    // One block of `threads` threads; device printf output is drained
    // when the host synchronizes below.
    addThreadId<<<1, threads>>>();
    cudaDeviceSynchronize();
    return 0;
}
4,784
#pragma region License /* The MIT License Copyright (c) 2009 Sky Morey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma endregion #pragma once /* #include <cuda.h>; #include "Core.h"; #include "System\cuFalloc.cu" using namespace System; template class TreeSet<int>; __device__ int system_COMPARE(unsigned __int32 shard, void* x, void* y) { int a = *((int*)x); int b = *((int*)y); return (a < b ? -1 : (a > b ? 
1 : 0)); } __global__ void TestTreeSet(fallocDeviceHeap* deviceHeap) { fallocInit(deviceHeap); fallocContext* ctx = fallocCreateCtx(deviceHeap); fallocContext* stack = fallocCreateCtx(deviceHeap); falloc(stack, 70, false); // TreeSet<int> treeSet; treeSet.xtor(0, ctx); treeSet.Add(5); treeSet.Add(3); treeSet.Add(1); treeSet.Add(2); treeSet.Add(7); treeSet.Add(10); // treeSet.EnumeratorBegin(stack); while (treeSet.EnumeratorMoveNext(stack)) cuPrintf("%d\n", treeSet.Current); treeSet.EnumeratorEnd(stack); // fallocDisposeCtx(stack); fallocDisposeCtx(ctx); } int main() { cudaFallocHeap heap = cudaFallocInit(2048); cudaPrintfInit(256000); // test TestTreeSet<<<1, 1>>>(heap.deviceHeap); // free and exit cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); cudaFallocEnd(heap); printf("\ndone.\n"); scanf_s("%c"); return 0; } */
4,785
#include <cuda.h> #include <iostream> #include <random> #include <chrono> #define N 300000 #define checkCudaErrors(msg) err_msg(msg, __LINE__) void err_msg(cudaError_t msg, int x) { if (msg != cudaSuccess) { std::cerr << "In line: " << x << ". error: " << cudaGetErrorString(msg) << std::endl; exit(1); } return; } // void debug(int *x) { // int *h_x = new float[N]; // checkCudaErrors(cudaMemcpy(h_x, x, sizeof(float)*N, cudaMemcpyDeviceToHost)); // for (float i = 0; i < N; i++) { // std::cout << h_x[i] << " "; // } // std::cout << std::endl; // delete[] h_x; // } __global__ void seperateMaxMin(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; float tmp; for (int i = index; i < half_size; i += stride) { if (i + half_size >= full_size) break; if (x[i] < x[i+half_size]) { tmp = x[i]; x[i] = x[i+half_size]; x[i+half_size] = tmp; } if (y[i] < y[i+half_size]) { tmp = y[i]; y[i] = y[i+half_size]; y[i+half_size] = tmp; } if (z[i] < z[i+half_size]) { tmp = z[i]; z[i] = z[i+half_size]; z[i+half_size] = tmp; } } } __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) { x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i]; y[i] = (i + half_size < full_size) ? ((y[i] >= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i]; z[i] = (i + half_size < full_size) ? ((z[i] >= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i]; } } __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) { x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? 
x[i] : x[i + half_size]) : x[i]; y[i] = (i + half_size < full_size) ? ((y[i] <= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i]; z[i] = (i + half_size < full_size) ? ((z[i] <= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i]; } } int main() { float *max_x, *max_y, *max_z, *x, *y, *z, *d_x, *d_y, *d_z; float x_max, x_min, y_max, y_min, z_max, z_min; std::chrono::time_point<std::chrono::system_clock> start, end; double time; x = new float[N]; y = new float[N]; z = new float[N]; std::mt19937 mt(10); for (int i = 0; i < N; i++) { x[i] = mt() / 100000.0; y[i] = mt() / 100000.0; z[i] = mt() / 100000.0; if (i == 0) { x_max = x_min = x[i]; y_max = y_min = y[i]; z_max = z_min = z[i]; } else { x_max = (x_max >= x[i]) ? x_max : x[i]; x_min = (x_min <= x[i]) ? x_min : x[i]; y_max = (y_max >= y[i]) ? y_max : y[i]; y_min = (y_min <= y[i]) ? y_min : y[i]; z_max = (z_max >= z[i]) ? z_max : z[i]; z_min = (z_min <= z[i]) ? z_min : z[i]; } } int points_num = N; std::cout << "correct x max: " << x_max << std::endl; std::cout << "correct x min: " << x_min << std::endl; std::cout << "correct y max: " << y_max << std::endl; std::cout << "correct y min: " << y_min << std::endl; std::cout << "correct z max: " << z_max << std::endl; std::cout << "correct z min: " << z_min << std::endl; checkCudaErrors(cudaMalloc(&d_x, sizeof(float) * points_num)); checkCudaErrors(cudaMalloc(&d_y, sizeof(float) * points_num)); checkCudaErrors(cudaMalloc(&d_z, sizeof(float) * points_num)); checkCudaErrors(cudaMemcpy(d_x, x, sizeof(float) * points_num, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_y, y, sizeof(float) * points_num, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_z, z, sizeof(float) * points_num, cudaMemcpyHostToDevice)); start = std::chrono::system_clock::now(); checkCudaErrors(cudaMalloc((void**)&max_x, sizeof(float)*points_num)); checkCudaErrors(cudaMalloc((void**)&max_y, sizeof(float)*points_num)); checkCudaErrors(cudaMalloc((void**)&max_z, sizeof(float)*points_num)); 
checkCudaErrors(cudaMemcpy(max_x, d_x, sizeof(float)*points_num, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(max_y, d_y, sizeof(float)*points_num, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(max_z, d_z, sizeof(float)*points_num, cudaMemcpyDeviceToDevice)); // debug(max_x); int half_points_num = (points_num - 1) / 2 + 1; int block_x = (half_points_num > 1024) ? 1024 : half_points_num; int grid_x = (half_points_num - 1) / block_x + 1; seperateMaxMin<<<grid_x, block_x>>>(max_x, max_y, max_z, points_num, half_points_num); checkCudaErrors(cudaGetLastError()); // debug(max_x); int min_points_num = half_points_num; int min_half_points_num; float *min_x = max_x + (points_num / 2); float *min_y = max_y + (points_num / 2); float *min_z = max_z + (points_num / 2); points_num = half_points_num; while (points_num > 1) { half_points_num = (points_num - 1) / 2 + 1; block_x = (half_points_num > 1024) ? 1024 : half_points_num; grid_x = (half_points_num - 1) / block_x + 1; findMax<<<grid_x, block_x>>>(max_x, max_y, max_z, points_num, half_points_num); checkCudaErrors(cudaGetLastError()); points_num = half_points_num; } while (min_points_num > 1) { min_half_points_num = (min_points_num - 1) / 2 + 1; block_x = (min_half_points_num > 1024) ? 
1024 : min_half_points_num; grid_x = (min_half_points_num - 1) / block_x + 1; findMin<<<grid_x, block_x>>>(min_x, min_y, min_z, min_points_num, min_half_points_num); checkCudaErrors(cudaGetLastError()); min_points_num = min_half_points_num; } checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(&x_max, max_x, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&x_min, min_x, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&y_max, max_y, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&y_min, min_y, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&z_max, max_z, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&z_min, min_z, sizeof(float), cudaMemcpyDeviceToHost)); end = std::chrono::system_clock::now(); time =std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0; std::cout << "GPU x max: " << x_max << std::endl; std::cout << "GPU x min: " << x_min << std::endl; std::cout << "GPU y max: " << y_max << std::endl; std::cout << "GPU y min: " << y_min << std::endl; std::cout << "GPU z max: " << z_max << std::endl; std::cout << "GPU z min: " << z_min << std::endl; std::cout << "time: " << time << "ms." << std::endl; checkCudaErrors(cudaFree(max_x)); checkCudaErrors(cudaFree(max_y)); checkCudaErrors(cudaFree(max_z)); delete[] x; delete[] y; delete[] z; return 0; }
4,786
#include "includes.h"

// Gathers every second element of `in` into `out`:
//   out[g] = in[2 * g]  where g = threadIdx.x + threads * blockIdx.x.
// `threads` must equal the launch's blockDim.x (it flattens the index).
// FIX: the original ignored `size` entirely, so any launch geometry that
// overshot the array wrote out of bounds. `size` now bounds the flat index
// (assumed to be the element count of `out` — TODO confirm with callers).
__global__ void vecmabite(int *out, int *in, int threads, std::size_t size) {
    std::size_t g = threadIdx.x + static_cast<std::size_t>(threads) * blockIdx.x;
    if (g < size) {
        out[g] = in[2 * g];
    }
}
4,787
#include "includes.h"

// One Floyd-Warshall-style sweep over the pivot rows owned by process `rank`.
// Arr1 holds the current distance matrix (row-major, N columns); Arr2 receives
// the relaxed distances. `rows` is the number of pivot rows assigned to each
// rank, so this rank owns pivots [rank*rows, (rank+1)*rows). Each thread owns
// one (row, col) cell of the output.
__global__ void ShortestPath1(float *Arr1, float *Arr2, int N, int rows, int rank) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int offset = rows * rank;       // first pivot row owned by this rank
    int cell = row * N + col;

    Arr2[cell] = Arr1[cell];

    // The locally stored pivot-row block starts at row 0, hence the
    // (k - offset) remap when indexing the k-th pivot row.
    for (int k = offset; k < offset + rows; k++) {
        int ik = row * N + k;
        int kj = (k - offset) * N + col;
        float through_k = Arr1[ik] + Arr1[kj];
        // Note: the comparison is against Arr1 (the unrelaxed input) on every
        // iteration — this matches the original behavior exactly.
        if (Arr1[cell] > through_k) {
            Arr2[cell] = through_k;
        }
        // Safe: the loop trip count is identical for every thread, so all
        // threads reach this barrier the same number of times.
        __syncthreads();
    }
}
4,788
#include "includes.h"

/*
  Shortest-path edge relaxation driven by a work queue of edge ids.

  Sample input file format (for the host-side driver):
    line 1 : number of nodes (int)
    line 2 : number of edges (int)
    next E : "u v w" edge triples (two ints, one double)
    last   : start node
  No error conditions are checked.
*/

using namespace std;

// Edge list in structure-of-arrays form.
typedef struct {
    int* startPoints;
    int* endPoints;
    double* weights;
}Edge;

// One thread per queue slot: relax the edge whose id sits in queue[threadIdx.x].
// If going through the edge's start node improves the tentative distance of
// its end node, record the shorter distance and the new parent.
// NOTE(review): the read-compare-write on resultWeightArray/parentArray is not
// atomic, so concurrent relaxations of edges sharing an end node can race —
// this matches the original code; flagged, not changed.
__global__ void processQueueKernel(int *parentArray, double *resultWeightArray, const int* queue, const int *startPoints, const int *endPoints, const double *weightArray) {
    int slot = threadIdx.x;
    int edge = queue[slot];

    int u = startPoints[edge];
    int v = endPoints[edge];
    double w = weightArray[edge];
    double du = resultWeightArray[u];

    if (du + w < resultWeightArray[v]) {
        resultWeightArray[v] = du + w;
        parentArray[v] = u;
    }
}
4,789
#include "includes.h"

// C = alpha * A * B + beta * C with row-major A (M x K), B (K x N), C (M x N).
// One thread computes one C element; the guard lets the grid round up past
// the matrix edges (the original had no guard and assumed an exact fit).
__global__ void sgemm_kernel(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= M || col >= N)
        return;

    float element_c = 0.f;
    // BUG FIX: B's row stride is N (B is K x N), but the original indexed
    // B[e * K + col], which is only correct in the square case N == K.
    for (int e = 0; e < K; e++)
        element_c += A[row * K + e] * B[e * N + col];

    C[row * N + col] = alpha * element_c + beta * C[row * N + col];
}
4,790
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#define N 10000
#define TPB 128 /* threads per block */

// c[i] = a[i] + b[i] for i in [0, N).
// Flat global index; the guard covers the padded final block, so the launch
// geometry may round up past N without touching memory it does not own.
__global__ void add(int *a, int *b, int *c) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}

int main(void) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Allocate the three device vectors.
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Fill the host inputs.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    // Stage inputs on the device.
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // Ceil-divide N by TPB so every element is covered by exactly one thread.
    printf("N=%d, TPB=%d, Nr Blocos=%d \n", N, TPB, ((N + TPB - 1)/ TPB));
    add<<<(N + TPB - 1) / TPB, TPB>>>(dev_a, dev_b, dev_c);

    // Bring the result back (blocking copy doubles as synchronization).
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    // Spot-check every 1000th element.
    for (int i = 0; i < N; i++) {
        if (i % 1000 == 0)
            printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    getchar();
    return 0;
}
4,791
#include <fstream>
#include <iostream>
#include <cuda_runtime.h>
// C++ Program for Floyd Warshall Algorithm
//#include <bits/stdc++.h>
#include <chrono>
#include <ctime>

using namespace std;

/* Define Infinite as a large enough value. This value will be used for
   vertices not connected to each other */
#define INF 99999

// CPU reference implementation of the Floyd-Warshall all-pairs shortest
// path algorithm. dist (nNodes x nNodes) is updated in place.
// NOTE(review): the graph parameter is never used in this function.
void floydWarshall (int** graph, int** dist, int nNodes)
{
    /* Add all vertices one by one to the set of intermediate vertices.
       ---> Before start of an iteration, we have shortest distances between all
       pairs of vertices such that the shortest distances consider only the
       vertices in set {0, 1, 2, .. k-1} as intermediate vertices.
       ----> After the end of an iteration, vertex no. k is added to the set of
       intermediate vertices and the set becomes {0, 1, 2, .. k} */
    for (int k = 0; k < nNodes; k++)
    {
        // Pick all vertices as source one by one
        for (int i = 0; i < nNodes; i++)
        {
            // Pick all vertices as destination for the
            // above picked source
            for (int j = 0; j < nNodes; j++)
            {
                // If vertex k is on the shortest path from
                // i to j, then update the value of dist[i][j]
                if (dist[i][k] + dist[k][j] < dist[i][j])
                    dist[i][j] = dist[i][k] + dist[k][j];
            }
        }
    }
}

// Solves the all-pairs shortest path problem using Floyd Warshall algorithm.
// GPU kernel performing ONE k-iteration over the flattened nNodes*nNodes
// distance matrix; the host loop launches it once per k with a
// cudaDeviceSynchronize between launches (required: iteration k+1 depends
// on the results of iteration k).
// NOTE(review): graph is a host pointer passed by value and is never
// dereferenced here — it could be removed from the signature.
// NOTE(review): the launch in main() is one-dimensional, so blockDim.y and
// gridDim.y are both 1 and the inner j loop runs serially per thread; the
// result is still correct, just not parallel over j.
__global__ void vecFloydWarshall(int** graph, int* dist, int nNodes, int k)
{
    /* Add all vertices one by one to the set of intermediate vertices.
       ---> Before start of an iteration, we have shortest distances between all
       pairs of vertices such that the shortest distances consider only the
       vertices in set {0, 1, 2, .. k-1} as intermediate vertices.
       ----> After the end of an iteration, vertex no. k is added to the set of
       intermediate vertices and the set becomes {0, 1, 2, .. k} */
    // Grid-stride loop over source vertices i
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nNodes; i += blockDim.x * gridDim.x) {
        // (Serial under the current 1-D launch) loop over destinations j
        for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < nNodes; j += blockDim.y * gridDim.y) {
            // Pick all vertices as destination for the
            // above picked source
            // If vertex k is on the shortest path from
            // i to j, then update the value of dist[i][j]
            //printf("j: %d\n",j);
            if (dist[i*nNodes+k] + dist[k*nNodes+j] < dist[i*nNodes+j]){
                dist[i*nNodes+j] = dist[i*nNodes+k] + dist[k*nNodes+j];
                //printf("i %d, j %d\n",i,j);
            }
        }
    }
}

/* A utility function to print the solution matrix */
void printSolution(int** dist, int nNodes)
{
    for (int i = 0; i < nNodes; i++)
    {
        for (int j = 0; j < nNodes; j++)
        {
            if (dist[i][j] == INF)
                cout << "INF" << " ";
            else
                cout << dist[i][j] << " ";
        }
        cout << endl;
    }
}

// This code is contributed by rathbhupendra
int main(int argc, char **argv){
    int** graph;
    int** dist;
    int a, b, w, nNodes;
    int* device_dist;
    int* aux_dist;

    // NOTE(review): if no input file is given (argc <= 1), nNodes and graph
    // are left uninitialized and everything below is undefined behavior.
    if (argc > 1) {
        //cout << "input file is " << argv[1] << endl;
        // Input format: first the node count, then "a b w" edge triples.
        // Edges are stored symmetrically (undirected graph).
        ifstream inputfile(argv[1]);
        inputfile >> nNodes;
        graph = new int*[nNodes];
        for (int i = 0; i < nNodes; ++i) {
            graph[i] = new int[nNodes];
            for (int j = 0; j < nNodes; ++j)
                graph[i][j] = INF;
        }
        while (inputfile >> a >> b >> w) {
            graph[a][b] = w;
            graph[b][a] = w;
        }
    }

    dist = new int*[nNodes];
    aux_dist = new int[nNodes * nNodes];
    for (int i = 0; i < nNodes; ++i)
        dist[i] = new int[nNodes];

    /* dist[][] will be the output matrix that will finally have the shortest
       distances between every pair of vertices */
    // NOTE(review): k declared here is shadowed by the loop variable below
    // and never used.
    int i, j, k;

    /* Initialize the solution matrix same as input graph matrix. Or we can
       say the initial values of shortest distances are based on shortest
       paths considering no intermediate vertex. aux_dist is the flattened
       (row-major) copy handed to the GPU. */
    for (i = 0; i < nNodes; i++) {
        for (j = 0; j < nNodes; j++) {
            dist[i][j] = graph[i][j];
            aux_dist[i*nNodes+j] = graph[i][j];
        }
    }
    //cout << graph[0][1] << " vs " << aux_dist[1] << "at position (" << 0 << "," << 1 << ")\n";

    cudaMalloc(&device_dist, nNodes * nNodes * sizeof(int));
    cudaMemcpy(device_dist, aux_dist, nNodes * nNodes * sizeof(int),cudaMemcpyHostToDevice);

    int blockSize = 256;
    int numBlocks = (nNodes + blockSize - 1) / blockSize;

    // Time only the GPU relaxation loop. One launch per k; the synchronize
    // enforces the dependency between successive k-iterations.
    auto start = std::chrono::system_clock::now();
    for (int k = 0; k < nNodes; ++k){
        vecFloydWarshall<<<numBlocks, blockSize>>>(graph, device_dist, nNodes, k);
        cudaDeviceSynchronize();
        //cout << "currently in " << k << endl;
    }
    auto end = std::chrono::system_clock::now();
    auto timeElapsed = (end - start);

    cudaMemcpy(aux_dist, device_dist, nNodes * nNodes * sizeof(int),cudaMemcpyDeviceToHost);

    // CPU reference run (result is overwritten by the GPU result below; it
    // was used by the commented-out comparison block).
    floydWarshall (graph, dist, nNodes);

    /*int count = 0;
    for (i = 0; i < nNodes; i++) {
        for (j = 0; j < nNodes; j++) {
            if (dist[i][j] != aux_dist[i*nNodes+j]){
                count += 1;
                cout << "cpu: " << dist[i][j] << " vs gpu: " << aux_dist[i*nNodes+j] << endl;
            }
        }
    }
    cout << "Error count between CPU and GPU: " << count << endl;*/

    // Copy the GPU result back into the 2-D dist matrix.
    for (i = 0; i < nNodes; i++) {
        for (j = 0; j < nNodes; j++) {
            dist[i][j] = aux_dist[i*nNodes+j];
        }
    }
    //cout << graph[0][1] << " vs " << aux_dist[1] << "at position (" << 0 << "," << 1 << ")\n";

    // Print the shortest distance matrix
    //printSolution(dist, nNodes);

    // NOTE(review): duration_cast to seconds truncates; sub-second runs print 0.
    // NOTE(review): graph/dist/aux_dist/device_dist are never freed.
    auto sec = std::chrono::duration_cast<std::chrono::seconds>(timeElapsed).count();
    cout << "Computation time: " << sec << "\n";
    return 0;
}
4,792
#include "includes.h"

// Final step of a batched solver (per the original comments): for each of
// the nBatch systems, updates the last two rows of data with
//   lambda = d^~ - transpose(g) * inverse(E) * d_hat
// followed by x^~ = omega * lambda, where omega is the 2x2 matrix
// {omega_11 omega_12; omega_21 omega_22}.
// Layout: data holds nx rows of nBatch values, element (row, batch) at
// data[row * nBatch + batch]. One thread per batch element.
__global__ static void solveEnd ( double* data, const double a, const double b, const double d, const double e, const double omega_11, const double omega_12, const double omega_21, const double omega_22, const int nx, const int nBatch )
{
    // Batch (column) index handled by this thread
    int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;

    // FIX: guard against surplus threads when gridDim.x * blockDim.x does
    // not divide nBatch evenly; the original read and wrote out of bounds.
    if (globalIdx >= nBatch)
        return;

    // Compute lambda = d^~ - transpose(g) * inverse(E) * d_hat
    double newNx2 = data[(nx - 2) * nBatch + globalIdx]
                  - (e * data[globalIdx]
                   + a * data[(nx - 4) * nBatch + globalIdx]
                   + b * data[(nx - 3) * nBatch + globalIdx]);
    double newNx1 = data[(nx - 1) * nBatch + globalIdx]
                  - (d * data[globalIdx]
                   + e * data[nBatch + globalIdx]
                   + a * data[(nx - 3) * nBatch + globalIdx]);

    // Compute x^~ = omega * lambda
    data[(nx - 2) * nBatch + globalIdx] = omega_11 * newNx2 + omega_12 * newNx1;
    data[(nx - 1) * nBatch + globalIdx] = omega_21 * newNx2 + omega_22 * newNx1;
}
4,793
#include <stdio.h>
#include <stdlib.h>

#define SIZE 8

// Kernel: result[tid] = average of the input pair (array[2*tid], array[2*tid+1]).
// Launched with SIZE/2 threads in a single block, one thread per output.
__global__ void addArray(double * result, double * array);

int main(){
    cudaEvent_t start,stop;
    float elapsedtime;

    // the moment at which we start measuring the time
    cudaEventCreate(&start);
    cudaEventRecord(start,0);

    double array[SIZE];
    double result[SIZE/2];
    int i;
    // initialize input to 1..SIZE
    for (i = 0; i < SIZE; i++){
        array[i] = i + 1;
        //printf("%lf ", array[i] );
    }

    // pointers to the arrays to be put in cuda memory
    double *array_cuda;
    double *result_cuda;

    // allocate memory in cuda device
    cudaMalloc((void **)&array_cuda, sizeof(double)* SIZE);
    cudaMalloc((void **)&result_cuda, sizeof(double)* (SIZE/2));

    // Copy contents from main memory to device memory
    cudaMemcpy(array_cuda, array, sizeof(double)*SIZE, cudaMemcpyHostToDevice);

    // call the cuda kernel: one block, one thread per output pair
    addArray <<< 1, SIZE/2 >>> (result_cuda, array_cuda);

    // Copy results from device to host (synchronous, so the kernel has finished)
    cudaMemcpy(result, result_cuda, sizeof(double)* (SIZE/2), cudaMemcpyDeviceToHost);

    printf("Answer is : ");
    for(i=0; i<SIZE/2 ;i++){
        printf("%.1lf ",result[i]);
    }

    // the moment at which we stop measuring time
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    // Find and print the elapsed time
    cudaEventElapsedTime(&elapsedtime,start,stop);
    printf("Time spent for operation is %.10f seconds\n",elapsedtime/(float)1000);

    // FIX: the original leaked the device buffers and the timing events.
    cudaFree(array_cuda);
    cudaFree(result_cuda);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}

__global__ void addArray(double *result_cuda, double *array_cuda){
    int tid = threadIdx.x;
    // Despite the name, this computes the mean of each adjacent input pair.
    result_cuda[tid] = (array_cuda[2*tid] + array_cuda[2*tid+1]) / 2;
}
4,794
#include<iostream>
using namespace std;

// Computes Pd = Md * Nd for square Width x Width row-major matrices.
// GENERALIZED: indices now include the block offset and are bounds-checked,
// so the kernel works with any grid/block shape. The original used only
// threadIdx and therefore required a single block, which breaks for
// Width > 32 (Width*Width exceeds the 1024-threads-per-block limit).
__global__ void MatrixMulKernel(float *Md,float *Nd,float *Pd,int Width){
    int tx = blockIdx.x*blockDim.x + threadIdx.x;  // column of Pd
    int ty = blockIdx.y*blockDim.y + threadIdx.y;  // row of Pd
    if (tx >= Width || ty >= Width)
        return;
    float Pvalue = 0;
    for(int k = 0; k < Width; k++){
        float Mdelement = Md[ty*Width+k];
        float Ndelement = Nd[k*Width+tx];
        Pvalue += Mdelement*Ndelement;
    }
    Pd[ty*Width+tx] = Pvalue;
}

// Host wrapper: copies M and N to the device, launches the kernel, and
// copies the Width x Width product back into P.
void MatrixMultiplication(float *M,float *N,float *P,int Width){
    int size = Width*Width*sizeof(float);
    float *Md,*Nd,*Pd;
    cudaMalloc((void **)&Md,size);
    cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Nd,size);
    cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Pd,size);
    // 16x16 tiles with ceiling division so any Width is fully covered;
    // the kernel's bounds check discards surplus threads.
    const int TILE = 16;
    dim3 dimBlock(TILE,TILE);
    dim3 dimGrid((Width+TILE-1)/TILE,(Width+TILE-1)/TILE);
    MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,Width);
    // cudaMemcpy is synchronous, so the kernel has finished when it returns.
    cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}

int main(){
    float M[3][3]={1,2,3,4,5,6,7,8,9};
    float N[3][3]={9,8,7,6,5,4,3,2,1};
    float P[3][3]={0};
    MatrixMultiplication(*M,*N,*P,3);
    cout << "P[3][3] = " << endl;
    for(int m=0;m<3;m++){
        for(int n=0;n<3;n++){
            cout << P[m][n] << " ";
        }
        cout << endl;
    }
}
4,795
#include <stdio.h>
#include <cuda_runtime_api.h>

namespace LSW_CUDA{

// Kernel: one thread per element, each writing v1v2[idx] = v1[idx] * v2[idx].
// Threads past the end of the vectors do nothing.
template<typename real>
__global__ void EleProductKernelFun(const real* v1, const real* v2, real* v1v2,int N){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N){
        return;
    }
    v1v2[idx] = v1[idx]*v2[idx];
}

/**
 * Computes the element-wise product of two device vectors on the GPU,
 * v1v2[i] = v1[i] * v2[i], e.g. for a diagonal-matrix / vector product.
 *
 * @param d_v1   first input vector (device memory)
 * @param d_v2   second input vector (device memory)
 * @param d_v1v2 output vector (device memory, allocated by the caller)
 * @param len    common length of all three vectors
 *
 * @return true when the parameters are valid and the kernel launch succeeds.
 */
template<typename real>
bool CuEleProduct(const real *d_v1,const real *d_v2,real *d_v1v2,int len){
    // Nothing to do for an empty vector.
    if(len == 0){
        return true;
    }

    // Validate the arguments: all pointers non-null, positive length.
    const bool argsOk = (d_v1 != NULL) && (d_v2 != NULL) && (d_v1v2 != NULL) && (len > 0);
    if (!argsOk){
        printf("\nerror: CuEleProduct(...) invalid parameters!\n");
        return false;
    }

    // One thread per element, 256 threads per block, ceiling division.
    const int blockThreads = 256;
    const int gridBlocks = (len + blockThreads - 1) / blockThreads;
    EleProductKernelFun<<<gridBlocks, blockThreads>>>(d_v1, d_v2, d_v1v2, len);

    // Report any launch failure.
    const cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err){
        return true;
    }
    printf("\nerror: CuEleProduct(...) kernel function EleProductKernelFun(..) failed!\n");
    printf("cuda error:%s",cudaGetErrorString(err));
    return false;
}

// C-linkage entry point for double-precision vectors.
extern"C" bool EleProductD(const double *d_v1,const double *d_v2,double *d_v1v2,int len){
    return CuEleProduct<double>(d_v1,d_v2,d_v1v2,len);
}

// C-linkage entry point for single-precision vectors.
extern"C" bool EleProductF(const float *d_v1,const float *d_v2,float *d_v1v2,int len){
    return CuEleProduct<float>(d_v1,d_v2,d_v1v2,len);
}

}//end of namespace
4,796
#include "includes.h"

// Accumulates the partial vectors in a into b, component-wise for x, y, z
// (w is left untouched, as in the original): b[i] += a[i].
// One thread per element; threads with i >= nextsize exit immediately.
//
// FIX: the original staged a[i] through dynamically-allocated shared memory,
// but each thread only wrote and read back its own slot (no inter-thread
// exchange, no __syncthreads), so the shared-memory round trip was a no-op.
// It is removed here; the kernel no longer needs dynamic shared memory,
// and launches that still pass a shared-memory size remain valid.
__global__ void sum_partial(double4 *a, double4 *b, unsigned int nextsize){
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i >= nextsize)
        return;

    double4 acc = b[i];
    const double4 add = a[i];
    acc.x += add.x;
    acc.y += add.y;
    acc.z += add.z;
    b[i] = acc;
}
4,797
/*
 Please use "inp.txt" as input file and output/write your results of each
 question to a separate file named as "q1a.txt", "q1b.txt" etc. The output
 file should have the same format as the input file. You only need to submit
 three source code files, e.g. q1.cu, q2.cu and q3.cu and the input file
 "inp.txt". Don't submit any other files.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define THREADNUM 16
#define THREADS_PER_BLOCK 1024

// In-place tree reduction that leaves the minimum of array[0..n) in
// array[0] and writes it to *answer. Each thread owns an even index and
// compacts the pairwise minimum of (array[index], array[index+1]) into
// array[index/2] each round, halving the active region.
// NOTE(review): __syncthreads() only synchronizes WITHIN a block; the launch
// in main() uses multiple blocks when n > 2*THREADS_PER_BLOCK, in which case
// rounds are not ordered across blocks and the result may be wrong.
// NOTE(review): for odd region sizes, array[index+1] can read one element
// past the active region — presumably benign for a min over stale partial
// minima, but confirm against the input sizes used.
// NOTE(review): the reduction destroys the contents of array, which matters
// to the later last_digit launch (see below).
__global__ void min(int *array, int *answer, int n){
    int index = (threadIdx.x + blockIdx.x * blockDim.x)*2;
    int d, val;
    for (d = n; d >= 1; d = d/2){
        if (index < d){
            // val = min(array[index], array[index+1])
            val = array[index];
            if (array[index+1] < val)
                val = array[index+1];
        }
        __syncthreads();
        if (index < d){
            // compact the pair minimum into the front half
            array[index/2] = val;
        }
        __syncthreads();
    }
    // Every thread stores the same value; the racy writes are benign.
    *answer = array[0];
    /*
    int chunk_size = n/blockDim.x;
    int i, localmin = 10000;
    __shared__ int min[THREADNUM];
    if (threadIdx.x < blockDim.x-1){
        for (i = threadIdx.x * chunk_size; i < threadIdx.x * chunk_size + chunk_size; i++){
            if(array[i] < localmin)
                localmin = array[i];
        }
    }
    else{
        for (i = threadIdx.x * chunk_size; i < n; i++){
            if(array[i] < localmin)
                localmin = array[i];
        }
    }
    min[threadIdx.x] = localmin;
    __syncthreads();
    if(threadIdx.x == 0){
        int globalmin = 10000;
        for(i = 0; i < blockDim.x; i ++)
            if(min[i] < globalmin)
                globalmin = min[i];
        *answer = globalmin;
    }
    */
}

// One block per element: b[i] = last decimal digit of array[i].
__global__ void last_digit(int *array, int *b){
    b[blockIdx.x] = array[blockIdx.x] % 10;
}

int main(void) {
    // First pass over the file: count commas to size the input array.
    // NOTE(review): c is declared char; on platforms where char is unsigned,
    // c == EOF never holds and this loop never terminates — use int instead.
    int numcomma = 0;
    char c;
    FILE* stream = fopen("inp.txt", "r");
    while(1){
        c = fgetc(stream);
        if (c == EOF)
            break;
        if (c == ',')
            numcomma ++;
    }
    fclose(stream);

    // Variable-length array sized by the comma count (numcomma+1 values).
    int array[numcomma+1];

    // Second pass: parse the comma-separated integers.
    stream = fopen("inp.txt", "r");
    int i;
    for (i = 0; i <= numcomma; i ++){
        fscanf(stream, "%d,", &array[i]);
    }
    fclose(stream);

    int *d_array;
    int answer;
    int *d_answer;
    int size = sizeof(array);
    int *b = (int *) malloc(size);
    int *d_b;

    // Allocate space for device copies of array
    cudaMalloc((void **)&d_array, size);
    cudaMalloc((void **)&d_answer, sizeof(int));
    cudaMalloc((void **)&d_b, size);
    cudaMemcpy(d_array, &array, size, cudaMemcpyHostToDevice);

    // Reduce to the minimum (ceiling-division block count).
    min<<<(numcomma + THREADS_PER_BLOCK)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_array, d_answer, numcomma+1);
    cudaMemcpy(&answer, d_answer, sizeof(int), cudaMemcpyDeviceToHost);

    // NOTE(review): min has already overwritten d_array in place, so the
    // last digits below are computed from the reduced scratch data, NOT the
    // original input — presumably the input should be re-copied (or a
    // separate buffer used) before this launch; verify intended behavior.
    last_digit<<<(numcomma+1), 1>>>(d_array, d_b);
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);

    cudaFree(d_answer);
    cudaFree(d_array);
    cudaFree(d_b);

    // NOTE(review): the output files are never fclose'd and fopen results
    // are not checked for NULL.
    FILE *q1a = fopen("q1a.txt", "w+");
    fprintf(q1a, "Min: %d\n", answer);
    FILE *q1b = fopen("q1b.txt", "w+");
    for (i = 0; i <= numcomma; i ++){
        fprintf(q1b, "%d", b[i]);
        if (i < numcomma)
            fprintf(q1b, ", ");
    }
    free(b);
}
4,798
//
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s): Kelan Riley
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch

// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>

// problem size (vector length) N
// remember that a vector is just a series of values that we'd like to refer to
// as one thing, so we can refer to the whole series by just saying the word
// vector
static const int N = 134215680;

// Number of terms to use when approximating sine
static const int TERMS = 6;

// kernel function (CPU - Do not modify)
// Serial reference: output[i] = Taylor-series approximation of sin(input[i]),
// i.e. x - x^3/3! + x^5/5! - ..., using TERMS correction terms.
void sine_serial(float *input, float *output)
{
    // loop counter
    int i;

    // iterate as many times as there are numbers to work on
    for (i=0; i<N; i++)
    {
        // fetch ith number in the input array
        float value = input[i];
        // first correction term starts at x^3 / 3!
        float numer = input[i] * input[i] * input[i];
        int denom = 6; // 3!
        int sign = -1;
        // this loops TERMS number of times, alternating the sign each term
        for (int j=1; j<=TERMS;j++)
        {
            value += sign * numer / denom;
            numer *= input[i] * input[i];
            denom *= (2*j+2) * (2*j+3);
            sign *= -1;
        }
        output[i] = value;
    }
}

// kernel function (CUDA device)
// One thread per element; performs the same Taylor-series computation as
// sine_serial for the single element at idx.
// NOTE(review): the block size is hard-coded as 1024 instead of using
// blockDim.x; this only works because the launches in main() use exactly
// 1024 threads per block. There is also no bounds check on idx, so the
// launch shape (65535 blocks x 1024 threads == N/2 elements) must cover
// each half-array exactly — it does here, but the kernel is fragile to any
// change of N or the launch configuration.
__global__ void sine_parallel(float *input, float *output)
{
    // the thread id of the current thread that is running this kernel
    // threadIdx is a dim3 structure with x, y, and z fields (up to three dimensions)
    // make sure to compute the idx as the block number offset by the thread id in the block!
    int idx = blockIdx.x * 1024 + threadIdx.x;
    // fetch ith number in the input array
    float value = input[idx];
    // first correction term starts at x^3 / 3!
    float numer = input[idx] * input[idx] * input[idx];
    int denom = 6; // 3!
    int sign = -1;
    // this loops TERMS number of times
    for (int j=1; j<=TERMS;j++)
    {
        value += sign * numer / denom;
        numer *= input[idx] * input[idx];
        denom *= (2*j+2) * (2*j+3);
        sign *= -1;
    }
    // write out the result into the output array
    output[idx] = value;
}

// BEGIN: timing and error checking routines (do not modify)

// Returns the current time in microseconds
long long start_timer() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000 + tv.tv_usec;
}

// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
    std::cout << std::setprecision(5);
    std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
    return end_time - start_time;
}

// NOTE(review): cudaThreadSynchronize is deprecated (cudaDeviceSynchronize
// is the replacement), and this helper is never actually called from main(),
// so kernel errors go unreported.
void checkErrors(const char label[])
{
    // we need to synchronise first to catch errors due to
    // asynchroneous operations that would otherwise
    // potentially go unnoticed
    cudaError_t err;
    err = cudaThreadSynchronize();
    if (err != cudaSuccess)
    {
        char *e = (char*) cudaGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
    }
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        char *e = (char*) cudaGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
    }
}

// END: timing and error checking routines (do not modify)

int main (int argc, char **argv)
{
    // first I'm going to save the total number of bytes this array takes up in a variable
    int total_array_bytes = N * sizeof(float);
    // each of the two GPUs processes half of the input
    int half_size = N / 2;
    int half_array_bytes = half_size * sizeof(float);

    //BEGIN: CPU implementation (do not modify)
    float *h_cpu_result = (float*)malloc(N*sizeof(float));
    float *h_input = (float*)malloc(N*sizeof(float));
    //Initialize data on CPU
    int i;
    for (i=0; i<N; i++)
    {
        h_input[i] = 0.1f * i;
    }

    //Execute and time the CPU version
    long long CPU_start_time = start_timer();
    sine_serial(h_input, h_cpu_result);
    long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
    //END: CPU implementation (do not modify)

    //TODO: Prepare and run your kernel, make sure to copy your results back into h_gpu_result and display your timing results

    // allocating the results array on the host (cpu)
    float *h_gpu_result = (float*)malloc(total_array_bytes);

    // declare 4 pointers (because 2 devices) to memory on the GPU
    // Split of work: device 1 handles h_input[0 .. half_size), device 0
    // handles h_input[half_size .. N), and the outputs are reassembled in
    // the same order below.
    float *d_in_1;
    float *d_out_1;
    float *d_out_2;
    float *d_in_2;

    // explicitly set which device is being used
    cudaSetDevice(0);

    // insert some timing code now
    long long GPU_memory_allocation_start_time = start_timer();
    // now actually allocate GPU memory for input and output
    cudaMalloc((void **) &d_in_1, half_array_bytes);
    cudaMalloc((void **) &d_out_1, half_array_bytes);
    // using the second device because why not
    cudaSetDevice(1);
    cudaMalloc((void **) &d_in_2, half_array_bytes);
    cudaMalloc((void **) &d_out_2, half_array_bytes);
    long long GPU_memory_allocation_time = stop_timer(GPU_memory_allocation_start_time, "\nGPU Memory Allocation");

    // time the memory copy to devices
    long long host_to_device_start_time = start_timer();
    // the second thing to do would be to copy the input array over into the gpu 2's memory
    cudaMemcpy(d_in_2, h_input, half_array_bytes, cudaMemcpyHostToDevice);
    cudaSetDevice(0);
    //copy the other half of the input array over onto the first device
    cudaMemcpy(d_in_1, &h_input[half_size], half_array_bytes, cudaMemcpyHostToDevice);
    long long host_to_device_time = stop_timer(host_to_device_start_time, "GPU Memory Copy to Device");

    // time how long it takes for the kernel to run
    // NOTE(review): kernel launches are asynchronous and there is no
    // synchronization before the timer stops, so "GPU Kernel Run Time"
    // presumably measures only launch overhead; the execution time is
    // absorbed by the blocking cudaMemcpy calls below — confirm intent.
    long long kernel_start_time = start_timer();
    // now I think I'm ready to launch the kernel on the GPU
    // my original call was faulty since I can't run more than 1024 threads per block!
    sine_parallel<<<65535, 1024>>>(d_in_1, d_out_1);
    // also run the kernel on the second device
    cudaSetDevice(1);
    sine_parallel<<<65535, 1024>>>(d_in_2, d_out_2);
    // checking to see that there were no errors with the kernel parameters when it got launched
    long long kernel_time = stop_timer(kernel_start_time, "GPU Kernel Run Time");

    // time how long it takes to copy the results on the GPU back onto the CPU
    long long device_to_host_start_time = start_timer();
    // now copy the results on GPU 2's memory to CPU memory
    cudaMemcpy(h_gpu_result, d_out_2, half_array_bytes, cudaMemcpyDeviceToHost);
    // now copy the results on GPU 1's memory to CPU memory
    // NOTE(review): d_out_1 lives on device 0 while device 1 is current;
    // this relies on unified virtual addressing to route the copy — confirm.
    cudaMemcpy(&h_gpu_result[half_size], d_out_1, half_array_bytes, cudaMemcpyDeviceToHost);
    long long device_to_host_time = stop_timer(device_to_host_start_time, "GPU Memory Copy to Host");

    // get the total time on the GPU
    long long total_time = stop_timer(GPU_memory_allocation_start_time, "Total GPU Run Time");
    std::cout << "\n";

    // Checking to make sure the CPU and GPU results match - Do not modify
    // NOTE(review): abs on a float difference relies on C++ overload
    // resolution; fabsf would be unambiguous — confirm the intended overload.
    int errorCount = 0;
    for (i=0; i<N; i++)
    {
        if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
            errorCount = errorCount + 1;
    }
    if (errorCount > 0)
        printf("Result comparison failed.\n");
    else
        printf("Result comparison passed.\n");

    // Cleaning up memory
    free(h_input);
    free(h_cpu_result);
    free(h_gpu_result);
    // make sure to free the memory on the GPU too!
    cudaFree(d_in_1);
    cudaFree(d_out_1);
    cudaFree(d_in_2);
    cudaFree(d_out_2);

    return 0;
}
4,799
// Ref:https://github.com/PacktPublishing/Hands-On-GPU-Accelerated-Computer-Vision-with-OpenCV-and-CUDA/blob/master/Chapter2/03_thread_execution_example.cu
#include <iostream>
#include <stdio.h>

// Kernel: each of the launched single-thread blocks prints its block index.
__global__ void myfirstkernel(void) {
    printf("Hello! I'm thread in block: %d\n", blockIdx.x);
}

int main() {
    // 16 blocks of 1 thread each.
    myfirstkernel<<<16, 1>>>();

    // FIX: the original ignored all CUDA status codes, so a failed launch
    // (or missing device) would silently print "All threads are finished."
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Wait for the kernel (and its device-side printf output) to complete.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel execution failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    printf("All threads are finished.\n");
    return 0;
}
4,800
#include "math.cuh"

// Converts an angle in degrees to radians (degrees * pi / 180).
template <typename T>
__device__ double degree_to_radian(T degree){
    const double kPi = 3.1415926535897932384626433832;
    return degree * kPi / 180;
}

// Explicit instantiations for the argument types used by callers.
template __device__ double degree_to_radian<int>(int degree);
template __device__ double degree_to_radian<double>(double degree);

// Converts an angle in radians to degrees (radians * 180 / pi); the result
// is converted to T on return (truncating for T = int).
template <typename T>
__device__ T radian_to_degree(double radian){
    const double kPi = 3.1415926535897932384626433832;
    return radian * 180 / kPi;
}

// Explicit instantiations for the return types used by callers.
template __device__ int radian_to_degree<int>(double radian);
template __device__ double radian_to_degree<double>(double radian);