serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
5,301
//
// Compile:
//
//   $ nvcc list_gpus.cu -o list_gpus
//
#include <cuda.h>
#include <curand_kernel.h>
#include <stdio.h>

// Enumerate every CUDA device visible to the runtime and print its index,
// name and compute capability, one line per device.
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("Device Index %d, %s, Compute Capability %d.%d\n",
               dev, props.name, props.major, props.minor);
    }
}
5,302
#include "includes.h"

// Element-wise addition of two n x n matrices (row-major): c = a + b.
// Each thread owns a single column and walks down all n rows of it.
__global__ void addOneColumnPerThread(double* a, double* b, double* c, int n)
{
    // Column handled by this thread.
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Threads past the last column have nothing to do.
    if (col >= n) {
        return;
    }

    for (int row = 0; row < n; ++row) {
        int idx = row * n + col;
        c[idx] = a[idx] + b[idx];
    }
}
5,303
#include <cuda_runtime.h>
#include <stdio.h>

// Demonstrates how the 1D grid size changes as the block size shrinks while
// still covering a fixed number of elements.  The original repeated the same
// three statements four times; the repetition is folded into a loop over the
// block sizes — the printed output is byte-identical.
int main(int argc, char **argv)
{
    int nElem = 1024;

    // Block sizes to try, in the original order (largest first).
    const unsigned int blockSizes[] = { 1024, 512, 256, 128 };

    dim3 block(blockSizes[0]);
    dim3 grid((nElem - 1) / block.x + 1);

    for (int i = 0; i < 4; ++i) {
        block.x = blockSizes[i];
        grid.x = (nElem - 1) / block.x + 1;   // ceiling division
        printf("grid.x %d block.x %d\n", grid.x, block.x);
    }

    cudaDeviceReset();
    return 0;
}
5,304
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>

//134217728

// Wall-clock time in seconds with microsecond resolution.
double dwalltime(){
  double sec;
  struct timeval tv;

  gettimeofday(&tv,NULL);
  sec = tv.tv_sec + tv.tv_usec/1000000.0;
  return sec;
}

// One pass of a pairwise in-place reduction over d_vecA.
// Element global_id*dist accumulates its partner dist/2 positions away;
// n is the number of active elements in this pass.
// NOTE(review): the host loop doubles `dist` each pass, so the reduction is
// only exact when N is a power of two — confirm with callers.
__global__ void vecSum_kernel_cuda(double *d_vecA,double *d_result,unsigned long dist,unsigned long n){
  unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_id < n){
    d_vecA[global_id*dist] = d_vecA[global_id*dist] + d_vecA[global_id*dist+dist / 2];
  }
}

void checkparams(unsigned long *n, unsigned int *cb);
void checkparamsB(unsigned long *n, unsigned int *cb);

int main(int argc, char *argv[]){
  if (argc != 2){
    printf("Falta argumento: N\n");
    return 0;
  }

  // strtoul instead of atoi: N is unsigned long and atoi truncates to int.
  unsigned long N = strtoul(argv[1], NULL, 10);
  double *vecA,*result,timetick;
  unsigned int i;
  cudaError_t error;
  unsigned int CUDA_BLK = 32;
  // checkparamsB(&N,&CUDA_BLK);

  unsigned long numBytes = sizeof(double)*N;
  double *d_vecA,*d_result;

  vecA = (double *)malloc(numBytes);
  result = (double *)malloc(sizeof(double));
  *result = 0;
  for (i = 0; i < N; i++){
    vecA[i] = i;
  }

  cudaMalloc((void **) &d_vecA, numBytes);
  cudaMalloc((void **) &d_result, sizeof(double));
  cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice);

  // One-dimensional block of threads (*cb* threads)
  dim3 dimBlock(CUDA_BLK);

  // One-dimensional grid (*ceil(n/cb)* blocks), recomputed per pass as the
  // number of active elements halves.
  timetick = dwalltime();
  for(i = 2; i <= N ;i *= 2){
    dim3 dimGrid((N / i + dimBlock.x - 1) / dimBlock.x);
    vecSum_kernel_cuda<<<dimGrid, dimBlock>>>(d_vecA,d_result,i,N/i);
    // cudaThreadSynchronize() is deprecated (removed in CUDA 12);
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
  }
  printf("Tiempo para sumar las matrices: %f\n",dwalltime() - timetick);

  error = cudaGetLastError();
  printf("error: %d\n",error);

  cudaMemcpy(result, d_vecA, sizeof(double), cudaMemcpyDeviceToHost); // GPU -> CPU
  printf("%f\n",*result);

  cudaFree(d_vecA);
  cudaFree(d_result);
  free(vecA);
  free(result);
  return 0;
}

// Set *cb to the device's maximum block dimension in x and print it.
void checkparamsB(unsigned long *n, unsigned int *cb){
  struct cudaDeviceProp capabilities;
  cudaGetDeviceProperties (&capabilities, 0);
  *cb = capabilities.maxThreadsDim[0];
  printf("%d\n",*cb);
}

// Clamp the block size (*cb) and total thread count (*n) to the limits of
// device 0, printing a notice whenever a value is adjusted.
void checkparams(unsigned long *n, unsigned int *cb){
  struct cudaDeviceProp capabilities;

  // If the total thread count is below the block size, shrink the block.
  if (*cb > *n)
    *cb = *n;

  cudaGetDeviceProperties (&capabilities, 0);

  if (*cb > capabilities.maxThreadsDim[0]) {
    *cb = capabilities.maxThreadsDim[0];
    printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n\n", *cb);
  }

  if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
    *cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
    if (*cb > capabilities.maxThreadsDim[0]) {
      *cb = capabilities.maxThreadsDim[0];
      printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n", *cb);
      if (*n > (capabilities.maxGridSize[0] * *cb)) {
        *n = capabilities.maxGridSize[0] * *cb;
        printf("->Núm. total de hilos cambiado a %lu (máx por grid para \
dev)\n\n", *n);
      } else {
        printf("\n");
      }
    } else {
      printf("->Núm. hilos/bloq cambiado a %d (%d máx. bloq/grid para \
dev)\n\n", *cb, capabilities.maxGridSize[0]);
    }
  }
}
5,305
#include "includes.h"
// %%cu
// Block-level reduction over arr_in into arr_out (one result per block).
// option selects the operation: 0 or 3 -> sum, 1 -> maximum, else -> minimum.
// With int data the sum may overflow for large inputs; use long long if that
// matters.  Requires blockDim.x to be a power of two and a dynamic shared
// allocation of blockDim.x * sizeof(int).
#define THREADS_PER_BLOCK 256
using namespace std;

__global__ void calculate(int *arr_in, int* arr_out, int sz, int option){
    int ind = threadIdx.x;
    int dim = blockDim.x;
    extern __shared__ int shared_mem[];
    int actual_ind = blockIdx.x*blockDim.x + ind;

    // Load one element per thread; out-of-range threads load the identity
    // element of the selected operation so they cannot affect the result.
    if(actual_ind < sz){
        shared_mem[ind] = arr_in[actual_ind];
    }else{
        if(option == 0 || option == 3)
            shared_mem[ind] = 0;        // identity for sum
        else if(option == 1)            // maximum
            shared_mem[ind] = INT_MIN;  // true identity (was -INT_MAX, off by one)
        else                            // minimum
            shared_mem[ind] = INT_MAX;
    }
    __syncthreads();

    // Tree reduction in shared memory.
    for(int i=dim/2 ; i > 0 ; i=i/2){
        if(ind<i){
            if(option == 0 || option == 3)
                shared_mem[ind]+=shared_mem[ind+i];
            else if(option == 1)
                shared_mem[ind]=max(shared_mem[ind],shared_mem[ind+i]);
            else
                shared_mem[ind]=min(shared_mem[ind],shared_mem[ind+i]);
        }
        __syncthreads();
    }

    // Only thread 0 publishes the block result.  The original had every
    // thread store shared_mem[0] — the same value, but a write race that
    // compute-sanitizer's racecheck flags.
    if(ind == 0)
        arr_out[blockIdx.x]=shared_mem[0];
}
5,306
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>

#define N 2048

// Parallel maximum via atomicMax.
// The original kernel had every thread reset b[0] to 0 and then update it
// with a plain, non-atomic compare — a data race whose result could be any
// single element, not the maximum.  b[0] must be initialised by the host
// (0 works here because rand() is non-negative) before the launch.
// Also note the original launched <<<1, N>>> with N = 2048, which exceeds
// the 1024 threads-per-block hardware limit and fails to launch at all;
// we use a grid of blocks with a bounds check instead.
__global__ void findMax(int *a, int *b){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        atomicMax(&b[0], a[i]);
    }
}

// Sequential reference: maximum of a[0..N-1] (assumes non-negative values).
int findMaxCPU(int *a){
    int max = 0;
    for(int i = 0; i < N; i++){
        if(a[i] > max) {
            max = a[i];
        }
    }
    return max;
}

int main(int argc, char *argv[]){
    int *a, *b;
    int *d_a, *d_b;
    int size = N * sizeof(int);
    struct timeval before;
    struct timeval after;

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    a = (int *)malloc(size);
    b = (int*)malloc(size);

    for(int i = 0; i < N; i++){
        a[i] = rand();
    }
    b[0] = 0;   // identity for the atomicMax reduction (was uninitialised)

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    gettimeofday(&before, NULL);
    findMax<<<(N + 1023) / 1024, 1024>>>(d_a, d_b);
    // Kernel launches are asynchronous — wait before taking the stop time,
    // otherwise only the launch overhead is measured.
    cudaDeviceSynchronize();
    gettimeofday(&after, NULL);

    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    // %ld: the timeval arithmetic yields long, not int.
    printf("For GPU, the max is %d and it takes %ld microseconds\n", b[0],
           (after.tv_sec * 1000000 + after.tv_usec) - (before.tv_sec * 1000000 + before.tv_usec));

    gettimeofday(&before, NULL);
    int max = findMaxCPU(a);
    gettimeofday(&after, NULL);
    printf("For CPU, the max is %d and it takes %ld microseconds\n", max,
           (after.tv_sec * 1000000 + after.tv_usec) - (before.tv_sec * 1000000 + before.tv_usec));

    free(a);
    free(b);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
5,307
#include "includes.h"

using namespace std;

// https://stackoverflow.com/questions/26853363/dot-product-for-dummies-with-cuda-c
// Fill vec with a constant value, one element per thread.
// NOTE(review): there is no bounds check, so the launch configuration must
// cover exactly the vector length (gridDim.x * blockDim.x == len) — confirm
// at the call site.
__global__ void init_vec(float* vec, float value)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    vec[tid] = value;
}
5,308
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdbool.h>

// Lattice sizes to benchmark.
int nValues[15]={100,150,200,250,350,500,650,800,900,1000,1200,1400,1600,1800,2000};
// Iteration counts to benchmark.
// warning: do not change the length of the array
int kValues[5]={10,20,45,80,100};

// Compute the new spin at linear index `current` (coordinates xAxes,yAxes)
// of an n x n lattice: weighted 5x5 neighbourhood sum with wrap-around,
// then sign of the sum (ties keep the old spin).
__device__ static void calculate(int *readingArray, int* writingArray, double *weights, int n ,int current,int xAxes, int yAxes){
    double Sum = 0;
    if(current < n*n) {
        // Loop through all the neighbouring points that affect this spin.
        for(int p=-2;p<3;p++){
            for(int q=-2;q<3;q++){
                // The (+n) % n indexing wraps the 5x5 window around the
                // lattice edges (periodic boundary).
                Sum += weights[(p+2)*5+(q+2)] * readingArray[((p + yAxes + n) % n) * n + ( q + xAxes + n) % n];
            }
        }
        // Decide which value the current spin should take; the 1e-5 band
        // absorbs floating-point noise around an exact zero.
        if(Sum > 0.00001)
            writingArray[current] = 1;
        else if(Sum < -0.00001)
            writingArray[current] = -1;
        else // if the sum is (numerically) zero the spin keeps its value
            writingArray[current] = readingArray[current];
    }
}

// One Ising sweep.  Each thread updates `looper` consecutive spins.  The
// parity of iteration i selects which of the two lattices is read and which
// is written, so the buffers ping-pong between iterations.
__global__ void spinCalculation(int n, double * gpuWeights,int *gpuG,int *gpuGTemp,int i,int block,int looper)
{
    // Linear index of this thread's first spin group.
    int current = blockIdx.x * block * block + threadIdx.x;
    int xAxes;
    int yAxes;
    for(int q=0;q<looper;q++) {
        // Even iterations read gpuG and write gpuGTemp; odd iterations the
        // reverse.
        switch (i%2) {
            case 0:
                xAxes=(current*looper+q)%n;
                yAxes=(current*looper+q)/n;
                calculate(gpuG,gpuGTemp,gpuWeights,n,current*looper+q,xAxes,yAxes);
                break;
            case 1:
                xAxes=(current*looper+q)%n;
                yAxes=(current*looper+q)/n;
                calculate(gpuGTemp,gpuG,gpuWeights,n,current*looper+q,xAxes,yAxes);
                break;
        }
    }
}

// Run k Ising iterations on the n x n lattice G using the 5x5 weight
// stencil w, with a grid*grid x block*block launch.  G is updated in place.
void ising (int *G, double *w, int k, int n,int grid ,int block)
{
    // Spins each thread must process so the whole lattice is covered.
    int looper= n*n/(grid*grid*block*block) + 1;

    double *weights;
    cudaMalloc(&weights,sizeof(double)*25);
    cudaMemcpy(weights,w,25*sizeof(double),cudaMemcpyHostToDevice);

    int *tempG=(int *) malloc(sizeof(int)*n*n);
    memcpy(tempG,G,n*n*sizeof(int));

    int *gpuTempG;
    cudaMalloc(&gpuTempG,n*n*sizeof(int));
    int *gpuG;
    cudaMalloc(&gpuG,n*n*sizeof(int));
    cudaMemcpy(gpuTempG,tempG,n*n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gpuG,G,n*n*sizeof(int),cudaMemcpyHostToDevice);

    for(int i=0;i<k;i++){
        spinCalculation<<<grid*grid,block*block>>>(n,weights,gpuG,gpuTempG,i,block,looper);
        cudaDeviceSynchronize();
    }

    // After an odd number of sweeps the freshest lattice is in gpuTempG.
    if(k%2==1){
        cudaMemcpy(G,gpuTempG,n*n*sizeof(int),cudaMemcpyDeviceToHost);
    }
    else{
        cudaMemcpy(G,gpuG,n*n*sizeof(int),cudaMemcpyDeviceToHost);
    }

    cudaFree(gpuG);
    cudaFree(gpuTempG);
    // Fix: the weights buffer was never released — ising() is called 75
    // times from main(), leaking 75 device allocations.
    cudaFree(weights);
    free(tempG);
}

// Fill G with random spins of +1 or -1.
void initializeG(int n, int *G){
    for (int i=0;i<n*n;i++){
        if((random()%2)==0)
            G[i]=1;
        else
            G[i]=-1;
    }
}

// Benchmark driver: for every (n, k) pair run the Ising model and append
// the elapsed time to V2.csv.
int main(){
    int block=50;
    int grid=50;
    FILE *file;          // pointer to the results file
    clock_t end,start;   // variables to measure elapsed time
    int n,k;
    // 5x5 weight stencil (centre weight is zero: a spin does not influence
    // itself).
    double weights[] = {0.004, 0.016, 0.026, 0.016, 0.004,
                        0.016, 0.071, 0.117, 0.071, 0.016,
                        0.026, 0.117, 0,     0.117, 0.026,
                        0.016, 0.071, 0.117, 0.071, 0.016,
                        0.004, 0.016, 0.026, 0.016, 0.004};

    // Loop through every n and k value.
    for(int i=0;i<15;i++){
        n=nValues[i];
        int *G=(int*)malloc(sizeof(int)*n*n);
        for(int j=0;j<5;j++){
            k=kValues[j];
            initializeG(n,G);
            start=clock();
            ising(G,weights,k,n,grid,block);
            end=clock();
            file=fopen("V2.csv","a");
            fprintf(file, "%d ,%d, %lf\n",n,k, ((double)(end-start))/CLOCKS_PER_SEC);
            printf("%lf\n",((double)(end-start))/CLOCKS_PER_SEC);
            fclose(file);
        }
        free(G);
    }
    return 0;
}
5,309
#include <math.h>
#include <stdio.h>
#include <stdint.h>

// Column-major linear index into an nRows x nCols x slices volume.
__device__ __forceinline__ int getLinearIndex(int row, int col, int slice, int nRows, int nCols){
    //image indexing is column major
    return slice*nRows*nCols + col * nRows + row;
}

// Mean of one tileSize x tileSize tile of the given colour slice, anchored
// at (row, col) of an imageSize x imageSize image.
__device__ __forceinline__ double getTileAverage(int row, int col, int slice, int tileSize, int imageSize, int* image){
    int i, j;
    double sum = 0.0;
    double size = tileSize * tileSize;
    for(i = 0; i < tileSize; i++){
        for(j = 0; j < tileSize; j++){
            int tempRow = row + i;
            int tempCol = col + j;
            int tempLinearIndex = getLinearIndex(tempRow, tempCol, slice, imageSize, imageSize);
            sum = sum + image[tempLinearIndex];
            if(slice == 0){
                // Fix: sum is a double — printing it with %d was undefined
                // behaviour and showed garbage.
                printf("IMAGE VALUE: %d ; SUM: %f\n", image[tempLinearIndex], sum);
            }
        }
    }
    printf("SUM: %f \n", sum);
    return sum/size;
}

//tileSize = side length of tile
//numTiles = num of tiles per side
//threadsPerBlock = fixed at 16
// For the tile owned by this thread, compute the tile's average R/G/B and
// record the index of the sample colour closest to it (squared Euclidean
// distance in RGB space) into nearestTiles.
template <typename T>
__device__ __forceinline__ void mosaic(T* image, const T* reds, const T* greens, const T* blues, int numSamples, int* nearestTiles, int tileSize, int numTiles, int threadsPerBlock){
    //Calculate what tile this is
    int tileRowIdx = blockIdx.x * threadsPerBlock + threadIdx.x;
    int tileColIdx = blockIdx.y * threadsPerBlock + threadIdx.y;

    //Calculate top-left pixel of current tile,
    int pixelRow = tileRowIdx * tileSize;
    int pixelCol = tileColIdx * tileSize;

    //targetImageSize = side length of target image in pixels
    int targetImageSize = tileSize * numTiles;
    if(pixelRow >= targetImageSize || pixelCol >= targetImageSize){
        return;
    }

    double avgR = getTileAverage(pixelRow, pixelCol, 0, tileSize, targetImageSize, image);
    double avgG = getTileAverage(pixelRow, pixelCol, 1, tileSize, targetImageSize, image);
    double avgB = getTileAverage(pixelRow, pixelCol, 2, tileSize, targetImageSize, image);
    // Fix: these are doubles; %d was a format/argument mismatch.
    printf("Tuple of averages: %f, %f, %f \n", avgR, avgG, avgB);

    double minDistance = -1;      // -1 means "no candidate seen yet"
    int minDistanceIndex = -1;
    int i;
    for(i = 0; i < numSamples; i = i+1){
        // A sum of squares is already non-negative, so the fabs() wrappers
        // of the original were redundant and have been dropped.
        double tempDistance = pow(avgR-reds[i], 2) + pow(avgB-blues[i], 2) + pow(avgG-greens[i], 2);
        if(tempDistance < minDistance || minDistance == -1){
            minDistance = tempDistance;
            minDistanceIndex = i;
        }
    }

    //Tiles are indexed in row-major order
    int tileLinearIndex = tileRowIdx * numTiles + tileColIdx;
    nearestTiles[tileLinearIndex] = minDistanceIndex;
    return;
}

// Kernel wrapper: one thread per tile on a 2D grid of threadsPerBlock x
// threadsPerBlock blocks.
__global__ void mosaic_cuda_double(int* nearestTile, int* image, const int* red, const int* green, const int* blue, int numSamples, int tileSize, int numTiles, int threadsPerBlock){
    mosaic(image, red, green, blue, numSamples, nearestTile, tileSize, numTiles, threadsPerBlock);
    return;
}
5,310
# pragma warning (disable:4819)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

#define ARRAYSIZE 5

// Abort with location info if a CUDA call fails.
// Fix: the do/while(0) wrapper must NOT carry a trailing semicolon inside
// the macro, otherwise `if (...) checkCudaErrors(x); else ...` fails to
// parse.
#define checkCudaErrors( a ) do { \
    if (cudaSuccess != (a)) { \
        fprintf(stderr, "Cuda runtime error in line %d of file %s : %s \n", \
                __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \
        exit(EXIT_FAILURE); \
    } \
} while(0)

void printDeviceProp(cudaDeviceProp &prop);
void printVector(const int vector[]);

// Print how many CUDA devices are present and the key properties of each.
void inquireGPUInfo() {
    int count;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        printf("There is no device.\n");
        return;
    } else {
        printf("Device count is %d.\n\n", count);
    }
    // find the device
    int i;
    for (i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            printDeviceProp(prop);
        }
    }
    printf("\n");
}

void printDeviceProp(cudaDeviceProp &prop) {
    printf("Device name :\t %s.\n", prop.name);
    printf("Major compute capability: \t %d.\n", prop.major);
    // Fix: these properties are size_t; %lld was a format/argument mismatch
    // (undefined behaviour on platforms where size_t != long long).
    printf("Total global memory: \t %zu bytes.\n", prop.totalGlobalMem);
    printf("Max threads per block: \t %d.\n", prop.maxThreadsPerBlock);
    printf("Total const memory: \t %zu bytes.\n", prop.totalConstMem);
    printf("Shared memory per block: \t %zu bytes.\n", prop.sharedMemPerBlock);
    printf("Registers per block: \t %d.\n", prop.regsPerBlock);
    printf("Max threads per multiprocessors: \t %d.\n", prop.maxThreadsPerMultiProcessor);
    printf("Multiprocessors count: \t %d.\n", prop.multiProcessorCount);
}

// c[i] = a[i] + b[i]; one thread per element, single block.
__global__ void addKernel(int *c, const int *a, const int *b) {
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Print the ARRAYSIZE elements of vector, comma-separated.
void printVector(const int vector[]) {
    int i;
    for (i = 0; i < ARRAYSIZE; i++) {
        // Fix: the original compared i against ARRAYSIZE, which a loop index
        // < ARRAYSIZE never equals, so every element (including the last)
        // got a trailing comma.  Compare against the last index instead.
        if (i != ARRAYSIZE - 1) {
            printf("%d, ", vector[i]);
        } else {
            printf("%d ", vector[i]);
        }
    }
    printf("\n");
}

int main() {
    // inquireGPUInfo();
    const int a[ARRAYSIZE] = { 1, 2, 3, 4, 5 };
    const int b[ARRAYSIZE] = { 10, 20, 30, 40, 50 };
    int c[ARRAYSIZE] = { 0 };
    int *dev_a, *dev_b, *dev_c;

    checkCudaErrors(cudaMalloc((void**)&dev_a, ARRAYSIZE * sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&dev_b, ARRAYSIZE * sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&dev_c, ARRAYSIZE * sizeof(int)));
    checkCudaErrors(cudaMemcpy(dev_a, a, ARRAYSIZE * sizeof(int) , cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(dev_b, b, ARRAYSIZE * sizeof(int) , cudaMemcpyHostToDevice));

    addKernel <<< 1, ARRAYSIZE >>> (dev_c, dev_a, dev_b);
    // Launch-configuration errors are not returned by the launch itself.
    checkCudaErrors(cudaGetLastError());

    checkCudaErrors(cudaMemcpy(c, dev_c, ARRAYSIZE * sizeof(int) , cudaMemcpyDeviceToHost));

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    printf("Array 1: \t");
    printVector(a);
    printf("Array 2: \t");
    printVector(b);
    printf("Arrays sum: \t");
    printVector(c);

    getchar();
    return 0;
}
5,311
#include "../image_headers/convolution.cuh"
#include <iostream>
#include <cstdlib>

// Boundary policy for the convolution: pixels inside the image return their
// value, pixels outside one axis but inside the other return 1, and corner
// (outside both axes) pixels return 0.
__device__ float calcFx(const unsigned char* image, int i, int j, int width, int height)
{
    if (0 <= i && i < width && 0 <= j && j < height) {
        return image[j * width + i];
    } else if ((0 <= i && i < width) || (0 <= j && j < height)) {
        return 1;
    } else {
        return 0;
    }
}

// m x m convolution of image with mask, writing unsigned char output.
// One thread per output pixel, flat 1D indexing.
__global__ void convolve_kernel(const unsigned char* image, unsigned char* output, int width, int height, const float *mask, int m)
{
    int output_index = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the grid is rounded up, so the last block can contain threads
    // past the end of the image; without this guard they wrote out of
    // bounds.
    if (output_index >= width * height) {
        return;
    }
    int x = output_index % width;
    int y = output_index / width;

    float accumulator = 0;
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < m; j++) {
            float result = calcFx(image, x + i - m / 2, y + j - m / 2, width, height);
            accumulator += mask[i * m + j] * result;
        }
    }
    output[output_index] = accumulator;
}

// Same convolution but writing float output (no truncation to 8 bits).
__global__ void convolve_kernel2(const unsigned char* image, float* output, int width, int height, const float *mask, int m)
{
    int output_index = blockIdx.x * blockDim.x + threadIdx.x;
    // Same out-of-bounds guard as convolve_kernel.
    if (output_index >= width * height) {
        return;
    }
    int x = output_index % width;
    int y = output_index / width;

    float accumulator = 0;
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < m; j++) {
            float result = calcFx(image, x + i - m / 2, y + j - m / 2, width, height);
            accumulator += mask[i * m + j] * result;
        }
    }
    output[output_index] = accumulator;
}

// Host wrapper: copy image/mask to the device, run convolve_kernel, print
// the elapsed time and copy the result back into output.
void convolve(const unsigned char* image, unsigned char* output, int width, int height, const float *mask, int m)
{
    int size = width * height;
    int maskSize = m * m;
    int threads_per_block = 256;
    int num_blocks = (size - 1) / threads_per_block + 1;   // ceiling division

    // copy data to the device
    unsigned char *dImage, *dOutput;
    float *dMask;
    cudaMalloc((void **)&dImage, size * sizeof(unsigned char));
    cudaMalloc((void **)&dOutput, size * sizeof(unsigned char));
    cudaMalloc((void **)&dMask, maskSize * sizeof(float));
    cudaMemcpy(dImage, image, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(dOutput, output, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(dMask, mask, maskSize * sizeof(float), cudaMemcpyHostToDevice);

    // event timers
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    convolve_kernel<<<num_blocks, threads_per_block>>>(dImage, dOutput, width, height, dMask, m);
    cudaDeviceSynchronize();

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float numMs;
    cudaEventElapsedTime(&numMs, start, stop);
    std::cout << "convolution in cuda took " << numMs << "ms" << std::endl;

    // copy back
    cudaMemcpy(output, dOutput, size * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // Fix: release device buffers and events — the original leaked all of
    // them on every call.
    cudaFree(dImage);
    cudaFree(dOutput);
    cudaFree(dMask);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
5,312
// Vertex transform: applies the modelview-projection matrix, performs the
// perspective divide, then applies the viewport matrix.  Operates on a
// width x height grid of float4 vertices; expects 16x16 thread blocks
// (the shared matrix staging below relies on blockDim.x == 16).
__global__ void transform_kernel(
    float4* outpos,
    float4* inpos,
    unsigned int width,
    unsigned int height,
    float* mvp_matrix,
    float* vp_matrix)
{
    // Indices into the VBO data. Roughly like texture coordinates from GLSL.
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int bx = threadIdx.x;
    unsigned int by = threadIdx.y;

    // test code: passing vertices without transformation
    /*outpos[ty*width + tx].x = inpos[ty*width + tx].x ;
    outpos[ty*width + tx].y = inpos[ty*width + tx].y ;
    outpos[ty*width + tx].z = inpos[ty*width + tx].z ;
    outpos[ty*width + tx].w = inpos[ty*width + tx].w ;*/

    ////////////////////////////////////////////////////////////////////////////
    // shared memory
    ////////////////////////////////////////////////////////////////////////////
    __shared__ float4 coords[16][16];
    __shared__ float mvps[16];
    __shared__ float vps[16];

    // Stage the two 4x4 matrices in shared memory (each of the 16 x-lanes
    // loads one element; the redundant writes across y-lanes store the same
    // value).
    mvps[bx] = mvp_matrix[bx];
    vps[bx] = vp_matrix[bx];
    // Fix: every thread below reads mvps[0..15], i.e. elements written by
    // OTHER threads.  Without a barrier between the writes above and those
    // reads, this was a data race.
    __syncthreads();

    ////////////////////////////////////////////////////////////////////////////
    // matrix transformation: modelview -> projection
    ////////////////////////////////////////////////////////////////////////////
    coords[bx][by].x = mvps[0] * inpos[ty*width + tx].x
                     + mvps[1] * inpos[ty*width + tx].y
                     + mvps[2] * inpos[ty*width + tx].z
                     + mvps[3] * inpos[ty*width + tx].w;
    coords[bx][by].y = mvps[4] * inpos[ty*width + tx].x
                     + mvps[5] * inpos[ty*width + tx].y
                     + mvps[6] * inpos[ty*width + tx].z
                     + mvps[7] * inpos[ty*width + tx].w;
    coords[bx][by].z = mvps[8] * inpos[ty*width + tx].x
                     + mvps[9] * inpos[ty*width + tx].y
                     + mvps[10] * inpos[ty*width + tx].z
                     + mvps[11] * inpos[ty*width + tx].w;
    coords[bx][by].w = mvps[12] * inpos[ty*width + tx].x
                     + mvps[13] * inpos[ty*width + tx].y
                     + mvps[14] * inpos[ty*width + tx].z
                     + mvps[15] * inpos[ty*width + tx].w;
    __syncthreads();

    ////////////////////////////////////////////////////////////////////////////
    // normalization (perspective divide)
    ////////////////////////////////////////////////////////////////////////////
    coords[bx][by].x = coords[bx][by].x / coords[bx][by].w;
    coords[bx][by].y = coords[bx][by].y / coords[bx][by].w;
    coords[bx][by].z = coords[bx][by].z / coords[bx][by].w;
    coords[bx][by].w = coords[bx][by].w / coords[bx][by].w;
    __syncthreads();

    ////////////////////////////////////////////////////////////////////////////
    // matrix transformation: viewport
    ////////////////////////////////////////////////////////////////////////////
    outpos[ty*width + tx].x = vps[0] * coords[bx][by].x
                            + vps[1] * coords[bx][by].y
                            + vps[2] * coords[bx][by].z
                            + vps[3] * coords[bx][by].w;
    outpos[ty*width + tx].y = vps[4] * coords[bx][by].x
                            + vps[5] * coords[bx][by].y
                            + vps[6] * coords[bx][by].z
                            + vps[7] * coords[bx][by].w;
    outpos[ty*width + tx].z = vps[8] * coords[bx][by].x
                            + vps[9] * coords[bx][by].y
                            + vps[10] * coords[bx][by].z
                            + vps[11] * coords[bx][by].w;
    outpos[ty*width + tx].w = vps[12] * coords[bx][by].x
                            + vps[13] * coords[bx][by].y
                            + vps[14] * coords[bx][by].z
                            + vps[15] * coords[bx][by].w;
    __syncthreads();
}

/*
__global__ void tiangle_setup_kernel(float4* outpos, float4* verts,
                                     unsigned int width, unsigned int height,
                                     float* mvp_matrix, float* vp_matrix)
{
    // Indices into the VBO data. Roughly like texture coordinates from GLSL.
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int bx = threadIdx.x;
    unsigned int by = threadIdx.y;

    // Shared memory
    __shared__ float4 coord1[8][8];
    __shared__ float4 coord2[8][8];
    __shared__ float4 coord3[8][8];
    //__shared__ float signed_area;
    __shared__ float3 coefs[8][8];

    // copy vertices to shared memory
    coord1[bx][by] = verts[3*ty*width + 3*tx];
    coord2[bx][by] = verts[3*ty*width + 3*tx + 1];
    coord3[bx][by] = verts[3*ty*width + 3*tx + 2];
    __syncthreads();

    // area
    float signed_area = (coord2[bx][by].x - coord1[bx][by].x)*(coord3[bx][by].y - coord1[bx][by].y)
                      - (coord3[bx][by].x - coord1[bx][by].x)*(coord2[bx][by].y - coord1[bx][by].y);
    __syncthreads();

    // coefs
    coefs[bx][by].x = coord1[bx][by].y - coord2[bx][by].y;
    coefs[bx][by].y = coord2[bx][by].x - coord1[bx][by].x;
    if(signed_area < 0)
    {
        coefs[bx][by].x = -coefs[bx][by].x;
        coefs[bx][by].y = -coefs[bx][by].y;
    }
    __syncthreads();

    coefs[bx][by].z = -coefs[bx][by].x*coord1[bx][by].x - coefs[bx][by].y*coord1[bx][by].y;
}
*/
5,313
#include "includes.h"

// Per-case logistic-regression diagnostics.  For each case tx along x:
//   labelLogProbs[tx] = log(probability assigned to the true label)
//   correctProbs[tx]  = probability of guessing the right label when
//                       picking a most-probable one (1/#ties, 0 if wrong).
// LOGREG_ERR_THREADS_X is the block-width macro defined elsewhere in the
// project; one thread handles one case.
__global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs,
                            const int numCases, const int numOut) {
    const int caseIdx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
    if (caseIdx >= numCases) {
        return;
    }

    const int label = int(labels[caseIdx]);
    const float maxProb = maxProbs[caseIdx];
    const float labelProb = probs[label * numCases + caseIdx];
    labelLogProbs[caseIdx] = __logf(labelProb);

    /*
     * Compute the probability of guessing the correct case if you take the most-probable label.
     *
     * This is done like this:
     *
     * - If the most probable label is not equal to the true label, then the probability is zero.
     * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum).
     *
     * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned
     * maximum probability. But it's a safety measure to prevent over-estimating your accuracy.
     * Though it could never happen in reality. Well it could. But it wouldn't. Cool?
     */
    if (labelProb != maxProb) {
        correctProbs[caseIdx] = 0;
    } else {
        int tieCount = 0;
        for (int k = 0; k < numOut; k++) {
            tieCount += probs[k * numCases + caseIdx] == maxProb;
        }
        correctProbs[caseIdx] = 1.0f / float(tieCount);
    }
}
5,314
// Set every element of a[0..N) to 1 with a grid-stride loop, so any launch
// configuration covers the whole array.
__global__ void deviceKernel(int *a, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;

    for (int i = first; i < N; i += step)
        a[i] = 1;
}

// CPU counterpart: set every element of a[0..N) to 1.
void hostFunction(int *a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = 1;
}

int main()
{
    int N = 2<<24;
    size_t size = N * sizeof(int);
    int *a;

    cudaMallocManaged(&a, size);

    // Touch the managed buffer on the host first, then on the device.
    hostFunction(a, N);
    deviceKernel<<<256, 256>>>(a, N);
    cudaDeviceSynchronize();

    cudaFree(a);
}
5,315
/*******************************************************************************
 * serveral useful gpu functions will be defined in this file to facilitate
 * the surface redistance scheme
 ******************************************************************************/

// Pair of one-sided ENO derivatives at a node: sR (right/forward) and
// sL (left/backward).
typedef struct
{
	double sR;
	double sL;
} double_eno_derivative;

// max/min of two doubles.
__device__ inline double max2(double x, double y)
{
	return (x<y) ? y : x;
}

__device__ inline double min2(double x, double y)
{
	return (x<y) ? x : y;
}

// minmod limiter: 0 if x and y differ in sign, otherwise whichever of the
// two has the smaller magnitude.
__device__ inline double min_mod(double x, double y)
{
	return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}

// Sign function; note that sign(0) evaluates to -1 here.
__device__ inline double sign(double x)
{
	return (x>0) ? 1.0 : -1.0;
}

// Unit tangential advection direction (H1,H2,H3): the gradient (Dx,Dy,Dz)
// with its component along the normal (nx,ny,nz) removed, scaled by `sign`
// and normalized.  Also returns normal_d, the normal component of the
// gradient.  NOTE(review): if the tangential part vanishes, H_mag is 0 and
// the division yields inf/NaN — presumably excluded by the caller; confirm.
__device__ inline void advection_velocity(double & H1, double & H2, double & H3, double & normal_d, double sign, double Dx, double Dy, double Dz, double nx, double ny, double nz)
{
	normal_d = nx * Dx + ny * Dy + nz * Dz;

	H1 = sign * (Dx - nx * normal_d);
	H2 = sign * (Dy - ny * normal_d);
	H3 = sign * (Dz - nz * normal_d);

	double H_mag = sqrt(H1*H1+H2*H2+H3*H3);

	H1 = H1/H_mag;
	H2 = H2/H_mag;
	H3 = H3/H_mag;
}

// Upwind Hamiltonian: sign * (|tangential gradient| - 1).
__device__ inline void Upwind_Hamiltonian(double & Hamil, double normal_d, double sign, double Dx, double Dy, double Dz)
{
	// numerical error can lead to negative value inside sqrt()
	// the following code is needed to avoid NAN due to sqrt of a negative number
	Hamil = sign* ( sqrt( max2(0,Dx*Dx+Dy*Dy+Dz*Dz-normal_d*normal_d) ) - 1);
}

// convert subindex to linear index
// NOTE(review): despite the original "periodic boundary conditions" comment,
// the code below CLAMPS out-of-range indices to the boundary via min2/max2 —
// it does not wrap them.
__device__ inline int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
	int row_idxn = min2(rows-1, max2(0, row_idx));
	int col_idxn = min2(cols-1, max2(0, col_idx));
	int pge_idxn = min2(pges-1, max2(0, pge_idx));

	// Page-major, then column-major within a page.
	int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;

	return ind;
}

/******************************************************************************
 * calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
 ******************************************************************************/
// One-sided ENO derivatives at v0 on the stencil [v4,v1,v0,v2,v3], with
// boundary (interface) correction: pr/pl are the distances to the interface
// on the right/left (equal to ds when the interface is not crossed, in which
// case the neighbor value is used; otherwise 0 is used as the interface
// value).
__device__ inline double_eno_derivative eno_derivative(
	double v4, double v1, double v0, double v2, double v3,
	double pr, double pl, double ds)
{
	double p2m;
	double_eno_derivative eno_d;

	double p2 = v1 - 2.0 * v0 + v2;

	// Right-sided derivative with minmod-limited second-difference term.
	double p2r = v0 - 2.0 * v2 + v3;
	p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
	double vr = (pr==ds) ? v2 : 0;
	eno_d.sR = (vr - v0) / pr - pr * p2m;

	// Left-sided derivative.
	double p2l = v0 - 2.0 * v1 + v4;
	p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
	double vl = (pl==ds) ? v1 : 0;
	eno_d.sL = (v0 - vl) / pl + pl * p2m;

	return eno_d;
}

/******************************************************************************
 * calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
 * without boundary correction
 ******************************************************************************/
__device__ inline double_eno_derivative eno_derivative_field(
	double v4, double v1, double v0, double v2, double v3,
	double pr, double pl, double ds)
{
	double p2m;
	double_eno_derivative eno_d;

	double p2 = v1 - 2.0 * v0 + v2;

	double p2r = v0 - 2.0 * v2 + v3;
	p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
	eno_d.sR = (v2 - v0) / ds - ds * p2m;

	double p2l = v0 - 2.0 * v1 + v4;
	p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
	eno_d.sL = (v0 - v1) / ds + ds * p2m;

	return eno_d;
}

// calculate surface redistance step with central upwind scheme without higher order term
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
// Launch layout: 3D grid, one thread per (row, col, page) grid node.
__global__ void surface_redistance_step_1st(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
	int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
	int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;

	if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
		return;
	}

	int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);

	// One-sided ENO derivatives in x (columns), y (rows) and z (pages);
	// Dx[0]/Dx[1] hold the right/left derivative respectively, etc.
	int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
	int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
	int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
	int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
	double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
	double Dx[2] = {eno_dx.sR, eno_dx.sL};

	int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
	int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
	int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
	int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
	double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
	double Dy[2] = {eno_dy.sR, eno_dy.sL};

	int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
	int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
	int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
	int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
	double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
	double Dz[2] = {eno_dz.sR, eno_dz.sL};

	double Nx = nx[ind];
	double Ny = ny[ind];
	double Nz = nz[ind];
	double Sign = sign[ind];

	// different choices yield different upwind direction
	int const choice_x[8]={0,0,0,0,1,1,1,1};
	int const choice_y[8]={0,0,1,1,0,0,1,1};
	int const choice_z[8]={0,1,0,1,0,1,0,1};

	double Hamiltonian[8]={0,0,0,0,0,0,0,0};

	// a[0],a[1] is the magnitude of the maximum forward and backward
	// information propagation speed in the x direction. b for y and c for z
	// direction
	double a[2]={0,0};
	double b[2]={0,0};
	double c[2]={0,0};

	// Evaluate the Hamiltonian for all 8 one-sided derivative combinations
	// while accumulating the local wave speeds.
	for(int i=0;i<8;i++){
		double dr_x = Dx[choice_x[i]];
		double dr_y = Dy[choice_y[i]];
		double dr_z = Dz[choice_z[i]];
		double H1,H2,H3, normal_d;
		advection_velocity(H1,H2,H3,normal_d,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz);
		Upwind_Hamiltonian(Hamiltonian[i],normal_d,Sign,dr_x,dr_y,dr_z);
		a[0] = max2(a[0],max2(H1,0));
		b[0] = max2(b[0],max2(H2,0));
		c[0] = max2(c[0],max2(H3,0));
		a[1] = fabs(min2(-a[1],min2(H1,0)));
		b[1] = fabs(min2(-b[1],min2(H2,0)));
		c[1] = fabs(min2(-c[1],min2(H3,0)));
	}

	// calculate the numerical Hamiltonian (central-upwind weighted average
	// of the 8 one-sided Hamiltonians, plus dissipation terms)
	//double epsilon=1e-6;
	double numerical_Hamiltonian = 0;
	double denominator = (a[0]+a[1])*(b[0]+b[1])*(c[0]+c[1]);
	int const choice_a[8]={1,1,1,1,0,0,0,0};
	int const choice_b[8]={1,1,0,0,1,1,0,0};
	int const choice_c[8]={1,0,1,0,1,0,1,0};
	for(int i=0;i<8;i++){
		double H_a = a[choice_a[i]];
		double H_b = b[choice_b[i]];
		double H_c = c[choice_c[i]];
		numerical_Hamiltonian += H_a * H_b * H_c * Hamiltonian[i];
	}
	numerical_Hamiltonian = numerical_Hamiltonian/denominator;
	numerical_Hamiltonian += - a[0]*a[1]*(Dx[0]-Dx[1])/(a[0]+a[1]) - b[0]*b[1]*(Dy[0]-Dy[1])/(b[0]+b[1]) - c[0]*c[1]*(Dz[0]-Dz[1])/(c[0]+c[1]);

	step[ind] = numerical_Hamiltonian * deltat[ind];
}

// calculate surface redistance step with central upwind scheme with higher order term
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
__global__ void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
	int row_idx = blockIdx.x *
blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges); int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges); double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx); double Dx[2] = {eno_dx.sR, eno_dx.sL}; int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges); double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy); double Dy[2] = {eno_dy.sR, eno_dy.sL}; int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges); double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz); double Dz[2] = {eno_dz.sR, eno_dz.sL}; double Nx = nx[ind]; double Ny = ny[ind]; double Nz = nz[ind]; double Sign = sign[ind]; // different choices yield different upwind direction int const choice_x[8]={0,0,0,0,1,1,1,1}; int const choice_y[8]={0,0,1,1,0,0,1,1}; int const choice_z[8]={0,1,0,1,0,1,0,1}; double Hamiltonian[8]={0,0,0,0,0,0,0,0}; // a[0],a[1] is the magnitude of the maximum forward and backward // information propagation speed in the x 
direction. b for y and c for z direction double a[2]={0,0}; double b[2]={0,0}; double c[2]={0,0}; for(int i=0;i<8;i++){ double dr_x = Dx[choice_x[i]]; double dr_y = Dy[choice_y[i]]; double dr_z = Dz[choice_z[i]]; double H1,H2,H3, normal_d; advection_velocity(H1,H2,H3,normal_d,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz); Upwind_Hamiltonian(Hamiltonian[i],normal_d,Sign,dr_x,dr_y,dr_z); a[0] = max2(a[0],max2(H1,0)); b[0] = max2(b[0],max2(H2,0)); c[0] = max2(c[0],max2(H3,0)); a[1] = fabs(min2(-a[1],min2(H1,0))); b[1] = fabs(min2(-b[1],min2(H2,0))); c[1] = fabs(min2(-c[1],min2(H3,0))); } // calculate the numerical Hamiltonian //double epsilon=1e-6; double numerical_Hamiltonian = 0; double denominator = (a[0]+a[1])*(b[0]+b[1])*(c[0]+c[1]); int const choice_a[8]={1,1,1,1,0,0,0,0}; int const choice_b[8]={1,1,0,0,1,1,0,0}; int const choice_c[8]={1,0,1,0,1,0,1,0}; for(int i=0;i<8;i++){ double H_a = a[choice_a[i]]; double H_b = b[choice_b[i]]; double H_c = c[choice_c[i]]; numerical_Hamiltonian += H_a * H_b * H_c * Hamiltonian[i]; } numerical_Hamiltonian = numerical_Hamiltonian/denominator; numerical_Hamiltonian += - a[0]*a[1]*(Dx[0]-Dx[1])/(a[0]+a[1]) - b[0]*b[1]*(Dy[0]-Dy[1])/(b[0]+b[1]) - c[0]*c[1]*(Dz[0]-Dz[1])/(c[0]+c[1]); // // calculate higher order terms double psi_x_int_pp = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[0]-Hamiltonian[4]) ) / (a[0]+a[1]) ; double psi_x_int_mm = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[3]-Hamiltonian[7]) ) / (a[0]+a[1]) ; double psi_x_int_pm = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[1]-Hamiltonian[5]) ) / (a[0]+a[1]) ; double psi_x_int_mp = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[2]-Hamiltonian[6]) ) / (a[0]+a[1]) ; double dx_pp = min_mod(Dx[0]-psi_x_int_pp,psi_x_int_pp-Dx[1]); double dx_mm = min_mod(Dx[0]-psi_x_int_mm,psi_x_int_mm-Dx[1]); double dx_pm = min_mod(Dx[0]-psi_x_int_pm,psi_x_int_pm-Dx[1]); double dx_mp = min_mod(Dx[0]-psi_x_int_mp,psi_x_int_mp-Dx[1]); double x_c = b[0]*c[0]*dx_mm + b[1]*c[1]*dx_pp + b[0]*c[1]*dx_mp + b[1]*c[0]*dx_pm; double 
psi_y_int_pp = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[0]-Hamiltonian[2]) ) / (b[0]+b[1]) ; double psi_y_int_mm = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[5]-Hamiltonian[7]) ) / (b[0]+b[1]) ; double psi_y_int_pm = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[1]-Hamiltonian[3]) ) / (b[0]+b[1]) ; double psi_y_int_mp = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[4]-Hamiltonian[6]) ) / (b[0]+b[1]) ; double dy_pp = min_mod(Dy[0]-psi_y_int_pp,psi_y_int_pp-Dy[1]); double dy_mm = min_mod(Dy[0]-psi_y_int_mm,psi_y_int_mm-Dy[1]); double dy_pm = min_mod(Dy[0]-psi_y_int_pm,psi_y_int_pm-Dy[1]); double dy_mp = min_mod(Dy[0]-psi_y_int_mp,psi_y_int_mp-Dy[1]); double y_c = a[0]*c[0]*dy_mm + a[1]*c[1]*dy_pp + a[0]*c[1]*dy_mp + a[1]*c[0]*dy_pm; double psi_z_int_pp = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[0]-Hamiltonian[1]) ) / (c[0]+c[1]) ; double psi_z_int_mm = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[6]-Hamiltonian[7]) ) / (c[0]+c[1]) ; double psi_z_int_pm = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[2]-Hamiltonian[3]) ) / (c[0]+c[1]) ; double psi_z_int_mp = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[4]-Hamiltonian[5]) ) / (c[0]+c[1]) ; double dz_pp = min_mod(Dz[0]-psi_z_int_pp,psi_z_int_pp-Dz[1]); double dz_mm = min_mod(Dz[0]-psi_z_int_mm,psi_z_int_mm-Dz[1]); double dz_pm = min_mod(Dz[0]-psi_z_int_pm,psi_z_int_pm-Dz[1]); double dz_mp = min_mod(Dz[0]-psi_z_int_mp,psi_z_int_mp-Dz[1]); double z_c = a[0]*b[0]*dz_mm + a[1]*b[1]*dz_pp + a[0]*b[1]*dz_mp + a[1]*b[0]*dz_pm; numerical_Hamiltonian += (a[0]*a[1]*x_c + b[0]*b[1]*y_c + c[0]*c[1]*z_c) / denominator; // step[ind] = numerical_Hamiltonian * deltat[ind]; } // calculate surface redistance step with central upwind scheme with higher order term // AND WITHOUT boundary correction // now lsf represents the auxilary level set function(not the level set function) // inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors __global__ void surface_redistance_step_noboundaryfix(double 
* step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges); int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges); double_eno_derivative eno_dx = eno_derivative_field( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx); double Dx[2] = {eno_dx.sR, eno_dx.sL}; int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges); double_eno_derivative eno_dy = eno_derivative_field( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy); double Dy[2] = {eno_dy.sR, eno_dy.sL}; int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges); double_eno_derivative eno_dz = eno_derivative_field( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz); double Dz[2] = {eno_dz.sR, eno_dz.sL}; double Nx = nx[ind]; double Ny = ny[ind]; double Nz 
= nz[ind]; double Sign = sign[ind]; // different choices yield different upwind direction int const choice_x[8]={0,0,0,0,1,1,1,1}; int const choice_y[8]={0,0,1,1,0,0,1,1}; int const choice_z[8]={0,1,0,1,0,1,0,1}; double Hamiltonian[8]={0,0,0,0,0,0,0,0}; // a[0],a[1] is the magnitude of the maximum forward and backward // information propagation speed in the x direction. b for y and c for z direction double a[2]={0,0}; double b[2]={0,0}; double c[2]={0,0}; for(int i=0;i<8;i++){ double dr_x = Dx[choice_x[i]]; double dr_y = Dy[choice_y[i]]; double dr_z = Dz[choice_z[i]]; double H1,H2,H3, normal_d; advection_velocity(H1,H2,H3,normal_d,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz); Upwind_Hamiltonian(Hamiltonian[i],normal_d,Sign,dr_x,dr_y,dr_z); a[0] = max2(a[0],max2(H1,0)); b[0] = max2(b[0],max2(H2,0)); c[0] = max2(c[0],max2(H3,0)); a[1] = fabs(min2(-a[1],min2(H1,0))); b[1] = fabs(min2(-b[1],min2(H2,0))); c[1] = fabs(min2(-c[1],min2(H3,0))); } // calculate the numerical Hamiltonian //double epsilon=1e-6; double numerical_Hamiltonian = 0; double denominator = (a[0]+a[1])*(b[0]+b[1])*(c[0]+c[1]); int const choice_a[8]={1,1,1,1,0,0,0,0}; int const choice_b[8]={1,1,0,0,1,1,0,0}; int const choice_c[8]={1,0,1,0,1,0,1,0}; for(int i=0;i<8;i++){ double H_a = a[choice_a[i]]; double H_b = b[choice_b[i]]; double H_c = c[choice_c[i]]; numerical_Hamiltonian += H_a * H_b * H_c * Hamiltonian[i]; } numerical_Hamiltonian = numerical_Hamiltonian/denominator; numerical_Hamiltonian += - a[0]*a[1]*(Dx[0]-Dx[1])/(a[0]+a[1]) - b[0]*b[1]*(Dy[0]-Dy[1])/(b[0]+b[1]) - c[0]*c[1]*(Dz[0]-Dz[1])/(c[0]+c[1]); // // calculate higher order terms double psi_x_int_pp = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[0]-Hamiltonian[4]) ) / (a[0]+a[1]) ; double psi_x_int_mm = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[3]-Hamiltonian[7]) ) / (a[0]+a[1]) ; double psi_x_int_pm = ( (a[0]*Dx[0]+a[1]*Dx[1]) - (Hamiltonian[1]-Hamiltonian[5]) ) / (a[0]+a[1]) ; double psi_x_int_mp = ( (a[0]*Dx[0]+a[1]*Dx[1]) - 
(Hamiltonian[2]-Hamiltonian[6]) ) / (a[0]+a[1]) ; double dx_pp = min_mod(Dx[0]-psi_x_int_pp,psi_x_int_pp-Dx[1]); double dx_mm = min_mod(Dx[0]-psi_x_int_mm,psi_x_int_mm-Dx[1]); double dx_pm = min_mod(Dx[0]-psi_x_int_pm,psi_x_int_pm-Dx[1]); double dx_mp = min_mod(Dx[0]-psi_x_int_mp,psi_x_int_mp-Dx[1]); double x_c = b[0]*c[0]*dx_mm + b[1]*c[1]*dx_pp + b[0]*c[1]*dx_mp + b[1]*c[0]*dx_pm; double psi_y_int_pp = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[0]-Hamiltonian[2]) ) / (b[0]+b[1]) ; double psi_y_int_mm = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[5]-Hamiltonian[7]) ) / (b[0]+b[1]) ; double psi_y_int_pm = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[1]-Hamiltonian[3]) ) / (b[0]+b[1]) ; double psi_y_int_mp = ( (b[0]*Dy[0]+b[1]*Dy[1]) - (Hamiltonian[4]-Hamiltonian[6]) ) / (b[0]+b[1]) ; double dy_pp = min_mod(Dy[0]-psi_y_int_pp,psi_y_int_pp-Dy[1]); double dy_mm = min_mod(Dy[0]-psi_y_int_mm,psi_y_int_mm-Dy[1]); double dy_pm = min_mod(Dy[0]-psi_y_int_pm,psi_y_int_pm-Dy[1]); double dy_mp = min_mod(Dy[0]-psi_y_int_mp,psi_y_int_mp-Dy[1]); double y_c = a[0]*c[0]*dy_mm + a[1]*c[1]*dy_pp + a[0]*c[1]*dy_mp + a[1]*c[0]*dy_pm; double psi_z_int_pp = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[0]-Hamiltonian[1]) ) / (c[0]+c[1]) ; double psi_z_int_mm = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[6]-Hamiltonian[7]) ) / (c[0]+c[1]) ; double psi_z_int_pm = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[2]-Hamiltonian[3]) ) / (c[0]+c[1]) ; double psi_z_int_mp = ( (c[0]*Dz[0]+c[1]*Dz[1]) - (Hamiltonian[4]-Hamiltonian[5]) ) / (c[0]+c[1]) ; double dz_pp = min_mod(Dz[0]-psi_z_int_pp,psi_z_int_pp-Dz[1]); double dz_mm = min_mod(Dz[0]-psi_z_int_mm,psi_z_int_mm-Dz[1]); double dz_pm = min_mod(Dz[0]-psi_z_int_pm,psi_z_int_pm-Dz[1]); double dz_mp = min_mod(Dz[0]-psi_z_int_mp,psi_z_int_mp-Dz[1]); double z_c = a[0]*b[0]*dz_mm + a[1]*b[1]*dz_pp + a[0]*b[1]*dz_mp + a[1]*b[0]*dz_pm; numerical_Hamiltonian += (a[0]*a[1]*x_c + b[0]*b[1]*y_c + c[0]*c[1]*z_c) / denominator; // step[ind] = numerical_Hamiltonian * deltat[ind]; }
// (extraction artifact removed: stray dataset serial number separating concatenated files)
typedef unsigned int uint;

// Warp based summation (Hillis-Steele scan over one warp).
// ptr must point to shared memory; idx is the thread index within the block.
// Returns the inclusive or exclusive prefix sum for lane 'idx & 31'.
// NOTE(review): relies on implicit warp-synchronous execution over volatile
// shared memory (no barriers) -- pre-Volta idiom; on Volta+ this needs
// __syncwarp()/_sync intrinsics to be safe. Verify target architecture.
__device__ int inexclusive_scan_warp(volatile int *ptr, bool inclusive, const unsigned int idx, int value)
{
    const unsigned int lane = idx & 31;

    // guarded Hillis-Steele steps: lanes below the offset keep their value
    if (lane >= 1)  ptr[idx] = value = ptr[idx -  1] + value;
    if (lane >= 2)  ptr[idx] = value = ptr[idx -  2] + value;
    if (lane >= 4)  ptr[idx] = value = ptr[idx -  4] + value;
    if (lane >= 8)  ptr[idx] = value = ptr[idx -  8] + value;
    if (lane >= 16) ptr[idx] = value = ptr[idx - 16] + value;

    if(inclusive) return value;                     //Inclusive
    else return(lane > 0) ? ptr[idx-1] : 0;         //Exclusive
}

// Block-wide exclusive scan of ptr[0..N] done in shared memory (shmemESB),
// writing the scanned values back to ptr and the block total to *count.
// N is number of previous blocks in the count call.
__device__ void exclusive_scan_blockD(int *ptr, const int N, int *count, volatile int *shmemESB)
{
    const unsigned int idx    = threadIdx.x;
    const unsigned int lane   = idx & 31;
    const unsigned int warpid = idx >> 5;
    int value;

    //Read the data in shmem (zero-pad past the N+1 valid entries)
    if(idx < N + 1)
        shmemESB[idx] = value = ptr[idx];
    else
        shmemESB[idx] = value = 0;
    __syncthreads();

    // step 1: Intra-warp scan in each warp
    int val = inexclusive_scan_warp(&shmemESB[0], false, idx, value);
    __syncthreads();

    // step 2: Collect per-warp partial results (last lane holds the warp total)
    if (lane == 31) shmemESB[warpid] = shmemESB[idx];
    __syncthreads();

    value = shmemESB[idx];

    // step 3: Use 1st warp to scan per-warp results
    if (warpid == 0) inexclusive_scan_warp(&shmemESB[0], false, idx, value);
    __syncthreads();

    // step 4: Accumulate results from Steps 1 and 3
    if (warpid > 0) val = shmemESB[warpid - 1] + val;
    __syncthreads();

    // Step 5: Write and return the final result
    ptr[idx] = val;
    __syncthreads();
    // ptr[blockDim.x - 1] + lastValue; //count

    if(idx == 0) //Thread 0 saves the total count value
        *count = ptr[blockDim.x - 1];
}

//N is number of previous blocks in the count call
extern "C" __global__ void exclusive_scan_block(int *ptr, const int N, int *count)
{
    extern __shared__ int shmemESB[];
    exclusive_scan_blockD(ptr, N, count, shmemESB);
}

// Work distribution descriptor for the compact/split kernels.
typedef struct setupParams
{
    int jobs;                //Minimal number of jobs for each 'processor'
    int blocksWithExtraJobs; //Some 'processors' do one extra job, all with bid < bWEJ
    int extraElements;       //The elements that didn't fit completely
    int extraOffset;         //Start of the extra elements
} setupParams;

//Warp based prefix sum, using extra buffer space to remove the need for if statements
// __device__ int hillisSteele4(volatile int *ptr, int *count, uint val, const unsigned int idx)
__device__ int hillisSteele4(volatile int *ptr, int &count, uint val, const unsigned int idx)
{
    // const unsigned int lane = idx & 31;
    //We don't require lane here since idx is always < 32 in the way we start the blocks/threads
    //volatile int* tmp = ptr + (32 / 2);
    volatile int* tmp = &ptr[16];   // work on the upper half; lower half is the zero pad
    ptr[idx] = 0;
    tmp[idx] = val;

    //Since we set half the array to 0 we don't need ifs!
    tmp[idx] = val = tmp[idx -  1] + val;
    tmp[idx] = val = tmp[idx -  2] + val;
    tmp[idx] = val = tmp[idx -  4] + val;
    tmp[idx] = val = tmp[idx -  8] + val;
    tmp[idx] = val = tmp[idx - 16] + val;

    //Inclusive sum/count
    count = tmp[blockDim.x-1];
    //Exclusive index
    return (idx > 0) ? tmp[idx-1] : 0;
}

//Warp based prefix sum, using extra buffer space to remove the need for if statements.
// Variant of hillisSteele4 that receives the padded buffer directly: the caller
// passes a pointer 16 entries INTO the buffer, so tmp[idx-16] touches the pad.
// NOTE(review): same pre-Volta warp-synchronous caveat as inexclusive_scan_warp.
__device__ __forceinline__ int hillisSteele5(volatile unsigned int tmp[], int &count, uint val, const int idx)
{
    // const unsigned int lane = idx & 31;
    //We don't require lane here since idx is always < 32 in the way we start the blocks/threads
    tmp[idx-16] = 0;   // zero the pad below
    tmp[idx] = val;

    //Since we set half the array to 0 we don't need ifs!
    tmp[idx] = val = tmp[idx -  1] + val;
    tmp[idx] = val = tmp[idx -  2] + val;
    tmp[idx] = val = tmp[idx -  4] + val;
    tmp[idx] = val = tmp[idx -  8] + val;
    tmp[idx] = val = tmp[idx - 16] + val;

    //Inclusive sum/count
    count = tmp[blockDim.x-1];
    //Exclusive index
    return (idx > 0) ? tmp[idx-1] : 0;
}

// Warp-synchronous tree reduction of 32 shared-memory entries; the total ends
// up in shmem[0]. 'val' must equal shmem[tid] on entry.
__device__ void reduce_block2(int tid, volatile int *shmem, int val)
{
    //Reduce the 32 block
    if(tid < 16){
        shmem[tid] = val = val + shmem[tid+16];
        shmem[tid] = val = val + shmem[tid+8];
        shmem[tid] = val = val + shmem[tid+4];
        shmem[tid] = val = val + shmem[tid+2];
        shmem[tid] = val = val + shmem[tid+1];
    }
}

//Count the number of valid elements in this BLOCK.
// An element is "valid" when its most-significant bit is set (the >> 31 below).
// Each logical block is a (blockDim.x=32) x blockDim.y slice; bid is the
// logical block id. Results go to counts[bid]; the leftover-tail count goes to
// counts[gridDim.x*blockDim.y].
// NOTE(review): the shmemCC2 parameter is unused; a fixed static shared array
// shmemCC[128] is used instead.
__device__ void compact_countD(volatile uint2 *values, uint *counts, const int N, setupParams sParam, volatile int *shmemCC2)
{
    const int tid = threadIdx.x;
    const int bid = blockDim.y * blockIdx.x + threadIdx.y;

    volatile __shared__ int shmemCC[128];

    //Determine the parameters and loop over the particles
    int jobSize, offSet, count = 0;
    jobSize = sParam.jobs;
    if(bid < sParam.blocksWithExtraJobs)
        jobSize++;

    if(bid <= sParam.blocksWithExtraJobs)
        offSet = (sParam.jobs+1)*64*bid;
    else
    {
        offSet  = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
        offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
    }

    offSet /= 2; //Divide by two since we do double loads (uint2)

    for(int i=0; i < jobSize; i++)
    {
        count += (values[offSet + tid].x >> 31);  // MSB marks a valid element
        count += (values[offSet + tid].y >> 31);
        offSet += blockDim.x;
    }

    //Reduce to get the count of this block
    shmemCC[32*threadIdx.y + tid] = count;
    __syncthreads();
    reduce_block2(tid, &shmemCC[32*threadIdx.y], count);

    //Save the values / count of the current block
    if(threadIdx.x == 0)
        counts[bid] = shmemCC[32*threadIdx.y];

    //Block 0 handles any extra elements that couldn't be divided equally
    if(bid == 0)
    {
        //Here i use single element reads for ease of boundary conditions and steps
        count  = 0;
        offSet = sParam.extraOffset;
        uint* value2 = (uint*) values;
        for(int i=0 ; i < sParam.extraElements; i += blockDim.x)
        {
            if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items
            {
                count += (value2[offSet + i + tid] >> 31);
            }
        }
        //Reduce
        shmemCC[tid] = count;
        __syncthreads();
        reduce_block2(tid, &shmemCC[0], count);
        //Save the count
        if(tid == 0)
            counts[gridDim.x*blockDim.y] = shmemCC[0];
    }//end if bid==0
}//end compact_count

//Count the number of valid elements in this BLOCK
extern "C" __global__ void compact_count(volatile uint2 *values, uint *counts, const int N, setupParams sParam)
{
    extern __shared__ int shmemCC[];
    compact_countD(values, counts, N, sParam,shmemCC) ;
}

//The kernel that actually moves the data.
// Writes the low 31 bits of every valid element (MSB set) to 'output', packed,
// using the per-block offsets previously produced by compact_count + scan.
__device__ void compact_moveD( uint2 *values, uint *output, uint *counts, const int N, setupParams sParam, volatile unsigned int *shmemCM)
{
    //Walk the values of this block
    const int tid = threadIdx.x;
    const int bid = blockDim.y * blockIdx.x + threadIdx.y;

    //Determine the parameters and loop over the particles
    int jobSize, offSet;
    jobSize = sParam.jobs;
    if(bid < sParam.blocksWithExtraJobs)
        jobSize++;

    if(bid <= sParam.blocksWithExtraJobs)
        offSet = (sParam.jobs+1)*64*bid;
    else
    {
        offSet  = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
        offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
    }

    offSet /= 2; //Divide by two since we do double loads (uint2) TODO what happens if offSet is uneven...?

    int outputOffset = counts[bid];
    int curCount;

    //Do per step the prefix scan to determine the output locations
    for(int i=0; i < jobSize; i++)
    {
        uint2 validBase = values[offSet + tid];
        int value  = (validBase.x >> 31);
        value     += (validBase.y >> 31);

        int idx = hillisSteele5(&shmemCM[48*threadIdx.y+16], curCount, value, threadIdx.x);

        if((validBase.x >> 31))
        {
            output[idx + outputOffset] = validBase.x & 0x7FFFFFFF; // strip the valid flag
            idx++;
        }
        if((validBase.y >> 31))
        {
            output[idx + outputOffset] = validBase.y & 0x7FFFFFFF;
        }

        outputOffset += curCount;   //Increase the output offset
        offSet       += blockDim.x; //Step to the next N threads
    }

    //Block 0 handles any extra elements that couldn't be divided equally
    if(bid == 0)
    {
        //Here i use single element reads for ease of boundary conditions and steps
        offSet       = sParam.extraOffset;
        outputOffset = counts[gridDim.x*blockDim.y];
        uint* value2 = (uint*) values;
        for(int i=0; i < sParam.extraElements; i += blockDim.x)
        {
            uint value = 0;
            if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items
                value = value2[offSet + i + tid];

            int idx = hillisSteele5(&shmemCM[48*threadIdx.y+16], curCount, value >> 31, tid);

            if((offSet + i + tid) < N)
                if(value >> 31)
                    output[idx + outputOffset] = value & 0x7FFFFFFF;

            outputOffset += curCount; //Increase the output offset
        }
    }//end if bid==0
}//end compact_move

//The kernel that actually moves the data
extern "C" __global__ void compact_move( uint2 *values, uint *output, uint *counts, const int N, setupParams sParam)
{
    extern __shared__ unsigned int shmemCM[];
    compact_moveD(values, output, counts,N,sParam,shmemCM);
}

//The kernel that actually moves/splits the data.
// Like compact_move, but invalid elements are not dropped: valid items go to
// the left partition (outputOffset) and invalid items to the right partition
// (rightOutputOffset), i.e. a stable split on the MSB flag.
__device__ void split_moveD( uint2 *valid, uint *output, uint *counts, const int N, setupParams sParam, volatile unsigned int *shmemSM)
{
    //Walk the values of this block
    const int tid = threadIdx.x;
    const int bid = blockDim.y * blockIdx.x + threadIdx.y;

    //Determine the parameters and loop over the particles
    int jobSize, offSet;
    jobSize = sParam.jobs;
    if(bid < sParam.blocksWithExtraJobs)
        jobSize++;

    if(bid <= sParam.blocksWithExtraJobs)
        offSet = (sParam.jobs+1)*64*bid;
    else
    {
        offSet  = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
        offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
    }

    int outputOffset = counts[bid];

    //Get the start of the output offset of the invalid items
    //this is calculated as follows:
    //totalValidItems + startReadOffset - startOutputOffset
    //startReadOffset - startOutputOffset <- is the total number of invalid items from any blocks
    //before the current block
    int rightOutputOffset = counts[gridDim.x*blockDim.y+1];
    rightOutputOffset = rightOutputOffset + offSet - outputOffset;

    offSet /= 2; //Divide by two since we do double loads (uint2) TODO what happens if offSet is uneven...?

    int curCount;
    int idx, ridx;

    //Do per step the prefix scan to determine the output locations
    for(int i=0; i < jobSize; i++)
    {
        uint2 validBase = valid[offSet + tid];
        int value  = (validBase.x >> 31);
        value     += (validBase.y >> 31);

        idx  = hillisSteele5(&shmemSM[48*threadIdx.y+16], curCount, value, tid);
        ridx = threadIdx.x*2 - idx; //lane*2 - idx , *2 since we read 2 items a time

        if((validBase.x >> 31))
        {
            output[idx + outputOffset] = validBase.x & 0x7FFFFFFF;
            idx++;
        }
        else
        {
            output[ridx + rightOutputOffset] = validBase.x & 0x7FFFFFFF;
            ridx++;
        }

        if((validBase.y >> 31))
            output[idx + outputOffset] = validBase.y & 0x7FFFFFFF;
        else
            output[ridx + rightOutputOffset] = validBase.y & 0x7FFFFFFF;

        outputOffset      += curCount;      //Increase the output offset
        rightOutputOffset += 64 - curCount; //64 (32*2) since we do 2 items a time
        offSet            += blockDim.x;    //Step to the next N threads
    }

    //Block 0 handles any extra elements that couldn't be divided equally
    if(bid == 0)
    {
        //Here i use single element reads for ease of boundary conditions and steps
        offSet       = sParam.extraOffset;
        outputOffset = counts[gridDim.x*blockDim.y];
        rightOutputOffset = counts[gridDim.x*blockDim.y+1];
        rightOutputOffset = rightOutputOffset + offSet - outputOffset;
        uint* valid2 = (uint*) valid;
        for(int i=0; i < sParam.extraElements; i += blockDim.x)
        {
            uint value = 0;
            if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items
                value = valid2[offSet + i + tid];

            idx  = hillisSteele5(&shmemSM[48*threadIdx.y+16], curCount, value >> 31, tid);
            ridx = threadIdx.x - idx;

            if((offSet + i + tid) < N)
                if(value >> 31)
                    output[idx + outputOffset] = value & 0x7FFFFFFF;
                else
                    output[ridx + rightOutputOffset] = value & 0x7FFFFFFF;

            outputOffset      += curCount;    //Increase the output offset
            rightOutputOffset += 32-curCount; //32 since we do only 1 at a time
        }
    }//end if bid==0
}//end split_move

//The kernel that actually moves/splits the data
extern "C" __global__ void split_move( uint2 *valid, uint *output, uint *counts, const int N, setupParams sParam)
{
    extern __shared__ int unsigned shmemSM[];
    split_moveD(valid, output, counts, N, sParam, shmemSM);
}
// (extraction artifact removed: stray dataset serial number separating concatenated files)
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cmath>
using namespace std;

// Allocate and randomly initialize two m*n matrices a and b (values in
// [0, 10)); c is allocated but left uninitialized — it receives the GPU
// result later. Caller owns all three buffers (delete[] them).
void Generate(float **a, float **b, float **c, int m, int n)
{
    *a = new float[m * n];
    *b = new float[m * n];
    *c = new float[m * n];
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j) {
            (*a)[i * n + j] = 10.0 * rand() / (RAND_MAX + 1.0);
            (*b)[i * n + j] = 10.0 * rand() / (RAND_MAX + 1.0);
        }
}

// Verify the element-wise matrix-addition result c == a + b.
// Uses a mixed absolute/relative tolerance so entries whose true sum is
// near zero do not cause a divide-by-(almost-)zero false positive, which
// the original pure-relative check (diff / c[i]) was vulnerable to.
void Evaluate(float *a, float *b, float *c, int m, int n)
{
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j) {
            float expect = a[i * n + j] + b[i * n + j];
            float diff   = fabs(expect - c[i * n + j]);
            if (diff > 1e-4f * fmaxf(1.0f, fabs(expect))) {
                printf("Computation Error In %d Row %d Col!\n", i, j);
                return;
            }
        }
    printf("Computation Correct!\n");
}

// 1D element-wise addition (name kept for compatibility; despite "Mul"
// it performs c = a + b). Each thread handles a contiguous run of `sz`
// elements starting at its flat index * sz, with a bounds guard.
__global__ void MatrixMul_1d(float *a, float *b, float *c, int m, int n, int sz)
{
    int id = (threadIdx.x + blockIdx.x * blockDim.x) * sz;
    for (int i = id; i < id + sz; ++i)
        if (i < m * n)
            c[i] = a[i] + b[i];
}

// 2D element-wise addition (name kept for compatibility; it adds).
// Each thread handles an sz*sz tile anchored at (idy, idx), guarded
// against the matrix boundary.
__global__ void MatrixMul_2d(float *a, float *b, float *c, int m, int n, int sz)
{
    int idx = (threadIdx.x + blockIdx.x * blockDim.x) * sz;
    int idy = (threadIdx.y + blockIdx.y * blockDim.y) * sz;
    for (int y = idy; y < idy + sz; ++y)
        for (int x = idx; x < idx + sz; ++x)
            if (y < m && x < n) {
                int id = y * n + x;
                c[id] = a[id] + b[id];
            }
}

// Usage: prog m n blockX blockY gridDim sz
//   m, n     — matrix dimensions
//   blockX   — block size, x dimension
//   blockY   — block size, y dimension (1 means a 1D block)
//   gridDim  — grid dimensionality (1 or 2)
//   sz       — elements per thread (per axis for 2D; squared for 1D)
int main(int argc, char *argv[])
{
    // Guard argv accesses — the original dereferenced argv[1..6] unchecked.
    if (argc < 7) {
        fprintf(stderr, "Usage: %s m n blockX blockY gridDim sz\n", argv[0]);
        return 1;
    }

    float *a, *b, *c, *da, *db, *dc, t;
    int m = strtol(argv[1], NULL, 10);
    int n = strtol(argv[2], NULL, 10);

    // Randomly initialize the host matrices.
    Generate(&a, &b, &c, m, n);

    // Device allocations.
    cudaMalloc((void**)&da, m * n * sizeof(float));
    cudaMalloc((void**)&db, m * n * sizeof(float));
    cudaMalloc((void**)&dc, m * n * sizeof(float));

    // Copy operands to the device.
    cudaMemcpy((void*)da, (void*)a, m * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy((void*)db, (void*)b, m * n * sizeof(float), cudaMemcpyHostToDevice);

    // Launch configuration from the command line.
    int b1 = strtol(argv[3], NULL, 10);        // block size, x
    int b2 = strtol(argv[4], NULL, 10);        // block size, y (1 = 1D block)
    int grid_dim = strtol(argv[5], NULL, 10);  // grid dimensionality
    int sz = strtol(argv[6], NULL, 10);        // per-thread work size
    if (grid_dim == 1)
        sz = sz * sz;                          // 1D kernel consumes sz^2 elements per thread

    dim3 blockSize(b1, b2);
    dim3 gridSize;                             // stays (1,1,1) for unsupported combos, as before
    // 1D block, 1D grid
    if (b2 == 1 && grid_dim == 1)
        gridSize = dim3(ceil(ceil((float)m * n / sz) / blockSize.x));
    // 1D or 2D block with a 2D grid
    else if (grid_dim == 2)
        gridSize = dim3(ceil(ceil((float)n / sz) / blockSize.x),
                        ceil(ceil((float)m / sz) / blockSize.y));

    // Time the kernel with CUDA events.
    cudaEvent_t t1, t2;
    cudaEventCreate(&t1);
    cudaEventCreate(&t2);
    cudaEventRecord(t1, 0);

    if (grid_dim == 1)
        MatrixMul_1d<<<gridSize, blockSize>>>(da, db, dc, m, n, sz);
    else
        MatrixMul_2d<<<gridSize, blockSize>>>(da, db, dc, m, n, sz);

    // Surface launch-configuration errors (invalid block/grid sizes etc.)
    // that would otherwise fail silently.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaEventRecord(t2, 0);
    cudaEventSynchronize(t1);
    cudaEventSynchronize(t2);
    cudaEventElapsedTime(&t, t1, t2);
    printf("Time cost(CUDA): %.3f ms\n", t);
    cudaEventDestroy(t1);
    cudaEventDestroy(t2);

    // Copy the result back and release device memory.
    cudaMemcpy((void*)c, (void*)dc, m * n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);

    // Verify correctness against the host-side sum.
    Evaluate(a, b, c, m, n);

    // Free the host buffers allocated by Generate (leaked in the original).
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
5,318
/* * 2020.05.20 뷮ó ǥ * Chapter 7. Parallel Patterns : Convolution Example Code * Created by ̻ */ // ش κ ּ Ǯ Ͽ ֽñ ٶϴ. // * 7.4 ڵ ǥ Ͽ ۵մϴ. /// __syncthread() ϱ /// ٸ intellisense /// NVIDIA //#include "cuda_runtime.h" //#include "device_launch_parameters.h" // //// for syncthreads() //#ifdef __INTELLISENSE__ //// in here put whatever is your favorite flavor of intellisense workarounds //void __syncthreads(); //#endif //#include "device_functions.h" /// ũ ڵ /// ؾ ϴ ڵ /// NVIDIA //for __syncthreads() #include "cuda_runtime.h" #include "device_launch_parameters.h" #ifndef __CUDACC__ #define __CUDACC__ #endif #include "device_functions.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <chrono> #include <iostream> // 1D #define ARR_SIZE 7 #define MASK_SIZE 5 #define TILE_1D_SIZE 5 // 2D #define IMG_WIDTH 200 #define TILE_WIDTH 5 #define MASK_WIDTH 3 #define O_TILE_WIDTH IMG_WIDTH #define O_TILE_HEIGHT IMG_WIDTH #define PITCH ((TILE_WIDTH) + (MASK_WIDTH) - 1) // 迭 Լ void printArr(float* arr) { for (int i = 0; i < ARR_SIZE; i++) printf("%4.0f ", arr[i]); printf("\n"); } void printArr2D(float arr[O_TILE_WIDTH * O_TILE_HEIGHT]) { for (int i = 0; i < O_TILE_WIDTH; i++) { for (int j = 0; j < O_TILE_HEIGHT; j++) printf("%4.3f ", arr[i * O_TILE_WIDTH + j]); printf("\n"); } } /// 7.2 Simple Convolution 1D //__global__ void convolution_1D_basic_kernel(float *src, float *mask, float *dst, int Mask_Width, int Width) //{ // int i = blockIdx.x * blockDim.x + threadIdx.x; // // float result = 0.f; // int start_point = i - (Mask_Width / 2); // thread // for (int j = 0; j < Mask_Width; j++) { // if (start_point + j >= 0 && start_point + j < Width) { // result += src[start_point + j] * mask[j]; // } // } // dst[i] = result; //} // //int main() //{ // float hstSrc[ARR_SIZE] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f }; // float hstMask[MASK_SIZE] = { 3.f, 4.f, 5.f, 4.f, 3.f }; // float hstResult[ARR_SIZE] = { 0.f }; // // float* devSrc = nullptr; // cudaMalloc(&devSrc, 
sizeof(float) * ARR_SIZE); // cudaMemcpy(devSrc, hstSrc, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyHostToDevice); // // float* devMask = nullptr; // cudaMalloc(&devMask, sizeof(float) * MASK_SIZE); // cudaMemcpy(devMask, hstMask, sizeof(float) * MASK_SIZE, cudaMemcpyKind::cudaMemcpyHostToDevice); // // // float* devResult = nullptr; // cudaMalloc(&devResult, sizeof(float) * ARR_SIZE); // // std::cout << "start parallelizing" << std::endl; // std::cout << "elapsed in time: "; // std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); // // convolution_1D_basic_kernel << < 1, ARR_SIZE >> > (devSrc, devMask, devResult, MASK_SIZE, ARR_SIZE); // cudaDeviceSynchronize(); // // std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> duration = end - start; // std::cout << duration.count() * 1000 << std::endl; // std::cout << "----------------------------------" << std::endl; // // cudaMemcpy(hstResult, devResult, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost); // // printArr(hstResult); // // return 0; //} /// 7.3 Convolution 1D with Contant Mask //__constant__ float Mask[MASK_SIZE]; // //__global__ void convolution_1D_const_kernel(float *Src, float *Dst, int Mask_Width, int Width) //{ // int i = blockIdx.x * blockDim.x + threadIdx.x; // // float result = 0.f; // int Conv_start_point = i - (Mask_Width / 2); // thread // for (int j = 0; j < Mask_Width; j++) { // if (Conv_start_point + j >= 0 && Conv_start_point + j < Width) { // result += Src[Conv_start_point + j] * Mask[j]; // } // } // Dst[i] = result; //} // //int main() //{ // float hstSrc[ARR_SIZE] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f }; // float hstMask[MASK_SIZE] = { 3.f, 4.f, 5.f, 4.f, 3.f }; // float hstResult[ARR_SIZE] = { 0.f }; // // float* devSrc = nullptr; // cudaMalloc(&devSrc, sizeof(float) * ARR_SIZE); // cudaMemcpy(devSrc, hstSrc, sizeof(float) * ARR_SIZE, 
cudaMemcpyKind::cudaMemcpyHostToDevice); // // // cudaMemcpyToSymbol(Mask, hstMask, sizeof(float) * MASK_SIZE); // // // float* devResult = nullptr; // cudaMalloc(&devResult, sizeof(float) * ARR_SIZE); // // std::cout << "start parallelizing" << std::endl; // std::cout << "elapsed in time: "; // std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); // // convolution_1D_const_kernel << < 1, ARR_SIZE >> > (devSrc, devResult, MASK_SIZE, ARR_SIZE); // cudaDeviceSynchronize(); // // std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> duration = end - start; // std::cout << duration.count() * 1000 << std::endl; // std::cout << "----------------------------------" << std::endl; // // cudaMemcpy(hstResult, devResult, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost); // // printArr(hstResult); // // return 0; //} /// 7.4 Convolution 1D with Halo cells //__constant__ float Mask[MASK_SIZE]; //__global__ void convolution_1D_halo_kernel(float *Src, float *Dst, int Mask_Width, int Width) //{ // int i = blockIdx.x * blockDim.x + threadIdx.x; // __shared__ float tile_halo[TILE_1D_SIZE + MASK_SIZE - 1]; // // int n = Mask_Width / 2; // // int halo_index_left = (blockIdx.x - 1) * blockDim.x + threadIdx.x; // if (threadIdx.x >= blockDim.x - n) { // tile_halo[threadIdx.x - (blockDim.x - n)] // = (halo_index_left < 0) ? 0 : Src[halo_index_left]; // } // // tile_halo[n + threadIdx.x] = Src[i]; // // int halo_index_right = (blockIdx.x - 1) * blockDim.x + threadIdx.x; // if (threadIdx.x < n) { // tile_halo[n + blockDim.x + threadIdx.x] = // (halo_index_right >= Width) ? 
0 : Src[halo_index_right]; // } // __syncthreads(); // // float result = 0.f; // for (int j = 0; j < Mask_Width; j++) { // result += tile_halo[threadIdx.x + j] * Mask[j]; // } // Dst[i] = result; //} // //int main() //{ // float hstSrc[ARR_SIZE] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f }; // float hstMask[MASK_SIZE] = { 3.f, 4.f, 5.f, 4.f, 3.f }; // float hstResult[ARR_SIZE] = { 0.f }; // // float* devSrc = nullptr; // cudaMalloc(&devSrc, sizeof(float) * ARR_SIZE); // cudaMemcpy(devSrc, hstSrc, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyHostToDevice); // // // cudaMemcpyToSymbol(Mask, hstMask, sizeof(float) * MASK_SIZE); // // // float* devResult = nullptr; // cudaMalloc(&devResult, sizeof(float) * ARR_SIZE); // // std::cout << "start parallelizing" << std::endl; // std::cout << "elapsed in time: "; // std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); // // convolution_1D_halo_kernel << < ceil((float)ARR_SIZE / TILE_1D_SIZE), TILE_1D_SIZE >> > (devSrc, devResult, MASK_SIZE, ARR_SIZE); // cudaDeviceSynchronize(); // // std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> duration = end - start; // std::cout << duration.count() * 1000 << std::endl; // std::cout << "----------------------------------" << std::endl; // // cudaMemcpy(hstResult, devResult, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost); // // printArr(hstResult); // // return 0; //} /// 7.5 Convolution 1D using general caching //__constant__ float Mask[MASK_SIZE]; //__global__ void convolution_1D_caching_kernel(float *Src, float *Dst, int Mask_Width, int Width) //{ // int i = blockIdx.x * blockDim.x + threadIdx.x; // __shared__ float tile_sm[TILE_1D_SIZE]; // // tile_sm[threadIdx.x] = Src[i]; // // __syncthreads(); // // int This_tile_start_point = blockIdx.x * blockDim.x; // int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x; // int Conv_start_point 
= i - (Mask_Width / 2); // // float result = 0.f; // for (int j = 0; j < Mask_Width; j++) { // int N_index = Conv_start_point + j; // if (N_index >= 0 && N_index < Width) { // // // if ((N_index >= This_tile_start_point) // && (N_index < Next_tile_start_point)) { // result += tile_sm[threadIdx.x + j - (Mask_Width / 2)] * Mask[j]; // } // // else { // result += Src[N_index] * Mask[j]; // } // // // } // } // Dst[i] = result; //} // //int main() //{ // float hstSrc[ARR_SIZE] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f }; // float hstMask[MASK_SIZE] = { 3.f, 4.f, 5.f, 4.f, 3.f }; // float hstResult[ARR_SIZE] = { 0.f }; // // float* devSrc = nullptr; // cudaMalloc(&devSrc, sizeof(float) * ARR_SIZE); // cudaMemcpy(devSrc, hstSrc, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyHostToDevice); // // // cudaMemcpyToSymbol(Mask, hstMask, sizeof(float) * MASK_SIZE); // // // float* devResult = nullptr; // cudaMalloc(&devResult, sizeof(float) * ARR_SIZE); // // std::cout << "start parallelizing" << std::endl; // std::cout << "elapsed in time: "; // std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); // // convolution_1D_caching_kernel << < ceil((float)ARR_SIZE / TILE_1D_SIZE), TILE_1D_SIZE >> > (devSrc, devResult, MASK_SIZE, ARR_SIZE); // cudaDeviceSynchronize(); // // std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> duration = end - start; // std::cout << duration.count() * 1000 << std::endl; // std::cout << "----------------------------------" << std::endl; // // cudaMemcpy(hstResult, devResult, sizeof(float) * ARR_SIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost); // // printArr(hstResult); // // return 0; //} /// 7.6 Convolution 2D with Halo cells // å const float __restrict__ *M , // const float* __restrict__ M ùٸ // å Ұ κп ־ // ͳݿ ã ڵ带 Ͽ //#define W_SM ((TILE_WIDTH) + (MASK_WIDTH) - 1) //__global__ void convolution_2D_tiled_kernel(float *inputData, 
float *outputData, // int width, int pitch, int height, int channels, // const float* __restrict__ M) //{ // __shared__ float N_ds[W_SM][W_SM]; // // int maskRadius = MASK_WIDTH / 2; // // int dest = threadIdx.y * TILE_WIDTH + threadIdx.x; // int destY = dest / W_SM; //col of shared memory // int destX = dest % W_SM; //row of shared memory // int srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius; //row index to fetch data from input image // int srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius; //col index to fetch data from input image // // if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) // N_ds[destY][destX] = inputData[srcY * width + srcX]; // else // N_ds[destY][destX] = 0; // // // dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH; // destY = dest / W_SM; // destX = dest % W_SM; // srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius; // srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius; // if (destY < W_SM) { // if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) // N_ds[destY][destX] = inputData[srcY *width + srcX]; // else // N_ds[destY][destX] = 0; // } // // __syncthreads(); // // float output = 0.0f; // for (int y = 0; y < MASK_WIDTH; y++) // for (int x = 0; x < MASK_WIDTH; x++) // output += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * MASK_WIDTH + x]; // // int oY = blockIdx.y * TILE_WIDTH + threadIdx.y; // int oX = blockIdx.x * TILE_WIDTH + threadIdx.x; // // if (oY < height && oX < width) // outputData[oY * width + oX] = output; // // __syncthreads(); //} // //int main() //{ // float hstSrc[IMG_WIDTH * IMG_WIDTH]; // srand((unsigned int)time(NULL)); // // for (int i = 0; i < IMG_WIDTH; i++) { // for (int j = 0; j < IMG_WIDTH; j++) { // hstSrc[i * IMG_WIDTH + j] = 3;//rand() % 10; // } // } // float hstMask[MASK_WIDTH * MASK_WIDTH] // = { (1 / 9.f), (1 / 9.f), (1 / 9.f), // (1 / 9.f), (1 / 9.f), (1 / 9.f), // (1 / 9.f), (1 / 9.f), (1 / 9.f) // }; // 3*3 kernel mean filter // // float 
hstResult[O_TILE_WIDTH * O_TILE_WIDTH] = { 0.f }; // // float* devSrc; // cudaError_t status; // status = cudaMalloc(&devSrc, sizeof(float) * IMG_WIDTH * IMG_WIDTH); // cudaMemcpy(devSrc, hstSrc, sizeof(float) * IMG_WIDTH * IMG_WIDTH, cudaMemcpyKind::cudaMemcpyHostToDevice); // // float* devMask; // cudaMalloc(&devMask, sizeof(float) * MASK_WIDTH * MASK_WIDTH); // cudaMemcpy(devMask, hstMask, sizeof(float) * MASK_WIDTH * MASK_WIDTH, cudaMemcpyKind::cudaMemcpyHostToDevice); // // // float* devResult; // cudaMalloc(&devResult, sizeof(float) * O_TILE_WIDTH * O_TILE_WIDTH); // // // CPU // std::cout << "start Convolution CPU" << std::endl; // std::cout << "elapsed in time: "; // std::chrono::high_resolution_clock::time_point start1 = std::chrono::high_resolution_clock::now(); // for (int i = 0; i < IMG_WIDTH; i++) { // for (int j = 0; j < IMG_WIDTH; j++) { // // float res = 0.f; // int x = j - MASK_WIDTH / 2; // int y = i - MASK_WIDTH / 2; // for (int mW = 0; mW < MASK_WIDTH; mW++) { // for (int mH = 0; mH < MASK_WIDTH; mH++) { // // if ((x + mW) < 0 || (x + mW) >= IMG_WIDTH || (y + mH) < 0 || (y + mH) >= IMG_WIDTH) // continue; // // res += hstSrc[(y + mH)* IMG_WIDTH + (x + mW)] * hstMask[mH * MASK_WIDTH + mW]; // } // } // // hstResult[i * O_TILE_HEIGHT + j] = res; // // } // } // // std::chrono::high_resolution_clock::time_point end1 = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> duration1 = end1 - start1; // std::cout << duration1.count() * 1000 << std::endl; // std::cout << "----------------------------------" << std::endl << std::endl; // // //printArr2D(hstResult); // // memset(hstResult, 0, sizeof(float) * O_TILE_WIDTH * O_TILE_WIDTH); // // // // GPU // // std::cout << "start parallelizing" << std::endl; // std::cout << "elapsed in time: "; // std::chrono::high_resolution_clock::time_point start2 = std::chrono::high_resolution_clock::now(); // // dim3 gridDim = dim3(ceil((float)IMG_WIDTH / TILE_WIDTH), ceil((float)IMG_WIDTH / 
TILE_WIDTH)); // dim3 blockDim = dim3(TILE_WIDTH, TILE_WIDTH); // // convolution_2D_tiled_kernel << < gridDim, blockDim >> > (devSrc, devResult, IMG_WIDTH, PITCH, IMG_WIDTH, 1, devMask); // cudaDeviceSynchronize(); // // std::chrono::high_resolution_clock::time_point end2 = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> duration2 = end2 - start2; // std::cout << duration2.count() * 1000 << std::endl; // std::cout << "----------------------------------" << std::endl; // // cudaMemcpy(hstResult, devResult, sizeof(float) * O_TILE_WIDTH * O_TILE_WIDTH, cudaMemcpyKind::cudaMemcpyDeviceToHost); // // //printArr2D(hstResult); // // return 0; //}
5,319
#include "includes.h" __global__ void even(int *darr, int n) { int k = threadIdx.x; int t; k = k * 2; if (k <= n - 2) { if (darr[k] > darr[k + 1]) { t = darr[k]; darr[k] = darr[k + 1]; darr[k + 1] = t; } } }
5,320
#include <stdio.h> #include <cuda.h> #define INIT 1000 #define k 2 void random(int* x){ for(int i=0;i<INIT*k;i++){ x[i] = rand() % 10; } } __global__ void kernel(int *a, int *b, int *c){ // //計算區塊索引 // int block=(blockIdx.z*gridDim.y+blockIdx.y)*gridDim.x+blockIdx.x; // //計算執行緒索引 // int t=(threadIdx.z*blockDim.y+threadIdx.y)*blockDim.x+threadIdx.x; // //計算區塊中包含的執行緒數目 // int n=blockDim.x*blockDim.y*blockDim.z; // //執行緒在陣列中對應的位置 // int x=block*n+t; int x = blockIdx.x * blockDim.x + threadIdx.x; c[x] = a[x] * b[x]; } int main(void){ int a[INIT*k] = {0}; int b[INIT*k] = {0}; int c[INIT*k] = {0}; for(int i=0;i<INIT*k;i++){ printf("%d: %d * %d = %d\n", i, a[i], b[i], c[i]); } printf("\n"); int *GA, *GB, *GC; random(a); cudaMalloc((void**)&GA, k*INIT*sizeof(int)); cudaMemcpy(GA, a, sizeof(int)*INIT*k, cudaMemcpyHostToDevice); random(b); cudaMalloc((void**)&GB, INIT*k*sizeof(int)); cudaMemcpy(GB, b, sizeof(int)*INIT*k, cudaMemcpyHostToDevice); for(int i=0;i<INIT*k;i++){ printf("%d: %d * %d = %d\n", i, a[i], b[i], c[i]); } printf("\n"); cudaMalloc((void**)&GC, k*INIT*sizeof(int)); kernel<<<k, INIT>>>(GA,GB,GC); cudaMemcpy(c, GC, sizeof(int)*INIT*k, cudaMemcpyDeviceToHost); for(int i=0;i<INIT*k;i++){ printf("%d:\t %d * %d = %d\n", i, a[i], b[i], c[i]); } cudaFree(GA); cudaFree(GB); cudaFree(GC); return 0; }
5,321
/* test_kernel.cu it does not contain anything for the moment device AJOUTER +1 en parallèle à chaque élément du tableau */ //kernel ! __global__ void kernel_1(int* T_device) { T_device[0] += 1; } __global__ void inc_gpu(int* a, int n) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < n) a[id]++; }
5,322
#include <bits/stdc++.h> using namespace std; typedef vector<int> vi; typedef vector<long> vl; typedef vector<bool> vb; typedef vector<float> vd; typedef pair<int,int> ii; typedef pair<long, long> ll; typedef unordered_set<int> ui; const int MAX_BLOCK_SIZE = 1024; const int MAX_NUM_FEATURES = 32; const int MAX_CASE_PER_THREAD = 8; __global__ void cudaUpdateWeight(int N, int K, int N_step, float l_rate, float *X, float *Y, float *new_w, float *old_w, int *syn_use, int npt = 1){ // Naive way to do updates assert(blockDim.x <= MAX_BLOCK_SIZE); assert(npt <= MAX_CASE_PER_THREAD); int idx = blockIdx.x * blockDim.x + threadIdx.x; float X_tmp[MAX_CASE_PER_THREAD][MAX_NUM_FEATURES], Y_pred[MAX_CASE_PER_THREAD], Y_true[MAX_CASE_PER_THREAD]; int start = idx*npt, end = min(N, (idx+1)*npt); if(start >= N) return; for(int i=0;i<end-start;++i){ Y_true[i] = Y[start + i]; for(int j=0;j<K;++j){ X_tmp[i][j] = X[(start + i)*K + j]; } } for(int step = 0; step < N_step; ++step){ for(int i=0;i<end-start;++i){ Y_pred[i] = 0.; for(int j=0;j<K;++j) Y_pred[i] += X_tmp[i][j]*old_w[j]; } for(int j = 0; j < K; ++j) { float additive = 0.; for(int i=0;i<end-start;++i) additive += (Y_true[i] - Y_pred[i])*X_tmp[i][j]; additive *= l_rate/N; atomicAdd(new_w + j, additive); } if(idx < K) old_w[idx] = new_w[idx]; atomicAdd(syn_use, 0); } return; } __global__ void cudaGetError(int N, int K, float *X, float *Y, float *weights, float *dev_err, int npt = 1){ assert(blockDim.x <= MAX_BLOCK_SIZE); assert(npt <= MAX_CASE_PER_THREAD); int idx = blockIdx.x * blockDim.x + threadIdx.x; int start = idx*npt, end = min(N, (idx+1)*npt); if(start >= N) return; float blk_sum = 0.; for(int i=start; i<end ; ++i){ float diff = -Y[i]; for(int j=0;j<K;++j) diff += weights[j] * X[i*K + j]; blk_sum += diff * diff; } atomicAdd(dev_err, blk_sum); return; } class CudaLinearReg{ int N_train, N_test, N_feat; int B_size, N_block, *syn_use; // In this case, for convenient, consider weights and bias together. 
float **X_train, *Y_train, **X_test, *Y_test, *weights; float *dev_X, *dev_Y, *dev_w, *dev_old_w, *dev_err; inline float getRandNum(){ return float(rand())/RAND_MAX; } public: CudaLinearReg(int n_trian, int n_test, int n_feat, int block_size = 0):N_train(n_trian), N_test(n_test), N_feat(n_feat){ // Create serial storage for X_train X_train = new float* [N_train]; X_train[0] = new float [N_train * N_feat]; for(int i=1;i<N_train;++i) X_train[i] = X_train[i-1] + N_feat; // Create serial storage for Y_train Y_train = new float [N_train]; // Create serial storage for X_test X_test = new float* [N_test]; X_test[0] = new float [N_test * N_feat]; for(int i=1;i<N_test;++i) X_test[i] = X_test[i-1] + N_feat; // Create serial storage for Y_test Y_test = new float [N_test]; // Create serial storage for weights weights = new float [N_feat]; // Create memory for X, Y, weights, old_weights on GPU device cudaMalloc((void **)&dev_X, N_train*N_feat*sizeof(float)); cudaMalloc((void **)&dev_Y, N_train*sizeof(float)); cudaMalloc((void **)&dev_w, N_feat*sizeof(float)); cudaMalloc((void **)&dev_old_w, N_feat*sizeof(float)); cudaMalloc((void **)&dev_err, 1*sizeof(float)); cudaMalloc((void **)&syn_use, 1*sizeof(int)); if(block_size <= 0 || block_size>MAX_BLOCK_SIZE) B_size = min(MAX_BLOCK_SIZE, N_train); else B_size = block_size; N_block = (N_train + B_size - 1)/B_size; } void loadData(float **trX, float *trY, float **teX, float *teY){ // Make sure that all the dimensions are correct memcpy(X_train[0], trX[0], N_train*N_feat*sizeof(float)); memcpy(Y_train, trY, N_train*sizeof(float)); memcpy(X_test[0], teX[0], N_test*N_feat*sizeof(float)); memcpy(Y_test, teY, N_test*sizeof(float)); } void setBlocks(int block_size){ if(block_size <= 0 || block_size>MAX_BLOCK_SIZE) B_size = min(MAX_BLOCK_SIZE, N_train); else B_size = block_size; N_block = (N_train + B_size - 1)/B_size; } /* This function is no longer needed. 
void initData(vector<float> w){ for(int i=0;i<N_train;++i){ X_train[i][0] = 1.; for(int j=1;j<N_feat;++j) X_train[i][j] = 2.*getRandNum(); Y_train[i] = 0.; for(int j=0;j<N_feat;++j) Y_train[i] += w[j] * X_train[i][j]; Y_train[i] += 0.4*(getRandNum() - 0.5); } }*/ void initWeights(float amp_weight = 2.0){ // Initializing weights for(int i=0;i<N_feat;++i) weights[i] = getRandNum(); } void initGPU(){ // Initializing CUDA memories cudaMemcpy(dev_X, X_train[0], N_train*N_feat*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_Y, Y_train, N_train*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_w, weights, N_feat*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_old_w, weights, N_feat*sizeof(float), cudaMemcpyHostToDevice); } float getError(bool isTest = false, int npt = 1){ // We use mean squre root error here. // Here we use the vector pred_Y to record the predicted value float error = 0; int N = N_train; cudaMemcpy(dev_err, &error, 1*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_w, weights, N_feat*sizeof(float), cudaMemcpyHostToDevice); if(isTest){ N = N_test; cudaMemcpy(dev_X, X_test[0], N*N_feat*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_Y, Y_test, N*sizeof(float), cudaMemcpyHostToDevice); } else{ cudaMemcpy(dev_X, X_train[0], N*N_feat*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_Y, Y_train, N*sizeof(float), cudaMemcpyHostToDevice); } cudaGetError<<<N_block, B_size>>>(N, N_feat, dev_X, dev_Y, dev_w, dev_err, npt); cudaMemcpy(&error, dev_err, 1*sizeof(float), cudaMemcpyDeviceToHost); return error/N; } void cudaNaiveTrain(int N_step, float learning_rate, int npt = 1){ // Call the GPU update, which uses the Naive approach. 
initGPU(); cudaUpdateWeight<<<N_block, B_size>>>(N_train, N_feat, N_step, learning_rate, dev_X, dev_Y, dev_w, dev_old_w, syn_use, npt); cudaMemcpy(weights, dev_w, N_feat*sizeof(float), cudaMemcpyDeviceToHost); } vector<float> getWeights(){ vector<float> ans(N_feat); for(int i=0;i<N_feat;++i) ans[i] = weights[i]; return ans; } ~CudaLinearReg(){ delete X_train[0]; delete X_train; delete Y_train; delete X_test[0]; delete X_test; delete Y_test; delete weights; cudaFree(dev_X); cudaFree(dev_Y); cudaFree(dev_old_w); cudaFree(dev_w); } }; class TestLinearReg{ CudaLinearReg lrg_test; float ampli, **train_x, *train_y, **test_x, *test_y; int N_train, N_test, N_feat; vd weights; float linearFn(float *x){ float ans = 0.; for(int i=0;i<N_feat;++i) ans += x[i] * weights[i]; return ans; } /* float quardFn(const vd &x){ assert(weights.size() == 2*x.size()); for(int i=0;i<n_var;++i){ ans += weights[2*i]*x[i] + weights[2*i+1]*x[i]*x[i]; } return ans; }*/ inline float getRandNum(){ return float(rand())/RAND_MAX; } public: TestLinearReg(vd correct_w, int n_tr, int n_te, float Amp = 0.4): weights(correct_w), N_train(n_tr), N_test(n_te), ampli(Amp), lrg_test(n_tr , n_te, (int)correct_w.size()){ srand(1); N_feat = (int)weights.size(); assert(N_feat > 1); // Allocate memories train_x = new float* [N_train]; train_x[0] = new float [N_train*N_feat]; for(int i=1;i<N_train;++i) train_x[i] = train_x[i-1] + N_feat; train_y = new float [N_train]; test_x = new float* [N_test]; test_x[0] = new float [N_test*N_feat]; for(int i=1;i<N_test;++i) test_x[i] = test_x[i-1] + N_feat; test_y = new float [N_test]; // Show something on the screen: cerr << setprecision(6); cerr<<"We are testing the following function \n y = "<<weights[0]; for(int i=1;i<N_feat;++i) cerr<<" + "<<weights[i]<<"*x"<<to_string(i); cerr<<endl; } void generateDateSet(float A = 2.){ for(int i=0;i<N_train;++i){ for(int j=0; j<N_feat; ++j){ if(!j) train_x[i][j] = 1.; else train_x[i][j] = A*getRandNum(); } train_y[i] = 
linearFn(train_x[i]) + ampli * (getRandNum() - 0.5); } for(int i=0;i<N_test;++i){ for(int j=0; j<N_feat; ++j){ if(!j) test_x[i][j] = 1.; else test_x[i][j] = A*getRandNum(); } test_y[i] = linearFn(test_x[i]) + ampli * (getRandNum() - 0.5); } lrg_test.loadData(train_x, train_y, test_x, test_y); } void outputTrain(string filename){ std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0); freopen(filename.c_str(), "w", stdout); for(int i=0;i<N_train;++i){ for(int j=0;j<N_feat;++j) cout<<train_x[i][j]<<' '; cout<<train_y[i]<<endl; } } void outputTest(string filename){ std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0); freopen(filename.c_str(), "w", stdout); for(int i=0;i<N_test;++i){ for(int j=0;j<N_feat;++j) cout<<test_x[i][j]<<' '; cout<<test_y[i]<<endl; } } void showWeights(){ auto pred_wei = lrg_test.getWeights(); cerr << setprecision(6); cerr<<"Here is what we get \n y = "<<pred_wei[0]; for(int i=1;i<N_feat;++i) cerr<<" + "<<pred_wei[i]<<"*x"<<to_string(i); cerr<<endl; } vector<vector<float>> testModel(float l_rate, int n_chunk, int n_step){ // Testing the training process vector<vector<float>> ans; float steps = 0.; lrg_test.initWeights(); ans.push_back(vector<float>{steps, lrg_test.getError(false), lrg_test.getError(true)}); for(int i=1;i<=n_chunk;++i){ lrg_test.cudaNaiveTrain(n_step, l_rate); steps += n_step; ans.push_back(vector<float>{steps, lrg_test.getError(false), lrg_test.getError(true)}); } return ans; } ~TestLinearReg(){ delete train_x[0]; delete train_x; delete train_y; delete test_x[0]; delete test_x; delete test_y; } }; int main(int argc, char* argv[]){ std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0); float w1 = 1.7, w2 = 0.8, b = 2.2; int n_train = 7000, n_test = 3000; if(argc > 1) w1 = stod(argv[1]); if(argc > 2) w2 = stod(argv[2]); if(argc > 3) b = stod(argv[3]); if(argc > 4) n_train = stoi(argv[4]); if(argc > 5) n_test = stoi(argv[5]); TestLinearReg testLR(vector<float>{b, w1, w2}, n_train, n_test); cerr<<"Generating 
"<<n_train<<" training examples and "<<n_test<<" testing examples"<<endl; testLR.generateDateSet(); string trainfile = "para_train.txt", testfile = "para_test.txt", resultfile = "para_rslt.txt"; testLR.outputTrain(trainfile); testLR.outputTest(testfile); cerr<<"Data sets are stored in "<<trainfile<<" and "<<testfile<<endl; cerr<<"Finish generating data"<<endl; cerr<<"Training the model"<<endl; clock_t start_time = clock(), end_time; auto res = testLR.testModel(0.05, 100, 5); end_time = clock(); float comp_time = float(end_time - start_time)/CLOCKS_PER_SEC; cerr<< setprecision(8); cerr<<"=========================================Time Usage========================================="<<endl<<endl; cerr<<comp_time<<endl<<endl; cerr<<"============================================================================================"<<endl<<endl; cerr<<"Finish train the model"<<endl; testLR.showWeights(); ofstream ofile; ofile.open(resultfile.c_str()); ofile << setprecision(6); for(auto vec:res){ ofile<<vec[0]<<' '<<vec[1]<<' '<<vec[2]<<endl; } ofile.close(); cerr<<"The cost function results are stored in "<<resultfile<<endl; return 0; }
5,323
#include <stdio.h> // From Robert Crovella on StackOverflow.com // https://stackoverflow.com/questions/33150040/doubling-buffering-in-cuda-so-the-cpu-can-operate-on-data-produced-by-a-persiste/33158954#33158954 // with format cleanup for readability preference constexpr int num_iterations = 1000; constexpr size_t num_vals = 65536; constexpr int threads_per_block = 256; enum ready_status_e { not_full, full }; inline void show_ready_status( char* status_str, ready_status_e status ) { int status_index = static_cast<int>(status); const char *status_strings[] = { "Not Full", "Full" }; if ((status_str != nullptr)) { if ( (status_index > 0) && (status_index < 2)) { strncpy( status_str, status_strings[status_index], sizeof(status_strings[status_index]) ); } else { printf( "%s(): ERROR: status, %d was invalid\n", __func__, (int)status ); strncpy( status_str, "Invalid Status", 14 ); } } else { printf( "%s(): ERROR: status_str pointer was somehow nullptr\n", __func__ ); return; } } #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) __device__ volatile int d_block_num1 = 0; __device__ volatile int d_block_num2 = 0; __device__ volatile int d_iteration_num = 0; __device__ void my_compute_function(int *buffer, int idx, int data){ buffer[idx] = data; // put your work code here } inline __host__ __device__ bool is_odd( int val ) { return val & 1; } __global__ void testkernel(int *buffer1, int *buffer2, volatile ready_status_e *buffer1_ready_status, volatile ready_status_e *buffer2_ready_status, const int buffer_size, const int num_iterations) { // assumption of persistent block-limited kernel launch int idx = threadIdx.x+blockDim.x*blockIdx.x; int iteration_num = 0; // persistent until iterations complete while (iteration_num < num_iterations) 
{ // ping pong between buffers int *current_buffer = (iteration_num & 1)? buffer2:buffer1; volatile ready_status_e *current_buffer_ready_status = (is_odd(iteration_num)) ? (buffer2_ready_status) : (buffer1_ready_status); volatile int *current_d_block_num = (is_odd(iteration_num)) ? (&d_block_num2) : (&d_block_num1); int my_idx = idx; // don't overrun buffers on device while (iteration_num - d_iteration_num > 1); // wait for buffer to be consumed while (*current_buffer_ready_status == ready_status_e::full); // perform the "work" while (my_idx < buffer_size) { my_compute_function(current_buffer, my_idx, iteration_num); my_idx += gridDim.x*blockDim.x; // grid-striding loop } __syncthreads(); // wait for my block to finish __threadfence(); // make sure global buffer writes are "visible" if (!threadIdx.x) atomicAdd((int *)current_d_block_num, 1); // mark my block done if (!idx) { // am I the main block/thread? while (*current_d_block_num < gridDim.x); // wait for all blocks to finish *current_d_block_num = 0; *current_buffer_ready_status = ready_status_e::full; // indicate that buffer is ready __threadfence_system(); // push it out to mapped memory d_iteration_num++; } iteration_num++; } // end of while (iteration_num < num_iterations ) { // persistent until num_iterations complete } bool validate(const int *actual_vals, const int num_vals, const int expected_val) { for (int val_num = 0; val_num < num_vals; ++val_num) { if (actual_vals[val_num] != expected_val) { printf("mismatch at %d, was: %d, should be: %d\n", val_num, actual_vals[val_num], expected_val); return false; } } return true; } // end of bool validate(const int *data, const int dsize, const int expected){ int main(){ int *h_buffer1, *d_buffer1, *h_buffer2, *d_buffer2; volatile ready_status_e *buffer1_ready_status, *buffer2_ready_status; // buffer and "mailbox" setup cudaHostAlloc(&h_buffer1, num_vals*sizeof(int), cudaHostAllocDefault); cudaCheckErrors("cudaHostAlloc failed for h_buffer1"); 
cudaHostAlloc(&h_buffer2, num_vals*sizeof(int), cudaHostAllocDefault); cudaCheckErrors("cudaHostAlloc failed for h_buffer2"); cudaHostAlloc(&buffer1_ready_status, sizeof(int), cudaHostAllocMapped); cudaCheckErrors("cudaHostAlloc failed for buffer1_ready_status"); cudaHostAlloc(&buffer2_ready_status, sizeof(int), cudaHostAllocMapped); cudaCheckErrors("cudaHostAlloc failed for buffer2_ready_status"); cudaMalloc(&d_buffer1, num_vals*sizeof(int)); cudaCheckErrors("cudaMalloc failed for d_buffer1"); cudaMalloc(&d_buffer2, num_vals*sizeof(int)); cudaCheckErrors("cudaMalloc failed for d_buffer2"); cudaStream_t streamk, streamc; cudaStreamCreate(&streamk); cudaCheckErrors("cudaStreamCreate failed for streamk"); cudaStreamCreate(&streamc); cudaCheckErrors("cudaStreamCreate failed for streamc"); *buffer1_ready_status = ready_status_e::not_full; *buffer2_ready_status = ready_status_e::not_full; cudaMemset(d_buffer1, 0xFF, num_vals*sizeof(int)); cudaCheckErrors("cudaMemset (to 0xFF) failed for d_buffer1"); cudaMemset(d_buffer2, 0xFF, num_vals*sizeof(int)); cudaCheckErrors("cudaMemset (to 0xFF) failed for d_buffer2"); // inefficient crutch for choosing number of blocks int num_blocks = 0; cudaDeviceGetAttribute(&num_blocks, cudaDevAttrMultiProcessorCount, 0); cudaCheckErrors("get multiprocessor count failed"); testkernel<<<num_blocks, threads_per_block, 0, streamk>>>(d_buffer1, d_buffer2, buffer1_ready_status, buffer2_ready_status, num_vals, num_iterations); cudaCheckErrors("testkernel launch failed"); volatile ready_status_e *current_buffer_ready_status; int *h_current_buffer, *d_current_buffer; for (int iteration_num = 0; iteration_num < num_iterations; ++iteration_num) { if (is_odd(iteration_num)) { // ping pong on the host side current_buffer_ready_status = buffer2_ready_status; h_current_buffer = h_buffer2; d_current_buffer = d_buffer2; } else { current_buffer_ready_status = buffer1_ready_status; h_current_buffer = h_buffer1; d_current_buffer = d_buffer1; } // int qq = 0; 
// add for failsafe - otherwise a machine failure can hang while ((*current_buffer_ready_status)!= ready_status_e::full); // use this for a failsafe: // if (++qq > 1000000) { // printf("current_buffer_ready_status = %d\n", *current_buffer_ready_status); // return 0; // } // wait for buffer to be full; cudaMemcpyAsync(h_current_buffer, d_current_buffer, num_vals*sizeof(int), cudaMemcpyDeviceToHost, streamc); cudaStreamSynchronize(streamc); cudaCheckErrors("cudaMemcpyAsync failed for d_current_buffer to h_current_buffer"); *current_buffer_ready_status = ready_status_e::not_full; // release buffer back to device if (!validate(h_current_buffer, num_vals, iteration_num)) { printf("validation of h_current_buffer failed at iter %d\n", iteration_num); exit(1); } } // end of for (int iteration_num = 0; iteration_num < num_iterations; ++iteration_num) { printf("Completed %d iterations successfully\n", num_iterations); }
5,324
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

//final kernel
//INSERT KERNEL CODE HERE
//__device__ __constant__ float test_image[1024];

// 8 convolution filters of 10x10 coefficients each (8 * 100 = 800 floats).
__constant__ float conv[800];
// One bias term per filter.
__constant__ float bias[8];

// Convolves a 28x28 input image with one 10x10 filter per block
// (blockIdx.x selects the filter), then applies bias + ReLU.
// Output layout is interleaved: out[8 * (row * 28 + col) + filter].
// Assumes a launch of 8 blocks of dim3(28, 28) threads — TODO confirm at call site.
__global__ void convolution(float* test_image, float* out)
{
    int convid = blockIdx.x;      // which of the 8 filters this block applies
    int row = threadIdx.y;
    int col = threadIdx.x;
    int index = 8 * (row * 28 + col);

    // Stage the whole image in shared memory; each thread loads one pixel.
    __shared__ float Matrix1[28][28];
    Matrix1[row][col] = test_image[row * 28 + col];
    __syncthreads();

    float value = 0.0f;
    // 10x10 window around (row, col): offsets -4..5 map to filter taps 0..9.
    // Out-of-image taps are skipped (implicit zero padding).
    for (int i = -4; i <= 5; i++) {
        for (int j = -4; j <= 5; j++) {
            int ModRow = row + i;
            int ModCol = col + j;
            if (ModRow >= 0 && ModCol >= 0 && ModRow < 28 && ModCol < 28) {
                int temp = (i + 4) * 10 + j + 4;
                value += Matrix1[ModRow][ModCol] * conv[temp + 100 * convid];
            }
        }
    }
    // Bias + ReLU.
    out[index + convid] = (value + bias[convid]) >= 0 ? (value + bias[convid]) : 0.0f;
}

// Dense layer with tiled shared-memory input: for output column Col,
// accumulates sum_k imageData[k] * multiplier[k * 512 + Col] over 49 tiles of
// 128 elements, then applies bias1 + ReLU. multiplier_height is unused (kept
// for interface compatibility). Assumes blockDim.x == 128 — TODO confirm.
__global__ void Multiply0(float *imageData, float* multiplier, int multiplier_height,
                          float* matrixresult, float* bias1)
{
    __shared__ float ds_M[128];
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    double Pvalue = 0.0;
    for (int m = 0; m < 49; m++) {
        // Each thread loads one element of the current 128-wide input tile.
        ds_M[threadIdx.x] = imageData[128 * m + threadIdx.x];
        __syncthreads();
        for (int k = 0; k < 128; k++)
            Pvalue += ds_M[k] * multiplier[(m * 128 + k) * 512 + Col];
        // BUG FIX: barrier before the next iteration overwrites ds_M. Without
        // it, a thread that finishes its k-loop early could clobber tile data
        // that slower threads in the block are still reading (data race).
        __syncthreads();
    }
    matrixresult[Col] = (Pvalue + bias1[Col]) >= 0 ? (Pvalue + bias1[Col]) : 0;
}

// Output layer: matrixresult[col] = sum_row multiplier[row*10+col] * imageData[row]
// + bias[col] (no ReLU). One block per output column (col = blockIdx.x); each
// of the 512 threads contributes one product, thread 0 reduces serially.
// Note only column blockIdx.x of ds_M is actually used by each block.
__global__ void Multiply1(float *imageData, float* multiplier, float* matrixresult,
                          float* bias)
{
    __shared__ float ds_M[512][10];
    int col = blockIdx.x;
    int row = threadIdx.x;
    ds_M[row][col] = multiplier[row * 10 + col] * imageData[row];
    __syncthreads();
    if (threadIdx.x == 0) {
        // Serial reduction of the 512 partial products by thread 0.
        float value = 0.0;
        for (int i = 0; i < 512; i++)
            value += ds_M[i][blockIdx.x];
        matrixresult[col] = value + bias[col];
    }
}

// Element-wise scaling of a 6272 x 512 matrix by a per-column vector:
// matrixresult[r][c] = multiplier[r][c] * imageData[c].
// Each thread handles 7 consecutive rows; each block handles 2 adjacent
// columns (col = 2 * blockIdx.x). Assumes gridDim.x <= 256 so col+1 < 512 —
// TODO confirm launch configuration.
__global__ void Mul0(float *imageData, float* multiplier, float* matrixresult)
{
    int row = 7 * threadIdx.x;
    int col = 2 * blockIdx.x;
    for (int i = 0; i < 7; i++) {
        if ((row + i) < 6272) {
            matrixresult[(row + i) * 512 + col]     = multiplier[(row + i) * 512 + col]     * imageData[col];
            matrixresult[(row + i) * 512 + col + 1] = multiplier[(row + i) * 512 + col + 1] * imageData[col + 1];
        }
    }
}
5,325
#include "global_defines.cuh"

// Host-side (CPU) bounce-back step for the D3Q19 lattice: on every obstacle
// node, each distribution is replaced by its opposite-direction counterpart
// from the helper grid, implementing the no-slip boundary condition.
void LBM::bounceback(){
    /*Fluid densities are rotated. By the next propagation step, this * * results in a bounce back from obstacle nodes.*/
    /* .......bounce back from obstacles: this is the no-slip boundary- condition. The velocity vector of all fluid densities is inverted, so all the fluid densities will be sent back to the node where they were located before the last propagation step, but with opposite velocity vector ... there exist lots of other possibilities. */
    // This routine works on host arrays, so fetch data back if it is on the GPU.
    if(data_location==GPU)
        copy_data_from_device_to_host();
    int x,y,z;
    //.....loop over all nodes
    for( z = 0; z< lz ; ++z){
        for( y = 0; y< ly ; ++y){
            for( x = 0; x< lx ; ++x){
                //.........consider only obstacle nodes
                if (obstacles[index(z,y,x)]==1){
                    //...........rotate all densities and write back to node
                    // Each assignment pairs opposite lattice directions:
                    // (1<->3), (2<->4), (5<->6), (7<->9), (8<->10),
                    // (11<->13), (12<->14), (15<->17), (16<->18).
                    D3.Q1[index(z,y,x)] = D3_hlp.Q3[index(z,y,x)];
                    D3.Q2[index(z,y,x)] = D3_hlp.Q4[index(z,y,x)];
                    D3.Q3[index(z,y,x)] = D3_hlp.Q1[index(z,y,x)];
                    D3.Q4[index(z,y,x)] = D3_hlp.Q2[index(z,y,x)];
                    D3.Q5[index(z,y,x)] = D3_hlp.Q6[index(z,y,x)];
                    D3.Q6[index(z,y,x)] = D3_hlp.Q5[index(z,y,x)];
                    D3.Q7[index(z,y,x)] = D3_hlp.Q9[index(z,y,x)];
                    D3.Q8[index(z,y,x)] = D3_hlp.Q10[index(z,y,x)];
                    D3.Q9[index(z,y,x)] = D3_hlp.Q7[index(z,y,x)];
                    D3.Q10[index(z,y,x)] = D3_hlp.Q8[index(z,y,x)];
                    D3.Q11[index(z,y,x)] = D3_hlp.Q13[index(z,y,x)];
                    D3.Q12[index(z,y,x)] = D3_hlp.Q14[index(z,y,x)];
                    D3.Q13[index(z,y,x)] = D3_hlp.Q11[index(z,y,x)];
                    D3.Q14[index(z,y,x)] = D3_hlp.Q12[index(z,y,x)];
                    D3.Q15[index(z,y,x)] = D3_hlp.Q17[index(z,y,x)];
                    D3.Q16[index(z,y,x)] = D3_hlp.Q18[index(z,y,x)];
                    D3.Q17[index(z,y,x)] = D3_hlp.Q15[index(z,y,x)];
                    D3.Q18[index(z,y,x)] = D3_hlp.Q16[index(z,y,x)];
                }
            }
        }
    }
#ifdef DEBUG
    cout << " #LBM bounceback OK!" << endl;
#endif
}

// GPU bounce-back for ONE distribution direction: copies source_data[tid]
// (the opposite-direction population) into destination_data[tid], but only
// for obstacle nodes (obstacles[tid] != 0). One thread per lattice node;
// dynamic shared memory (blockDim.x elements) is used as a staging buffer.
// NOTE(review): source_data[tid] is read into shared memory BEFORE the
// tid < end_of_memory guard — if the grid overshoots end_of_memory this is
// an out-of-bounds read. Presumably the launch configuration guarantees
// gridDim.x * blockDim.x == end_of_memory; confirm at the call sites.
__global__ void bounceback_kernel_v4_shared(const int end_of_memory, const CUDA_FLOATING *source_data, CUDA_FLOATING *destination_data, const int *obstacles){
    /*Fluid densities are rotated. By the next propagation step, this * * results in a bounce back from obstacle nodes.*/
    /* .......bounce back from obstacles: this is the no-slip boundary- condition. The velocity vector of all fluid densities is inverted, so all the fluid densities will be sent back to the node where they were located before the last propagation step, but with opposite velocity vector ... there exist lots of other possibilities. */
    const int tid=blockIdx.x*blockDim.x+threadIdx.x;
    extern __shared__ CUDA_FLOATING shared_buffer[];
    shared_buffer[threadIdx.x]=source_data[tid];
    __syncthreads();
    if (tid<end_of_memory and obstacles[tid]){
        destination_data[tid]=shared_buffer[threadIdx.x];
    }
}

// GPU bounce-back step: launches bounceback_kernel_v4_shared once per
// distribution direction, with source/destination paired exactly as in the
// host-side bounceback() above (opposite D3Q19 directions swapped).
void LBM::cuda_bounceback(){
    // The kernels work on device arrays, so push data if it is on the CPU.
    if(data_location==CPU)
        copy_data_from_host_to_device();
    dim3 threads_type2(threads_for_streaming_collision_and_relaxation,1,1);
    dim3 grid_type2(blocks_for_streaming_collision_and_relaxation,1,1);
    // One launch per direction; argument order is (node count, source, dest, obstacle map).
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q3, D3_d.Q1, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q4, D3_d.Q2, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q1, D3_d.Q3, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q2, D3_d.Q4, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q6, D3_d.Q5, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q5, D3_d.Q6, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q9, D3_d.Q7, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q10, D3_d.Q8, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q7, D3_d.Q9, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q8, D3_d.Q10, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q13, D3_d.Q11, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q14, D3_d.Q12, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q11, D3_d.Q13, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q12, D3_d.Q14, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q17, D3_d.Q15, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q18, D3_d.Q16, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q15, D3_d.Q17, obstacles_d);
    bounceback_kernel_v4_shared<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>(lattice_nodes, D3_hlp_d.Q16, D3_d.Q18, obstacles_d);
    // Block until all 18 direction swaps have completed.
    cudaDeviceSynchronize();
#ifdef DEBUG
    cout << " #LBM bounceback OK!" << endl;
#endif
}
5,326
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <device_launch_parameters.h>

#define SIZE 5
#define BLOCK_DIM 5

// Element-wise matrix addition: d_P = d_M + d_N for a SIZE x SIZE matrix.
// One thread per element; guard keeps extra threads idle.
__global__ void MatrixAddition(float* d_M, float* d_N, float* d_P)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int index = col + row * SIZE;

    if (col < SIZE && row < SIZE) {
        d_P[index] = d_M[index] + d_N[index];
    }
}

// Element-wise matrix subtraction: d_P = d_M - d_N for a SIZE x SIZE matrix.
__global__ void MatrixSubtraction(float* d_M, float* d_N, float* d_P)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int index = col + row * SIZE;

    if (col < SIZE && row < SIZE) {
        d_P[index] = d_M[index] - d_N[index];
    }
}

// Matrix product: d_P = d_M * d_N for width x width row-major matrices.
// BUG FIX: the accumulator was declared `int`, silently truncating the
// fractional part of every float partial product; it is now `float`.
// The store to d_P is also hoisted out of the inner loop (one write per
// element instead of `width` redundant writes).
__global__ void MatrixMultiplication(float* d_M, float* d_N, float* d_P, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if (col < width && row < width) {
        float value = 0.0f;
        for (int i = 0; i < width; ++i) {
            value += d_M[row * width + i] * d_N[i * width + col];
        }
        d_P[col + row * width] = value;
    }
}

// CPU reference: element-wise addition P = M + N.
__host__ void MatAddition(float M[SIZE][SIZE], float N[SIZE][SIZE], float P[SIZE][SIZE], int width)
{
    for ( int i = 0; i < width; ++i ) {
        for ( int j = 0; j < width; ++j ) {
            P[j][i] = M[j][i] + N[j][i];
        }
    }
}

// CPU reference: element-wise subtraction P = M - N.
__host__ void MatSubtraction(float M[SIZE][SIZE], float N[SIZE][SIZE], float P[SIZE][SIZE], int width)
{
    for ( int i = 0; i < width; ++i ) {
        for ( int j = 0; j < width; ++j ) {
            P[j][i] = M[j][i] - N[j][i];
        }
    }
}

// CPU reference: matrix product P += M * N (P must be zero-initialized).
__host__ void MatMultiplication(float M[SIZE][SIZE], float N[SIZE][SIZE], float P[SIZE][SIZE], int width)
{
    for ( int i = 0; i < width; ++i ) {
        for ( int j = 0; j < width; ++j ) {
            for ( int k = 0; k < width; ++k ) {
                P[j][i] += M[j][k] * N[k][i];
            }
        }
    }
}

// Demo driver: runs add/sub/mul on the GPU and the same operations on the
// CPU reference implementations, then prints the GPU product.
int main()
{
    float m[SIZE][SIZE], n[SIZE][SIZE], pa[SIZE][SIZE], ps[SIZE][SIZE], pm[SIZE][SIZE];
    float pha[SIZE][SIZE], phs[SIZE][SIZE], phm[SIZE][SIZE];
    float *d_ma, *d_na, *d_pa;
    float *d_ms, *d_ns, *d_ps;
    float *d_mm, *d_nm, *d_pm;
    int size = SIZE * SIZE * sizeof(float);

    // Initialize inputs and zero all result matrices.
    for ( int i = 0; i < SIZE; ++i ) {
        for ( int j = 0; j < SIZE; ++j ) {
            m[j][i] = j + i * SIZE;
            n[j][i] = j + i * SIZE;
            pa[j][i] = ps[j][i] = pm[j][i] = pha[j][i] = phs[j][i] = phm[j][i] = 0;
        }
    }

    // Memory allocation (one input/output triple per operation).
    cudaMalloc(( void**) &d_ma, size );
    cudaMalloc(( void**) &d_na, size );
    cudaMalloc(( void**) &d_pa, size );
    cudaMalloc(( void**) &d_ms, size );
    cudaMalloc(( void**) &d_ns, size );
    cudaMalloc(( void**) &d_ps, size );
    cudaMalloc(( void**) &d_mm, size );
    cudaMalloc(( void**) &d_nm, size );
    cudaMalloc(( void**) &d_pm, size );

    cudaMemcpy( d_ma, m, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_na, n, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_ms, m, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_ns, n, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_mm, m, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_nm, n, size, cudaMemcpyHostToDevice );

    // Single 5x5 block covers the whole 5x5 matrix.
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid(1, 1);

    // Device Operations
    MatrixAddition<<<dimGrid, dimBlock>>>(d_ma, d_na, d_pa);
    MatrixSubtraction<<<dimGrid, dimBlock>>>(d_ms, d_ns, d_ps);
    MatrixMultiplication<<<dimGrid, dimBlock>>>(d_mm, d_nm, d_pm, SIZE);

    cudaMemcpy( pa, d_pa, size, cudaMemcpyDeviceToHost );
    cudaMemcpy( ps, d_ps, size, cudaMemcpyDeviceToHost );
    cudaMemcpy( pm, d_pm, size, cudaMemcpyDeviceToHost );

    // Host Operations
    MatAddition(m, n, pha, SIZE);
    MatSubtraction(m, n, phs, SIZE);
    MatMultiplication(m, n, phm, SIZE);

    for ( int i = 0; i < SIZE; ++i ) {
        for ( int j = 0; j < SIZE; ++j ) {
            //std::cout << i << " " << j << " " << pa[j][i] << std::endl;
            //std::cout << i << " " << j << " " << ps[j][i] << std::endl;
            std::cout << i << " " << j << " " << pm[j][i] << std::endl;
            //std::cout << i << " " << j << " " << pha[j][i] << std::endl;
            //std::cout << i << " " << j << " " << phs[j][i] << std::endl;
            //std::cout << i << " " << j << " " << phm[j][i] << std::endl;
        }
    }

    cudaFree(d_ma); cudaFree(d_na); cudaFree(d_pa);
    cudaFree(d_ms); cudaFree(d_ns); cudaFree(d_ps);
    cudaFree(d_mm); cudaFree(d_nm); cudaFree(d_pm);

    system("pause");
    return 0;
}
5,327
/*
 author: fredy m, uaem
 desonses@gmail.com — for further comments
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_fp16.h"
/*
 The following example shows the differences and similarities between
 reserving memory on the host and on the device. Space is reserved for a
 square matrix of NxN float elements, initialized on the host with random
 values (0 or 1), and the data is round-tripped: host -> device A ->
 device B -> host B.
*/
#define N 8

// MAIN: host-side entry point.
int main(int argc, char** argv)
{
    // declarations
    float *hstA_matriz;   // host source matrix
    float *devA_matriz;   // device copy of A
    float *hstB_matriz;   // host buffer receiving the round-tripped data
    float *devB_matriz;   // device-to-device copy target

    // host allocations
    hstA_matriz = (float*)malloc(N*N * sizeof(float));
    hstB_matriz = (float*)malloc(N*N * sizeof(float));
    // device allocations
    cudaMalloc((void**)&devA_matriz, N*N * sizeof(float));
    cudaMalloc((void**)&devB_matriz, N*N * sizeof(float));

    // data initialization: random 0/1 values
    srand((int)time(NULL));
    for (int i = 0; i < N*N; i++) {
        hstA_matriz[i] = (float)(rand() % 2);
    }

    // data copies: host -> device, device -> device, device -> host
    cudaMemcpy(devA_matriz, hstA_matriz, N*N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(devB_matriz, devA_matriz, N*N * sizeof(float), cudaMemcpyDeviceToDevice);
    cudaMemcpy(hstB_matriz, devB_matriz, N*N * sizeof(float), cudaMemcpyDeviceToHost);

    // output
    printf("matriz A\n");
    for (int j = 0; j < N*N; j++) {
        printf("%f, ",hstA_matriz[j]);
    }
    printf("\n\n");
    printf("matriz B\n");
    for (int k = 0; k < N*N; k++) {
        printf("%f, ", hstB_matriz[k]);
    }

    cudaFree(devA_matriz);
    cudaFree(devB_matriz);
    // BUG FIX: the host buffers were leaked; free them as well.
    free(hstA_matriz);
    free(hstB_matriz);

    printf("\npulsa INTRO para finalizar...");
    // BUG FIX: the original called fflush(stdin), which is undefined behavior
    // per the C standard (fflush is only defined for output streams); simply
    // wait for a key press instead.
    char tecla = getchar();
    (void)tecla;
    return 0;
}
5,328
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <assert.h>
#include <ctype.h>
#include <sys/time.h>

//number of threads PER BLOCK
#define NTHREADS 1024
#define NBLOCKS 1024
#define TOTALTHREADS (NTHREADS*NBLOCKS)

// uncomment the below line to see output of prime numbers
#define DEBUG

void usage();
__global__ void Sieve(char* arr, unsigned long long max, unsigned long long start);
void printResult(char* arr, unsigned long long max);

// Sieve of Eratosthenes on the GPU: after the kernels run, array[i] == 1 iff
// i is prime, for all i < max (max taken from argv[1]).
int main(int argc, char** argv){
    unsigned long long max;
    int ret;
    char* array;
    char* cuda_array;

    //makes sure user has correct input
    if (argc != 2){
        usage();
    }
    max = atoll(argv[1]);
    // BUG FIX: array[0]/array[1] are written below, so max < 2 would write
    // out of bounds; reject such input up front.
    if (max < 2){
        usage();
    }

    //creates array of size user input on CPU
    array = (char*)malloc(sizeof(char) * max);
    if(array == NULL){
        printf("malloc failed\n");
        exit(1);
    }

    //creates array of size user input on gpu
    ret = cudaMalloc(&cuda_array, sizeof(char) * max);
    if (ret != cudaSuccess){
        // BUG FIX: max is unsigned long long, so the correct conversion is
        // %llu (the original %lld misreports values above LLONG_MAX).
        printf("cudaMalloc of size %llu failed to return %d\n", max, ret);
        exit(1);
    }

    //memset all values to 1 which is the signature of being prime
    memset(array, 1, max);
    // 0 and 1 are not prime numbers
    array[0] = 0;
    array[1] = 0;

    //copy contents of CPU array into GPU array
    cudaMemcpy((void*)cuda_array, array, max, cudaMemcpyHostToDevice);

    unsigned long long sqrtMax;
    sqrtMax = (unsigned long long)(sqrt((double)max));

    /* can only summon so many threads at once, so the for loop lets us
       cover all bases up to sqrt(max) in batches of TOTALTHREADS */
    for (unsigned long long i=0; i*TOTALTHREADS < sqrtMax; i++) {
        Sieve<<<NBLOCKS, NTHREADS>>>((char *)cuda_array, max, i*TOTALTHREADS);
    }

    //copy gpu array data to cpu array (cudaMemcpy synchronizes with the kernels)
    cudaMemcpy(array, (void*)cuda_array, max, cudaMemcpyDeviceToHost);

#ifdef DEBUG
    printResult((char*)array, max);
#endif

    //free gpu array and cpu array
    cudaFree((void*)cuda_array);
    free(array);
}

//prints out correct usage if user does not provide proper input
void usage(){
    printf("usage: ./cudaSieve [maxInt]\n");
    exit(1);
}

// Each thread takes one candidate base in [start, start + TOTALTHREADS);
// if the base is still marked prime, it clears every multiple of the base.
// Bases above sqrt(max) are skipped — their multiples were already handled.
__global__ void Sieve(char* arr, unsigned long long max, unsigned long long start){
    unsigned long long base = blockIdx.x * blockDim.x + threadIdx.x + start;
    unsigned long long next;
    unsigned long long sqrtMax;
    sqrtMax = (unsigned long long)sqrt((double)max);
    if (base > sqrtMax){
        return;
    }
    //check if base has been marked yet, if not then mark off all multiples of base
    if(arr[base] == 1){
        for(next = base + base; next < max; next += base){
            arr[next] = 0;
        }
    }
}

//prints out result of numbers in interval that are prime
void printResult(char* arr, unsigned long long max){
    unsigned long long i;
    for (i = 0; i < max; i++){
        if(arr[i] == 1){
            // BUG FIX: %llu for the unsigned long long index (was %lld).
            printf("%llu\n", i);
        }
    }
    printf("\n");
}
5,329
#include <cstdio>

// Adds one element of two WIDTH x WIDTH row-major matrices:
// c[y][x] = a[y][x] + b[y][x].
void add(const int x, const int y, const int WIDTH, int* c, const int* a, const int* b)
{
    const int offset = y * WIDTH + x;   // flat row-major index of [y][x]
    c[offset] = a[offset] + b[offset];
}

// CPU-only demo: build two 5x5 matrices, add them element by element through
// add(), and print the resulting matrix.
int main(void)
{
    const int WIDTH = 5;
    int a[WIDTH][WIDTH];
    int b[WIDTH][WIDTH];
    int c[WIDTH][WIDTH] = { 0 };

    // Fill the operands: a[row][col] = row*10 + col, b holds 100 times that.
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            a[row][col] = row * 10 + col;
            b[row][col] = (row * 10 + col) * 100;
        }
    }

    // Element-wise sum, one call per cell.
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            add(col, row, WIDTH, (int*)(c), (int*)(a), (int*)(b));
        }
    }

    // Print c as a fixed-width table.
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            printf("%5d", c[row][col]);
        }
        printf("\n");
    }

    return 0;
}
5,330
#include "includes.h"

// Euler's number (unused here; kept for the rest of the file).
static __device__ float E = 2.718281828;

// Row-wise arg-max reduction: each thread scans one contiguous run of
// dim_size elements in src starting at (thread id) * dim_size, writing the
// maximum value to dst and the index of that maximum (within the run) to arg.
// block_size is the logical threads-per-block used to derive the run id.
__global__ void reduceArgMaxKernel(float *src, float *dst, float *arg, int dim_size, int block_size)
{
    const int slot = blockIdx.x * block_size + threadIdx.x;  // output slot
    const int base = slot * dim_size;                        // start of this run in src

    float best = src[base];
    int bestIdx = 0;
    for (int k = 1; k < dim_size; k++) {
        const float candidate = src[base + k];
        if (candidate > best) {
            best = candidate;
            bestIdx = k;
        }
    }

    dst[slot] = best;
    arg[slot] = bestIdx;
}
5,331
// Compile-time constant provider: simply returns the template argument N.
template <int N>
__device__ int get_value()
{
    return N;
}

// Writes 7 * threadIdx.x into n[threadIdx.x], obtaining the factor through
// the templated constant helper.
__global__ void foo_device(int * n)
{
    const int lane = threadIdx.x;
    n[lane] = get_value<7>() * lane;
}

// Same computation as foo_device, but generic over the element type and
// using a literal 7 instead of the helper.
template <typename T>
__global__ void bar_device(T * n)
{
    const T lane = threadIdx.x;
    n[lane] = 7 * lane;
}

// Identical twin of bar_device, kept as a separate instantiation point.
template <typename T>
__global__ void car_device(T * n)
{
    const T lane = threadIdx.x;
    n[lane] = 7 * lane;
}
5,332
#include <stdio.h>
#include <malloc.h>
#include <cuda.h>

#define M 20

// One thread per element: C[i] = A[i] + B[i].
__global__ void add(int *A, int *B, int *C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

// Host driver: builds A = [0..M) and B = 2*A, adds them on the GPU with a
// single block of M threads, then prints the result vector.
int main()
{
    int *A = (int *) malloc(M * sizeof(int));
    int *B = (int *) malloc(M * sizeof(int));
    int *C = (int *) malloc(M * sizeof(int));

    for (int k = 0; k < M; k++) {
        A[k] = k;
        B[k] = 2 * k;
    }

    // Device copies of the three vectors.
    int *dev_A, *dev_B, *dev_C;
    const int bytes = M * sizeof(int);
    cudaMalloc((void**) &dev_A, bytes);
    cudaMalloc((void**) &dev_B, bytes);
    cudaMalloc((void**) &dev_C, bytes);

    cudaMemcpy(dev_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, B, bytes, cudaMemcpyHostToDevice);

    // launch add() kernel on GPU: one block, one thread per element
    add<<<1, M>>>(dev_A, dev_B, dev_C);

    // copy device result back to the host and print it
    cudaMemcpy(C, dev_C, bytes, cudaMemcpyDeviceToHost);
    for (int k = 0; k < M; k++)
        printf("%d\n", C[k]);

    free(A);
    free(B);
    free(C);
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    return 0;
}
5,333
#include "includes.h"

// Element-wise product: d_z[i] = d_x[i] * d_y[i] for the first N elements.
// One thread per element; threads past the tail exit immediately.
__global__ void vecProduct(int *d_x, int *d_y, int *d_z, int N)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    d_z[i] = d_x[i] * d_y[i];
}
5,334
#include "GpuRetina.cuh"
#include <cstdio>

// Computes the 2-D "retina" response for every track hypothesis: the sum over
// all hits of exp(-shift^2 * sharpness), where shift is the x-residual of the
// hit against the straight line x = x0 + z * dx. One block per track;
// BLOCK_SIZE threads stride over the hits and reduce their partials in
// shared memory.
template<int BLOCK_SIZE>
__global__ void calculateRetina2d(
    const TrackProjection* tracks,
    int tracksNum,
    const double* hitsX,
    const double* hitsZ,
    int hitsNum,
    double sharpness,
    double *values
)
{
    int trackId = blockIdx.x;
    unsigned int tid = threadIdx.x;
    const double trackX0 = tracks[trackId].x0;
    const double trackDx = tracks[trackId].dx;
    double sum = 0;
    // Each thread accumulates hits tid, tid + BLOCK_SIZE, tid + 2*BLOCK_SIZE, ...
    for (int hitId = tid; hitId < hitsNum; hitId += BLOCK_SIZE)
    {
        const double hitX = hitsX[hitId];
        const double hitZ = hitsZ[hitId];
        const double shift = (hitX - trackX0 - hitZ * trackDx);
        sum += exp(-shift * shift * sharpness);
    }
    // Tree reduction of the partial sums. The barrier is at the TOP of each
    // iteration, so both the initial sdata[tid] stores and the previous
    // step's writes are visible before any thread reads them.
    __shared__ double sdata[BLOCK_SIZE];
    sdata[tid] = sum;
    for (unsigned int s = BLOCK_SIZE >> 1; s > 0; s >>= 1)
    {
        __syncthreads();
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
    }
    // Thread 0 wrote sdata[0] itself in the final step, so no extra barrier
    // is needed before this read.
    if (tid == 0)
    {
        values[trackId] = sdata[0];
    }
}

// Allocates a device buffer of `size` elements and uploads `data` into it.
// NOTE(review): the cudaMalloc/cudaMemcpy return codes are not checked; a
// failed allocation would silently hand a bad pointer to the kernels.
template<class T>
T* allocAndFetch(const T* data, int size)
{
    T* answer = nullptr;
    cudaMalloc( (void**)&answer, sizeof(T) * size);
    cudaMemcpy( answer, data, sizeof(T) * size, cudaMemcpyHostToDevice);
    return answer;
}

// Host wrapper for the 2-D retina: uploads tracks and hit coordinates,
// launches one 256-thread block per track, downloads the per-track response
// into `values`, and frees all device buffers.
void getRetina2dGpu(
    const TrackProjection* tracks,
    int tracksNum,
    const double* hitsX,
    const double* hitsZ,
    int hitsNum,
    double sharpness,
    double *values
)
{
    const int BLOCK_SIZE = 1 << 8;
    TrackProjection* tracksGpu = allocAndFetch(tracks, tracksNum);
    double* hitsXGpu = allocAndFetch(hitsX, hitsNum);
    double* hitsZGpu = allocAndFetch(hitsZ, hitsNum);
    double* valuesGpu = nullptr;
    cudaMalloc( (void**)&valuesGpu, sizeof(double) * tracksNum);
    calculateRetina2d<BLOCK_SIZE><<<tracksNum, BLOCK_SIZE>>>(
        tracksGpu,
        tracksNum,
        hitsXGpu,
        hitsZGpu,
        hitsNum,
        sharpness,
        valuesGpu
    );
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(values, valuesGpu, sizeof(double) * tracksNum, cudaMemcpyDeviceToHost );
    cudaFree(tracksGpu);
    cudaFree(hitsXGpu);
    cudaFree(hitsZGpu);
    cudaFree(valuesGpu);
}

// 3-D variant: the residual combines x and y distances against the lines
// x = x0 + z * dx and y = y0 + z * dy. Structure mirrors calculateRetina2d,
// but the reduction places its barrier at the BOTTOM of each iteration (with
// an extra barrier after the initial stores) — also a correct ordering.
template<int BLOCK_SIZE>
__global__ void calculateRetina3d(
    const TrackPure* tracks,
    int tracksNum,
    const Hit* hits,
    int hitsNum,
    double sharpness,
    double *values
)
{
    int trackId = blockIdx.x;
    unsigned int tid = threadIdx.x;
    const double trackX0 = tracks[trackId].x0;
    const double trackDx = tracks[trackId].dx;
    const double trackY0 = tracks[trackId].y0;
    const double trackDy = tracks[trackId].dy;
    double sum = 0;
    for (int hitId = tid; hitId < hitsNum; hitId += BLOCK_SIZE)
    {
        const float hitX = hits[hitId].x;
        const float hitY = hits[hitId].y;
        const float hitZ = hits[hitId].z;
        const double shiftX = (hitX - trackX0 - hitZ * trackDx);
        const double shiftY = (hitY - trackY0 - hitZ * trackDy);
        sum += exp(-(shiftX * shiftX + shiftY * shiftY) * sharpness);
    }
    __shared__ double sdata[BLOCK_SIZE];
    sdata[tid] = sum;
    __syncthreads();
    for (unsigned int s = BLOCK_SIZE >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0)
    {
        values[trackId] = sdata[0];
    }
}

// Host wrapper for the 3-D retina: same upload/launch/download/free pattern
// as getRetina2dGpu, with hits packed as Hit structs.
void getRetina3dGpu(
    const TrackPure* tracks,
    int tracksNum,
    const Hit* hitsX,
    int hitsNum,
    double sharpness,
    double *values
)
{
    const int BLOCK_SIZE = 1 << 8;
    TrackPure* tracksGpu = allocAndFetch(tracks, tracksNum);
    Hit* hitsGpu = allocAndFetch(hitsX, hitsNum);
    double* valuesGpu = nullptr;
    cudaMalloc( (void**)&valuesGpu, sizeof(double) * tracksNum);
    calculateRetina3d<BLOCK_SIZE><<<tracksNum, BLOCK_SIZE>>>(
        tracksGpu,
        tracksNum,
        hitsGpu,
        hitsNum,
        sharpness,
        valuesGpu
    );
    cudaMemcpy(
        values,
        valuesGpu,
        sizeof(double) * tracksNum,
        cudaMemcpyDeviceToHost
    );
    cudaFree(tracksGpu);
    cudaFree(hitsGpu);
    cudaFree(valuesGpu);
}
5,335
// Shim functions for calling cuRAND from Numba functions.
//
// Numba's ABI expects that:
//
// - The return value is used to indicate whether a Python exception occurred
//   during function execution. This does not happen in C/C++ kernels, so we
//   always return 0.
// - The result returned to Numba is passed as a pointer in the first parameter.
//   For void functions (such as curand_init()), a parameter is passed, but is
//   unused.

#include <curand_kernel.h>

// Seeds states[index] with the given seed/sequence/offset via curand_init().
// numba_return_value is ignored because curand_init() returns void; the 0
// return signals "no Python exception" to Numba.
extern "C"
__device__ int _numba_curand_init(
    int* numba_return_value,
    unsigned long long seed,
    unsigned long long sequence,
    unsigned long long offset,
    curandState *state,
    unsigned long long index)
{
  curand_init(seed, sequence, offset, &state[index]);
  return 0;
}

// Draws the next value from states[index] via curand() and hands it to Numba
// through *numba_return_value (curand()'s unsigned int is stored through an
// int pointer, per the ABI described above). The 0 function result signals
// "no Python exception".
extern "C"
__device__ unsigned int _numba_curand(
    int* numba_return_value,
    curandState *states,
    unsigned long long index)
{
  *numba_return_value = curand(&states[index]);
  return 0;
}
5,336
#include "includes.h"

// One element per BLOCK: block b computes c[b] = a[b] * b[b]; blocks at or
// beyond N do nothing.
__global__ void test(float *a, float *b, float *c, int N)
{
    const int slot = blockIdx.x;
    if (slot >= N)
        return;
    c[slot] = a[slot] * b[slot];
}
5,337
/////////////////////////////////////////////////////////////////////////////// // *Time: 5e-5 seconds /////////////////////////////////////////////////////////////////////////////// // /* __global__ void pass1gpu( scalar_t* pointValues, // input int nx, int ny, int nz, // input scalar_t isoval, // input uchar* edgeCases) // output { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if(j >= ny || k >= nz) return; edgeCases[k*ny*(nx-1) + j*(nx-1) + i] = calcCaseEdge( pointValues[k*ny*nx + j*nx + i + 0] >= isoval, pointValues[k*ny*nx + j*nx + i + 1] >= isoval); } */ /* __global__ void pass1gpu( scalar_t* pointValues, // input scalar_t isoval, // input uchar* edgeCases) // output { int i = blockIdx.x; int j = blockIdx.y; int k = blockIdx.z; int ny = blockDim.y; int nx = blockDim.x + 1; edgeCases[k*ny*(nx-1) + j*(nx-1) + i] = calcCaseEdge( pointValues[k*ny*nx + j*nx + i + 0] >= isoval, pointValues[k*ny*nx + j*nx + i + 1] >= isoval); } void FlyingEdgesAlgorithm::pass1() { dim3 dims = make_uint3(nx-1, ny, nz); pass1gpu<<<dims, 1>>>(pointValues, isoval, edgeCases); } */ /////////////////////////////////////////////////////////////////////////////// // *Time: 8.2e-5 /////////////////////////////////////////////////////////////////////////////// /* __global__ void pass1gpu1( scalar_t* pointValues, // input scalar_t isoval, // input uchar* edgeCases) // output { int i = blockIdx.x; int j = blockIdx.y; int k = blockIdx.z; int ny = blockDim.y; int nx = blockDim.x + 1; edgeCases[k*ny*(nx-1) + j*(nx-1) + i] = calcCaseEdge( pointValues[k*ny*nx + j*nx + i + 0] >= isoval, pointValues[k*ny*nx + j*nx + i + 1] >= isoval); } __global__ void pass1gpu2( uchar* edgeCases, // input int nx, // input FlyingEdgesAlgorithm::gridEdge* gridEdges) // output { int j = blockIdx.y; int k = blockIdx.z; int ny = blockDim.y; FlyingEdgesAlgorithm::gridEdge& grid = gridEdges[k*ny + j]; for(int i = 0; i != nx-1; 
++i) { uchar const& edge = edgeCases[k*ny*(nx-1) + j*(nx-1) + i]; if(edge == 1 || edge == 2) { grid.xl = i; break; } } for(int i = nx-2; i != -1; ++i) { uchar const& edge = edgeCases[k*ny*(nx-1) + j*(nx-1) + i]; if(edge == 1 || edge == 2) { grid.xr = i; break; } } } __global__ void pass1gpu222( scalar_t* pointValues, // input scalar_t isoval, // input int nx, // input int ny, uchar* edgeCases, // output FlyingEdgesAlgorithm::gridEdge* gridEdges) // output { // int j = blockIdx.y; // int k = blockIdx.z; // // int ny = blockDim.y; int j = threadIdx.y; int k = threadIdx.z; scalar_t* curPointValues = pointValues + k*nx*ny + j*nx; uchar* curEdgeCases = edgeCases + k*(nx-1)*ny + j*(nx-1); FlyingEdgesAlgorithm::gridEdge& curGridEdge = gridEdges[k*ny + j]; bool isGE[2]; isGE[0] = (curPointValues[0] >= isoval); for(int i = 1; i != nx; ++i) { isGE[i%2] = (curPointValues[i] >= isoval); curEdgeCases[i-1] = calcCaseEdge(isGE[(i+1)%2], isGE[i%2]); if(curEdgeCases[i-1] == 1 || curEdgeCases[i-1] == 2) { if(curGridEdge.xl == 0) curGridEdge.xl = i-1; curGridEdge.xr = i; } } } __global__ void pass1gpu333( scalar_t* pointValues, // input scalar_t isoval, // input int nx, int ny, int nz, // input uchar* edgeCases, // output FlyingEdgesAlgorithm::gridEdge* gridEdges) // output { int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if (j >= ny || k >= nz) return; scalar_t* curPointValues = pointValues + k*nx*ny + j*nx; uchar* curEdgeCases = edgeCases + k*(nx-1)*ny + j*(nx-1); FlyingEdgesAlgorithm::gridEdge& curGridEdge = gridEdges[k*ny + j]; bool isGE[2]; isGE[0] = (curPointValues[0] >= isoval); // for(int i = 1; i != nx; ++i) // { // isGE[i%2] = (curPointValues[i] >= isoval); // curEdgeCases[i-1] = calcCaseEdge(isGE[(i+1)%2], isGE[i%2]); // // if(curEdgeCases[i-1] == 1 || curEdgeCases[i-1] == 2) // { // if(curGridEdge.xl == 0) // curGridEdge.xl = i-1; // curGridEdge.xr = i; // } // } } */
5,338
#include <stdio.h>
#include "ChessBoard.cuh"

/**
 * Builds the chess board in the standard starting position and assigns
 * values for each piece. Squares with numberConversion == 0 are empty.
 * Returns: a DIM x DIM matrix of pieces; rows are individually calloc'd,
 * so release the board with freeChessBoard().
 */
Piece** makeChessBoard(){
    Piece** board = (Piece**)(calloc(DIM, sizeof(Piece*)));
    for(int row=0; row<DIM; row++){
        board[row]=(Piece*)(calloc(DIM, sizeof(Piece)));
        for(int col=0; col<DIM; col++){
            // Clears the piece's data values
            board[row][col].numberConversion=0;
            if(row==0 || row==1 || row==DIM-2 || row==DIM-1){
                // Assigns a color value of 1 to black (the two highest rows)
                board[row][col].piece.color=
                    (unsigned int)(row==DIM-2 || row==DIM-1);
                // Adds that it is the first move
                board[row][col].piece.isFirstMove=1;
            }
            // Assigns the piece value
            if(row==1 || row==DIM-2){
                board[row][col].piece.isPawn=1;
            }
            else if(row==0 || row==DIM-1){
                switch(col){
                    case 0:
                        board[row][col].piece.isRook=1;
                        break;
                    case 1:
                        board[row][col].piece.isKnight=1;
                        break;
                    case 2:
                        board[row][col].piece.isBishop=1;
                        break;
                    case 3:
                        board[row][col].piece.isQueen=1;
                        break;
                    case 4:
                        board[row][col].piece.isKing=1;
                        break;
                    case 5:
                        board[row][col].piece.isBishop=1;
                        break;
                    case 6:
                        board[row][col].piece.isKnight=1;
                        break;
                    case 7:
                        board[row][col].piece.isRook=1;
                        break;
                }
            }
        }
    }
    return board;
}

/**
 * Releases a board allocated by makeChessBoard().
 */
void freeChessBoard(Piece** board){
    for(int row=0; row<DIM; row++){
        free(board[row]);
    }
    free(board);
}

/**
 * Prints the chess board
 * Parameter board: the chess board to print
 * Returns: nothing
 */
void printChessBoard(Piece** board){
    printf("\n\t");
    for(int col=0; col<DIM; col++){
        printf("%c\t", ((int)'A')+col);
    }
    printf("\n");
    for(int row=0; row<DIM; row++){
        printf("%d\t", row);
        for(int col=0; col<DIM; col++){
            // Prints a space
            if(board[row][col].numberConversion==0){
                printf("______\t");
            }
            // Prints a piece
            else{
                // Prints the initial space
                printf("__");
                // Prints the color
                if(board[row][col].piece.color==0){
                    printf("%c", WHITECHAR);
                }
                else{
                    printf("%c", BLACKCHAR);
                }
                // Prints the piece
                if(board[row][col].piece.isPawn){
                    printf("%c", PAWN);
                }
                else if(board[row][col].piece.isRook){
                    printf("%c", ROOK);
                }
                else if(board[row][col].piece.isKnight){
                    printf("%c", KNIGHT);
                }
                else if(board[row][col].piece.isBishop){
                    printf("%c", BISHOP);
                }
                else if(board[row][col].piece.isQueen){
                    printf("%c", QUEEN);
                }
                else{
                    printf("%c", KING);
                }
                // Adds the end spacing
                printf("__\t");
            }
        }
        printf("\n");
    }
    printf("===================================================\n");
}

/**
 * Returns the capture reward associated with a piece
 * (0 for an empty/unrecognized piece).
 */
double getReward(PieceConversion piece){
    if(piece.isPawn){
        return PAWNREWARD;
    }
    else if(piece.isRook){
        return ROOKREWARD;
    }
    else if(piece.isKnight){
        return KNIGHTREWARD;
    }
    else if(piece.isBishop){
        return BISHOPREWARD;
    }
    else if(piece.isQueen){
        return QUEENREWARD;
    }
    else if(piece.isKing){
        return KINGREWARD;
    }
    else{
        return 0;
    }
}

/**
 * Moves a piece from an old position to a new position. When the moved piece
 * is a king travelling more than one column (castling), the matching rook is
 * relocated as well.
 * Parameter board: the matrix of pieces to alter
 * Parameter oldRow: the old row to move the piece from
 * Parameter oldCol: the old column to move the piece from
 * Parameter newRow: the new row to move the piece to
 * Parameter newCol: the new column to move the piece to
 * Returns: the reward of the piece captured on the target square (0 if empty)
 */
double movePiece(Piece** board, int oldRow, int oldCol, int newRow, int newCol){
    double reward=getReward(board[newRow][newCol].piece);
    // Relocates the rook when the king move is a castle
    if(board[oldRow][oldCol].piece.isKing && abs(newCol-oldCol)>1){
        if(oldCol>newCol && board[oldRow][0].piece.isRook){
            board[oldRow][3].numberConversion=
                board[oldRow][0].numberConversion;
            board[oldRow][0].numberConversion=0;
        }
        else if(oldCol<newCol && board[oldRow][7].piece.isRook){
            board[oldRow][5].numberConversion=
                board[oldRow][7].numberConversion;
            board[oldRow][7].numberConversion=0;
        }
    }
    // Moves the piece to the designated position
    board[newRow][newCol].numberConversion=
        board[oldRow][oldCol].numberConversion;
    board[newRow][newCol].piece.isFirstMove=0;
    // Deletes the piece from the old position
    board[oldRow][oldCol].numberConversion=0;
    return reward;
}

/**
 * Creates a one-hot (bitwise) encoded vector of the game board.
 * Parameter board: the game board
 * Parameter inputVector: the vector that will receive the encoding; it must
 * hold DIM*DIM*8 doubles, 8 consecutive slots (one per bit of
 * numberConversion, least significant first) for each square.
 * Returns: nothing
 */
void oneHotEncode(Piece** board, double* inputVector){
    for(int row=0; row<DIM; row++){
        for(int col=0; col<DIM; col++){
            // Gets the number conversion for the piece
            unsigned int numberConversion=
                board[row][col].numberConversion;
            for(int field=0; field<8; field++){
                // Bug fix: the index used to be row*DIM+col+field, which made
                // the 8 bits of neighbouring squares overwrite each other;
                // each square now owns its own block of 8 slots.
                inputVector[(row*DIM+col)*8+field]=
                    numberConversion%2;
                // Does a bitwise right shift
                numberConversion/=2;
            }
        }
    }
}
5,339
#include <cuda.h> #include <stdio.h> int main() { cudaDeviceProp prop; int count; cudaGetDeviceCount(&count); for(int i=0;i<count;++i) { cudaGetDeviceProperties(&prop,i); printf( "--- General Information for device %d ---\n", i ); printf( "Name:%s\n", prop.name ); printf( "Compute capability:%d.%d\n", prop.major, prop.minor ); printf( "Clock rate:%d\n", prop.clockRate ); printf( "Device copy overlap:" ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "Kernel execution timeout :" ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "--- Memory Information for device %d ---\n", i ); printf( "Total global mem:%ld\n", prop.totalGlobalMem ); printf( "Total constant Mem:%ld\n", prop.totalConstMem ); printf( "Max mem pitch:%ld\n", prop.memPitch ); printf( "Texture Alignment:%ld\n", prop.textureAlignment ); } }
5,340
// tests we can at least declare them and stuff #include "cuda.h" #include <iostream> int returnerror() { return CUDA_ERROR_INVALID_IMAGE; } int main(int argc, char *argv[]) { CUdevice device; std::cout << returnerror() << std::endl; std::cout << CUDA_ERROR_INVALID_IMAGE << std::endl; std::cout << CUDA_ERROR_NOT_READY << std::endl; return 0; }
5,341
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <curand.h> #include <curand_kernel.h> #define SIZE (2048*1024/sizeof(int)) __device__ int *nonce; // called by host, executed by GPU __global__ void init() { nonce = (int *)malloc(SIZE*sizeof(int)); } __global__ void setVals() { curandState_t state; /* we have to initialize the state */ curand_init(0, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); for(int i=0;i<SIZE;i++){ int r = curand(&state) % SIZE; //printf("%d ", r); *(nonce+r) = i; } } __global__ void getVals() { int j; for(int i=0;i<SIZE;i++){ j = *(nonce+i); //printf("%d ", j); } } int main(void) { //printf("%d\n", sizeof(int)); init<<<1, 1>>>(); getVals<<<1, 1>>>(); setVals<<<1, 1>>>(); return 0; }
5,342
#include "includes.h" __global__ void dropout_train(float* data, float* outputPtr, int size, float probability) { int thread_index = threadIdx.x + blockIdx.x * blockDim.x; int num_threads = blockDim.x * gridDim.x; for(int i = 0; i < size; i += num_threads) { int index = i + thread_index; if(index < size) { if(outputPtr[index] < probability) data[index] = 0; } } }
5,343
#include "includes.h" __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; }
5,344
#include "includes.h"

//================= Device matching functions =====================//

// Inverts a small size x size matrix entirely in per-thread local storage:
// an in-place LU factorization with implicit-scaled partial pivoting (the
// `vv` row scales and `indx` permutation follow the classic Numerical
// Recipes ludcmp scheme), followed by one forward/back substitution per
// column of the identity to build res = inverse(elem).
// `elem` is destroyed (overwritten by its LU factors). A zero pivot is
// replaced by 1e-16 instead of failing, so near-singular inputs produce
// large but finite results.
// NOTE(review): `d` only tracks the permutation parity (determinant sign)
// and is never read afterwards -- bookkeeping inherited from ludcmp.
template <int size>
__device__ void InvertMatrix(float elem[size][size], float res[size][size])
{
  int indx[size];
  float b[size];
  float vv[size];
  for (int i=0;i<size;i++)
    indx[i] = 0;
  int imax = 0;
  float d = 1.0;
  for (int i=0;i<size;i++) { // find biggest element for each row
    float big = 0.0;
    for (int j=0;j<size;j++) {
      float temp = fabs(elem[i][j]);
      if (temp>big)
        big = temp;
    }
    if (big>0.0)
      vv[i] = 1.0/big;
    else
      vv[i] = 1e16;
  }
  // Crout factorization, column by column.
  for (int j=0;j<size;j++) {
    for (int i=0;i<j;i++) { // i<j
      float sum = elem[i][j]; // i<j (lower left)
      for (int k=0;k<i;k++) // k<i<j
        sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left)
      elem[i][j] = sum; // i<j (lower left)
    }
    float big = 0.0;
    // Search for the (scaled) pivot in the remainder of the column.
    for (int i=j;i<size;i++) { // i>=j
      float sum = elem[i][j]; // i>=j (upper right)
      for (int k=0;k<j;k++) // k<j<=i
        sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left)
      elem[i][j] = sum; // i>=j (upper right)
      float dum = vv[i]*fabs(sum);
      if (dum>=big) {
        big = dum;
        imax = i;
      }
    }
    if (j!=imax) { // imax>j -- swap rows to put the pivot on the diagonal
      for (int k=0;k<size;k++) {
        float dum = elem[imax][k]; // upper right and lower left
        elem[imax][k] = elem[j][k];
        elem[j][k] = dum;
      }
      d = -d;
      vv[imax] = vv[j];
    }
    indx[j] = imax;
    if (elem[j][j]==0.0) // j==j (upper right)
      elem[j][j] = 1e-16;
    if (j!=(size-1)) {
      float dum = 1.0/elem[j][j];
      for (int i=j+1;i<size;i++) // i>j
        elem[i][j] *= dum; // i>j (upper right)
    }
  }
  // Solve A * res[:,j] = e_j for each unit vector e_j (lubksb-style).
  for (int j=0;j<size;j++) {
    for (int k=0;k<size;k++)
      b[k] = 0.0;
    b[j] = 1.0;
    int ii = -1;
    for (int i=0;i<size;i++) {
      int ip = indx[i];
      float sum = b[ip];
      b[ip] = b[i];
      if (ii!=-1)
        for (int j=ii;j<i;j++)
          sum -= elem[i][j]*b[j]; // i>j (upper right)
      else if (sum!=0.0)
        ii = i;
      b[i] = sum;
    }
    for (int i=size-1;i>=0;i--) {
      float sum = b[i];
      for (int j=i+1;j<size;j++)
        sum -= elem[i][j]*b[j]; // i<j (lower left)
      b[i] = sum/elem[i][i]; // i==i (upper right)
    }
    for (int i=0;i<size;i++)
      res[i][j] = b[i];
  }
}

// One thread per homography candidate (RANSAC-style sampling): reads the 4
// point correspondences selected by randPts, builds the standard 8x8
// direct-linear-transform system for a homography
//   [x1 y1 1 0 0 0 -x2*x1 -x2*y1] and [0 0 0 x1 y1 1 -y2*x1 -y2*y1],
// inverts it, and stores the 8 homography parameters strided by the total
// thread count (homo[j*numLoops + idx]).
// Layout: `coord` holds 4 planes of numPts floats (x1, y1, x2, y2);
// `randPts` holds 4 planes of point indices, one per sample.
// NOTE(review): the kernel uses no shared memory, so the two
// __syncthreads() calls are not needed for correctness -- presumably kept
// for timing/scheduling reasons; confirm before removing.
__global__ void ComputeHomographies(float *coord, int *randPts, float *homo, int numPts)
{
  float a[8][8], ia[8][8];
  float b[8];
  const int bx = blockIdx.x;
  const int tx = threadIdx.x;
  const int idx = blockDim.x*bx + tx;
  const int numLoops = blockDim.x*gridDim.x;
  for (int i=0;i<4;i++) {
    int pt = randPts[i*numLoops+idx];
    float x1 = coord[pt+0*numPts];
    float y1 = coord[pt+1*numPts];
    float x2 = coord[pt+2*numPts];
    float y2 = coord[pt+3*numPts];
    float *row1 = a[2*i+0];
    row1[0] = x1;
    row1[1] = y1;
    row1[2] = 1.0;
    row1[3] = row1[4] = row1[5] = 0.0;
    row1[6] = -x2*x1;
    row1[7] = -x2*y1;
    float *row2 = a[2*i+1];
    row2[0] = row2[1] = row2[2] = 0.0;
    row2[3] = x1;
    row2[4] = y1;
    row2[5] = 1.0;
    row2[6] = -y2*x1;
    row2[7] = -y2*y1;
    b[2*i+0] = x2;
    b[2*i+1] = y2;
  }
  InvertMatrix<8>(a, ia);
  __syncthreads();
  for (int j=0;j<8;j++) {
    float sum = 0.0f;
    for (int i=0;i<8;i++)
      sum += ia[j][i]*b[i];
    homo[j*numLoops+idx] = sum;
  }
  __syncthreads();
}
5,345
#include <stdio.h>
#include <stdlib.h>

// Adds 1 to exactly one element per thread. No bounds check: the launch must
// supply exactly one thread per element (demo1 launches 1024*512 threads for
// 512*1024 elements, a perfect match).
__global__ void kernel1(int* d_data)
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    d_data[tid] += 1;
}

// Contiguous-chunk partitioning: each thread increments its own block of
// numElement/nthread consecutive elements (poorly coalesced access pattern;
// this is the "bad" variant being benchmarked).
// NOTE(review): numElement/nthread truncates, so when numElement is not a
// multiple of the total thread count (1024*512 here) the trailing remainder
// is never incremented; every size used by main() is such a multiple.
__global__ void kernel2(int* d_data, const int numElement)
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x*gridDim.x;
    const int numElementPerThread = numElement/nthread;
    const int start = tid*numElementPerThread;
    int end = start + numElementPerThread;
    for(int i = start; i < end; i++) {
        d_data[i] += 1;
    }
}

// Strided (grid-stride) partitioning: adjacent threads touch adjacent
// elements each iteration, so accesses coalesce; handles any numElement.
__global__ void kernel2_opt(int* d_data, const int numElement)
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    const int nthread = blockDim.x*gridDim.x;
    for(int i = tid; i < numElement; i += nthread) {
        d_data[i] += 1;
    }
}

// Times kernel1 on 512K ints and verifies every element was incremented once.
// NOTE(review): on a mismatch the early return skips free(h_data)/free(gold),
// and the CUDA events are never destroyed -- acceptable for a one-shot demo.
void demo1()
{
    const int numElement = 512*1024;
    int* h_data = (int*)malloc(sizeof(int)*numElement);
    int* gold = (int*)malloc(sizeof(int)*numElement);
    for(int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i] + 1;
    }
    int* d_data;
    cudaMalloc(&d_data, sizeof(int)*numElement);
    cudaMemcpy(d_data, h_data, sizeof(int)*numElement, cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel1<<<1024, 512>>>(d_data);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Kernel elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel1: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaMemcpy(h_data, d_data, sizeof(int)*numElement, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    for(int i = 0; i < numElement; i++) {
        if(h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED.\n");
            return;
        }
    }
    printf("Test pass...\n");
    free(h_data);
    free(gold);
}

// Benchmarks a CPU increment against kernel2 (chunked) and kernel2_opt
// (strided) on numElement ints, then verifies the last GPU result against
// the CPU reference. The device buffer is re-uploaded before kernel2_opt so
// both kernels start from the same data.
void demo2(const int numElement)
{
    printf("numElement = %d\n", numElement);
    int* h_data = (int*)malloc(sizeof(int)*numElement);
    int* gold = (int*)malloc(sizeof(int)*numElement);
    for(int i = 0; i < numElement; i++) {
        h_data[i] = rand();
        gold[i] = h_data[i];
    }
    int* d_data;
    cudaMalloc(&d_data, sizeof(int)*numElement);
    cudaMemcpy(d_data, h_data, sizeof(int)*numElement, cudaMemcpyHostToDevice);
    float elapsedTime = 0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /*CPU*/
    elapsedTime = 0.0f;
    cudaEventRecord(start, 0);
    for(int i = 0; i < numElement; i++) {
        gold[i] += 1;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("CPU elapsed time: %.3f ms\n", elapsedTime);
    /*GPU method 1*/
    elapsedTime = 0.0f;
    cudaEventRecord(start, 0);
    kernel2<<<1024, 512>>>(d_data, numElement);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel2 elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));
    /*GPU method 2*/
    cudaMemcpy(d_data, h_data, sizeof(int)*numElement, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    kernel2_opt<<<1024, 512>>>(d_data, numElement);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime);
    printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaMemcpy(h_data, d_data, sizeof(int)*numElement, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    for(int i = 0; i < numElement; i++) {
        if(h_data[i] != gold[i]) {
            printf("!!!ERROR, TEST FAILED. i = %d: %d, %d\n", i, h_data[i], gold[i]);
            return;
        }
    }
    printf("Test pass...\n");
    free(h_data);
    free(gold);
}

// Warms up with one demo2 run, then sweeps demo2 from 1M to 32M elements
// (doubling each step) and finishes with demo1.
int main()
{
    int numElement = 1*1024*1024;
    demo2(numElement); //execute once to warm up for performance measurement
    printf("\n\nstart ............................................\n");
    printf("demo2 started!\n");
    for(int i = numElement; i <= 32*1024*1024; i*=2) {
        demo2(i);
        printf("\n");
    }
    printf("demo1 started!\n");
    demo1();
    return EXIT_SUCCESS;
}
5,346
#include<stdio.h> __global__ void replicate(int *__restrict__ in, int *__restrict__ out, size_t n, size_t rep) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int gsize = blockDim.x * gridDim.x; for (size_t i = tid; i < n; i += gsize) { for (size_t j = 0; j < rep; j++) { out[i + j*n] = in[i]; } } } int main() { int *a, *gpu_a, *gpu_b; int rep = 10000; int n = 1000; scanf("%d %d", &n, &rep); a = new int[n]; for (int i = 0; i < n; i++) a[i] = i * 100; cudaMalloc(&gpu_a, sizeof(int) * n); cudaMalloc(&gpu_b, sizeof(int) * n * rep); cudaMemcpy(gpu_a, a, sizeof(int) * n, cudaMemcpyHostToDevice); // seems that copying >500MB memory in one kernel is slow // so I divide the kernel here size_t div = (1<<27) / n; for (size_t i = 0; i < rep; i += div) { replicate<<<40, 256>>>(gpu_a, gpu_b + i * n, n, rep-i>div ? div : rep-i); } cudaDeviceSynchronize(); //replicate<<<40, 256>>>(gpu_a, gpu_b, n, rep); cudaDeviceSynchronize(); }
5,347
#include "includes.h" __global__ void cudaGetError(int N, double *ana, double *cur, double *e_sum){ // Parallelly compute the error int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < (N+1)*(N+1)) (*e_sum) += (ana[index] - cur[index])*(ana[index] - cur[index]); return; }
5,348
#include "includes.h" __global__ void reduce( float *a, int size, int c) { int tid = blockIdx.x; //Handle the data at the index int index=c,j=0;//size=b for(j=index+1;j<size;j++) { a[((tid+index+1)*size + j)] = (float)(a[((tid+index+1)*size + j)] - (float)a[((tid+index+1)*size+index)] * a[((index*size) + j)]); } }
5,349
/*
CUDA Project: Solving a tridiagonal system on GPUs
a: lower diagonal
b: diagonal
c: upper diagonal
y: A*x
x: solution of the system, x = inv(A)*y
*/
#include <stdio.h>
# include <assert.h>

#define NTPB 8

// Sequential Thomas algorithm on the host (reference solver).
// Works in place: both c and y are overwritten during elimination.
__host__ void thomas(float *a, float *b, float *c, float *y, float *x, int n){
    /// ------------------ forward elimination ------------------------ ///
    c[0] /= b[0];
    y[0] /= b[0];
    for (int i = 1; i < n; i++){
        float tmp = b[i] - a[i]*c[i-1];
        if (i < n-1) c[i] /= tmp;
        y[i] = (y[i] - a[i]*y[i-1]) / tmp;
    }
    /// ------------------ backward substitution ------------------------ ///
    x[n-1] = y[n-1];
    for (int i = n-2; i>=0; i--) x[i] = float(y[i] - c[i] * x[i+1]);
}

// Cyclic reduction. Launch as CR<<<1, NTPB>>> with n == 2*NTPB and n a power
// of two; the reduced system lives in shared memory (NTPB entries per
// diagonal). Requires a[0] == 0 (no sub-diagonal entry in the first row).
__global__ void CR(float *a, float *b, float *c, float *y, float *x, int n){
    float k1, k2;
    __shared__ float a_s[NTPB];
    __shared__ float b_s[NTPB];
    __shared__ float c_s[NTPB];
    __shared__ float y_s[NTPB];
    int stride = 2;
    int i = stride*(threadIdx.x + 1) - 1; // from 1 to n-1
    /// ------------------------ forward elimination ---------------------------- ///
    // First level: eliminate the even-indexed unknowns from global memory
    // into the shared half-size system.
    k1 = a[i] / b[i-1];
    k2 = 0.0;
    a_s[threadIdx.x] = - a[i-1]*k1; // a[0] is already 0, so no problem there
    if (i < n-1){
        k2 = c[i] / b[i+1];
        b_s[threadIdx.x] = b[i] - c[i-1]*k1 - a[i+1]*k2;
        c_s[threadIdx.x] = - c[i+1]*k2;
        y_s[threadIdx.x] = y[i] - y[i-1]*k1 - y[i+1]*k2;
    }
    else{ // last equation
        b_s[threadIdx.x] = b[i] - c[i-1]*k1;
        c_s[threadIdx.x] = 0.0;
        y_s[threadIdx.x] = y[i] - y[i-1]*k1;
    }
    __syncthreads();
    while (stride < n/2){
        i = stride*(threadIdx.x + 1) - 1;
        if (threadIdx.x < NTPB/stride){
            int delta = stride/2;
            k1 = a_s[i] / b_s[i-delta];
            a_s[i] = - a_s[i-delta]*k1;
            if (threadIdx.x < NTPB/stride - 1){
                k2 = c_s[i] / b_s[i+delta];
                b_s[i] -= c_s[i-delta]*k1 + a_s[i+delta]*k2;
                c_s[i] = -c_s[i+delta]*k2;
                y_s[i] -= y_s[i-delta]*k1 + y_s[i+delta]*k2;
            }
            else{ // last equation
                b_s[i] -= c_s[i-delta]*k1;
                c_s[i] = 0.0;
                y_s[i] -= y_s[i-delta]*k1;
            }
        }
        // Fix: each reduction level reads shared values written by other
        // threads in the previous level. With NTPB <= 32 the block is a
        // single warp, but that implicit lockstep is not guaranteed on
        // independent-thread-scheduling GPUs, so synchronize per level.
        __syncthreads();
        stride *= 2;
    }
    __syncthreads();
    /// ------------- log2(n)-th step: solving a 2 unknowns system --------------- ///
    if (threadIdx.x == 0){
        x[n-1] = (y_s[n/4 - 1] - y_s[n/2 - 1]*b_s[n/4 - 1] / a_s[n/2 - 1]) / (c_s[n/4 - 1] - b_s[n/2-1]*b_s[n/4 - 1]/a_s[n/2 - 1]);
        x[n/2-1] = (y_s[n/4 - 1] - c_s[n/4 - 1]*x[n-1]) / b_s[n/4 - 1];
    }
    // Fix: the back substitution below reads x values written by thread 0.
    __syncthreads();
    /// ------------------------ backward substitution --------------------------- ///
    stride /= 2;
    // other even unknowns
    while (NTPB/stride < NTPB){
        i = stride*(2*threadIdx.x + 1) - 1;
        if (threadIdx.x < NTPB/stride){
            if (threadIdx.x == 0){ // first unknown
                x[i] = (y_s[(i-1)/2] - c_s[(i-1)/2]*x[i + stride]) / b_s[(i-1)/2];
            }
            else{
                x[i]=(y_s[(i-1)/2] - a_s[(i-1)/2]*x[i - stride] - c_s[(i-1)/2]*x[i+stride])/b_s[(i-1)/2];
            }
        }
        // Fix: the next level reads x entries produced at this level.
        __syncthreads();
        stride /= 2;
    }
    __syncthreads();
    // odd unknowns
    if(threadIdx.x < NTPB){
        i = 2*threadIdx.x;
        if(i==0){ // first unknown
            x[0]=(y[0]-c[0]*x[1])/b[0];
        }
        else{
            x[i]=(y[i]-c[i]*x[i+1]-a[i]*x[i-1])/b[i];
        }
    }
}

// Parallel cyclic reduction. Launch as PCR<<<2, NTPB>>> with n == 2*NTPB:
// block 0 reduces the even-indexed equations and block 1 the odd-indexed
// ones (i = 2*threadIdx.x + blockIdx.x), each inside its own shared-memory
// copy, until every equation is decoupled and solved directly.
__global__ void PCR(float *a, float *b, float *c, float *y, float *x, int n){
    float k1, k2;
    __shared__ float a_s[NTPB];
    __shared__ float b_s[NTPB];
    __shared__ float c_s[NTPB];
    __shared__ float y_s[NTPB];
    int stride = 2;
    int i = stride*threadIdx.x + blockIdx.x;
    // from 0 to NTPB_PCR-2, by 2 for the first block,
    // from 1 to NTPB_PCR-1, by 2 for the second block
    if (i == 0){ // first equation
        //k1 = 0.0;
        k2 = c[i] / b[i+1];
        a_s[threadIdx.x] = 0.0;
        b_s[threadIdx.x] = b[i] - a[i+1]*k2;
        c_s[threadIdx.x]=-c[i+1]*k2;
        y_s[threadIdx.x]=y[i]-y[i+1]*k2;
    }
    else if (i == n-1){ // last equation
        k1 = a[i] / b[i-1];
        //k2 = 0.0;
        a_s[threadIdx.x] = -a[i-1]*k1;
        b_s[threadIdx.x] = b[i] - c[i-1]*k1;
        c_s[threadIdx.x] = 0.0;
        y_s[threadIdx.x] = y[i] - y[i-1]*k1;
    }
    else{
        k1 = a[i] / b[i-1];
        k2 = c[i] / b[i+1];
        a_s[threadIdx.x] = -a[i-1]*k1;
        b_s[threadIdx.x] = b[i] - c[i-1]*k1 - a[i+1]*k2;
        c_s[threadIdx.x] = - c[i+1]*k2;
        y_s[threadIdx.x] = y[i] - y[i-1]*k1 - y[i+1]*k2;
    }
    __syncthreads();
    float a_tmp, b_tmp, c_tmp, y_tmp;
    i = threadIdx.x;
    while(stride <= n/2){
        int delta = stride/2;
        if(i-delta < 0){ // first equation
            k2 = c_s[i] / b_s[i+delta];
            a_tmp = 0.0;
            b_tmp = b_s[i] - a_s[i+delta]*k2;
            c_tmp = -c_s[i+delta]*k2;
            y_tmp = y_s[i]-y_s[i+delta]*k2;
        }
        else if(i+delta>n-1){ // last equation
            k1 = a_s[i] / b_s[i-delta];
            a_tmp = -a_s[i-delta]*k1;
            b_tmp = b_s[i] - c_s[i-delta]*k1;
            c_tmp = 0.0;
            y_tmp = y_s[i] - y_s[i-delta]*k1;
        }
        else{
            k1 = a_s[i] / b_s[i-delta];
            k2 = c_s[i] / b_s[i+delta];
            a_tmp = -a_s[i-delta]*k1;
            b_tmp = b_s[i] - a_s[i+delta]*k2 - c_s[i-delta]*k1;
            c_tmp = -c_s[i+delta]*k2;
            y_tmp = y_s[i] - y_s[i+delta]*k2 - y_s[i-delta]*k1;
        }
        __syncthreads(); // wait for all threads to finish, then assign
        a_s[i] = a_tmp;
        b_s[i] = b_tmp;
        c_s[i] = c_tmp;
        y_s[i] = y_tmp;
        __syncthreads(); // in order to update vectors for all threads
        stride *= 2;
    }
    // solve for all x
    x[2*threadIdx.x+blockIdx.x] = y_s[threadIdx.x] / b_s[threadIdx.x];
}

int main(void){
    float *a, *b, *c, *y, *x;
    float *a_gpu, *b_gpu, *c_gpu, *y_gpu, *x_gpu;
    int n = 16; // must equal 2*NTPB for the CR/PCR launch configurations below
    printf("n = %d\n", n);
    a = (float*) malloc(n*sizeof(float));
    b = (float*) malloc(n*sizeof(float));
    c = (float*) malloc(n*sizeof(float));
    y = (float*) malloc(n*sizeof(float));
    x = (float*) malloc(n*sizeof(float));
    cudaMalloc(&a_gpu, n*sizeof(float));
    cudaMalloc(&b_gpu, n*sizeof(float));
    cudaMalloc(&c_gpu, n*sizeof(float));
    cudaMalloc(&y_gpu, n*sizeof(float));
    cudaMalloc(&x_gpu, n*sizeof(float));
    // Laplace operator and y = all ones.
    // Bug fix: the fill loop used to run AFTER the boundary assignments and
    // started at i = 0, which clobbered a[0] back to -1 even though CR relies
    // on a[0] == 0. Fill the interior first, then set the boundary entries.
    for (int i=0; i<n-1; i++){
        a[i] = -1.;
        b[i] = 2.;
        c[i] = -1.;
        y[i] = 1.;
    }
    a[0] = 0.;
    b[0] = 2.;
    c[0] = -1.;
    y[0] = 1.;
    a[n-1] = -1.;
    c[n-1] = 0;
    b[n-1] = 2.;
    y[n-1] = 1.;
    float temps;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMemcpy(a_gpu, a, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(c_gpu, c, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(y_gpu, y, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    CR<<<1,NTPB>>>(a_gpu, b_gpu, c_gpu, y_gpu, x_gpu, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&temps, start, stop);
    cudaMemcpy(x, x_gpu, n*sizeof(float), cudaMemcpyDeviceToHost);
    printf("Resultat avec CR:\n");
    for (int i=0; i<n; i++) printf("%.5f\n", x[i]);
    printf("CR: Time elapsed on GPU: %f ms\n", temps);
    cudaEventRecord(start, 0);
    PCR<<<2,NTPB>>>(a_gpu, b_gpu, c_gpu, y_gpu, x_gpu, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&temps, start, stop);
    cudaMemcpy(x, x_gpu, n*sizeof(float), cudaMemcpyDeviceToHost);
    printf("\nResultat avec PCR:\n");
    for (int i=0; i<n; i++) printf("%.5f\n", x[i]);
    printf("PCR: Time elapsed on GPU: %f ms\n", temps);
    // Thomas runs on the untouched host copies (c and y are destroyed here,
    // but this is their last use).
    cudaEventRecord(start, 0);
    thomas(a, b, c, y, x, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&temps, start, stop);
    printf("\nResultat avec Thomas:\n");
    for (int i=0; i<n; i++) printf("%.5f\n", x[i]);
    printf("Thomas: Time elapsed on CPU: %f ms\n", temps);
    free(a);
    free(b);
    free(c);
    free(y);
    free(x);
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    cudaFree(y_gpu);
    cudaFree(x_gpu);
}
5,350
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define TILE_WIDTH 16

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

// Wall-clock time in seconds (kept for optional timing runs).
double rtclock()
{
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0) printf("Error return from gettimeofday: %d",stat);
    return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}

// Loads a binary ("P6") PPM image with 8-bit components.
// Exits with a diagnostic on any format or I/O error.
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // Skip comment lines starting with '#'.
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n') ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    // Bug fix: this check used to test `!img` (already validated above)
    // instead of the freshly allocated pixel buffer.
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

// One thread per pixel; each channel has already been quantized to [0,3] on
// the host, so the histogram has 4*4*4 = 64 bins. The 16x16 block grid may
// overhang the image edge, hence the bounds check.
// (A leftover per-block debug printf and its trailing __syncthreads were
// removed; they only polluted stdout.)
__global__ void computeHistogramGPU(PPMPixel *data, int *cols, int *rows, float *h){
    int row = blockIdx.y * blockDim.y + threadIdx.y; // vertical pixel coordinate
    int col = blockIdx.x * blockDim.x + threadIdx.x; // horizontal pixel coordinate
    if(row < *rows && col < *cols) {
        int j, k, l;
        int index = 0;
        // These three loops could be collapsed into a direct index
        //   h[red*16 + green*4 + blue] += 1
        // but the original enumeration is kept deliberately: the author
        // measured better speedups with this form.
        for (j = 0; j <= 3; j++) {
            for (k = 0; k <= 3; k++) {
                for (l = 0; l <= 3; l++) {
                    if (data[(row*(*cols)) + col].red == j &&
                        data[(row*(*cols)) + col].green == k &&
                        data[(row*(*cols)) + col].blue == l) {
                        // Atomic so concurrent threads never lose an update.
                        atomicAdd(&h[index], 1.0f);
                    }
                    index++;
                }
            }
        }
    }
}

// Quantizes the image channels to [0,3], computes the 64-bin color histogram
// on the GPU, and returns it normalized (each bin divided by the pixel count)
// in h[0..63]. h must be zero-initialized by the caller.
void Histogram(PPMImage *image, float *h) {
    // host
    int i, cols = image->x, rows = image->y;
    // NOTE(review): float pixel count -- exact only for images below 2^24
    // pixels; larger images would lose precision here.
    float n = image->y * image->x;
    // device
    PPMPixel *d_data;
    int *d_cols, *d_rows;
    float *d_h;
    // Quantize each channel from [0,255] down to [0,3] on the host.
    for (i = 0; i < n; i++) {
        image->data[i].red = floor((image->data[i].red * 4) / 256);
        image->data[i].blue = floor((image->data[i].blue * 4) / 256);
        image->data[i].green = floor((image->data[i].green * 4) / 256);
    }
    // Allocate device buffers.
    cudaMalloc( (void**)&d_data, sizeof(PPMPixel)*( (int)n) ) ;
    cudaMalloc( (void**)&d_cols, sizeof(int));
    cudaMalloc( (void**)&d_rows, sizeof(int));
    cudaMalloc( (void**)&d_h, sizeof(float) * 64);
    // Host -> device copies.
    cudaMemcpy(d_data, image->data, sizeof(PPMPixel)*( (int)n ) , cudaMemcpyHostToDevice);
    cudaMemcpy(d_cols, &cols, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_rows, &rows, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_h, h, sizeof(float) * 64, cudaMemcpyHostToDevice);
    // Enough 16x16 blocks to tile the whole image (ceiling division).
    dim3 numeroBlocosNaImagem(ceil((float)cols/TILE_WIDTH), ceil((float)rows/TILE_WIDTH), 1);
    dim3 numeroThreadsPorBloco(TILE_WIDTH, TILE_WIDTH, 1);
    computeHistogramGPU<<<numeroBlocosNaImagem,numeroThreadsPorBloco>>>(d_data, d_cols, d_rows, d_h);
    // Device -> host copy of the raw counts (blocking, so it also
    // synchronizes with the kernel).
    cudaMemcpy(h, d_h, sizeof(float) * 64, cudaMemcpyDeviceToHost);
    // Release device buffers.
    cudaFree(d_data);
    cudaFree(d_cols);
    cudaFree(d_rows);
    cudaFree(d_h);
    // Normalize on the host: the author observed a small (third decimal)
    // discrepancy in the first bin when normalizing on the GPU.
    for (i = 0; i < 64; i++){
        h[i] = h[i]/n;
    }
}

int main(int argc, char *argv[]) {
    if( argc != 2 ) {
        printf("Too many or no one arguments supplied.\n");
        // Bug fix: execution used to fall through and dereference argv[1]
        // even when no filename was supplied.
        return 1;
    }
    int i;
    char *filename = argv[1];
    PPMImage *image = readPPM(filename);
    float *h = (float*)malloc(sizeof(float) * 64);
    // Zero the histogram before accumulation.
    for(i=0; i < 64; i++) h[i] = 0.0;
    Histogram(image, h);
    for (i = 0; i < 64; i++){
        printf("%0.3f ", h[i]);
    }
    printf("\n");
    free(h);
    free(image->data);
    free(image);
    return 0;
}

/*
 * Measurements (normalization done on the CPU, so
 * GPU_total = buffer alloc + H2D transfer + kernel + D2H transfer + CPU normalize):
 *
 * ---------------------------------------------------------------------------------------------------------------
 * | Input   | serial time | GPU alloc  | H2D transfer | kernel      | D2H transfer | GPU_total | speedup  |
 * ---------------------------------------------------------------------------------------------------------------
 * | ar1.ppm | 0.179675s   | 0.321664ms | 0.852256ms   | 2.844864ms  | 0.025536ms   | 0.092086s | 1.951165 |
 * | ar2.ppm | 0.344623s   | 0.385280ms | 1.608000ms   | 7.420544ms  | 0.025056ms   | 0.110731s | 3.112254 |
 * | ar3.ppm | 1.298848s   | 0.343008ms | 5.551264ms   | 32.017376ms | 0.021952ms   | 0.235085s | 5.525014 |
 * ---------------------------------------------------------------------------------------------------------------
 */
5,351
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>
#include <cuda_runtime.h>

/* Wall-clock timer in seconds (gettimeofday resolution). */
double my_timer()
{
    struct timeval time;
    gettimeofday(&time, 0);
    return time.tv_sec + time.tv_usec / 1000000.0;
}

/* Tile edge; each CUDA block is BLOCK_SIZE x BLOCK_SIZE threads. */
#define BLOCK_SIZE 16

/* Abort on any CUDA runtime error so failures are not silent. */
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(1);                                                          \
        }                                                                     \
    } while (0)

/*
 * Reference multiply on the host.  NOTE: B is indexed as B[j*size+k],
 * i.e. the computation is C = A * B^T with both operands row-major.
 */
void matrixMulCPU(int8_t *A, int8_t *B, int *C, int size)
{
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            int sum = 0;
            for (int k = 0; k < size; k++) {
                sum += A[i * size + k] * B[j * size + k];
            }
            C[i * size + j] = sum;
        }
    }
}

/*
 * Tiled shared-memory multiply computing C = A * B^T (matching the CPU
 * reference above).  Launch with BLOCK_SIZE x BLOCK_SIZE thread blocks
 * and a (width/BLOCK_SIZE) x (width/BLOCK_SIZE) grid; width must be a
 * multiple of BLOCK_SIZE (validated in main()).
 */
__global__ void matrixMulGPU(int8_t *A, int8_t *B, int *C, int width)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    /* Row band of A walked by this block, and — because B is effectively
     * transposed — the row band of B it pairs with. */
    int aBegin = width * BLOCK_SIZE * by;
    int aEnd   = aBegin + width - 1;
    int aStep  = BLOCK_SIZE;
    int bBegin = width * BLOCK_SIZE * bx;
    int bStep  = BLOCK_SIZE;

    int Csub = 0;  /* accumulator for this thread's output element */

    /* Walk matching tiles of A and B across the shared dimension. */
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
        __shared__ int8_t As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ int8_t Bs[BLOCK_SIZE][BLOCK_SIZE];

        /* Each thread stages one element of each tile. */
        As[ty][tx] = A[a + width * ty + tx];
        Bs[ty][tx] = B[b + width * ty + tx];
        __syncthreads();  /* tiles fully loaded before use */

#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            Csub += (int)As[ty][k] * (int)Bs[tx][k];
        }
        __syncthreads();  /* done with tiles before they are overwritten */
    }

    /* One element per thread. */
    int c = width * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + width * ty + tx] = Csub;
}

int main(int argc, char *argv[])
{
    if (argc < 2) {
        printf("Error input options\n");
        exit(1);
    }

    int width = atoi(argv[1]);
    /* The tiled kernel assumes full tiles; a width that is not a multiple
     * of BLOCK_SIZE used to silently truncate the grid and corrupt the
     * verification below. */
    if (width <= 0 || width % BLOCK_SIZE != 0) {
        fprintf(stderr, "width must be a positive multiple of %d\n", BLOCK_SIZE);
        exit(1);
    }
    int MSIZE = width * width;

    int8_t *A = (int8_t *)malloc(sizeof(int8_t) * MSIZE);
    int8_t *B = (int8_t *)malloc(sizeof(int8_t) * MSIZE);
    int *C = (int *)malloc(sizeof(int) * MSIZE);
    int *D = (int *)malloc(sizeof(int) * MSIZE);
    if (!A || !B || !C || !D) {
        fprintf(stderr, "host allocation failed\n");
        exit(1);
    }

    int8_t *A_dev, *B_dev;
    int *C_dev;
    CUDA_CHECK(cudaMalloc(&A_dev, MSIZE * sizeof(int8_t)));
    CUDA_CHECK(cudaMalloc(&B_dev, MSIZE * sizeof(int8_t)));
    CUDA_CHECK(cudaMalloc(&C_dev, MSIZE * sizeof(int)));

    srand(time(NULL));
    /* Init matrix (random data left commented out, as in the original) */
    for (int i = 0; i < MSIZE; i++) {
        A[i] = 1; /* (rand() % 16) - 8; */
        B[i] = 1; /* (rand() % 16) - 8; */
        C[i] = 0;
        D[i] = 0;
    }

    CUDA_CHECK(cudaMemcpy(A_dev, A, MSIZE * sizeof(int8_t), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(B_dev, B, MSIZE * sizeof(int8_t), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(C_dev, C, MSIZE * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaDeviceSynchronize());

    /* thread block conf. */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(width / dimBlock.x, width / dimBlock.y);

    double start_timer = my_timer();
    matrixMulGPU<<<grid, dimBlock>>>(A_dev, B_dev, C_dev, width);
    CUDA_CHECK(cudaGetLastError());      /* catch bad launch configurations */
    CUDA_CHECK(cudaDeviceSynchronize());
    double end_timer = my_timer();
    printf("The GPU Elapsed Time:%lf Sec.\n", end_timer - start_timer);

    CUDA_CHECK(cudaMemcpy(C, C_dev, MSIZE * sizeof(int), cudaMemcpyDeviceToHost));

    start_timer = my_timer();
    matrixMulCPU(A, B, D, width);
    end_timer = my_timer();
    printf("The CPU Elapsed Time:%lf Sec.\n", end_timer - start_timer);

    /* Verification: results are exact integers, so compare exactly
     * (the original used a float tolerance on ints). */
    printf("Verifying\n");
    int flag = 0;
    for (int i = 0; i < MSIZE; i++) {
        if (C[i] != D[i]) {
            printf("Error:%d, %d, %d\n", C[i], D[i], i);
            break;
        }
        flag++;
    }
    if (flag == MSIZE)
        printf("Verify Success!!\n");

    /* memory free */
    free(A); cudaFree(A_dev);
    free(B); cudaFree(B_dev);
    free(C); cudaFree(C_dev);
    free(D);
    return 0;
}
5,352
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define NUM 5
#define RANGE 10

/*
 * Single-row facility-layout objective evaluation.
 *
 * Builds a (currently hard-coded) permutation `data` and widths `width`,
 * reads the pairwise flow costs from "cost.txt" (one "i j cost" triple per
 * line, 1-based indices), computes the centre position of every facility,
 * and sums cost * centre-distance over all pairs.
 */
int main(void)
{
    double START, END;
    START = clock();
    srand(time(NULL));

    int data[NUM];
    /* generate number */
    for (int i = 0; i < NUM; i++)
        data[i] = i;

    /* shuffle — note: overwritten by the fixed test instance below, so it
     * only takes effect once those hard-coded lines are removed */
    for (int i = 0; i < NUM; i++) {
        int j = rand() % NUM;
        int tmp = data[i];
        data[i] = data[j];
        data[j] = tmp;
    }

    float width[NUM];
    for (int i = 0; i < NUM; i++)
        width[i] = rand() % RANGE;

    /* fixed test instance */
    data[0] = 1; data[1] = 2; data[2] = 3; data[3] = 4; data[4] = 5;
    width[0] = 9; width[1] = 3; width[2] = 7; width[3] = 9; width[4] = 1;

    /* read the file cost */
    int cost_num = NUM * (NUM - 1) / 2;
    FILE *fPtr = fopen("cost.txt", "r");
    if (fPtr == NULL) {                  /* was unchecked: crashed on a missing file */
        perror("cost.txt");
        return 1;
    }
    int cost[NUM][NUM] = {0};
    int temp[cost_num][3];
    for (int i = 0; i < cost_num; i++) {
        if (fscanf(fPtr, "%d %d %d", &temp[i][0], &temp[i][1], &temp[i][2]) != 3) {
            fprintf(stderr, "cost.txt: expected %d 'i j cost' triples\n", cost_num);
            fclose(fPtr);
            return 1;
        }
        /* validate the 1-based indices before they are used as subscripts */
        if (temp[i][0] < 1 || temp[i][0] > NUM || temp[i][1] < 1 || temp[i][1] > NUM) {
            fprintf(stderr, "cost.txt: index out of range on line %d\n", i + 1);
            fclose(fPtr);
            return 1;
        }
    }
    fclose(fPtr);

    /* symmetric 2-dimension cost matrix */
    for (int i = 0; i < cost_num; i++) {
        cost[temp[i][0] - 1][temp[i][1] - 1] = temp[i][2];
        cost[temp[i][1] - 1][temp[i][0] - 1] = temp[i][2];
    }

    /* cal position: each facility sits flush against the previous one,
     * position[] holding its centre */
    float position[NUM];
    position[0] = width[0] / 2;
    for (int i = 1; i < NUM; i++)
        position[i] = width[i] / 2 + position[i - 1] + width[i - 1] / 2;

    /* OF: centre distance weighted by the flow cost of the pair */
    float OF[cost_num];
    int count = 0;
    for (int i = 0; i < NUM - 1; i++) {
        for (int j = i + 1; j < NUM; j++) {
            OF[count] = (position[j] - position[i]) * cost[data[i] - 1][data[j] - 1];
            printf("%f %f\n", position[i], position[j]);
            printf("%d %d %d %f %d\n", data[i] - 1, data[j] - 1,
                   cost[data[i] - 1][data[j] - 1], position[j] - position[i], count);
            count++;
        }
    }

    float total_OF = 0.0;
    for (int i = 0; i < cost_num; i++)
        total_OF += OF[i];

    for (int i = 0; i < NUM; i++)
        printf("%d ", data[i]);
    printf("\n");
    for (int i = 0; i < NUM; i++)
        printf("%f ", width[i]);
    printf("OF = %f\n", total_OF);

    END = clock();
    printf("time = %f\n", (END - START) / CLOCKS_PER_SEC);
    return 0;
}
5,353
#include<iostream>

// Enumerates every CUDA device visible to the runtime and dumps its
// general, memory and multiprocessor properties to stdout.
int main(void)
{
    int count = 0;
    cudaGetDeviceCount(&count);

    for (int dev = 0; dev < count; dev++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);

        // --- general information ---
        std::cout << "--- General Information for device" << dev << "---" << std::endl;
        std::cout << "Name:" << prop.name << std::endl;
        std::cout << "Compute capability:" << "major " << prop.major << "minor " << prop.minor << std::endl;
        std::cout << "Clock rate:" << prop.clockRate << std::endl;
        std::cout << "Device copy overlap:" << std::endl;
        std::cout << (prop.deviceOverlap ? "Enabled" : "Disabled") << std::endl;
        std::cout << "Kernel execition timeout:" << std::endl;
        std::cout << (prop.kernelExecTimeoutEnabled ? "Enabled" : "Disabled") << std::endl;

        // --- memory information ---
        std::cout << "---Memory Information for device" << dev << "---" << std::endl;
        std::cout << "Total global mem:" << prop.totalGlobalMem << std::endl;
        std::cout << "Total constant Mem:" << prop.totalConstMem << std::endl;
        std::cout << "Max mem pitch:" << prop.memPitch << std::endl;
        std::cout << "Texture Aligment:" << prop.textureAlignment << std::endl;

        // --- multiprocessor information ---
        std::cout << "--- MP Information for device" << dev << "---" << std::endl;
        std::cout << "Multiprocessor count:" << prop.multiProcessorCount << std::endl;
        std::cout << "Shared mem per mp:" << prop.sharedMemPerBlock << std::endl;
        std::cout << "Registers per mp:" << prop.regsPerBlock << std::endl;
        std::cout << "Threads in warp:" << prop.warpSize << std::endl;
        std::cout << "Max threads per block:" << prop.maxThreadsPerBlock << std::endl;
        std::cout << "Max threads dimensions:" << prop.maxThreadsDim[0] << " "
                  << prop.maxThreadsDim[1] << " " << prop.maxThreadsDim[2] << std::endl;
        std::cout << "Max grid dimensions:" << prop.maxGridSize[0] << " "
                  << prop.maxGridSize[1] << " " << prop.maxGridSize[2] << std::endl;
    }
}
5,354
#include <stdio.h>
#include <stdlib.h>

//example:
//k = 32
//input = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 401

/*
 * Tries to express `current_val` as a sum of distinct inputs drawn from
 * inputs[0..curr_index], where inputs[curr_index] itself must be part of
 * the sum.  On success sets result[x] = 1 for every input used and
 * returns 1; returns 0 when no such decomposition exists.
 */
int recursion(int* inputs, int current_val, int curr_index, int* result)
{
    int remaining = current_val - inputs[curr_index];

    if (remaining == 0) {
        // exact match: this input alone closes the sum
        result[curr_index] = 1;
        return 1;
    }

    // try to cover the remainder using a strictly smaller index
    for (int idx = curr_index - 1; idx >= 0; idx--) {
        if (remaining - inputs[idx] < 0)
            continue;
        if (recursion(inputs, remaining, idx, result)) {
            result[curr_index] = 1;
            return 1;
        }
    }
    return 0;
}

int main()
{
    int k, s;

    printf("Define k for equasion (there are going to be k + 1 inputs):\n");
    scanf("%d", &k);

    int *inputs = (int *)malloc(k * sizeof(int));

    printf("Define input of size %d:\n", k + 1);
    int i = 0;
    while (i < k && (scanf("%d,", &inputs[i]) == 1))
        i++;

    // get final sum
    scanf("%d", &s);

    int *res = (int *)malloc(k * sizeof(int));
    for (i = 0; i < k; i++)
        res[i] = 0;

    // begin new recursions until result is obtained, starting from the
    // highest element and moving towards the beginning
    for (i = k - 1; i >= 0; i--)
        if (recursion(inputs, s, i, res))
            break;

    // print the 0/1 coefficient vector and the reconstructed equation
    for (i = 0; i < k; i++)
        printf("%d", res[i]);
    printf(" (");
    for (i = 0; i < k - 1; i++)
        printf("%d*%d + ", res[i], inputs[i]);
    printf("%d*%d = %d", res[k - 1], inputs[k - 1], s);
    printf(")\n");

    free(inputs);
    free(res);
    return 0;
}
5,355
#include <stdio.h>

// Kernel: every thread prints its own thread/block coordinates together
// with the block and grid dimensions (device printf — debugging only).
__global__ void checkId()
{
    printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim: (%d, %d, %d) gridDim: (%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}

// Host wrapper: launches 3 blocks of 3 threads (9 prints in total).
void cudaFunction()
{
    dim3 blocks(3);
    dim3 threads(3);
    checkId<<<blocks, threads>>>();
}
5,356
//#include "caffe/layers/cosine_loss_layer.hpp" // //namespace caffe { // // template<typename Dtype> // __global__ void channels_gpu_l2_norm(const int n, const int channels, const Dtype* bottom, // Dtype *norm_data) { // CUDA_KERNEL_LOOP(index, n) { // caffe_gpu_l2norm(channels, bottom + index * channels, norm_data); // } // } // // template <typename Dtype> // __global__ void calc_loss(const int n, Dtype *inner_product_data, Dtype *norm_data, Dtype *loss) { // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, // const vector<Blob<Dtype>*>& top) { // // int batch = bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // // const Dtype *bottom_data = bottom[0]->gpu_data(); // Dtype *norm_data = norm_.mutable_gpu_data(); // Dtype *inner_product_data = inner_product.mutable_gpu_data(); // Dtype loss = Dtype(0.0); // const Dtype* label = bottom[1]->gpu_data(); // for (size_t i = 0; i < batch; i++) { // caffe_gpu_l2norm(channels, bottom_data + i * channels, norm_data + i); // } // caffe_gpu_gemm(CblasNoTrans, CblasTrans, batch, batch, channels, Dtype(1.0), // bottom_data, bottom_data, Dtype(0.0), inner_product_data); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = i; j < batch; j++) { // //inner_product_data[i * batch + j] = caffe_cpu_dot(channels, bottom_data + i * channels, bottom_data + j * channels); // inner_product_data[i * batch + j] / (norm_data[i] * norm_data[j] + Dtype(1.0)); // if (label[i] == label[j]) { // loss += (1 - inner_product_data[i * batch + j]); // } // else { // loss += inner_product_data[i * batch + j]; // } // } // } // // top[0]->mutable_cpu_data()[0] = loss / bottom[0]->count(); // // } // // template <typename Dtype> // void CosineLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, // const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // int batch = 
bottom[0]->num(); // int channels = bottom[0]->channels(); // int height = bottom[0]->height(); // int width = bottom[0]->width(); // const Dtype* label = bottom[1]->gpu_data(); // const Dtype *norm_data = norm_.gpu_data(); // const Dtype *bottom_data = bottom[0]->mutable_gpu_data(); // // if (propagate_down[0]) { // // Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // // for (size_t i = 0; i < batch; i++) { // for (size_t j = 0; j < batch; j++) { // if (i == j) { // continue; // } // else { // bool reverse = label[i] == label[j]; // accu_assign(batch, channels, reverse, bottom_data + j * channels, bottom_diff + i * channels, norm_data[i]); // } // } // } // } // // } // // // INSTANTIATE_LAYER_GPU_FUNCS(CosineLossLayer); // //}
5,357
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>

// Abort with a message on any CUDA runtime error.
#define CHECK_CUDA_ERR(cudaerr) \
    { \
        auto err = cudaerr; \
        if (err != cudaSuccess) { \
            printf("kernel launch failed with error \"%s\".\n",\
                cudaGetErrorString(err)); \
            exit(1); \
        } \
    }

// Iterates z <- z^2 + c for one pixel (up to max_iter) and writes its RGB
// triple: colors[0..2] is the in-set colour, colors[3 + it*3 ..] the
// escape colour for iteration count `it`.
__device__ void color_pixel(
        char *colors,
        char *pixels,
        float c_re,
        float c_im,
        int global_index,
        int max_iter)
{
    float i = 0, j = 0, ii = 0, jj = 0;
    int iteration = 0;
    // float literal (4.0f) avoids a silent per-iteration double promotion
    while (ii + jj < 4.0f && iteration < max_iter) {
        j = 2 * i * j + c_im;
        i = ii - jj + c_re;
        ii = i * i;
        jj = j * j;
        iteration++;
    }

    int color_index = global_index * 3;
    if (iteration < max_iter) {
        // escaped: pick the palette entry for this iteration count
        int it_offset = 3 + iteration * 3;
        pixels[color_index] = colors[it_offset];
        pixels[color_index + 1] = colors[it_offset + 1];
        pixels[color_index + 2] = colors[it_offset + 2];
    }
    else {
        // did not escape: paint with the in-set colour
        pixels[color_index] = colors[0];
        pixels[color_index + 1] = colors[1];
        pixels[color_index + 2] = colors[2];
    }
}

// One thread per pixel; maps the flat index to a point in the complex
// plane (both axes scaled by the image height, as in the original) and
// colours it.  Guarded so over-provisioned grids are safe.
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter)
{
    int global_index = threadIdx.x + blockDim.x * blockIdx.x;
    float x = (float)(global_index % width);
    float y = (float)(global_index / width);
    float f_height = (float)height;
    float c_re = (x - f_height / 2.0f) * 4.0f / f_height;
    float c_im = (y - f_height / 2.0f) * 4.0f / f_height;
    if (global_index < height * width)
        color_pixel(colors, pixels, c_re, c_im, global_index, max_iter);
}

// Builds the palette: bytes 0..2 are the in-set colour, bytes 3+3*it..
// the escape colour for iteration count `it` (it in [0, max_iter)).
// BUG FIX: the loop previously ran `i < max_iter` instead of
// `i < max_iter * 3`, so only the first max_iter/3 palette entries were
// written while the kernel reads up to colors[3 + (max_iter-1)*3 + 2] —
// two thirds of the palette were uninitialized memory.
// NOTE(review): `colors` is plain char; values like 200 wrap on signed
// char targets — presumably intentional for the colour cycling, confirm.
void fill_colors(char *colors, int max_iter)
{
    colors[0] = 200;
    colors[1] = 200;
    colors[2] = 200;
    int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
    for (int i = 0; i < max_iter * 3; i += 3) {
        if (j % 50 == 0)
            shade <<= 1;          // halve the usable range every 50 entries
        int red = colors[0] + i * speed1 - j;
        int green = colors[1] + i * speed2;
        int blue = colors[2] + i * speed3 - j;
        if (red < 0) red = 0;
        if (green < 0) green = 0;
        if (blue < 0) blue = 0;
        colors[3 + i] = (red) % (256 / shade);
        colors[3 + i + 1] = (green) % (256 / shade);
        colors[3 + i + 2] = (blue) % (256 / shade);
        j += 1;
    }
}

// Renders the set on the GPU, reports kernel and copy-back times, and
// optionally dumps a (headerless-quality) BMP named <epoch>-gpu.bmp.
int main(int argc, char **argv)
{
    if (argc < 2) {   // was: unchecked argv[1] dereference
        fprintf(stderr, "usage: %s <write_to_file_flag>\n", argv[0]);
        return 1;
    }
    int write_to_file_flag = std::atoi(argv[1]);

    int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
    int n_pixels = x_pixels * y_pixels;

    char *host_pixels, *device_pixels, *host_colors, *device_colors;
    size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB

    // This allocates pinned memory to speed-up memory transfers
    CHECK_CUDA_ERR(cudaMallocHost(&host_pixels, pixel_size));
    CHECK_CUDA_ERR(cudaMalloc(&device_pixels, pixel_size));

    size_t color_size = sizeof(char) * (max_iter * 3 + 3);
    CHECK_CUDA_ERR(cudaMallocHost(&host_colors, color_size));
    CHECK_CUDA_ERR(cudaMalloc(&device_colors, color_size));

    fill_colors(host_colors, max_iter);
    CHECK_CUDA_ERR(cudaMemcpy(device_colors, host_colors, color_size, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaDeviceSynchronize());

    auto start = std::chrono::steady_clock::now();
    // ceil-div launch; the kernel bounds-checks the tail block
    mandelbrot<<<(n_pixels + 31) / 32, 32>>>(
        /*colors=*/device_colors,
        /*pixels=*/device_pixels,
        /*height=*/y_pixels,
        /*width=*/x_pixels,
        /*max_iter*/max_iter);
    CHECK_CUDA_ERR(cudaGetLastError());
    CHECK_CUDA_ERR(cudaDeviceSynchronize());
    auto end = std::chrono::steady_clock::now();
    std::cout << "RUN "
              << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
              << std::endl;

    start = std::chrono::steady_clock::now();
    CHECK_CUDA_ERR(cudaMemcpy(host_pixels, device_pixels, pixel_size, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaDeviceSynchronize());
    end = std::chrono::steady_clock::now();
    std::cout << "READ "
              << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
              << std::endl;

    if (write_to_file_flag) {
        long long current_time = time(nullptr);
        std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
        // hand-rolled BMP header (preserved byte-for-byte from the original;
        // NOTE(review): the width/height fields look hard-coded — confirm
        // they match x_pixels/y_pixels before relying on the output file)
        image << (uint8_t)0x42 << (uint8_t)0x4D << (uint8_t)0x7C << (uint8_t)0x00
              << (uint8_t)0x00 << (uint8_t)0x00 << (uint8_t)0x00 << (uint8_t)0x00
              << (uint8_t)0x00 << (uint8_t)0x00 << (uint8_t)0x1A << (uint8_t)0x00
              << (uint8_t)0x00 << (uint8_t)0x00 << (uint8_t)0x0C << (uint8_t)0x00
              << (uint8_t)0x00 << (uint8_t)0x00 << (uint8_t)0x00 << // Image Width
                 (uint8_t)0x4E << // Image Width
                 (uint8_t)0xA2 << // Image Height
                 (uint8_t)0x45 << // Image height
                 (uint8_t)0x01 << (uint8_t)0x00 << (uint8_t)0x18 << (uint8_t)0x00;
        for (int i = 0; i < n_pixels * 3; i++)
            image << host_pixels[i];
        image << 0x00 << 0x00;
    }

    CHECK_CUDA_ERR(cudaFreeHost(host_pixels));
    CHECK_CUDA_ERR(cudaFreeHost(host_colors));
    CHECK_CUDA_ERR(cudaFree(device_pixels));
    CHECK_CUDA_ERR(cudaFree(device_colors));
    return 0;
}
5,358
#include "includes.h"

// Straight 3-channel (RGB) copy from `source` to `render`.
// Launch geometry: TILE_DIM x BLOCK_ROWS threads per block, each block
// covering a TILE_DIM x TILE_DIM tile, so every thread copies
// TILE_DIM / BLOCK_ROWS rows of its column.  The image width is taken
// as gridDim.x * TILE_DIM (full-tile coverage assumed — TODO confirm
// against the caller's launch configuration).
__global__ void copy_mem(unsigned char *source, unsigned char *render)
{
    const int x = blockIdx.x * TILE_DIM + threadIdx.x;
    const int y = blockIdx.y * TILE_DIM + threadIdx.y;
    const int width = gridDim.x * TILE_DIM;

    for (int row = 0; row < TILE_DIM; row += BLOCK_ROWS) {
        const int base = 3 * ((y + row) * width + x);
        for (int ch = 0; ch < 3; ch++)
            render[base + ch] = source[base + ch];
    }
}
5,359
#include <cstdio>

// Device-only helper (kept for parity with the original demo; it is not
// currently launched from the global kernel).
__device__ void cuda_device_function()
{
    printf("This function is called from device only. a=%d, \n", blockIdx.x);
}

// I'm using this function to test array indices/outputs (total variable)
__global__ void cuda_global_function()
{
    // sample/dummy value derived from this thread's coordinates
    int total = ((blockIdx.x + 1) * (blockIdx.y + 1)) * (threadIdx.x + 1) * (threadIdx.y + 1);
    printf("Block X: %d Block Y: %d Block Z: %d Thread X: %d Thread Y: %d Thread Z: %d Total: %d \n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           threadIdx.x, threadIdx.y, threadIdx.z, total);
}

__host__ void host_function()
{
    printf("This function is called from host only.\n");
}

int main()
{
    host_function();

    // grid of 2x200 blocks, each 40x4 threads
    // (these are the dim3 variable parameters initialized in hanon_exercise_threads.cu)
    const dim3 block_size(2, 200, 1);
    const dim3 thread_size(40, 4, 1);
    cuda_global_function<<<block_size, thread_size>>>();
    return 0;
}
5,360
#include <iostream>
#include <string>
#include <fstream>
#include <sstream>
#include <bitset>
#include <cstdlib>
#include <cmath>
#include <algorithm>
#include <iomanip>
#include <string.h>
#include <istream>
#include <limits.h>
#include <cuda_runtime.h>
using namespace std;

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
    int width;
    int height;
    float* elements;
} Matrix;

// Thread block size.  BUG FIX: this was 64, giving 64x64 = 4096 threads
// per block — far above the 1024-thread hardware limit, so the kernel
// launch always failed (and the error was never checked).
#define BLOCK_SIZE 16

// Forward declaration of the 2-D convolution kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Scratch buffer for one tab-separated input line.
char mystring[9999999];

// Reads two tab-separated matrices (separated by a blank line) from
// "input_large.txt", computes their full 2-D convolution on the GPU and
// prints the result followed by the element count.
int main()
{
    // ---- pass 1: columns of matrix 1 (count tabs in the first line) ----
    FILE *pFile = fopen("input_large.txt", "r");
    if (pFile == NULL) {                       // was unchecked
        perror("input_large.txt");
        return 1;
    }
    int cols1 = 0;
    if (fgets(mystring, 9999999, pFile) != NULL)
        for (int i = 0; i < (int)strlen(mystring); i++)
            if (mystring[i] == '\t')
                cols1++;
    fclose(pFile);

    // ---- pass 2: rows of matrix 1 (up to the blank separator line),
    // then the shape of matrix 2 ----
    pFile = fopen("input_large.txt", "r");
    if (pFile == NULL) {
        perror("input_large.txt");
        return 1;
    }
    int rows1 = 0;
    while (fgets(mystring, 9999999, pFile) != NULL) {
        if (mystring[0] == '\n')
            break;
        rows1++;
    }
    int cols2 = 0, rows2 = 0;
    while (fgets(mystring, 9999999, pFile) != NULL) {
        cols2 = 0;
        for (int i = 0; i < (int)strlen(mystring); i++)
            if (mystring[i] == '\t')
                cols2++;
        rows2++;
    }
    fclose(pFile);
    cols1++;   // the last column carries no trailing tab
    cols2++;
    cout << cols1 << endl << rows1 << endl << cols2 << endl << rows2 << endl;

    // ---- allocate host matrices ----
    Matrix A, B, C;
    A.width = cols1;  A.height = rows1;
    B.width = cols2;  B.height = rows2;
    C.width = A.width + B.width - 1;     // "full" convolution output size
    C.height = A.height + B.height - 1;
    A.elements = (float *)malloc(A.width * A.height * sizeof(float));
    B.elements = (float *)malloc(B.width * B.height * sizeof(float));
    C.elements = (float *)malloc(C.width * C.height * sizeof(float));

    // ---- pass 3: parse both matrices straight into the flat float
    // buffers (the intermediate double** copies of the original were
    // leaked and unnecessary) ----
    pFile = fopen("input_large.txt", "r");
    if (pFile == NULL) {
        perror("input_large.txt");
        return 1;
    }
    int k = 0;
    for (int r = 0; r < rows1 && fgets(mystring, 9999999, pFile) != NULL; r++) {
        char *p = mystring;
        for (int c = 0; c < cols1; c++)
            A.elements[k++] = strtod(p, &p);
    }
    fgets(mystring, 9999999, pFile);      // skip the blank separator line
    k = 0;
    for (int r = 0; r < rows2 && fgets(mystring, 9999999, pFile) != NULL; r++) {
        char *p = mystring;
        for (int c = 0; c < cols2; c++)
            B.elements[k++] = strtod(p, &p);
    }
    fclose(pFile);

    // ---- load A and B to device memory ----
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    // ---- invoke kernel ----
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 numBlocks((C.width + BLOCK_SIZE - 1) / BLOCK_SIZE,
                   (C.height + BLOCK_SIZE - 1) / BLOCK_SIZE);
    MatMulKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
    cudaError_t err = cudaGetLastError();   // launch errors were silently ignored
    if (err != cudaSuccess) {
        cerr << "kernel launch failed: " << cudaGetErrorString(err) << endl;
        return 1;
    }

    // Read C from device memory (synchronizes with the kernel)
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);

    int i;
    for (i = 0; i < C.width * C.height; i++)
        cout << C.elements[i] << " ";
    cout << endl << i;

    free(A.elements);
    free(B.elements);
    free(C.elements);
}

// 2-D "full" convolution kernel: each thread computes one output element
// C[row][col] = sum over (e, c) of A[row-e][col-c] * B[e][c] for every
// overlap that stays inside A.
// BUG FIX: the original accumulated with "+=" into device memory that was
// never zeroed, so the output contained whatever garbage cudaMalloc
// returned; the sum is now kept in a register and stored once.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width)
        return;

    float acc = 0.0f;
    for (int e = 0; e < B.height; ++e)
        for (int c = 0; c < B.width; ++c)
            if (row >= e && col >= c && (row - e) < A.height && (col - c) < A.width)
                acc += A.elements[(row - e) * A.width + (col - c)]
                     * B.elements[e * B.width + c];
    C.elements[row * C.width + col] = acc;
}
5,361
#include <stdio.h>
#include <stdlib.h>

#define N (1024*1024)
#define M (1000000)

// One element per thread: seeds buf[i] with i/N and iterates the map
// x <- x*x - 0.25 M times.  The iteration now runs in a register instead
// of round-tripping global memory every step (same result, far cheaper),
// and the tail is guarded even though N is a multiple of the block size.
__global__ void cudakernel(float *buf)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N) {
        float v = 1.0f * i / N;
        for (int j = 0; j < M; j++)
            v = v * v - 0.25f;
        buf[i] = v;
    }
}

int main()
{
    // BUG FIX: `float data[N]` was 4 MB of automatic storage — enough to
    // overflow the default stack on many systems.  Use the heap instead.
    float *data = (float *)malloc(N * sizeof(float));
    if (data == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    float *d_data;
    if (cudaMalloc(&d_data, N * sizeof(float)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        free(data);
        return 1;
    }

    cudakernel<<<N/256, 256>>>(d_data);
    cudaMemcpy(data, d_data, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_data);

    // BUG FIX: the index from scanf was used without validation, allowing
    // an out-of-bounds read of data[].
    int sel;
    printf("Enter an index: ");
    if (scanf("%d", &sel) != 1 || sel < 0 || sel >= N) {
        fprintf(stderr, "index must be in [0, %d)\n", N);
        free(data);
        return 1;
    }
    printf("data[%d] = %f\n", sel, data[sel]);

    free(data);
    return 0;
}
5,362
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512

// c[i] = a[i] + b[i], one element per thread.  N divides evenly by
// THREADS_PER_BLOCK, but the guard keeps the kernel safe for any launch.
__global__ void add(int *a, int *b, int *c)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < N)
        c[index] = a[index] + b[index];
}

// Fills a[0..n-1] with 0..n-1 (deterministic stand-in for random data).
void random_ints(int *a, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        a[i] = i;
    }
}

int main(void)
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);

    // allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // allocate space for host copies of a, b, c and setup input values
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);

    // copy inputs to device.
    // BUG FIX: the original passed &a and &b — the address of the pointer
    // variable itself — so cudaMemcpy copied `size` bytes of stack garbage
    // (an out-of-bounds read) instead of the arrays.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() kernel on GPU with N threads
    add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);

    // copy result back to host (BUG FIX: was &c for the same reason)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // cleanup (host buffers were previously leaked)
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
5,363
/*
 * 2-D steady-state heat conduction (Laplace) solver on a Cartesian grid.
 * Uses red-black successive over-relaxation (SOR, relaxation factor `omega`)
 * on the GPU with a shared-memory tile per block and per-block convergence
 * flags; an analytic Fourier-series solution (getT) provides the reference
 * for the error report.
 *
 * NOTE(review): the convergence test divides (new - old) by (old + epsilon)
 * without fabs(), so a large *negative* update never blocks convergence —
 * confirm this is intended.
 * NOTE(review): every in-range thread writes converge[blockPos] = true while
 * others in the same launch may set it false; the surviving value depends on
 * write ordering between threads — verify this race is benign.
 * NOTE(review): main() reads tbot/ttop/tleft/tright, nx/ny, lx/ly without
 * ever assigning them (the cin lines are commented out), so the solver runs
 * on indeterminate values — the input block presumably should be re-enabled.
 */
#include <iostream>
#include <cmath>
#include <iomanip>
#include <fstream>
#include <algorithm>
using namespace std;

#define PI 3.14159265359
// Flattened (i,j) index into a (nx+1) x (ny+1) row-major grid.
#define grid(i,j,ny) i*(ny+1)+j
#define omega 1.5
#define RelativeError 1e-4
#define epsilon 1e-10
// Interior tile dimensions per block (halo adds 2 in each direction).
#define threadx 16
#define thready 16

//-----------------------------------------------------------------------------
// Linear index of interior point (i,j); not used by the GPU path.
int getPos(int i, int j, int nx){
    return (((j-1) * (nx -1)) + i) - 1;
}
//-----------------------------------------------------------------------------
// n-th Fourier coefficient of the analytic series solution.
double getQ(int n, double temp, double lx, double ly){
    double res = cos(n*PI);
    res -= 1;
    res /= sinh(lx*n*PI/ly);
    res *= -2*temp;
    res /= (n*PI);
    return res;
}
//-----------------------------------------------------------------------------
// Analytic temperature at (x,y): 96-term truncation of the series solution.
double getT(double x, double y, double lx, double ly, double temp){
    double sum = 0;
    for(int n = 1; n<97; n++){
        sum += getQ(n, temp, lx, ly)*sinh(n*PI*x/ly)*sin(n*PI*y/ly);
    }
    return sum;
}
//-----------------------------------------------------------------------------
// Largest of the four boundary temperatures (drives the analytic solution).
double findMaxTemp(double tleft, double tright, double ttop, double tbottom){
    double max = tleft;
    if(tright > max) max = tright;
    if(ttop > max) max = ttop;
    if(tbottom > max) max = tbottom;
    return max;
}
//-----------------------------------------------------------------------------
/*
 * One red-black SOR half-sweep.  Launch: blocks of (threadx+2)x(thready+2)
 * threads (interior tile plus one-cell halo) with
 * (threadx+2)*(thready+2)*sizeof(double) dynamic shared memory.  Each block
 * owns two convergence flags in `converge` (one per colour), selected by
 * isRed.
 */
__global__ void update(double *grid_old, double * grid_new, bool isRed, bool *converge, int nx, int ny, double ldx, double ldy, int blocky){
    extern __shared__ double grid_s[];
    int i = blockIdx.x * threadx + threadIdx.x;//x position index
    int j = blockIdx.y * thready + threadIdx.y;//y position index
    int sharedPos = threadIdx.x * (thready+2) + threadIdx.y;//Making 2D into 1D for the shared memory position in this block
    int blockPos = 2*(blockIdx.x * blocky + blockIdx.y);
    if(isRed){//Could have just as well been !isRed
        blockPos++;//Because each block has two convergence flags, need to only update one of the two
    }
    /*if(blockPos%2 == 1 && (threadIdx.x == 0 || threadIdx.y == 0 || threadIdx.x == threadx+1 || threadIdx.y == thready+1)){ printf("%d %d %d\n", blockPos, i, j); }*/
    if(i< nx + 1 && j < ny + 1){//Within the domain of the grid
        grid_s[sharedPos] = grid_old[grid(i,j,ny)];
    }
    __syncthreads();
    if(i< nx + 1 && j < ny + 1){//Within the domain of the grid
        converge[blockPos] = true;//Default. Then all you need is one 'false' to force another iteration
        if(i != 0 && i != nx && j != 0 && j!= ny){//boundaries
            if((i%2 == j%2) == isRed){//Red or not Red?
                if(threadIdx.x != 0 && threadIdx.y != 0 && threadIdx.x != threadx + 1 && threadIdx.y != thready + 1){//halo points
                    grid_new[grid(i,j,ny)] = (1-omega)*grid_s[sharedPos];//copy a weighted fraction of the old
                    //Then update with the remaining fraction with the new
                    grid_new[grid(i,j,ny)] += omega *(
                        grid_s[sharedPos-(thready+2)]*ldy*ldy/(2*(ldx*ldx+ldy*ldy)) + //left
                        grid_s[sharedPos+thready+2]*ldy*ldy/(2*(ldx*ldx+ldy*ldy)) + //Right
                        grid_s[sharedPos+1]*ldx*ldx/(2*(ldx*ldx+ldy*ldy)) + //top
                        grid_s[sharedPos-1]*ldx*ldx/(2*(ldx*ldx+ldy*ldy)) //bottom
                    );
                    // NOTE(review): no fabs() — see header note.
                    if((grid_new[grid(i,j,ny)] - grid_s[sharedPos])/(grid_s[sharedPos] + epsilon) > RelativeError){
                        converge[blockPos] = false;
                    }
                }//end of not halo
            }//end of red/black
        }//end of not boundaries
    }//end of domain of the grid
}//end of update
//-----------------------------------------------------------------------------
int main(){
    double tbot, ttop, tleft, tright;
    //cout << "Temperature of bottom side: "; cin >> tbot;
    //cout << "Temperature of top side: "; cin >> ttop;
    //cout << "Temperature of left side: "; cin >> tleft;
    //cout << "Temperature of right side: "; cin >> tright;
    int nx, ny;
    double lx, ly;
    //cout << "Number of segments in x: "; cin >> nx;
    //cout << "Number of segments in y: "; cin >> ny;
    //cout << "Length of x: "; cin >> lx;
    //cout << "Length of y: "; cin >> ly;
    double lnx = lx/nx;  // cell size in x
    double lny = ly/ny;  // cell size in y
    double *grid_h, *error, *percerror;
    double *grid_d_old, *grid_d_new;
    size_t gridSize = (nx + 1)*(ny + 1)*sizeof(double);
    cudaMalloc(&grid_d_new, gridSize);
    cudaMalloc(&grid_d_old, gridSize);
    grid_h = (double*)malloc(gridSize);
    error = (double*)malloc(gridSize);
    percerror = (double*)malloc(gridSize);
    //initialize grid with initial conditions
    //i is an index of x position
    for(int i = 0; i< nx + 1; i++){
        grid_h[i*(ny+1)] = tbot;
        grid_h[i*(ny+1)+ny] = ttop;
        //boundaries are defined so error = 0
        error[i*(ny+1)] = 0;
        error[i*(ny+1)+ny] = 0;
        percerror[i*(ny+1)] = 0;
        percerror[i*(ny+1)+ny] = 0;
    }
    for(int j = 0; j< ny + 1; j++){
        grid_h[j] = tleft;//i=0 here
        grid_h[nx*(ny+1)+j] = tright;//i=nx here
        //boundaries are defined so error = 0
        error[j] = 0;
        error[nx*(ny+1)+j] = 0;
        percerror[j] = 0;
        percerror[nx*(ny+1)+j] = 0;
    }
    //Initial guess is based on a linear interpolation between the boundaries
    double deltai = (tright - tleft)/nx;
    double deltaj = (ttop - tbot)/ny;
    for(int i = 1; i< nx; i++){
        for(int j = 1; j < ny; j++){
            grid_h[i*(ny+1)+j] = 0.5*((tleft + deltai*i)+(tbot + deltaj*j));
        }
    }
    cudaMemcpy(grid_d_new, grid_h, gridSize, cudaMemcpyHostToDevice);
    // Block = interior tile plus one-cell halo on every side.
    dim3 threadDim(threadx + 2, thready + 2);
    int blockx = 1 + ((nx-1 -1)/threadx);//nx-1 is the number of interior x points
    int blocky = 1 + ((ny-1 - 1)/thready);//ny-1 is the number of interior y points
    dim3 blockDim(blockx, blocky);
    bool *converge_d;
    bool *converge_h;
    converge_h = (bool*)malloc(2*blockx*blocky*sizeof(bool));
    size_t convSize = 2*blockx*blocky*sizeof(bool);
    cudaMalloc(&converge_d, convSize);
    int steps = 0;
    bool didConverge = false;
    printf("Blocks: %d\n", blockx*blocky);
    // Iterate red then black half-sweeps until every per-block flag is true.
    while(!didConverge){
        //cout<<"BEFORE:"<<grid_h[7*(ny+1)+3]<<endl;
        //printf("Step: %d\n", steps);
        cudaMemcpy(grid_d_old, grid_d_new, gridSize, cudaMemcpyDeviceToDevice);
        //cudaDeviceSynchronize();
        update<<<blockDim,threadDim,(threadx+2)*(thready+2)*sizeof(double)>>>(grid_d_old, grid_d_new, true, converge_d, nx, ny, lnx, lny, blocky);
        //cudaDeviceSynchronize();
        cudaMemcpy(grid_d_old, grid_d_new, gridSize, cudaMemcpyDeviceToDevice);
        //cudaDeviceSynchronize();
        update<<<blockDim,threadDim,(threadx+2)*(thready+2)*sizeof(double)>>>(grid_d_old, grid_d_new, false, converge_d, nx, ny, lnx, lny, blocky);
        //cudaDeviceSynchronize();
        cudaMemcpy(converge_h, converge_d, convSize, cudaMemcpyDeviceToHost);
        //cudaDeviceSynchronize();
        steps++;
        // AND-reduce all per-block, per-colour convergence flags on the host.
        didConverge = converge_h[0];
        for(int i = 1; i< 2*blockx*blocky; i++){
            didConverge = didConverge && converge_h[i];
        }
    }//converged
    printf("\nconverged in %d steps.\n", steps);
    cudaMemcpy(grid_h, grid_d_new, gridSize, cudaMemcpyDeviceToHost);
    //cudaDeviceSynchronize();
    //cout<<"AFTER:"<<grid_h[7*(ny+1)+3]<<endl;
    // Compare every interior point against the analytic solution.
    for(int i = 1; i< nx; i++){
        for(int j = 1; j< ny; j++){
            double actual = getT(i*lnx, j*lny, lx, ly, findMaxTemp(tleft, tright, ttop, tbot));
            *(error+(i*(ny+1))+j) = *(grid_h+(i*(ny+1))+j) - actual;
            if(*(error+(i*(ny+1))+j) < 0){
                *(error+(i*(ny+1))+j) *= -1;
            }
            //cout << *(error+(i*(ny+1))+j) << " " << actual << " " << *(error+(i*(ny+1))+j)/actual << endl;
            *(percerror+(i*(ny+1))+j) = 100**(error+(i*(ny+1))+j)/(actual+epsilon);
        }
    }
    /* (disabled) console dump of the final grid, error, and percent-error
     * tables via cout/setprecision. */
    /* (disabled) data export for plotly
     * ("SimpleLaplaceCartesianDataforplotly2.txt") — note it referenced a
     * variable named `grid`, which shadows the grid() macro and does not
     * exist here. */
    /* (disabled) data export for matlab ("matLabData.txt") — same caveat. */
    //Output data for contour maps plotly
    ofstream myfile;
    myfile.open("plotlyContour.txt");
    int i, j;
    for(i = 0; i< nx; i++){
        myfile << i*lnx << " ";
    }
    myfile << i*lnx << "\n";
    for(j = 0; j<ny; j++){
        myfile << j*lny << " ";
    }
    myfile << j*lny << "\n";
    for(j = 0; j<ny + 1; j++){
        for(i = 0; i< nx; i++){
            myfile << grid_h[i*(ny+1)+j] << " ";
        }
        myfile << grid_h[i*(ny+1)+j] << "\n";
    }
    myfile.close();
    cudaFree(grid_d_new);
    cudaFree(grid_d_old);
    cudaFree(converge_d);
    free(grid_h);
    free(error);
    free(percerror);
    free(converge_h);
}
5,364
/***************************************************************************//**
 * \file structure.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 */

#include "structure.h"

namespace kernels
{
/*
 * Updates all the velocities and positions of the body nodes
 * param By     y positions of the nodes
 * param vB     v velocities of body nodes
 * param Bdy    offset of each node from the body midpoint
 * param vnew   new v velocity applied to every node
 * param midY   current y position of the body midpoint
 * param totalPoints total number of body points
 */
__global__
void update_body_viv(double *By, double *vB, double *Bdy, double vnew, double midY, int totalPoints)
{
	int i = threadIdx.x + (blockDim.x * blockIdx.x);
	// BUG FIX: the guard was `i > totalPoints`, which let the thread with
	// i == totalPoints write one element past the end of each array.
	if (i >= totalPoints)
		return;
	vB[i] = vnew;
	By[i] = midY + Bdy[i];
}

/* Fill the "previous velocity" array uB0 with unew for every body point. */
__global__
void initialise_old(double *uB0, double unew, int totalPoints)
{
	int i = threadIdx.x + (blockDim.x * blockIdx.x);
	// Same off-by-one fix as update_body_viv.
	if (i >= totalPoints)
		return;
	uB0[i] = unew;
}
}
5,365
/* skeleton code for assignment3 COMP4901D Hash Join xjia@ust.hk 2015/04/15 */
/*
 * GPU hash (partitioned) join: both relations are split into numPart
 * partitions by the top `numBits` bits of the key, then one block per
 * partition joins its chunk with the matching chunk held in shared memory.
 *
 * FIX APPLIED (see join()): the "last partition" test used
 * `blockIdx.x == blockDim.x - 1`, comparing the block index against the
 * thread count per block, so it was never true and every block — including
 * the last — read d_startPos[blockIdx.x + 1], one past the end of the array
 * for the final partition.  The correct test is against gridDim.x - 1.
 */
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cassert>
#include <memory>
#include <limits>
#include <algorithm>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
using namespace std;

const int numBits = 6;
const int totalBits = 19;
const int numPart = 1 << numBits; // = 2^6
const int numPerPart = 1 << (totalBits - numBits); // = 2^(19-6)
const int mask = (1 << numBits) - 1;
const int numThreads = 128;
const int numBlocks = 512;

#define cudaCheckError() { \
    cudaError_t e=cudaGetLastError(); \
    if(e!=cudaSuccess) { \
        printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
        exit(EXIT_FAILURE); \
    } \
}

/* return the partition ID of the input element */
__device__ int getPartID(int element)
{
    element >>= (totalBits - numBits);
    return element & mask;
}

/*
 * input: d_key[], array size N
 * output: d_pidArray[]
 * function: for input array d_key[] with size N, return the partition ID
 * array d_pidArray[]
 */
__global__ void mapPart(int d_key[],int d_pidArray[],int N)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int threadNumber = blockDim.x * gridDim.x;
    while(tid < N)
    {
        d_pidArray[tid] = getPartID(d_key[tid]);
        tid += threadNumber;
    }
}

/*
 * input: d_pidArray[], array size N
 * output: d_Hist[]
 * function: calculate the histogram d_Hist[] based on the partition ID
 * array d_pidArray[] (one private histogram per thread, kept in shared
 * memory, written out transposed so the scan below is a per-partition scan).
 */
__global__ void count_Hist(int d_Hist[],int d_pidArray[],int N)
{
    __shared__ int s_Hist[numThreads * numPart];
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    int threadNumber = blockDim.x * gridDim.x;
    int offset = threadIdx.x * numPart;
    for(int i = 0; i < numPart; ++i)
        s_Hist[i + offset] = 0;
    for(int i = threadId; i < N; i += threadNumber)
        s_Hist[offset + d_pidArray[i]]++;
    for(int i = 0; i < numPart; ++i)
        d_Hist[i * threadNumber + threadId] = s_Hist[offset + i];
    __syncthreads();
}

/*
 * input: d_pidArray[] (partition ID array), d_psSum[] (prefix sum of
 * histogram), array size N
 * output: d_loc[] (location array)
 * function: for each element, calculate its corresponding location in the
 * result array based on its partition ID and prefix sum of histogram
 */
__global__ void write_Hist(int d_pidArray[],int d_psSum[],int d_loc[],int N)
{
    __shared__ int s_psSum[numThreads * numPart];
    int threadId = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = gridDim.x * blockDim.x;
    int offset = threadIdx.x * numPart;
    for(int i = 0; i < numPart; ++i)
        s_psSum[i + offset] = d_psSum[threadId + i * threadNumber];
    for(int i = threadId; i < N; i += threadNumber)
    {
        int pid = d_pidArray[i];
        d_loc[i] = s_psSum[pid + offset];
        s_psSum[pid + offset]++;
    }
}

/*
 * input: d_psSum[] (prefix sum of histogram), array size N
 * output: start position of each partition
 * function: for each partition (chunck to be loaded in the join step),
 * calculate its start position in the result array (the first element's
 * position of this partition)
 */
__global__ void getStartPos(int d_psSum[],int d_startPos[],int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = gridDim.x * blockDim.x;
    if(tid >= numPart)
        return;
    d_startPos[tid] = d_psSum[tid * threadNumber];
}

/*
 * input: d_key[],d_value[],d_loc[],array size N
 * output: out_key[],out_value[]
 * function: rewrite the (key,value) pair to its corresponding position based
 * on location array d_loc[]
 */
__global__ void scatter(int d_key[],float d_value[],int out_key[],float out_value[],int d_loc[],int N)
{
    int threadId = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = blockDim.x * gridDim.x;
    while(threadId < N)
    {
        out_key[d_loc[threadId]] = d_key[threadId];
        out_value[d_loc[threadId]] = d_value[threadId];
        threadId += threadNumber;
    }
}

/*
 * function: split the (key,value) array with size N, record the start
 * position of each partition at the same time.  The split is done out of
 * place into temporary buffers, then copied back device-to-device.
 */
void split(int *d_key,float *d_value,int *d_startPos,int N)
{
    dim3 grid(numBlocks);
    dim3 block(numThreads);
    /*if(N<numThreads){ grid=1; block=N; }else{ grid=(N+numThreads-1)/numThreads; block=numThreads; }*/
    int num_threads=grid.x * block.x;
    int hist_len = num_threads * numPart;
    int *d_pidArr, *d_Hist, *d_psSum, *d_loc, *d_outkey;
    float *d_outvalue;
    cudaMalloc(&d_outkey, sizeof(int)*N);
    cudaCheckError();
    cudaMalloc(&d_outvalue, sizeof(float)*N);
    cudaCheckError();
    cudaMalloc(&d_loc,sizeof(int)*N);
    cudaCheckError();
    cudaMalloc(&d_pidArr, sizeof(int)*N);
    cudaCheckError();
    cudaMalloc(&d_Hist, sizeof(int)*hist_len);
    cudaCheckError();
    cudaMalloc(&d_psSum, sizeof(int)*hist_len);
    cudaCheckError();
    mapPart<<<grid,block>>>(d_key, d_pidArr, N);
    cudaCheckError();
    count_Hist<<<grid,block>>>(d_Hist, d_pidArr, N);
    cudaCheckError();
    thrust::device_ptr<int> dev_Hist(d_Hist);
    thrust::device_ptr<int> dev_psSum(d_psSum);
    thrust::exclusive_scan(dev_Hist, dev_Hist + hist_len, dev_psSum);
    cudaCheckError();
    getStartPos<<<grid,block>>>(d_psSum, d_startPos, N);
    cudaCheckError();
    write_Hist<<<grid,block>>>(d_pidArr, d_psSum, d_loc, N);
    cudaCheckError();
    scatter<<<grid,block>>>(d_key, d_value, d_outkey, d_outvalue, d_loc, N);
    cudaCheckError();
    cudaMemcpy(d_key, d_outkey, sizeof(int)*N, cudaMemcpyDeviceToDevice);
    cudaCheckError();
    cudaMemcpy(d_value, d_outvalue, sizeof(float)*N, cudaMemcpyDeviceToDevice);
    cudaCheckError();
    cudaFree(d_psSum);
    cudaCheckError();
    cudaFree(d_Hist);
    cudaCheckError();
    cudaFree(d_pidArr);
    cudaCheckError();
    cudaFree(d_loc);
    cudaCheckError();
    cudaFree(d_outvalue);
    cudaCheckError();
    cudaFree(d_outkey);
    cudaCheckError();
    /* add your code here */
}

/*
 * function: perform hash join on two (key,value) arrays.  One block per
 * partition: block b caches partition b of relation 2 in shared memory,
 * then probes it with partition b of relation 1.
 */
__global__ void join(int d_key1[],float d_value1[],int d_key2[],float d_value2[],int d_startPos1[],int d_startPos2[],int d_result[],int N1,int N2)
{
    __shared__ int inner[numPerPart];
    int b_offset = threadIdx.x;
    int b_size = blockDim.x;
    //load B to inner shared
    int start1 = d_startPos1[blockIdx.x];
    int start2 = d_startPos2[blockIdx.x];
    int end1, end2;
    /* BUG FIX: this compared blockIdx.x with blockDim.x - 1 (threads per
     * block minus one), which no block index reaches, so the last block
     * read d_startPos[blockIdx.x + 1] out of bounds.  The last block is
     * blockIdx.x == gridDim.x - 1. */
    if(blockIdx.x == gridDim.x - 1){
        end1 = N1;
        end2 = N2;
    }
    else{
        end1 = d_startPos1[blockIdx.x + 1];
        end2 = d_startPos2[blockIdx.x + 1];
    }
    for(int i=start2+b_offset;i<end2;i+=b_size){
        inner[i-start2] = d_key2[i];
    }
    __syncthreads();
    for(int i=start1+b_offset;i<end1;i+=b_size){
        d_result[i] = -1;
        for(int j=0;j<end2-start2;++j){
            if(d_key1[i] == inner[j]){
                d_result[i] = start2 + j;
            }
        }
    }
}

/*void check_arr(int* arr, int N){ int lower = std::numeric_limits<int>::min(); std::for_each(arr, arr+N, [&](int& val){ if(val < lower){ fprintf(stderr, "array not sorted! @ %td\n", &val - arr); exit(-1); } else{ lower = val; } }); }*/

/* Debug helper: dump a partitioned array and its partition start offsets. */
void print_arr(int* arr, int* loc, int N)
{
    fprintf(stderr, "arr:\n");
    //check_arr(arr, N);
    for(int i=0;i<numPart;++i){
        int start=loc[i], end;
        if(i==numPart-1){
            end = N;
        }else{
            end = loc[i+1];
        }
        fprintf(stderr, "from %d to %d: ", start, end);
        for(int j=start;j!=end;++j){
            fprintf(stderr, "%08x ", arr[j]);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "loc:\n");
    for(int i=0;i<numPart;++i){
        fprintf(stderr, "%d ", loc[i]);
    }
    fprintf(stderr, "\n");
}

/* Partition both relations, then launch one join block per partition. */
void hashJoin(int *d_key1,float *d_value1,int *d_key2,float *d_value2,int N1,int N2,int *d_result)
{
    int *d_startPos1,*d_startPos2;
    cudaMalloc(&d_startPos1,sizeof(int) * numPart);
    cudaCheckError();
    cudaMalloc(&d_startPos2,sizeof(int) * numPart);
    cudaCheckError();
    split(d_key1,d_value1,d_startPos1,N1);
    //std::vector<int> arr1_finish(N1);
    //std::vector<int> arr1_loc(numPart);
    //cudaMemcpy(&arr1_loc.front(), d_startPos1, sizeof(int)*numPart, cudaMemcpyDeviceToHost); cudaCheckError();
    //cudaMemcpy(&arr1_finish.front(), d_key1, sizeof(int)*N1, cudaMemcpyDeviceToHost); cudaCheckError();
    //fprintf(stderr, "arr1: ");
    //print_arr(&arr1_finish.front(), &arr1_loc.front(), N1);
    split(d_key2,d_value2,d_startPos2,N2);
    //std::vector<int> arr2_finish(N2);
    //std::vector<int> arr2_loc(numPart);
    //cudaMemcpy(&arr2_loc.front(), d_startPos2, sizeof(int)*numPart, cudaMemcpyDeviceToHost); cudaCheckError();
    //cudaMemcpy(&arr2_finish.front(), d_key2, sizeof(int)*N2, cudaMemcpyDeviceToHost); cudaCheckError();
    //fprintf(stderr, "arr2: ");
    //print_arr(&arr2_finish.front(), &arr2_loc.front(), N2);
    dim3 grid(numPart);
    dim3 block(1024);
    join<<<grid,block>>>(d_key1,d_value1,d_key2,d_value2,d_startPos1,d_startPos2,d_result,N1,N2);
}

int main()
{
    freopen("in.txt","r",stdin);
    int *h_key1, *h_key2, *d_key1, *d_key2;
    float *h_value1, *h_value2, *d_value1, *d_value2;
    int *h_result, *d_result;
    int N1,N2;
    {
        int tmp = scanf("%d%d",&N1,&N2);
        (void)tmp;
        assert(tmp==2);
    }
    h_key1 = (int*)malloc(N1 * sizeof(int));
    h_key2 = (int*)malloc(N2 * sizeof(int));
    h_value1 = (float*)malloc(N1 * sizeof(float));
    h_value2 = (float*)malloc(N2 * sizeof(float));
    h_result = (int*)malloc(N1 * sizeof(int));
    cudaMalloc(&d_key1, N1 * sizeof(int));
    cudaCheckError();
    cudaMalloc(&d_key2, N2 * sizeof(int));
    cudaCheckError();
    cudaMalloc(&d_value1, N1 * sizeof(float));
    cudaCheckError();
    cudaMalloc(&d_value2, N2 * sizeof(float));
    cudaCheckError();
    cudaMalloc(&d_result, N1 * sizeof(int));
    cudaCheckError();
    for(int i = 0; i < N1; ++i){
        int tmp = scanf("%d%f",&h_key1[i],&h_value1[i]);
        (void)tmp;
        assert(tmp==2);
    }
    for(int i = 0; i < N2; ++i){
        int tmp = scanf("%d%f",&h_key2[i],&h_value2[i]);
        (void)tmp;
        assert(tmp==2);
    }
    // -1 means "no match"; memset with 0xff bytes yields int -1.
    memset(h_result,-1,sizeof(int) * N1);
    cudaMemcpy(d_key1,h_key1, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_result,h_result, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_key2,h_key2, sizeof(int) * N2, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_value1,h_value1, sizeof(float) * N1, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_value2,h_value2, sizeof(float) * N2, cudaMemcpyHostToDevice);
    cudaCheckError();
    hashJoin(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result);
    cudaCheckError();
    cudaMemcpy(h_result,d_result,sizeof(int) * N1, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_key1,d_key1,sizeof(int) * N1, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_key2,d_key2,sizeof(int) * N2, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_value1,d_value1,sizeof(float) * N1, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_value2,d_value2,sizeof(float) * N2, cudaMemcpyDeviceToHost);
    cudaCheckError();
    int matched = 0;
    freopen("out.txt","w",stdout);
    for(int i = 0;i < N1; ++i)
    {
        if(h_result[i] == -1)
            continue;
        matched++;
        printf("Key %d\nValue1 %.2f Value2 %.2f\n\n",h_key1[i],h_value1[i],h_value2[h_result[i]]);
    }
    printf("Matched %d\n",matched);
    fclose(stdout);
    freopen("/dev/tty","w",stdout);
    free(h_key1);
    free(h_key2);
    free(h_value1);
    free(h_value2);
    free(h_result);
    cudaFree(d_key1);
    cudaCheckError();
    cudaFree(d_key2);
    cudaCheckError();
    cudaFree(d_value1);
    cudaCheckError();
    cudaFree(d_value2);
    cudaCheckError();
    cudaFree(d_result);
    cudaCheckError();
    cudaDeviceReset();
    cudaCheckError();
    return 0;
}
5,366
#include "includes.h"

/*
 * Square matrix multiply dc = da * db for num x num matrices, row-major.
 * Each thread walks a 2-D grid-stride pattern over output elements (i, j),
 * so the kernel is correct for any launch configuration.
 */
__global__ void multMatriz(float *da, float *db, float *dc, int num)
{
    int j = threadIdx.x + blockIdx.x * blockDim.x;  // column
    int i = threadIdx.y + blockIdx.y * blockDim.y;  // row
    while (j < num) {
        while (i < num) {
            /* BUG FIX: the accumulator was declared (and zeroed) once per
             * thread, outside both stride loops, so every element a thread
             * computed after its first also included the sums of all its
             * previous elements.  Reset it for each output element. */
            float sum = 0.0f;
            for (unsigned int k = 0; k < num; k++)
                sum += da[i * num + k] * db[k * num + j];
            dc[i * num + j] = sum;
            i += gridDim.y * blockDim.y;
        }
        j += gridDim.x * blockDim.x;
        i = threadIdx.y + blockIdx.y * blockDim.y;  // restart row walk
    }
}
5,367
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

const int PARTITION_SIZE = 32;

// Row-major element access: mtx[row][column] for a matrix of given width.
#define AT(mtx, width, row, column) \
    mtx[(row) * (width) + (column)]

/* Wall-clock time in seconds (microsecond resolution). */
inline double nowSec()
{
    struct timeval t;
    struct timezone tzp;
    gettimeofday(&t, &tzp);
    return t.tv_sec + t.tv_usec*1e-6;
}

/*
 * Tiled matrix multiply C = A*B across Ndev devices: device `dev` owns a
 * horizontal slice of A and C (N/Ndev rows) and a full copy of B.
 * Launch: PARTITION_SIZE x PARTITION_SIZE threads per block; the grid covers
 * the device's slice exactly (N is a multiple of PARTITION_SIZE*Ndev), so
 * no bounds guards are needed.
 */
__global__ void global_mmul (int *A, int *B, int *C, int N, int Ndev, int dev)
{
    int NperDev = N/Ndev;
    int i = NperDev*(1+dev) -1 - (blockIdx.y * blockDim.y + threadIdx.y);
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int iAC = i % NperDev;   // row within this device's A/C slice
    int i_part = i % PARTITION_SIZE;
    int j_part = j % PARTITION_SIZE;
    int rowPerPart = N/PARTITION_SIZE;
    __shared__ int Apart[PARTITION_SIZE][PARTITION_SIZE];
    __shared__ int Bpart[PARTITION_SIZE][PARTITION_SIZE];
    AT(C, N, iAC, j) = 0;
    for (int n = 0; n < rowPerPart; n++) {
        Apart[i_part][j_part] = AT(A, N, iAC, n*PARTITION_SIZE + j_part);
        Bpart[i_part][j_part] = AT(B, N, n*PARTITION_SIZE + i_part, j);
        __syncthreads();
        for (int k=0; k<PARTITION_SIZE; k++)
            AT(C, N, iAC, j) += Apart[i_part][k]*Bpart[k][j_part];
        /* BUG FIX: a second barrier is required here — without it, threads
         * that finish the dot-product loop early begin overwriting Apart/
         * Bpart for tile n+1 while slower threads are still reading tile n,
         * a shared-memory data race producing nondeterministic results. */
        __syncthreads();
    }
}

#ifdef PRINT
/* Debug dump of a matrix distributed across per-device slices. */
void printMtx (int **m, int N, int width, int height)
{
    for (int i=0; i<N; i++) {
        for (int j=0; j<N; j++) {
            if (width > height)
                printf("%d\t", AT(m[i/height], width, i%height, j));
            else
                printf("%d\t", AT(m[j/width], width, i, j%width));
        }
        puts("\n");
    }
}
#endif

int main(int argc, char **argv)
{
    if (argc != 2) {
        puts("Usage: Matrix_mult [N]\n");
        return -1;
    }
    int N=atoi(argv[1]);
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    if (N % (PARTITION_SIZE*nDevices)) {
        printf ("error: N must be a multiple of PARTITION_SIZE*nDevices=%d\n", PARTITION_SIZE*nDevices);
        return -1;
    }
    unsigned NN=N*N;
    unsigned NNperDevice = NN/nDevices;
    unsigned NperDevice = N/nDevices;
    int Nblocks = N/PARTITION_SIZE;
    int NblocksPerDevice =Nblocks/nDevices;
    int **A_d, **B_d, **C_d;
    int **A_h, *B_h, **C_h;
    A_d = (int**)malloc(nDevices * sizeof(int*));
    B_d = (int**)malloc(nDevices * sizeof(int*));
    C_d = (int**)malloc(nDevices * sizeof(int*));
    A_h = (int**)malloc(nDevices * sizeof(int*));
    C_h = (int**)malloc(nDevices * sizeof(int*));
    B_h = (int*)malloc(sizeof(int)*NN);
    // Per-device slices of A and C; B replicated on every device.
    for (int i=0; i<nDevices; i++) {
        A_h[i] = (int*)malloc(sizeof(int)*NNperDevice);
        C_h[i] = (int*)malloc(sizeof(int)*NNperDevice);
        cudaSetDevice(i);
        cudaMalloc(&A_d[i], sizeof(int)*NNperDevice);
        cudaMalloc(&C_d[i], sizeof(int)*NNperDevice);
        cudaMalloc(&B_d[i], sizeof(int)*NN);
    }
    cudaDeviceSynchronize();
    // A = identity, B[i][j] = i*N+j, so C should equal B.
    for (int i=0; i<N; i++) {
        for (int j=0; j<N; j++) {
            AT(A_h[i/NperDevice], N, i%NperDevice, j) = ((i == j) ? 1 : 0);
            AT(B_h, N, i, j) = i*N+j;
        }
    }
    for (int i=0; i<nDevices; i++) {
        cudaSetDevice(i);
        cudaMemcpy(B_d[i], B_h, NN*sizeof(int),cudaMemcpyHostToDevice);
        cudaMemcpy(A_d[i], A_h[i], NNperDevice*sizeof(int),cudaMemcpyHostToDevice);
        cudaMemcpy(C_d[i], C_h[i], NNperDevice*sizeof(int),cudaMemcpyHostToDevice);
    }
    cudaDeviceSynchronize();
    dim3 blockPerGrid(Nblocks,NblocksPerDevice);
    dim3 threadPerBlock(PARTITION_SIZE,PARTITION_SIZE);
    double t_begin = nowSec();
    for (int i=0; i<nDevices; i++) {
        cudaSetDevice(i);
        global_mmul <<< blockPerGrid, threadPerBlock >>> (A_d[i],B_d[i],C_d[i],N,nDevices,i);
    }
    cudaDeviceSynchronize();
    double t_end = nowSec();
    for (int i=0; i<nDevices; i++) {
        cudaSetDevice(i);
        cudaMemcpy(C_h[i], C_d[i], NNperDevice*sizeof(int),cudaMemcpyDeviceToHost);
    }
    cudaDeviceSynchronize();
#ifdef PRINT
    fprintf(stderr,"A=\n");
    printMtx(A_h, N, N, NperDevice);
    fprintf(stderr,"\n\nB=\n");
    printMtx(&B_h, N, N, N);
    fprintf(stderr,"\n\nC=\n");
    printMtx(C_h, N, N, NperDevice);
    fprintf(stderr,"\n");
#endif
    printf("Elapsed time: %f sec\n", t_end - t_begin);
    for (int i=0; i<nDevices; i++) {
        free(A_h[i]);
        free(C_h[i]);
        cudaFree(A_d[i]);
        cudaFree(B_d[i]);
        cudaFree(C_d[i]);
    }
    free(A_h);
    free(A_d);
    free(B_h);
    free(B_d);
    free(C_h);
    free(C_d);
    return 0;
}
5,368
#include <stdio.h>
#include <stdlib.h>

/* Writes 7 through the given pointer.  Deliberately launched below with a
 * null pointer to demonstrate how a kernel fault is reported. */
__global__ void foo(int *ptr)
{
  *ptr = 7;
}

int main(void)
{
  // Intentional error: null device pointer triggers an illegal access.
  foo<<<1, 1>>>(0);

  // make the host block until the device is finished with foo.
  // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
  // is its documented drop-in replacement.
  cudaDeviceSynchronize();

  // check for error
  cudaError_t error = cudaGetLastError();
  if (error != cudaSuccess)
  {
    // print the CUDA error message and exit
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    exit(-1);
  }

  return 0;
}
5,369
//===============================================================================
// Name        : MatrixRotate.cpp
// Author      : Soumil Datta
// Version     : 1.0
// Description : CUDA program to rotate an NxN matrix by 90 degrees to the right
//               (transpose, then reverse each row)
//===============================================================================

#include <iostream>
using std::cout;
using std::endl;

#include <cuda_runtime.h>

unsigned int dimension { 1u };

__global__ void transpose(float *matrix, const unsigned int dimension);
__global__ void reverse(float *matrix, const unsigned int dimension);

bool CPUSolveCheck(float *originalMatrix, float *solvedMatrix);
void printMatrix(const float *matrix);

int main(int argc, char* argv[])
{
    if(argc != 2) {
        cout << "Error: Enter dimension as argument" << endl;
        exit(EXIT_FAILURE);
    }

    cout << "Rotating matrix of dimension " << argv[1] << endl;
    dimension = atoi(argv[1]);

    const size_t size { (dimension * dimension) * sizeof(float) };

    float *h_matrix { (float *)malloc(size) };
    if(h_matrix == nullptr) {
        cout << "Host matrix memory allocation unsuccessful" << endl;
        exit(EXIT_FAILURE);
    }

    // Fill matrix with random values in [0, 1)
    for(auto i { 0u }; i < dimension * dimension; ++i) {
        h_matrix[i] = rand()/(float)RAND_MAX;
    }

    // Copy array to be used while checking output
    float *h_matrix_copy { (float *)malloc(size) };
    memcpy(h_matrix_copy, h_matrix, size);

    float *d_matrix = nullptr;
    cudaMalloc((void **)&d_matrix, size);
    cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice);

    // +1 block in each direction covers dimensions that are not multiples
    // of 16; the kernels bounds-check against `dimension`.
    const dim3 threadsPerBlock(16, 16);
    const dim3 blocksPerGrid((dimension / threadsPerBlock.x) + 1, (dimension / threadsPerBlock.y) + 1);

    // Rotation = transpose followed by reversing each row.
    transpose<<<blocksPerGrid, threadsPerBlock>>>(d_matrix, dimension);
    cudaDeviceSynchronize();
    reverse<<<blocksPerGrid, threadsPerBlock>>>(d_matrix, dimension);
    cudaDeviceSynchronize();

    cudaMemcpy(h_matrix, d_matrix, size, cudaMemcpyDeviceToHost);
    cudaFree(d_matrix);

    cout << endl << endl;
    if(CPUSolveCheck(h_matrix_copy, h_matrix))
        cout << "GPU Rotate Successful" << endl;
    else
        cout << "GPU Rotate Unsuccessful" << endl;
    cout << "Program complete" << endl;

    free(h_matrix);
    free(h_matrix_copy);
    return 0;
}

/* In-place transpose: each thread strictly above the diagonal (j > i)
 * swaps its element with the mirrored one, so each pair is swapped once. */
__global__ void transpose(float *matrix, const unsigned int dimension)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < (dimension) && j < (dimension) && j > i) {
        const auto index { dimension * i + j };
        const auto invIndex { dimension * j + i };
        const auto temp { matrix[index] };
        matrix[index] = matrix[invIndex];
        matrix[invIndex] = temp;
    }
}

/* Reverse each row in place: threads in the left half (j < dimension/2)
 * swap with the mirrored column. */
__global__ void reverse(float *matrix, const unsigned int dimension)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < (dimension) && j < (dimension / 2)) {
        const auto index { dimension * i + j };
        const auto revIndex { (dimension * i) + dimension - 1 - j };
        const auto temp { matrix[index] };
        matrix[index] = matrix[revIndex];
        matrix[revIndex] = temp;
    }
}

/* Rotate originalMatrix on the CPU (destructively) and compare the full
 * result against the GPU output. */
bool CPUSolveCheck(float *originalMatrix, float *solvedMatrix)
{
    // Solve CPU-side with OriginalMatrix: transpose...
    for(auto i { 0u }; i < dimension; ++i) {
        for(auto j { i + 1 }; j < dimension; ++j) {
            const auto index { dimension * i + j };
            const auto invIndex { dimension * j + i };
            const auto temp { originalMatrix[index] };
            originalMatrix[index] = originalMatrix[invIndex];
            originalMatrix[invIndex] = temp;
        }
    }
    // ...then reverse each row.
    for(auto i { 0u }; i < dimension; ++i) {
        for(auto j { 0u }; j < dimension / 2; ++j) {
            const auto index { dimension * i + j };
            const auto revIndex { (dimension * i) + dimension - 1 - j };
            const auto temp { originalMatrix[index] };
            originalMatrix[index] = originalMatrix[revIndex];
            originalMatrix[revIndex] = temp;
        }
    }

    /* BUG FIX: the comparison loop previously stopped at j < dimension / 2,
     * so the right half of every row was never verified; compare every
     * element. */
    for(auto i { 0u }; i < dimension; ++i) {
        for(auto j { 0u }; j < dimension; ++j) {
            const auto index { dimension * i + j };
            if(!(originalMatrix[index] == solvedMatrix[index])) return false;
        }
    }

    return true;
}

// Utility: dump the matrix row by row (debugging only).
void printMatrix(const float *matrix)
{
    for(int i = 0; i < dimension * dimension; ++i) {
        if(i != 0 && i % dimension == 0) cout << endl;
        cout << matrix[i] << "\t";
    }
    cout << endl;
}
5,370
/********************************************************************
 render.c is responsible for rendering the bodies' positions and
 velocities to an ppm image
 ********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "string.h"

#define WIDTH 1024
#define HEIGHT 1024

/* One rendered pixel: screen position plus RGB channel values. */
typedef struct {
    int x, y;
    int red, green, blue;
} PPMPixel;

/* Write one pixel's RGB channels into the 3-ints-per-pixel image buffer. */
void body_to_image(int *hdImage, PPMPixel image)
{
    /* BUG FIX: the buffer holds 3 ints per pixel (see initialize_image and
     * write_to_file), so the pixel's base offset is (y * WIDTH + x) * 3.
     * The original indexing (y * HEIGHT + x, +1, +2) ignored the channel
     * stride and clobbered neighbouring pixels' channels. */
    int base = (image.y * WIDTH + image.x) * 3;
    hdImage[base]     = image.red;
    hdImage[base + 1] = image.green;
    hdImage[base + 2] = image.blue;
}

/* Plot each body as a white pixel, offset by +500 so the origin lands near
 * mid-frame.  `velocity` is currently unused but kept for interface
 * compatibility with callers. */
void render_bodies(const float *position, float *velocity, int number_of_bodies, int *hdImage)
{
    int i;
    for (i = 0; i < number_of_bodies; i++) {
        PPMPixel body;
        body.x = (int) position[i * 3] + 500;
        body.y = (int) position[i * 3 + 1] + 500;
        /* Robustness: skip bodies that fall outside the frame instead of
         * writing out of bounds. */
        if (body.x < 0 || body.x >= WIDTH || body.y < 0 || body.y >= HEIGHT)
            continue;
        body.red = 255;
        body.green = 255;
        body.blue = 255;
        body_to_image(hdImage, body);
    }
}

/* Integer -> newly malloc'd decimal string; the caller frees it. */
char* itoa(int x)
{
    size_t length = snprintf( NULL, 0, "%d", x );
    char* str = (char*) malloc( length + 1 );
    snprintf( str, length + 1, "%d", x );
    return str;
}

/* Dump the image buffer as a binary PPM named "<step>.ppm". */
void write_to_file(const int *hdImage, int step)
{
    FILE *fp;
    /* BUG FIX: the original did strcat(itoa(step), ".ppm"), appending into
     * a heap buffer sized exactly for the digits — a guaranteed heap
     * overflow (and a leak).  Build the path with snprintf instead. */
    char file_path[32];
    snprintf(file_path, sizeof file_path, "%d.ppm", step);
    fp = fopen(file_path, "wb+");

    /* Header: magic, dimensions, max channel value — identical bytes to the
     * original's itoa-based output. */
    fprintf(fp, "P6\n%d %d\n255\n", WIDTH, HEIGHT);

    char* data = (char*) malloc( sizeof(char) * WIDTH * HEIGHT * 3);
    int i;
    for (i = 0; i < WIDTH * HEIGHT * 3; i++) {
        data[i] = (char) hdImage[i];
    }
    /* BUG FIX: the element size was sizeof(short), writing twice as many
     * bytes as `data` holds; pixels are one byte per channel. */
    fwrite(data, sizeof(char), WIDTH * HEIGHT * 3, fp);
    free(data);
    fclose(fp);
}

/* Allocate a WIDTH x HEIGHT image (3 ints per pixel), zeroed to black. */
int* initialize_image()
{
    int *image = (int*) malloc( sizeof(int) * WIDTH * HEIGHT * 3);
    int i;
    for (i = 0; i < WIDTH * HEIGHT; i++) {
        image[i * 3] = 0;
        image[i * 3 + 1] = 0;
        image[i * 3 + 2] = 0;
    }
    return image;
}

/* Emit one frame of body positions as "<step>.csv" (x,y,z per line).
 * The PPM rendering path below is disabled, matching the original. */
void create_frame(float *position, float *velocity, int num_of_bodies, int step)
{
    /* int *hdImage = initialize_image();
       printf("Rendering Pardicles...\n");
       render_bodies(position, velocity, num_of_bodies, hdImage);
       write_to_file(hdImage, step);
       printf("Successfully rendered to the file\n");
       free(hdImage); */

    FILE *fp;
    /* BUG FIX: same itoa()+strcat heap overflow as in write_to_file. */
    char file_path[32];
    snprintf(file_path, sizeof file_path, "%d.csv", step);
    fp = fopen(file_path, "w+");

    int i = 0;
    for( i = 0; i < num_of_bodies; i++) {
        fprintf(fp, "%f,%f,%f\n",
                position[i * 3], position[i * 3 + 1], position[i * 3 + 2]);
    }
    fclose(fp);
}
5,371
#include <stdio.h>
#include <iostream>

// Number of elements in each vector.
// BUGFIX: parenthesized so the macro expands safely inside larger expressions.
#define N (2048 * 2048)

// c[i] = a[i] + b[i], one thread per element.
__global__ void my_kernel(int * a, int * b, int * c)
{
    // Unique global thread id picks the element this thread handles.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < N )  // guard: the grid may launch more threads than elements
        c[tid] = a[tid] + b[tid];
}

// Print the current free/total device memory in bytes.
void report_gpu_mem()
{
    size_t free, total;
    cudaMemGetInfo(&free, &total);
    std::cout << "Free = " << free << " Total = " << total << std::endl;
}

int main()
{
    int *a, *b, *c;

    // Total bytes per vector (size_t avoids overflow for larger N).
    size_t size = (size_t)N * sizeof(int);

    // Unified (managed) memory: reachable from both host and device with
    // no explicit cudaMemcpy. Check each allocation instead of assuming success.
    cudaError_t err;
    if ((err = cudaMallocManaged(&a, size)) != cudaSuccess ||
        (err = cudaMallocManaged(&b, size)) != cudaSuccess ||
        (err = cudaMallocManaged(&c, size)) != cudaSuccess) {
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // Initialize inputs on the host.
    for (int i = 0; i < N; ++i) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }

    int threads_per_block = 128;
    // BUGFIX: ceil-division; the original "(N / tpb) + 1" launched one
    // extra (harmless but wasteful) block whenever tpb divides N.
    int number_of_blocks = (N + threads_per_block - 1) / threads_per_block;

    my_kernel<<<number_of_blocks, threads_per_block>>>(a, b, c);

    // Catch launch-configuration errors, then wait for the GPU to finish.
    if ((err = cudaGetLastError()) != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceSynchronize();

    // Print the last 5 values of c.
    for (int i = N - 5; i < N; ++i)
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");

    // Free everything, reporting device memory after each step.
    report_gpu_mem();
    cudaFree(a);
    report_gpu_mem();
    cudaFree(b);
    report_gpu_mem();
    cudaFree(c);
    report_gpu_mem();
}
5,372
#include<cuda.h>
#include<cstdlib>
#include<cstdio>

#ifndef KERNELS
#define KERNELS

// Element-wise binary operation selectors.
#define OP_NON 0
#define OP_ADD 1
#define OP_SUB 2
#define OP_MUL 3
#define OP_DIV 4

// Element-wise activation-function selectors.
#define FN_SIGM 1 //sigmoid
#define FN_RELU 2 //relu
#define FN_DSIGM 3 //derivative of sigmoid
#define FN_DRELU 4 //derivative of relu

// Apply the binary operation selected by `op` to (elem1, elem2).
// Unknown selectors (including OP_NON) return elem1 unchanged.
// Division adds a tiny epsilon to the denominator to avoid dividing by zero.
__device__ double operation(int op,double elem1,double elem2)
{
    if (op == OP_ADD)
        return elem1 + elem2;
    else if(op == OP_SUB)
        return elem1 - elem2;
    else if(op == OP_MUL)
        return elem1 * elem2;
    else if(op == OP_DIV)
        return elem1 / (elem2 + 0.00000000001);
    else
        return elem1;
}

// Apply the unary activation selected by `fn` to elem; unknown selectors
// return elem unchanged. The RELU variants use a small positive threshold
// (0.00001) rather than exactly zero.
__device__ double function(int fn,double elem)
{
    if (fn == FN_SIGM)
        return 1/(1 + exp(-1*elem));
    else if(fn == FN_RELU)
        return (elem > 0.00001 ? elem: 0);
    else if(fn == FN_DSIGM)
    {
        double sig = 1/(1 + exp(-1*elem));
        return sig*(1 - sig);
    }
    else if(fn == FN_DRELU)
        return(elem > 0.00001 ? 1 : 0);
    else
        return elem;
}

// res = mat1 * mat2 (+ optional matc term), all matrices row-major.
// Launch shape: gridDim.x = rows of mat1 (one block per output row),
// blockDim.x = cols of mat2 (one thread per output column); k_dim is the
// shared inner dimension. matc broadcast: c_row==1 adds a row vector,
// c_col==1 adds a column vector, otherwise a full matrix is added.
__global__ void gaxpy_kernel(int k_dim,double *mat1,double *mat2,double *res,double *matc = NULL, int c_row = 0, int c_col = 0)
{
    int ri = blockIdx.x,rj = threadIdx.x;  // output (row, col) for this thread
    //printf("%d %d\n",blockDim.x,gridDim.x);
    // p1 walks row ri of mat1; p2 walks column rj of mat2
    double *p1 = mat1 + ri*k_dim, *p2 = mat2 + rj;
    double sum = 0;
    for(int k = 0; k < k_dim; k++)
    {
        //printf("%d %d %d %d %lf %lf \n",ri,k,k,rj,*p1,*p2);
        sum += (*p1) * (*p2);
        p1++;
        p2 += blockDim.x;  // advance one row of mat2 (row-major stride)
    }
    //printf("%d %d %lf\n",ri,rj,sum);
    if(matc == NULL)
        res[ri * blockDim.x + rj] = sum;
    else
    {
        if(c_row == 1)
            res[ri * blockDim.x + rj] = sum + matc[rj];
        else if(c_col == 1)
            res[ri * blockDim.x + rj] = sum + matc[ri];
        else
            res[ri * blockDim.x + rj] = sum + matc[ri*blockDim.x + rj];
    }
}

// tr_mat = transpose(mat).
// Launch shape: gridDim.x = rows of mat, blockDim.x = cols of mat.
__global__ void transpose_kernel(double *mat,double *tr_mat)
{
    tr_mat[threadIdx.x*gridDim.x + blockIdx.x] = mat[blockIdx.x*blockDim.x + threadIdx.x];
}

// hmat = mat1 .* mat2 (element-wise / Hadamard product), one thread per element.
__global__ void hadamard_kernel(double *mat1,double *mat2,double *hmat)
{
    hmat[blockIdx.x*blockDim.x + threadIdx.x] = mat1[blockIdx.x*blockDim.x + threadIdx.x] * mat2[blockIdx.x*blockDim.x + threadIdx.x];
}

// res = a*mat1 + mat2 (element-wise), one thread per element.
__global__ void saxpy_kernel(double *mat1,double *mat2,double *res,double a = 1)
{
    res[blockIdx.x*blockDim.x + threadIdx.x] = a*mat1[blockIdx.x*blockDim.x + threadIdx.x] + mat2[blockIdx.x*blockDim.x + threadIdx.x];
}

// res[i] = operation(op, mat1[i], a) — element-wise op against a scalar.
__global__ void operate_kernel(double *mat1,double *res,double a,int op)
{
    res[blockIdx.x*blockDim.x + threadIdx.x] = operation(op,mat1[blockIdx.x*blockDim.x + threadIdx.x],a);
}

// res[i] = function(fn, mat1[i]) — element-wise activation.
__global__ void function_kernel(double *mat1,double *res,int fn)
{
    res[blockIdx.x*blockDim.x + threadIdx.x] = function(fn,mat1[blockIdx.x*blockDim.x + threadIdx.x]);
}

// Fold `op` over a row-major matrix.
// axis==1: reduce down each column (dim = #rows, blockDim.x = #cols);
//          res gets one entry per column.
// otherwise: reduce across each row (dim = #cols, blockDim.x = #rows);
//          res gets one entry per row.
// axis==3: additionally combines the per-row results into res[0] using
//          dynamic shared memory — the launch must supply
//          blockDim.x * sizeof(double) bytes as the third launch argument.
// NOTE(review): the accumulator starts at 0, which is only meaningful for
// additive ops (OP_ADD/OP_SUB) — confirm callers never pass OP_MUL/OP_DIV.
__global__ void reduction_kernel(double *mat1,double *res,int op,int dim,int axis)
{
    double sum = 0;
    if(axis == 1)
    {
        for(int i = 0; i < dim; i++)
            sum = operation(op,sum,mat1[i*blockDim.x + threadIdx.x]);
        res[threadIdx.x] = sum;
    }
    else
    {
        for(int i = 0; i < dim; i++)
            sum = operation(op,sum,mat1[threadIdx.x*dim + i]);
        if(axis == 3)
        {
            extern __shared__ double s[];
            __syncthreads();
            s[threadIdx.x] = sum;
            __syncthreads();  // all partials written before thread 0 combines them
            if(threadIdx.x == 0)
            {
                sum = 0;
                for(int i = 0; i < blockDim.x; i++)
                    sum = operation(op,sum,s[i]);
                res[0] = sum;
                //printf("%lf",sum);
            }
            __syncthreads();
            return;
        }
        res[threadIdx.x] = sum;
    }
}

#endif
5,373
#ifndef __CUDACC__
#define __CUDACC__
#endif

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <iostream>
#include <iomanip>

#define N 16          // matrix dimension (N x N)
#define BLOCKSIZE 8   // tile edge; must divide N for these kernels

cudaError_t multCuda(double *c, double *c1, const double *a, const double *b, float &naive_time, float &tiling_time);

// Naive matrix multiply: each thread computes one output element, reading
// a full row of `a` and a full column of `b` from global memory.
__global__ void naiveKernel(double *c, const double *a, const double *b)
{
    double temp = 0;
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    for (int k = 0; k < N; ++k)
        temp += a[i*N+k] * b[k*N+j];
    c[i*N+j] = temp;
}

// Tiled multiply: stage BLOCKSIZE x BLOCKSIZE tiles of `a` and `b` in
// shared memory so each global element is loaded once per tile instead of
// once per output element.
__global__ void tilingKernel(double *c, const double *a, const double *b)
{
    __shared__ double A_tile[BLOCKSIZE][BLOCKSIZE];
    __shared__ double B_tile[BLOCKSIZE][BLOCKSIZE];

    int row = blockIdx.y * BLOCKSIZE + threadIdx.y;
    int col = blockIdx.x * BLOCKSIZE + threadIdx.x;
    double temp = 0;
    for (int i = 0; i < (N - 1) / BLOCKSIZE + 1; ++i)
    {
        A_tile[threadIdx.y][threadIdx.x] = a[row * N + i * BLOCKSIZE + threadIdx.x];
        B_tile[threadIdx.y][threadIdx.x] = b[(i * BLOCKSIZE + threadIdx.y) * N + col]; // No Shared Mem Bank conflict
        __syncthreads();  // tiles fully loaded before use
        for (int k = 0; k < BLOCKSIZE; ++k)
            temp += A_tile[threadIdx.y][k] * B_tile[k][threadIdx.x]; // No Shared Mem Bank conflict
        __syncthreads();  // everyone done with the tiles before the next load
    }
    c[row*N+col] = temp;
}

// Pretty-print a d x d row-major matrix.
void coutMatrix(int d, double *m)
{
    std::cout << std::endl;
    for (int i = 0; i < d; ++i)
    {
        for (int j = 0; j < d; ++j)
            std::cout << std::setw(9) << m[i*d+j];
        std::cout << std::endl;
    }
}

int main()
{
    const double a[N*N] = {73.0,7.0,87.0,29.0,68.0,53.0,16.0,6.0,65.0,41.0,63.0,38.0,17.0,31.0,91.0,42.0,
        66.0,100.0,32.0,100.0,22.0,84.0,53.0,4.0,78.0,49.0,63.0,1.0,50.0,98.0,55.0,48.0,
        87.0,19.0,37.0,65.0,37.0,55.0,88.0,9.0,1.0,78.0,99.0,75.0,58.0,51.0,62.0,29.0,
        20.0,22.0,7.0,32.0,47.0,41.0,55.0,50.0,16.0,81.0,76.0,77.0,7.0,15.0,47.0,91.0,
        8.0,97.0,68.0,99.0,5.0,21.0,67.0,21.0,45.0,95.0,19.0,52.0,48.0,39.0,39.0,38.0,
        20.0,88.0,52.0,100.0,45.0,62.0,52.0,94.0,12.0,56.0,11.0,74.0,71.0,53.0,34.0,1.0,
        68.0,16.0,68.0,27.0,85.0,27.0,33.0,30.0,66.0,46.0,17.0,36.0,61.0,24.0,93.0,81.0,
        63.0,47.0,71.0,41.0,2.0,18.0,67.0,4.0,23.0,30.0,35.0,19.0,36.0,59.0,1.0,37.0,
        71.0,42.0,22.0,16.0,95.0,12.0,66.0,32.0,100.0,5.0,66.0,90.0,52.0,20.0,1.0,30.0,
        31.0,51.0,89.0,79.0,52.0,21.0,100.0,96.0,33.0,3.0,49.0,49.0,53.0,45.0,49.0,7.0,
        26.0,8.0,84.0,78.0,91.0,90.0,94.0,88.0,30.0,26.0,25.0,98.0,24.0,74.0,70.0,9.0,
        10.0,58.0,17.0,92.0,24.0,15.0,85.0,7.0,80.0,8.0,67.0,35.0,27.0,50.0,89.0,47.0,
        30.0,85.0,47.0,77.0,86.0,52.0,21.0,15.0,94.0,30.0,87.0,42.0,56.0,57.0,66.0,86.0,
        17.0,1.0,89.0,43.0,67.0,66.0,33.0,10.0,64.0,88.0,69.0,22.0,71.0,62.0,84.0,28.0,
        21.0,68.0,86.0,5.0,100.0,45.0,72.0,96.0,77.0,23.0,30.0,49.0,6.0,63.0,21.0,67.0,
        50.0,63.0,13.0,17.0,89.0,29.0,80.0,57.0,18.0,39.0,6.0,14.0,14.0,57.0,59.0,38.0};
    const double b[N*N] = {49.0,66.0,27.0,23.0,94.0,81.0,98.0,59.0,63.0,54.0,50.0,90.0,29.0,31.0,1.0,57.0,
        63.0,99.0,64.0,44.0,96.0,90.0,56.0,56.0,76.0,96.0,79.0,47.0,69.0,4.0,9.0,76.0,
        25.0,42.0,70.0,67.0,80.0,30.0,12.0,50.0,11.0,87.0,17.0,98.0,54.0,19.0,70.0,45.0,
        77.0,71.0,5.0,96.0,96.0,67.0,68.0,33.0,77.0,69.0,1.0,8.0,74.0,15.0,85.0,57.0,
        10.0,69.0,47.0,33.0,90.0,57.0,82.0,21.0,20.0,38.0,47.0,32.0,15.0,56.0,87.0,61.0,
        41.0,38.0,16.0,94.0,85.0,89.0,87.0,12.0,50.0,89.0,69.0,31.0,15.0,8.0,1.0,27.0,
        22.0,82.0,73.0,27.0,5.0,34.0,58.0,39.0,14.0,54.0,2.0,7.0,15.0,63.0,41.0,38.0,
        72.0,66.0,74.0,75.0,3.0,84.0,69.0,32.0,4.0,67.0,80.0,12.0,60.0,13.0,57.0,48.0,
        29.0,7.0,27.0,72.0,41.0,19.0,47.0,86.0,35.0,60.0,79.0,88.0,10.0,36.0,51.0,40.0,
        6.0,78.0,30.0,21.0,3.0,34.0,38.0,55.0,46.0,12.0,78.0,28.0,26.0,39.0,57.0,17.0,
        11.0,61.0,57.0,62.0,45.0,82.0,94.0,16.0,58.0,67.0,22.0,77.0,81.0,80.0,100.0,97.0,
        98.0,52.0,60.0,97.0,99.0,90.0,87.0,100.0,93.0,47.0,59.0,10.0,13.0,100.0,3.0,36.0,
        5.0,6.0,27.0,67.0,84.0,21.0,58.0,39.0,80.0,97.0,91.0,99.0,98.0,45.0,98.0,30.0,
        73.0,42.0,20.0,63.0,65.0,14.0,39.0,54.0,61.0,51.0,63.0,4.0,12.0,34.0,11.0,13.0,
        14.0,51.0,66.0,94.0,41.0,9.0,3.0,86.0,9.0,49.0,96.0,16.0,41.0,34.0,82.0,4.0,
        48.0,68.0,58.0,12.0,68.0,6.0,52.0,66.0,30.0,20.0,91.0,31.0,93.0,60.0,82.0,73.0};
    double c[N*N] = { 0 };
    double c1[N*N] = { 0 };
    float naive_time = 0.0f;
    float tiling_time = 0.0f;

    cudaError_t cudaStatus = multCuda(c, c1, a, b, naive_time, tiling_time);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "multCuda failed!");
        return 1;
    }

    // Bytes moved per kernel pass (two N*N input matrices).
    const double bytes = (double)N * N * sizeof(double) * 2;

    std::cout << "Naive GPU Implementation" << std::endl;
    coutMatrix(N, c);
    std::cout << "Execution Time : " << naive_time / 1000 << " seconds" << std::endl;
    // BUGFIX: the original printed bytes/second while labelling it GB/s;
    // divide by 1e9 to actually report gigabytes per second.
    std::cout << "Effective Bandwidth : " << bytes / (naive_time / 1000) / 1e9 << " GB/s" << std::endl;
    std::cout << std::endl;

    std::cout << "Tiling GPU Implementation" << std::endl;
    coutMatrix(N, c1);
    std::cout << "Execution Time : " << tiling_time / 1000 << " seconds" << std::endl;
    std::cout << "Effective Bandwidth : " << bytes / (tiling_time / 1000) / 1e9 << " GB/s" << std::endl;
    std::cout << std::endl;

    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}

// Run both kernels on device 0, timing each with CUDA events; results land
// in c (naive) and c1 (tiled), elapsed milliseconds in naive_time/tiling_time.
cudaError_t multCuda(double *c, double *c1, const double *a, const double *b, float &naive_time, float &tiling_time)
{
    double *dev_a = 0;
    double *dev_b = 0;
    double *dev_c = 0;
    double *dev_c1 = 0;
    float milliseconds = 0;
    float milliseconds1 = 0;
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
    dim3 dimGrid(N/dimBlock.x, N/dimBlock.y);
    cudaError_t cudaStatus;
    cudaEvent_t start, stop;
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);

    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_c, (N * N) * sizeof(double));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_c1, (N * N) * sizeof(double));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_a, (N * N) * sizeof(double));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_b, (N * N) * sizeof(double));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMemcpy(dev_a, a, (N * N) * sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }
    cudaStatus = cudaMemcpy(dev_b, b, (N * N) * sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

    // Time the naive kernel.
    cudaEventRecord(start);
    naiveKernel<<<dimGrid, dimBlock>>>(dev_c, dev_a, dev_b);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // BUGFIX: cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching naiveKernel!\n", cudaStatus);
        goto Error;
    }

    // Time the tiled kernel.
    cudaEventRecord(start1);
    tilingKernel<<<dimGrid, dimBlock>>>(dev_c1, dev_a, dev_b);
    cudaEventRecord(stop1);
    cudaEventSynchronize(stop1);
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching tilingKernel!\n", cudaStatus);
        goto Error;
    }

    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching Kernel!\n", cudaStatus);
        goto Error;
    }

    cudaStatus = cudaMemcpy(c, dev_c, (N * N) * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }
    cudaStatus = cudaMemcpy(c1, dev_c1, (N * N) * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventElapsedTime(&milliseconds1, start1, stop1);
    naive_time = milliseconds;
    tiling_time = milliseconds1;

Error:
    cudaFree(dev_c);
    cudaFree(dev_c1);
    cudaFree(dev_a);
    cudaFree(dev_b);
    // BUGFIX: events were leaked in the original.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    return cudaStatus;
}
5,374
#include "includes.h"

// Piecewise "hard" approximation of Mish: identity for x > 0, the
// quadratic x*x/2 + x on (-2, 0], and zero below -2.
__device__ float hard_mish_yashas(float x)
{
    if (x > 0)
        return x;
    return (x > -2) ? (x * x / 2 + x) : 0;
}

// Mish activation via numerically-stable piecewise formulas built on the
// fast intrinsics __expf / __fdividef.
__device__ float mish_yashas(float x)
{
    const float ex = __expf(x);
    if (x <= -18.0f)
        return x * ex;
    const float sp = ex * ex + 2 * ex;
    return (x <= -5.0f) ? x * __fdividef(sp, sp + 2)
                        : x - 2 * __fdividef(x, sp + 2);
}

// Applies hard-mish element-wise over x[0..n), optionally stashing the
// pre-activation value. Grid may be 2D; it is flattened below.
__global__ void activate_array_hard_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
    const int flat_block = blockIdx.x + blockIdx.y * gridDim.x;  // flatten 2D grid
    const int idx = flat_block * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    const float v = x[idx];
    if (activation_input)
        activation_input[idx] = v;  // keep the value before activation
    output_gpu[idx] = hard_mish_yashas(v);
}
5,375
//**********************************************************************
// University Of North Carolina Charlotte
//
// Program: Vector adder
// Description: Tests GPU performance with a vector add function.
//
// File Name: pb1b_gpu.c      File Version: 1.0      Baseline: Homework_0
// Course: ECGR6090 - Heterogeneous Computing
// Programmed by: Roy Liu, under supervision of Dr. Hamed Tabkhi
//
// Input file: No      Output: Time of program running
//**********************************************************************
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>

#define THREADS_PER_BLOCK 512

// Element-wise c = a + b, one thread per element.
// The launcher pads the device allocations up to a multiple of the block
// size, so every launched thread has a valid slot and no guard is needed.
__global__ void add(int a[], int b[], int c[])
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = a[i] + b[i];
}

void random_ints(int* r, int n);

int main()
{
    int n = 1000000;

    // For counting run time.
    struct timeval start, end;
    float timer;
    gettimeofday(&start, NULL);

    int size = n * sizeof(int);

    // BUGFIX: the original passed malloc'd *host* pointers directly to the
    // kernel — device code cannot dereference host memory. Keep separate
    // host and device buffers and copy explicitly.
    int *h_a = (int *)malloc(size);
    int *h_b = (int *)malloc(size);
    int *h_c = (int *)malloc(size);
    random_ints(h_a, n);
    random_ints(h_b, n);

    // BUGFIX: n is not a multiple of 512, so the original grid of n/512
    // blocks never processed the tail elements. Round the grid up and pad
    // the device buffers so the extra threads stay in bounds.
    int blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int padded = blocks * THREADS_PER_BLOCK * sizeof(int);

    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, padded);
    cudaMalloc((void **)&d_b, padded);
    cudaMalloc((void **)&d_c, padded);
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

    // Kernel launch.
    add<<<blocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();  // launch is async; wait before timing/reading

    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);

    gettimeofday(&end, NULL);
    timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
    printf("Number of loop is: %d\nRunning time is: %f ms\n", n, timer/1000);
    return 0;
}

//**********************************************************************
// Function Name: random_ints
// Description: Fill r[0..n) with random non-negative integers.
//**********************************************************************
void random_ints(int* r, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        r[i] = rand()/2;
    }
}
5,376
#include "includes.h"

// Element-wise quotient: out[i] = x[i] / y[i] for every i in [0, size).
// Threads past the end simply return.
__global__ void divide(float *x, float* y ,float* out ,const int size)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size)
        return;
    out[gid] = x[gid] / y[gid];
}
5,377
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <chrono>
using namespace std::chrono;

// Problem shape: num_actions copies of an (ncells x nrzns) sample array.
int num_actions = 8;
int ncells = 100*100;
int nrzns = 5000;
int arr_size = ncells * nrzns;
int n_print = 30;

int my_mod_start = 0;

// Stateful generator usable with thrust::generate (kept for reference):
// yields floor(call_count / nrzns), i.e. the cell index of each sample.
float my_mod(){
    int cell = my_mod_start / nrzns;
    my_mod_start++;
    return (float)cell;
}

typedef thrust::device_vector<float>::iterator dIter;

// Benchmark: concatenate per-action device vectors, key-sort them per cell
// ("jugaad" segmented sort via two stable sorts), then histogram each
// cell's nrzns samples with reduce_by_key. Prints elapsed times per phase.
// CLEANUP: large commented-out experimental mains removed; unused
// D_redS2_size vector dropped.
int main(){
    auto START = high_resolution_clock::now();

    // Host data: repeating 0..(nrzns/100 - 1), so after sorting each value
    // appears 100 times per cell.
    thrust::host_vector<float> H_S2_array(arr_size);
    for (int i = 0; i < arr_size; i++)
        H_S2_array[i] = i % (nrzns/100);
    std::cout << std::endl;
    auto init_Hvec = high_resolution_clock::now();

    // One device copy of the data per action.
    thrust::device_vector<float> D_arr_of_S2_vecs[num_actions];
    for (int i = 0; i < num_actions; i++)
        D_arr_of_S2_vecs[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end());
    auto copy_H_to_D = high_resolution_clock::now();

    // Secondary key: cell id of each sample. Filled with a plain loop —
    // the thrust::generate(my_mod) alternative was far slower (~18s).
    thrust::host_vector<float> master_vals(arr_size*num_actions);
    for (int i = 0; i < arr_size*num_actions; i++)
        master_vals[i] = (int)(i/nrzns);
    auto generate = high_resolution_clock::now();

    thrust::device_vector<float> D_master_vals(arr_size*num_actions);
    D_master_vals = master_vals;

    std::cout << "starting jugaad sort" << std::endl;
    auto start = high_resolution_clock::now();

    // Concatenate all per-action vectors into one master vector.
    thrust::device_vector<float> master_S2_vector(arr_size*num_actions);
    for (int i = 0; i < num_actions; i++)
        thrust::copy(D_arr_of_S2_vecs[i].begin(), D_arr_of_S2_vecs[i].end(),
                     master_S2_vector.begin() + i*arr_size);
    auto mid = high_resolution_clock::now();

    // Segmented sort via double stable sort: sort by sample value, then
    // stably by cell id — values end up sorted within each cell.
    thrust::stable_sort_by_key(master_S2_vector.begin(), master_S2_vector.end(), D_master_vals.begin());
    thrust::stable_sort_by_key(D_master_vals.begin(), D_master_vals.end(), master_S2_vector.begin());
    cudaDeviceSynchronize();
    auto end = high_resolution_clock::now();

    auto duration1 = duration_cast<microseconds>(end - start);
    std::cout << "copy + sort time = " << duration1.count()/1e6 << std::endl;
    auto duration2 = duration_cast<microseconds>(end - mid);
    std::cout << "only sort time = " << duration2.count()/1e6 << std::endl;

    // Per-segment histogram: reduce_by_key over each nrzns-sized chunk,
    // counting how often each distinct value occurs.
    thrust::device_vector<float> D_ones(nrzns, 1);
    int num_vecs = arr_size * num_actions / nrzns;
    thrust::device_vector<float> D_red_S2[num_vecs];
    thrust::device_vector<float> D_red_counts[num_vecs];
    for (int i = 0; i < num_vecs; i++){
        D_red_S2[i] = thrust::device_vector<float>(nrzns);
        D_red_counts[i] = thrust::device_vector<float>(nrzns);
    }
    thrust::pair<dIter, dIter> new_end;

    auto red_start = high_resolution_clock::now();
    for (int i = 0; i < num_vecs; i++){
        new_end = thrust::reduce_by_key(master_S2_vector.begin() + (i*nrzns),
                                        master_S2_vector.begin() + ((i+1)*nrzns),
                                        D_ones.begin(),
                                        D_red_S2[i].begin(),
                                        D_red_counts[i].begin());
    }
    (void)new_end;  // per-segment unique-key count; not needed here
    auto red_end = high_resolution_clock::now();

    auto red_duration = duration_cast<microseconds>(red_end - red_start);
    std::cout << "reduce_by_key = " << red_duration.count()/1e6 << std::endl;

    auto time_spent = duration_cast<microseconds>(init_Hvec - START);
    std::cout << "initialise H_vec = " << time_spent.count()/1e6 << std::endl;
    time_spent = duration_cast<microseconds>(copy_H_to_D - init_Hvec);
    std::cout << "copy_H_to_D= " << time_spent.count()/1e6 << std::endl;
    time_spent = duration_cast<microseconds>(generate - copy_H_to_D);
    std::cout << "generate= " << time_spent.count()/1e6 << std::endl;
    time_spent = duration_cast<microseconds>(red_end - START);
    std::cout << "Total time= " << time_spent.count()/1e6 << std::endl;

    return 0;
}
5,378
#include <stdio.h>
#include <cuda_runtime.h>

// Each of the launched threads writes 7*i into slot i; the host then
// checks that the four results sum to 0 + 7 + 14 + 21 == 42.
__global__ void foo_device(int * n){
    const int tid = threadIdx.x;
    n[tid] = 7 * tid;
}

int main(int argc, char const *argv[])
{
    int *dev_buf;
    int host_buf[4];
    cudaError_t status;

    status = cudaMalloc((void **) &dev_buf, sizeof(int) * 4);
    if (status != cudaSuccess)
        printf("cudaMalloc returned error %s\n", cudaGetErrorString(status));

    foo_device<<<1, 4>>>(dev_buf);
    status = cudaGetLastError();
    if (status != cudaSuccess)
        printf("kernel returned error %s\n", cudaGetErrorString(status));

    status = cudaMemcpy(host_buf, dev_buf, sizeof(int) * 4, cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
        printf("cudaMemcyp returned error %s\n", cudaGetErrorString(status));

    int total = 0;
    for (int i = 0; i < 4; ++i)
        total += host_buf[i];

    if (total == 42)
        printf("The program works fine! The right anwser is: %i\n", total);
    else
        printf("The answer is wrong. 42 was expected, but it is: %i\n", total);
    return 0;
}
5,379
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// Simple GPU stopwatch built on CUDA events.
// start() records the begin event; stop() records the end event, waits for
// it to complete, and returns the elapsed time in milliseconds.
class cuStopwatch{
    private:
        cudaEvent_t start_event;  // recorded by start()
        cudaEvent_t end_event;    // recorded by stop()
        bool is_watching;         // true between start() and stop()
    public:
        cuStopwatch();
        ~cuStopwatch();
        void start();
        float stop();
};

cuStopwatch::cuStopwatch(){
    cudaEventCreate(&start_event);
    cudaEventCreate(&end_event);
    // BUGFIX: is_watching was declared but never initialized or used.
    is_watching = false;
}

cuStopwatch::~cuStopwatch(){
    cudaEventDestroy(start_event);
    cudaEventDestroy(end_event);
}

void cuStopwatch::start(){
    cudaEventRecord(start_event);
    is_watching = true;
}

// Returns elapsed milliseconds since start(), or -1.0f if the watch was
// never started (the original returned an undefined interval in that case).
float cuStopwatch::stop(){
    if (!is_watching)
        return -1.0f;
    is_watching = false;
    float elapsed_time;
    cudaEventRecord(end_event);
    cudaEventSynchronize(end_event);  // event completion is asynchronous
    cudaEventElapsedTime(&elapsed_time, start_event, end_event);
    return elapsed_time;
}
5,380
#include <stdio.h>

#define N 256

// Increment every element of A by one; one thread per element.
__global__ void vecAdd(int *A)
{
    A[threadIdx.x] = A[threadIdx.x] + 1;
}

int main (int argc, char *argv[]){
    int host_vec[N];
    int *dev_vec;
    const int bytes = N * sizeof(int);

    // Fill with 0..N-1 so the result is 1..N.
    for (int k = 0; k < N; ++k)
        host_vec[k] = k;

    cudaMalloc((void**)&dev_vec, bytes);
    cudaMemcpy(dev_vec, host_vec, bytes, cudaMemcpyHostToDevice);
    vecAdd<<<1, N>>>(dev_vec);
    cudaMemcpy(host_vec, dev_vec, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_vec);

    for (int k = 0; k < N; ++k)
        printf("%d ", host_vec[k]);
    printf("\n");
}
5,381
#include "includes.h"

// Pairwise Euclidean distances between the rows of two column-major
// matrices: ans[i + j*nrow_a] = ||data_a[i,:] - data_b[j,:]||.
// Launch shape: a single block with threadIdx.x indexing rows of data_a
// and threadIdx.y indexing rows of data_b, so the total number of pairs is
// limited by the per-block thread count (see note below).
__global__ void euclideanDistance(const float *data_a, int nrow_a, const float *data_b, int nrow_b, int ncol, float *ans)
{
    /*
    int myblock = blockIdx.x + blockIdx.y * gridDim.x;
    int blocksize = blockDim.x * blockDim.y * blockDim.z;
    int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    */
    /* This is very simple, limited to 512 pairs of observations in total. */
    int i = threadIdx.x, j = threadIdx.y;
    if(i < nrow_a && j < nrow_b) {
        // Column-major layout: moving to the next column of a row advances
        // the flat offset by the matrix's row count.
        int off_a = i, off_b = j;
        float sum = 0;
        for(int k = 0; k < ncol ; k++, off_a += nrow_a, off_b+= nrow_b) {
            float tmp = (data_a[off_a] - data_b[off_b]);
            sum += tmp * tmp;  // squared difference for column k
        }
        ans[i + j*nrow_a] = sqrt(sum);
    }
}
5,382
#include "includes.h"

// Caesar-style decryption: every output byte is the input byte minus one.
// One thread per byte; threads past n do nothing.
__global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut, int n)
{
    const unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= (unsigned)n)
        return;
    deviceDataOut[gid] = deviceDataIn[gid] - 1;
}
5,383
#include "Benchmarks.cuh"

// Default configuration: search domain [-100, +100], 100 dimensions, and
// a minimal 1-block / 1-thread launch shape.
Benchmarks::Benchmarks()
{
    min = -100.0;
    max = +100.0;
    n_dim = 100;
    n_blocks = 1;
    n_threads = 1;
}

Benchmarks::~Benchmarks()
{
    // nothing to release
}

// --- accessors ------------------------------------------------------

float Benchmarks::getMin() { return min; }
float Benchmarks::getMax() { return max; }
uint Benchmarks::getID() { return ID; }
uint Benchmarks::getThreads() { return n_threads; }
uint Benchmarks::getBlocks() { return n_blocks; }

// --- mutators -------------------------------------------------------

void Benchmarks::setMin( float _min ) { min = _min; }
void Benchmarks::setMax( float _max ) { max = _max; }
void Benchmarks::setThreads( uint _n) { n_threads = _n; }
void Benchmarks::setBlocks( uint _n ) { n_blocks = _n; }
5,384
#include "includes.h"

// Partial dot product of a and b: each block accumulates its
// grid-strided slice of the inputs in shared memory, reduces it, and
// writes ONE partial sum to target[blockIdx.x]. The host (or a second
// kernel) must sum the gridDim.x partials to get the final dot product.
//
// Assumes blockDim.x == DP_BLOCKSIZE, and the reduction ladder below
// (256 -> 128 -> 64 -> warp) assumes DP_BLOCKSIZE == 512.
// NOTE(review): DP_BLOCKSIZE comes from includes.h — confirm it is 512.
__global__ void kDotProduct_r(float* a, float* b, float* target, const uint numElements) {
    __shared__ float shmem[DP_BLOCKSIZE];
    // First element handled by this thread; subsequent elements are
    // gridDim.x * DP_BLOCKSIZE apart (grid-stride accumulation).
    uint eidx = DP_BLOCKSIZE * blockIdx.x + threadIdx.x;
    shmem[threadIdx.x] = 0;
    // This guard is always true for any valid blockIdx.x; kept verbatim.
    if (eidx < gridDim.x * DP_BLOCKSIZE) {
        for (; eidx < numElements; eidx += gridDim.x * DP_BLOCKSIZE) {
            shmem[threadIdx.x] += a[eidx] * b[eidx];
        }
    }
    __syncthreads();
    // Tree reduction in shared memory: 512 -> 256 -> 128 -> 64 partials.
    if (threadIdx.x < 256) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 256];
    }
    __syncthreads();
    if (threadIdx.x < 128) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 128];
    }
    __syncthreads();
    if (threadIdx.x < 64) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 64];
    }
    __syncthreads();
    // Final 64 -> 1 within one warp. 'volatile' forces each load/store so
    // the barrier-free warp-synchronous reduction works (pre-Volta idiom;
    // NOTE(review): relies on implicit warp synchrony — on Volta+ this
    // needs __syncwarp(), confirm target architecture).
    if (threadIdx.x < 32) {
        volatile float* mysh = &shmem[threadIdx.x];
        *mysh += mysh[32];
        *mysh += mysh[16];
        *mysh += mysh[8];
        *mysh += mysh[4];
        *mysh += mysh[2];
        *mysh += mysh[1];
        if (threadIdx.x == 0) {
            target[blockIdx.x] = *mysh;
        }
    }
}
5,385
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Report file/line and the CUDA error string, then exit. Must stay on one line.
#define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(1); }}

// Accumulate the bias-weighted outer product of per-atom deviations into
// the points x points accumulator:
//   covariance[j*points + k] += variance[k] * variance[j] * exp(-bias/(R*T))
// One thread per column k; each thread walks every row j.
// NOTE(review): -1.0*bias promotes the weight computation to double
// before expf truncates back — presumably harmless, kept verbatim.
__global__ void compute_covariance (float *variance,float *covariance,int points,float bias) {
    int k=threadIdx.x + blockDim.x * blockIdx.x;
    int j;
    unsigned long long int grid_num;
    float R,T;
    R=0.00198588;   // gas constant, kcal/(mol*K)
    T=300.0;        // temperature, K
    if(k<points){
        for (j=0;j<points;j++){
            // 64-bit index: j*points can overflow 32 bits for large systems.
            grid_num=(unsigned long long)j*points;
            grid_num+=k;
            covariance[grid_num]+=(variance[k]*variance[j])*expf((-1.0*bias)/(R*T));
        }
    }
}

// Accumulate only the bias weight exp(-bias/(R*T)) into every cell —
// the normalization denominator matching compute_covariance above.
__global__ void compute_covariance_2 (float *covariance,int points,float bias) {
    int k=threadIdx.x + blockDim.x * blockIdx.x;
    int j;
    unsigned long long int grid_num;
    float R,T;
    R=0.00198588;
    T=300.0;
    if(k<points){
        for (j=0;j<points;j++){
            grid_num=(unsigned long long)j*points;
            grid_num+=k;
            covariance[grid_num]+=expf((-1.0*bias)/(R*T));
        }
    }
}

// Reads selection_coords.dat (lines of "frame  atom_index  position  bias",
// 3 coordinate lines per atom per frame), writes the bias-weighted average
// position per coordinate, then (unless avg_only) the weighted atomic
// covariance matrix.
int main () {
    int blocks,threads,frame,k,j,points,grid_point,curr_frame,max_frame,atom_index,line_counter,avg_only,print_flg,all_points;
    unsigned long long int sqpoints,grid_num;
    int devCount;
    // NOTE(review): 'count' is declared but never used.
    float bias,R,T,count,position;
    float *top_sum,*bottom_sum,*covariance,*covariance_2,*variance;
    float *dev_covariance,*dev_variance;
    char buf[4096];
    FILE* file=fopen("selection_coords.dat","r");
    FILE *ofp;
    FILE *ofp2;
    char outputFilename[] = "weighted_avg_position.dat";
    char outputFilename2[] = "atomic_covariance_matrix.dat";
    CHECK (cudaSetDevice ( 0 ) );
    avg_only=0;    // 0 => also compute the covariance matrix
    print_flg=1;   // 1 => write the covariance matrix to disk
    R=0.001986;
    T=300.00;
    printf("Initilizing...\n");
    // Pass 1: count coordinates per frame (points) and the last frame id.
    points=0;
    max_frame=0;
    while (fgets(buf, sizeof (buf), file)) {
        sscanf (buf, "%i\t%i\t%f\t%f",&frame,&atom_index,&position,&bias);
        if(points==0){curr_frame=frame;}
        if(curr_frame==frame){points+=1;}
        max_frame=frame;
    }
    all_points=points;
    printf("Number of Atoms=%i\n",points/3);
    printf("Max Frame=%i\n",max_frame);
    sqpoints= (unsigned long long )points*points;
    // Host accumulators: weighted position sums and weight sums.
    top_sum=(float *)malloc(points*sizeof(float));
    if(top_sum == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);}
    bottom_sum=(float *)malloc(points*sizeof(float));
    if(bottom_sum == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);}
    if(avg_only == 0){
        variance=(float *)malloc(points*sizeof(float));
        if(variance == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);}
        covariance=(float *)malloc(sqpoints*sizeof(float));
        if(covariance == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);}
        covariance_2=(float *)malloc(sqpoints*sizeof(float));
        if(covariance_2 == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);}
    }
    printf("Set Memory...\n");
    memset(top_sum,0,points*sizeof(float));
    memset(bottom_sum,0,points*sizeof(float));
    if(avg_only == 0){
        memset(variance,0,points*sizeof(float));
        memset(covariance,0,(sqpoints)*sizeof(float));
        memset(covariance_2,0,(sqpoints)*sizeof(float));
    }
    printf("Reading Input...\n");
    // Pass 2: bias-weighted average position per coordinate.
    rewind(file);
    grid_point=0;
    line_counter=0;
    while (fgets(buf, sizeof (buf), file)) {
        if(line_counter==all_points){ grid_point=0; line_counter=0;}
        sscanf (buf, "%i\t%i\t%f\t%f",&frame,&atom_index,&position,&bias);
        top_sum[grid_point]+=(expf((-1.0*bias)/(R*T))*float(position));
        bottom_sum[grid_point]+=(expf(((-1.0*bias)/(R*T))));
        grid_point+=1;
        line_counter+=1;
    }
    printf("Write Average...\n");
    ofp=fopen(outputFilename, "w");
    for (k=0;k<points;k++){ fprintf(ofp,"%f\n",top_sum[k]/bottom_sum[k]); }
    fclose(ofp);
    //Avg Only Below
    if(avg_only == 0){
        cudaGetDeviceCount(&devCount);
        // Iterate through devices.
        // NOTE(review): 'threads' keeps the LAST device's maxThreadsPerBlock,
        // but device 0 was selected above — confirm intended on multi-GPU hosts.
        for (int i = 0; i < devCount; ++i){
            cudaDeviceProp devProp;
            cudaGetDeviceProperties(&devProp, i);
            threads=devProp.maxThreadsPerBlock;
        }
        // +1 extra block is harmless: the kernels guard with k < points.
        blocks=ceil(float(points)/float(threads))+1;
        printf("Threads=%i\n",threads);
        printf("Blocks=%i\n",blocks);
        CHECK (cudaMalloc((void **) &dev_covariance, (sqpoints)*sizeof(float)) );
        CHECK (cudaMalloc((void **) &dev_variance, points*sizeof(float)) );
        CHECK (cudaMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
        CHECK (cudaMemcpy(dev_variance, variance, points*sizeof(float), cudaMemcpyHostToDevice) );
        // Pass 3: per frame, collect deviations from the weighted mean, then
        // launch compute_covariance when the NEXT frame begins.
        // NOTE(review): the launch fires only at the start of a new frame, so
        // the FINAL frame's deviations are never accumulated; 'bias' at launch
        // is the last value read from the completed frame (assumes one bias
        // per frame) — confirm both are intended.
        rewind(file);
        grid_point=0;
        line_counter=0;
        printf("Compute Covariance...\n");
        while (fgets(buf, sizeof (buf), file)) {
            if(line_counter==all_points){
                if(frame%100==0){printf("Frame=%i\n",frame);}
                CHECK (cudaMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
                CHECK (cudaMemcpy(dev_variance, variance, points*sizeof(float), cudaMemcpyHostToDevice) );
                compute_covariance<<<blocks,threads>>>(dev_variance,dev_covariance,points,bias);
                CHECK (cudaMemcpy(covariance, dev_covariance, (sqpoints)*sizeof(float), cudaMemcpyDeviceToHost) );
                grid_point=0;
                line_counter=0;}
            sscanf (buf, "%i\t%i\t%f\t%f",&frame,&atom_index,&position,&bias);
            variance[grid_point]=(float(position)-(top_sum[grid_point]/bottom_sum[grid_point]));
            grid_point+=1;
            line_counter+=1;
        }
        // Pass 4: same frame loop for the weight-only denominator matrix.
        CHECK (cudaMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
        rewind(file);
        grid_point=0;
        line_counter=0;
        printf("Compute Covariance_2...\n");
        while (fgets(buf, sizeof (buf), file)) {
            if(line_counter==all_points){
                if(frame%100==0){printf("Frame=%i\n",frame);}
                CHECK (cudaMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
                compute_covariance_2<<<blocks,threads>>>(dev_covariance,points,bias);
                CHECK (cudaMemcpy(covariance_2, dev_covariance, (sqpoints)*sizeof(float), cudaMemcpyDeviceToHost) );
                grid_point=0;
                line_counter=0;}
            sscanf (buf, "%i\t%i\t%f\t%f",&frame,&atom_index,&position,&bias);
            variance[grid_point]=(float(position)-(top_sum[grid_point]/bottom_sum[grid_point]));
            grid_point+=1;
            line_counter+=1;
        }
        fclose (file);
        CHECK (cudaFree(dev_covariance) );
        CHECK (cudaFree(dev_variance) );
        cudaDeviceReset();
        if(print_flg==1){
            // Write normalized covariance: weighted sum / weight sum, 1-based indices.
            printf("Write Covariance...\n");
            ofp2=fopen(outputFilename2, "w");
            for (k=0;k<points;k++){
                for (j=0;j<points;j++){
                    grid_num=(unsigned long long)j*points;
                    grid_num+=k;
                    fprintf(ofp2,"%i\t%i\t%f\n",k+1,j+1,(covariance[grid_num]/covariance_2[grid_num]));
                }
            }
            fclose(ofp2);
        }
    }//Avg_only
    free(top_sum);
    free(bottom_sum);
    if(avg_only == 0){
        free(covariance);
        free(covariance_2);
        free(variance);
    }
    printf("Complete!\n");
    return 0;
}
5,386
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>

// Error macros (must stay on one line each):
// CHECK jumps to the shared cleanup label on failure; HERR only reports,
// for use inside the cleanup path itself.
#define CHECK(_t, _e) if (_e != cudaSuccess) { fprintf(stderr, "%s failed: %s", _t, cudaGetErrorString(_e)); goto Error;}
#define HERR(_t, _e) if (_e != cudaSuccess) { fprintf(stderr, "%s failed: %s", _t, cudaGetErrorString(_e));}

// Deliberately not a multiple of the block size, to exercise the tail guard.
const int len = 2 * 1024 + 5;

// Element-wise product c[i] = a[i] * b[i]; one thread per element,
// guarded against the partial last block.
__global__ void kernel(float *c, float *a, float *b) {
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < len) {
        c[i] = a[i] * b[i];
    }
}

// Prints device properties, multiplies 0.5 * 2.0 element-wise on the GPU,
// and prints the (all-1.0) result, 20 values per row.
// NOTE(review): host buffers a/b/c are never freed — presumably acceptable
// for a demo that exits immediately; confirm.
int main() {
    int i, ndev, bc, tc;
    cudaDeviceProp p;
    float* a = (float*)malloc(len * sizeof(float));
    float* b = (float*)malloc(len * sizeof(float));
    float* c = (float*)malloc(len * sizeof(float));
    float* da = NULL;
    float* db = NULL;
    float* dc = NULL;
    for (i = 0; i < len; i++) { a[i] = 0.5f; b[i] = 2.0f; }
    //print device properties
    CHECK("cudaGetDeviceCount", cudaGetDeviceCount(&ndev));
    for (i = 0; i < ndev; i++) {
        CHECK("cudaGetDeviceProperties", cudaGetDeviceProperties(&p, i));
        printf("Name: %s\n", p.name);
        printf("Compute capability: %d.%d\n", p.major, p.minor);
        printf("Max threads/block: %d\n", p.maxThreadsPerBlock);
        printf("Max block size: %d x %d x %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]);
        printf("Max grid size: %d x %d x %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]);
    }
    CHECK("cudaSetDevice", cudaSetDevice(0));
    CHECK("cudaMalloc da", cudaMalloc(&da, len * sizeof(float)));
    CHECK("cudaMalloc db", cudaMalloc(&db, len * sizeof(float)));
    CHECK("cudaMalloc dc", cudaMalloc(&dc, len * sizeof(float)));
    //transfer the data
    CHECK("cudaMemcpy da", cudaMemcpy(da, a, len * sizeof(float), cudaMemcpyHostToDevice));
    CHECK("cudaMemcpy db", cudaMemcpy(db, b, len * sizeof(float), cudaMemcpyHostToDevice));
    // Ceil-div launch: one extra (partial) block covers the tail.
    tc = 1024;
    bc = len / tc;
    if (len % tc != 0) { bc++; }
    kernel <<<bc, tc >>> (dc, da, db);
    CHECK("kernel", cudaGetLastError());
    // Blocking copy also synchronizes with the kernel.
    CHECK("cudaMemcpy dc", cudaMemcpy(c, dc, len * sizeof(float), cudaMemcpyDeviceToHost));
    for (i = 0; i < len; i++) { if (i % 20 == 0) { printf("\n"); } printf("% 2.0f", c[i]); }
    printf("\n");
Error:
    // cudaFree(NULL) is a no-op, so this cleanup is safe from any CHECK jump.
    HERR("cudaFree da", cudaFree(da));
    HERR("cudaFree db", cudaFree(db));
    HERR("cudaFree dc", cudaFree(dc));
    HERR("cudaDeviceReset", cudaDeviceReset());
    return 0;
}
5,387
#define NUM_THREADS 32

// Pairwise distance kernels. Shared conventions (from the index math below):
//  - vg_a / vg_b are row-major with row stride pitch_a / pitch_b (floats);
//    rows are observations, k is the number of columns.
//  - Launch: a 2D grid with one BLOCK per (x, y) pair of rows and
//    blockDim.x == NUM_THREADS threads cooperating on one pair.
//  - Each thread accumulates a strided slice of the columns into shared
//    memory, then a tree reduction combines the partials; thread 0 writes
//    d[y * pitch_d + x].
//  - *_same variants compare rows of vg_a with itself: only the strict
//    lower triangle (x < y) is computed and mirrored, and the diagonal is
//    zeroed by thread 0 of the x == y blocks.
//  - 'p' is only used by the Minkowski kernels; other kernels ignore it.
// NOTE(review): accumulators use double literals (0.0) and sqrt/abs/max —
// kept verbatim; behavior presumably relies on the device float overloads.

// Squared-difference sum, then sqrt: Euclidean distance.
__global__ void euclidean_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    // If an element is to be computed
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        // Each thread handles columns offset, offset+blockDim.x, ...
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset];
            temp[threadIdx.x] += (t * t);
        }
        // Sync with other threads
        __syncthreads();
        // Reduce
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        // Write to global memory
        if(threadIdx.x == 0) { d[y * pitch_d + x] = sqrt(temp[0]); }
    }
}

// Euclidean, A vs A: lower triangle only, mirrored; zero diagonal.
__global__ void euclidean_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x, y = blockIdx.y;
    if((x == y) && (x < n_a) && (threadIdx.x == 0)) d[y * pitch_d + x] = 0.0;
    if(y < n_a && x < y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        // NOTE(review): strides by NUM_THREADS here instead of blockDim.x —
        // equivalent only when blockDim.x == NUM_THREADS; confirm launch.
        for(size_t offset = threadIdx.x; offset < k; offset += NUM_THREADS) {
            float t = vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset];
            temp[threadIdx.x] += (t * t);
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) temp[threadIdx.x] += temp[threadIdx.x + stride];
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float s = sqrt(temp[0]);
            d[y * pitch_d + x] = s;
            d[x * pitch_d + y] = s;
        }
    }
}

// Chebyshev / L-infinity: max of absolute differences.
__global__ void maximum_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
            temp[threadIdx.x] = max(temp[threadIdx.x], t);
        }
        __syncthreads();
        // Max-reduction instead of sum.
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] = max(temp[threadIdx.x], temp[threadIdx.x + stride]); }
            __syncthreads();
        }
        if(threadIdx.x == 0) { d[y * pitch_d + x] = temp[0]; }
    }
}

// Chebyshev, A vs A (triangular + mirror).
__global__ void maximum_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x, y = blockIdx.y;
    if(x == y && x < n_a && threadIdx.x == 0) { d[y * pitch_d + x] = 0.0; }
    if(y < n_a && x < y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = abs(vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset]);
            temp[threadIdx.x] = max(t, temp[threadIdx.x]);
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] = max(temp[threadIdx.x], temp[threadIdx.x + stride]); }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float s = temp[0];
            d[y * pitch_d + x] = s;
            d[x * pitch_d + y] = s;
        }
    }
}

// Manhattan / L1: sum of absolute differences.
__global__ void manhattan_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
            temp[threadIdx.x] += t;
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) { d[y * pitch_d + x] = temp[0]; }
    }
}

// Manhattan, A vs A (triangular + mirror).
__global__ void manhattan_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x == y && x < n_a && threadIdx.x == 0) { d[y * pitch_d + x] = 0.0; }
    if(y < n_a && x < y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = abs(vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset]);
            temp[threadIdx.x] += t;
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float s = temp[0];
            d[y * pitch_d + x] = s;
            d[x * pitch_d + y] = s;
        }
    }
}

// Canberra: sum of |a-b| / |a+b|, skipping zero denominators.
__global__ void canberra_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float num = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
            float den = abs(vg_a[x * pitch_a + offset] + vg_b[y * pitch_b + offset]);
            if(den != 0.0) { temp[threadIdx.x] += num / den; }
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) { d[y * pitch_d + x] = temp[0]; }
    }
}

// Canberra, A vs A (triangular + mirror).
__global__ void canberra_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x == y && x < n_a && threadIdx.x == 0) { d[y * pitch_d + x] = 0.0; }
    if(y < n_a && x < y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float num = abs(vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset]);
            float den = abs(vg_a[x * pitch_a + offset] + vg_a[y * pitch_a + offset]);
            if(den != 0.0) { temp[threadIdx.x] += num / den; }
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float s = temp[0];
            d[y * pitch_d + x] = s;
            d[x * pitch_d + y] = s;
        }
    }
}

// Binary (Jaccard-style): XOR count / OR count over non-zero indicators.
// Uses two shared arrays: [0..NUM_THREADS) numerator, [NUM_THREADS..) denominator.
__global__ void binary_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x < n_a && y < n_b) {
        __shared__ float temp[2 * NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        temp[threadIdx.x + NUM_THREADS] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            int a = vg_a[x * pitch_a + offset] != 0.0;
            int b = vg_b[y * pitch_b + offset] != 0.0;
            if(a ^ b) { temp[threadIdx.x] += 1.0; }
            if(a || b) { temp[threadIdx.x + NUM_THREADS] += 1.0; }
        }
        __syncthreads();
        // Reduce numerator and denominator in lockstep.
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) {
                temp[threadIdx.x] += temp[threadIdx.x + stride];
                temp[threadIdx.x + NUM_THREADS] += temp[threadIdx.x + stride + NUM_THREADS];
            }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float val = temp[0];
            if(temp[NUM_THREADS] != 0.0) { val /= temp[NUM_THREADS]; }
            d[y * pitch_d + x] = val;
        }
    }
}

// Binary, A vs A (triangular + mirror).
__global__ void binary_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x == y && x < n_a && threadIdx.x == 0) { d[y * pitch_d + x] = 0.0; }
    if(y < n_a && x < y) {
        __shared__ float temp[2 * NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        temp[threadIdx.x + NUM_THREADS] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            int a = vg_a[x * pitch_a + offset] != 0.0;
            int b = vg_a[y * pitch_a + offset] != 0.0;
            if(a ^ b) { temp[threadIdx.x] += 1.0; }
            if(a || b) { temp[threadIdx.x + NUM_THREADS] += 1.0; }
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) {
                temp[threadIdx.x] += temp[threadIdx.x + stride];
                temp[threadIdx.x + NUM_THREADS] += temp[threadIdx.x + stride + NUM_THREADS];
            }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float val = temp[0];
            if(temp[NUM_THREADS] != 0.0) { val /= temp[NUM_THREADS]; }
            d[y * pitch_d + x] = val;
            d[x * pitch_d + y] = val;
        }
    }
}

// Minkowski L_p: (sum |a-b|^p)^(1/p), via fast-math __powf.
__global__ void minkowski_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x, y = blockIdx.y;
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = fabsf(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
            temp[threadIdx.x] += __powf(t, p);
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) temp[threadIdx.x] += temp[threadIdx.x + stride];
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float power = 1.f/p;
            d[y * pitch_d + x] = __powf(temp[0], power);
        }
    }
}

// Minkowski, A vs A (triangular + mirror).
__global__ void minkowski_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x == y && x < n_a && threadIdx.x == 0) { d[y * pitch_d + x] = 0.0; }
    if(y < n_a && x < y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = fabsf(vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset]);
            temp[threadIdx.x] += __powf(t, p);
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            float power = 1.f / p, s = __powf(temp[0], power);
            d[y * pitch_d + x] = s;
            d[x * pitch_d + y] = s;
        }
    }
}

// Inner product (similarity, not a distance): sum a[i]*b[i].
__global__ void dot_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(x < n_a && y < n_b) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = vg_a[x * pitch_a + offset] * vg_b[y * pitch_b + offset];
            temp[threadIdx.x] += t;
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) { d[y * pitch_d + x] = temp[0]; }
    }
}

// Inner product, A vs A: includes the diagonal (x <= y), mirrored.
__global__ void dot_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p) {
    size_t x = blockIdx.x;
    size_t y = blockIdx.y;
    if(y < n_a && x <= y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
            float t = vg_a[x * pitch_a + offset] * vg_a[y * pitch_a + offset];
            temp[threadIdx.x] += t;
        }
        __syncthreads();
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride) { temp[threadIdx.x] += temp[threadIdx.x + stride]; }
            __syncthreads();
        }
        if(threadIdx.x == 0) {
            d[y * pitch_d + x] = temp[0];
            d[x * pitch_d + y] = temp[0];
        }
    }
}
5,388
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <algorithm>
#include <chrono>
#include <vector>

using namespace std::chrono_literals;

// Element-wise vector addition with a grid-stride loop so any launch
// configuration covers all `count` elements.
// BUG FIX: the original indexed with threadIdx.x only and was launched as
// <<<1, count>>> with count = 50,000,000 — far beyond the 1024 threads/block
// limit — so the launch failed silently and the device result stayed zero.
__global__ void vectorSum(float const * v1, float const * v2, float * v3, unsigned int count)
{
    unsigned int stride = gridDim.x * blockDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride)
        v3[i] = v1[i] + v2[i];
}

int main()
{
    unsigned int count = 50000000;
    std::vector<float> hVec1(count, 2.2f);
    std::vector<float> hVec2(count, 1.1f);
    std::vector<float> hRes(count, 0.0f);
    std::vector<float> cdRes(count, 0.0f);

    auto st = std::chrono::system_clock::now();

    float* dVec1{};
    cudaMalloc(&dVec1, count * sizeof(float));
    float* dVec2{};
    cudaMalloc(&dVec2, count * sizeof(float));
    float* dRes{};
    cudaMalloc(&dRes, count * sizeof(float));

    cudaMemcpy(dVec1, hVec1.data(), count * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dVec2, hVec2.data(), count * sizeof(float), cudaMemcpyHostToDevice);

    // Valid launch configuration: 256 threads/block, ceil-div grid.
    unsigned int threadsPerBlock = 256;
    unsigned int numBlocks = (count + threadsPerBlock - 1) / threadsPerBlock;
    vectorSum <<< numBlocks, threadsPerBlock >>> (dVec1, dVec2, dRes, count);
    cudaError_t launchErr = cudaGetLastError();   // surface launch-config errors
    if (launchErr != cudaSuccess)
        std::cerr << "kernel launch failed: " << cudaGetErrorString(launchErr) << '\n';

    // Blocking copy synchronizes with the kernel before reading results.
    cudaMemcpy(cdRes.data(), dRes, count*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dVec1);
    cudaFree(dVec2);
    cudaFree(dRes);
    std::cout << " \n CUDA took " << (std::chrono::system_clock::now() - st).count() << " nano seconds ";
    //To display
    //for (auto& el : hRes) std::cout << el << " ";

    // CPU reference for comparison.
    st = std::chrono::system_clock::now();
    std::transform(hVec1.begin(), hVec1.end(), hVec2.begin(), hRes.begin(),
                   [](const auto & i, const auto & j) {return i + j; });
    std::cout << " \n normal took " << (std::chrono::system_clock::now() - st).count() << " nano seconds ";

    // Accumulated difference between GPU and CPU results; ~0 when correct.
    double indicate{};
    std::cout<<"\n\n\n\n\n";
    for (size_t i{}; i < cdRes.size(); i++)
        indicate += cdRes[i] - hRes[i];
    std::cout << "\n\n indicator = " << indicate;

    cudaDeviceReset();
}
5,389
#include <stdio.h>
#include <iostream>
using namespace std;

// Empty kernel, launched once just to initialize the CUDA context.
__global__ void mykernel(void){}

// Vector addition with a manually unrolled stride: launched with n/10
// blocks of 1 thread; block i handles elements i, i+n/10, i+2*n/10, ...
// i+9*n/10 (n must be divisible by 10 for full coverage — true for N=100).
// n is passed as a 1-element device array.
__global__ void add(int *n, int *a, int *b, int *c){
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
    c[blockIdx.x+(n[0]/10)] = a[blockIdx.x+(n[0]/10)] + b[blockIdx.x+(n[0]/10)];
    c[blockIdx.x+2*(n[0]/10)] = a[blockIdx.x+2*(n[0]/10)] + b[blockIdx.x+2*(n[0]/10)];
    c[blockIdx.x+3*(n[0]/10)] = a[blockIdx.x+3*(n[0]/10)] + b[blockIdx.x+3*(n[0]/10)];
    c[blockIdx.x+4*(n[0]/10)] = a[blockIdx.x+4*(n[0]/10)] + b[blockIdx.x+4*(n[0]/10)];
    c[blockIdx.x+5*(n[0]/10)] = a[blockIdx.x+5*(n[0]/10)] + b[blockIdx.x+5*(n[0]/10)];
    c[blockIdx.x+6*(n[0]/10)] = a[blockIdx.x+6*(n[0]/10)] + b[blockIdx.x+6*(n[0]/10)];
    c[blockIdx.x+7*(n[0]/10)] = a[blockIdx.x+7*(n[0]/10)] + b[blockIdx.x+7*(n[0]/10)];
    c[blockIdx.x+8*(n[0]/10)] = a[blockIdx.x+8*(n[0]/10)] + b[blockIdx.x+8*(n[0]/10)];
    c[blockIdx.x+9*(n[0]/10)] = a[blockIdx.x+9*(n[0]/10)] + b[blockIdx.x+9*(n[0]/10)];
}

// Despite the name, fills a with 0..N-1 (the rand() call is commented out).
void random_ints(int* a, int N){
    int i;
    for(i = 0; i < N; i++){
        //a[i] = rand();
        a[i] = i;
    }
}

#define N (100)
// NOTE(review): THREADS_PER_BLOCK is defined but never used — the launch
// below uses 1 thread per block.
#define THREADS_PER_BLOCK 4

int main(void){
    mykernel<<<1,1>>>();
    printf("Hello World!\n");
    int *a, *b, *c;
    int *d_a, *d_b, *d_c, *d_n;
    int size = N * sizeof(int);
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    cudaMalloc((void **)&d_n, sizeof(int));
    // Host inputs: a[i] = b[i] = i, so the expected output is c[i] = 2*i.
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);
    // N is shipped to the device as a 1-element buffer.
    int *N2 = (int*)malloc(sizeof(int));
    N2[0] = N;
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, N2, sizeof(int), cudaMemcpyHostToDevice);
    // N/10 blocks x 1 thread; each block covers 10 strided elements.
    add<<<N/10, 1>>>(d_n, d_a, d_b, d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++){
        cout<<c[i]<<endl;
    }
    // NOTE(review): d_n is never cudaFree'd — confirm the leak is acceptable
    // for this demo.
    free(a);
    free(b);
    free(c);
    free(N2);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
5,390
#include "blur.cuh"

// Horizontal sliding-window box-sum pass over interleaved RGB rows
// (3 unsigned ints per pixel, row stride w*3). One THREAD per image row;
// the index math (32 * blockIdx.x + threadIdx.x) implies blocks of 32
// threads. Each row is processed as a running sum of a (2r+1)-wide window,
// built incrementally left to right; the divide* kernels below turn the
// sums into averages.
// NOTE(review): the first loop accumulates into output with +=, so output
// must be zero-initialized before launch — confirm at the call site.
__global__ void blurMain(unsigned int w, unsigned int r, unsigned int * src, unsigned int * output) {
    unsigned int offset = ((32 * blockIdx.x + threadIdx.x) * w) * 3;
    unsigned int endIndex = offset + r * 3;
    // First pixel of the row: sum of src[0..r].
    for (unsigned int index = offset; index <= endIndex; index += 3) {
        output[offset] += src[index];
        output[offset + 1] += src[index + 1];
        output[offset + 2] += src[index + 2];
    }
    // Pixels with x < r+1: window grows on the right, nothing leaves yet.
    for (unsigned int index = offset + 3; index <= endIndex; index += 3) {
        output[index] = output[index - 3] + src[index + r * 3];
        output[index + 1] = output[index - 3 + 1] + src[index + r * 3 + 1];
        output[index + 2] = output[index - 3 + 2] + src[index + r * 3 + 2];
    }
    // Interior pixels: slide the window — add the entering pixel on the
    // right, subtract the leaving pixel on the left.
    endIndex = offset + (w - r) * 3;
    for (unsigned int index = offset + (r + 1) * 3; index < endIndex; index += 3) {
        output[index] = output[index - 3];
        output[index + 1] = output[index - 3 + 1];
        output[index + 2] = output[index - 3 + 2];
        output[index] += src[index + (r + 1) * 3];
        output[index + 1] += src[index + (r + 1) * 3 + 1];
        output[index + 2] += src[index + (r + 1) * 3 + 2];
        output[index] -= src[index - r * 3];
        output[index + 1] -= src[index - r * 3 + 1];
        output[index + 2] -= src[index - r * 3 + 2];
    }
    // Rightmost r pixels: window only shrinks on the left.
    endIndex = offset + w * 3;
    for (unsigned int index = offset + (w - r) * 3; index < endIndex; index += 3) {
        output[index] = output[index - 3] - src[index - r * 3];
        output[index + 1] = output[index - 3 + 1] - src[index - r * 3 + 1];
        output[index + 2] = output[index - 3 + 2] - src[index - r * 3 + 2];
    }
}

// Same sliding-window row pass for the bottom-edge rows: one thread per
// row, rows h, h+1, ... (offset = (h + threadIdx.x) * w * 3) — presumably
// launched as a single block sized to the remaining rows; confirm.
__global__ void blurBottomEdge(unsigned int w, unsigned int h, unsigned int r, unsigned int * src, unsigned int * output) {
    unsigned int offset = (h + threadIdx.x) * w * 3;
    unsigned int endIndex = offset + r * 3;
    // First pixel of the row.
    for (unsigned int index = offset; index <= endIndex; index += 3) {
        output[offset] += src[index];
        output[offset + 1] += src[index + 1];
        output[offset + 2] += src[index + 2];
    }
    // Pixels with x < r+1.
    for (unsigned int index = offset + 3; index <= endIndex; index += 3) {
        output[index] = output[index - 3] + src[index + r * 3];
        output[index + 1] = output[index - 3 + 1] + src[index + r * 3 + 1];
        output[index + 2] = output[index - 3 + 2] + src[index + r * 3 + 2];
    }
    endIndex = offset + (w - r) * 3;
    for (unsigned int index = offset + (r + 1) * 3; index < endIndex; index += 3) {
        output[index] = output[index - 3];
        output[index + 1] = output[index - 3 + 1];
        output[index + 2] = output[index - 3 + 2];
        output[index] += src[index + (r + 1) * 3];
        output[index + 1] += src[index + (r + 1) * 3 + 1];
        output[index + 2] += src[index + (r + 1) * 3 + 2];
        output[index] -= src[index - r * 3];
        output[index + 1] -= src[index - r * 3 + 1];
        output[index + 2] -= src[index - r * 3 + 2];
    }
    endIndex = offset + w * 3;
    for (unsigned int index = offset + (w - r) * 3; index < endIndex; index += 3) {
        output[index] = output[index - 3] - src[index - r * 3];
        output[index + 1] = output[index - 3 + 1] - src[index - r * 3 + 1];
        output[index + 2] = output[index - 3 + 2] - src[index - r * 3 + 2];
    }
}

// Convert window sums to averages for interior rows (full-height windows):
// divider ramps up over the left r pixels, stays at (2r+1)^2-style full
// area in the middle, and ramps down over the right r pixels.
// Row handled: blockIdx.x*32 + threadIdx.x + h + r — NOTE(review): h and r
// presumably skip the top-edge rows handled by divideTopEdge; confirm
// against the launch site.
__global__ void divideMain(unsigned int w, unsigned int h, unsigned int r, unsigned int * src, unsigned char * output) {
    unsigned int index, offset = (blockIdx.x * 32 + threadIdx.x + h + r) * w * 3;
    float height = 2 * r + 1;         // full vertical extent of the window
    float divider = (r + 1)*height;   // area of the leftmost (clipped) window
    // Left edge: window widens, divider grows by one column per pixel.
    for (index = offset, offset += r * 3; index < offset; index += 3) {
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
        divider += height;
    }
    // Middle: full window area.
    for (index = offset, offset += (w - 2 * r) * 3; index < offset; index += 3) {
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
    }
    // Right edge: window narrows again.
    for (index = offset, offset += r * 3; index < offset; index += 3) {
        divider -= height;
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
    }
}

// Divide pass for the top r rows: the vertical extent is clipped to
// r + 1 + threadIdx.x rows (one thread per edge row).
__global__ void divideTopEdge(unsigned int w, unsigned int r, unsigned int * src, unsigned char * output) {
    unsigned int index, offset = threadIdx.x * w * 3;
    float height = r + 1 + threadIdx.x;
    float divider = (r + 1)*height;
    for (index = offset, offset += r * 3; index < offset; index += 3) {
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
        divider += height;
    }
    for (index = offset, offset += (w - 2 * r) * 3; index < offset; index += 3) {
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
    }
    for (index = offset, offset += r * 3; index < offset; index += 3) {
        divider -= height;
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
    }
}

// Divide pass for the bottom r rows: vertical extent shrinks from 2r
// down as threadIdx.x increases (rows h, h+1, ...).
__global__ void divideBottomEdge(unsigned int w, unsigned int h, unsigned int r, unsigned int * src, unsigned char * output) {
    unsigned int index, offset = (h + threadIdx.x) * w * 3;
    float height = 2 * r - threadIdx.x;
    float divider = (r + 1)*height;
    for (index = offset, offset += r * 3; index < offset; index += 3) {
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
        divider += height;
    }
    for (index = offset, offset += (w - 2 * r) * 3; index < offset; index += 3) {
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
    }
    for (index = offset, offset += r * 3; index < offset; index += 3) {
        divider -= height;
        output[index] = (unsigned char)(src[index] / divider);
        output[index + 1] = (unsigned char)(src[index + 1] / divider);
        output[index + 2] = (unsigned char)(src[index + 2] / divider);
    }
}
5,391
#include<stdio.h> #include<cuda.h> #include<cuda_runtime.h> #define N 512 #define BLOCK_SIZE 16 __global__ void MatAdd(float *A, float *B, float *C){ int i =blockIdx.x * blockDim.x + threadIdx.x; int j =blockIdx.y * blockDim.y + threadIdx.y; if(i<N && j<N) C[i*N+j]=A[i*N+j]+B[i*N+j]; } int main(){ float *h_A, *h_B, *h_C; float *d_A, *d_B, *d_C; int i; h_A = (float*)malloc(N*N*sizeof(float)); h_B = (float*)malloc(N*N*sizeof(float)); h_C = (float*)malloc(N*N*sizeof(float)); //init data for(i=0;i<(N*N);i++){ h_A[i]=1.0; h_B[i]=2.0; h_C[i]=0.0; } //allocate device memory cudaMalloc((void**)&d_A, N*N*sizeof(float)); cudaMalloc((void**)&d_B, N*N*sizeof(float)); cudaMalloc((void**)&d_C, N*N*sizeof(float)); //transfe data to device cudaMemcpy(d_A,h_A,N*N*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_B,h_B,N*N*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_C,h_C,N*N*sizeof(float),cudaMemcpyHostToDevice); dim3 blockSize(1,1); dim3 numBlock(N,N); MatAdd<<<numBlock,blockSize>>>(d_A,d_B,d_C); cudaDeviceSynchronize(); cudaMemcpy(h_C,d_C,N*N*sizeof(float),cudaMemcpyDeviceToHost); /* for(i<0;i<N*N;i++){ if(h_C[i]!=3.0) printf("ERRORR:%f,idx:%d\n",h_C[i],i); break; }*/ printf("PASS!!!!!!!!!!!!!!!\n"); free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
5,392
#include<stdio.h> #define BS 8 #define N 10 void print(int *A,int n){ for(int i=0; i<n; i++) printf("%d ",A[i]); printf("\n"); } __global__ void add_array(int *A, int *B, int n){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) A[i] = A[i] + B[i]; } int main(void){ int threadsPerBlock, blocksPerGrid, n, *A, *B, *dA, *dB; n=N; threadsPerBlock=BS; blocksPerGrid = (n+BS-1)/BS; A = (int*) malloc(n*sizeof(int)); B = (int*) malloc(n*sizeof(int)); for(int i=0; i<n; i++) A[i]=i; for(int i=0; i<n; i++) B[i]=i*2; print(A, n); print(B, n); cudaMalloc((void**)&dA, n*sizeof(int)); cudaMalloc((void**)&dB, n*sizeof(int)); cudaMemcpy(dA, A, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dB, B, n*sizeof(int), cudaMemcpyHostToDevice); add_array<<<blocksPerGrid,threadsPerBlock>>>(dA, dB, n); cudaMemcpy(A , dA, n*sizeof(int), cudaMemcpyDeviceToHost); print(A, n); cudaFree(dA); free(A); return 0; }
5,393
#include "includes.h" __global__ void matMultCuda(float *cu_C, float *cu_A, float *cu_B, unsigned int n) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; int col = (blockIdx.y * blockDim.y) + threadIdx.y; //Log row and col of each thread //printf("row : %d , col : %d \n", row, col); if (row < n && col < n) { int temp_sum = 0; for (int elem = 0; elem < n; elem++) { temp_sum += cu_A[row * n + elem] * cu_B[elem * n + col]; } cu_C[row * n + col] = temp_sum; } };
5,394
#include "includes.h" __global__ void MHDComputedUy_CUDA3_kernel(float *FluxD, float *FluxS1, float *FluxS2, float *FluxS3, float *FluxTau, float *FluxBx, float *FluxBy, float *FluxBz, float *FluxPhi, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float *dUBx, float *dUBy, float *dUBz, float *dUPhi, float dtdx, int size, int dim0, int dim1, int dim2) { // get thread and block index const long tx = threadIdx.x; const long bx = blockIdx.x; const long by = blockIdx.y; int igridy = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE; if (igridy < 2 || igridy > size - 3) return; int k = igridy/(dim0*dim1); int i = (igridy - k*dim0*dim1)/dim1; int j = igridy - k*dim0*dim1 - i*dim1; int igrid = i + (j + k*dim1) * dim0; int igridyp1 = igridy + 1; k = igridyp1/(dim0*dim1); i = (igridyp1 - k*dim0*dim1)/dim1; j = igridyp1 - k*dim0*dim1 - i*dim1; int igridp1 = i + (j + k*dim1) * dim0; dUD [igrid] += (FluxD [igrid] - FluxD [igridp1])*dtdx; dUS1 [igrid] += (FluxS1 [igrid] - FluxS1 [igridp1])*dtdx; dUS2 [igrid] += (FluxS2 [igrid] - FluxS2 [igridp1])*dtdx; dUS3 [igrid] += (FluxS3 [igrid] - FluxS3 [igridp1])*dtdx; dUTau[igrid] += (FluxTau[igrid] - FluxTau[igridp1])*dtdx; dUBx [igrid] += (FluxBx [igrid] - FluxBx [igridp1])*dtdx; dUBy [igrid] += (FluxBy [igrid] - FluxBy [igridp1])*dtdx; dUBz [igrid] += (FluxBz [igrid] - FluxBz [igridp1])*dtdx; dUPhi[igrid] += (FluxPhi[igrid] - FluxPhi[igridp1])*dtdx; }
5,395
//pass
//--blockDim=512 --gridDim=512
#include <cuda.h>

//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////

// Original kernels are templated. We will check the float case.
#define _type float

//----------------------------------------------------------------------------
// File: BitonicSort.cpp
//
// Implements Bitonic sort in C++ AMP
// Supports only int, unsigned, long and unsigned long
//----------------------------------------------------------------------------
#define BITONIC_TILE_SIZE 512

// Should be a square matrix
#define NUM_ELEMENTS (BITONIC_TILE_SIZE * BITONIC_TILE_SIZE)
#define MATRIX_WIDTH BITONIC_TILE_SIZE
#define MATRIX_HEIGHT BITONIC_TILE_SIZE

// Should be divisible by MATRIX_WIDTH and MATRIX_HEIGHT
// else parallel_for_each will crash
#define TRANSPOSE_TILE_SIZE 16

//----------------------------------------------------------------------------
// Kernel implements partial sorting on accelerator, BITONIC_TILE_SIZE at a time
//----------------------------------------------------------------------------
// One block of BITONIC_TILE_SIZE threads sorts one tile in shared memory:
// each thread owns one element; `ulevel` bounds the compare distance, and
// `ulevelmask & global_idx` selects the sort direction for this element's
// bitonic subsequence. This file is a verification harness (GPUVerify
// annotations on the first two lines; MUTATION removes the load barrier to
// inject a race), so barrier placement here is deliberate — do not reorder.
__global__ void bitonic_sort_kernel(_type* data, unsigned ulevel, unsigned ulevelmask)
{
    __shared__ _type sh_data[BITONIC_TILE_SIZE];

    int local_idx = threadIdx.x;
    int global_idx = blockIdx.x*blockDim.x + threadIdx.x;

    // Cooperatively load data - each thread will load data from global memory
    // into tile_static
    sh_data[local_idx] = data[global_idx];

    // Wait till all threads have loaded their portion of data
#ifndef MUTATION
    /* BUGINJECT: REMOVE_BARRIER, DOWN */
    __syncthreads();
#endif

    // Sort data in tile_static memory: classic bitonic compare-exchange
    // network, halving the partner distance j each pass. Each thread reads
    // both elements of its pair (indices local_idx&~j and local_idx|j) and
    // keeps either its own value or its partner's (local_idx^j).
    for (unsigned int j = ulevel >> 1 ; j > 0 ; j >>= 1)
    {
        _type result = ((sh_data[local_idx & ~j] <= sh_data[local_idx | j]) == (bool)(ulevelmask & global_idx)) ? sh_data[local_idx ^ j] : sh_data[local_idx];
        // Barrier between the read above and the write below: both threads of
        // a pair must finish reading before either overwrites.
        __syncthreads();

        sh_data[local_idx] = result;
        __syncthreads();
    }

    // Store shared data
    data[global_idx] = sh_data[local_idx];
}
5,396
#include <cstdint> #include <thrust/device_vector.h> #include <thrust/sort.h> template <typename K, typename V> void SortByFreq(K *freq, V *qcode, int size) { using namespace thrust; sort_by_key(device_ptr<K>(freq), // device_ptr<K>(freq + size), // device_ptr<V>(qcode)); } template void SortByFreq<unsigned int, uint8_t>(unsigned int *, uint8_t *, int); template void SortByFreq<unsigned int, uint16_t>(unsigned int *, uint16_t *, int); template void SortByFreq<unsigned int, uint32_t>(unsigned int *, uint32_t *, int);
5,397
extern "C"{ __global__ void threshold(unsigned char * src,unsigned char * dst,int width,int height,int thresh){ //Grid中x方向上的索引 int xIndex = threadIdx.x + blockIdx.x * blockDim.x; //Grid中y方向上的索引 int yIndex = threadIdx.y + blockIdx.y * blockDim.y; int idx = xIndex + yIndex * width; if (xIndex < width && yIndex < height && idx < width * height){ if (src[idx] > thresh){ dst[idx] = 255; }else{ dst[idx] = 0; } } } __global__ void multi_threshold(unsigned char * src,unsigned char * dst,int width,int height,int min_thresh,int max_thresh){ //Grid中x方向上的索引 int xIndex = threadIdx.x + blockIdx.x * blockDim.x; //Grid中y方向上的索引 int yIndex = threadIdx.y + blockIdx.y * blockDim.y; int idx = xIndex + yIndex * width; if (xIndex < width && yIndex < height && idx < width * height){ int pixel = src[idx]; if (pixel >= min_thresh && pixel <= max_thresh){ dst[idx] = 255; }else{ dst[idx] = 0; } } } }
5,398
#include <iostream> __global__ void fac() { printf("aa\n"); } int main() { fac<<<1, 10>>>(); }
5,399
// allocate pitch memory and cudaArray #include <stdio.h> #include <memory.h> #include <cuda.h> #include <cuda_runtime.h> #define NX 1003 #define NY 1003 int main(){ size_t sizeByte = NX*NY*sizeof(float); //host data declaration and initialization float* hdata = (float* )malloc(sizeByte); for(int i=0;i<NX*NY; i++){ hdata[i] = i; } //using pitch linear memory float *ddata_pl; float *ddata_pl_res; size_t sizePL; cudaMallocPitch((void**)&ddata_pl, &sizePL, NX*sizeof(float), NY); printf("Pitch of ddata_pl is %d \n", sizePL); printf("While Pitch of hdata is %d \n", NX*sizeof(float)); return 0; }
5,400
#include <stdio.h> #include <cuda.h> int main(int argc, char** argv){ printf("Hello, world!"); return 0; }