serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
901
#include "includes.h" /*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/

//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
//
// Layout assumed by the addressing below: the density matrix is a
// (2^no_qubits x 2^no_qubits) array of complex values stored as interleaved
// doubles — the `<< 1` on the addresses selects the real part (even index),
// and the `+= 1` in the second branch selects the imaginary part (odd index).
// `mask` has exactly one bit set, identifying which qubit's basis to rotate
// — NOTE(review): single-bit mask is assumed from usage, confirm at call site.
//
// Each (x, y) thread pair touches the two off-diagonal partners
// (b_addr, c_addr) that differ only in the masked bit, and applies the
// butterfly (b, c) -> ((b+c)/sqrt2, (b-c)/sqrt2). Since the matrix
// [[1,1],[1,-1]]/sqrt(2) is its own inverse, so is this kernel.
__global__ void bit_to_pauli_basis(double *complex_dm, unsigned int mask, unsigned int no_qubits)
{
    const int x = (blockIdx.x *blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y *blockDim.y) + threadIdx.y;

    // Despite the name, this is 1/sqrt(2) — the butterfly normalization.
    const double sqrt2 = 0.70710678118654752440;
    //const double sqrt2 = 1;

    // Guard: the grid may be larger than the 2^no_qubits x 2^no_qubits matrix.
    if ((x >= (1 << no_qubits)) || (y >= (1 << no_qubits))) return;

    // Addresses of the two partner entries; `<< 1` converts a complex index
    // into an index into the interleaved double array (real component).
    int b_addr = ((x|mask)<<no_qubits | (y&~mask)) << 1;
    int c_addr = ((x&~mask)<<no_qubits | (y|mask)) << 1;

    // One half of the (x, y) pairs handles the real components...
    if (x&mask && (~y&mask)){
        double b = complex_dm[b_addr];
        double c = complex_dm[c_addr];
        complex_dm[b_addr] = (b+c)*sqrt2;
        complex_dm[c_addr] = (b-c)*sqrt2;
    }
    // ...and the mirrored half handles the imaginary components (odd index).
    if ((~x&mask) && (y&mask)){
        b_addr+=1;
        c_addr+=1;
        double b = complex_dm[b_addr];
        double c = complex_dm[c_addr];
        complex_dm[b_addr] = (b+c)*sqrt2;
        complex_dm[c_addr] = (b-c)*sqrt2;
    }
}
902
#include<iostream>
#define THREADS_PER_BLOCK 256
#define BLOCKS 128
#define N (1 << 16)
using namespace std;

// Sums the N-element float array A. Each block writes its partial sum to
// blocks[blockIdx.x]; the host adds the BLOCKS partials.
//
// Fixes vs. the original:
//  * shared array and per-thread accumulator were `int` although A is float
//    — float data would have been truncated on every addition;
//  * the reduction loop's `__syncthreads()` was only reached by threads
//    still satisfying `threadIdx.x < num_threads`, so threads that dropped
//    out skipped the barrier while others waited: undefined behavior. The
//    barrier now sits in a loop every thread executes in full.
__global__ void add_array(float A[], float blocks[])
{
    __shared__ float array_per_block[THREADS_PER_BLOCK];

    int global_thread_ID = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    int step = gridDim.x * THREADS_PER_BLOCK;

    // Grid-stride accumulation of this thread's share of A.
    float my_sum = 0.0f;
    for(int i = global_thread_ID; i < N; i += step)
        my_sum += A[i];
    array_per_block[threadIdx.x] = my_sum;
    __syncthreads();

    // Tree reduction in shared memory; barrier outside the `if` so every
    // thread in the block reaches it on every iteration.
    for(int stride = THREADS_PER_BLOCK / 2; stride > 0; stride >>= 1)
    {
        if(threadIdx.x < stride)
            array_per_block[threadIdx.x] += array_per_block[threadIdx.x + stride];
        __syncthreads();
    }

    if(threadIdx.x == 0)
        blocks[blockIdx.x] = array_per_block[0];
}

// Fill the host array with ones, so the expected total is N.
void init_array(float A[])
{
    for(int i = 0; i < N; i++)
        A[i] = 1;
}

int main()
{
    float *host_A = new float[N],
          *host_blocks = new float[BLOCKS],
          *cuda_A, *cuda_blocks,
          final_sum = 0;

    init_array(host_A);

    cudaMalloc(&cuda_A, sizeof(float) * N);
    cudaMemcpy(cuda_A, host_A, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMalloc(&cuda_blocks, sizeof(float) * BLOCKS);

    add_array<<<BLOCKS, THREADS_PER_BLOCK>>>(cuda_A, cuda_blocks);

    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(host_blocks, cuda_blocks, sizeof(float) * BLOCKS, cudaMemcpyDeviceToHost);

    for(int i = 0; i < BLOCKS; i++)
        final_sum += host_blocks[i];

    cout << "Final Sum : " << final_sum << endl;

    // Was free(): memory from new[] must be released with delete[].
    delete[] host_A;
    delete[] host_blocks;
    cudaFree(cuda_A);
    cudaFree(cuda_blocks);
    return 0;
}
903
// NOTE(review): this entire translation unit is commented out as found.
// It is a host<->device bandwidth benchmark comparing pageable (malloc)
// against pinned (cudaHostAlloc) buffers over 100 transfers in each
// direction. Preserved verbatim, only reformatted for readability.
/*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2\imgproc.hpp>
#include <iostream>

// Time 100 copies of `size` uints between a pageable host buffer and the
// device; `up` selects host->device, otherwise device->host. Returns ms.
float mallocTest(unsigned int size,bool up)
{
    cudaEvent_t start, stop;
    unsigned int *d_var;
    unsigned int *h_var;
    float elapsedTime=0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    h_var = (unsigned int*)malloc(sizeof(unsigned int)*size);
    cudaMalloc((void**)&d_var, size*sizeof(unsigned int));
    cudaEventRecord(start,0);
    for (int i = 0; i < 100; i++)
    {
        if (up)
            cudaMemcpy(d_var,h_var,sizeof(unsigned int)*size,cudaMemcpyHostToDevice);
        else
            cudaMemcpy(h_var, d_var, sizeof(unsigned int)*size, cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_var);
    free(h_var);
    return elapsedTime;
}

// Same benchmark but with a pinned (page-locked) host buffer.
float hostAllocTest(unsigned int size, bool up)
{
    cudaEvent_t start, stop;
    unsigned int *d_var;
    unsigned int *h_var;
    float elapsedTime=0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaHostAlloc((void**)&h_var, sizeof(unsigned int)*size, cudaHostAllocDefault);
    cudaMalloc((void**)&d_var, size*sizeof(unsigned int));
    cudaEventRecord(start, 0);
    for (int i = 0; i < 100; i++)
    {
        if (up)
            cudaMemcpy(d_var, h_var, sizeof(unsigned int)*size, cudaMemcpyHostToDevice);
        else
            cudaMemcpy(h_var, d_var, sizeof(unsigned int)*size, cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_var);
    cudaFreeHost(h_var);
    return elapsedTime;
}

int main()
{
    unsigned int size = 10 * 1024 * 1024;
    float elapsedTime=0;
    // Total data moved per test: 100 transfers of size*4 bytes, in MB.
    float MB = (float)100 * size*sizeof(unsigned int) / 1024 / 1024;
    elapsedTime = mallocTest(size,true);
    printf("Malloc up_transfer(MB/s): %3.1f\n", MB / (elapsedTime / 1000));
    elapsedTime = hostAllocTest(size, true);
    printf("cudaHostAlloc up_transfer(MB/s): %3.1f\n", MB / (elapsedTime / 1000));
    elapsedTime = mallocTest(size, false);
    printf("Malloc down_transfer(MB/s): %3.1f\n", MB / (elapsedTime / 1000));
    elapsedTime = hostAllocTest(size, false);
    printf("cudaHostAlloc down_transfer(MB/s): %3.1f\n", MB/(elapsedTime/1000));
}*/
904
#include<stdio.h>
#include<cuda.h>
#include<math.h>

// Computes C = A * b for a fixed-size problem: A is 16x32 (row-major),
// b is a 32-vector, C is a 16-vector. Launched as <<<1,16>>> — one thread
// per output row, so `col` is really the row index; the 32-column width
// is hard-coded to match the host arrays below.
__global__ void Matmultkernel(int* A , int* b, int* C)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int P = 0;  // dot-product accumulator for this row
    for(int k= 0; k<32; k++)
    {
        int MA = A[col * 32 + k];
        int Mb = b[k];
        P = P + MA * Mb;
    }
    C[col] = P;
}

int main()
{
    FILE * pFile;
    int i,j;
    pFile = fopen("problem1.out","w");

    // NOTE(review): despite the d_ prefix these three are HOST arrays;
    // the actual device buffers are A, b, C below.
    int d_A[16][32];
    int d_b[32][1];
    int d_C[16][1];

    float time;
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);

    // Fill and echo A: A[i][j] = i + j.
    for(i = 0;i<16;i++)
    {
        for (j=0;j<32;j++)
        {
            d_A[i][j] = i + j;
            printf(" %d ",d_A[i][j]);
        }
        printf("\n");
    }
    printf("\n");
    // Fill and echo b: b[i] = i.
    for(i=0; i<32; i++)
    {
        d_b[i][0] = i;
        printf(" %d \t", d_b[i][0]);
    }

    size_t sizeA = 16 * 32 * sizeof(int);
    size_t sizeb = 32 * sizeof(int);
    size_t sizeC = 16 * sizeof(int);
    int* A; cudaMalloc(&A,sizeA);
    int* b; cudaMalloc(&b,sizeb);
    int* C; cudaMalloc(&C,sizeC);
    //Allocate and Load A and B into device memory
    cudaDeviceProp deviceProp;
    const int currentDevice = 0;
    if(cudaGetDeviceProperties(&deviceProp, currentDevice) == cudaSuccess)
        printf("Device %d: %s \n", currentDevice, deviceProp.name);

    // Timing is "inclusive": it covers both H2D copies, the kernel, and
    // the D2H copy between start1 and stop1.
    cudaEventRecord(start1, 0);
    cudaMemcpy(A, d_A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(b, d_b, sizeb, cudaMemcpyHostToDevice);
    // Invoke kernel
    Matmultkernel<<<1,16>>>(A, b, C);
    //bring the result back from the device memory into the host
    cudaMemcpy(d_C, C, sizeC, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop1, 0);
    cudaEventSynchronize(stop1);

    // Print and persist the 16 result entries.
    for(i = 0; i<16;i++)
    {
        printf("\n %d", d_C[i][0]);
        fprintf(pFile, "%d \n",d_C[i][0]);
    }
    fclose (pFile);
    cudaFree(A);
    cudaFree(b);
    cudaFree(C);
    cudaEventElapsedTime(&time, start1, stop1);
    printf("\n Inclusive time is %f", time);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    return 0;
}
905
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>

#define BLOCK_SIZE 768

//-------- Save digit frequencies (buckets 0.0..0.9) to freq.dat --------//
// Each of the `size` threads draws two uniforms, so the normalizer is size*2.
void saveFrequenciesToFile(int *array, int size){
    FILE *filePointer = fopen("freq.dat", "w");
    if (filePointer == NULL) return;  // best-effort output, don't crash
    for (int i = 0; i < 10; i++)
    {
        fprintf(filePointer, "0.%d, %f\n", i, (float)array[i]/(size*2));
    }
    fclose(filePointer);  // was leaked
}

//-------- Random initialization --------//
// One curand state per sample. The original indexed states[threadIdx.x],
// so every block reused (and re-seeded) the same BLOCK_SIZE states; use
// the global thread id and guard against the grid tail.
__global__ void initRand(unsigned int seed, curandState_t *states, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
        curand_init(seed, id, 0, &states[id]);
}

//-------- Monte-Carlo sampling --------//
// Each thread draws a point (rand1, rand2) in (0,1]^2, counts hits inside
// the unit quarter-circle into a per-block shared counter, and histograms
// both coordinates' first decimal digit into frequencies[0..9].
__global__ void calculateRandomNumbers(curandState_t *states, int *frequencies, int *result, int size){
    __shared__ int partialCount;
    if (threadIdx.x == 0)
    {
        partialCount = 0;  // was 0.0 assigned to an int
    }
    __syncthreads();

    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
    {
        float rand1 = curand_uniform(&states[id]);
        float rand2 = curand_uniform(&states[id]);
        if ((rand1*rand1) + (rand2*rand2) <= 1)
        {
            atomicAdd(&partialCount, 1);
        }
        // curand_uniform returns values in (0, 1], so r*10 can be exactly
        // 10 — the original wrote one slot past frequencies[9]. Clamp.
        int b1 = (int)(rand1 * 10); if (b1 > 9) b1 = 9;
        int b2 = (int)(rand2 * 10); if (b2 > 9) b2 = 9;
        atomicAdd(&frequencies[b1], 1);
        atomicAdd(&frequencies[b2], 1);
    }
    // Barriers must be OUTSIDE the divergent `id < size` branch: in the
    // last (partial) block the original deadlocked/UB'd on this.
    __syncthreads();
    if (threadIdx.x == 0)
    {
        atomicAdd(&result[0], partialCount);
    }
}

int main (int argc, char *argv[]) {
    //-------- Argument checking --------//
    if (argc != 2){
        printf("Incorrect number of parameters :(\n");
        printf("Try: \"./MatrixMult <MATRIX SIZE>\"\n");
        exit(0);
    }
    int size = atoi(argv[1]);
    if(size < 0){
        printf("Negative parameter not allowed. :P\n");
        printf("Try: \"./MatrixMult <MATRIX SIZE>\"\n");
        exit(0);
    }

    //--Initializing variables (zeroed: the device histogram is built by
    //  atomicAdd, so both host and device copies must start at 0; the
    //  original copied an uninitialized malloc'd buffer to the device)
    int *frequencies, *dev_frequencies;
    int *result, *dev_result;
    int memorySize = 10*sizeof(int);
    frequencies = (int *)calloc(10, sizeof(int));
    result = (int *)calloc(1, sizeof(int));

    //--Initializing CUDA memory
    cudaMalloc((void **)&dev_frequencies, memorySize);
    cudaMalloc((void **)&dev_result, sizeof(int));
    cudaMemcpy(dev_frequencies, frequencies, memorySize, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_result, result, sizeof(int), cudaMemcpyHostToDevice);
    int blockNumber = ceil((float)size/BLOCK_SIZE);

    //--Initializing Random States (one per sample)
    curandState_t *states;
    cudaMalloc((void**) &states, size*sizeof(curandState_t));
    initRand<<<blockNumber, BLOCK_SIZE>>>(time(NULL), states, size);

    //--Calculate Pi
    calculateRandomNumbers<<<blockNumber, BLOCK_SIZE>>>(states, dev_frequencies, dev_result, size);
    cudaDeviceSynchronize();  // cudaThreadSynchronize is deprecated

    cudaMemcpy(frequencies, dev_frequencies, memorySize, cudaMemcpyDeviceToHost);
    // Was sizeof(float): same byte count but the buffer holds an int.
    cudaMemcpy(result, dev_result, sizeof(int), cudaMemcpyDeviceToHost);

    float pi = (((float)result[0] / (float)size)*4);
    printf("Pi approximated value is: %f\n", pi);

    // Free DEVICE buffers with cudaFree — the original called
    // cudaFree(frequencies) on a host pointer and leaked dev_frequencies.
    cudaFree(dev_frequencies);
    cudaFree(dev_result);
    cudaFree(states);

    //-- Saving histogram to file (before freeing the host buffer)
    saveFrequenciesToFile(frequencies, size);
    free(frequencies);
    free(result);
    exit(0);
}
906
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<math.h>
// Dataset dimensions (shuttle dataset); last column of each row is the
// class label, the first col-1 columns are features.
#define row 43500
#define col 10
#define test_row 14500
#define test_col 10

// For each test point i (one thread per test point): partially
// selection-sorts the first k entries of its distance column, tallies the
// class labels of those k nearest neighbours into d_kneighbours[i*set ..
// i*set+set-1], and writes the majority class (1-based) to res_class[i].
// distance1 holds interleaved pairs [distance, label] indexed as
// distance1[2*(trainIdx*test_row + testIdx)].
// NOTE(review): d_kneighbours is incremented with += but is never zeroed
// after cudaMalloc (which does not zero memory) — the tallies start from
// garbage. A cudaMemset before launch is needed.
__global__ void KminNeighbourFind(double *distance1, int *d_kneighbours,int k,int set,int *res_class)
{
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    int set_i;
    if(i<test_row)
    {
        // Selection sort of the first k positions of column i.
        for(int i1=0;i1<k;i1++)
        {
            int min=2*(i1*test_row+i);
            for(int j1=i1+1;j1<row;j1++)
            {
                if(distance1[2*(j1*test_row+i)]<distance1[min])
                    min=2*(j1*test_row+i);
            }
            int dist=2*(i1*test_row+i),clas=2*(i1*test_row+i)+1;
            double temp=distance1[dist];
            distance1[dist]=distance1[min];
            distance1[min]=temp;
            // NOTE(review): the label swap is commented out, so after the
            // distance swap the label read below comes from the slot the
            // minimum distance was MOVED TO, not its original label.
            //temp=distance1[clas];
            //distance1[clas]=distance1[min+1];
            //distance1[min+1]=temp;
            int index= (int)distance1[min+1]-1;  // label is 1-based
            set_i=i*set;
            index= index+set_i;
            d_kneighbours[index]+=1;
            //w=distnace1[2*(0*test_row+i)];
        }
        // Majority vote over the per-class tallies.
        int max=0;
        for(int l=1;l<set;l++)
        {
            if(d_kneighbours[set_i+l]>d_kneighbours[set_i+max])
                max=l;
        }
        res_class[i]=max+1;
    }
}

// One thread per (train row, test row) pair: Euclidean distance over the
// first col-1 feature columns; stores [distance, train label] interleaved.
__global__ void Euclidian_distance(double *d_train,double *d_test, double *distance)
{
    int ro=blockIdx.x*blockDim.x+threadIdx.x;
    int co=blockIdx.y*blockDim.y+threadIdx.y;
    int distanceid=2*(ro*test_row+co);
    double sum=0,diff=0;
    //checking boundary condition
    if(ro<row && co<test_row)
    {
        for(int i=0; i<col-1; i++)
        {
            diff=(d_train[ro*col+i]-d_test[co*col+i]);
            sum+=diff*diff;
        }
        distance[distanceid]=sqrt(sum);
        distance[distanceid+1]=d_train[ro*col+col-1];
    }
    // __syncthreads();
}

int main()
{
    clock_t s_time,e_time;
    double t_time;
    FILE *myfile,*myfilet;
    int k,i,j;
    // NOTE(review): ~3.3 MB + ~1.1 MB on the stack — may overflow the
    // default stack limit on some systems; heap allocation would be safer.
    double train[row*col],test1[test_row*test_col];
    double *d_train,*d_test;
    double *distance,*h_distance,*h_distance1;
    printf("Enter the k value to apply k nearest neighbour algorithm");
    scanf("%d",&k);
    printf("\n");
    int set;
    printf("Enter the total classes present in your dataset\n");
    scanf("%d",&set);
    myfile=fopen("shuttle.trn","r");
    if(myfile==NULL)
    {
        printf("data not open\n");
        exit(0);
    }
    else
    {
        printf("Successfully open\n");
    }
    myfilet=fopen("shuttle.tst","r");
    if(myfilet==NULL)
    {
        printf("Test data not open\n");
        exit(0);
    }
    else
    {
        printf("Test file open successfully\n");
    }
    //scanning train data
    for(i=0;i<row;i++)
    {
        for(j=0;j<col;j++)
        {
            fscanf(myfile,"%lf",&train[i*col+j]);
        }
    }
    //scanning test data
    for(i=0;i<test_row;i++)
    {
        for(j=0;j<test_col;j++)
        {
            fscanf(myfilet,"%lf",&test1[i*test_col+j]);
        }
    }
    cudaError_t cudastatus ;
    cudastatus = cudaDeviceReset () ;
    if(cudastatus!= cudaSuccess)
    {
        fprintf(stderr , " cudaDeviceReset failed!" ) ;
        return 1;
    }
    cudastatus = cudaSetDevice (0) ;
    if(cudastatus!=cudaSuccess)
    {
        fprintf(stderr , " cudaSetDevice failed!");
        return 1;
    }
    else printf(" Working \n " ) ;
    s_time=clock();
    size_t size=row*col*sizeof(double);
    size_t size1=test_row*test_col*sizeof(double);
    size_t distance_size=2*row*test_row*sizeof(double);  // [dist,label] pairs
    size_t class_mem=test_row*sizeof(int);
    int *res_class,*h_class;
    h_distance=(double*)malloc(distance_size);
    h_distance1=(double*)malloc(distance_size);
    h_class=(int*)malloc(class_mem);
    //* Allocate matrices in device memory
    cudaMalloc(&d_train, size);
    cudaMalloc(&d_test, size1);
    cudaMalloc(&distance,distance_size);
    cudaMalloc(&res_class,class_mem);
    //copy the data from host to device memory
    cudaMemcpy(d_train,train,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_test,test1,size1,cudaMemcpyHostToDevice);
    dim3 dimgrid((row-1)/16+1,(test_row-1)/16+1,1);
    dim3 dimblock(16,16,1);
    Euclidian_distance<<<dimgrid,dimblock>>>(d_train,d_test,distance);
    cudaMemcpy(h_distance,distance,distance_size,cudaMemcpyDeviceToHost);
    cudaFree(d_train);
    cudaFree(d_test);
    // NOTE(review): distance1 is allocated and freed but never passed to
    // any kernel — dead device allocation.
    double *distance1;
    //here code for min k neighbour cal
    cudaMalloc(&distance1,distance_size);
    size_t neighbour_size =test_row*set*sizeof(int);
    int *d_kneighbours;
    cudaMalloc(&d_kneighbours,neighbour_size);
    // NOTE(review): round-trips the distances through the host for no
    // visible reason — `distance` already holds the kernel's output.
    cudaMemcpy(distance,h_distance,distance_size,cudaMemcpyHostToDevice);
    // NOTE(review): neighbour_size is a BYTE count, so this VLA has 4x as
    // many ints as needed — and at test_row*set elements it is already a
    // very large stack object.
    int h_kneighbours[neighbour_size];
    KminNeighbourFind<<<(test_row-1)/16+1,16>>>(distance,d_kneighbours,k,set,res_class);
    cudaMemcpy(h_distance1,distance,distance_size,cudaMemcpyDeviceToHost);
    cudaMemcpy(h_kneighbours,d_kneighbours,neighbour_size,cudaMemcpyDeviceToHost);
    cudaMemcpy(h_class,res_class,class_mem,cudaMemcpyDeviceToHost);
    cudaFree(distance1);
    cudaFree(d_kneighbours);
    cudaFree(res_class);
    /*for(i=0;i<test_row;i++)
    {
        for(j=0;j<set;j++)
        {
            printf("class freq of test case %d class no %d value %d\n",i+1,j,h_kneighbours[i*set+j]);
        }
    } */
    // Compare predicted class against the label column of the test data.
    int count=0;
    for(i=0;i<test_row;i++)
    {
        if(test1[i*col+col-1]!=h_class[i])
            count++;
        printf("Given Test point %d belongs to class %d\n",i+1,h_class[i]);
    }
    e_time=clock();
    t_time=((double)(e_time-s_time))/1000000;
    printf("Count unmachted %d",count);
    printf("\n \n Total time taken %0.2lf second",t_time);
    //cudaMemcpy(h_kneighbours,d_kneighbours,neighbour_size,cudaMemcpyDeviceToHost);
    /* for(i=0;i<row;i++)
    {
        for(j=0;j<1;j++)
        {
            printf("%lf %lf",h_distance[2*(i*test_row+j)],h_distance[2*(i*test_row+j)+1]);
        }
        printf("\n");
    }
    printf("K nearest one\n\n");
    for(i=0;i<k;i++)
    {
        for(j=0;j<1;j++)
        {
            printf("%lf %lf",h_distance1[2*(i*test_row+j)],h_distance1[2*(i*test_row+j)+1]);
        }
        printf("\n");
    }*/
    return 0;
}
907
#include "includes.h"

// Extracts an outNX x outNY window from each of a batch of inNX x inNY
// images, starting at the fixed corner (offsetX, offsetY). blockIdx.z
// selects the image; both arrays are stored image-major, then row-major
// with inner stride *NY. Launch with a 2-D block covering the output tile
// and gridDim.z == nImages.
__global__ void cuArraysCopyExtractFixedOffset(const float *imageIn, const int inNX, const int inNY, float *imageOut, const int outNX, const int outNY, const int nImages, const int offsetX, const int offsetY)
{
    const int ox = blockDim.x * blockIdx.x + threadIdx.x;
    const int oy = blockDim.y * blockIdx.y + threadIdx.y;

    // Guard clause: threads outside the output window do nothing.
    if (ox >= outNX || oy >= outNY)
        return;

    const int img = blockIdx.z;
    const int dst = (img * outNX + ox) * outNY + oy;
    const int src = (img * inNX + ox + offsetX) * inNY + oy + offsetY;
    imageOut[dst] = imageIn[src];
}
908
/****
 File: findRedsDriver.cu
 Date: 5/3/2018
 By: Bill Hsu
 Compile: nvcc findRedsDriver.cu -o frgpu
 Run: ./frgpu
****/
// NOTE(review): this is an assignment SKELETON — every "your code ... here"
// stub is intentionally empty. As written, no device memory is allocated,
// no kernel is launched, and dumpResults() writes the UNINITIALIZED
// numReds[] buffer to dump.out.
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>

#define NUMPARTICLES 32768
#define NEIGHBORHOOD .05
#define THREADSPERBLOCK 128

void initPos(float *);
float findDistance(float *, int, int);
__device__ float findDistanceGPU(float *, int, int);
void dumpResults(int index[]);
__global__ void findRedsGPU(float *p, int *numI);

int main()
{
    cudaEvent_t start, stop;
    float time;
    float *pos;       // NUMPARTICLES particles, 4 floats each
    int *numReds;     // per-particle result buffer
    pos = (float *) malloc(NUMPARTICLES * 4 * sizeof(float));
    numReds = (int *) malloc(NUMPARTICLES * sizeof(int));
    initPos(pos);
    // your code to allocate device arrays for pos and numReds go here
    // create timer events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* invoke kernel findRedsGPU here */
    // NOTE(review): cudaThreadSynchronize() is deprecated in current CUDA;
    // cudaDeviceSynchronize() is the replacement.
    cudaThreadSynchronize();
    // your code to copy results to numReds[] go here
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed time = %f\n", time);
    dumpResults(numReds);
}

void initPos(float *p)
{
    // your code for initializing pos goes here
}

// NOTE(review): declared to return float but the stub has no return
// statement — undefined behavior if called before being implemented.
__device__ float findDistanceGPU(float *p, int i, int j)
{
    // your code for calculating distance for particle i and j
}

__global__ void findRedsGPU(float *p, int *numI)
{
    // your code for counting red particles goes here
}

// Writes one "index value" pair per particle to ./dump.out.
void dumpResults(int index[])
{
    int i;
    FILE *fp;
    fp = fopen("./dump.out", "w");
    for (i=0; i<NUMPARTICLES; i++)
    {
        fprintf(fp, "%d %d\n", i, index[i]);
    }
    fclose(fp);
}
909
#include <stdio.h>
#include <math.h>
#define THREADS_PER_BLOCK 256

// Naive dense matrix multiply: Pd = Md * Nd, all WIDTH x WIDTH row-major.
// One thread per output element; 2-D grid/block chosen by the host.
//
// Fix vs. the original: the inner loop did `Pd[...] +=`, accumulating on
// top of whatever the output buffer already held — and the host copied an
// UNINITIALIZED c_h into it, so results were garbage. Accumulate in a
// register and overwrite the output instead (also avoids WIDTH redundant
// global-memory read-modify-writes per element).
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    int COL = threadIdx.x + blockIdx.x * blockDim.x;
    int ROW = threadIdx.y + blockIdx.y * blockDim.y;
    if (ROW < WIDTH && COL < WIDTH)
    {
        float sum = 0.0f;
        for (int i = 0; i < WIDTH; i++)
        {
            sum += Md[ROW * WIDTH + i] * Nd[i * WIDTH + COL];
        }
        Pd[ROW * WIDTH + COL] = sum;
    }
}

int main(int arg0, char *arg1[]){
    // Was: argv[1] dereferenced unconditionally — crashed with no argument.
    if (arg0 < 2) {
        fprintf(stderr, "usage: %s <matrix width>\n", arg1[0]);
        return 1;
    }
    int WIDTH = atoi(arg1[1]);

    // Square thread blocks of sqrt(256) = 16 x 16; round the grid up so
    // it covers WIDTH even when it is not a multiple of the block edge.
    int sqrtThreads = sqrt(THREADS_PER_BLOCK);
    int nBlocks = WIDTH/sqrtThreads;
    if (WIDTH % sqrtThreads != 0)
    {
        nBlocks++;
    }
    dim3 grid(nBlocks, nBlocks, 1);
    dim3 block(sqrtThreads, sqrtThreads, 1);

    float *a_h, *b_h, *c_h, *a_d, *b_d, *c_d;
    int size;
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsed1;

    size = WIDTH * WIDTH * sizeof(float);
    a_h = (float*) malloc(size);
    b_h = (float*) malloc(size);
    c_h = (float*) malloc(size);
    for (int i = 0; i < WIDTH; i++)
    {
        for (int j = 0; j < WIDTH; j++)
        {
            a_h[i * WIDTH + j] = i;
            b_h[i * WIDTH + j] = i;
        }
    }

    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    // (No copy of c_h: the kernel now fully overwrites c_d.)

    // Time the kernel only.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    MatrixMul<<<grid, block>>>(a_d, b_d, c_d, WIDTH);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed1, start, stop);
    printf("%f\n", elapsed1/1000);

    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    free(a_h);
    free(b_h);
    free(c_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
910
#include <stdio.h>

// Abort-on-error wrapper for CUDA runtime calls.
#define CHECK(call)\
{\
	const cudaError_t error = call;\
	if (error != cudaSuccess)\
	{\
		fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
		fprintf(stderr, "code: %d, reason: %s\n", error,\
				cudaGetErrorString(error));\
		exit(EXIT_FAILURE);\
	}\
}

// RAII timer based on CUDA events. Start/Stop record on the legacy default
// stream; Elapsed blocks until the stop event completes and returns ms.
struct GpuTimer
{
	cudaEvent_t start;
	cudaEvent_t stop;

	GpuTimer()
	{
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
	}

	~GpuTimer()
	{
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
	}

	void Start()
	{
		cudaEventRecord(start,0);
		cudaEventSynchronize(start);
	}

	void Stop()
	{
		cudaEventRecord(stop, 0);
	}

	float Elapsed()
	{
		float elapsed;
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsed, start, stop);
		return elapsed;
	}
};

// Elementwise out[i] = in1[i] + in2[i] for i < n; bounds-checked.
__global__ void addVecKernel(int *in1, int *in2, int n, int *out)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
	{
		out[i] = in1[i] + in2[i];
	}
}

// Adds two n-int vectors, on the host (useDevice=false) or on the GPU with
// the work split across nStreams streams so H2D copy, kernel, and D2H copy
// of different chunks can overlap. Host buffers are pinned in place via
// cudaHostRegister so cudaMemcpyAsync is truly asynchronous.
void addVec(int *in1, int *in2, int n, int *out, bool useDevice=false, dim3 blockSize=dim3(1), int nStreams=1)
{
	if (useDevice == false)
	{
		for (int i = 0; i < n; i++)
		{
			out[i] = in1[i] + in2[i];
		}
	}
	else // Use device
	{
		cudaDeviceProp devProp;
		cudaGetDeviceProperties(&devProp, 0);
		printf("GPU name: %s\n", devProp.name);
		printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor);

		// Pin host memory regions (allocated by malloc)
		// so that we can use cudaMemcpyAsync
		size_t nBytes = n * sizeof(int);
		CHECK(cudaHostRegister(in1, nBytes, cudaHostRegisterDefault));
		CHECK(cudaHostRegister(in2, nBytes, cudaHostRegisterDefault));
		CHECK(cudaHostRegister(out, nBytes, cudaHostRegisterDefault));

		// TODO: Allocate device memory regions
		// VuMN: Compute gridSize;
		// NOTE(review): this grid covers ALL n elements, but each stream only
		// processes streamSize of them — the excess blocks just fail the
		// kernel's bounds check. Correct, but a per-stream grid of
		// (streamSize - 1) / blockSize.x + 1 would waste less work.
		dim3 gridSize((n - 1) / blockSize.x + 1);
		int *d_in1, *d_in2, *d_out;
		CHECK(cudaMalloc(&d_in1, nBytes));
		CHECK(cudaMalloc(&d_in2, nBytes));
		CHECK(cudaMalloc(&d_out, nBytes));

		// TODO: Create "nStreams" device streams
		// VuMN: Using a for-loop
		cudaStream_t *streams = (cudaStream_t *) malloc(nStreams * sizeof(cudaStream_t));
		for (int i = 0; i < nStreams; i++)
			CHECK(cudaStreamCreate(&streams[i]));

		GpuTimer timer;
		timer.Start();

		// TODO: Send jobs (H2D, kernel, D2H) to device streams
		// VuMN: size of each stream
		int streamSize = n / nStreams;
		// VuMN: remainder, to compensate to the last stream
		int remain = n % nStreams;
		// VuMN: number of bytes to to copy data (both to and from) for each stream
		size_t streamBytes = streamSize * sizeof(int);
		for (int i = 0; i < nStreams; ++i)
		{
			// VuMN: offset, the position to start copy data (both to and from) for each stream
			int offset = streamSize * i;
			// VuMN: when it comes to the last stream, do compensate
			// (streamSize/streamBytes are grown in place — safe because this
			// is the final iteration)
			if (i == nStreams - 1 && nStreams != 1)
			{
				streamBytes += remain * sizeof(int);
				streamSize += remain;
			}
			// VuMN: H2D
			CHECK(cudaMemcpyAsync(&d_in1[offset], &in1[offset], streamBytes, cudaMemcpyHostToDevice, streams[i]));
			CHECK(cudaMemcpyAsync(&d_in2[offset], &in2[offset], streamBytes, cudaMemcpyHostToDevice, streams[i]));
			// VuMN: kernel
			addVecKernel<<<gridSize, blockSize, 0, streams[i]>>>(&d_in1[offset], &d_in2[offset], streamSize, &d_out[offset]);
			// VuMN: D2H
			CHECK(cudaMemcpyAsync(&out[offset], &d_out[offset], streamBytes, cudaMemcpyDeviceToHost, streams[i]));
		}

		// NOTE(review): the stop event is recorded on the default stream —
		// presumably relying on legacy default-stream serialization with the
		// other streams so the measurement covers all of them; confirm the
		// file is not built with --default-stream per-thread.
		timer.Stop();
		float time = timer.Elapsed();
		printf("Processing time of all device streams: %f ms\n\n", time);

		// TODO: Destroy device streams
		for (int i = 0; i < nStreams; i++)
			CHECK(cudaStreamDestroy(streams[i]));
		free(streams);

		// TODO: Free device memory regions
		CHECK(cudaFree(d_in1));
		CHECK(cudaFree(d_in2));
		CHECK(cudaFree(d_out));

		// Unpin host memory regions
		CHECK(cudaHostUnregister(in1));
		CHECK(cudaHostUnregister(in2));
		CHECK(cudaHostUnregister(out));
	}
}

int main(int argc, char ** argv)
{
	int n;
	int *in1, *in2;
	int *out, *correctOut;

	// Input data into n (deliberately NOT a multiple of the block size,
	// to exercise the tail/remainder handling)
	n = (1 << 24) + 1;
	printf("n = %d\n\n", n);

	// Allocate memories for in1, in2, out
	size_t nBytes = n * sizeof(int);
	in1 = (int *)malloc(nBytes);
	in2 = (int *)malloc(nBytes);
	out = (int *)malloc(nBytes);
	/*
	CHECK(cudaMallocHost(&in1, nBytes));
	CHECK(cudaMallocHost(&in2, nBytes));
	CHECK(cudaMallocHost(&out, nBytes));
	*/
	correctOut = (int *)malloc(nBytes);

	// Input data into in1, in2
	for (int i = 0; i < n; i++)
	{
		in1[i] = rand() & 0xff; // Random int in [0, 255]
		in2[i] = rand() & 0xff; // Random int in [0, 255]
	}

	// Add in1 & in2 on host
	addVec(in1, in2, n, correctOut);

	// Add in1 & in2 on device
	dim3 blockSize(512); // Default
	int nStreams = 1; // Default
	if (argc >= 2)
	{
		blockSize.x = atoi(argv[1]);
		if (argc >= 3)
		{
			nStreams = atoi(argv[2]);
		}
	}
	addVec(in1, in2, n, out, true, blockSize, nStreams);

	// Check correctness
	for (int i = 0; i < n; i++)
	{
		if (out[i] != correctOut[i])
		{
			printf("INCORRECT :(\n");
			printf("i+1: %d, %d, %d\n", i + 1, out[i + 1], correctOut[i + 1]);
			printf("i: %d, %d, %d\n", i, out[i], correctOut[i]);
			printf("i-1: %d, %d, %d\n", i - 1, out[i - 1], correctOut[i - 1]);
			return 1;
		}
	}
	printf("CORRECT :)\n");

	free(in1);
	free(in2);
	free(out);
	/*
	CHECK(cudaFreeHost(in1));
	CHECK(cudaFreeHost(in2));
	CHECK(cudaFreeHost(out));
	*/
	free(correctOut);
}
911
/*
 * Speeding the program up with CUDA.
 * Variant: 7
 * Business logic: matrix-by-vector multiplication
 *
 * Input data is read from a file.
 * The program runs the business logic and writes the result to the output file.
 * At the end of the results file it records the computation time
 * and the size of the processed data.
 *
 * Run: nvcc cuda.cu -o cuda.out && \
        $PWD/cuda.out ./test_data/1mb ./results/cuda/1mb
 */

#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define DEBUG 0
#define LOG 1

void read_matrix(FILE *input_file, int *matrix, long matrix_size);
void read_vector(FILE *input_file, int *vector, long vector_length);
void print_vector(const int *vector, long vector_length);
__global__ void calc_answer(const int *matrix, const int *vector, int *answer, long vector_length);
void save_answer(FILE *output_file, const int *answer, long answer_length);

int main(int argc, char *argv[], char *argp[])
{
    // Input/output paths from argv, with built-in defaults.
    char *input_file_name;
    char *output_file_name;
    if (argc < 3)
    {
        input_file_name = "input_file";
        output_file_name = "output_file";
    }
    else
    {
        input_file_name = argv[1];
        output_file_name = argv[2];
    }
    if (LOG) printf("input file name: %s,\noutput file name: %s.\n\n", input_file_name, output_file_name);

    FILE *input_file = NULL;
    input_file = fopen(input_file_name, "r+");
    if (input_file == NULL)
    {
        printf("input file not found!");
        return -1;
    }

    // File format: matrix dimension, then matrix rows, then the vector.
    long matrix_size;
    fscanf(input_file, "%ld", &matrix_size);
    if (LOG) printf("matrix_size: %ld \n", matrix_size);
    int *matrix = (int *) calloc(matrix_size * matrix_size, sizeof(int));
    read_matrix(input_file, matrix, matrix_size);
    long vector_length = matrix_size;
    int *vector = (int *) calloc(vector_length, sizeof(int));
    read_vector(input_file, vector, vector_length);
    int *answer = (int *) calloc(vector_length, sizeof(int));

    int *dev_matrix, *dev_vector, *dev_answer;

    // Size the grid from the SM count of the active device.
    int deviceId;
    int numberOfSMs;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);

    cudaMalloc(&dev_matrix, matrix_size * matrix_size * sizeof(int));
    cudaMalloc(&dev_vector, vector_length * sizeof(int));
    cudaMalloc(&dev_answer, vector_length * sizeof(int));
    cudaMemcpy(dev_matrix, matrix, matrix_size * matrix_size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_vector, vector, vector_length * sizeof(int), cudaMemcpyHostToDevice);

    // Time the kernel only (copies are outside the event pair).
    cudaEvent_t begin, end;
    float time_spent_in_sec;
    cudaEventCreate(&begin);
    cudaEventCreate(&end);
    int number_of_blocks = numberOfSMs * 32;
    int threads_per_block = 256;
    cudaEventRecord(begin, 0);
    calc_answer<<<number_of_blocks, threads_per_block>>>(dev_matrix, dev_vector, dev_answer, vector_length);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&time_spent_in_sec, begin, end);
    time_spent_in_sec /= 1000;  // cudaEventElapsedTime reports ms

    cudaMemcpy(answer, dev_answer, vector_length * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_matrix);
    cudaFree(dev_vector);
    cudaFree(dev_answer);

    FILE *output_file = NULL;
    output_file = fopen(output_file_name, "w+");
    if (output_file == NULL)
    {
        printf("output file not found!");
        return -1;
    }
    save_answer(output_file, answer, vector_length);
    fprintf(output_file, "time: %f\n", time_spent_in_sec);
    if (LOG) printf("time_spent_in_sec: %f\n", time_spent_in_sec);

    // Report input volume (matrix + vector) in MB.
    long size_of_input_data = (long) ((matrix_size * matrix_size + matrix_size) * sizeof(int));
    double size_of_input_data_in_mb = (double) size_of_input_data / 1024 / 1024;
    fprintf(output_file, "size: %f\n", size_of_input_data_in_mb);
    if (LOG)
    {
        printf("matrix_size: %ld\n", matrix_size);
        printf("size_of_input_data: %ld\n", size_of_input_data);
        printf("size_of_input_data_in_mb: %f\n\n", size_of_input_data_in_mb);
    }

    fclose(input_file);
    fclose(output_file);
    free(matrix);
    free(vector);
    free(answer);
    return 0;
}

// Reads matrix_size^2 ints (row-major) from input_file.
void read_matrix(FILE *input_file, int *matrix, long matrix_size)
{
    if (DEBUG) printf("read_matrix:\n");
    for (long i = 0; i < matrix_size; i++)
    {
        for (long j = 0; j < matrix_size; j++)
        {
            fscanf(input_file, "%d", &matrix[i * matrix_size + j]);
            if (DEBUG) printf("%d ", matrix[i * matrix_size + j]);
        }
        if (DEBUG) printf("\n");
    }
    if (DEBUG) printf("\n");
}

// Reads vector_length ints from input_file.
void read_vector(FILE *input_file, int *vector, long vector_length)
{
    for (long i = 0; i < vector_length; i++)
    {
        fscanf(input_file, "%d", &vector[i]);
    }
    if (DEBUG) print_vector(vector, vector_length);
}

void print_vector(const int *vector, long vector_length)
{
    printf("read_vector:\n");
    for (long i = 0; i < vector_length; i++)
    {
        printf("%d ", vector[i]);
    }
    printf("\n\n");
}

// answer = matrix * vector. Grid-stride loop over rows: each thread
// accumulates whole rows, so any grid size is correct. Relies on answer
// being zero-initialized on the host (calloc) before the H2D copy...
// NOTE(review): answer is never copied H2D — the `+=` accumulates on top
// of uninitialized device memory; a cudaMemset of dev_answer (or a local
// accumulator) would be safer. Confirm against expected outputs.
__global__ void calc_answer(const int *matrix, const int *vector, int *answer, long vector_length)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (long i = tid; i < vector_length; i += stride)
    {
        for (long j = 0; j < vector_length; j++)
        {
            answer[i] += matrix[i * vector_length + j] * vector[j];
        }
    }
}

// Writes "result:" followed by the answer vector to output_file.
void save_answer(FILE *output_file, const int *answer, long answer_length)
{
    if (DEBUG) printf("save_answer:\n");
    fprintf(output_file, "result:\n");
    for (long i = 0; i < answer_length; i++)
    {
        fprintf(output_file, "%d ", answer[i]);
        if (DEBUG) printf("%d ", answer[i]);
    }
    fprintf(output_file, "\n");
    if (DEBUG) printf("\n\n");
}
912
// Computes one Mandelbrot-iteration count per thread for a
// disp_width x disp_height image mapped onto [-2.25, 1.25] x [-1.75, 1.75].
// array is column-major here: array[i * disp_height + j].
// Fix vs. the original: no bounds check existed, so when the 2-D grid was
// rounded up past the display dimensions, edge threads wrote out of bounds.
__global__ void mandelgpu(int disp_width, int disp_height, int *array, int max_iter)
{
    double scale_real, scale_imag;
    double x, y, u, v, u2, v2;
    int i, j, iter;

    scale_real = 3.5 / (double)disp_width;
    scale_imag = 3.5 / (double)disp_height;

    i = blockIdx.x * blockDim.x + threadIdx.x;
    j = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard: the grid rarely divides the image exactly.
    if (i >= disp_width || j >= disp_height)
        return;

    // Map pixel (i, j) to the complex point c = x + iy.
    x = ((double)i * scale_real) - 2.25;
    y = ((double)j * scale_imag) - 1.75;

    // Iterate z <- z^2 + c until |z|^2 >= 4 or the iteration cap is hit.
    u = 0.0;
    v = 0.0;
    u2 = 0.0;
    v2 = 0.0;
    iter = 0;
    while ( u2 + v2 < 4.0 && iter < max_iter )
    {
        v = 2 * v * u + y;
        u = u2 - v2 + x;
        u2 = u*u;
        v2 = v*v;
        iter = iter + 1;
    }

    // if we exceed max_iter, reset to zero
    iter = iter == max_iter ? 0 : iter;

    array[i*disp_height + j] = iter;
}
913
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>

// Unary functor: one sample's contribution (x - mean)^2 / N to the
// population variance, so a plain sum over all samples yields the variance.
struct variance{
    int N;
    double mean;
    variance(int n, double m): N(n), mean(m) {};
    __host__ __device__ double operator()(const double &x){
        double v = (x - mean) * (x - mean)/N;
        return v;
    }
};

// Reads (AAPL, MSFT) price pairs from stdin, then prints the mean and
// population variance of the per-row difference AAPL - MSFT.
int main() {
    int N = 0;
    thrust::host_vector<double> hostApple;
    thrust::host_vector<double> hostMicrosoft;

    // Fix vs. the original: it tested std::cin.fail() BEFORE extracting,
    // so the final failed read still pushed one garbage pair and N was
    // overcounted by one, skewing both mean and variance. Testing the
    // stream state of the extraction itself only keeps complete pairs.
    double aapl, msft;
    while (std::cin >> aapl >> msft) {
        N += 1;
        hostApple.push_back(aapl);
        hostMicrosoft.push_back(msft);
    }

    // Guard: no input means no statistics (avoids division by zero).
    if (N == 0) return 0;

    thrust::device_vector<double> AAPL(hostApple);
    thrust::device_vector<double> MSFT(hostMicrosoft);
    thrust::device_vector<double> diff(N);

    // diff = AAPL - MSFT, elementwise.
    thrust::transform(AAPL.begin(), AAPL.end(), MSFT.begin(), diff.begin(), thrust::minus<double>());

    double mean = thrust::reduce(diff.begin(), diff.end(), 0.0, thrust::plus<double>())/N;
    std::cout << "Media: " << mean << "\n";

    // Map each difference to its variance contribution, then sum.
    thrust::device_vector<double> vetorV(N);
    thrust::transform(diff.begin(), diff.end(), vetorV.begin(), variance(N,mean));
    double variancia = thrust::reduce(vetorV.begin(), vetorV.end(),0.0,thrust::plus<double>());
    std::cout << "Variancia: " << variancia << "\n";
}
914
#include "includes.h"

/////////////////////////////////////////////////////////
// Computes the 1-stencil using GPUs.
// We don't check for error here for brevity.
// In your implementation - you must do it!
#define BLOCK_SIZE 1024
#define WARP_SIZE 32
#ifndef k
#define k 3
#endif
#ifndef OUTPUT_PER_THREAD
#define OUTPUT_PER_THREAD 1
#endif
#define LOCAL_REGISTER_SIZE ((1+OUTPUT_PER_THREAD) > (k+31)/32 ? (1+OUTPUT_PER_THREAD) : (k+31)/32)
#ifndef TEST_TIMES
#define TEST_TIMES 5
#endif
float host_k_stencil (int *A, int *B, int sizeOfA, int withRc);

// 3-point stencil: B[i] = A[i] + A[i+1] + A[i+2] for i < sizeOfA - 2.
// Launch: 1D grid; dynamic shared memory of (blockDim.x + 2) * sizeof(int)
// bytes holds the block's tile plus a 2-element halo.
__global__ void one_stencil (int *A, int *B, int sizeOfA)
{
    extern __shared__ int s[];

    // Id of thread in the block.
    int localId = threadIdx.x;
    // The first index of output element computed by this block.
    int startOfBlock = blockIdx.x * blockDim.x;
    // The Id of the thread in the scope of the grid.
    int globalId = localId + startOfBlock;

    // Fix: the original returned early here for out-of-range threads,
    // making the __syncthreads() below divergent in the last (partial)
    // block -- undefined behavior. Out-of-range threads now skip the
    // loads/stores but still reach the barrier.
    if (globalId < sizeOfA)
        s[localId] = A[globalId];

    // The first two threads also fetch the 2-element halo past the tile.
    if (localId < 2 && blockDim.x + globalId < sizeOfA)
        s[blockDim.x + localId] = A[blockDim.x + globalId];

    // Every thread of the block must reach this barrier before reads.
    __syncthreads();

    // Each in-range thread computes a single output.
    if (globalId < sizeOfA - 2)
        B[globalId] = s[localId] + s[localId + 1] + s[localId + 2];
}
915
/**
 * Copyright © 2018 - 2019 Sergei Iurevich Filippov, All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file contains implementations and interfaces for cuda kernels. They are compiled to a standalone dynamic library
 * to be linked with D code later.
 */

const float uint_max_fp = 4294967295.0f; /// Maximum value of unsigned integer represented in floating point format.

/**
 * Returns a floating point value scaled from unsigned integer number x to a given segment [a; b]:
 * 0 maps to a and MAX(unsigned int) maps to b.
 *
 * Due to implementation details it is not recomended to pass a and b close to ±1.0e28 as that will
 * cause the function to return infinity.
 *
 * Params:
 *     x = Value to scale.
 *     a = Left bound.
 *     b = Right bound.
 */
__device__ float scale(const unsigned int x, const float a, const float b)
{
    return a + (b - a) * (float)x / uint_max_fp;
}

/**
 * Transforms uniformly distributed random bits in place into uniformly
 * distributed random floats in [a; b], a <= b: 0 maps to a and
 * MAX(unsigned int) to b. Taking raw uint bits as the randomness source
 * (instead of floats in [0; 1]) minimizes rounding error when scaling
 * to an arbitrary segment.
 *
 * Params:
 *     ptr   = Pointer to an array of random bits / resulting float values.
 *     a     = Left bound of the segment.
 *     b     = Right bound of the segment.
 *     count = Number of float values to scale.
 */
__global__ void kernel_scale(void *ptr, const float a, const float b, const size_t count)
{
    unsigned int *uPtr = (unsigned int*)ptr;
    float *fPtr = (float*)ptr;

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
        fPtr[i] = scale(uPtr[i], a, b);
}

/// ditto
__host__ void cuda_scale(void *ptr, const float a, const float b, const size_t count)
{
    // Fix: the grid size was computed with ceil-division by 1023 while
    // each block runs 1024 threads; (count + 1023) / 1024 is the correct
    // ceil(count / 1024). (The old value only over-provisioned blocks.)
    kernel_scale<<<(count + 1023) / 1024, 1024>>>(ptr, a, b, count);
}

/**
 * BLX-α crossover for real-coded problems. For every gene i the offspring
 * value is drawn uniformly from
 * [min(x_i, y_i) - α·d_i; max(x_i, y_i) + α·d_i], where d_i = |x_i - y_i|,
 * and then clamped to the search-space bounds [a; b].
 *
 * Params:
 *     x, y      = Parent arrays.
 *     offspring = Offspring array.
 *     a         = Minimal crossover value.
 *     b         = Maximal crossover value.
 *     alpha     = α parameter of BLX-α crossover. Must be >= 0; determines how
 *                 much to extend the search space (0 = no extension).
 *     u         = Pointer to an array of random bits; 0 maps to the left bound
 *                 of the extended range and uint max to the right bound.
 *     count     = Number of genes.
 */
__global__ void kernel_BLX_a(
    const float *x, const float *y, float *offspring,
    const float a, const float b, const float alpha,
    const unsigned int *u, const size_t count
)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
    {
        float _a = fminf(x[i], y[i]) - alpha * fabsf(x[i] - y[i]);
        float _b = fmaxf(x[i], y[i]) + alpha * fabsf(x[i] - y[i]);

        offspring[i] = scale(u[i], _a, _b);

        // Clamp to the search space limits.
        if (offspring[i] < a)
            offspring[i] = a;
        if (offspring[i] > b)
            offspring[i] = b;
    }
}

/// ditto
__host__ void cuda_BLX_a(
    const float *x, const float *y, float *offspring,
    const float a, const float b, const float alpha,
    const unsigned int *u, const size_t count
)
{
    // Fix: ceil-division divisor corrected from 1023 to 1024 (see cuda_scale).
    kernel_BLX_a<<<(count + 1023) / 1024, 1024>>>(x, y, offspring, a, b, alpha, u, count);
}

/**
 * Solve a quadratic equation a·x² + b·x + c = 0.
 * Assumes a != 0; a negative discriminant yields NaN roots.
 *
 * Params:
 *     x1, x2 = Roots of the equation.
 *     a = x^2 coefficient.
 *     b = x coefficient.
 *     c = Free coefficient.
 */
__device__ void quadratic(float &x1, float &x2, const float a, const float b, const float c)
{
    const float D = powf(b, 2) - 4.0f * a * c;
    x1 = (-b - sqrtf(D)) / 2.0f / a;
    x2 = (-b + sqrtf(D)) / 2.0f / a;
}

/**
 * Rank based parent selection.
 *
 * Similar to roulette-wheel selection, but the selection probability is
 * proportionate to the individual's overall rank instead of its raw
 * fitness. This lets low-fitness individuals breed more often, preserving
 * genetic diversity and slowing convergence: even a huge fitness gap will
 * not let a few best individuals take over the population, so the global
 * optimum is searched more thoroughly.
 *
 * Params:
 *     ranks  = Ranks selected based on scores.
 *     scores = Array of scores.
 */
__global__ void kernel_RBS(unsigned int *ranks, const float *scores, const size_t count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
    {
        float x1, x2; // Equation roots
        quadratic(x1, x2, 0.5f, 0.5f, -scores[i]);
        float max_root = fmaxf(x1, x2);
        // ceilf(x - 1.0f) is intentionally used instead of floorf(x):
        // they differ at integer values, e.g.
        // ceilf(2.0 - 1.0) = 1.0 while floorf(2.0) = 2.0.
        ranks[i] = (unsigned int)ceilf(max_root - 1.0f);
    }
}

/// ditto
__host__ void cuda_RBS(unsigned int *ranks, const float *scores, const size_t count)
{
    // Fix: ceil-division divisor corrected from 1023 to 1024 (see cuda_scale).
    kernel_RBS<<<(count + 1023) / 1024, 1024>>>(ranks, scores, count);
}

/**
 * Fill the array x on a GPU with the value val.
 *
 * Params:
 *     x     = A pointer to an array to fill.
 *     val   = A value to fill with.
 *     count = Number of elements to fill.
 */
__global__ void kernel_fill(float *x, const float val, const size_t count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
        x[i] = val;
}

/// ditto
__host__ void cuda_fill(float *x, const float val, const size_t count)
{
    // Fix: ceil-division divisor corrected from 1023 to 1024 (see cuda_scale).
    kernel_fill<<<(count + 1023) / 1024, 1024>>>(x, val, count);
}

/**
 * Per-vector Euclidean distance (L2 norm) of a vector array on GPU.
 * Vectors are stored component-major: component j of vector i lives at
 * x[i + count * j].
 *
 * Params:
 *     x     = A pointer to an array of vectors, `dim * count` elements.
 *     y     = A pointer to the resulting array of L2 norm values, `count` elements.
 *     dim   = Vectors dimension.
 *     count = Number of vectors in `x` / resulting values in `y`.
 */
__global__ void kernel_L2(const float *x, float *y, const unsigned int dim, const size_t count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
    {
        y[i] = 0;
        for (int j = 0; j < dim; ++j)
            y[i] += powf(x[i + count * j], 2);
        y[i] = sqrtf(y[i]);
    }
}

/// ditto
__host__ void cuda_L2(const float *x, float *y, const unsigned int dim, const size_t count)
{
    // Fix: ceil-division divisor corrected from 1023 to 1024 (see cuda_scale).
    kernel_L2<<<(count + 1023) / 1024, 1024>>>(x, y, dim, count);
}
916
#include <cuda.h>
#include <stdio.h>

void saxpy (float* X, float* Y, float* Z, int n);
float avg (float* arr, int n);

// Z[id] = a*X[id] + Y[id], one element per thread; out-of-range threads
// do nothing.
__global__ void saxpyKernel(float *x, float *y, float *z, float a, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        z[id] = a*x[id] + y[id];
}

// Fills two 1M-element vectors (x = 1, y = 2), runs SAXPY with a = 2 on
// the GPU and prints the average of the result (expected 4.0).
int main ()
{
    int N = 1<<20;
    int size = N*sizeof(float);

    // Host input and output vectors
    float *h_x, *h_y, *h_z;
    h_x = (float*)malloc(size);
    h_y = (float*)malloc(size);
    h_z = (float*)malloc(size);

    int i;
    for (i = 0; i < N; i++) {
        h_x[i] = 1.0;
        h_y[i] = 2.0;
    }

    // Perform SAXPY on 1M elements
    saxpy(h_x, h_y, h_z, N);
    printf("AVG = %f\n", avg(h_z, N));

    // free host memory
    free(h_x);
    free(h_y);
    free(h_z);
    return 0;
}

// Host wrapper: copies X and Y to the device, launches saxpyKernel with
// a = 2.0 and copies the result back into Z.
void saxpy (float* X, float* Y, float* Z, int n)
{
    // Device input and output vectors
    float *d_x, *d_y, *d_z;
    int size = n*sizeof(float);

    // Allocate device memory
    cudaMalloc((void**)&d_x, size);
    cudaMalloc((void**)&d_y, size);
    cudaError_t z_err = cudaMalloc((void**)&d_z, size);
    if (z_err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(z_err), __FILE__, __LINE__);
    }

    // Copy X and Y vectors to device
    cudaMemcpy(d_x, X, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, Y, size, cudaMemcpyHostToDevice);

    // number of threads per block
    int blockSize = 1024;
    // Fix: the truncating division n/blockSize dropped the tail elements
    // whenever n was not a multiple of 1024 (it only worked here because
    // 1<<20 divides evenly); use integer ceil-division instead.
    int gridSize = (n + blockSize - 1) / blockSize;

    saxpyKernel<<<gridSize, blockSize>>>(d_x, d_y, d_z, 2.0, n);

    // Copy z from device to host
    cudaMemcpy(Z, d_z, size, cudaMemcpyDeviceToHost);

    // free device memory
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
}

// Arithmetic mean of arr[0..n-1]. Uses a float accumulator: fine for
// these uniform values, lossy for large or widely varying inputs.
float avg (float* arr, int n)
{
    int i;
    float total = 0;
    for (i = 0; i < n; i++) {
        total += arr[i];
    }
    return total / n;
}
917
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <iostream>

// Host-side source matrix and result buffer (mat_width x mat_width,
// row-major), plus the device buffers they are mirrored into.
float cpu_array [25];
float cpu_output_array [25];
float *gpu_array_A;
float *gpu_array_B;
float *gpu_output_array;
const int mat_width = 5;

// One block of mat_width x mat_width threads covers the whole matrix.
dim3 dimBlock(mat_width, mat_width);
dim3 dimGrid(1, 1);

// Allocates the three device matrices and uploads cpu_array into both
// input operands (A and B start out with identical contents).
void initCuda(int width)
{
    cudaMalloc((void**)&gpu_array_A, width*width*sizeof(float));
    cudaMemcpy(gpu_array_A, cpu_array, width*width*sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpu_array_B, width*width*sizeof(float));
    cudaMemcpy(gpu_array_B, cpu_array, width*width*sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpu_output_array, width*width*sizeof(float));
}

// Element-wise matrix addition: Pd = Ad + Bd. One thread per element;
// assumes a single width x width block.
__global__ void mat_add (float* Ad, float* Bd, float* Pd, int width)
{
    int index = threadIdx.y * width + threadIdx.x;
    Pd[index] = Ad[index] + Bd[index];
}

// Element-wise matrix subtraction: Pd = Ad - Bd. Same layout as mat_add.
__global__ void mat_sub (float* Ad, float* Bd, float* Pd, int width)
{
    int index = threadIdx.y * width + threadIdx.x;
    Pd[index] = Ad[index] - Bd[index];
}

// Naive matrix multiply Pd = Ad * Bd (row-major). Each thread computes
// one output element from a row of Ad and a column of Bd.
__global__ void mat_mult (float* Ad, float* Bd, float* Pd, int width)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float Pvalue = 0;
    for (int k = 0; k < width; k++) {
        Pvalue += Ad[ty * width + k] * Bd[k * width + tx];
    }
    Pd[ty * width + tx] = Pvalue;
}

// CPU reference implementation of mat_add.
void cpu_mat_add (float* A, float* B, float* P, int width)
{
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            P[j * width + i] = A[j * width + i] + B[j * width + i];
        }
    }
}

// CPU reference implementation of mat_sub.
void cpu_mat_sub (float* A, float* B, float* P, int width)
{
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            P[j * width + i] = A[j * width + i] - B[j * width + i];
        }
    }
}

/***
 * CPU reference implementation of mat_mult.
 ***/
void cpu_mat_mult (float* A, float* B, float* P, int width)
{
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            float Psum = 0;
            for (int k = 0; k < width; k++) {
                Psum += A[j * width + k] * B[k * width + i];
            }
            P[j * width + i] = Psum;
        }
    }
}

/***
 * Simple helper function for printing a matrix.
***/ void printMatrix (float* M, int width) { for (int i = 0; i < width; i++) { for (int j = 0; j < width; j++) { std::cout << cpu_output_array[i * width + j] << " "; } std::cout << std::endl; } } int main(int argc, char** argv) { for (int i = 0; i < 25; i++) { cpu_array[i] = i; } initCuda(mat_width); mat_add<<<dimGrid, dimBlock>>>(gpu_array_A, gpu_array_B, gpu_output_array, mat_width); cudaMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), cudaMemcpyDeviceToHost); printMatrix(cpu_output_array, mat_width); cpu_mat_add(cpu_array, cpu_array, cpu_output_array, mat_width); printMatrix(cpu_output_array, mat_width); mat_sub<<<dimGrid, dimBlock>>>(gpu_array_A, gpu_array_B, gpu_output_array, mat_width); cudaMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), cudaMemcpyDeviceToHost); printMatrix(cpu_output_array, mat_width); cpu_mat_sub(cpu_array, cpu_array, cpu_output_array, mat_width); printMatrix(cpu_output_array, mat_width); mat_mult<<<dimGrid, dimBlock>>>(gpu_array_A, gpu_array_B, gpu_output_array, mat_width); cudaMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), cudaMemcpyDeviceToHost); printMatrix(cpu_output_array, mat_width); cpu_mat_mult(cpu_array, cpu_array, cpu_output_array, mat_width); printMatrix(cpu_output_array, mat_width); int a; std::cin>>a; }
918
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#define N 512

// In-place tree reduction: sums a[0..N-1] and writes the total to o[0].
// Must be launched as a single block of N/2 threads.
__global__ void Sum (int *a, int *o)
{
    int tid = blockDim.x*blockIdx.x+threadIdx.x;
    for (int i = N/2; i > 0; i = i/2) {
        if (tid < i) {
            a[tid] += a[tid+i];
        }
        // Fix: without this barrier each halving round raced against the
        // previous round's writes. The barrier sits outside the divergent
        // branch so every thread of the (single) block reaches it.
        __syncthreads();
    }
    // Fix: previously every thread stored o[0]; one writer suffices.
    if (tid == 0)
        o[0] = a[0];
}

// Replaces each element by its squared deviation from avg.
// NOTE(review): avg is an int, so the true mean of 1..512 (256.5)
// arrives truncated to 256, slightly biasing the result -- confirm this
// precision is acceptable for the exercise.
__global__ void standardDeviation(int *a, int avg)
{
    int tid = blockDim.x*blockIdx.x+threadIdx.x;
    if (tid < N) {
        a[tid] -= avg;
        a[tid] = a[tid]*a[tid];
    }
}

// Computes the standard deviation of the values 1..N on the GPU:
// sum -> mean -> squared deviations -> sum -> sqrt(variance).
int main()
{
    int *h_a, *d_a, *o_a, *oh_a, *d_a1;
    int size = N*sizeof(int);

    h_a = (int *)malloc(size);
    oh_a = (int *)malloc(size);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&o_a, size);
    cudaMalloc((void**)&d_a1, size);

    // Data set: 1, 2, ..., N.
    for (int i = 1; i <= N; i++) {
        h_a[i-1] = i;
    }

    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_a1, h_a, size, cudaMemcpyHostToDevice);

    // First reduction: total of the raw values -> arithmetic mean.
    Sum<<<1, N/2>>>(d_a, o_a);
    cudaDeviceSynchronize();
    cudaMemcpy(oh_a, o_a, size, cudaMemcpyDeviceToHost);
    int arithmetcMean = oh_a[0]/N;

    // Squared deviations, then their total.
    standardDeviation<<<1, N>>>(d_a1, arithmetcMean);
    Sum<<<1, N/2>>>(d_a1, o_a);
    cudaDeviceSynchronize();
    cudaMemcpy(oh_a, o_a, size, cudaMemcpyDeviceToHost);

    // Fix: the variance was truncated to an int before sqrt, discarding
    // its fractional part; keep it in double precision.
    double variance = (double)oh_a[0] / N;
    printf("Standard Deviation is - %.2f\n", sqrt(variance));

    cudaFree(d_a);
    free(h_a);
    cudaFree(o_a);
    free(oh_a);
    cudaFree(d_a1);
    return 0;
}
919
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
#include <pthread.h>

// Host-side mutex taken around the kernel launch in main(). NOTE(review):
// this only serializes host threads -- it has no effect on GPU execution.
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

// Compares `attempt` (a NUL-terminated 6-char candidate) against four
// hard-coded plaintext passwords. A match against any of them prints the
// password; only a match against the fourth also returns 1.
__device__ int is_a_match (char *attempt)
{
    char plain_password1[] = "SH1234";
    char plain_password2[] = "RE2345";
    char plain_password3[] = "EJ3456";
    char plain_password4[] = "AN4567";

    char *a = attempt;
    char *b = attempt;
    char *c = attempt;
    char *d = attempt;
    char *p1 = plain_password1;
    char *p2 = plain_password2;
    char *p3 = plain_password3;
    char *p4 = plain_password4;

    // Each loop walks as long as characters match; reaching the
    // terminator means a full match.
    while (*a == *p1) {
        if (*a == '\0') {
            printf ("Password: %s\n", plain_password1);
            break;
        }
        a++;
        p1++;
    }
    while (*b == *p2) {
        if (*b == '\0') {
            printf("Password: %s\n",plain_password2);
            break;
        }
        b++;
        p2++;
    }
    while (*c == *p3) {
        if (*c == '\0') {
            printf("Password: %s\n",plain_password3);
            break;
        }
        c++;
        p3++;
    }
    while (*d == *p4) {
        if (*d == '\0') {
            printf ("Password: %s\n", plain_password4);
            return 1;
        }
        d++;
        p4++;
    }
    return 0;
}

// Brute-force kernel: the grid encodes the two leading letters
// (blockIdx.x / threadIdx.x offset from 'A' = 65) and each thread tries
// all 10^4 four-digit suffixes.
__global__ void kernel ()
{
    char i1, i2, i3, i4;
    char password [7];
    password [6] = '\0';

    int i = blockIdx.x+65;
    int j = threadIdx.x+65;
    char firstMatch = i;
    char secondMatch = j;
    password [0] = firstMatch;
    password [1] = secondMatch;

    for (i1='0'; i1<='9'; i1++) {
        for (i2='0'; i2<='9'; i2++) {
            for (i3='0'; i3<='9'; i3++) {
                for (i4='0'; i4<='9'; i4++) {
                    password [2] = i1;
                    password [3] = i2;
                    password [4] = i3;
                    password [5] = i4;
                    if (is_a_match(password)) {
                    } else {
                        //printf ("tried: %s\n", password);
                    }
                }
            }
        }
    }
}

// Stores finish - start (nanoseconds) into *difference.
// Returns 0 when the difference is positive, 1 otherwise.
int time_difference (struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int ds = finish->tv_sec - start->tv_sec;
    long long int dn = finish->tv_nsec - start->tv_nsec;
    if (dn < 0) {
        ds--;
        dn += 1000000000;
    }
    *difference = ds * 1000000000 + dn;
    return !(*difference > 0);
}

// Times a full 26x26x10^4 brute-force run on the GPU.
int main ()
{
    struct timespec start, finish;
    long long int time_elapsed;

    clock_gettime(CLOCK_MONOTONIC, &start);

    pthread_mutex_lock(&mutex);
    kernel <<<26,26>>>();
    // Fix: cudaThreadSynchronize() has long been deprecated;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    pthread_mutex_unlock(&mutex);

    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
920
/** * Author: Zachariah Bryant * Description: Generates the average plaquette of a SU(2) lattice * vs equilibration steps for an unthermalized lattice. */ // ******************** // * Headers * // ******************** #include <iostream> #include <fstream> #include <string.h> #include "./Headers/LattiCuda.cuh" using namespace std; // ************************************ // * Definition of Variables * // ************************************ #define LATTSIZE 40 #define BETA 2 // ************************** // * Main Function * // ************************** int main() { LattiCuda model(LATTSIZE, BETA); fstream File; double temp; File.open("../Data/AvgPlaq_vs_Equilibration.dat", ios::out | ios::trunc); double avg{0}; for(int i = 0; i < 20; i++){ temp = model.avgPlaquette(); avg += temp; cout << "\nAvgPlaquette:\t" << temp << "\n"; File << i << " " << temp << "\n"; File.flush(); model.equilibrate(); } File.close(); cout << "Average: " << avg/200 <<"\n"; return 0; }
921
#include "includes.h"

// 256-bin histogram of an 8-bit image. Each block accumulates a private
// shared-memory histogram, then merges it into hist_out with one atomic
// add per bin.
// NOTE(review): the shared histogram is zeroed and flushed via
// threadIdx.x alone, so the kernel assumes blockDim.x == 256 (e.g. a
// 256x1 block) -- confirm against the launch configuration. Likewise,
// `id` is computed from the grid width rather than img_w, which assumes
// the grid's x extent exactly matches the image width.
__global__ void histogram( int * hist_out, unsigned char * img_in, int img_w,int img_h, int nbr_bin)
{
    int tx=threadIdx.x;
    int ty=threadIdx.y;
    int bx=blockIdx.x;
    int by=blockIdx.y;

    __shared__ int smem[256];
    smem[threadIdx.x]=0;
    __syncthreads();

    unsigned int col= tx + blockDim.x * bx;   // x -> image column
    unsigned int row= ty + blockDim.y * by;   // y -> image row
    int grid_width = gridDim.x * blockDim.x;
    int id = row * grid_width + col;

    // Fix: the bounds test compared row against the image *width* and
    // col against the *height*, which is only correct for square images.
    // col indexes columns (img_w) and row indexes rows (img_h).
    if (row < img_h && col < img_w)
        atomicAdd( &(smem[img_in[id]]) ,1);
    __syncthreads();

    // Merge this block's private histogram into the global one.
    atomicAdd(&(hist_out[threadIdx.x]),smem[threadIdx.x]);
}
922
#include "includes.h"

// Atomic-free partial centroid accumulation. Grid layout: blockIdx.x
// selects a pixel chunk of afLocal pixels, blockIdx.y selects a cluster,
// threadIdx.x selects a filter. Each thread sums its filter's responses
// over the pixels of its chunk that belong to its cluster, then writes
// the partial sum and count to a private output slot (no atomics needed;
// presumably a later pass reduces the per-chunk partials -- TODO confirm).
__global__ void findCentroidsAtomicFreeLocal(int afLocal, int* responses, int nPixels, int* cluster, int* centroidMass, unsigned int* centroidCount)
{
    int const af_id = blockIdx.x;       // pixel-chunk index
    int const cluster_id = blockIdx.y;  // cluster index
    int const filter_id = threadIdx.x;  // filter index

    // responses is laid out filter-major: filter_id rows of nPixels values.
    int* filter_responses = &responses[filter_id*nPixels];

    int local_responses = 0;
    int local_count = 0;

    // This chunk's pixel range; the last chunk is clamped to nPixels.
    int pixel_start = af_id*afLocal;
    int pixel_end = (af_id+1)*afLocal;
    pixel_end = pixel_end>nPixels?nPixels:pixel_end;
    for (int i=pixel_start; i<pixel_end; i++) {
        if (cluster[i] == cluster_id) {
            local_responses += filter_responses[i];
            local_count++;
        }
    }

    // Private output slot for this (chunk, filter, cluster) triple.
    // NOTE(review): the inner stride uses a hard-coded 64 where gridDim.y
    // (the number of clusters) appears intended -- slots collide unless
    // gridDim.y == 64; verify against the launch code.
    int idx = af_id * gridDim.y*blockDim.x + filter_id*64 + cluster_id;
    centroidMass[idx] = local_responses;
    centroidCount[idx] = local_count;
}
923
#include "includes.h"

// Sparse accumulate: for every nonzero k (row d_rowVec[k], column
// d_colVec[k], value d_val[k]) adds
//     d_val[k] * points[pointCount + pointsPerFile * d_colVec[k]]
// into d_umat2[nMets*index + d_rowVec[k]]. Grid-stride loop over the
// nnz nonzeros.
__global__ void findMaxAbs(int nRxns, double *d_umat2, int nMets, int *d_rowVec, int *d_colVec, double *d_val, int nnz, double *points, int pointsPerFile, int pointCount, int index)
{
    int newindex = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int k=newindex;k<nnz;k+=stride){
        // Fix: several nonzeros can share a row, so the original plain
        // "+=" raced between threads and dropped updates. atomicAdd on
        // double requires compute capability 6.0+ (SM60).
        atomicAdd(&d_umat2[nMets*index + d_rowVec[k]],
                  d_val[k] * points[pointCount + pointsPerFile*d_colVec[k]]);
    }
}
924
//gpu_bench.cu
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define CHECK_ERR(x) \
  if (x != cudaSuccess) { \
  fprintf(stderr,"%s in %s at line %d\n", \
  cudaGetErrorString(err),__FILE__,__LINE__); \
  exit(-1); \
  } \

unsigned long MAX_OPS = 20000000;

// Integer-throughput kernel: 24 independent integer chains (12 adds,
// 12 multiplies) per thread, iterated max_ops times.
// NOTE(review): no result is ever stored, so the compiler may
// dead-code-eliminate the loop -- verify the generated SASS (or store
// the values) before trusting the reported numbers.
__global__ void gpu_iops(unsigned long max_ops)
{
    int a=0; int b=0; int c=0; int d=0; int e=0; int f=0;
    int g=0; int h=0; int i=0; int j=0; int k=0; int l=0;
    int m=0; int n=0; int o=0; int p=0; int q=0; int r=0;
    int s=0; int t=0; int u=0; int v=0; int w=0; int x=0;
    for(unsigned long count=0; count<max_ops; count++) {
        a=a+1;  b=b+2;  c=c+3;  d=d+4;  e=e+5;  f=f+6;
        g=g+7;  h=h+8;  i=i+9;  j=j+10; k=k+11; l=l+12;
        m=m*13; n=n*14; o=o*15; p=p*16; q=q*17; r=r*18;
        s=s*19; t=t*20; u=u*21; v=v*22; w=w*23; x=x*24;
    }
}

// Floating-point analogue of gpu_iops (same dead-code caveat).
// Fix: the original used double literals (1.1, 2.2, ...), silently
// promoting every operation to double and distorting the single-
// precision FLOPS measurement; use f-suffixed literals.
__global__ void gpu_flops(unsigned long max_ops)
{
    float a=0.0f; float b=0.0f; float c=0.0f; float d=0.0f; float e=0.0f; float f=0.0f;
    float g=0.0f; float h=0.0f; float i=0.0f; float j=0.0f; float k=0.0f; float l=0.0f;
    float m=0.0f; float n=0.0f; float o=0.0f; float p=0.0f; float q=0.0f; float r=0.0f;
    float s=0.0f; float t=0.0f; float u=0.0f; float v=0.0f; float w=0.0f; float x=0.0f;
    for(unsigned long count=0; count<max_ops; count++) {
        a=a+1.1f;    b=b+2.2f;    c=c+3.3f;    d=d+4.4f;    e=e+5.5f;    f=f+6.6f;
        g=g+7.7f;    h=h+8.8f;    i=i+9.9f;    j=j+10.10f;  k=k+11.11f;  l=l+12.12f;
        m=m*13.13f;  n=n*14.14f;  o=o*15.15f;  p=p*16.16f;  q=q*17.17f;  r=r*18.18f;
        s=s*19.19f;  t=t*20.20f;  u=u*21.21f;  v=v*22.22f;  w=w*23.23f;  x=x*24.24f;
    }
}

// Parses -n (threads), -l (loop count) and -t (test kind I/F), then
// times one kernel launch and prints GOPS / GFLOPS.
int main(int argc, char *argv[])
{
    // Fix: getopt() returns int; a char cannot reliably hold/compare -1
    // (infinite or zero-trip loop on platforms where char is unsigned).
    int c;
    int threads = 1024;
    char test = 'I';

    while ( (c = getopt(argc, argv, "n:l:t:") ) != -1) {
        switch (c) {
            case 'n': threads = atoi(optarg); break;
            case 'l': MAX_OPS = atol(optarg); break;
            case 't': test = optarg[0]; break;
            default:
                printf("Usage: ./benchCPU -n [number of threads]\n");
                return -1;
        }
    }

    struct timeval tv;
    long long start, stop;
    double secs;

    // Fix: ceil(threads/1024) rounded *after* integer division, so any
    // threads < 1024 launched an empty (zero-block) grid, which fails.
    int blocks = (threads + 1023) / 1024;

    if(test == 'I') {
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        gpu_iops<<< blocks, 1024 >>>(MAX_OPS);
        cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        //printf("Time taken: %lf\n", secs);
        printf("I\t%lf\n", (MAX_OPS*24.*threads)/(secs*1000000000.));
    } else if(test == 'F') {
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        gpu_flops<<< blocks, 1024 >>>(MAX_OPS);
        cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        //printf("Time taken: %lf\n", secs);
        printf("FL\t%lf\n", (MAX_OPS*24.*threads)/(secs*1000000000.));
    }
}
925
#include "kernel.cuh"
#include <iostream>
#include <math.h>

#define TILE_SIZE 2

// Naive matrix multiply d_c = d_a * d_b (square, row-major).
// Fixes vs. the original: the inner product read d_b[row*size + col]
// (a constant element) instead of walking down column `col`, and it
// accumulated with "+=" into cudaMalloc'd memory, which is
// uninitialized. Accumulate in a register and store once instead.
__global__ void matrixMulGpuNonShared(float *d_a, float *d_b, float *d_c, const int size)
{
    int col = TILE_SIZE * blockIdx.x + threadIdx.x;
    int row = TILE_SIZE * blockIdx.y + threadIdx.y;

    float acc = 0.0f;
    for (int k = 0; k < size; k++) {
        acc += d_a[row * size + k] * d_b[k * size + col];
    }
    d_c[row * size + col] = acc;
}

// Tiled shared-memory matrix multiply d_c = d_a * d_b. Assumes size is
// a multiple of TILE_SIZE (as runMatrixMul guarantees).
// Fixes vs. the original: partial products were accumulated straight
// into uninitialized global memory, the tiles were consumed with
// transposed indices, and the barriers sat outside the tile loop,
// racing tile loads against reads.
__global__ void matrixMulGpu(float *d_a, float *d_b, float *d_c, const int size)
{
    __shared__ float shared_a[TILE_SIZE][TILE_SIZE];
    __shared__ float shared_b[TILE_SIZE][TILE_SIZE];

    int col = TILE_SIZE * blockIdx.x + threadIdx.x;
    int row = TILE_SIZE * blockIdx.y + threadIdx.y;

    float acc = 0.0f;
    for (int i = 0; i < size / TILE_SIZE; i++) {
        // Stage one tile of each operand in shared memory.
        shared_a[threadIdx.y][threadIdx.x] = d_a[row * size + (i * TILE_SIZE + threadIdx.x)];
        shared_b[threadIdx.y][threadIdx.x] = d_b[(i * TILE_SIZE + threadIdx.y) * size + col];
        __syncthreads();  // tiles fully loaded before anyone reads them

        for (int j = 0; j < TILE_SIZE; j++) {
            acc += shared_a[threadIdx.y][j] * shared_b[j][threadIdx.x];
        }
        __syncthreads();  // everyone done reading before the next load
    }
    d_c[row * size + col] = acc;
}

// Host driver: builds two 6x6 matrices (A[i][j] = i, B[i][j] = j),
// multiplies them on the GPU with the non-shared kernel, and prints the
// result.
void runMatrixMul()
{
    const int size = 6;
    float h_a[size][size], h_b[size][size], h_result[size][size];
    float *d_a, *d_b, *d_result;

    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            h_a[i][j] = i;
            h_b[i][j] = j;
        }
    }

    cudaMalloc((void **)&d_a, size * size * sizeof(float));
    cudaMalloc((void **)&d_b, size * size * sizeof(float));
    cudaMalloc((void **)&d_result, size * size * sizeof(float));
    cudaMemcpy(d_a, h_a, size * size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size * size * sizeof(float), cudaMemcpyHostToDevice);

    dim3 dimGrid(size / TILE_SIZE, size / TILE_SIZE, 1);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
    // matrixMulGpu<<<dimGrid, dimBlock>>>(d_a, d_b, d_result, size);
    matrixMulGpuNonShared<<<dimGrid, dimBlock>>>(d_a, d_b, d_result, size);

    cudaMemcpy(h_result, d_result, size * size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_result);

    std::cout << "The result of the Matrix multiplication is: " << std::endl;
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            std::cout << h_result[i][j] << " ";
        }
        std::cout << std::endl;
    }
}
926
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <cuda.h>

// Fixed geometry of David.ps and derived constants.
static int const height = 521, width = 428, maxLineLength = 200, maxHeaderSize = 5,
    maxX = width - 1, maxY = height - 1, arraySize = width * height * sizeof(int);

// Reads the PostScript header lines and the hex-encoded RGB pixel data
// of David.ps into the three per-channel arrays (column-indexed [x][y]).
void readInputFile (int h_R[width][height], int h_G[width][height], int h_B[width][height], char header[maxHeaderSize][maxLineLength], int *headerSize)
{
    unsigned int h1, h2, h3;
    int x = 0, y = 0;
    char *sptr, line[maxLineLength];
    FILE *fp;

    fp = fopen("David.ps", "r");
    if (fp == NULL) {  // Fix: fopen was never checked
        fprintf(stderr, "Cannot open David.ps\n");
        exit(1);
    }

    *headerSize = 0;
    // Fix: the original looped on !feof(fp), which only becomes true
    // *after* a read fails and therefore processed the final line twice.
    while (fscanf(fp, "\n%[^\n]", line) == 1) {
        if (*headerSize < 5) {
            strcpy((char *)header[(*headerSize)++], (char *)line);
        } else {
            // Every 6 hex characters encode one RGB pixel, row-major.
            for (sptr = &line[0]; *sptr != '\0'; sptr += 6) {
                sscanf(sptr,"%2x",&h1);
                sscanf(sptr+2,"%2x",&h2);
                sscanf(sptr+4,"%2x",&h3);
                if (x == width) { x = 0; y++; }
                if (y < height) {
                    h_R[x][y] = h1;
                    h_G[x][y] = h2;
                    h_B[x][y] = h3;
                }
                x++;
            }
        }
    }
    fclose(fp);
}

// Writes the header plus the blurred pixels to DavidBlur.ps,
// 12 pixels (72 hex characters) per output line.
void writeOutputFile (int h_R[width][height], int h_G[width][height], int h_B[width][height], char header[maxHeaderSize][maxLineLength], int headerSize)
{
    int linelen = 12, charPos = 0;
    FILE *fout;

    fout = fopen("DavidBlur.ps", "w");
    if (fout == NULL) {  // Fix: fopen was never checked
        fprintf(stderr, "Cannot open DavidBlur.ps for writing\n");
        exit(1);
    }
    for (int i = 0; i < headerSize; i++)
        fprintf(fout,"\n%s", header[i]);
    fprintf(fout,"\n");
    for(int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            fprintf(fout, "%02x%02x%02x", h_R[x][y], h_G[x][y], h_B[x][y]);
            if (++charPos == linelen) {
                fprintf(fout,"\n");
                charPos = 0;
            }
        }
    }
    fclose(fout);
}

// Allocates the six device-side channel buffers (3 input, 3 output).
void allocateDeviceMemory (int (**d_RIn)[width][height], int (**d_GIn)[width][height], int (**d_BIn)[width][height], int (**d_ROut)[width][height], int (**d_GOut)[width][height], int (**d_BOut)[width][height])
{
    cudaMalloc(d_RIn, arraySize);
    cudaMalloc(d_GIn, arraySize);
    cudaMalloc(d_BIn, arraySize);
    cudaMalloc(d_ROut, arraySize);
    cudaMalloc(d_GOut, arraySize);
    cudaMalloc(d_BOut, arraySize);
}

// Frees the six device-side channel buffers.
void freeDeviceMemory (int d_RIn[width][height], int d_GIn[width][height], int d_BIn[width][height], int d_ROut[width][height], int d_GOut[width][height], int d_BOut[width][height])
{
    cudaFree(d_RIn);
    cudaFree(d_GIn);
    cudaFree(d_BIn);
    cudaFree(d_ROut);
    cudaFree(d_GOut);
    cudaFree(d_BOut);
}

// Uploads the three host channels into the device input buffers.
void copyMemoryToDevice (int h_R[width][height], int h_G[width][height], int h_B[width][height], int d_RIn[width][height], int d_GIn[width][height], int d_BIn[width][height])
{
    cudaMemcpy(d_RIn, h_R, arraySize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_GIn, h_G, arraySize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_BIn, h_B, arraySize, cudaMemcpyHostToDevice);
}

// Downloads the device output buffers back into the host channels.
void copyMemoryFromDevice (int d_ROut[width][height], int d_GOut[width][height], int d_BOut[width][height], int h_R[width][height], int h_G[width][height], int h_B[width][height])
{
    cudaMemcpy(h_R, d_ROut, arraySize, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_G, d_GOut, arraySize, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_BOut, arraySize, cudaMemcpyDeviceToHost);
}

// One blur pass: each pixel becomes the integer average of its 4-connected
// in-bounds neighbours (2 at corners, 3 on edges, 4 in the interior).
// Equivalent to the original's nine explicit edge/corner branches.
__global__ void blurKernel (int d_RIn[width][height], int d_GIn[width][height], int d_BIn[width][height], int d_ROut[width][height], int d_GOut[width][height], int d_BOut[width][height])
{
    int x = blockIdx.x * blockDim.x + threadIdx.x,
        y = blockIdx.y * blockDim.y + threadIdx.y;

    // Fix: the grid is rounded up to whole 16x16 blocks, so threads past
    // the image edge previously fell into the generic "interior" branch
    // and read/wrote out of bounds.
    if (x > maxX || y > maxY)
        return;

    int sumR = 0, sumG = 0, sumB = 0, n = 0;
    if (x > 0)    { sumR += d_RIn[x-1][y]; sumG += d_GIn[x-1][y]; sumB += d_BIn[x-1][y]; n++; }
    if (x < maxX) { sumR += d_RIn[x+1][y]; sumG += d_GIn[x+1][y]; sumB += d_BIn[x+1][y]; n++; }
    if (y > 0)    { sumR += d_RIn[x][y-1]; sumG += d_GIn[x][y-1]; sumB += d_BIn[x][y-1]; n++; }
    if (y < maxY) { sumR += d_RIn[x][y+1]; sumG += d_GIn[x][y+1]; sumB += d_BIn[x][y+1]; n++; }

    d_ROut[x][y] = sumR / n;
    d_GOut[x][y] = sumG / n;
    d_BOut[x][y] = sumB / n;
}

// Prints the wall-clock duration of each phase of the run.
void outputTimingResults (struct timeval t1, struct timeval t2, struct timeval t3, struct timeval t4, struct timeval t5, struct timeval t6, struct timeval t7, struct timeval t8)
{
    // Convert times to seconds
    double t1_s = t1.tv_sec + t1.tv_usec / 1000000.0,
           t2_s = t2.tv_sec + t2.tv_usec / 1000000.0,
           t3_s = t3.tv_sec + t3.tv_usec / 1000000.0,
           t4_s = t4.tv_sec + t4.tv_usec / 1000000.0,
           t5_s = t5.tv_sec + t5.tv_usec / 1000000.0,
           t6_s = t6.tv_sec + t6.tv_usec / 1000000.0,
           t7_s = t7.tv_sec + t7.tv_usec / 1000000.0,
           t8_s = t8.tv_sec + t8.tv_usec / 1000000.0;

    // Calculate intervals between times
    double t1t2_s = t2_s - t1_s,
           t2t3_s = t3_s - t2_s,
           t3t4_s = t4_s - t3_s,
           t4t5_s = t5_s - t4_s,
           t5t6_s = t6_s - t5_s,
           t6t7_s = t7_s - t6_s,
           t7t8_s = t8_s - t7_s;

    // Print final timings
    printf("Read Input File: %f\n", t1t2_s);
    printf("Allocate Device Memory: %f\n", t2t3_s);
    printf("Copy Memory to Device: %f\n", t3t4_s);
    printf("Blur: %f\n", t4t5_s);
    printf("Copy Memory from Device: %f\n", t5t6_s);
    printf("Free Memory on Device: %f\n", t6t7_s);
    printf("Write Output File: %f\n", t7t8_s);
}

// Reads David.ps, blurs it nblurs times on the GPU (argv[1] = nblurs),
// writes DavidBlur.ps, and prints per-phase timings.
int main (int argc, const char * argv[])
{
    // Record the time at different points in execution
    struct timeval t1, t2, t3, t4, t5, t6, t7, t8;
    char header[maxHeaderSize][maxLineLength];

    // Fix: argv[1] was dereferenced without checking argc.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <number of blurs>\n", argv[0]);
        return 1;
    }

    int nblurs = atoi(argv[1]),
        gridWidth, gridHeight, headerSize, blockWidth, blockHeight,
        h_R[width][height], h_G[width][height], h_B[width][height],
        // Use pointers to allow swapping input and output arrays
        // in-between blurs without moving memory around
        (*swap)[width][height],
        (*d_RIn)[width][height], (*d_GIn)[width][height], (*d_BIn)[width][height],
        (*d_ROut)[width][height], (*d_GOut)[width][height], (*d_BOut)[width][height];

    gettimeofday(&t1, NULL);
    readInputFile(h_R, h_G, h_B, header, &headerSize);
    gettimeofday(&t2, NULL);
    allocateDeviceMemory(&d_RIn, &d_GIn, &d_BIn, &d_ROut, &d_GOut, &d_BOut);
    gettimeofday(&t3, NULL);
    copyMemoryToDevice(h_R, h_G, h_B, *d_RIn, *d_GIn, *d_BIn);
    gettimeofday(&t4, NULL);

    blockWidth = 16;
    blockHeight = 16;
    gridWidth = ceil((double)width / blockWidth);
    gridHeight = ceil((double)height / blockHeight);
    dim3 dimGrid(gridWidth, gridHeight, 1);
    dim3 dimBlock(blockWidth, blockHeight, 1);

    // nblurs passed as commandline argument to avoid interfering with timing.
    // Do first blur without swapping output and input pointers
    blurKernel<<<dimGrid, dimBlock>>>(*d_RIn, *d_GIn, *d_BIn, *d_ROut, *d_GOut, *d_BOut);
    for (int i = 1; i < nblurs; i++) {
        // Swap input and output between blurs
        swap = d_RIn; d_RIn = d_ROut; d_ROut = swap;
        swap = d_GIn; d_GIn = d_GOut; d_GOut = swap;
        swap = d_BIn; d_BIn = d_BOut; d_BOut = swap;
        blurKernel<<<dimGrid, dimBlock>>>(*d_RIn, *d_GIn, *d_BIn, *d_ROut, *d_GOut, *d_BOut);
    }
    gettimeofday(&t5, NULL);

    copyMemoryFromDevice(*d_ROut, *d_GOut, *d_BOut, h_R, h_G, h_B);
    gettimeofday(&t6, NULL);
    freeDeviceMemory(*d_RIn, *d_GIn, *d_BIn, *d_ROut, *d_GOut, *d_BOut);
    gettimeofday(&t7, NULL);
    writeOutputFile(h_R, h_G, h_B, header, headerSize);
    gettimeofday(&t8, NULL);

    outputTimingResults(t1, t2, t3, t4, t5, t6, t7, t8);
}
927
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int are_vectors_equal(int* a, int* b, int n);

/* Abort with a readable message if a CUDA runtime call failed. */
static void check_cuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error during %s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* The old-fashioned CPU-only way to add two vectors */
void add_vectors_host(int *result, int *a, int *b, int n)
{
    for (int i = 0; i < n; i++)
        result[i] = a[i] + b[i];
}

/* The kernel that will execute on the GPU.
 * One thread per element; the bounds check protects the tail threads of the
 * last block when n is not a multiple of the block size. */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}

/* This function encapsulates the process of creating and tearing down the
 * environment used to execute our vector addition kernel. The steps of the
 * process are:
 *   1. Allocate memory on the device to hold our vectors
 *   2. Copy the vectors to device memory
 *   3. Execute the kernel
 *   4. Retrieve the result vector from the device by copying it to the host
 *   5. Free memory on the device
 * Every runtime call is checked; the blocking device-to-host copy in step 4
 * also synchronizes with the kernel, so execution errors surface there. */
void add_vectors_dev(int *result, int *a, int *b, int n)
{
    int *a_dev, *b_dev, *result_dev;
    size_t bytes = sizeof(int) * (size_t)n;

    /* Step 1: Allocate memory. cudaMalloc returns a status, not a pointer,
     * so we pass the address of our device-pointer variables. */
    check_cuda(cudaMalloc((void **)&a_dev, bytes), "cudaMalloc a_dev");
    check_cuda(cudaMalloc((void **)&b_dev, bytes), "cudaMalloc b_dev");
    check_cuda(cudaMalloc((void **)&result_dev, bytes), "cudaMalloc result_dev");

    /* Step 2: Copy the input vectors to the device */
    check_cuda(cudaMemcpy(a_dev, a, bytes, cudaMemcpyHostToDevice), "copy a");
    check_cuda(cudaMemcpy(b_dev, b, bytes, cudaMemcpyHostToDevice), "copy b");

    /* Step 3: Invoke the kernel. 512-thread blocks, ceil-div grid so all n
     * elements are covered; the exact block size is somewhat arbitrary but
     * safely within hardware limits. */
    dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
    dim3 dimBlock(512, 1, 1);
    add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
    check_cuda(cudaGetLastError(), "kernel launch");

    /* Step 4: Retrieve the results (blocking copy = implicit sync) */
    check_cuda(cudaMemcpy(result, result_dev, bytes, cudaMemcpyDeviceToHost),
               "copy result");

    /* Step 5: Free device memory */
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(result_dev);
}

/* Print the first n ints of array on one line. */
void print_vector(int *array, int n)
{
    int i;
    for (i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

int main(void)
{
    int n = 5; // Length of the demo arrays
    int a[] = {0, 1, 2, 3, 4};
    int b[] = {5, 6, 7, 8, 9};
    int host_result[5];
    int device_result[5];
    int l, i;
    int *rand_a, *rand_b, *rand_host_result, *rand_device_result;
    clock_t start, stop;
    double gpu_time, cpu_time;

    printf("Please enter vector length: ");
    /* Validate the interactive input instead of using garbage on failure. */
    if (scanf("%d", &l) != 1 || l <= 0) {
        fprintf(stderr, "Invalid vector length\n");
        return EXIT_FAILURE;
    }

    rand_a = (int *)malloc(sizeof(int) * l);
    rand_b = (int *)malloc(sizeof(int) * l);
    rand_host_result = (int *)malloc(sizeof(int) * l);
    rand_device_result = (int *)malloc(sizeof(int) * l);
    if (!rand_a || !rand_b || !rand_host_result || !rand_device_result) {
        fprintf(stderr, "Out of host memory\n");
        return EXIT_FAILURE;
    }

    printf("The CPU's answer: ");
    add_vectors_host(host_result, a, b, n);
    print_vector(host_result, n);

    printf("The GPU's answer: ");
    add_vectors_dev(device_result, a, b, n);
    print_vector(device_result, n);

    printf("Generating vectors of length %d... \n", l);
    for (i = 0; i < l; ++i) {
        rand_a[i] = rand() % 10;
        rand_b[i] = rand() % 10;
    }

    start = clock();
    add_vectors_host(rand_host_result, rand_a, rand_b, l);
    stop = clock();
    cpu_time = (double)(stop - start) / CLOCKS_PER_SEC;

    start = clock();
    add_vectors_dev(rand_device_result, rand_a, rand_b, l);
    stop = clock();
    gpu_time = (double)(stop - start) / CLOCKS_PER_SEC;

    printf("CPU compute time: %f", cpu_time);
    printf("\n");
    printf("GPU compute time: %f", gpu_time);
    printf("\n");
    printf("Ratio: %f", cpu_time / gpu_time);
    printf("\n");

    if (!are_vectors_equal(rand_host_result, rand_device_result, l)) {
        printf("WARNING! Host and device results do not agree");
    }

    /* Free everything we allocated (the originals leaked the result buffers). */
    free(rand_a);
    free(rand_b);
    free(rand_host_result);
    free(rand_device_result);
    return 0;
}

/* Return 1 if vectors a and b are equal element-wise, else return 0. */
int are_vectors_equal(int* a, int* b, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        if (a[i] != b[i])
            return 0;
    }
    return 1;
}
928
//pass //--blockDim=1024 --gridDim=1 --no-inline #include <cuda.h> #include <stdio.h> #define N 2 //1024 __global__ void definitions (int* A, unsigned int* B) { atomicSub(A,10); atomicSub(B,5); }
929
#include "BaseNode.cuh" BaseNode::BaseNode() { } BaseNode::~BaseNode() { } __device__ double BaseNode::sigmoid(double input) { return 1 / (1 + exp(-input)); } __device__ double BaseNode::sigmoidPrime(double input) { return sigmoid(input) * (1 - sigmoid(input)); } __device__ double BaseNode::activationFunction(double input) { return tanh(input); } __device__ double BaseNode::activationFunctionPrime(double input) { return (1 - (tanh(input) * tanh(input))); }
930
#include <iostream> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <stdio.h> __global__ void Run_Me( int* The_Array , int size) { int ID = blockIdx.x; if(ID < 4) The_Array[ID] = The_Array[ID] * The_Array[ID]; } void Test( thrust::device_vector<int> &A , void(*f)(int*,int) ) { int * GG = thrust::raw_pointer_cast(&A[0]); std::cout<<"Stalling"<<std::endl; dim3 Block ( 4 , 1); (*f)<<<Block,1>>>(GG, 4); std::cout<<"this is the silliest thing, I have every done"<<std::endl; } int main() { thrust::host_vector<int> C(4); C[0] = 1; C[1] = 2; C[2] = 3; C[3] = 4; std::cout<<"RUnning"<<std::endl; thrust::device_vector<int> A = C; Test(A, Run_Me); for(int i = 0 ; i< 4 ;i++) { std::cout << A[i] <<std::endl; } char wait; std::cin >> wait; }
931
#include "includes.h" __global__ void profileSubphaseTruncateP_kernel() {}
932
#include <cuda.h> #include <cuda_runtime_api.h> #include <stdlib.h> #include <stdio.h> #include <string.h> // checkCUDAError ------------------------------------------------------------- // Convience method to check for cuda errors. // @param msg - Unique identifier to help debug. // // From Dr Dobbs "CUDA: Supercomputing for the masses, Part 3" // http://drdobbs.com/architecture-and-design/207200659 //----------------------------------------------------------------------------- void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
933
#include <iostream>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <cassert>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>

/**
 * This kernel essentially serves as a "srand(seed)" on the GPU:
 * each thread gets its own curand state from a fixed seed, one state
 * per cell, so runs are reproducible.
 */
__global__ void setup_kernel(curandState * state){
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int id = x + (blockDim.x * gridDim.x * y);
    curand_init(420+69, id, 0, &state[id]);
}

/**
 * Executes one timestep for this thread's cell: reads the 8-neighbour sum
 * from inGrid and writes the new state to outGrid (double buffering).
 * The board wraps at the edges (toroidal topology).
 */
__global__ void update(int * inGrid, int * outGrid, curandState * rand_state){
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int N = blockDim.x * gridDim.x; // board side length; launch covers N*N cells
    int id = x + (N * y);

    // Random draw in [0, 500). BUG FIX: the old code cast the unsigned draw
    // to int *before* the modulo, producing values in (-499, 499) — that made
    // "randres == 0" twice as unlikely as intended and let "randres % 5" go
    // negative in the state-2 branch. Take the modulo while still unsigned.
    unsigned int tmprandres = curand(&rand_state[id]);
    int randres = (int)(tmprandres % 500u);

    // Wrapped neighbour coordinates. BUG FIX: in C, (x-1) % N is negative
    // when x == 0, which indexed out of bounds before; add N first.
    int xm = (x - 1 + N) % N;
    int xp = (x + 1) % N;
    int ym = (y - 1 + N) % N;
    int yp = (y + 1) % N;

    int state = inGrid[id];

    // Sum of the 8 neighbouring states (Moore neighbourhood).
    int index = 0;
    index += inGrid[xm + N*y];
    index += inGrid[xp + N*y];
    index += inGrid[x + N*ym];
    index += inGrid[x + N*yp];
    index += inGrid[xp + N*ym];
    index += inGrid[xm + N*ym];
    index += inGrid[xp + N*yp];
    index += inGrid[xm + N*yp];

    // Transition rules (unchanged): 0..3 are the valid cell states.
    if(state == 0){
        if(randres == 0) state = 2;           // rare spontaneous birth
        else if(index < 7) state = 0;
        else if(index < 17) state = 1;
        else state = 3;
    }
    else if(state == 1){
        if(randres == 0 || index > 16) state = 3;
        else if(index < 1) state = 0;
        else state = 1;
    }
    else if(state == 2){
        if(randres % 5 < 2) state = 2;
        else if(randres % 5 < 4) state = 1;
        else state = 0;
    }
    else if(state == 3){
        if(index > 9){
            state = 3;
        }
        else state = 1;
    }
    else printf("ERROR: (%d, %d) state out of bounds: %d\n", x, y, state);

    outGrid[id] = state;
}

// Copy the device board into `grid` and print it row by row.
void print_grid(int * grid, int* source_d, int N)
{
    cudaMemcpy(grid, source_d, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int y = 0; y < N; y++){
        for(int x = 0; x < N; x++){
            printf("%d ", grid[x + N*y]);
        }
        printf("\n");
    }
    return;
}

// Copy the device board into `grid` and dump it to "test.fbgm" as raw ints.
void write_to_file(int * grid, int * source_d, int N, int t)
{
    cudaMemcpy(grid, source_d, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    FILE * binary = fopen("test.fbgm", "wb");
    if (binary == NULL) {
        fprintf(stderr, "could not open test.fbgm for writing\n");
        return;
    }
    // BUG FIX: sizeof(grid) is the size of the *pointer* (8 bytes), so the
    // old code wrote only the first two cells; write the whole N*N board.
    fwrite(grid, sizeof(int), (size_t)N * (size_t)N, binary);
    fclose(binary);
}

int main(int argc, char * argv[]){
    if(argc < 4){
        printf("usage: ./simCuda <sidelength> <timesteps> <block divisor for each dimention>\n");
        exit(1);
    }

    // arguments (board and GPU dimensions)
    const int N = atoi(argv[1]);
    const int t = atoi(argv[2]);
    const int common_divisor = atoi(argv[3]);

    if(N <= 0 || t < 0 || common_divisor <= 0){
        printf("sidelength and divisor must be positive, timesteps non-negative\n");
        exit(1);
    }
    if(N % common_divisor){
        printf("Try again with an even divisor of your sidelength\n");
        exit(1);
    }

    //*******************************************************************
    //****************************GRID SETUP*****************************
    //*******************************************************************
    int blockw = N/common_divisor;

    // Host staging buffer, zero-initialised.
    int * blankGrid = (int*) calloc(N*N, sizeof(int));

    // Double-buffered device boards.
    int * evenGrid_d;
    int * oddGrid_d;
    cudaMalloc((void**) &evenGrid_d, N*N*sizeof(int));
    cudaMalloc((void**) &oddGrid_d, N*N*sizeof(int));

    // Zero both device grids ONCE. (The old code repeated these two copies
    // inside an N*N nested loop — N*N redundant full-board transfers.)
    cudaMemcpy(evenGrid_d, blankGrid, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(oddGrid_d, blankGrid, N*N*sizeof(int), cudaMemcpyHostToDevice);

    //*******************************************************************
    //***************************KERNEL CALLS****************************
    //*******************************************************************
    dim3 BLOCK_ARRANGEMENT(common_divisor,common_divisor,1);
    dim3 BLOCK_SHAPE(blockw,blockw,1);

    // Per-cell RNG state.
    curandState * states_d;
    cudaMalloc((void**) &states_d, N*N*sizeof(curandState));
    setup_kernel<<<BLOCK_ARRANGEMENT,BLOCK_SHAPE>>>(states_d);

    // One update per timestep, ping-ponging between the two boards.
    // Odd steps read oddGrid_d and write evenGrid_d; even steps the reverse.
    for(int i = 1; i < t+1; i++){
        printf("t=%d\n",i);
        if(i%2==0){
            update<<<BLOCK_ARRANGEMENT,BLOCK_SHAPE>>>(evenGrid_d, oddGrid_d, states_d);
        }
        else{
            update<<<BLOCK_ARRANGEMENT,BLOCK_SHAPE>>>(oddGrid_d, evenGrid_d, states_d);
        }
    }

    // BUG FIX: the final board is in oddGrid_d only when t is even; the old
    // code always read oddGrid_d and so showed a stale board for odd t.
    int * finalGrid = (t % 2 == 0) ? oddGrid_d : evenGrid_d;
    print_grid(blankGrid, finalGrid, N);
    write_to_file(blankGrid, finalGrid, N, t);

    //*******************************************************************
    //*****************************CLEANUP*******************************
    //*******************************************************************
    cudaFree(states_d);
    cudaFree(evenGrid_d);
    cudaFree(oddGrid_d);
    free(blankGrid);
    return 0;
}
934
#include "includes.h"

//Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy on the training data set is 99.70 and on the test data set is 99.14.
//This model implements the following layes in order- 2DConvolution---->Maxpooling---->2D Convolution---->Maxpooling---->Fully_connected layer---->Fully_connected layer.
//The image is a 28*28 greyscale image. The specifications of the layers are as follows:
//Layer_0: Convolution: 32 3*3 kernels with no padding and 1 stride.
//Layer_1: Maxpooling: 2*2 filters with with no padding and 1 stride.
//Layer_2: Convolution: 64 3*3 kernels with no padding and 1 stride.
//Layer_3: Maxpooling: 2*2 filters with with no padding and 1 stride.
//Layer_4: Flattening
//Layer_5: Fully connected / dense layer with 1024 output units.
//Layer_6: Dropout (done during training only).
//Layer_7: Fully connected / dense layer with 10 output units.
//All arrays and matrices are designed to be row ordered in this implementation.

// Convolution kernel: each thread computes ONE output element (row, col, deep)
// by multiply-accumulating its k_h*k_w*k_d input patch against kernel number
// `deep`, then adding that kernel's bias and applying ReLU.
//
// Layout (row-major, flattened): input is h*w*d with plane stride h*w;
// weights are num_kernels blocks of k_h*k_w*k_d; output is op_h*op_w*op_d
// with plane stride op_h*op_w, where op_d equals the number of kernels.
// Assumes a 3-D launch covering at least op_w x op_h x op_d threads;
// no padding, stride 1 (so op_h = h - k_h + 1 etc. — per the header above).
__global__ void convolution_kernel(int h, int w, int d, double* gpu_in, int k_h, int k_w, int k_d, double* kernel_weights, double* kernel_biases, int num_kernels, int op_h, int op_w, int op_d, double* gpu_out)
{
    //Identifying threads by their IDs.
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    int deep = blockDim.z *blockIdx.z + threadIdx.z;

    //Return if thread out of bounds
    if (row >= op_h || col >= op_w || deep >= op_d) return;

    double out=0.0;
    // Linear cursor through kernel `deep`'s weights; advances in the same
    // order the loops walk the input patch (depth-major, then row, then col).
    int kernel_pointer = 0;

    //Each thread/each output node identifies the corresponding element in the matrix that it is responsible to multiply-add.
    for (int depth_pointer = 0; depth_pointer < k_d; depth_pointer++) {
        for (int row_pointer = 0; row_pointer < k_h; row_pointer++) {
            for (int column_pointer = 0; column_pointer < k_w; column_pointer++) {
                // (row*w + col) is the patch's top-left corner in the current
                // input plane; h*w*depth_pointer selects the plane.
                out += gpu_in[((row*w + col) + row_pointer * w + column_pointer + h * w*depth_pointer)] * kernel_weights[kernel_pointer + deep * k_h*k_w*k_d];
                kernel_pointer++;
            }
        }
    }

    //Bias addition and relu activation. One bias is applied to one output image layer, since one bias is applicable to one kernel.
    //Relu activation : relu(a)=max(0,a). If the value is less than 0 then it becomes 0, else it is retained.
    if (out + kernel_biases[deep] < 0.0) gpu_out[row*op_w + col + deep * op_h*op_w] = 0.0l;
    else gpu_out[row*op_w + col + deep * op_h*op_w] = out + kernel_biases[deep];
}
935
/* Copyright (c) 2001-2018, The Ohio State University. All rights
 * reserved.
 *
 * This file is part of the MVAPICH2 software package developed by the
 * team members of The Ohio State University's Network-Based Computing
 * Laboratory (NBCL), headed by Professor Dhabaleswar K. (DK) Panda.
 *
 * For detailed copyright and licensing information, please refer to the
 * copyright file COPYRIGHT in the top level MVAPICH2 directory.
 */

#define MPI_ORDER_C 56
#define MPI_ORDER_FORTRAN 57

/* Runtime tunables (defined elsewhere) for block/grid shaping. */
extern int rdma_cuda_vec_thread_blksz;
extern int rdma_cuda_vec_thread_ysz;
extern int rdma_cuda_subarr_thread_blksz;
extern int rdma_cuda_subarr_thread_xdim;
extern int rdma_cuda_subarr_thread_ydim;
extern int rdma_cuda_subarr_thread_zdim;

struct iovec {
    void *iov_base; /* Pointer to data. */
    size_t iov_len; /* Length of data. */
};

/*
 * Sub-array pack/unpack kernels. Each thread handles one element of the
 * sub_nx * sub_ny * sub_nz sub-array; (h_x, h_y, h_z) is the sub-array's
 * offset inside the full nx * ny * nz array. The _c_ variants use C
 * (row-major) layout, the _f_ variants Fortran (column-major) layout.
 * "pack" gathers full-array -> contiguous buffer; "unpack" scatters back.
 */

__global__ void pack_subarray_c_double(double *dst, double *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[(i * sub_ny + j) * sub_nz + k] =
            src[((i + h_x) * ny + (j + h_y)) * nz + (k + h_z)];
}

__global__ void unpack_subarray_c_double(double *dst, double *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[((i + h_x) * ny + (j + h_y)) * nz + (k + h_z)] =
            src[(i * sub_ny + j) * sub_nz + k];
}

__global__ void pack_subarray_f_double(double *dst, double *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[i + sub_nx * (j + sub_ny * k)] =
            src[(i + h_x) + nx * ((j + h_y) + ny * (k + h_z))];
}

__global__ void unpack_subarray_f_double(double *dst, double *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[(i + h_x) + nx * ((j + h_y) + ny * (k + h_z))] =
            src[i + sub_nx * (j + sub_ny * k)];
}

__global__ void pack_subarray_c_float(float *dst, float *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[(i * sub_ny + j) * sub_nz + k] =
            src[((i + h_x) * ny + (j + h_y)) * nz + (k + h_z)];
}

__global__ void unpack_subarray_c_float(float *dst, float *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[((i + h_x) * ny + (j + h_y)) * nz + (k + h_z)] =
            src[(i * sub_ny + j) * sub_nz + k];
}

__global__ void pack_subarray_f_float(float *dst, float *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[i + sub_nx * (j + sub_ny * k)] =
            src[(i + h_x) + nx * ((j + h_y) + ny * (k + h_z))];
}

__global__ void unpack_subarray_f_float(float *dst, float *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[(i + h_x) + nx * ((j + h_y) + ny * (k + h_z))] =
            src[i + sub_nx * (j + sub_ny * k)];
}

__global__ void pack_subarray_c_char(char *dst, char *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[(i * sub_ny + j) * sub_nz + k] =
            src[((i + h_x) * ny + (j + h_y)) * nz + (k + h_z)];
}

__global__ void unpack_subarray_c_char(char *dst, char *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[((i + h_x) * ny + (j + h_y)) * nz + (k + h_z)] =
            src[(i * sub_ny + j) * sub_nz + k];
}

__global__ void pack_subarray_f_char(char *dst, char *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[i + sub_nx * (j + sub_ny * k)] =
            src[(i + h_x) + nx * ((j + h_y) + ny * (k + h_z))];
}

__global__ void unpack_subarray_f_char(char *dst, char *src,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (i < sub_nx && j < sub_ny && k < sub_nz)
        dst[(i + h_x) + nx * ((j + h_y) + ny * (k + h_z))] =
            src[i + sub_nx * (j + sub_ny * k)];
}

/*
 * Host launcher: pack a sub-array into a contiguous buffer on `stream`.
 * sub_order selects C vs Fortran layout; el_size (1/4/8 bytes) selects the
 * char/float/double kernel. Block shape defaults to 8x8x16 and may be
 * overridden by the rdma_cuda_subarr_* tunables (powers of two only).
 */
extern "C" void pack_subarray(void *dst, void *src, int dim,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z, int sub_order, int el_size,
        cudaStream_t stream)
{
    int bsx = 8, bsy = 8, bsz = 16;
    int max_threads = 1024;

    /* Apply tunable overrides only when they are non-zero powers of two. */
    if (rdma_cuda_subarr_thread_xdim != 0 &&
        !(rdma_cuda_subarr_thread_xdim & (rdma_cuda_subarr_thread_xdim - 1)))
        bsx = rdma_cuda_subarr_thread_xdim;
    if (dim > 1 && rdma_cuda_subarr_thread_ydim != 0 &&
        !(rdma_cuda_subarr_thread_ydim & (rdma_cuda_subarr_thread_ydim - 1)))
        bsy = rdma_cuda_subarr_thread_ydim;
    if (dim > 2 && rdma_cuda_subarr_thread_zdim != 0 &&
        !(rdma_cuda_subarr_thread_zdim & (rdma_cuda_subarr_thread_zdim - 1)))
        bsz = rdma_cuda_subarr_thread_zdim;
    if (rdma_cuda_subarr_thread_blksz != 0 &&
        !(rdma_cuda_subarr_thread_blksz & (rdma_cuda_subarr_thread_blksz - 1)))
        max_threads = (rdma_cuda_subarr_thread_blksz < 1024)
                          ? rdma_cuda_subarr_thread_blksz : 1024;

    /* Fall back to known-good shapes if the override exceeds the budget. */
    if (3 == dim && bsx * bsy * bsz > max_threads) {
        bsx = 8;   bsy = 8;  bsz = 16;
    } else if (2 == dim && bsx * bsy > max_threads) {
        bsx = 16;  bsy = 32; bsz = 1;
    } else if (1 == dim) {
        bsx = 256; bsy = 4;  bsz = 1;
    }

    dim3 dimblock(bsx, bsy, bsz);
    dim3 dimgrid((sub_nx + bsx - 1) / bsx,
                 (sub_ny + bsy - 1) / bsy,
                 (sub_nz + bsz - 1) / bsz);

    if (MPI_ORDER_C == sub_order) {
        switch (el_size) {
        case 4:
            pack_subarray_c_float<<<dimgrid, dimblock, 0, stream>>>(
                (float *)dst, (float *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 1:
            pack_subarray_c_char<<<dimgrid, dimblock, 0, stream>>>(
                (char *)dst, (char *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 8:
            pack_subarray_c_double<<<dimgrid, dimblock, 0, stream>>>(
                (double *)dst, (double *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        }
    } else if (MPI_ORDER_FORTRAN == sub_order) {
        switch (el_size) {
        case 4:
            pack_subarray_f_float<<<dimgrid, dimblock, 0, stream>>>(
                (float *)dst, (float *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 1:
            pack_subarray_f_char<<<dimgrid, dimblock, 0, stream>>>(
                (char *)dst, (char *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 8:
            pack_subarray_f_double<<<dimgrid, dimblock, 0, stream>>>(
                (double *)dst, (double *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        }
    }
}

/*
 * Host launcher: scatter a contiguous buffer back into a sub-array on
 * `stream`. Mirror image of pack_subarray; same shaping rules.
 */
extern "C" void unpack_subarray(void *dst, void *src, int dim,
        int nx, int ny, int nz, int sub_nx, int sub_ny, int sub_nz,
        int h_x, int h_y, int h_z, int sub_order, int el_size,
        cudaStream_t stream)
{
    int bsx = 8, bsy = 8, bsz = 16;
    int max_threads = 1024;

    /* Apply tunable overrides only when they are non-zero powers of two. */
    if (rdma_cuda_subarr_thread_xdim != 0 &&
        !(rdma_cuda_subarr_thread_xdim & (rdma_cuda_subarr_thread_xdim - 1)))
        bsx = rdma_cuda_subarr_thread_xdim;
    if (dim > 1 && rdma_cuda_subarr_thread_ydim != 0 &&
        !(rdma_cuda_subarr_thread_ydim & (rdma_cuda_subarr_thread_ydim - 1)))
        bsy = rdma_cuda_subarr_thread_ydim;
    if (dim > 2 && rdma_cuda_subarr_thread_zdim != 0 &&
        !(rdma_cuda_subarr_thread_zdim & (rdma_cuda_subarr_thread_zdim - 1)))
        bsz = rdma_cuda_subarr_thread_zdim;
    if (rdma_cuda_subarr_thread_blksz != 0 &&
        !(rdma_cuda_subarr_thread_blksz & (rdma_cuda_subarr_thread_blksz - 1)))
        max_threads = (rdma_cuda_subarr_thread_blksz < 1024)
                          ? rdma_cuda_subarr_thread_blksz : 1024;

    /* Fall back to known-good shapes if the override exceeds the budget. */
    if (3 == dim && bsx * bsy * bsz > max_threads) {
        bsx = 8;   bsy = 8;  bsz = 16;
    } else if (2 == dim && bsx * bsy > max_threads) {
        bsx = 16;  bsy = 32; bsz = 1;
    } else if (1 == dim) {
        bsx = 256; bsy = 4;  bsz = 1;
    }

    dim3 dimblock(bsx, bsy, bsz);
    dim3 dimgrid((sub_nx + bsx - 1) / bsx,
                 (sub_ny + bsy - 1) / bsy,
                 (sub_nz + bsz - 1) / bsz);

    if (MPI_ORDER_C == sub_order) {
        switch (el_size) {
        case 4:
            unpack_subarray_c_float<<<dimgrid, dimblock, 0, stream>>>(
                (float *)dst, (float *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 1:
            unpack_subarray_c_char<<<dimgrid, dimblock, 0, stream>>>(
                (char *)dst, (char *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 8:
            unpack_subarray_c_double<<<dimgrid, dimblock, 0, stream>>>(
                (double *)dst, (double *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        }
    } else if (MPI_ORDER_FORTRAN == sub_order) {
        switch (el_size) {
        case 4:
            unpack_subarray_f_float<<<dimgrid, dimblock, 0, stream>>>(
                (float *)dst, (float *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 1:
            unpack_subarray_f_char<<<dimgrid, dimblock, 0, stream>>>(
                (char *)dst, (char *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        case 8:
            unpack_subarray_f_double<<<dimgrid, dimblock, 0, stream>>>(
                (double *)dst, (double *)src, nx, ny, nz,
                sub_nx, sub_ny, sub_nz, h_x, h_y, h_z);
            break;
        }
    }
}

/*
 * Strided-vector copy kernels: copy `height` rows of `width` elements from a
 * source with row stride `spitch` to a destination with row stride `dpitch`
 * (all counts in elements). blockIdx/threadIdx.x walks rows, .y walks
 * elements within a row.
 */

__global__ void pack_unpack_vector_double(double *dst, int dpitch,
        double *src, int spitch, int width, int height)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < height && col < width)
        dst[row * dpitch + col] = src[row * spitch + col];
}

__global__ void pack_unpack_vector_float(float *dst, int dpitch,
        float *src, int spitch, int width, int height)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < height && col < width)
        dst[row * dpitch + col] = src[row * spitch + col];
}

__global__ void pack_unpack_vector_char(char *dst, int dpitch,
        char *src, int spitch, int width, int height)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < height && col < width)
        dst[row * dpitch + col] = src[row * spitch + col];
}

/*
 * Host launcher for the strided-vector copy. width/dpitch/spitch are byte
 * counts here; the widest element type (double, float, then char) that
 * divides all three is used so each thread moves as many bytes as possible.
 */
extern "C" void pack_unpack_vector_kernel(void *dst, int dpitch, void *src,
        int spitch, int width, int height, cudaStream_t stream)
{
    int dtsize;
    int elems;
    int BLOCK_SIZE_Y, BLOCK_THREAD_SIZE;

    if ((0 == (width % sizeof(double))) && (0 == (dpitch % sizeof(double))) &&
        (0 == (spitch % sizeof(double)))) {
        dtsize = sizeof(double);
    } else if ((0 == (width % sizeof(float))) && (0 == (dpitch % sizeof(float))) &&
               (0 == (spitch % sizeof(float)))) {
        dtsize = sizeof(float);
    } else {
        dtsize = sizeof(char);
    }
    elems = width / dtsize;

    /* Y block dimension: tunable override (power of two), otherwise the
     * next power of two >= elems, capped at 32. */
    if (rdma_cuda_vec_thread_ysz != 0 &&
        !(rdma_cuda_vec_thread_ysz & (rdma_cuda_vec_thread_ysz - 1))) {
        BLOCK_SIZE_Y = rdma_cuda_vec_thread_ysz;
    } else {
        switch (elems) {
        case 1:
            BLOCK_SIZE_Y = 1;
            break;
        case 2:
            BLOCK_SIZE_Y = 2;
            break;
        case 3: case 4:
            BLOCK_SIZE_Y = 4;
            break;
        case 5: case 6: case 7: case 8:
            BLOCK_SIZE_Y = 8;
            break;
        case 9: case 10: case 11: case 12:
        case 13: case 14: case 15: case 16:
            BLOCK_SIZE_Y = 16;
            break;
        default:
            BLOCK_SIZE_Y = 32;
            break;
        }
    }

    if (rdma_cuda_vec_thread_blksz != 0 &&
        !(rdma_cuda_vec_thread_blksz & (rdma_cuda_vec_thread_blksz - 1))) {
        BLOCK_THREAD_SIZE = ((rdma_cuda_vec_thread_blksz < 1024)
                                 ? rdma_cuda_vec_thread_blksz : 1024);
    } else {
        BLOCK_THREAD_SIZE = 1024;
    }

    int BLOCK_SIZE_X = BLOCK_THREAD_SIZE / BLOCK_SIZE_Y;
    dim3 dimblock(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
    dim3 dimgrid((height + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X,
                 (elems + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, 1);

    /* Dispatch on the element width chosen above (counts converted from
     * bytes to elements for the typed kernels). */
    if (dtsize == (int)sizeof(double)) {
        pack_unpack_vector_double<<<dimgrid, dimblock, 0, stream>>>(
            (double *)dst, dpitch / sizeof(double),
            (double *)src, spitch / sizeof(double),
            width / sizeof(double), height);
    } else if (dtsize == (int)sizeof(float)) {
        pack_unpack_vector_float<<<dimgrid, dimblock, 0, stream>>>(
            (float *)dst, dpitch / sizeof(float),
            (float *)src, spitch / sizeof(float),
            width / sizeof(float), height);
    } else {
        pack_unpack_vector_char<<<dimgrid, dimblock, 0, stream>>>(
            (char *)dst, dpitch / sizeof(char),
            (char *)src, spitch / sizeof(char),
            width / sizeof(char), height);
    }
}
936
// // Created by Peter Rigole on 2019-05-24. // #include "Timer.cuh" Timer::Timer() : beg_(clock_::now()) {} void Timer::reset() { beg_ = clock_::now(); } double Timer::elapsed() const { return std::chrono::duration_cast<second_> (clock_::now() - beg_).count(); }
937
#include "includes.h"

// For each vertex i, counts how many vertices j (including i itself) have a
// byte-identical adjacency list in the concatenated list matrix, then
// atomically increments the histogram slot for that count.
//
// d_lcmMatrix  - all adjacency lists concatenated back to back
// d_LCMSize    - running end offsets: vertex v's list occupies
//                [d_LCMSize[v-1], d_LCMSize[v]) (with an implicit 0 for v==0)
// d_histogram  - output histogram; slot c is incremented once per vertex whose
//                match count is c (must hold at least n_vertices+1 ints --
//                TODO confirm allocation at the call site)
// n_vertices   - number of vertices
//
// Launch: 1-D grid with at least n_vertices threads in total.
__global__ void OPT_4_HIST(int *d_lcmMatrix, int *d_LCMSize, int *d_histogram, int n_vertices)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n_vertices) {
        // Bounds of vertex i's list.
        int iStart = (i > 0) ? d_LCMSize[i - 1] : 0;
        int iSize  = d_LCMSize[i] - iStart;

        int count = 0;
        // Compare against every vertex (O(n_vertices * list length) per thread).
        for (int j = 0; j < n_vertices; j++) {
            int jStart = (j > 0) ? d_LCMSize[j - 1] : 0;
            int jSize  = d_LCMSize[j] - jStart;

            // Lists of different lengths can never match.
            if (iSize != jSize)
                continue;

            int eq = 1;
            for (int k = 0; k < iSize; k++) {
                if (d_lcmMatrix[iStart + k] != d_lcmMatrix[jStart + k]) {
                    eq = 0;
                    break;
                }
            }
            if (eq == 1)
                count++;
        }

        // Fix: dropped the dead local `countMax` (computed, never used) and the
        // redundant (int*) cast -- the address is already an int*.
        // Many threads can land on the same slot, so the increment is atomic.
        atomicAdd(&d_histogram[count], 1);
    }
}
938
#include <stdio.h>

// Trivial kernel: store a + b into the device location c.
__global__ void add( int a, int b, int *c ){
    *c = a + b;
}

// Adds 2 + 8 on the GPU and prints the result.
int main(void){
    int c;
    int *dev_c;   // pointer to result slot in device memory

    cudaMalloc( (void**)&dev_c, sizeof(int) );

    // A single thread suffices for a scalar addition.
    add<<<1, 1>>>( 2, 8, dev_c);

    // Blocking copy; also synchronizes with the kernel launch above.
    cudaMemcpy( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);

    // BUG FIX: message previously read "2 + 7" but the kernel adds 2 + 8.
    printf( "2 + 8 = %d\n", c );

    cudaFree( dev_c );
    return 0;
}
939
/** \file */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <time.h>
#include <sys/time.h>
#include <stdint.h>

int nextPower(int);
void die(const char *);
void warn(const char *);
void read_from_file(int *, char *, int);
void write_to_file(int *, char *, int);

/**
 * play - Plays the game for one step.
 * First, counts the neighbors; the board wraps around (modular indexing),
 * so boundary cells see cells from the opposite edge.
 * Then, acts on the rules.
 * Updates need to happen all together, so results are written to d_new
 * while X is only read.
 *
 * Launch: 2-D grid covering at least N x N threads; out-of-range threads
 * are filtered by the i<N && j<N guard.
 */
__global__ void play(int *X, int *d_new, int N){
    int i = (blockIdx.x*blockDim.x)+threadIdx.x;
    int j = (blockIdx.y*blockDim.y)+threadIdx.y;
    int up, down, left, right;

    if( i<N && j<N){
        /* wrap-around neighbor indices (torus topology) */
        up    = ((i-1)+N)%N;
        down  = (i+1)%N;
        left  = ((j-1)+N)%N;
        right = (j+1)%N;

        int sum = X[N*up+left]    + /* i-1, j-1 */
                  X[N*up+j]       + /* i-1, j   */
                  X[N*up+right]   + /* i-1, j+1 */
                  X[N*i+left]     + /* i,   j-1 */
                  X[N*i+right]    + /* i,   j+1 */
                  X[N*down+left]  + /* i+1, j-1 */
                  X[N*down+j]     + /* i+1, j   */
                  X[N*down+right];  /* i+1, j+1 */

        /* act based on rules */
        if(X[i*N+j] == 0 && sum == 3 ){
            d_new[i*N+j] = 1;          /* born */
        }else if ( X[i*N+j] == 1 && (sum < 2 || sum>3 ) ){
            d_new[i*N+j] = 0;          /* dies - loneliness or overpopulation */
        }else{
            d_new[i*N+j] = X[i*N+j];   /* nothing changes */
        }
    }
    return;
}

/**
 * main - plays the game of life for t steps according to the rules:
 * - A dead(0) cell with exactly 3 living neighbors becomes alive (birth)
 * - A dead(0) cell with any other number of neighbors stays dead (barren)
 * - A live(1) cell with 0 or 1 living neighbors dies (loneliness)
 * - A live(1) cell with 4 or more living neighbors dies (overpopulation)
 * - A live(1) cell with 2 or 3 living neighbors stays alive (survival)
 */
int main(int argc, char **argv){
    /* sanity check for input */
    if(argc != 5){
        printf("Usage: %s filename size t threads, where:\n", argv[0]);
        printf("\tfilename is the input file \n");
        printf("\tsize is the grid side and \n");
        printf("\tt generations to play\n");
        printf("\t threadsXthreads per block\n");
        die("Wrong arguments");
    }

    /* declarations */
    char *filename = argv[1];
    int N     = atoi(argv[2]);
    int t     = atoi(argv[3]);
    int thrds = atoi(argv[4]);
    int gen = 0;

    int *table = (int *)malloc(N*N*sizeof(int));
    if (!table) die("Couldn't allocate memory to table");

    /* read input */
    read_from_file(table, filename, N);

    /* smallest power of 2 >= N, so the grid divides evenly by block size */
    int Npow2 = nextPower(N);

    /* CUDA - timing */
    float gputime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* CUDA - split board into thrds x thrds squares */
    dim3 threadsPerBlock(thrds, thrds);
    dim3 numBlocks(Npow2/threadsPerBlock.x, Npow2/threadsPerBlock.y);

    /* CUDA - copy input to device */
    int *d_table;
    cudaMalloc(&d_table, N*N*sizeof(int));
    int *d_new;
    cudaMalloc(&d_new, N*N*sizeof(int));

    cudaEventRecord(start, 0);
    cudaMemcpy(d_table, table, N*N*sizeof(int), cudaMemcpyHostToDevice);

    /* CUDA - play game for t generations, ping-ponging the two buffers */
    for(gen=0; gen<t; gen++){
        if(gen%2==0){
            play<<<numBlocks, threadsPerBlock>>>(d_table /*data*/, d_new /*temp*/, N);
        }else{
            play<<<numBlocks, threadsPerBlock>>>(d_new /*data*/, d_table /*temp*/, N);
        }
        /* default-stream kernels already serialize; kept so each generation
           is provably finished before the next launch */
        cudaDeviceSynchronize();
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gputime, start, stop);
    printf("[%d]\t %g \n",gen, gputime/1000.0f);

    /* CUDA - copy data from device: last write went to d_new on odd t */
    if(t%2==1){
        cudaMemcpy(table, d_new, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    }else{
        cudaMemcpy(table, d_table, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    }

    write_to_file(table, filename, N);

    /* fix: events were previously leaked */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(table);
    cudaFree(d_new);
    cudaFree(d_table);
    return 0;
}

/**
 * die - display an error and terminate.
 * Used when some fatal error happens
 * and continuing would mess things up.
 */
void die(const char *message){
    if(errno){
        perror(message);
    }else{
        printf("Error: %s\n", message);
    }
    exit(1);
}

/**
 * warn - display a warning and continue
 * used when something didn't go as expected
 */
void warn(const char *message){
    if(errno){
        perror(message);
    }else{
        printf("Warning: %s\n", message);
    }
    return;
}

/**
 * read_from_file - read N*N integer values from an appropriate file.
 * Saves the game's board into array X for use by other functions
 * Warns or kills the program if something goes wrong
 */
void read_from_file(int *X, char *filename, int N){
    FILE *fp = fopen(filename, "r+");
    /* BUG FIX: the original called fread(fp) before checking fp for NULL,
       dereferencing a null pointer when the file could not be opened. */
    if(!fp) die("Couldn't open file to read");
    int size = fread(X, sizeof(int), N*N, fp);
    if(!size) die("Couldn't read from file");
    if(N*N != size) warn("Expected to read different number of elements");
    fclose(fp);
    return;
}

/**
 * write_to_file - write N*N integer values to a binary file.
 * Saves game's board from array X to the file
 * Names the file cuda_tableNxN.bin, so the input file is not overwritten
 */
void write_to_file(int *X, char *filename, int N){
    char newfilename[100];
    sprintf(newfilename, "cuda_table%dx%d.bin", N, N);
    FILE *fp;
    int size;
    if( ! ( fp = fopen(newfilename, "w+") ) )
        die("Couldn't open file to write");
    if( ! (size = fwrite(X, sizeof(int), N*N, fp)) )
        die("Couldn't write to file");
    if (size != N*N) warn("Expected to write different number of elements");
    fclose(fp);
    return;
}

/**
 * nextPower - return the smallest power of 2 that is >= N
 * (returns N itself when N is already a power of two).
 */
int nextPower(int N){
    int n=0;
    while(1){
        if(1<<n < N){
            n++;
        }else{
            return 1<<n;
        }
    }
}
940
/**********************************************************************
 * DESCRIPTION:
 *   Serial Concurrent Wave Equation - C Version
 *   This program implements the concurrent wave equation
 **********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAXPOINTS 1000000
#define MAXSTEPS  1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);

int nsteps,    /* number of time steps       */
    tpoints,   /* total points along string  */
    rcode;     /* generic return code        */
float *values, /* values at time t           */
      *oldval, /* values at time t-dt        */
      *newval; /* values at time t+dt        */

/*********************************************************************
 * Checks input value from parameter; re-prompts until valid.
 *********************************************************************/
void check_param(void)
{
    char tchar[20];

    /* check number of points, number of iterations */
    while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: ",
               MINPOINTS, MAXPOINTS);
        scanf("%s", tchar);
        tpoints = atoi(tchar);
        if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }

    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/**********************************************************************
 * Initialize points on line: allocate the three host arrays and seed
 * values with one period of a sine wave; oldval starts equal to values.
 **********************************************************************/
void init_line(void)
{
    int i, j;
    float x, fac, k, tmp;

    /* allocate array memory on host (two halo slots beyond tpoints) */
    size_t size = (tpoints+2)*sizeof(float);
    values = (float*)malloc(size);
    newval = (float*)malloc(size);
    oldval = (float*)malloc(size);

    /* Calculate initial values based on sine curve. */
    fac = 2.0 * PI;
    k = 0.0;
    tmp = tpoints - 1;
    for (j = 0; j < tpoints; j++) {
        x = k/tmp;
        values[j] = sin(fac * x);
        k = k + 1.0;
    }

    /* Initialize old values array */
    for (i = 0; i < tpoints; i++)
        oldval[i] = values[i];
}

/**********************************************************************
 * Calculate new value at point i using the discretized wave equation.
 **********************************************************************/
void do_math(int i)
{
    float dtime, c, dx, tau, sqtau;

    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}

/**********************************************************************
 * GPU version of update(): each thread owns one point and advances it
 * through all nsteps time steps (each point's update depends only on
 * its own history, so no inter-thread synchronization is needed).
 * Points 1 and tpoints are pinned endpoints, matching the serial code.
 **********************************************************************/
__global__ void parallel_update(float* d_values, float* d_oldval, float* d_newval,
                                int tpoints, int nsteps, int threadsPerBlock){
    int i;
    int pointID = blockIdx.x*threadsPerBlock + threadIdx.x;
    float dtime, c, dx, tau, sqtau;

    /* BUG FIX: the last block can spawn threads past the end of the
       (tpoints+2)-element arrays; without this guard they wrote out
       of bounds. */
    if (pointID >= tpoints + 2)
        return;

    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c*dtime / dx);
    sqtau = tau * tau;

    for(i = 0; i < nsteps; i++){
        if((pointID == 1) || (pointID == tpoints))
            d_newval[pointID] = 0.0;                /* fixed endpoints */
        else
            d_newval[pointID] = (2.0*d_values[pointID]) - d_oldval[pointID]
                                + (sqtau*(-2.0)*d_values[pointID]);
        d_oldval[pointID] = d_values[pointID];
        d_values[pointID] = d_newval[pointID];
    }
}

/**********************************************************************
 * Update all values along line a specified number of times (CPU path,
 * kept as the reference implementation; currently unused by main).
 **********************************************************************/
void update()
{
    int i, j;

    /* Update values for each time step */
    for (i = 0; i < nsteps; i++) {
        for (j = 0; j < tpoints; j++) {
            /* global endpoints */
            if ((j == 1) || (j == tpoints))
                newval[j] = 0.0;
            else
                do_math(j);
        }
        /* Update old values with new values */
        for (j = 0; j < tpoints; j++) {
            oldval[j] = values[j];
            values[j] = newval[j];
        }
    }
}

/**********************************************************************
 * Print final results, ten values per row (indices 1..tpoints).
 *********************************************************************/
void printfinal ()
{
    int i;

    for (i = 1; i <= tpoints; i++) {
        printf("%6.4f ", values[i]);
        if (i%10 == 0)
            printf("\n");
    }
}

int main(int argc, const char *argv[])
{
    /* fix: reading argv[1]/argv[2] unchecked was undefined behavior when
       the program was started with fewer than two arguments */
    if (argc < 3) {
        printf("Usage: %s tpoints nsteps\n", argv[0]);
        return 1;
    }
    sscanf(argv[1], "%d", &tpoints);
    sscanf(argv[2], "%d", &nsteps);
    check_param();

    printf("Initializing points on the line...\n");
    init_line();

    printf("Updating all points for all time steps...\n");
    /* update();  -- replaced by the GPU kernel below */

    /* allocate vectors in device memory */
    size_t size = (tpoints+2)*sizeof(float);
    float* d_values; cudaMalloc(&d_values, size);
    float* d_oldval; cudaMalloc(&d_oldval, size);
    float* d_newval; cudaMalloc(&d_newval, size);

    /* copy memory from host to device */
    cudaMemcpy( d_values, values, size, cudaMemcpyHostToDevice);
    cudaMemcpy( d_newval, newval, size, cudaMemcpyHostToDevice);
    cudaMemcpy( d_oldval, oldval, size, cudaMemcpyHostToDevice);

    /* launch the kernel: one thread per array slot */
    int N = tpoints+2;
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock-1) / threadsPerBlock;
    parallel_update<<<blocksPerGrid, threadsPerBlock>>>(
        d_values, d_oldval, d_newval, tpoints, nsteps, threadsPerBlock);

    /* copy the result from device to host (blocking copy syncs the kernel) */
    cudaMemcpy(values, d_values, size, cudaMemcpyDeviceToHost);

    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");

    /* free device memory */
    cudaFree(d_values);
    cudaFree(d_oldval);
    cudaFree(d_newval);

    /* free host memory */
    free(values);
    free(newval);
    free(oldval);

    return 0;
}
941
// Returns the input value doubled.
__device__ float multiplyByTwo(float number)
{
    return 2.0f * number;
}

// Returns the input value halved (implemented as a multiply by 0.5f,
// which is exact and cheaper than a divide).
__device__ float divideByTwo(float number)
{
    return 0.5f * number;
}
942
#include "includes.h"

// For each thread t: atomically adds *pIn to sum[t] and stores into out[t]
// the value sum[t] held BEFORE the addition (atomicAdd returns the old
// value). Arrays are indexed by threadIdx.x only, so a single block is
// assumed -- TODO confirm the launch configuration at the call site.
__global__ void Return64( unsigned long long *sum, unsigned long long *out, const unsigned long long *pIn )
{
    out[threadIdx.x] = atomicAdd( &sum[threadIdx.x], *pIn );
}
943
/*
 * Copyright 2016 Alexander Terenin
 *
 * Licensed under the Apache License, Version 2.0 (the "License")
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <curand_kernel.h>

/*
 * Function : cuda_tauSqInv
 * Purpose  : calculates the relevant parameters for tau^-2, and draws one
 *   sample from G(a,b) with a large by running the Cheng (1977) rejection
 *   sampler on every thread of one block and keeping one accepted proposal.
 *   Note that to work properly, blockDim.x and the size of shared memory
 *   (in floats) should be equal.
 * Argument *globalState : pointer to random number generator state
 * Argument *tauSqInv    : pointer to tauSqInv output and LB input (used in
 *                         calculating the rate)
 * Argument *alphaG      : pointer to shape parameter
 * Argument *xiInv       : pointer to xiInv (used in calculating the rate)
 * Output   : mutates tauSqInv and stores the result in its place
 *
 * Fixes vs. the original:
 *   - success flag was initialized via success[1] = 0, one past the end of
 *     the size-1 shared array, so success[0] was read uninitialized;
 *   - no barrier existed between thread 0 updating success[0] and the other
 *     threads re-reading it in the while condition, so threads could diverge
 *     around the in-loop __syncthreads() (deadlock / undefined behavior);
 *   - 0.5 (double literal) promoted the rate computation to double.
 */
extern "C" __global__ void cuda_tauSqInv(curandStatePhilox4_32_10_t *globalState, float *tauSqInv, float *alphaG, float *xiInv)
{
    extern __shared__ float acc[]; // accepted proposals, one slot per thread
    __shared__ int success[1];     // 1 once any thread's proposal is accepted

    // BUG FIX: was success[1] = 0 (out-of-bounds write).
    if(threadIdx.x == 0) success[0] = 0;
    __syncthreads(); // publish the initialized flag before anyone reads it

    // Only block 0 participates; threadIdx.x < blockDim.x is always true and
    // was removed from the original guard.
    if(blockIdx.x == 0) {
        acc[threadIdx.x] = 0.0f;

        // copy parameters to local memory
        float alpha = alphaG[0];
        float LB = tauSqInv[0];

        // copy RNG state to local memory and give each thread its own
        // pseudorandom subsequence (6 draws apart) to avoid overlap
        curandStatePhilox4_32_10_t state = globalState[0];
        skipahead((unsigned long long) (6*threadIdx.x), &state);

        // rate parameter (0.5f keeps the arithmetic in single precision)
        float beta = xiInv[0] + (0.5f * LB);

        // Cheng (1977) constants
        float a = rsqrtf(2.0f * alpha - 1.0f);
        float b = alpha - 1.3862944f; // log(4) = 1.3862944f
        float c = alpha + (1.0f / a);

        // rejection sampling: every thread proposes until one is accepted
        while(success[0] == 0) {
            float u1 = curand_uniform(&state); // uniform for the proposal
            float u2 = curand_uniform(&state); // uniform for accept/reject

            // proposal-dependent quantities
            float v = a * logf(u1 / (1.0f - u1));
            float x = alpha * expf(v);

            // accept/reject
            if( (b + (c*v) - x) > logf(u1 * u1 * u2) ) {
                acc[threadIdx.x] = x;
            }
            __syncthreads();

            // thread 0 scans for the first accepted proposal
            if(threadIdx.x == 0) {
                for(int j=0; j < blockDim.x; j++) {
                    float stdGamma = acc[j];
                    if(stdGamma > 0.0f) {             // thread j accepted
                        tauSqInv[0] = stdGamma / beta; // write result
                        success[0] = 1;                // tell others to stop
                        break;
                    }
                }
            }
            // BUG FIX: barrier so every thread sees thread 0's update before
            // re-evaluating the loop condition; otherwise some threads could
            // iterate into __syncthreads() while thread 0 has already exited.
            __syncthreads();
        }

        // last thread: copy curand state back to global memory
        if(threadIdx.x == blockDim.x - 1) globalState[0] = state;
    }
}
944
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

#define EPSILON 1.0e-6

// One particle: 3-D position and velocity.
struct Particle {
    float3 position;
    float3 velocity;
};

// Deterministic pseudo-random value: (seed*a + b) % c. The same formula runs
// on host and device so the two implementations can be compared exactly.
__host__ __device__ uint gen_random(uint a, uint b, uint c = 10, uint seed = 10)
{
    return (seed*a+b) % c;
}

// Euler step: advance the position of particle i by velocity * dt.
__device__ void update_position(Particle* part_array_gpu, float dt, uint i)
{
    part_array_gpu[i].position.x += part_array_gpu[i].velocity.x * dt;
    part_array_gpu[i].position.y += part_array_gpu[i].velocity.y * dt;
    part_array_gpu[i].position.z += part_array_gpu[i].velocity.z * dt;
}

// Add a pseudo-random kick (keyed by particle i and step j) to the velocity.
__device__ void update_velocity(Particle* part_array_gpu, float dt, uint i, uint j)
{
    part_array_gpu[i].velocity.x += (float)(gen_random(i, j)) * dt;
    part_array_gpu[i].velocity.y += (float)(gen_random(i, j)) * dt;
    part_array_gpu[i].velocity.z += (float)(gen_random(i, j)) * dt;
}

// One timestep for all particles. nparticles is new: BUG FIX - the original
// kernel had no bounds check, so when NPARTICLES was not a multiple of the
// block size the tail threads wrote past the end of the array.
__global__ void updateKernel(Particle* part_array_gpu, float dt, uint j, uint nparticles)
{
    const uint i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < nparticles) {
        update_velocity(part_array_gpu, dt, i, j);
        update_position(part_array_gpu, dt, i);
    }
}

// CPU reference: one timestep over all particles (same math as the kernel).
__host__ void updateCPU(Particle* part_array_cpu, float dt, uint j, uint NPARTICLES)
{
    for (uint i = 0; i < NPARTICLES; i++) {
        part_array_cpu[i].velocity.x += (float)(gen_random(i, j)) * dt;
        part_array_cpu[i].velocity.y += (float)(gen_random(i, j)) * dt;
        part_array_cpu[i].velocity.z += (float)(gen_random(i, j)) * dt;
        part_array_cpu[i].position.x += part_array_cpu[i].velocity.x * dt;
        part_array_cpu[i].position.y += part_array_cpu[i].velocity.y * dt;
        part_array_cpu[i].position.z += part_array_cpu[i].velocity.z * dt;
    }
}

// Absolute-tolerance comparison of two floats.
__host__ bool compare_float(float a, float b)
{
    return (fabs(a-b) < EPSILON);
}

// Component-wise tolerant comparison of two float3 values.
__host__ bool compare_float3(float3 a, float3 b)
{
    return (compare_float(a.x, b.x) &&
            compare_float(a.y, b.y) &&
            compare_float(a.z, b.z));
}

// Tolerant comparison of two particles (position and velocity).
__host__ bool compareParticle(Particle particle1, Particle particle2)
{
    return compare_float3(particle1.position, particle2.position) &&
           compare_float3(particle1.velocity, particle2.velocity);
}

// Wall-clock time in seconds (microsecond resolution).
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}

int main(int argc, char** argv)
{
    // fix: reading argv[1..3] unchecked was undefined behavior when the
    // program was started with fewer than three arguments
    if (argc < 4) {
        printf("Usage: %s NITER TPB NPARTICLES\n", argv[0]);
        return 1;
    }

    const float dt = 1.0;
    bool flag = true;
    const int NITER      = atoi(argv[1]);
    const int TPB        = atoi(argv[2]);
    const int NPARTICLES = atoi(argv[3]);

    Particle* particles_gpu;  // device state
    Particle* particles_cpu;  // host reference result
    Particle* particles_res;  // host copy of the GPU result

    // Pinned host memory for fast transfers.
    cudaHostAlloc((void **) &particles_cpu, NPARTICLES*sizeof(Particle), cudaHostAllocDefault);
    cudaHostAlloc((void **) &particles_res, NPARTICLES*sizeof(Particle), cudaHostAllocDefault);
    cudaMalloc(&particles_gpu, NPARTICLES*sizeof(Particle));

    // Identical initial state for both implementations.
    for (int i = 0; i < NPARTICLES; i++) {
        particles_cpu[i].velocity = make_float3(0.0f, 0.0f, 0.0f);
        particles_cpu[i].position = make_float3(1.0f, 1.0f, 1.0f);
        particles_res[i] = particles_cpu[i];
    }

    // GPU computation. Perf fix: the original copied the whole particle
    // array host<->device on every iteration; the state can simply stay
    // resident on the device (results are identical).
    cudaMemcpy(particles_gpu, particles_res, NPARTICLES*sizeof(Particle), cudaMemcpyHostToDevice);
    for (int j = 0; j < NITER; j++) {
        updateKernel<<<(NPARTICLES+TPB-1) / TPB, TPB>>>(particles_gpu, dt, j, NPARTICLES);
    }
    cudaMemcpy(particles_res, particles_gpu, NPARTICLES*sizeof(Particle), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    // CPU reference computation.
    for (int j = 0; j < NITER; j++) {
        updateCPU(particles_cpu, dt, j, NPARTICLES);
    }

    // Compare the two results element by element.
    for (int i = 0; i < NPARTICLES; i++) {
        if (!(compareParticle(particles_cpu[i], particles_res[i]))) {
            flag = false;
            break;
        }
    }

    printf("Comparing the output for each implementation… ");
    if (flag) {
        printf("Correct!\n");
    } else {
        printf("Incorrect\n");
    }

    // BUG FIX: the original called cudaFreeHost(particles_gpu), freeing the
    // device pointer a second time and never releasing particles_cpu.
    cudaFree(particles_gpu);
    cudaFreeHost(particles_cpu);
    cudaFreeHost(particles_res);
    return 0;
}
945
//Based on the work of Andrew Krepps
#include <chrono>
#include <fstream>
#include <random>
#include <stdio.h>
#include <string>
#include <iostream>

// One pair of unsigned operands per thread.
typedef struct {
    unsigned int a;
    unsigned int b;
} MathStruct;

// Per-thread results, interleaved so a single copy retrieves everything.
typedef struct {
    int add;
    int sub;
    int mult;
    int mod;
    int cipher;
} ResultsStruct;

// Test String used for the cipher
const std::string LOREM_IPSUM =
    "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
    "Ut sed feugiat felis. Vestibulum accumsan ornare convallis.\n"
    "Phasellus nulla nunc, dignissim sed vulputate quis, luctus feugiat"
    "elit. Vivamus consectetur magna risus, ut condimentum quam egestas\n"
    "elementum. Fusce sed mauris in ex posuere ultricies. Pellentesque at"
    "venenatis est. Vestibulum vitae accumsan lacus. Nunc sagittis hendrerit\n"
    "sem ut viverra. Morbi sed sodales sem. Integer hendrerit elit vel velit"
    "tincidunt, vitae molestie purus iaculis gravida, hi.";

// Uses the GPU to add a and b for this thread's element into results[].add.
// Caller guarantees the grid covers exactly the allocated element count.
__global__ void add_arrays(const MathStruct* const data, ResultsStruct* const results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    results[index].add = data[index].a + data[index].b;
}

// Uses the GPU to subtract b from a for this thread's element.
__global__ void sub_arrays(const MathStruct* const data, ResultsStruct* const results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    results[index].sub = data[index].a - data[index].b;
}

// Uses the GPU to multiply a by b for this thread's element.
__global__ void mult_arrays(const MathStruct* const data, ResultsStruct* const results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    results[index].mult = data[index].a * data[index].b;
}

// Uses the GPU to compute a modulo b for this thread's element.
// init_math_data generates b >= 1, so the divisor is never zero.
__global__ void mod_arrays(const MathStruct* const data, ResultsStruct* const results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    results[index].mod = data[index].a % data[index].b;
}

// Shift-cipher decrypt: subtract *shift from each character.
// BUGFIX: length guard added -- main launches a hard-coded 4x128 grid, which
// can exceed the LOREM_IPSUM-sized buffer and write out of bounds.
__global__ void decrypt_cipher(char* const data, const int* const shift, const int length)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < length)
    {
        data[index] = data[index] - *shift;
    }
}

// Shift-cipher encrypt: add *shift to each character (bounds-guarded, see above).
__global__ void encrypt_cipher(char* const data, const int* const shift, const int length)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < length)
    {
        data[index] = data[index] + *shift;
    }
}

// Helper function for writing the math timing numbers and the four
// per-element result tables to outputName.
__host__ void write_results(
    const std::string& outputName,
    const int& totalThreads,
    const int& blockSize,
    const int& add_time,
    const int& sub_time,
    const int& mult_time,
    const int& mod_time,
    const MathStruct* const data,
    const ResultsStruct* const results)
{
    std::ofstream stream(outputName);
    if (stream.is_open())
    {
        stream << "Results with Thread Count: " << totalThreads << " and Block Size: " << blockSize << "\n";
        stream << "Add Time nanoseconds:\t" << add_time << "\n";
        stream << "Sub Time nanoseconds:\t" << sub_time << "\n";
        stream << "Mult Time nanoseconds:\t" << mult_time << "\n";
        stream << "Mod Time nanoseconds:\t" << mod_time << "\n";
        stream << "Add Results:\n";
        for( int i = 0; i < totalThreads; i++ )
        {
            stream << "A(" << data[i].a << ") + B(" << data[i].b << ") = " << results[i].add << "\n";
        }
        stream << "\n\nSub Results:\n";
        for( int i = 0; i < totalThreads; i++ )
        {
            stream << "A(" << data[i].a << ") - B(" << data[i].b << ") = " << results[i].sub << "\n";
        }
        stream << "\n\nMult Results:\n";
        for( int i = 0; i < totalThreads; i++ )
        {
            stream << "A(" << data[i].a << ") * B(" << data[i].b << ") = " << results[i].mult << "\n";
        }
        // BUGFIX: this header previously repeated "Mult Results" for the mod table.
        stream << "\n\nMod Results:\n";
        for( int i = 0; i < totalThreads; i++ )
        {
            stream << "A(" << data[i].a << ") % B(" << data[i].b << ") = " << results[i].mod << "\n";
        }
    }
    else
    {
        printf("FILE NOT OPEN?\n");
    }
    stream.close();
}

// Helper function for executing the cipher functionality via pinned or
// pageable memory: encrypts, decrypts, copies back and verifies a round trip.
__host__ void run_cipher_kernal(
    const int& blockSize,
    const int& totalThreads,
    const int& numBlocks,
    char*& data,
    char* d_data,
    const int* const d_shift)
{
    auto results_size = totalThreads * sizeof(char);

    auto start = std::chrono::high_resolution_clock::now();
    encrypt_cipher<<<numBlocks, blockSize>>>(d_data, d_shift, totalThreads);
    // BUGFIX: launches are asynchronous -- without syncing before taking the
    // stop timestamp only the launch overhead was measured.
    cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();
    auto encrypt_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    cudaMemcpy(data, d_data, results_size, cudaMemcpyDeviceToHost);
    printf("\nEncrypted data:\n");
    printf("%s\n\n", data);

    start = std::chrono::high_resolution_clock::now();
    decrypt_cipher<<<numBlocks, blockSize>>>(d_data, d_shift, totalThreads);
    cudaDeviceSynchronize();
    stop = std::chrono::high_resolution_clock::now();
    auto decrypt_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    cudaMemcpy(data, d_data, results_size, cudaMemcpyDeviceToHost);
    stop = std::chrono::high_resolution_clock::now();
    auto copy_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    printf("\nDecrypted data: %s\n\n", data);

    // Verify the encrypt/decrypt round trip restored the original text.
    bool match = true;
    for ( int i = 0; i < LOREM_IPSUM.length(); i++ )
    {
        if ( LOREM_IPSUM[i] != data[i] )
        {
            printf("DONT MATCH!\n");
            match = false;
            break;
        }
    }
    if ( match )
    {
        printf("THEY MATCH!\n");
    }

    // BUGFIX: the encrypt line previously printed decrypt_time.
    printf("\nEncrypt took %d nanoseconds\n", int(encrypt_time));
    printf("Decrypt took %d nanoseconds\n", int(decrypt_time));
    printf("Copy device -> host took %d nanoseconds\n", int(copy_time));
}

// Helper function for executing the math functionality via pinned or pageable
// memory: launches add/sub/mult/mod, times each (including device completion),
// copies the interleaved results back and optionally writes them to file.
__host__ void run_math_kernal(
    const int& blockSize,
    const int& totalThreads,
    const int& numBlocks,
    const std::string& outputName,
    const MathStruct* const data,
    ResultsStruct*& results,
    MathStruct* d_data,
    ResultsStruct* d_results)
{
    auto start = std::chrono::high_resolution_clock::now();
    add_arrays<<<numBlocks, blockSize>>>(d_data, d_results);
    // BUGFIX: sync before the stop timestamp so kernel execution (not just
    // the asynchronous launch) is measured.
    cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();
    auto add_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    sub_arrays<<<numBlocks, blockSize>>>(d_data, d_results);
    cudaDeviceSynchronize();
    stop = std::chrono::high_resolution_clock::now();
    auto sub_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    mult_arrays<<<numBlocks, blockSize>>>(d_data, d_results);
    cudaDeviceSynchronize();
    stop = std::chrono::high_resolution_clock::now();
    auto mult_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    mod_arrays<<<numBlocks, blockSize>>>(d_data, d_results);
    cudaDeviceSynchronize();
    stop = std::chrono::high_resolution_clock::now();
    auto mod_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    auto results_size = totalThreads * sizeof(ResultsStruct);
    start = std::chrono::high_resolution_clock::now();
    // Copy results to host
    cudaMemcpy(results, d_results, results_size, cudaMemcpyDeviceToHost);
    stop = std::chrono::high_resolution_clock::now();
    printf("Copy device -> host took %d nanoseconds\n",
           int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));

    printf("Results with Thread Count: %d and Block Size: %d\n", totalThreads, blockSize);
    // Cast explicitly: count() returns long long; %ld expects long.
    printf("Add Time nanoseconds:\t %ld\n", (long)add_time);
    printf("Sub Time nanoseconds:\t %ld\n", (long)sub_time);
    printf("Mult Time nanoseconds:\t %ld\n", (long)mult_time);
    printf("Mod Time nanoseconds:\t %ld\n", (long)mod_time);

    if ( !outputName.empty() )
    {
        write_results(outputName, totalThreads, blockSize, add_time, sub_time, mult_time, mod_time, data, results);
    }
}

// Helper function for initializing the data used by the cipher; reports how
// long pageable vs pinned allocation and the H->D copies take.
__host__ void init_cipher_data(const int& totalThreads, const bool& pageable, char*& data, char*& d_data, const int* shift, int*& d_shift)
{
    auto data_size = totalThreads * sizeof(char);
    auto shift_size = sizeof(int);

    cudaMalloc((void**)&d_data, data_size);
    cudaMalloc((void**)&d_shift, shift_size);

    if ( pageable )
    {
        auto start = std::chrono::high_resolution_clock::now();
        // +1 for the NUL terminator added below.
        data = (char*)malloc(data_size + 1);
        auto stop = std::chrono::high_resolution_clock::now();
        printf("%s malloc took %d nanoseconds\n", pageable ? "Pageable" : "Pinned",
               int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));
    }
    else
    {
        auto start = std::chrono::high_resolution_clock::now();
        cudaMallocHost((void**)&data, data_size + 1);
        auto stop = std::chrono::high_resolution_clock::now();
        printf("%s malloc took %d nanoseconds\n", pageable ? "Pageable" : "Pinned",
               int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));
    }

    for( int i = 0; i < totalThreads; i++ )
    {
        data[i] = LOREM_IPSUM[i];
    }
    // BUGFIX: the buffer is printed with %s by run_cipher_kernal but was
    // never NUL-terminated, so printf read past the end of the allocation.
    data[totalThreads] = '\0';

    auto start = std::chrono::high_resolution_clock::now();
    cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_shift, shift, shift_size, cudaMemcpyHostToDevice);
    auto stop = std::chrono::high_resolution_clock::now();
    printf("%s copy took %d nanoseconds\n", pageable ? "Pageable" : "Pinned",
           int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));
}

// Helper function for initializing the data used by the math functions; will
// output results of pageable and pinned memory allocation.
// Helper function for initializing the data used by the math kernels; reports
// how long pageable vs pinned host allocation and the H->D copy take.
// host_data[i] = { a: i, b: random in [1,3] }; host_results is left for the
// kernels to fill completely.
__host__ void init_math_data(const int& totalThreads, const bool& pageable, MathStruct*& host_data, ResultsStruct*& host_results, MathStruct*& d_data, ResultsStruct*& d_results)
{
    auto data_size = totalThreads * sizeof(MathStruct);
    auto results_size = totalThreads * sizeof(ResultsStruct);

    cudaMalloc((void**)&d_data, data_size);
    cudaMalloc((void**)&d_results, results_size);

    if ( pageable )
    {
        auto start = std::chrono::high_resolution_clock::now();
        host_data = (MathStruct*)malloc(data_size);
        host_results = (ResultsStruct*)malloc(results_size);
        auto stop = std::chrono::high_resolution_clock::now();
        printf("%s malloc took %d nanoseconds\n", pageable ? "Pageable" : "Pinned",
               int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));
    }
    else
    {
        auto start = std::chrono::high_resolution_clock::now();
        cudaMallocHost((void**)&host_data, data_size);
        cudaMallocHost((void**)&host_results, results_size);
        auto stop = std::chrono::high_resolution_clock::now();
        printf("%s malloc took %d nanoseconds\n", pageable ? "Pageable" : "Pinned",
               int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));
    }

    // Used for random number generation (default-seeded -> reproducible runs).
    std::default_random_engine generator;
    // BUGFIX: the lower bound was 0, but b is used as the divisor in the mod
    // kernel and a zero divisor is undefined behavior on the device.
    std::uniform_int_distribution<int> distribution(1,3);
    for( int i = 0; i < totalThreads; i++ )
    {
        host_data[i].a = i;
        host_data[i].b = distribution( generator );
    }

    auto start = std::chrono::high_resolution_clock::now();
    cudaMemcpy(d_data, host_data, data_size, cudaMemcpyHostToDevice);
    auto stop = std::chrono::high_resolution_clock::now();
    printf("%s copy took %d nanoseconds\n", pageable ? "Pageable" : "Pinned",
           int(std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count()));
}

// Helper function for cleaning up allocated memory used by math functionality.
__host__ void cleanup_math(
    const bool& pageable,
    MathStruct*& data,
    MathStruct*& d_data,
    ResultsStruct*& results,
    ResultsStruct*& d_results)
{
    cudaFree(d_data);
    cudaFree(d_results);
    if ( pageable )
    {
        free(data);
        free(results);
    }
    else
    {
        // BUGFIX: pinned host memory from cudaMallocHost must be released with
        // cudaFreeHost; cudaFree is only valid for device allocations.
        cudaFreeHost(data);
        cudaFreeHost(results);
    }
}

// Helper function for cleaning up allocated memory used by cipher functionality.
__host__ void cleanup_cipher(
    const bool& pageable,
    char*& data,
    char*& d_data,
    int*& d_shift)
{
    cudaFree(d_data);
    cudaFree(d_shift);
    if ( pageable )
    {
        free(data);
    }
    else
    {
        // BUGFIX: see cleanup_math -- pinned memory needs cudaFreeHost.
        cudaFreeHost(data);
    }
}

// Used to run the cipher functionality with pageable memory
__host__ void execute_cipher_pageable_mem( const int& blockSize, const int& numBlocks, const int& shift)
{
    char* data = nullptr;
    char* d_data = nullptr;
    int* d_shift = nullptr;
    init_cipher_data(LOREM_IPSUM.length(), true, data, d_data, &shift, d_shift);
    run_cipher_kernal(blockSize, LOREM_IPSUM.length(), numBlocks, data, d_data, d_shift);
    cleanup_cipher(true, data, d_data, d_shift);
}

// Used to run the cipher functionality with pinnable memory
__host__ void execute_cipher_pinnable_mem( const int& blockSize, const int& numBlocks, const int& shift)
{
    char* data = nullptr;
    char* d_data = nullptr;
    int* d_shift = nullptr;
    init_cipher_data(LOREM_IPSUM.length(), false, data, d_data, &shift, d_shift);
    run_cipher_kernal(blockSize, LOREM_IPSUM.length(), numBlocks, data, d_data, d_shift);
    cleanup_cipher(false, data, d_data, d_shift);
}

// Used to run the math functionality with pageable memory.
// NOTE(review): writeResults is currently unused -- run_math_kernal decides
// based on outputName being non-empty.
__host__ void execute_math_pageable_mem( const int& blockSize, const int& totalThreads, const int& numBlocks, const bool& writeResults, const std::string& outputName)
{
    MathStruct* data = nullptr;
    MathStruct* d_data = nullptr;
    ResultsStruct* results = nullptr;
    ResultsStruct* d_results = nullptr;
    init_math_data(totalThreads, true, data, results, d_data, d_results);
    run_math_kernal(blockSize, totalThreads, numBlocks, outputName, data, results, d_data, d_results);
    cleanup_math(true, data, d_data, results, d_results);
}

// Used to run the math functionality with pinnable memory
__host__ void execute_math_pinnable_mem( const int& blockSize, const int& totalThreads, const int& numBlocks, const bool& writeResults, const std::string& outputName)
{
    MathStruct* data = nullptr;
    MathStruct* d_data = nullptr;
    ResultsStruct* results = nullptr;
    ResultsStruct* d_results = nullptr;
    init_math_data(totalThreads, false, data, results, d_data, d_results);
    run_math_kernal(blockSize, totalThreads, numBlocks, outputName, data, results, d_data, d_results);
    cleanup_math(false, data, d_data, results, d_results);
}

// Entry point: parses [totalThreads] [blockSize] [outputFile], rounds the
// thread count up to a whole number of blocks, then runs the math benchmark
// (pageable + pinned) followed by the cipher benchmark (pageable + pinned).
int main(int argc, char** argv)
{
    // read command line arguments
    int totalThreads = 512;
    int blockSize = 256;
    bool outputResults = false;
    std::string outputName;

    if (argc >= 2)
    {
        totalThreads = atoi(argv[1]);
    }
    if (argc >= 3)
    {
        blockSize = atoi(argv[2]);
    }
    if (argc >= 4)
    {
        outputResults = true;
        outputName = argv[3];
    }

    int numBlocks = totalThreads/blockSize;

    // validate command line arguments: round up so the grid covers exactly
    // totalThreads elements.
    if (totalThreads % blockSize != 0)
    {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }

    printf("\n####################### MATH FUNCTIONALITY START #########################\n");
    printf("####################### PAGEABLE MEMORY #########################\n");
    execute_math_pageable_mem( blockSize, totalThreads, numBlocks, outputResults, outputName);
    printf("####################### PINNABLE MEMORY #########################\n");
    execute_math_pinnable_mem( blockSize, totalThreads, numBlocks, outputResults, outputName);

    printf("\n\n####################### CIPHER START #########################\n");
    printf("Cipher is hardcoded to 4 blocks @ 128 threads per block\n\n");
    execute_cipher_pageable_mem( 128, 4, 3 );
    printf("####################### CIPHER PINNABLE #########################\n");
    execute_cipher_pinnable_mem( 128, 4, 3 );
}
946
#include <stdio.h>
#include <stdlib.h>

#define SIZE 10

// Demonstrates checking the cudaError_t returned by cudaMalloc.
int main(int argc, char **argv)
{
    int *p;

    // Should be cudaMalloc((void**)&p, SIZE * sizeof(int)); the byte count is
    // deliberately left as-is to exercise the error-handling pattern below.
    cudaError_t err = cudaMalloc((void **)&p, SIZE);
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    cudaFree(p);
    return 0;
}
947
/*
 Compiling with nvcc:
 nvcc 3d_matrix_allocte.cu -o 3d_matrix_allocte -std=c++11
 ./3d_matrix_allocte

 Sample Output:
 Matrix Allocated
 Time taken for matrix allocation : 9 microseconds
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;

// The following code sample allocates a width x height x depth 3D array of
// floating-point values and shows how to loop over the array elements in
// device code. Rows are `pitch` bytes apart; slices are pitch*height apart.
// NOTE: every launched thread walks the whole array (sample code, not a
// parallel decomposition).
__global__ void matrixLoop(cudaPitchedPtr devPitchedPtr, int width, int height, int depth)
{
    // BUGFIX (typo): was "deice_pointer".
    char* device_pointer = (char*)devPitchedPtr.ptr;
    size_t pitch = devPitchedPtr.pitch;        // bytes per row (>= width * sizeof(float))
    size_t slicePitch = pitch * height;        // bytes per 2D slice
    for (int z = 0; z < depth; ++z)
    {
        char* slice = device_pointer + z * slicePitch;
        for (int y = 0; y < height; ++y)
        {
            float* row = (float*)(slice + y * pitch);
            for (int x = 0; x < width; ++x)
            {
                float element = row[x];   // touch the element; value intentionally unused
                (void)element;
            }
        }
    }
}

// Host code
int main()
{
    int width = 128, height = 128, depth = 128;

    // cuda data structure used to dimension the matrix: the x extent is in
    // BYTES, the y/z extents are in elements.
    cudaExtent extent = make_cudaExtent(width * sizeof(float), height, depth);

    // declare cuda pitched memory pointer and allocate the 3D matrix.
    cudaPitchedPtr devPitchedPtr;
    // BUGFIX: check the allocation instead of assuming success.
    cudaError_t err = cudaMalloc3D(&devPitchedPtr, extent);
    if (err != cudaSuccess)
    {
        printf("cudaMalloc3D failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Matrix Allocated\n");

    matrixLoop<<<100, 512>>>(devPitchedPtr, width, height, depth);

    // BUGFIX: kernel launches are asynchronous; synchronize and surface errors
    // before exiting.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        printf("Kernel failed: %s\n", cudaGetErrorString(err));
    }

    // BUGFIX: the allocation was never released.
    cudaFree(devPitchedPtr.ptr);
    return 0;
}
948
/******************************************************************************
 *
 * Computer Engineering Group, Heidelberg University - GPU Computing Exercise 04
 *
 * Group   : TBD
 *
 * File    : kernel.cu
 *
 * Purpose : Memory Operations Benchmark
 *
 ******************************************************************************/

#include <stdio.h>

//
// Test Kernels
//

// Copies the benchmark buffer from global memory into dynamic shared memory.
// NOTE(review): the loop indexes shMem with a grid-wide index, so the launch
// config must keep the whole index range inside one block's shared allocation
// (the wrappers pass shmSize both as the allocation and the byte count).
__global__ void globalMem2SharedMem(float* device, float* outFloat, int device_size)
{
    extern __shared__ float shMem[];
    int incr = gridDim.x * blockDim.x;               // stride over the index range
    int ind = threadIdx.x + blockIdx.x * blockDim.x; // this thread's first index
    int index_size = device_size / sizeof(float);    // number of floats to move
    if (ind == 0)
    {
        // Publish one shared value so the compiler cannot elide the copies.
        *outFloat = shMem[0];
    }
    for (int i = ind; i < index_size; i += incr)
    {
        shMem[i] = device[i];   // global -> shared
    }
}

void globalMem2SharedMem_Wrapper(dim3 gridSize, dim3 blockSize, int shmSize, float* device, float* outFloat)
{
    globalMem2SharedMem<<< gridSize, blockSize, shmSize >>>(device, outFloat, shmSize);
}

// Copies shared memory back to global memory (mirror of the kernel above).
__global__ void SharedMem2globalMem(float* device, float* outFloat, int device_size)
{
    extern __shared__ float shMem[];
    int incr = gridDim.x * blockDim.x;
    int ind = threadIdx.x + blockIdx.x * blockDim.x;
    int index_size = device_size / sizeof(float);
    if (ind == 0)
    {
        *outFloat = shMem[0];
    }
    for (int i = ind; i < index_size; i += incr)
    {
        device[i] = shMem[i];   // shared -> global
    }
}

void SharedMem2globalMem_Wrapper(dim3 gridSize, dim3 blockSize, int shmSize, float* device, float* outFloat)
{
    // BUGFIX: this wrapper previously launched globalMem2SharedMem (copy-paste
    // error), so the shared->global direction was never actually benchmarked.
    SharedMem2globalMem<<< gridSize, blockSize, shmSize >>>(device, outFloat, shmSize);
}

// Copies shared memory into a per-thread array sized for registers/local memory.
__global__ void SharedMem2Registers(float* outFloat, int shared_size)
{
    extern __shared__ float shMem[];
    int incr = gridDim.x * blockDim.x;               // increment of the shared memory index
    int ind = threadIdx.x + blockIdx.x * blockDim.x; // initial index of every thread
    int index_size = shared_size / sizeof(float);    // floats that fit into shared memory
    const int reg_size = 256;   // fixed size so the array stays thread-local
    float reg[reg_size];
    reg[0] = 1;
    int k = 0;                  // index within the register array
    // Keep both the shared and the register index within bounds.
    for (int i = ind; i < index_size && k < reg_size; i += incr)
    {
        reg[k] = shMem[i];      // shared -> register
        k++;
    }
    if (ind == 0)
    {
        *outFloat = reg[0];     // defeat dead-code elimination
    }
}

void SharedMem2Registers_Wrapper(dim3 gridSize, dim3 blockSize, int shmSize, float* outFloat)
{
    SharedMem2Registers<<< gridSize, blockSize, shmSize >>>(outFloat, shmSize);
}

// Copies a per-thread register array into shared memory (mirror of the above).
__global__ void Registers2SharedMem(float* outFloat, int shared_size)
{
    extern __shared__ float shMem[];
    int incr = gridDim.x * blockDim.x;
    int ind = threadIdx.x + blockIdx.x * blockDim.x;
    int index_size = shared_size / sizeof(float);
    const int reg_size = 256;
    float reg[reg_size];
    reg[0] = 1;
    int k = 0;
    for (int i = ind; i < index_size && k < reg_size; i += incr)
    {
        shMem[i] = reg[k];      // register -> shared
        k++;
    }
    if (ind == 0)
    {
        *outFloat = reg[0];
    }
}

void Registers2SharedMem_Wrapper(dim3 gridSize, dim3 blockSize, int shmSize, float* outFloat)
{
    Registers2SharedMem<<< gridSize, blockSize, shmSize >>>(outFloat, shmSize);
}

// Repeatedly reads shared memory with a configurable stride to provoke bank
// conflicts; the elapsed cycle count is reported through *clock.
__global__ void bankConflictsRead(float* outFloat, int shared_size, int stride, int rep, long* clock)
{
    extern __shared__ float shMem[];
    int ind = threadIdx.x * stride;               // strided access pattern
    int index_size = shared_size / sizeof(float); // number of floats in shared
    while (ind >= index_size) ind -= index_size;  // wrap into the valid range
    float reg;                                    // single float register
    if (ind == 0)
    {
        reg = 0;
        *outFloat = reg;   // defeat compiler optimizations
    }
    long init = clock64();                        // take initial time
    for (int i = 0; i < rep; i++)                 // repeat rep times for stability
    {
        reg = shMem[ind];                         // repeated strided load
    }
    __syncthreads();                              // all threads finish before timing stops
    long final = clock64();                       // take final time
    if (ind == 0)
    {
        *clock = final - init;                    // save time difference to global memory
    }
}

void bankConflictsRead_Wrapper(dim3 gridSize, dim3 blockSize, int shmSize, float* outFloat, int stride, int rep, long* clock)
{
    bankConflictsRead<<< gridSize, blockSize, shmSize >>>(outFloat, shmSize, stride, rep, clock);
}
949
#include "includes.h"

// Per-block prefix sum over a[] using a binary-tree up-sweep/down-sweep in
// shared memory. Each block of CUDA_THREAD_NUM threads scans its own
// CUDA_THREAD_NUM-element segment of a in place.
// NOTE(review): assumes n == blockDim.x == CUDA_THREAD_NUM and that n is a
// power of two -- TODO confirm at the call site.
__global__ void prefSumBinTreeCudaMulti(float *a, int n)
{
    __shared__ float shm[CUDA_THREAD_NUM];
    int tid=threadIdx.x;
    int bid=blockIdx.x;
    int dot=2;//depth of tree
    // Leaf step: threads at odd positions ((tid+1)%2==0) store the pairwise
    // sum a[t-1]+a[t]. Even-position shm entries are never written here.
    if((tid+1)%dot==0)
    {
        shm[tid]=a[CUDA_THREAD_NUM*bid+tid]+a[CUDA_THREAD_NUM*bid+tid-1];
    }
    dot*=2;
    __syncthreads();
    // Up-sweep: at tree level `dot`, combine partial sums spaced dot/2 apart.
    while(dot<=n)
    {
        if((tid+1)%dot==0)
        {
            shm[tid]=shm[tid]+shm[tid-dot/2];
        }
        dot*=2;
        __syncthreads();
    }
    dot/=2;
    // Down-sweep: propagate prefixes from parents to left children, skipping
    // the first subtree at each level ((tid+1)/dot!=1 -- it is already final).
    while(dot>2)
    {
        if((tid+1)%dot==0)
        {
            if((tid+1)/dot!=1)
            {
                shm[tid-dot/2]=shm[tid-dot/2]+shm[tid-dot];
            }
        }
        dot/=2;
        __syncthreads();
    }
    // Write back: odd positions take their scanned value directly; even
    // positions (tid>0) add the prefix ending at tid-1. a[0] keeps its value.
    if((tid+1)%2==0)
    {
        a[CUDA_THREAD_NUM*bid+tid]=shm[tid];
    }
    else if(tid>0)
    {
        a[CUDA_THREAD_NUM*bid+tid]=a[CUDA_THREAD_NUM*bid+tid]+shm[tid-1];
    }
}
950
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<string.h>
#include<time.h>

// Toggles the case of each alphabetic character of a[] into b[], one thread
// per character; n bounds the valid range.
__global__ void Toggle(char *a, char *b, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: n was passed but never used as a bounds guard.
    if (tid >= n) return;

    char c = a[tid];
    if (c >= 'A' && c <= 'Z')
    {
        b[tid] = c + 32;            // upper -> lower
    }
    else if (c >= 'a' && c <= 'z')
    {
        b[tid] = c - 32;            // lower -> upper
    }
    else
    {
        // BUGFIX: digits/punctuation were previously corrupted by blindly
        // subtracting 32; pass them through unchanged.
        b[tid] = c;
    }
}

int main()
{
    clock_t t;
    t = clock();
    char a[100], b[100];
    int i, n, size;
    char *d_a, *d_b;

    printf("\nEnter the string\n");
    // BUGFIX: width limit prevents overflowing the 100-byte buffer.
    scanf("%99s", a);

    n = strlen(a);
    printf("\nNo of charcaters is\t%d", n);
    size = sizeof(char);
    printf("\nSize is \t%d\n", size);

    cudaMalloc((void **)&d_a, n * size);
    cudaMalloc((void **)&d_b, n * size);
    cudaMemcpy(d_a, a, n * size, cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    float elapsed_time_ms;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // BUGFIX: a single block tops out at 1024 threads; launch a 1-D grid
    // instead of <<<1, n>>> so arbitrarily long strings still work.
    int threadsPerBlock = 256;
    int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    Toggle<<<blocks, threadsPerBlock>>>(d_a, d_b, n);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    printf("\nTime to calculate results inside GPU is: %fms\n", elapsed_time_ms);

    cudaMemcpy(b, d_b, n * size, cudaMemcpyDeviceToHost);
    printf("\nToggled string is \n");
    for (i = 0; i < n; i++)
        printf("%c", b[i]);

    cudaFree(d_a);
    cudaFree(d_b);

    t = clock() - t;
    double time_taken;
    time_taken = ((double)t) / CLOCKS_PER_SEC;
    printf("\nEntire program took %f seconds to execute\n", time_taken);
    return 0;
}
951
/*
 Ocelot 0.4.72 issue. Steve Worley Oct 27 2009 sw@worley.com

 Ocelot fails when running kernels using dynamic shared memory, in 32 bit
 only (32-bit Ubuntu 9.04, CUDA 2.3). Compile with:

   nvcc ocbug.cu -lOcelotExecutive -lOcelotTrace -lOcelotIr -lOcelotParser -lhydrazine -lcudart

 Running aborts with:
   a.out: ocelot/executive/implementation/CooperativeThreadArray.cpp:1093:
   ir::PTXU32 executive::CooperativeThreadArray::operandAsU32(int, const
   ir::PTXOperand&): Assertion `0 == "invalid address mode of operand"' failed.

 Likely it has to do with the dynamic shared memory; static variables work fine.
*/

#include <cstdio>

// Minimal repro: copy global memory into dynamically-sized shared memory.
__global__ void kernel(int *source)
{
    extern __shared__ int s[];
    s[threadIdx.x] = source[threadIdx.x];
}

int main()
{
    int *src;
    int host[10000] = {0};

    cudaSetDevice(0);
    cudaMalloc((void**)&src, 10000 * sizeof(int));
    cudaMemcpy(src, host, 10000 * sizeof(int), cudaMemcpyHostToDevice);

    // 15000 bytes of dynamic shared memory triggers the Ocelot assertion.
    kernel<<<128, 128, 15000>>>(src);

    // BUGFIX: the launch is asynchronous and was never checked, so
    // "TEST PASSED" printed unconditionally even if the kernel failed.
    cudaError_t err = cudaDeviceSynchronize();
    if (err == cudaSuccess)
    {
        printf("TEST PASSED\n");
    }
    else
    {
        printf("TEST FAILED: %s\n", cudaGetErrorString(err));
    }

    cudaFree(src);   // BUGFIX: allocation leaked
    return 0;
}
952
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime_api.h>

// Prints a fixed set of properties for a single CUDA device.
void printDevProp(cudaDeviceProp devProp)
{
    printf("GPU card name - %s\n", devProp.name);
    printf("GPU Computation Minor Capability - %d\n", devProp.minor);
    printf("GPU Computation Major Capability - %d\n", devProp.major);
    printf("Maximum number of block dimensions - %d %d %d\n",
           devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
    printf("Maximum number of grid dimensions - %d %d %d\n",
           devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
    printf("Total GPU Memory global(bytes) - %zu\n", devProp.totalGlobalMem);
    printf("Total GPU Memory const(bytes) - %zu\n", devProp.totalConstMem);
    printf("Shared Memory available per block(bytes) - %zu\n", devProp.sharedMemPerBlock);
    printf("Warp size (number of threads per warp) - %d\n", devProp.warpSize);
    printf("Clock frequency in kilohertz - %d\n", devProp.clockRate);
    printf("Number of multiprocessors on device - %d\n", devProp.multiProcessorCount);
    printf("32-bit registers available per block - %d\n", devProp.regsPerBlock);
    printf("Maximum number of threads per block - %d\n", devProp.maxThreadsPerBlock);
    printf("Device can concurrently copy memory and execute a kernel - %d\n", devProp.deviceOverlap);
    printf("Whether there is a run time limit on kernels - %d\n", devProp.kernelExecTimeoutEnabled);
    printf("Device is integrated as opposed to discrete - %d\n", devProp.integrated);
}

// Enumerates every CUDA device and dumps its properties.
int main()
{
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    for (int dev = 0; dev < devCount; ++dev)
    {
        // Get device properties
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, dev);
        printDevProp(devProp);
    }
    return 0;
}
953
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Every launched thread prints one greeting line.
__global__ void hello_cuda()
{
    printf("Hello World from CUDA!\n");
}

/*
Example host launcher, kept disabled as in the original file:

int main()
{
    int nx = 16, ny = 4;

    // Threads per block: x <= 1024, y <= 1024, z <= 64, and x*y*z <= 1024.
    dim3 block(8, 2);

    // Blocks per grid: limits are far larger (x up to 2^31-1 on current GPUs).
    dim3 grid(nx / block.x, ny / block.y);

    hello_cuda<<<grid, block>>>();

    // Launches are asynchronous; without this call the process may exit
    // before the kernel's printf output is flushed.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
*/
954
// Fills u with the 3x3 dipole-style interaction tensor for the particle pair
// (i = blockIdx.x, j = threadIdx.x): u_ab = (3*e_a*e_b - delta_ab) / r^3,
// where e is the unit vector between the two particles. The diagonal pair
// (i == j) is skipped. Launch layout: one block per i, one thread per j.
void __global__ calcU_GPU(float3* r, float* u){
    if (blockIdx.x != threadIdx.x) {
        float3 r_i = r[blockIdx.x];
        float3 r_j = r[threadIdx.x];
        // Row-major 3x3 block owned by this (i, j) pair.
        int pointer = blockDim.x * blockIdx.x * 9 + (threadIdx.x * 9);

        // NOTE(review): the original wrapped the writes in
        // for(int i=0;i<blockIdx.x;i++){...}, recomputing identical values
        // blockIdx.x times -- i.e. the tensor was only written when
        // blockIdx.x > 0. The guard below preserves that observable behavior;
        // confirm whether skipping row 0 is intentional.
        if (blockIdx.x > 0) {
            float dx = r_i.x - r_j.x;
            float dy = r_i.y - r_j.y;
            float dz = r_i.z - r_j.z;
            // sqrtf/products instead of pow(): keeps the math in float.
            float r1 = sqrtf(dx * dx + dy * dy + dz * dz);
            // BUGFIX: the original wrote "... / r1*r1*r1", which parses as
            // ((x / r1) * r1) * r1 -- it multiplied by r instead of dividing
            // by r^3.
            float inv_r3 = 1.0f / (r1 * r1 * r1);
            float ex = dx / r1;
            float ey = dy / r1;
            float ez = dz / r1;

            // x row
            u[pointer]     = (3.0f * ex * ex - 1.0f) * inv_r3;
            u[pointer + 1] = (3.0f * ex * ey) * inv_r3;
            u[pointer + 2] = (3.0f * ex * ez) * inv_r3;
            // y row
            u[pointer + 3] = (3.0f * ey * ex) * inv_r3;
            u[pointer + 4] = (3.0f * ey * ey - 1.0f) * inv_r3;
            u[pointer + 5] = (3.0f * ey * ez) * inv_r3;
            // z row
            u[pointer + 6] = (3.0f * ez * ex) * inv_r3;
            u[pointer + 7] = (3.0f * ez * ey) * inv_r3;
            u[pointer + 8] = (3.0f * ez * ez - 1.0f) * inv_r3;
        }
    }
}
955
#include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <unistd.h> /****************************************************************************** * This program takes an initial estimate of m and c and finds the associated * rms error. It is then as a base to generate and evaluate 8 new estimates, * which are steps in different directions in m-c space. The best estimate is * then used as the base for another iteration of "generate and evaluate". This * continues until none of the new estimates are better than the base. This is * a gradient search for a minimum in mc-space. * * To compile: * nvcc -o lr_coursework lr_coursework.c -lm * * To run: * ./lr_coursework * * Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {76.94,124.69},{85.12,149.73},{75.51,146.60},{69.29,123.38}, {83.91,157.47},{84.32,143.29},{89.13,134.82},{78.85,145.88}, {69.64,137.13},{83.35,140.60},{20.23,70.20},{60.53,122.96}, {37.60,92.40},{67.00,127.74},{31.88,71.49},{62.09,114.35}, {38.53,93.58},{97.47,155.92},{70.17,131.58},{22.21,59.54}, {22.53,52.86},{ 4.40,32.43},{98.61,177.92},{97.69,166.69}, {18.26,38.36},{17.96,66.61},{92.16,188.25},{84.05,153.40}, {36.34,87.75},{93.43,167.58},{70.70,140.10},{30.27,93.51}, {22.22,72.54},{26.98,75.73},{70.90,126.93},{34.89,90.32}, {56.06,113.83},{84.21,148.38},{93.04,165.13},{78.67,152.01}, {88.46,146.74},{67.75,134.87},{98.05,177.26},{48.88,109.70}, {13.98,64.97},{81.58,151.75},{62.93,128.67},{69.73,140.58}, {71.51,144.60},{45.09,91.52},{66.02,138.53},{ 6.39,40.31}, {78.87,154.79},{22.25,86.28},{27.01,72.97},{22.58,62.03}, {36.51,76.63},{ 9.22,41.94},{24.31,70.67},{43.02,108.85}, {54.97,116.25},{85.56,138.70},{59.56,126.52},{75.43,139.92}, 
{20.53,55.18},{99.26,184.17},{47.44,107.14},{12.33,56.25}, {63.02,128.19},{30.19,92.37},{97.47,167.03},{92.94,184.03}, {66.70,136.10},{86.99,173.29},{30.01,79.94},{19.51,68.97}, {30.62,78.34},{97.03,165.92},{85.27,159.80},{ 8.59,39.06}, {22.78,72.57},{50.12,99.23},{78.25,151.84},{ 5.80,21.49}, {99.94,174.26},{79.15,143.38},{76.25,135.36},{46.84,90.82}, {24.68,77.30},{37.63,93.44},{88.78,163.67},{81.14,157.57}, {24.51,46.28},{48.00,107.36},{31.54,78.37},{70.84,124.41}, { 9.43,53.77},{36.22,87.08},{12.57,43.87},{81.03,136.30}, {16.92,60.97},{38.41,88.95},{75.34,163.51},{23.04,58.70}, {50.24,96.84},{93.45,170.19},{50.57,116.51},{16.85,68.63}, {47.97,93.94},{58.72,121.60},{73.24,138.17},{96.56,176.73}, {36.80,88.24},{70.05,129.77},{60.49,131.82},{ 5.79,61.99}, {44.45,105.21},{82.61,146.91},{94.34,165.19},{60.71,128.19}, {88.85,150.83},{54.16,97.19},{35.67,89.33},{34.63,87.93}, {73.40,129.27},{66.70,134.01},{81.98,165.35},{ 3.66,56.48}, {32.69,86.30},{ 8.01,42.09},{26.77,74.46},{78.15,138.41}, {68.84,135.45},{43.28,111.13},{91.20,175.14},{37.84,95.33}, {88.47,166.44},{75.16,154.58},{50.15,93.19},{27.64,76.93}, {84.90,150.89},{54.61,104.22},{13.53,63.03},{13.65,57.03}, {23.63,67.65},{23.70,58.81},{38.69,111.70},{70.63,129.42}, {79.79,152.66},{47.27,90.28},{97.16,183.35},{48.34,115.36}, {41.15,86.60},{29.52,81.66},{ 5.14,45.53},{76.64,161.05}, {99.98,161.44},{75.56,142.78},{18.51,45.96},{93.90,176.34}, {31.23,86.13},{67.13,135.17},{15.96,48.96},{38.67,89.85}, {74.90,129.87},{89.97,153.28},{ 2.50,29.99},{84.41,147.07}, {12.98,36.60},{ 2.02,43.03},{51.76,120.81},{36.21,93.15}, {63.93,124.03},{66.46,132.36},{79.92,149.78},{92.36,171.86}, {86.96,148.81},{65.53,125.73},{12.79,60.01},{63.06,125.93}, {50.81,113.01},{61.74,129.85},{ 8.02,44.07},{44.87,117.19}, {38.30,84.43},{75.20,140.98},{82.07,153.94},{38.40,84.38}, {99.95,179.39},{51.84,99.20},{73.60,149.21},{23.78,72.31}, { 5.21,49.78},{22.81,88.63},{59.80,106.55},{65.23,122.87}, { 
4.07,46.60},{23.42,75.02},{97.44,159.57},{92.80,155.81}, {61.52,91.23},{ 0.09,47.09},{56.03,106.05},{57.76,111.59}, {45.74,111.27},{23.39,56.86},{11.55,50.89},{81.80,160.73}, {97.67,168.64},{25.95,66.05},{75.03,130.36},{58.11,122.69}, {19.06,74.80},{23.47,82.34},{70.18,117.20},{18.91,65.19}, {12.05,41.99},{28.78,75.68},{23.18,65.81},{17.42,67.69}, {65.23,116.74},{33.27,78.30},{77.97,151.22},{87.74,153.45}, {95.61,169.76},{59.49,121.01},{25.65,81.63},{38.67,89.19}, {11.40,59.22},{ 2.34,39.39},{14.83,47.35},{55.13,117.21}, {12.03,64.77},{97.76,161.00},{87.17,165.48},{ 1.90,52.12}, { 4.68,43.22},{ 8.17,55.96},{65.56,120.43},{16.95,49.81}, {75.95,155.13},{54.87,112.50},{31.77,77.71},{86.83,143.69}, {52.54,108.18},{28.81,78.34},{ 9.31,33.19},{20.88,72.73}, {76.41,143.86},{32.15,78.14},{26.30,63.99},{98.94,165.11}, {85.77,136.01},{12.19,59.40},{79.76,152.44},{45.38,101.45}, {44.80,107.98},{65.88,145.47},{32.52,90.77},{28.45,77.94}, {32.11,87.10},{40.64,87.17},{30.08,85.39},{79.08,147.28}, {78.45,140.98},{71.29,139.23},{55.25,113.40},{49.85,105.06}, {87.99,173.48},{26.14,68.27},{94.37,196.05},{32.95,80.49}, {52.41,99.85},{ 6.63,47.96},{82.20,145.91},{74.62,152.49}, { 6.11,61.65},{29.65,66.28},{ 7.74,54.35},{13.33,68.27}, {65.80,128.05},{12.55,57.99},{65.55,123.38},{91.30,168.39}, {80.75,142.64},{ 0.30,30.65},{23.88,71.93},{55.73,108.33}, {30.32,99.46},{ 3.82,43.67},{16.48,64.31},{60.73,117.98}, {96.16,177.71},{82.21,140.88},{85.75,141.71},{29.67,46.80}, {69.19,137.89},{30.13,69.29},{12.05,61.28},{33.73,84.93}, {78.84,157.15},{29.72,90.48},{88.45,161.11},{51.52,131.57}, {83.33,153.38},{27.68,71.65},{38.90,95.98},{12.55,40.85}, {86.10,153.92},{52.57,96.57},{68.49,130.39},{63.79,116.10}, {47.89,89.20},{ 0.47,26.48},{99.64,161.53},{67.36,129.66}, {43.87,108.32},{16.02,68.54},{93.00,165.22},{91.42,154.58}, {17.97,68.57},{66.51,112.87},{99.13,171.38},{72.62,150.98}, {39.53,101.11},{39.64,103.34},{77.99,148.00},{43.75,91.39}, 
{62.81,113.89},{84.88,139.75},{10.26,51.88},{98.34,174.86}, {58.29,99.95},{59.65,129.51},{67.69,126.98},{37.72,84.93}, {83.42,147.53},{96.70,167.91},{22.26,72.11},{43.27,112.34}, {61.91,125.82},{77.50,135.75},{41.88,89.87},{47.96,95.98}, {40.61,93.42},{14.44,65.64},{15.82,58.43},{ 9.85,62.30}, {26.03,70.40},{30.38,79.71},{89.23,164.43},{79.35,143.56}, {21.87,68.53},{14.02,34.23},{55.33,121.06},{36.36,89.25}, { 8.32,51.15},{67.99,166.35},{78.55,150.12},{39.83,87.52}, {57.79,102.69},{46.47,77.87},{41.02,84.11},{50.80,103.29}, {73.81,122.69},{ 6.42,52.70},{11.88,59.41},{28.73,86.15}, {62.03,106.70},{27.32,72.34},{72.94,132.20},{ 0.30,32.44}, {86.65,164.44},{58.49,102.01},{72.38,147.17},{ 1.51,39.27}, {67.44,133.31},{58.01,129.12},{95.56,159.37},{37.83,107.91}, { 4.20,59.30},{87.20,144.53},{67.79,129.22},{99.79,176.18}, {16.80,77.15},{96.97,173.56},{70.47,134.40},{29.41,90.22}, {58.22,131.84},{ 1.65,33.08},{57.71,121.84},{39.15,82.95}, {15.90,44.97},{33.69,96.70},{ 8.61,59.97},{34.82,83.08}, {39.87,106.38},{32.41,84.68},{65.44,111.44},{ 5.13,49.35}, {18.12,63.76},{23.81,76.56},{59.76,119.97},{74.84,147.99}, {70.64,143.18},{95.70,171.42},{87.74,158.87},{32.42,83.65}, {42.68,114.73},{55.62,127.52},{ 0.76,37.01},{40.64,92.40}, {60.94,137.90},{16.63,52.92},{91.46,174.79},{51.40,104.22}, {54.13,101.22},{69.99,124.73},{49.95,97.62},{10.47,50.29}, {77.35,140.02},{62.77,129.66},{46.95,104.46},{10.00,48.83}, {80.13,144.87},{31.58,90.46},{86.84,148.97},{13.01,58.30}, {68.17,125.29},{74.62,145.55},{99.38,164.77},{23.32,66.83}, {42.53,98.41},{47.74,109.49},{81.99,144.26},{83.56,159.05}, {77.77,143.62},{49.36,98.86},{ 0.92,14.87},{39.57,93.42}, {56.59,98.18},{90.64,172.55},{60.37,129.79},{ 4.06,44.60}, {71.63,142.93},{16.44,58.27},{34.72,88.83},{11.61,55.62}, {55.55,112.78},{ 2.63,43.396},{50.21,109.25},{10.61,65.78}, {58.26,119.03},{74.24,156.99},{13.94,57.91},{18.28,55.93}, {50.73,80.82},{ 3.70,23.40},{ 6.07,46.69},{83.66,160.83}, 
{52.55,124.89},{68.51,116.63},{99.30,169.38},{99.07,174.67}, {70.81,157.00},{49.02,104.62},{63.59,123.82},{25.28,75.22}, {92.62,175.42},{78.88,145.42},{31.93,82.91},{69.67,121.40}, {23.55,53.65},{62.35,133.88},{84.30,173.36},{99.55,191.78}, {97.79,148.12},{39.26,107.23},{ 8.45,51.29},{92.26,164.87}, {48.59,102.82},{55.74,107.10},{ 7.01,32.94},{86.16,159.98}, {80.92,137.25},{78.42,147.01},{ 1.89,44.75},{21.94,85.06}, {24.48,86.90},{69.87,113.67},{33.09,74.78},{42.31,75.96}, {49.03,92.39},{11.95,40.35},{29.02,82.57},{92.08,156.79}, {73.43,144.00},{79.18,182.89},{97.57,168.50},{22.87,57.14}, {94.29,153.15},{27.79,63.79},{29.31,86.34},{13.01,37.32}, {23.83,78.39},{38.75,85.29},{76.09,131.04},{17.75,56.79}, { 5.01,51.67},{ 8.04,63.87},{95.95,160.31},{48.15,104.24}, {58.62,113.30},{62.09,119.33},{88.14,159.41},{76.78,139.99}, {46.07,94.12},{18.90,71.48},{14.00,60.97},{44.13,122.19}, {73.91,139.62},{48.62,102.26},{81.77,152.70},{92.29,143.32}, { 9.89,40.78},{70.04,128.77},{72.16,126.83},{32.68,77.02}, { 4.78,36.80},{ 4.52,25.96},{46.72,98.31},{67.67,148.21}, {44.06,90.31},{45.33,92.759},{53.24,116.86},{83.65,141.72}, {30.34,83.87},{20.31,53.79},{33.07,90.82},{35.84,84.35}, {37.35,81.15},{22.60,50.96},{84.53,142.81},{75.58,132.74}, {95.70,171.00},{53.68,110.30},{27.61,93.09},{83.50,161.57}, {71.31,134.44},{ 5.53,35.49},{73.73,135.30},{78.89,145.01}, {13.45,57.05},{20.93,54.46},{25.54,57.96},{75.07,142.54}, { 3.69,40.71},{63.26,124.82},{79.51,140.61},{59.07,115.52}, {39.58,69.45},{89.65,158.32},{ 8.83,59.04},{99.44,178.84}, {68.77,132.12},{52.49,106.17},{31.39,71.40},{19.08,72.71}, {68.13,115.40},{38.33,90.09},{52.14,125.64},{ 5.51,41.20}, {43.23,88.64},{38.60,86.59},{79.83,143.51},{16.94,75.13}, {29.46,73.32},{17.50,70.92},{14.74,61.71},{45.57,93.64}, {68.55,129.83},{46.02,111.94},{63.60,125.36},{84.93,155.21}, {25.13,68.54},{65.64,138.43},{25.33,81.27},{92.97,171.53}, {35.93,86.53},{86.28,161.12},{44.57,100.01},{97.59,175.01}, 
{18.61,58.73},{50.88,112.87},{88.48,161.71},{73.83,131.60}, {99.28,184.56},{15.04,84.69},{28.50,59.38},{12.60,64.62}, { 9.11,33.01},{28.97,90.08},{60.13,115.57},{63.92,138.23}, {11.40,51.62},{60.63,106.82},{99.62,173.35},{ 6.11,35.62}, {27.81,72.53},{ 4.03,27.95},{98.43,171.81},{32.63,74.73}, {25.99,69.03},{96.31,163.70},{44.54,89.37},{92.97,160.80}, {63.24,128.75},{ 9.55,53.37},{76.22,146.16},{94.23,165.23}, {60.92,93.41},{18.66,60.57},{12.48,32.10},{92.75,150.29}, { 7.15,45.48},{82.82,135.01},{94.80,182.72},{78.83,136.70}, { 1.78,46.39},{ 0.28,29.81},{20.03,58.07},{78.67,159.85}, {93.02,161.76},{50.08,93.81},{65.25,143.20},{ 5.97,40.75}, {92.05,164.26},{92.13,148.76},{92.45,161.58},{ 5.41,38.37}, {23.22,78.27},{97.09,174.75},{60.45,129.86},{42.00,89.25}, {87.16,153.53},{19.28,64.51},{ 9.98,43.11},{49.65,104.88}, {89.90,172.18},{35.30,80.57},{ 4.23,23.32},{20.49,74.64}, {71.16,146.24},{ 9.35,68.21},{73.32,122.41},{17.18,59.31}, {72.41,139.63},{14.89,64.82},{99.66,171.55},{36.64,87.08}, {58.48,112.65},{ 8.22,46.77},{73.60,138.34},{51.49,98.61}, {10.81,52.85},{43.38,105.22},{70.87,146.66},{43.70,112.35}, {24.20,60.33},{38.70,85.61},{27.70,83.16},{99.64,158.22}, { 2.47,60.98},{44.83,99.80},{ 6.54,35.17},{53.37,99.18}, {95.92,178.74},{29.02,73.35},{ 1.25,31.28},{ 4.09,45.63}, {44.13,107.44},{81.51,135.33},{49.05,103.72},{93.28,173.40}, {54.42,101.30},{74.66,143.49},{91.37,167.32},{ 3.01,42.76}, {54.82,119.26},{85.70,147.08},{33.31,85.93},{98.93,196.63}, {54.97,108.60},{91.51,172.46},{93.86,168.32},{28.34,80.04}, {71.05,145.29},{38.81,107.70},{97.70,181.16},{44.40,100.31}, {19.20,62.98},{29.62,79.92},{64.37,127.28},{70.61,129.59}, {95.43,171.12},{91.08,163.10},{51.78,121.56},{73.39,134.04}, {20.12,71.68},{72.87,166.21},{17.45,60.38},{26.49,84.29}, {32.74,82.33},{42.20,99.64},{85.38,157.54},{89.36,162.22}, {24.64,65.67},{72.46,131.96},{65.86,126.91},{44.99,104.03}, { 3.88,46.41},{27.60,83.88},{74.58,137.71},{88.74,163.55}, 
{71.74,142.05},{22.26,58.44},{40.30,96.44},{87.97,159.53}, {61.71,137.22},{52.84,113.53},{98.08,156.03},{80.27,159.75}, {51.72,112.51},{13.92,74.92},{41.91,95.60},{54.53,101.10}, {50.64,106.94},{ 9.83,48.95},{60.28,121.14},{64.74,116.54}, {70.06,142.05},{58.11,134.04},{54.83,90.68},{86.87,160.67}, {90.71,172.80},{ 1.79,34.67},{64.02,133.94},{ 5.77,47.90}, {91.87,152.15},{50.45,102.58},{94.22,177.81},{62.21,128.39}, {52.28,107.19},{98.57,171.88},{14.25,59.73},{43.16,83.07}, {82.84,150.04},{51.00,92.64},{53.02,102.83},{ 9.47,37.69}, {59.60,119.25},{99.56,166.42},{68.72,139.24},{ 3.46,40.50}, {21.70,76.81},{78.92,141.56},{17.47,57.96},{80.48,128.40}, { 6.76,29.28},{16.14,54.19},{55.58,97.02},{78.85,165.78}, {95.99,163.18},{64.92,126.83},{61.03,123.99},{93.97,170.55}, {97.03,181.61},{99.16,187.83},{33.99,81.64},{51.60,109.25}, {12.42,61.71},{23.09,68.16},{61.86,125.34},{47.16,111.12}, {62.14,119.37},{95.73,162.44},{12.54,73.40},{79.54,145.97}, {66.33,119.82},{88.68,163.37},{ 8.30,49.86},{ 7.06,51.59}, {54.64,110.52},{19.58,63.04},{40.40,76.01},{71.79,147.24}, {65.33,119.61},{24.25,73.73},{93.71,173.33},{84.98,156.37}, {71.69,140.90},{88.82,163.04},{28.70,71.92},{63.51,125.82}, { 2.52,56.53},{16.59,57.19},{ 4.69,51.71},{66.16,128.12}, {99.16,175.08},{85.20,150.40},{67.52,131.68},{98.44,157.07}, {23.50,74.14},{13.83,53.49},{36.83,96.33},{50.81,97.21}, {90.89,151.05},{88.77,149.26},{24.56,65.91},{29.39,87.09}, {40.68,94.57},{37.06,92.04},{41.94,84.73},{78.34,159.44}, {58.06,120.10},{66.08,137.57},{38.76,95.59},{90.16,162.35}, { 1.23,32.05},{18.40,79.85},{83.68,160.43},{70.55,137.08}, {48.80,116.13},{58.34,134.98},{64.55,151.88},{19.11,54.99}, {17.15,62.20},{ 8.76,41.24},{69.37,127.38},{ 0.02,19.91}, {89.31,173.50},{25.05,65.73},{29.51,71.31},{74.47,127.67}, { 7.79,49.80},{ 9.76,46.79},{44.56,97.59},{12.82,48.87}, {26.64,74.62},{ 8.68,48.98},{99.55,161.34},{54.80,119.52}, {40.01,86.37},{31.59,97.22},{84.72,153.95},{91.17,156.54}, 
{59.85,123.74},{97.10,176.28},{86.33,145.80},{43.16,92.40}, {76.57,132.84},{38.40,93.39},{24.22,72.02},{67.22,139.69}, {44.30,125.48},{ 9.13,23.05},{85.36,153.44},{82.06,146.76}, {95.08,163.29},{78.47,153.07},{97.25,155.67},{24.26,59.22}, {29.48,73.16},{11.24,52.71},{40.39,89.98},{89.41,168.71}, {51.84,117.90},{79.26,151.83},{37.47,91.62},{56.44,110.52}, { 9.75,54.62},{37.20,103.71},{56.91,123.87},{62.93,128.80}, {55.27,118.51},{76.67,170.89},{82.59,158.13},{ 4.32,35.69}, {80.02,143.24},{92.93,165.87},{57.21,115.22},{48.28,97.73}, {34.12,88.32},{23.01,74.70},{43.39,88.40},{53.99,114.82}, {43.23,85.31},{37.38,78.87},{53.70,99.14},{80.86,140.55}, {83.41,152.02},{41.76,92.07},{46.13,87.93},{71.33,150.25}, {57.03,111.85},{53.63,111.67},{57.60,105.70},{84.09,153.43}, {94.65,188.65},{26.18,60.44},{61.56,130.96},{71.92,133.33}, {73.47,148.70},{70.59,155.82},{55.25,106.47},{39.05,78.82}, {22.82,59.61},{18.96,64.30},{48.51,105.28},{72.98,127.46}, {45.37,101.92},{95.44,171.66},{66.34,123.39},{82.91,157.36}, {32.95,90.63},{74.25,133.83},{ 5.38,38.85},{16.07,52.25}, {14.03,65.20},{72.95,123.91},{ 5.49,27.28},{23.20,65.30}, {26.05,74.95},{ 8.96,41.39},{38.88,90.58},{36.53,77.95}, {32.58,96.24},{67.62,138.86},{33.63,96.46},{ 3.97,46.13}, { 9.36,51.30},{89.54,151.56},{ 2.04,50.74},{91.76,168.71}, { 2.97,24.38},{77.29,155.71},{79.72,141.21},{55.39,115.18}, {27.87,68.07},{88.52,158.19},{67.42,122.05},{75.84,146.67}, {54.66,83.52},{52.04,113.77},{57.15,110.66},{ 7.28,31.72}, {80.30,144.21},{79.46,150.44},{85.21,161.08},{25.16,76.32}, {59.84,126.36},{15.75,73.98},{15.06,36.26},{83.62,150.75}, {81.55,144.19},{98.43,171.96},{57.64,120.44},{21.58,59.74}, {53.96,113.42},{99.61,166.69},{61.04,104.13},{23.26,60.93}, {91.26,148.91},{78.49,135.20},{ 3.76,59.75},{45.98,99.50}, {76.59,135.69},{14.28,53.24},{50.91,116.54},{92.53,191.37}, {42.29,90.46},{32.91,85.46},{91.97,178.74},{38.66,82.60}, {65.44,119.45},{44.22,109.72},{47.66,83.32},{51.49,118.12}, 
{26.70,81.33},{25.40,78.19},{17.66,63.50},{ 2.67,57.07}, {99.41,164.10},{41.46,89.46},{39.25,84.32},{15.97,56.02}, {49.31,104.93},{69.45,127.07},{77.58,157.60},{89.97,174.81} }; int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __device__ double d_residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); //Allocate memory for d_dm error = cudaMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_dc error = cudaMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); 
} //Allocate memory for d_error_sum_arr error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_data error = cudaMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } //Copy memory for dm to d_dm error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for dc to d_dc error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for data to d_data error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error, cudaGetErrorString(error)); } for(i=0;i<8;i++) { //Host variable storing the array returned from the kernel function. double h_error_sum_arr[1000]; //Stores the total sum of the values from the error sum array. double error_sum_total; //Stores the mean of the total sum of the error sums. double error_sum_mean; //Call the rms_error function using 100 blocks and 10 threads. d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); cudaThreadSynchronize(); //Copy memory for d_error_sum_arr error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error, cudaGetErrorString(error)); } //Loop through the error sum array returned from the kernel function for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. 
error_sum_total += h_error_sum_arr[j]; } //Calculate the mean for the error sum. error_sum_mean = error_sum_total / n_data; //Calculate the square root for the error sum mean. e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } printf("best m,c is %lf,%lf with error %lf in direction %d\n", dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = cudaFree(d_dm); if(error){ fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_dc error = cudaFree(d_dc); if(error){ fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_data error = cudaFree(d_data); if(error){ fprintf(stderr, "cudaFree on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_error_sum_arr error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); //Get the system time after we have run the linear regression function. clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the time spent between the start time and end time. time_difference(&start, &finish, &time_elapsed); //Output the time spent running the program. printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
956
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/generate.h>
#include <cstdlib>
#include <cstdio>   // fix: printf was used without including a stdio header

/*
 * Thrust demo: fill a host vector with 12 random ints, sort them on the
 * device, copy the sorted values back, and print them.
 */
int main()
{
    thrust::host_vector<int> arr(12);
    thrust::generate(arr.begin(), arr.end(), rand);

    // Construction from a host_vector performs the host->device copy.
    thrust::device_vector<int> device_array = arr;
    thrust::sort(device_array.begin(), device_array.end());

    // Copy the sorted data back into the host vector.
    thrust::copy(device_array.begin(), device_array.end(), arr.begin());

    for (auto it : arr)
        printf("%d ", it);

    return 0;
}
957
//
// Created by kindr on 2021/4/29.
//

#include "zeroCopyMemory.cuh"
#include "../../common/utils.cuh"
#include <cstdio>
#include <cstring>   // fix: memset is used below but <cstring> was never included
#include <vector>

// Adds 1.0f to every element; one thread per element with a tail guard.
__global__ void addOne(float *vec, size_t N) {
    unsigned idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N) vec[idx] = vec[idx] + 1.f;
}

/*
 * Demonstrates zero-copy (mapped, pinned) host memory: the kernel accesses
 * host memory directly, with no cudaMemcpy.
 *
 * Fix: the original passed the host pointer straight to the kernel, which is
 * only valid on UVA-capable systems; cudaHostGetDevicePointer is the portable
 * way to obtain the device-side alias of a mapped allocation.
 */
void zeroCopyMemory(size_t nElement, size_t nThread) {
    float *vec;
    size_t nBytes = nElement * sizeof(float);
    cudaHostAlloc(&vec, nBytes, cudaHostAllocMapped);
    CHECK(cudaGetLastError());
    memset(vec, 0, nBytes);

    // Device-side address of the mapped host allocation.
    float *devPtr = nullptr;
    cudaHostGetDevicePointer(&devPtr, vec, 0);
    CHECK(cudaGetLastError());

    size_t nBlock = (nElement + nThread - 1) / nThread;  // ceil-div grid size
    addOne<<<nBlock, nThread>>>(devPtr, nElement);
    cudaDeviceSynchronize();
    CHECK(cudaGetLastError());

    bool isSame = true;
    for (size_t i = 0; i < nElement; ++i) {
        if (vec[i] != 1.f) {
            isSame = false;
            break;  // fix: first mismatch settles the answer; no need to scan on
        }
    }
    printf("isSame?: %s", isSame ? "true" : "false");

    cudaFreeHost(vec);
}
958
/*
 This program counts the number of occurrences of a pattern in a string.
 Benchmarking is done between the CPUs sequential execution and GPUs parallel
 execution to compare the two approaches to solving the problem.
*/
/*
 Note that there is a considerable dependency of the ratio of execution times
 of the CPU and GPU on the hardware which is being used to execute the run the
 program.
*/
/*
 Per-thread results are combined with atomicAdd: without the atomic, the
 concurrent increments of *count race and the final total is wrong.
*/
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <string.h>

// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
    double seconds = (double)(end->tv_sec - start->tv_sec);        // whole-second part
    seconds += (end->tv_nsec - start->tv_nsec) * 0.000000001;      // nanosecond part
    return seconds;
}

// GPU kernel: each thread tests one alignment of the pattern against the string.
__global__ void GPU_Match(const char *str, const char *ptrn, int n, int m, int *count)
{
    int pos = threadIdx.x + blockIdx.x * blockDim.x;
    if (pos > (n - m))
        return;                                  // alignment would run off the end

    int matched = 0;
    while (matched < m && ptrn[matched] == str[pos + matched])
        matched++;

    // One atomic increment per full match; plain ++ would race across threads.
    if (matched == m)
        atomicAdd(count, 1);
}

// CPU function is find the total number of matches.
// CPU reference: try every alignment sequentially, counting full matches.
void CPU_Match(const char *str, const char *ptrn, int n, int m, int *count)
{
    for (int i = 0; i < (n - m + 1); i++)
    {
        int j = 0;
        while (j < m && ptrn[j] == str[i + j])
            j++;
        if (j == m)
            *count = *count + 1;
    }
    return;
}

/*
 * Reads a string and a pattern, counts pattern occurrences on both GPU and
 * CPU, and reports the results plus the elapsed times.
 *
 * Fixes vs. the original:
 *  - scanf now uses a field width so oversized input cannot overflow the
 *    1 MB buffers.
 *  - The kernel is launched with 256 threads per block instead of
 *    <<<n-m+1, 1>>> (one-thread blocks waste the GPU, and for long strings
 *    the block count could exceed grid limits; for m > n it was negative).
 *  - The pattern copy transfers m bytes, not n (the original read past the
 *    pattern's meaningful length).
 */
int main()
{
    struct timespec start1, end1; // to find GPU time
    struct timespec start2, end2; // to find CPU time
    int n, m;
    char string[1<<20], pattern[1<<20]; // Buffers

    printf("Enter the string: "); // Input string
    scanf(" %1048575[^\n]", string);   // width = (1<<20)-1 guards the buffer
    printf("Enter the pattern to be matched: "); // Input pattern
    scanf(" %1048575[^\n]", pattern);

    n = strlen(string); // Find string length
    m = strlen(pattern); // Find pattern length

    char *str, *ptrn; // Device memory pointers
    int *count1, *count2; // Device memory pointers

    // Allocating Device Memory
    if(cudaMallocManaged(&str, (1+n)*sizeof(char)) != cudaSuccess)
    {
        printf("Malloc Error 1!");
        return 0;
    }
    if(cudaMallocManaged(&ptrn, (1+n)*sizeof(char)) != cudaSuccess)
    {
        printf("Malloc Error 2!");
        cudaFree(str);
        return 0;
    }
    if(cudaMallocManaged(&count1, sizeof(int)) != cudaSuccess)
    {
        printf("Malloc Error 3!");
        cudaFree(str);
        cudaFree(ptrn);
        return 0;
    }
    if(cudaMallocManaged(&count2, sizeof(int)) != cudaSuccess)
    {
        printf("Malloc Error 4!");
        cudaFree(str);
        cudaFree(ptrn);
        cudaFree(count1);
        return 0;
    }

    int count = 0;
    cudaMemcpy(str, string, n, cudaMemcpyHostToDevice);    // string payload
    cudaMemcpy(ptrn, pattern, m, cudaMemcpyHostToDevice);  // fix: m bytes, not n
    cudaMemcpy(count1, &count, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(count2, &count, sizeof(int), cudaMemcpyHostToDevice);

    int alignments = n - m + 1;        // number of candidate match positions

    clock_gettime(CLOCK_REALTIME, &start1);
    if (alignments > 0)                // fix: skip launch when m > n
    {
        int threads = 256;
        int blocks = (alignments + threads - 1) / threads;   // ceil-div
        GPU_Match<<<blocks, threads>>>(str, ptrn, n, m, count1);
    }
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end1);

    clock_gettime(CLOCK_REALTIME, &start2);
    CPU_Match(str, ptrn, n, m, count2); // Get CPU time
    clock_gettime(CLOCK_REALTIME, &end2);

    printf("\n");
    printf("The GPU result is: %d\n", *(count1));
    printf("The CPU result is: %d\n", *(count2));
    printf("\n");
    printf("The GPU time is: %lf sec\n", time_elapsed(&start1, &end1));
    printf("The CPU time is: %lf sec\n", time_elapsed(&start2, &end2));

    cudaFree(str);
    cudaFree(ptrn);
    cudaFree(count1);
    cudaFree(count2);
    cudaDeviceReset();
    return 0;
}
959
#include "includes.h"

// Backward pass of ReLU: where the forward input Z was non-positive the
// gradient dZ is zeroed; elsewhere dZ is left unchanged. One thread per element.
__global__ void ReluBackKernel(float* Z, float* dZ, int size){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size && Z[idx] <= 0) {
        dZ[idx] = 0;
    }
}
960
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>

// Device kernel: add the two ints and print the sum from the GPU side.
__global__ void add(int *x, int *y, int *z)
{
    *z = *x + *y;
    printf("z is %d\n", *z);
}

/*
 * Reads two ints, adds them on the device, and prints the result.
 * Fix: the host allocations a/b/c were leaked (only cudaFree was called);
 * they are now released with free().
 */
int main()
{
    // Declaration
    int *a, *b, *c;
    int *deva, *devb, *devc;

    // Dynamic Memory Allocation in Host
    a = (int *)malloc(sizeof(int));
    b = (int *)malloc(sizeof(int));
    c = (int *)malloc(sizeof(int));

    // Reserving Memory in Device
    cudaMalloc((int **)&deva, sizeof(int));
    cudaMalloc((int **)&devb, sizeof(int));
    cudaMalloc((int **)&devc, sizeof(int));

    // Inputting values from user (NOTE: scanf result is unchecked, as before)
    printf("Enter value of a and b\n");
    scanf("%d %d", a, b);

    // Copying values from HostToDevice
    cudaMemcpy(deva, a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(devb, b, sizeof(int), cudaMemcpyHostToDevice);

    // Calling Kernel
    add<<<1,1>>>(deva, devb, devc);

    // Copying values from DeviceToHost (blocking copy also syncs the kernel)
    cudaMemcpy(c, devc, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Result is: %d\n", *c);

    // Free up device memory ...
    cudaFree(deva), cudaFree(devb), cudaFree(devc);
    // ... and the host memory that was previously leaked.
    free(a);
    free(b);
    free(c);
    return 0;
}
961
/*
 * The code has been written by Karan Bhanot, Abolaji Adesoji, Aditya Joshi and Dhyanjyoti Nath.
 *
 * Some function definitions are referenced from
 * sample code provided by Christopher D. Carothers,
 * provided as part of his class assignment of Parallel Computing
 * Spring 2020.
 */

// Include headers (including CUDA)
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<cuda.h>
#include<cuda_runtime.h>

// Buffer
extern long long *buf;

/*
 * Binds this MPI rank to a CUDA device (round-robin over the visible devices)
 * and allocates `blocksize` bytes of managed memory into the global `buf`.
 *
 * Fix: the cudaMallocManaged return value is now checked, consistent with the
 * two CUDA calls above it (previously a failed allocation went unnoticed and
 * buf was left invalid).
 */
extern "C" void getBuffer( int rank, int numranks, long long blocksize )
{
    // Check and assign the device for this MPI rank
    cudaError_t cE;
    int cudaDeviceCount;

    // Check if enough devices are available
    if ((cE = cudaGetDeviceCount(&cudaDeviceCount)) != cudaSuccess)
    {
        printf("Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount);
        exit(-1);
    }

    // Set device given that it is available
    if ((cE = cudaSetDevice(rank % cudaDeviceCount)) != cudaSuccess)
    {
        printf(" Unable to have rank %d set to cuda device %d, error is %d \n", rank, (rank % cudaDeviceCount), cE);
        exit(-1);
    }

    // Assign memory to the buf variable (checked, unlike the original)
    if ((cE = cudaMallocManaged(&buf, blocksize)) != cudaSuccess)
    {
        printf(" Unable to allocate %lld bytes on rank %d, error is %d \n", blocksize, rank, cE);
        exit(-1);
    }
}
962
#include <cuda_runtime_api.h>
#include <stdint.h>

// Forward ReLU: out[i] = max(in[i], 0). One thread per element, tail-guarded.
__global__ void rect_fwd_kernel(
    const float *in_act,
    int dim,
    float *out_act)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < dim) {
    float v = in_act[i];
    out_act[i] = (v > 0.0f) ? v : 0.0f;
  }
}

// Host launcher for the forward pass on the given stream.
extern "C" void rembrandt_rect_fwd(
    const float *in_act,
    int dim,
    float *out_act,
    cudaStream_t stream)
{
  const int kThreads = 1024;
  int blocks = (dim + kThreads - 1) / kThreads;   // ceil-div over elements
  rect_fwd_kernel<<<blocks, kThreads, 0, stream>>>(in_act, dim, out_act);
}

// Backward ReLU: pass the upstream gradient through where the forward input
// was positive, zero it elsewhere.
__global__ void rect_bwd_kernel(
    const float *in_act,
    int dim,
    const float *out_delta,
    float *in_delta)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < dim) {
    in_delta[i] = (in_act[i] > 0.0f) ? out_delta[i] : 0.0f;
  }
}

// Host launcher for the backward pass on the given stream.
extern "C" void rembrandt_rect_bwd(
    const float *in_act,
    int dim,
    const float *out_delta,
    float *in_delta,
    cudaStream_t stream)
{
  const int kThreads = 1024;
  int blocks = (dim + kThreads - 1) / kThreads;
  rect_bwd_kernel<<<blocks, kThreads, 0, stream>>>(in_act, dim, out_delta, in_delta);
}
963
#include "includes.h"

/*
 * Thresholded uint16 -> float copy over a 3D volume laid out as
 * idx = (z * height + y) * width + x. Expects a 3D launch.
 *
 * Fix: a bounds guard was added — without it, any launch whose block dims do
 * not evenly divide width/height/depth read and wrote out of bounds.
 */
__global__ void Copy3DKernel ( const unsigned short *d_src, float *d_dst, float min_intensity, const int width, const int height, const int depth )
{
    const int baseX = blockIdx.x * blockDim.x + threadIdx.x;
    const int baseY = blockIdx.y * blockDim.y + threadIdx.y;
    const int baseZ = blockIdx.z * blockDim.z + threadIdx.z;

    // Guard the tail threads of a grid that over-covers the volume.
    if (baseX >= width || baseY >= height || baseZ >= depth)
        return;

    const int idx = (baseZ * height + baseY) * width + baseX;

    // Values below the threshold are written as 0, not skipped.
    const float intensity = (float)d_src[idx];
    d_dst[idx] = (intensity >= min_intensity) ? intensity : 0;
}
964
#include "lambert_implement.h"
#include "brdf_common.h"

/*
 * One thread per vertex: scale the normalized position by the Lambertian
 * BRDF value (reflectance / pi).
 *
 * Fixes: an explicit element-count guard (the original assumed the grid
 * exactly tiled the data), and a float literal for pi so the division stays
 * in single precision instead of being promoted to double.
 */
__global__ void lambert_kernel(float3* pos, unsigned int width, unsigned int n, float reflectance)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int idx = y * width + x;
    if (x >= width || idx >= n)
        return;   // tail guard for partially-filled blocks

    float3 pin = pos[idx];
    float r = reflectance / 3.14159265f;   // Lambertian BRDF: albedo / pi
    pos[idx] = scale(normalize(pin), r);
}

extern "C" void lambert_brdf(float3 *pos, unsigned numVertices, unsigned width, float reflectance)
{
    dim3 block(8, 8, 1);
    unsigned height = numVertices / width;   // NOTE: assumes numVertices % width == 0, as before
    // Round the grid up so widths/heights that are not multiples of 8 are
    // still fully covered (extra threads exit via the kernel's guard).
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y, 1);
    lambert_kernel<<< grid, block>>>(pos, width, numVertices, reflectance);
}
965
//raytracer.mustafaisik.net// #include "timer.cuh" Timer::Timer() : m_start_time(std::chrono::system_clock::now()) {} //start void Timer::start() { m_start_time = std::chrono::system_clock::now(); } //getTime double Timer::getTime() { using second = std::chrono::duration<double, std::ratio <1>>; return std::chrono::duration_cast<second>(std::chrono::system_clock::now() - m_start_time).count(); }
966
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <stdint.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define PAGESIZE 32
#define PHYSICAL_MEM_SIZE 32768
#define STORAGE_SIZE 131072
#define DATAFILE "./data.bin"
#define OUTFILE "./snapshot.bin"

typedef unsigned char uchar;
typedef uint32_t u32;

// Reads up to ssz bytes from `filename` into `input`; returns the number of
// bytes read (0 if the file cannot be opened). The original ignored ssz and
// could overflow `input` for oversized files, and never checked fopen().
int load_bFile(const char* filename, uchar* input, int ssz)
{
    int sz = 0;
    FILE *myfile = fopen(filename, "rb");
    if (myfile == NULL)
        return 0;
    while (sz < ssz && 1 == fread(input + sz, sizeof(uchar), 1, myfile)) {
        sz++;
    }
    fclose(myfile);
    return sz;
}

// Writes input_size bytes from `results` to `out` (silently skipped if the
// file cannot be created).
void write_bFile(const char* out, uchar* results, int input_size)
{
    FILE *myfile = fopen(out, "wb");
    if (myfile == NULL)
        return;
    for (int i = 0; i < input_size; i++) {
        fputc(results[i], myfile);
    }
    fclose(myfile);
}

__device__ __managed__ int PAGE_ENTRIES = 0;
__device__ __managed__ int PAGEFAULT = 0;                       // total page faults observed
__device__ __managed__ uchar storage[STORAGE_SIZE];             // backing store ("disk")
__device__ __managed__ uchar results[STORAGE_SIZE];             // snapshot output buffer
__device__ __managed__ uchar input[STORAGE_SIZE];               // raw input data
__device__ __managed__ int pt_entries = PHYSICAL_MEM_SIZE/PAGESIZE;  // number of physical frames

// Page table in dynamic shared memory, laid out as three parallel arrays of
// pt_entries words:
//   pt[i]                 — page (frame number) resident in physical frame i
//   pt[i +   pt_entries]  — valid flag: 'v' = in use, 'i' = free
//   pt[i + 2*pt_entries]  — LRU use counter
extern __shared__ u32 pt[];

// Marks every physical frame free and resets the LRU counters.
__device__ void init_pageTable(int pt_entries)
{
    for (int i = 0; i < pt_entries; i++) {
        pt[i] = 5555;                 // sentinel: no page resident
        pt[i + pt_entries] = 'i';     // frame is free
        pt[i + pt_entries * 2] = 0;   // LRU counter
    }
}

// Swaps page `frame` in from the backing store `log` into physical frame
// `are` of `phy`, clearing the source bytes to 'n'.
__device__ void swap_block(uchar *phy, uchar *log, int are, int frame)
{
    int i, k;
    for (i = 0, k = are * PAGESIZE; i < PAGESIZE; k++, i++) {
        phy[k] = log[frame * PAGESIZE + i];
        log[frame * PAGESIZE + i] = 'n';
    }
}

// Swaps physical frame `victim` of `phy` back out to page `frame` of the
// backing store `log`, clearing the evicted frame bytes to 'n'.
__device__ void swap_out(uchar *phy, uchar *log, int victim, int frame)
{
    int i, k;
    for (i = 0, k = victim * PAGESIZE; i < PAGESIZE; k++, i++) {
        log[frame * PAGESIZE + i] = phy[k];
        phy[k] = 'n';
    }
}

// Translates (frame_num, offset) to a physical-memory address, faulting pages
// in from `storage` on demand and evicting the least-recently-used frame when
// physical memory is full.
__device__ u32 paging(uchar *buffer, u32 frame_num, u32 offset)
{
    int flag = 0;
    int ad = 0;
    int i;

    // Pass 1: look for a hit, or the first free frame.
    for (i = 0; i < pt_entries; i++) {
        if (pt[i] == frame_num) {              // resident: hit
            flag = 1;
            ad = i;
            break;
        } else if (pt[i + pt_entries] == 'i') { // free frame: fault + fill
            PAGEFAULT++;
            flag = 2;
            ad = i;
            break;
        }
    }

    if (flag == 1) {
        // Hit: bump the LRU counter and return the physical address.
        pt[ad + 2 * pt_entries]++;
        ad = (ad * PAGESIZE) + offset;
    } else if (flag == 2) {
        // Fault with a free frame: bring the page in and update the table.
        pt[ad + pt_entries] = 'v';
        pt[ad] = frame_num;
        swap_block(buffer, storage, ad, frame_num);
        pt[ad + pt_entries * 2]++;
        ad = (ad * PAGESIZE) + offset;
    } else {
        // No hit, no free frame: evict the frame with the smallest LRU
        // counter (scanning bottom-up, ties go to the lowest index) and retry.
        int min = pt[pt_entries - 1 + pt_entries * 2];
        int victim = 0;
        for (i = pt_entries - 1; i >= 0; i--) {
            if (pt[i + 2 * pt_entries] <= min) {
                min = pt[i + 2 * pt_entries];
                victim = i;
            }
        }
        swap_out(buffer, storage, victim, pt[victim]);
        pt[victim + pt_entries] = 'i';
        ad = paging(buffer, frame_num, offset);  // retry — a free frame now exists
    }
    return ad;
}

// Reads one byte through the paging layer.
__device__ uchar Gread(uchar *buffer, u32 addr)
{
    u32 frame_num = addr / PAGESIZE;
    u32 offset = addr % PAGESIZE;
    addr = paging(buffer, frame_num, offset);
    return buffer[addr];
}

// Writes one byte through the paging layer.
__device__ void Gwrite(uchar *buffer, u32 addr, uchar value)
{
    u32 frame_num = addr / PAGESIZE;
    u32 offset = addr % PAGESIZE;
    addr = paging(buffer, frame_num, offset);
    buffer[addr] = value;
}

// Copies input_size bytes starting at `offset` out through the paging layer.
__device__ void snapshot(uchar *results, uchar* buffer, int offset, int input_size)
{
    for (int i = 0; i < input_size; i++)
        results[i] = Gread(buffer, i + offset);
}

// Single-thread driver: writes the whole input through virtual memory, reads
// the last 10 bytes back, then snapshots everything while counting faults.
__global__ void mykernel(int input_size)
{
    // 32 KB static shared "physical memory" + 16 KB dynamic shared page table.
    __shared__ uchar data[PHYSICAL_MEM_SIZE];
    init_pageTable(pt_entries);

    //####Gwrite/Gread code section start####
    for (int i = 0; i < input_size; i++)
        Gwrite(data, i, input[i]);

    // The original loop body was `int value = Gread(data,i);` without braces —
    // a declaration cannot be the statement of a for loop, so it did not
    // compile as written. The read is kept for its paging side effects.
    for (int i = input_size - 1; i >= input_size - 10; i--) {
        (void)Gread(data, i);
    }

    snapshot(results, data, 0, input_size);
    //####Gwrite/Gread code section end####

    printf("pagefault times = %d\n", PAGEFAULT);
}

int main()
{
    int input_size = load_bFile(DATAFILE, input, STORAGE_SIZE);
    cudaSetDevice(2);  // target device index 2, as in the original setup
    // Third launch argument = dynamic shared bytes for the page table.
    mykernel<<<1, 1, 16384>>>(input_size);
    cudaDeviceSynchronize();
    cudaDeviceReset();
    write_bFile(OUTFILE, results, input_size);
    return 0;
}
967
// GPU matrix-transpose exercise: copies array a[] into c[] with the element at
// row i, column j moved to row j, column i of a sqrt_dim x sqrt_dim matrix.
// (The original Spanish header described an unrelated multiply-by-constant
// assignment; kept here only as provenance.)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

// M and N: grid and block side lengths (M*M blocks of N*N threads).
#define M 1
#define N 10

// Transposes a[] into c[]. Each thread handles one element when there are at
// least dim threads; otherwise each thread handles a contiguous slice.
__global__ void multiply(const int a[], int c[], const int sqrt_dim, const int thread_number)
{
    // Flatten the 3-D thread coordinates into a single linear thread index.
    int index = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
              + threadIdx.z * blockDim.y * blockDim.x
              + threadIdx.y * blockDim.x
              + threadIdx.x;

    // Row/column of this thread's element in the row-major square matrix.
    // (The `index < sqrt_dim` branch is redundant — index % sqrt_dim gives the
    // same value there — but is kept to preserve the original exactly.)
    int index_i = index < sqrt_dim ? index : (int)index % sqrt_dim;
    int index_j = (index - index_i) / sqrt_dim;
    int dim = sqrt_dim * sqrt_dim;

    if (index < dim) {
        printf("index %i in matrix form would be i=%i and j=%i\n", index, index_i, index_j);
        if (dim <= thread_number) {
            // More threads than elements: one element per thread.
            printf("Thread %i; Modifying value of index %i\n ", index, index);
            c[index_j + index_i * sqrt_dim] = a[index];
        } else {
            // Fewer threads than elements: each thread copies a slice.
            // NOTE(review): inside these loops the destination index is
            // computed from the thread's own (index_i, index_j), not from the
            // loop variable i, so every iteration overwrites the same target.
            // This path never runs with the default M/N/size_array settings;
            // flagged rather than changed.
            printf("index %i in matrix form would be i=%i and j=%i\n", index, index_i, index_j);
            if (index != thread_number - 1) {
                for (int i = index * (int)(dim / thread_number); i < index * (int)(dim / thread_number) + (int)(dim / thread_number); i++) {
                    printf("Thread %i; Modifying value of index %i \n", index, i);
                    c[index_j + index_i * sqrt_dim] = a[i];
                }
            } else {
                // Last thread takes all remaining entries.
                for (int i = index * (int)(dim / thread_number); i < dim; i++) {
                    printf("Thread %i; Modifying value of index %i\n", index, i);
                    c[index_j + index_i * sqrt_dim] = a[i];
                }
            }
        }
    }
}

int main(int argc, char *argv[])
{
    // Wall-clock-ish timing via clock().
    clock_t time_begin = clock();

    // Host and device arrays.
    int *d_array1 = 0, *d_array2 = 0;
    int *h_array1 = 0, *h_array2 = 0;
    int size_array = 16;  // must be a perfect square

    h_array1 = (int*)malloc(size_array * sizeof(int));
    h_array2 = (int*)malloc(size_array * sizeof(int));

    // Fill the input with small random values and print it as a matrix.
    for (int i = 0; i < size_array; i++) {
        h_array1[i] = rand() % 10;
        printf("%i\t", h_array1[i]);
        if ((i + 1) % (int)(sqrt((float)size_array)) == 0) printf("\n");
    }
    printf("\n");

    cudaMalloc(&d_array1, size_array * sizeof(int));
    cudaMalloc(&d_array2, size_array * sizeof(int));
    cudaMemcpy(d_array1, h_array1, sizeof(int) * size_array, cudaMemcpyHostToDevice);

    dim3 bloque(N, N);  // 2-D block of N*N threads
    dim3 grid(M, M);    // 2-D grid of M*M blocks
    int thread_number = N * N * M * M;
    // Explicit truncation to int (the kernel parameter type).
    multiply<<<grid, bloque>>>(d_array1, d_array2, (int)sqrt((float)size_array), thread_number);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // direct replacement.
    cudaDeviceSynchronize();

    // Copy the transposed result back and print it.
    cudaMemcpy(h_array2, d_array2, sizeof(int) * size_array, cudaMemcpyDeviceToHost);
    for (int i = 0; i < size_array; i++) {
        printf("%i\t", h_array2[i]);
        if ((i + 1) % (int)(sqrt((float)size_array)) == 0) printf("\n");
    }
    printf("\n");

    free(h_array2);
    free(h_array1);
    cudaFree(d_array2); cudaFree(d_array1);

    printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F) * 1000);  //1.2s
}
968
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

// Comment out this line to enable debug mode
#define NDEBUG

/* Time stamp in seconds (double), millisecond-ish precision. */
__host__ double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_usec / 1000000 + tv.tv_sec;
}

/* Fills the (nB)^3 source volume B: zero on the i/j/k = 0 faces, a repeating
 * pattern elsewhere. B carries a one-cell halo around the n^3 domain. */
__host__ void initB(float *B, int nB) {
    for (int i = 0; i < nB; i++) {
        int iIndex = i * nB * nB;
        for (int j = 0; j < nB; j++) {
            int ijIndex = iIndex + j * nB;
            for (int k = 0; k < nB; k++) {
                int ijkIndex = ijIndex + k;
                if (i == 0 || j == 0 || k == 0) {
                    B[ijkIndex] = 0;
                } else {
                    B[ijkIndex] = ((i - 1 + j - 1 + k - 1) % 10) * (float)1.1;
                }
            }
        }
    }
}

/* Accesses B at domain coordinates (i,j,k) — the +1 shifts skip the halo. */
#define h_getB(B, nB, i, j, k) B[((i) + 1) * nB * nB + ((j) + 1) * nB + ((k) + 1)]

/* CPU reference: 6-point Jacobi stencil scaled by 0.8; the far boundary
 * (index n-1 in any dimension) is forced to zero. */
__host__ void jacobiRelaxationReference(float *A, float *B, int n) {
    int nB = n + 1;
    for (int i = 0; i < n; i++) {
        int iIndex = i * n * n;
        for (int j = 0; j < n; j++) {
            int ijIndex = iIndex + j * n;
            for (int k = 0; k < n; k++) {
                int ijkIndex = ijIndex + k;
                if (i >= n - 1 || j >= n - 1 || k >= n - 1) {
                    A[ijkIndex] = 0.0;
                } else {
                    A[ijkIndex] = (float)0.8 *
                        (h_getB(B, nB, i - 1, j, k) + h_getB(B, nB, i + 1, j, k) +
                         h_getB(B, nB, i, j - 1, k) + h_getB(B, nB, i, j + 1, k) +
                         h_getB(B, nB, i, j, k - 1) + h_getB(B, nB, i, j, k + 1));
                }
            }
        }
    }
}

/* Element-wise exact comparison of two n^3 volumes; returns 1 on match. */
__host__ int checkA(float *Expected, float *Actual, int n) {
    for (int i = 0; i < n; i++) {
        int iIndex = i * n * n;
        for (int j = 0; j < n; j++) {
            int ijIndex = iIndex + j * n;
            for (int k = 0; k < n; k++) {
                int ijkIndex = ijIndex + k;
                if (Expected[ijkIndex] != Actual[ijkIndex]) {
#ifndef NDEBUG
                    printf("(i=%d, j=%d, k=%d) Expected=%f Actual=%f\n",
                           i, j, k, Expected[ijkIndex], Actual[ijkIndex]);
#endif
                    return 0;
                }
            }
        }
    }
    return 1;
}

/* Alternating-sign checksum of A used to compare CPU and GPU results. */
__host__ double sumA(float *A, int n) {
    double sum = 0;
    for (int i = 0; i < n; i++) {
        int iIndex = i * n * n;
        for (int j = 0; j < n; j++) {
            int ijIndex = iIndex + j * n;
            for (int k = 0; k < n; k++) {
                int ijkIndex = ijIndex + k;
                sum += A[ijkIndex] * (((i + j + k) % 10) ? 1 : -1);
            }
        }
    }
    return sum;
}

/* GPU stencil over the chunk of i-planes beginning at startingI. Each block
 * stages a (bx+2)(by+2)(bz+2) tile of B, including a one-cell halo, in
 * dynamic shared memory. blockDim.z must be 1 (the halo loads assume it). */
__global__ void jacobiRelaxation(float *A, float *B, int n, int startingI) {
    extern __shared__ float s_data[];

    /* Global indices */
    int globalK = blockDim.x * blockIdx.x + threadIdx.x;
    int globalJ = blockDim.y * blockIdx.y + threadIdx.y;
    int globalI = blockDim.z * blockIdx.z + threadIdx.z + startingI;
    int globalIdx = globalI * n * n + globalJ * n + globalK;

    int nB = n + 1;
    int sizePerGlobalBI = nB * nB;
    int sizePerGlobalBJ = nB;
    int globalBIdx = (globalI + 1) * sizePerGlobalBI
                   + (globalJ + 1) * sizePerGlobalBJ
                   + (globalK + 1);

    /* Shared-memory tile indices (one-cell halo on every side) */
    int sizeJ = blockDim.y + 2;
    int sizeK = blockDim.x + 2;
    int sizePerLocalI = sizeJ * sizeK;
    int sizePerLocalJ = sizeK;
    int localIdx = (threadIdx.z + 1) * sizePerLocalI
                 + (threadIdx.y + 1) * sizePerLocalJ
                 + (threadIdx.x + 1);

    /* The original returned here for out-of-range threads — i.e. BEFORE
     * __syncthreads(). In a block straddling the domain edge only part of the
     * block reached the barrier, which is undefined behavior. Guard the work
     * instead so every thread reaches the barrier. */
    bool inRange = (globalK < n) && (globalJ < n) && (globalI < n);

    if (inRange) {
        s_data[localIdx] = B[globalBIdx];
        /* Face threads additionally fetch the halo on both sides of their
         * dimension.
         * NOTE(review): the "+blockDim" far-halo loads can read one cell past
         * the logical extent of B at domain edges (present in the original
         * too); those values only feed cells that are overwritten with 0 —
         * confirm B's allocation covers the worst case. */
        if (threadIdx.z == 0) {
            s_data[localIdx - sizePerLocalI] = B[globalBIdx - sizePerGlobalBI];
            s_data[localIdx + blockDim.z * sizePerLocalI] = B[globalBIdx + blockDim.z * sizePerGlobalBI];
        }
        if (threadIdx.y == 0) {
            s_data[localIdx - sizePerLocalJ] = B[globalBIdx - sizePerGlobalBJ];
            s_data[localIdx + blockDim.y * sizePerLocalJ] = B[globalBIdx + blockDim.y * sizePerGlobalBJ];
        }
        if (threadIdx.x == 0) {
            s_data[localIdx - 1] = B[globalBIdx - 1];
            s_data[localIdx + blockDim.x] = B[globalBIdx + blockDim.x];
        }
    }
    __syncthreads();
    if (!inRange) {
        return;
    }

    if (globalK == n - 1 || globalJ == n - 1 || globalI == n - 1) {
        A[globalIdx] = 0;
    } else {
        A[globalIdx] = (float)0.8 *
            (s_data[localIdx - sizePerLocalI] + s_data[localIdx + sizePerLocalI] +
             s_data[localIdx - sizePerLocalJ] + s_data[localIdx + sizePerLocalJ] +
             s_data[localIdx - 1] + s_data[localIdx + 1]);
    }
}

int main(int argc, char *argv[]) {
    int error = 0;

    /* Get Dimension */
    if (argc != 2) {
        printf("Error: The number of arguments is not exactly 1\n");
        return 0;
    }
    int n = atoi(argv[1]);
    /* Cast before multiplying — plain int n*n*n overflows for n > 1290. */
    size_t numElem = (size_t)n * n * n;
    size_t numBytes = numElem * sizeof(float);
    int nB = n + 1;
    size_t numElemB = (size_t)nB * nB * nB;
    size_t numBytesB = numElemB * sizeof(float);
#ifndef NDEBUG
    printf("n=%d, numElem=%ld, numBytes=%ld\n", n, numElem, numBytes);
    printf("nB=%d, numElemB=%ld, numBytesB=%ld\n", nB, numElemB, numBytesB);
#endif

    /* Allocate pinned host memory so the async copies below can overlap. */
    float *h_B = NULL;
    error = error || cudaHostAlloc((void **)&h_B, numBytesB, 0);
#ifndef NDEBUG
    float *h_hA = (float *)malloc(numBytes);
#endif
    float *h_dA = NULL;
    error = error || cudaHostAlloc((void **)&h_dA, numBytes, 0);
    if (error) {
        printf("Error: cudaHostAlloc returns error\n");
        return 0;
    }

    /* Initialize Host Memory */
    initB(h_B, nB);
#ifndef NDEBUG
    double timestampPreCpuKernel = getTimeStamp();
    jacobiRelaxationReference(h_hA, h_B, n);
    double timestampPostCpuKernel = getTimeStamp();
    printf("CPU: %lf %ld\n", sumA(h_hA, n),
           (long)ceil(1000 * (timestampPostCpuKernel - timestampPreCpuKernel)));
#endif

    /* Allocate Device Memory */
    float *d_B = NULL;
    error = error || cudaMalloc((void **)&d_B, numBytesB);
    float *d_A = NULL;
    error = error || cudaMalloc((void **)&d_A, numBytes);
    if (error) {
        printf("Error: cudaMalloc returns error\n");
        return 0;
    }

    /* Split the i-dimension across NUM_STREAM streams; the last stream also
     * takes the remainder planes. */
#define NUM_STREAM 2
    int nIStreams[NUM_STREAM];
    for (int i = 0; i < NUM_STREAM; i++) {
        nIStreams[i] = n / NUM_STREAM;
    }
    nIStreams[NUM_STREAM - 1] += n % NUM_STREAM;

    dim3 d_blockDim;
    d_blockDim.x = 32;
    d_blockDim.y = 32;
    d_blockDim.z = 1; // must be 1
    dim3 d_gridDimStreams[NUM_STREAM];
    for (int i = 0; i < NUM_STREAM; i++) {
        d_gridDimStreams[i].x = (n - 1) / d_blockDim.x + 1;
        d_gridDimStreams[i].y = (n - 1) / d_blockDim.y + 1;
        d_gridDimStreams[i].z = (nIStreams[i] - 1) / d_blockDim.z + 1;
    }

    cudaStream_t d_streams[NUM_STREAM];
    for (int i = 0; i < NUM_STREAM; i++) {
        error = error || cudaStreamCreate(&d_streams[i]);
    }
    if (error) {
        printf("Error: cudaStreamCreate returns error\n");
        return 0;
    }

    // TIMER BEGIN
    /* Copy B to the device in per-stream chunks. Stream 0's chunk carries an
     * extra halo plane on each side; interior chunks rely on the neighbor
     * chunk's planes already being present, hence the explicit syncs between
     * consecutive copies. */
    double timestampPreCpuGpuTransfer = getTimeStamp();
    size_t numElemBStream1 = 0;
    if (NUM_STREAM != 1) {
        numElemBStream1 = (size_t)(nIStreams[0] + 1 + 1) * nB * nB;
    } else {
        numElemBStream1 = (size_t)(nIStreams[0] + 1) * nB * nB;
    }
    error = error || cudaMemcpyAsync(d_B, h_B, numElemBStream1 * sizeof(float),
                                     cudaMemcpyHostToDevice, d_streams[0]);
    if (NUM_STREAM != 1) {
        cudaStreamSynchronize(d_streams[0]);
    }
    /* size_t (was int): keeps the %ld printf below and the comparison with
     * numElemB well-defined. */
    size_t numElemBStreams = numElemBStream1;
    for (int i = 1; i < NUM_STREAM; i++) {
        int nBIStreami = nIStreams[i];
        size_t numElemBStreami =
            (size_t)((i == NUM_STREAM - 1) ? nBIStreami - 1 : nBIStreami) * nB * nB;
        error = error || cudaMemcpyAsync(d_B + numElemBStreams, h_B + numElemBStreams,
                                         numElemBStreami * sizeof(float),
                                         cudaMemcpyHostToDevice, d_streams[i]);
        numElemBStreams += numElemBStreami;
        if (i != NUM_STREAM - 1) {
            // Synchronize between cudaMemcpyAsync
            cudaStreamSynchronize(d_streams[i]);
        }
    }
    if (numElemBStreams != numElemB) {
        printf("Error: cudaMemcpyAsync does not cover entire B (%ld != %ld)\n",
               numElemBStreams, numElemB);
        return 0;
    }
    if (error) {
        printf("Error: cudaMemcpyAsync B returns error %d\n", error);
        return 0;
    }

    /* Launch one kernel per stream over its chunk of i-planes. */
    int d_smemNumElem = (d_blockDim.x + 2) * (d_blockDim.y + 2) * (d_blockDim.z + 2);
    size_t d_smemNumBytes = d_smemNumElem * sizeof(float);
    size_t d_startingI = 0;
    for (int i = 0; i < NUM_STREAM; i++) {
        jacobiRelaxation<<<d_gridDimStreams[i], d_blockDim, d_smemNumBytes,
                           d_streams[i]>>>(d_A, d_B, n, d_startingI);
        d_startingI += nIStreams[i];
    }

    /* Copy each stream's slab of A back as soon as its kernel finishes. */
    size_t numElemAStreams = 0;
    for (int i = 0; i < NUM_STREAM; i++) {
        size_t numElemAStreami = (size_t)nIStreams[i] * n * n;
        error = error || cudaMemcpyAsync(h_dA + numElemAStreams, d_A + numElemAStreams,
                                         numElemAStreami * sizeof(float),
                                         cudaMemcpyDeviceToHost, d_streams[i]);
        numElemAStreams += numElemAStreami;
    }
    if (numElemAStreams != numElem) {
        printf("Error: cudaMemcpyAsync does not cover entire A\n");
        return 0;
    }
    if (error) {
        printf("Error: cudaMemcpyAsync A returns error %d\n", error);
        return 0;
    }

    /* Synchronize Streams */
    for (int i = 0; i < NUM_STREAM; i++) {
        cudaStreamSynchronize(d_streams[i]);
    }
    double timestampPostGpuCpuTransfer = getTimeStamp();
    // TIMER END

    /* Free Device Memory */
    cudaFree(d_A);
    d_A = NULL;
    cudaFree(d_B);
    d_B = NULL;

    /* Output: checksum and total GPU time (H2D + kernels + D2H) in ms. */
    double aValue = sumA(h_dA, n);
    long totalGpuElapased =
        (long)ceil(1000 * (timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer));
    printf("%lf %ld\n", aValue, totalGpuElapased);

#ifndef NDEBUG
    for (int i = 0; i < NUM_STREAM; i++) {
        printf("d_gridDimStream%d=(%d, %d, %d), d_blockDim=(%d, %d, %d), d_smemNumBytes=%ld\n",
               i, d_gridDimStreams[i].x, d_gridDimStreams[i].y, d_gridDimStreams[i].z,
               d_blockDim.x, d_blockDim.y, d_blockDim.z, d_smemNumBytes);
    }
    /* Verify Device Result with Host Result */
    error = error || !checkA(h_hA, h_dA, n);
    if (error) {
        /* Message fixed — the original read "does not with". */
        printf("Error: GPU result does not match CPU result\n");
    }
#endif

    /* Free Host Memory */
    cudaFreeHost(h_dA);
    h_dA = NULL;
#ifndef NDEBUG
    free(h_hA);
    h_hA = NULL;
#endif
    cudaFreeHost(h_B);
    h_B = NULL;

    /* Clean Up Device Resource */
    cudaDeviceReset();
}
969
#include <cuda_runtime.h>
#include <stdio.h>

#define TILE_SIZE 4
#define INPUT_SIZE 12
#define MASK_WIDTH 5

// Convolution mask in constant memory; filled via cudaMemcpyToSymbol in main.
__constant__ float M[MASK_WIDTH];

// 1-D convolution; each block stages its TILE_SIZE input slice in shared
// memory and falls back to global loads for halo elements outside the tile.
__global__ void convolution_shared_memory(float *N, float *P){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float N_s[TILE_SIZE];
    N_s[threadIdx.x] = N[i];
    __syncthreads();

    int this_tile_start_point = blockIdx.x * blockDim.x;
    int next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
    int n_start_point = i - (MASK_WIDTH / 2);
    float Pvalue = 0;
    for (int j = 0; j < MASK_WIDTH; j++) {
        int N_index = n_start_point + j;
        if (N_index >= 0 && N_index < INPUT_SIZE) {
            if ((N_index >= this_tile_start_point) && (N_index < next_tile_start_point)) {
                // Element lives in this block's tile: serve it from shared memory.
                Pvalue += N_s[threadIdx.x + j - (MASK_WIDTH / 2)] * M[j];
            } else {
                // Halo element: read from global memory.
                Pvalue += N[N_index] * M[j];
            }
        }
    }
    P[i] = Pvalue;
}

// Variant reading the mask from constant memory only (kept for comparison;
// not launched by main).
__global__ void convolution_constant_memory(float *N, float *P, int Width){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float Pvalue = 0;
    int n_start_point = i - (MASK_WIDTH / 2);
    for (int j = 0; j < MASK_WIDTH; j++) {
        if (n_start_point + j >= 0 && n_start_point + j < Width) {
            Pvalue += N[n_start_point + j] * M[j];
        }
    }
    P[i] = Pvalue;
}

// Variant taking the mask from global memory (kept for comparison; not
// launched by main).
__global__ void convolution_global_memory(float *N, float *M, float *P, int Width){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float Pvalue = 0;
    int n_start_point = i - (MASK_WIDTH / 2);
    for (int j = 0; j < MASK_WIDTH; j++) {
        if (n_start_point + j >= 0 && n_start_point + j < Width) {
            Pvalue += N[n_start_point + j] * M[j];
        }
    }
    P[i] = Pvalue;
}

int main(){
    // Device input and output
    float *d_N = 0;
    float *d_P = 0;
    cudaMalloc(&d_N, INPUT_SIZE * sizeof(float));
    cudaMalloc(&d_P, INPUT_SIZE * sizeof(float));

    // Host input, output, and mask
    float *h_N = (float*)malloc(INPUT_SIZE * sizeof(float));
    float *h_P = (float*)malloc(INPUT_SIZE * sizeof(float));
    float *h_M = (float*)malloc(MASK_WIDTH * sizeof(float));

    // Initialize input on host
    for (int i = 0; i < INPUT_SIZE; ++i) {
        h_N[i] = (float)i;
    }

    // Transfer input to device. (The original also uploaded the uninitialized
    // h_P buffer into d_P — removed: d_P is fully overwritten by the kernel.)
    cudaMemcpy(d_N, h_N, INPUT_SIZE * sizeof(float), cudaMemcpyHostToDevice);

    // Initialize mask on host and copy it to constant memory.
    for (int j = 0; j < MASK_WIDTH; ++j) {
        h_M[j] = (float)j;
    }
    cudaMemcpyToSymbol(M, h_M, MASK_WIDTH * sizeof(float));

    // Launch the shared-memory convolution kernel.
    convolution_shared_memory<<<(INPUT_SIZE + TILE_SIZE - 1) / TILE_SIZE, TILE_SIZE>>>(d_N, d_P);

    // Retrieve and print the result.
    cudaMemcpy(h_P, d_P, INPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < INPUT_SIZE; ++i) {
        printf("%f\n", h_P[i]);
    }

    // Cleanup. The original called cudaFree(M); __constant__ symbols are not
    // cudaMalloc allocations and must not be freed — that call is removed.
    cudaFree(d_N);
    cudaFree(d_P);
    free(h_N);
    free(h_P);
    free(h_M);
}
970
// Minimal complex-of-float value type usable from device code.
struct DummyComplexFloat
{
public:
    float Real;   // real component
public:
    float Imag;   // imaginary component

    // Zero value (0 + 0i).
    __device__ DummyComplexFloat() : Real(0), Imag(0) {}

    // Construct from explicit real/imaginary parts.
    __device__ DummyComplexFloat(float r, float i) : Real(r), Imag(i) {}

    // Component-wise complex addition; returns a new value, leaves *this untouched.
    __device__ DummyComplexFloat Add(DummyComplexFloat c)
    {
        return DummyComplexFloat(Real + c.Real, Imag + c.Imag);
    }
};
971
// Lab skeleton: five progressively optimized parallel-sum-reduction kernels.
// All bodies are intentionally unimplemented student exercises — the only
// executable statement below is the __syncthreads() in sum_kernel2.

__global__ void sum_kernel(float *g_odata, float *g_idata, int N) // Naive kernel
{
    // YOUR TASKS:
    // - Write a naive kernel where parallel sum reduction is done on a per block basis
    //   and the reduction sum is returned in g_odata.
    // - For simplicity, assume the kernel only considers a dataset within each block
    //   of size 2^p, p=1,2,...
    //
    // access thread id within block
    //unsigned int t = ???;
    // Reduction per block in global memory
    //?????
    // Output partial sum
    //if (t==0) g_odata[ ????? ] = g_idata[ ????? ];
}

__global__ void sum_kernel2(float *g_odata, float *g_idata, int N) // Shared memory kernel
{
    // YOUR TASKS:
    // - Improve the naive kernel to use shared memory per block for the reduction.
    // - Employ dynamic allocation of shared memory, where the mem block size is
    //   determined by the host.
    // - Threads within a block should collaborate on loading data from device
    //   (global) memory to shared memory.
    // - For simplicity, assume the kernel only considers a dataset within each block
    //   of size 2^p, p=1,2,...
    //
    // shared mem array
    // the size is determined by the host application
    // access thread id
    //unsigned int t = ????? ;
    // read in input data to shared memory from global memory
    //?????
    // Barrier left in place for the load phase the student is to write above:
    // all threads must finish writing shared memory before the reduction reads it.
    __syncthreads();
    // Reduction per block in shared memory
    //?????
    // Output partial sum
    //if (t==0) g_odata[blockIdx.x] = ?????;
}

__global__ void sum_kernel3(float *g_odata, float *g_idata, int N)
{
    // YOUR TASKS:
    // - Change the stride pattern in sum_kernel2 for the reduction step.
}

__global__ void sum_kernel4(float *g_odata, float *g_idata, int N)
{
    // YOUR TASKS:
    // - Change the stride pattern in sum_kernel3 for the reduction step.
}

__global__ void sum_kernel5(float *g_odata, float *g_idata, int N)
{
    // YOUR TASKS:
    // - Optimize as much as possible.
}
972
#include "includes.h"

// Scales each of the nwl rows of a row-major (nwl x dim) array by a per-row
// factor: xx1[j*dim + i] = zr[j] * xx[j*dim + i].
// Launch: 2-D grid, x indexing the column (0..dim) and y the row (0..nwl);
// out-of-range threads do nothing.
__global__ void scale2DArray ( const int dim, const int nwl, const float *zr, const float *xx, float *xx1 )
{
    const int col = threadIdx.x + blockDim.x * blockIdx.x;  // position within a row
    const int row = threadIdx.y + blockDim.y * blockIdx.y;  // row index

    if ( col < dim && row < nwl ) {
        const int idx = col + row * dim;
        xx1[idx] = zr[row] * xx[idx];
    }
}
973
#include "includes.h"

// Divides the first element of each row of the row-major N x N matrix `a`
// by N: a[idx*N] /= N for idx in [0, N).
//
// The original guard was (idx*N) < (N*N), which is equivalent to idx < N for
// in-range values but can overflow a signed int for large N (undefined
// behavior); compare idx against N directly instead.
__global__ void divMat(float *a, int N)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N)
        a[idx * N] /= N;
}
974
#include <stdio.h>

// One Jacobi relaxation sweep over the N x N interior of a padded grid.
// uold/unew/f are (N+2) x (N+2) row-major arrays with a one-cell ghost
// border; each thread updates one interior cell from its four neighbors plus
// the scaled source term lambda2*f.
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
    // Flatten the 2-D grid of 2-D blocks into one linear thread index.
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    if(index < N*N){
        // M is the padded row width; map the interior index to the padded
        // array: +M+1 skips the first ghost row and the left ghost cell,
        // +2*(index/N) skips the two ghost cells per completed interior row.
        int M = N+2;
        int i = index + M + 1 + 2 * (index / N);
        // 5-point stencil: average of the four neighbors plus the source term.
        unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]);
    }
}
975
#include <chrono>
#include <cuda.h>
#include <fstream>
#include <iostream>
#include <stdint.h>
#include <stdio.h>

// Empty kernel launched once before timing to absorb one-time CUDA context
// creation cost.
__global__ void WakeGpuKernel(int reps)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= reps) return;
}

// One Floyd–Warshall relaxation step: for pivot k, each thread updates a
// single matrix element with min(graph[i][j], graph[i][k] + graph[k][j]).
__global__ void FloydSimple(uint32_t *graph, uint32_t *result, uint32_t n, int k)
{
    const int i = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n || j >= n) return;

    const uint32_t direct  = graph[i * n + j];
    const uint32_t via_k   = graph[i * n + k] + graph[k * n + j];
    result[i * n + j] = (direct < via_k) ? direct : via_k;
}

__host__ int main(int argc, char **argv)
{
    if (argc < 3) {
        std::cout << "usage: " << argv[0] << " graph_path results_path" << std::endl;
        return 1;
    }

    // Read the vertex count (4-byte prefix) and the n*n byte adjacency matrix.
    uint32_t n;
    std::fstream graph_reader(argv[1], std::fstream::in | std::fstream::binary);
    graph_reader.read((char*)&n, 4);
    uint32_t *h_graph = new uint32_t[n * n];
    uint32_t *h_floyd_result = new uint32_t[n * n];
    for (size_t i = 0; i < n * n; ++i) {
        uint8_t current_elem;
        graph_reader.read((char *)&current_elem, 1);
        h_graph[i] = current_elem;
    }

    // Warm up the device so context creation does not pollute the timing.
    int threadNum = std::min(n, uint32_t(32));
    dim3 blockSize(threadNum, threadNum, 1);
    dim3 gridSize(n / threadNum + 1, n / threadNum + 1, 1);
    WakeGpuKernel<<<1, blockSize>>>(32);

    // Timed region: H2D copy, n relaxation steps, D2H copy.
    auto start = std::chrono::steady_clock::now();
    uint32_t *d_graph, *d_floyd_result;
    cudaMalloc(&d_graph, sizeof(uint32_t) * n * n);
    cudaMalloc(&d_floyd_result, sizeof(uint32_t) * n * n);
    cudaMemcpy(d_floyd_result, h_graph, sizeof(uint32_t) * n * n, cudaMemcpyHostToDevice);

    // Ping-pong the two device buffers: the previous iteration's output
    // becomes the next iteration's input. All launches go to the default
    // stream, so they execute in order — the original's per-iteration
    // cudaEventRecord/cudaEventSynchronize added nothing but stalls and has
    // been removed; the blocking memcpy below synchronizes before the timer
    // stops.
    for (size_t k = 0; k < n; ++k) {
        std::swap(d_graph, d_floyd_result);
        FloydSimple<<<gridSize, blockSize>>>(d_graph, d_floyd_result, n, k);
    }

    // Blocking copy of the final distances back to the host (was sizeof(int);
    // sizeof(uint32_t) states the intent of the element type explicitly).
    cudaMemcpy(h_floyd_result, d_floyd_result, sizeof(uint32_t) * n * n, cudaMemcpyDeviceToHost);

    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>
        (std::chrono::steady_clock::now() - start);
    std::cout << "cuda_simple: " << n << " " << duration.count() << std::endl;

    // Write the result matrix as 4-byte little-endian words.
    std::fstream result_writer(argv[2], std::fstream::out | std::fstream::binary);
    for (size_t i = 0; i < n * n; ++i) {
        result_writer.write((char*)&h_floyd_result[i], 4);
    }
    result_writer.close();

    delete[] h_graph;
    delete[] h_floyd_result;
    cudaFree(d_graph);
    cudaFree(d_floyd_result);
    return 0;
}
976
#include <stdio.h>

// Every thread of the launch prints a greeting; `index` identifies which
// launch the thread belongs to.
__global__ void hello_GPU(int index){
    printf("Hello from GPU%d[%d]!\n", index, threadIdx.x);
}

int main(void)
{
    printf("Hello from CPU!\n");

    // Launch 1 with 4 threads and launch 2 with 6 threads; device printf
    // output is flushed by the synchronize below.
    for (int launch = 1; launch <= 2; ++launch) {
        hello_GPU<<<1, 2 + 2 * launch>>>(launch);
    }
    cudaDeviceSynchronize();
    return 0;
}
977
#include <cmath>
#include <fstream>
#include <iomanip>
#include <limits>
#include <stdexcept>
#include <string>
#include <vector>
#include <iostream>

#define N_STEPS 200000
#define DT 60.
#define EPS 1e-3
#define G 6.674e-11
#define GRAVITY_DEVICE_MASS(M0, T) (M0 + 0.5 * M0 * fabs(sin(T/6000)))
#define PLANET_RADIUS ((double) 1e7)
#define MISSILE_SPEED ((double) 1e6)
#define GET_MISSILE_COST(T) (1e5 + 1e3 * T)

// Reads the problem instance: body count, planet/asteroid indices, then one
// line per body with position, velocity, mass, and type string.
void read_input(const char* filename, int& n, int& planet, int& asteroid,
    std::vector<double>& qx, std::vector<double>& qy, std::vector<double>& qz,
    std::vector<double>& vx, std::vector<double>& vy, std::vector<double>& vz,
    std::vector<double>& m, std::vector<std::string>& type)
{
    std::ifstream fin(filename);
    fin >> n >> planet >> asteroid;
    qx.resize(n); qy.resize(n); qz.resize(n);
    vx.resize(n); vy.resize(n); vz.resize(n);
    m.resize(n); type.resize(n);
    for (int i = 0; i < n; i++) {
        fin >> qx[i] >> qy[i] >> qz[i]
            >> vx[i] >> vy[i] >> vz[i] >> m[i] >> type[i];
    }
}

// Writes the three answers in the grader's expected format.
void write_output(const char* filename, double min_dist, int hit_time_step,
    int gravity_device_id, double missile_cost)
{
    std::ofstream fout(filename);
    fout << std::scientific
         << std::setprecision(std::numeric_limits<double>::digits10 + 1)
         << min_dist << '\n'
         << hit_time_step << '\n'
         << gravity_device_id << ' ' << missile_cost << '\n';
}

// Advances the n-body system one step of size DT: softened gravitational
// accelerations (O(n^2) pairwise), then explicit Euler velocity and position
// updates. "device" bodies have a time-varying mass.
void run_step(int step, int n,
    std::vector<double>& qx, std::vector<double>& qy, std::vector<double>& qz,
    std::vector<double>& vx, std::vector<double>& vy, std::vector<double>& vz,
    const std::vector<double>& m, const std::vector<std::string>& type)
{
    // compute accelerations (vector<double>(n) value-initializes to 0)
    std::vector<double> ax(n), ay(n), az(n);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (j == i) continue;
            double mj = m[j];
            if (type[j] == "device") {
                mj = GRAVITY_DEVICE_MASS(mj, step * DT);
            }
            double dx = qx[j] - qx[i];
            double dy = qy[j] - qy[i];
            double dz = qz[j] - qz[i];
            // EPS softening avoids the singularity at zero separation.
            double dist3 = pow(dx * dx + dy * dy + dz * dz + EPS * EPS, 1.5);
            ax[i] += G * mj * dx / dist3;
            ay[i] += G * mj * dy / dist3;
            az[i] += G * mj * dz / dist3;
        }
    }
    // update velocities
    for (int i = 0; i < n; i++) {
        vx[i] += ax[i] * DT;
        vy[i] += ay[i] * DT;
        vz[i] += az[i] * DT;
    }
    // update positions
    for (int i = 0; i < n; i++) {
        qx[i] += vx[i] * DT;
        qy[i] += vy[i] * DT;
        qz[i] += vz[i] * DT;
    }
}

int main(int argc, char** argv)
{
    if (argc != 3) {
        throw std::runtime_error("must supply 2 arguments");
    }
    int n, planet, asteroid;
    std::vector<double> qx, qy, qz, vx, vy, vz, m;
    std::vector<std::string> type;
    read_input(argv[1], n, planet, asteroid, qx, qy, qz, vx, vy, vz, m, type);

    // Problem 1: minimum planet-asteroid distance with all devices disabled.
    double min_dist = std::numeric_limits<double>::infinity();
    std::vector<double> qx1(qx), qy1(qy), qz1(qz), vx1(vx), vy1(vy), vz1(vz), m1(m);
    std::vector<std::string> type1(type);

    // Zero the masses of the trailing "device" bodies. The original annotated
    // this loop with `#pragma omp parallel for`, which is invalid: a loop
    // containing `break` is not in OpenMP canonical form and fails to compile
    // under -fopenmp. The loop is O(n) and runs fine serially.
    // NOTE(review): stopping at the first non-device entry assumes devices
    // are listed last in the input — confirm against the input format spec.
    for (int i = n - 1; i >= 0; i--) {
        if (type1[i] == "device") {
            m1[i] = 0;
        } else {
            break;
        }
    }

    for (int step = 0; step <= N_STEPS; step++) {
        if (step % 10000 == 0) printf("Step: %d\n", step);
        if (step > 0) {
            run_step(step, n, qx1, qy1, qz1, vx1, vy1, vz1, m1, type1);
        }
        double dx = qx1[planet] - qx1[asteroid];
        double dy = qy1[planet] - qy1[asteroid];
        double dz = qz1[planet] - qz1[asteroid];
        // Track squared distance; sqrt is applied once at output time.
        min_dist = std::min(min_dist, dx * dx + dy * dy + dz * dz);
        if (step % 10000 == 0) printf("MinDist: %lf\n", min_dist);
    }

    // Problem 2 (collision step) and Problem 3 (cheapest device to destroy)
    // are not implemented; the sentinel values below are emitted as-is. The
    // original carried large commented-out drafts of both simulations here —
    // removed as dead code (retrieve from version control if needed).
    int hit_time_step = -2;
    int gravity_device_id = -999;
    double missile_cost = -999;

    write_output(argv[2], sqrt(min_dist), hit_time_step, gravity_device_id, missile_cost);
}
978
// Blocking device-to-host copy of `size` bytes from device_pointer into
// host_pointer. An asynchronous, stream-based variant was stubbed out in the
// original (commented cudaMemcpyAsync + stream parameter); reinstate that
// overload if copy/compute overlap is ever needed.
void save_data(float *device_pointer, float *host_pointer, size_t size)
{
    cudaMemcpy(host_pointer, device_pointer, size, cudaMemcpyDeviceToHost);
}
979
#include <stdio.h>
#include <stdlib.h>   // for exit() used by CHECK (was missing)
#include <cuda_runtime.h>

__global__ void checkGlobalVariable();

// Abort with a diagnostic if a CUDA runtime call fails.
#define CHECK(call) { \
    const cudaError_t error = call; \
    if (error != cudaSuccess) { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
        exit(1); \
    } \
} \

// Device-resident global; the host reads/writes it only through
// cudaMemcpyToSymbol / cudaMemcpyFromSymbol.
__device__ float devData;

int main(int argc, char **argv) {
    // initialize the global variable on the device
    float value = 3.14f;
    CHECK(cudaMemcpyToSymbol(devData, &value, sizeof(float)));
    printf("Host: copied %f to the global variable\n", value);

    // invoke kernel with a single warp of 32 threads
    checkGlobalVariable<<<1, 32>>>();
    CHECK(cudaGetLastError());   // catch launch-configuration errors

    // copy back to host; cudaMemcpyFromSymbol blocks until the kernel
    // has finished, so no explicit synchronization is needed here
    CHECK(cudaMemcpyFromSymbol(&value, devData, sizeof(float)));
    printf("Host: the value changed by the kernel to %f\n", value);

    cudaDeviceReset();
    return 0;
}

// Print the device global, then have every thread add 2.0f to it.
//
// BUG FIX: the original did an unsynchronized `devData += 2.0f` from all
// 32 threads -- a read-modify-write data race whose final value was
// undefined. atomicAdd makes each thread's contribution well-defined, so
// the host deterministically reads 3.14 + 32 * 2.0. The trailing
// __syncthreads() at kernel end was a no-op and has been removed.
__global__ void checkGlobalVariable() {
    printf("Device: the value of the global variable is %f\n", devData);
    atomicAdd(&devData, 2.0f);
}
980
#include <stdio.h>
#include <math.h>

// Samples f(x) = sin(x*x)*cos(x)*x - x*x on [XMIN, XMAX] with N points,
// once on the CPU and once on the GPU, and prints both for comparison.
#define XMIN 0.0
#define XMAX 10.0
#define N 10000

double vh[N], vd[N];

// Function being sampled; callable from both host and device.
__device__ __host__ double funcion(double x) {
    return sin(x*x)*cos(x)*x - x*x;
}

// Evaluate funcion() at n evenly spaced points of [xmin, xmax],
// one thread per sample.
// BUG FIX: the original had no bounds check, so any grid larger than n
// wrote out of bounds; the guard makes every grid >= n threads valid.
__global__ void kernelEvaluaFuncion(double *v, double xmin, double xmax, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    double dx = (xmax - xmin) / (double)(n - 1);
    if (i < n) {
        double x = xmin + i * dx;
        v[i] = funcion(x);
    }
}

// Fill v[0..n) with samples computed on the GPU and copy them back.
void llenaPolinomioEnDevice(double *v, int n) {
    double *vD;
    int size = n * sizeof(double);
    cudaMalloc(&vD, size);
    // BUG FIX: the grid was hard-coded to <<<10, 1000>>> (exactly 10000
    // threads), which only worked for n == 10000; derive it from n instead.
    const int threads = 256;
    int blocks = (n + threads - 1) / threads;   // ceil-div
    kernelEvaluaFuncion<<<blocks, threads>>>(vD, XMIN, XMAX, n);
    cudaMemcpy(v, vD, size, cudaMemcpyDeviceToHost);
    cudaFree(vD);
}

// Fill v[0..n) with reference samples computed on the CPU.
// BUG FIX: the original ignored the parameter n and used the macro N for
// both the step size and the loop bound, so it silently overran v for
// any n < N.
void llenaPolinomioEnHost(double *v, int n) {
    int i;
    double x, dx;
    dx = (XMAX-XMIN)/(double)(n-1);
    x = XMIN;
    for (i=0; i<n; i++) {
        v[i] = funcion(x);
        x += dx;
    }
}

int main() {
    int i;
    double x, dx;
    dx = (XMAX-XMIN)/(double)(N-1);
    llenaPolinomioEnHost(vh, N);
    llenaPolinomioEnDevice(vd, N);
    // Print index, abscissa, CPU value, GPU value side by side.
    x = XMIN;
    for (i=0; i<N; i++) {
        printf("%4d %4.2lf %6.12lf %6.12lf\n", i, x, vh[i], vd[i]);
        x += dx;
    }
    return 0;
}
981
// Parallel and Distributed Programming (GPU programming) (MAB622) (DCC/UFRJ)
// October 2012
// Prof.: Silvana Rossetto
// Lab 1: introduction to the CUDA programming environment on Linux
// Multiplies two matrices: A * B = C

#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <cstring>

// Thread block size / shared-memory tile size (must match)
const int BLOCK_SIZE = 16;
const int SHARED_SIZE = 16;

const int WA = 1024; // Matrix A width
const int HA = 1024; // Matrix A height
const int WB = HA;   // Matrix B width
const int HB = WA;   // Matrix B height
const int WC = WA;   // Matrix C width
const int HC = HB;   // Matrix C height

const int CUDA_COALESC = 1;
const int CUDA = 2;

#define CUDA_CHECK_RETURN(value) { \
    cudaError_t _m_cudaStat = value; \
    if (_m_cudaStat != cudaSuccess) { \
        fprintf(stderr, "Error %s at line %d in file %s\n", \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } }

////////////////////////// CUDA Kernel /////////////////////////

// Tiled (shared-memory) matrix multiply: C = A * B.
// Launch with BLOCK_SIZE x BLOCK_SIZE blocks; this program uses square
// 1024x1024 matrices, and the kernel indexes B with stride `width`, so it
// assumes square operands -- TODO confirm before reusing with non-square data.
__global__ void matrix_mult_coalesc(float* C, float* A, float* B, int width, int height) {
    __shared__ float As[SHARED_SIZE][SHARED_SIZE];
    __shared__ float Bs[SHARED_SIZE][SHARED_SIZE];

    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int row = by * BLOCK_SIZE + ty;
    int col = bx * BLOCK_SIZE + tx;
    // BUG FIX: the tile count used truncating division (width / BLOCK_SIZE),
    // dropping the last partial tile when width is not a multiple of it.
    int numTiles = (width + BLOCK_SIZE - 1) / BLOCK_SIZE;

    float elementC = 0.0f;
    for (int m = 0; m < numTiles; ++m) {
        // BUG FIX: out-of-range threads previously skipped the load and left
        // the shared tile uninitialized (read by in-range threads below).
        // Zero-fill instead, so the whole tile is always valid.
        int aCol = m * BLOCK_SIZE + tx;
        int bRow = m * BLOCK_SIZE + ty;
        As[ty][tx] = (row < height && aCol < width) ? A[row * width + aCol] : 0.0f;
        Bs[ty][tx] = (bRow < width && col < width) ? B[bRow * width + col] : 0.0f;
        __syncthreads();   // tile fully written before anyone reads it

        for (int k = 0; k < BLOCK_SIZE; ++k) {
            elementC += As[ty][k] * Bs[k][tx];
        }
        __syncthreads();   // everyone done reading before the next overwrite
    }
    // BUG FIX: the store was unguarded, so threads past the matrix edge
    // wrote out of bounds whenever the size was not a tile multiple.
    if (row < height && col < width) {
        C[row * width + col] = elementC;
    }
}

// Naive matrix multiply: one thread per output element, global memory only.
__global__ void matrix_mult(float *c, float *a, float *b, int width, int height) {
    int line = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    if (line >= height || column >= width) {
        return;
    }
    float c_element = 0;
    for (int k = 0; k < width; ++k) {
        c_element += a[line * width + k] * b[k * width + column];
    }
    c[line * width + column] = c_element;
}

// Initialize a float matrix with small deterministic values.
void initMat(float* data, size_t size, int mod) {
    int _mod = 13 + ((mod % 2 == 0) ? (mod + 1) : mod);
    for (size_t i = 0; i < size; ++i) {
        data[i] = (float)(i % _mod) + 1;
    }
}

// Write a matrix to `output`, w values per row.
void fprintMat(FILE* output, float* data, size_t size, int w) {
    for (size_t i = 0; i < size; ++i) {
        fprintf(output, "%f ", data[i]);
        if (((i + 1) % w) == 0) {
            fprintf(output, "\n");
        }
    }
}

// Print a float matrix to standard output.
void printMat(float* data, size_t size, int w) {
    // BUG FIX: the original passed stdin (an input stream) to fprintf.
    fprintMat(stdout, data, size, w);
}

// Read `size` floats from `input` into a newly malloc'ed matrix.
float* fscanMat(FILE* input, size_t size) {
    float* m = (float*) malloc(sizeof(float) * size);
    for (size_t i = 0; i < size; ++i) {
        fscanf(input, "%f", m + i);
    }
    return m;
}

float* scanMat(size_t size) {
    return fscanMat(stdin, size);
}

// main: parse arguments, run the selected kernel, optionally dump matrices.
int main(int argc, char** argv) {
    if (argc < 2) {
        printf("Usage: %s <algorithm> <debug opcional>\nalgorithm:\n\t c (Cuda)\n\t cc (Cuda Coalesc)\n", argv[0]);
        return -1;
    }
    const int debug = (argc > 2 ? strcmp("debug", argv[2]) == 0 : 0);
    int state = -1;

    // 1. allocate host memory for the input matrices A and B
    size_t size_A = WA * HA;
    size_t mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    size_t size_B = WB * HB;
    size_t mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);

    FILE* f_A = NULL;
    FILE* f_B = NULL;
    FILE* f_C = NULL;
    if (strlen(argv[1]) == 1 && strncmp("c", argv[1], 1) == 0) {
        state = CUDA;
        if (debug) {
            f_A = fopen("m_A_c.txt", "w");
            f_B = fopen("m_B_c.txt", "w");
            f_C = fopen("m_C_c.txt", "w");
        }
    } else if (strncmp("cc", argv[1], 2) == 0) {
        state = CUDA_COALESC;
        if (debug) {
            f_A = fopen("m_A_cc.txt", "w");
            f_B = fopen("m_B_cc.txt", "w");
            f_C = fopen("m_C_cc.txt", "w");
        }
    } else {
        // BUG FIX: an unrecognized algorithm used to fall through and run the
        // coalesced kernel anyway (and crash on NULL files in debug mode).
        printf("Unknown algorithm '%s'\n", argv[1]);
        free(h_A);
        free(h_B);
        return -1;
    }

    // 2. initialize the input matrices
    initMat(h_A, size_A, WA);
    initMat(h_B, size_B, WB);

    // 3. (optional) dump the input matrices
    if (debug) {
        fprintMat(f_A, h_A, size_A, WA);
        fprintMat(f_B, h_B, size_B, WB);
    }

    // 4. allocate host memory for the output matrix C
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*) malloc(mem_size_C);

    // 5. allocate device memory for the three matrices (A, B and C)
    float* d_A;
    float* d_B;
    float* d_C;
    CUDA_CHECK_RETURN(cudaMalloc((void**) &d_A, mem_size_A));
    CUDA_CHECK_RETURN(cudaMalloc((void**) &d_B, mem_size_B));
    CUDA_CHECK_RETURN(cudaMalloc((void**) &d_C, mem_size_C));

    // 6. copy the input matrices from host to device
    CUDA_CHECK_RETURN(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));

    // 7. run the multiplication
    // BUG FIX: the grid used truncating division plus an ad-hoc "at least 1"
    // fixup; ceil-div covers the whole matrix for any size.
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((WC + threads.x - 1) / threads.x, (HC + threads.y - 1) / threads.y);
    if (state == CUDA) {
        matrix_mult<<<grid, threads>>>(d_C, d_A, d_B, WC, HC);
    } else {
        matrix_mult_coalesc<<<grid, threads>>>(d_C, d_A, d_B, WC, HC);
    }
    CUDA_CHECK_RETURN(cudaGetLastError());

    // 8. copy the result from device to host (blocking, so it also syncs)
    CUDA_CHECK_RETURN(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));

    // 9. (optional) dump the output matrix
    if (debug) {
        fprintMat(f_C, h_C, size_C, WC);
        fclose(f_A);
        fclose(f_B);
        fclose(f_C);
    }

    // 10. release memory
    free(h_A);
    free(h_B);
    free(h_C);
    CUDA_CHECK_RETURN(cudaFree(d_A));
    CUDA_CHECK_RETURN(cudaFree(d_B));
    CUDA_CHECK_RETURN(cudaFree(d_C));
}
982
/**
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

//static const int WORK_SIZE = 256;
// ./B03 60 5555 5555 25

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) { \
    cudaError_t _m_cudaStat = value; \
    if (_m_cudaStat != cudaSuccess) { \
        fprintf(stderr, "Error %s at line %d in file %s\n", \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } }

// Reverse the bit order of a 32-bit word (leftover demo helper; unused
// by main but kept for reference).
__device__ unsigned int bitreverse(unsigned int number) {
    number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
    number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
    number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
    return number;
}

/**
 * CUDA kernel function that reverses the order of bits in each element of the array.
 */
__global__ void bitreverse(void *data) {
    unsigned int *idata = (unsigned int*) data;
    idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
}

/**
 * Naive matrix multiply: c = a * b, one thread per element of c.
 * a is a_r x a_c, b is b_r x b_c (both row-major); c is a_r x b_c and is
 * expected to be zero-initialized by the caller (the kernel accumulates).
 */
__global__ void multiply(int *a, int *b, int *c, int a_r, int a_c, int b_r, int b_c) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: the grid is rounded up, so threads past the end of c used to
    // write out of bounds; guard the tail.
    if (index >= a_r * b_c) {
        return;
    }
    int row = index / b_c;
    int column = index % b_c;
    int sum = 0;
    for (int i = 0; i < a_c; i++) {
        // BUG FIX: element (row, i) of a row-major a_r x a_c matrix is
        // a[row * a_c + i]; the original read a[row + i], producing wrong
        // products (and out-of-range reads for large row + i).
        sum += a[row * a_c + i] * b[column + (i * b_c)];
    }
    c[index] += sum;
}

// Print an integer matrix, one tab-separated row per line.
void printMatrix(int* m, int rows, int columns) {
    int i;
    int j;
    for (i = 0; i < rows; i++) {
        for (j = 0; j < columns; j++) {
            printf("%d\t", m[i * columns + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/**
 * Host function that prepares data array and passes it to the CUDA kernel.
 */
int main(int argc, char *argv[]) {
    struct timeval t0;
    struct timeval t1;

    // BUG FIX: the original dereferenced argv[1..4] unchecked.
    if (argc < 5) {
        printf("Usage: %s <rows A> <cols A> <rows B> <cols B>\n", argv[0]);
        return -1;
    }
    int a_r = atoi(argv[1]); // count of rows of A
    int a_c = atoi(argv[2]); // columns of A
    int b_r = atoi(argv[3]); // count of rows of B
    int b_c = atoi(argv[4]); // columns of B
    if (a_c != b_r) {
        printf("\n\tError! \n\tPlease match the size of colums of A with the size of rows of B!\n\n");
        return -1;
    }

    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    a = (int *)malloc(a_r * a_c * sizeof(int));
    b = (int *)malloc(b_r * b_c * sizeof(int));
    c = (int *)malloc(a_r * b_c * sizeof(int));

    // Allocate space for device copies of a, b, c
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_a, a_r * a_c * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_b, b_r * b_c * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_c, a_r * b_c * sizeof(int)));

    // Report the available devices and pick up the block-size limit.
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    int THREADS_PER_BLOCK = 0;
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf(" Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n",
               2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
        printf(" Max Threads Per Block: %d\n\n", prop.maxThreadsPerBlock);
        THREADS_PER_BLOCK = prop.maxThreadsPerBlock;
    }
    // BUG FIX: the queried limit was never used; fall back to 1024 when no
    // device reported one, and launch with this value below.
    if (THREADS_PER_BLOCK <= 0) {
        THREADS_PER_BLOCK = 1024;
    }

    // Fill A and B with ones so the expected product element is a_c.
    for (int i = 0; i < a_r; i++) {
        for (int j = 0; j < a_c; j++) {
            a[i * a_c + j] = 1; // rand() % 100;
        }
    }
    for (int i = 0; i < b_r; i++) {
        for (int j = 0; j < b_c; j++) {
            b[i * b_c + j] = 1; // rand() % 100;
        }
    }
    // C must start at zero: the kernel accumulates into it.
    for (int i = 0; i < a_r; i++) {
        for (int j = 0; j < b_c; j++) {
            c[i * b_c + j] = 0;
        }
    }

    gettimeofday(&t0, 0);
    // Copy inputs to device
    CUDA_CHECK_RETURN(cudaMemcpy(d_a, a, a_r * a_c * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_b, b, b_r * b_c * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_c, c, a_r * b_c * sizeof(int), cudaMemcpyHostToDevice));

    // Launch multiply() with enough blocks to cover every element of C
    // (integer ceil-div instead of float ceil()).
    int blocks = (a_r * b_c + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    multiply<<<blocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, a_r, a_c, b_r, b_c);
    CUDA_CHECK_RETURN(cudaGetLastError());

    // Copy result back to host (blocking copy also synchronizes)
    CUDA_CHECK_RETURN(cudaMemcpy(c, d_c, a_r * b_c * sizeof(int), cudaMemcpyDeviceToHost));
    gettimeofday(&t1, 0);
    double time_spent = (t1.tv_sec - t0.tv_sec) + (double)(t1.tv_usec - t0.tv_usec) / 1000000;

    printMatrix(a, a_r, a_c);
    printMatrix(b, b_r, b_c);
    printMatrix(c, a_r, b_c);
    printf("Time Calculated: %f\n\n", time_spent);
    printf("Block Count: %d\n", blocks);

    // BUG FIX: release host and device memory (was leaked).
    free(a);
    free(b);
    free(c);
    CUDA_CHECK_RETURN(cudaFree(d_a));
    CUDA_CHECK_RETURN(cudaFree(d_b));
    CUDA_CHECK_RETURN(cudaFree(d_c));
    return 0;
}
983
// NOTE(review): clang 'lit' regression test — checks that __int128 globals
// and functions compile cleanly in CUDA device mode (-fcuda-is-device) for
// both amdgcn and nvptx targets, with an x86_64 aux-triple supplying the
// host-side type information. 'expected-no-diagnostics' means -verify
// requires a clean compile. Keep the code exactly as-is; its form is the test.
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa \
// RUN:   -aux-triple x86_64-unknown-linux-gnu \
// RUN:   -fcuda-is-device -verify -fsyntax-only %s
// RUN: %clang_cc1 -triple nvptx \
// RUN:   -aux-triple x86_64-unknown-linux-gnu \
// RUN:   -fcuda-is-device -verify -fsyntax-only %s

// expected-no-diagnostics

#define __device__ __attribute__((device))

__int128 h_glb;
__device__ __int128 d_unused;
__device__ __int128 d_glb;

__device__ __int128 bar() {
  return d_glb;
}
984
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3) { for (int i=2; i<=N-3; i++) { double _t_12_; double _t_13_; double _t_10_; double _t_19_; double _t_17_; double _t_20_; double _t_24_; double _t_22_; double _t_25_; double _t_32_; double _t_30_; double _t_33_; double _t_37_; double _t_35_; double _t_38_; double _t_44_; double _t_42_; double _t_45_; double _t_49_; double _t_47_; double _t_50_; double 
_t_7_; double _t_5_; double _t_8_; double _t_23_; double _t_153_; double _t_152_; double _t_21_; double _t_1_; double _t_150_; double _t_18_; double _t_16_; double _t_15_; double _t_158_; double _t_156_; double _t_14_; double _t_48_; double _t_192_; double _t_191_; double _t_46_; double _t_26_; double _t_189_; double _t_43_; double _t_41_; double _t_40_; double _t_39_; double _t_197_; double _t_195_; double _t_36_; double _t_173_; double _t_172_; double _t_34_; double _t_170_; double _t_31_; double _t_29_; double _t_28_; double _t_178_; double _t_176_; double _t_27_; double _t_0_; double _t_11_; double _t_134_; double _t_133_; double _t_9_; double _t_131_; double _t_6_; double _t_4_; double _t_3_; double _t_139_; double _t_137_; double _t_2_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_145_; double _t_143_; double _t_164_; double _t_162_; double _t_184_; double _t_182_; double _t_203_; double _t_201_; double _t_135_; double _t_77_; double _t_132_; double _t_59_; double _t_154_; double _t_151_; double _t_56_; double _t_74_; double _t_136_; double _t_114_; double _t_130_; double _t_129_; double _t_128_; double _t_96_; double _t_155_; double _t_149_; double _t_148_; double _t_111_; double _t_93_; double _t_140_; double _t_83_; double _t_138_; double _t_65_; double _t_159_; double _t_157_; double _t_63_; double _t_81_; double _t_141_; double _t_120_; double _t_102_; double _t_160_; double _t_100_; double _t_118_; double _t_146_; double _t_88_; double _t_144_; double _t_70_; double _t_165_; double _t_163_; double _t_68_; double _t_86_; double _t_147_; double _t_125_; double _t_142_; double _t_107_; double _t_166_; double _t_161_; double _t_127_; double _t_105_; double _t_123_; double _t_174_; double _t_78_; double _t_171_; double _t_60_; double _t_193_; double _t_190_; double _t_175_; double _t_115_; double _t_169_; double _t_97_; double _t_168_; double _t_167_; double _t_194_; double _t_188_; double _t_187_; double _t_179_; double _t_84_; double _t_177_; double _t_66_; 
double _t_198_; double _t_196_; double _t_180_; double _t_121_; double _t_103_; double _t_199_; double _t_185_; double _t_89_; double _t_183_; double _t_71_; double _t_204_; double _t_202_; double _t_186_; double _t_126_; double _t_181_; double _t_108_; double _t_205_; double _t_200_; double _t_113_; double _t_112_; double _t_58_; double _t_57_; double _t_76_; double _t_75_; double _t_95_; double _t_94_; double _t_110_; double _t_109_; double _t_90_; double _t_119_; double _t_117_; double _t_116_; double _t_124_; double _t_122_; double _t_55_; double _t_54_; double _t_53_; double _t_64_; double _t_62_; double _t_61_; double _t_69_; double _t_67_; double _t_73_; double _t_72_; double _t_82_; double _t_80_; double _t_79_; double _t_87_; double _t_85_; double _t_52_; double _t_92_; double _t_91_; double _t_101_; double _t_99_; double _t_98_; double _t_106_; double _t_104_; double _t_51_; _t_12_ = -u2[i][j-2][k+2]; _t_12_ += u2[i][j+2][k+2]; _t_13_ = -u2[i][j-1][k+2]; _t_13_ += u2[i][j+1][k+2]; _t_10_ = c1 * _t_13_; _t_10_ += c2 * _t_12_; _t_19_ = -u1[i][j-2][k-2]; _t_19_ += u1[i][j+2][k-2]; _t_17_ = c2 * _t_19_; _t_20_ = -u1[i][j-1][k-2]; _t_20_ += u1[i][j+1][k-2]; _t_17_ += c1 * _t_20_; _t_24_ = -u2[i][j-2][k-2]; _t_24_ += u2[i][j+2][k-2]; _t_22_ = c2 * _t_24_; _t_25_ = -u2[i][j-1][k-2]; _t_25_ += u2[i][j+1][k-2]; _t_22_ += c1 * _t_25_; _t_32_ = -u1[i][j-2][k+1]; _t_32_ += u1[i][j+2][k+1]; _t_30_ = c2 * _t_32_; _t_33_ = -u1[i][j-1][k+1]; _t_33_ += u1[i][j+1][k+1]; _t_30_ += c1 * _t_33_; _t_37_ = -u2[i][j-2][k+1]; _t_37_ += u2[i][j+2][k+1]; _t_35_ = c2 * _t_37_; _t_38_ = -u2[i][j-1][k+1]; _t_38_ += u2[i][j+1][k+1]; _t_35_ += c1 * _t_38_; _t_44_ = -u1[i][j-2][k-1]; _t_44_ += u1[i][j+2][k-1]; _t_42_ = c2 * _t_44_; _t_45_ = -u1[i][j-1][k-1]; _t_45_ += u1[i][j+1][k-1]; _t_42_ += c1 * _t_45_; _t_49_ = -u2[i][j-2][k-1]; _t_49_ += u2[i][j+2][k-1]; _t_47_ = c2 * _t_49_; _t_50_ = -u2[i][j-1][k-1]; _t_50_ += u2[i][j+1][k-1]; _t_47_ += c1 * _t_50_; _t_7_ = -u1[i][j-2][k+2]; 
_t_7_ += u1[i][j+2][k+2]; _t_5_ = c2 * _t_7_; _t_8_ = -u1[i][j-1][k+2]; _t_8_ += u1[i][j+1][k+2]; _t_5_ += c1 * _t_8_; _t_23_ = la[i][j][k-2] * met2[i][j][k-2]; _t_153_ = 2.0 * mu[i][j][k-2]; _t_153_ += la[i][j][k-2]; _t_152_ = _t_153_ * met2[i][j][k-2]; _t_21_ = _t_23_ * met1[i][j][k-2]; _t_1_ = _t_21_ * _t_22_; _t_150_ = _t_152_ * met1[i][j][k-2]; _t_18_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_16_ = _t_18_ * met1[i][j][k-2]; _t_15_ = _t_16_ * _t_17_; _t_158_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_156_ = _t_158_ * met1[i][j][k-2]; _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; _t_48_ = la[i][j][k-1] * met2[i][j][k-1]; _t_192_ = 2.0 * mu[i][j][k-1]; _t_192_ += la[i][j][k-1]; _t_191_ = _t_192_ * met2[i][j][k-1]; _t_46_ = _t_48_ * met1[i][j][k-1]; _t_26_ = _t_46_ * _t_47_; _t_189_ = _t_191_ * met1[i][j][k-1]; _t_43_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_41_ = _t_43_ * met1[i][j][k-1]; _t_40_ = _t_41_ * _t_42_; _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; _t_197_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_195_ = _t_197_ * met1[i][j][k-1]; _t_36_ = la[i][j][k+1] * met2[i][j][k+1]; _t_173_ = 2.0 * mu[i][j][k+1]; _t_173_ += la[i][j][k+1]; _t_172_ = _t_173_ * met2[i][j][k+1]; _t_34_ = _t_36_ * met1[i][j][k+1]; _t_26_ += _t_34_ * _t_35_; _t_170_ = _t_172_ * met1[i][j][k+1]; _t_31_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_29_ = _t_31_ * met1[i][j][k+1]; _t_28_ = _t_29_ * _t_30_; _t_178_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_176_ = _t_178_ * met1[i][j][k+1]; _t_27_ = _t_28_ * stry[j-2]; _t_26_ += _t_27_ * strx[i]; _t_0_ = c1 * _t_26_; _t_11_ = la[i][j][k+2] * met2[i][j][k+2]; _t_134_ = 2.0 * mu[i][j][k+2]; _t_134_ += la[i][j][k+2]; _t_133_ = _t_134_ * met2[i][j][k+2]; _t_9_ = _t_11_ * met1[i][j][k+2]; _t_1_ += _t_9_ * _t_10_; _t_131_ = _t_133_ * met1[i][j][k+2]; _t_6_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_4_ = _t_6_ * met1[i][j][k+2]; _t_3_ = _t_4_ * _t_5_; _t_139_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_137_ = _t_139_ * met1[i][j][k+2]; _t_2_ = _t_3_ * stry[j+2]; _t_1_ 
+= _t_2_ * strx[i]; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_145_ = mu[i][j][k+2] * met4[i][j][k+2]; _t_143_ = _t_145_ * met1[i][j][k+2]; _t_164_ = mu[i][j][k-2] * met4[i][j][k-2]; _t_162_ = _t_164_ * met1[i][j][k-2]; _t_184_ = mu[i][j][k+1] * met4[i][j][k+1]; _t_182_ = _t_184_ * met1[i][j][k+1]; _t_203_ = mu[i][j][k-1] * met4[i][j][k-1]; _t_201_ = _t_203_ * met1[i][j][k-1]; _t_135_ = -u1[i-2][j][k+2]; _t_77_ = u1[i-2][j][k+2]; _t_135_ += u1[i+2][j][k+2]; _t_132_ = c2 * _t_135_; _t_59_ = u1[i+2][j][k+2]; _t_77_ -= u1[i-2][j][k-2]; _t_154_ = -u1[i-2][j][k-2]; _t_59_ -= u1[i+2][j][k-2]; _t_154_ += u1[i+2][j][k-2]; _t_151_ = c2 * _t_154_; _t_56_ = c2 * _t_59_; _t_74_ = c2 * _t_77_; _t_136_ = -u1[i-1][j][k+2]; _t_114_ = u1[i-1][j][k+2]; _t_136_ += u1[i+1][j][k+2]; _t_132_ += c1 * _t_136_; _t_130_ = _t_131_ * _t_132_; _t_129_ = _t_130_ * strx[i]; _t_128_ = _t_129_ * stry[j]; _t_96_ = u1[i+1][j][k+2]; _t_114_ -= u1[i-1][j][k-2]; _t_155_ = -u1[i-1][j][k-2]; _t_96_ -= u1[i+1][j][k-2]; _t_155_ += u1[i+1][j][k-2]; _t_151_ += c1 * _t_155_; _t_149_ = _t_150_ * _t_151_; _t_148_ = _t_149_ * strx[i]; _t_128_ += _t_148_ * stry[j]; _t_111_ = c2 * _t_114_; _t_93_ = c2 * _t_96_; _t_140_ = -u2[i-2][j][k+2]; _t_83_ = u2[i-2][j][k+2]; _t_140_ += u2[i+2][j][k+2]; _t_138_ = c2 * _t_140_; _t_65_ = u2[i+2][j][k+2]; _t_83_ -= u2[i-2][j][k-2]; _t_159_ = -u2[i-2][j][k-2]; _t_65_ -= u2[i+2][j][k-2]; _t_159_ += u2[i+2][j][k-2]; _t_157_ = c2 * _t_159_; _t_63_ = c2 * _t_65_; _t_81_ = c2 * _t_83_; _t_141_ = -u2[i-1][j][k+2]; _t_120_ = u2[i-1][j][k+2]; _t_141_ += u2[i+1][j][k+2]; _t_138_ += c1 * _t_141_; _t_128_ += _t_137_ * _t_138_; _t_102_ = u2[i+1][j][k+2]; _t_120_ -= u2[i-1][j][k-2]; _t_160_ = -u2[i-1][j][k-2]; _t_102_ -= u2[i+1][j][k-2]; _t_160_ += u2[i+1][j][k-2]; _t_157_ += c1 * _t_160_; _t_128_ += _t_156_ * _t_157_; _t_100_ = c2 * _t_102_; _t_118_ = c2 * _t_120_; _t_146_ = -u3[i-2][j][k+2]; _t_88_ = u3[i-2][j][k+2]; _t_146_ += u3[i+2][j][k+2]; _t_144_ = c2 * _t_146_; _t_70_ = 
u3[i+2][j][k+2]; _t_88_ -= u3[i-2][j][k-2]; _t_165_ = -u3[i-2][j][k-2]; _t_70_ -= u3[i+2][j][k-2]; _t_165_ += u3[i+2][j][k-2]; _t_163_ = c2 * _t_165_; _t_68_ = c2 * _t_70_; _t_86_ = c2 * _t_88_; _t_147_ = -u3[i-1][j][k+2]; _t_125_ = u3[i-1][j][k+2]; _t_147_ += u3[i+1][j][k+2]; _t_144_ += c1 * _t_147_; _t_142_ = _t_143_ * _t_144_; _t_128_ += _t_142_ * stry[j]; _t_107_ = u3[i+1][j][k+2]; _t_125_ -= u3[i-1][j][k-2]; _t_166_ = -u3[i-1][j][k-2]; _t_107_ -= u3[i+1][j][k-2]; _t_166_ += u3[i+1][j][k-2]; _t_163_ += c1 * _t_166_; _t_161_ = _t_162_ * _t_163_; _t_128_ += _t_161_ * stry[j]; _t_127_ = c2 * _t_128_; _t_105_ = c2 * _t_107_; _t_123_ = c2 * _t_125_; _t_174_ = -u1[i-2][j][k+1]; _t_78_ = u1[i-2][j][k+1]; _t_174_ += u1[i+2][j][k+1]; _t_171_ = c2 * _t_174_; _t_60_ = u1[i+2][j][k+1]; _t_78_ -= u1[i-2][j][k-1]; _t_74_ += c1 * _t_78_; _t_193_ = -u1[i-2][j][k-1]; _t_60_ -= u1[i+2][j][k-1]; _t_56_ += c1 * _t_60_; _t_193_ += u1[i+2][j][k-1]; _t_190_ = c2 * _t_193_; _t_175_ = -u1[i-1][j][k+1]; _t_115_ = u1[i-1][j][k+1]; _t_175_ += u1[i+1][j][k+1]; _t_171_ += c1 * _t_175_; _t_169_ = _t_170_ * _t_171_; _t_97_ = u1[i+1][j][k+1]; _t_168_ = _t_169_ * strx[i+2]; _t_167_ = _t_168_ * stry[j]; _t_115_ -= u1[i-1][j][k-1]; _t_111_ += c1 * _t_115_; _t_194_ = -u1[i-1][j][k-1]; _t_97_ -= u1[i+1][j][k-1]; _t_93_ += c1 * _t_97_; _t_194_ += u1[i+1][j][k-1]; _t_190_ += c1 * _t_194_; _t_188_ = _t_189_ * _t_190_; _t_187_ = _t_188_ * strx[i-2]; _t_167_ += _t_187_ * stry[j]; _t_179_ = -u2[i-2][j][k+1]; _t_84_ = u2[i-2][j][k+1]; _t_179_ += u2[i+2][j][k+1]; _t_177_ = c2 * _t_179_; _t_66_ = u2[i+2][j][k+1]; _t_84_ -= u2[i-2][j][k-1]; _t_81_ += c1 * _t_84_; _t_198_ = -u2[i-2][j][k-1]; _t_66_ -= u2[i+2][j][k-1]; _t_63_ += c1 * _t_66_; _t_198_ += u2[i+2][j][k-1]; _t_196_ = c2 * _t_198_; _t_180_ = -u2[i-1][j][k+1]; _t_121_ = u2[i-1][j][k+1]; _t_180_ += u2[i+1][j][k+1]; _t_177_ += c1 * _t_180_; _t_167_ += _t_176_ * _t_177_; _t_103_ = u2[i+1][j][k+1]; _t_121_ -= u2[i-1][j][k-1]; _t_118_ += c1 * _t_121_; 
_t_199_ = -u2[i-1][j][k-1]; _t_103_ -= u2[i+1][j][k-1]; _t_100_ += c1 * _t_103_; _t_199_ += u2[i+1][j][k-1]; _t_196_ += c1 * _t_199_; _t_167_ += _t_195_ * _t_196_; _t_185_ = -u3[i-2][j][k+1]; _t_89_ = u3[i-2][j][k+1]; _t_185_ += u3[i+2][j][k+1]; _t_183_ = c2 * _t_185_; _t_71_ = u3[i+2][j][k+1]; _t_89_ -= u3[i-2][j][k-1]; _t_86_ += c1 * _t_89_; _t_204_ = -u3[i-2][j][k-1]; _t_71_ -= u3[i+2][j][k-1]; _t_68_ += c1 * _t_71_; _t_204_ += u3[i+2][j][k-1]; _t_202_ = c2 * _t_204_; _t_186_ = -u3[i-1][j][k+1]; _t_126_ = u3[i-1][j][k+1]; _t_186_ += u3[i+1][j][k+1]; _t_183_ += c1 * _t_186_; _t_181_ = _t_182_ * _t_183_; _t_167_ += _t_181_ * stry[j]; _t_108_ = u3[i+1][j][k+1]; _t_126_ -= u3[i-1][j][k-1]; _t_123_ += c1 * _t_126_; _t_205_ = -u3[i-1][j][k-1]; _t_108_ -= u3[i+1][j][k-1]; _t_105_ += c1 * _t_108_; _t_205_ += u3[i+1][j][k-1]; _t_202_ += c1 * _t_205_; _t_200_ = _t_201_ * _t_202_; _t_167_ += _t_200_ * stry[j]; _t_127_ += c1 * _t_167_; r1ic0jc0kc0 += _t_127_; _t_113_ = 2.0 * mu[i-1][j][k]; _t_113_ += la[i-1][j][k]; _t_112_ = _t_113_ * met2[i-1][j][k]; _t_58_ = 2.0 * mu[i+2][j][k]; _t_58_ += la[i+2][j][k]; _t_57_ = _t_58_ * met2[i+2][j][k]; _t_76_ = 2.0 * mu[i-2][j][k]; _t_76_ += la[i-2][j][k]; _t_75_ = _t_76_ * met2[i-2][j][k]; _t_95_ = 2.0 * mu[i+1][j][k]; _t_95_ += la[i+1][j][k]; _t_94_ = _t_95_ * met2[i+1][j][k]; _t_110_ = _t_112_ * met1[i-1][j][k]; _t_109_ = _t_110_ * _t_111_; _t_90_ = _t_109_ * strx[i]; _t_119_ = la[i-1][j][k] * met3[i-1][j][k]; _t_117_ = _t_119_ * met1[i-1][j][k]; _t_116_ = _t_117_ * _t_118_; _t_90_ += _t_116_ * stry[j]; _t_124_ = la[i-1][j][k] * met4[i-1][j][k]; _t_122_ = _t_124_ * met1[i-1][j][k]; _t_90_ += _t_122_ * _t_123_; _t_55_ = _t_57_ * met1[i+2][j][k]; _t_54_ = _t_55_ * _t_56_; _t_53_ = _t_54_ * strx[i]; _t_64_ = la[i+2][j][k] * met3[i+2][j][k]; _t_62_ = _t_64_ * met1[i+2][j][k]; _t_61_ = _t_62_ * _t_63_; _t_53_ += _t_61_ * stry[j]; _t_69_ = la[i+2][j][k] * met4[i+2][j][k]; _t_67_ = _t_69_ * met1[i+2][j][k]; _t_53_ += _t_67_ * _t_68_; _t_73_ 
= _t_75_ * met1[i-2][j][k]; _t_72_ = _t_73_ * _t_74_; _t_53_ += _t_72_ * strx[i]; _t_82_ = la[i-2][j][k] * met3[i-2][j][k]; _t_80_ = _t_82_ * met1[i-2][j][k]; _t_79_ = _t_80_ * _t_81_; _t_53_ += _t_79_ * stry[j]; _t_87_ = la[i-2][j][k] * met4[i-2][j][k]; _t_85_ = _t_87_ * met1[i-2][j][k]; _t_53_ += _t_85_ * _t_86_; _t_52_ = c2 * _t_53_; _t_92_ = _t_94_ * met1[i+1][j][k]; _t_91_ = _t_92_ * _t_93_; _t_90_ += _t_91_ * strx[i]; _t_101_ = la[i+1][j][k] * met3[i+1][j][k]; _t_99_ = _t_101_ * met1[i+1][j][k]; _t_98_ = _t_99_ * _t_100_; _t_90_ += _t_98_ * stry[j]; _t_106_ = la[i+1][j][k] * met4[i+1][j][k]; _t_104_ = _t_106_ * met1[i+1][j][k]; _t_90_ += _t_104_ * _t_105_; _t_52_ += c1 * _t_90_; _t_51_ = _t_52_ * stry[j]; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0; } } } __global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_i= (int)(blockDim.z); int i0 = (int)(blockIdx.z)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.z); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double 
(*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) { double _t_12_; double _t_13_; double _t_10_; double _t_19_; double _t_17_; double _t_20_; double _t_24_; double _t_22_; double _t_25_; double _t_32_; double _t_30_; double _t_33_; double _t_37_; double _t_35_; double _t_38_; double _t_44_; double _t_42_; double _t_45_; double _t_49_; double _t_47_; double _t_50_; double _t_7_; double _t_5_; double _t_8_; double _t_23_; double _t_21_; double _t_1_; double _t_18_; double _t_16_; double _t_15_; double _t_60_; double _t_58_; double _t_14_; double _t_48_; double _t_46_; double _t_26_; double _t_43_; double _t_41_; double _t_40_; double _t_39_; double _t_71_; double _t_69_; double _t_36_; double _t_34_; double _t_31_; double _t_29_; double _t_28_; double _t_66_; double _t_64_; double _t_27_; double _t_0_; double _t_11_; double _t_9_; double _t_6_; double _t_4_; double _t_3_; double _t_55_; double _t_53_; double _t_2_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_56_; double _t_83_; double _t_61_; double _t_81_; double _t_78_; double _t_54_; double _t_59_; double _t_76_; double _t_57_; double _t_94_; double _t_62_; double _t_92_; double _t_52_; double _t_89_; double _t_87_; double _t_51_; double _t_79_; double _t_72_; double _t_84_; double _t_70_; double _t_67_; double _t_65_; double _t_68_; double _t_95_; double _t_73_; double _t_63_; double _t_90_; double _t_82_; double _t_80_; double _t_74_; double _t_93_; double _t_91_; double _t_85_; double _t_88_; double _t_86_; double _t_77_; double _t_75_; _t_12_ = -u2[i][j+2][k-2]; _t_12_ += u2[i][j+2][k+2]; _t_13_ = -u2[i][j+2][k-1]; _t_13_ += u2[i][j+2][k+1]; _t_10_ = c1 * _t_13_; _t_10_ += c2 * _t_12_; _t_19_ = -u1[i][j-2][k-2]; _t_19_ += u1[i][j-2][k+2]; _t_17_ = c2 * _t_19_; _t_20_ = -u1[i][j-2][k-1]; _t_20_ += u1[i][j-2][k+1]; _t_17_ += c1 * _t_20_; _t_24_ = -u2[i][j-2][k-2]; _t_24_ += 
u2[i][j-2][k+2]; _t_22_ = c2 * _t_24_; _t_25_ = -u2[i][j-2][k-1]; _t_25_ += u2[i][j-2][k+1]; _t_22_ += c1 * _t_25_; _t_32_ = -u1[i][j+1][k-2]; _t_32_ += u1[i][j+1][k+2]; _t_30_ = c2 * _t_32_; _t_33_ = -u1[i][j+1][k-1]; _t_33_ += u1[i][j+1][k+1]; _t_30_ += c1 * _t_33_; _t_37_ = -u2[i][j+1][k-2]; _t_37_ += u2[i][j+1][k+2]; _t_35_ = c2 * _t_37_; _t_38_ = -u2[i][j+1][k-1]; _t_38_ += u2[i][j+1][k+1]; _t_35_ += c1 * _t_38_; _t_44_ = -u1[i][j-1][k-2]; _t_44_ += u1[i][j-1][k+2]; _t_42_ = c2 * _t_44_; _t_45_ = -u1[i][j-1][k-1]; _t_45_ += u1[i][j-1][k+1]; _t_42_ += c1 * _t_45_; _t_49_ = -u2[i][j-1][k-2]; _t_49_ += u2[i][j-1][k+2]; _t_47_ = c2 * _t_49_; _t_50_ = -u2[i][j-1][k-1]; _t_50_ += u2[i][j-1][k+1]; _t_47_ += c1 * _t_50_; _t_7_ = -u1[i][j+2][k-2]; _t_7_ += u1[i][j+2][k+2]; _t_5_ = c2 * _t_7_; _t_8_ = -u1[i][j+2][k-1]; _t_8_ += u1[i][j+2][k+1]; _t_5_ += c1 * _t_8_; _t_23_ = mu[i][j-2][k] * met2[i][j-2][k]; _t_21_ = _t_23_ * met1[i][j-2][k]; _t_1_ = _t_21_ * _t_22_; _t_18_ = mu[i][j-2][k] * met3[i][j-2][k]; _t_16_ = _t_18_ * met1[i][j-2][k]; _t_15_ = _t_16_ * _t_17_; _t_60_ = mu[i][j-2][k] * met1[i][j-2][k]; _t_58_ = _t_60_ * met1[i][j-2][k]; _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; _t_48_ = mu[i][j-1][k] * met2[i][j-1][k]; _t_46_ = _t_48_ * met1[i][j-1][k]; _t_26_ = _t_46_ * _t_47_; _t_43_ = mu[i][j-1][k] * met3[i][j-1][k]; _t_41_ = _t_43_ * met1[i][j-1][k]; _t_40_ = _t_41_ * _t_42_; _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; _t_71_ = mu[i][j-1][k] * met1[i][j-1][k]; _t_69_ = _t_71_ * met1[i][j-1][k]; _t_36_ = mu[i][j+1][k] * met2[i][j+1][k]; _t_34_ = _t_36_ * met1[i][j+1][k]; _t_26_ += _t_34_ * _t_35_; _t_31_ = mu[i][j+1][k] * met3[i][j+1][k]; _t_29_ = _t_31_ * met1[i][j+1][k]; _t_28_ = _t_29_ * _t_30_; _t_66_ = mu[i][j+1][k] * met1[i][j+1][k]; _t_64_ = _t_66_ * met1[i][j+1][k]; _t_27_ = _t_28_ * stry[j-1]; _t_26_ += _t_27_ * strx[i]; _t_0_ = c1 * _t_26_; _t_11_ = mu[i][j+2][k] * met2[i][j+2][k]; _t_9_ = _t_11_ * met1[i][j+2][k]; _t_1_ += _t_9_ 
* _t_10_; _t_6_ = mu[i][j+2][k] * met3[i][j+2][k]; _t_4_ = _t_6_ * met1[i][j+2][k]; _t_3_ = _t_4_ * _t_5_; _t_55_ = mu[i][j+2][k] * met1[i][j+2][k]; _t_53_ = _t_55_ * met1[i][j+2][k]; _t_2_ = _t_3_ * stry[j+1]; _t_1_ += _t_2_ * strx[i]; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_56_ = -u2[i-2][j+2][k]; _t_83_ = u2[i-2][j+2][k]; _t_83_ -= u2[i-2][j-2][k]; _t_61_ = -u2[i-2][j-2][k]; _t_81_ = c2 * _t_83_; _t_61_ += u2[i+2][j-2][k]; _t_78_ = -u2[i+2][j-2][k]; _t_56_ += u2[i+2][j+2][k]; _t_78_ += u2[i+2][j+2][k]; _t_54_ = c2 * _t_56_; _t_59_ = c2 * _t_61_; _t_76_ = c2 * _t_78_; _t_57_ = -u2[i-1][j+2][k]; _t_94_ = u2[i-1][j+2][k]; _t_94_ -= u2[i-1][j-2][k]; _t_62_ = -u2[i-1][j-2][k]; _t_92_ = c2 * _t_94_; _t_62_ += u2[i+1][j-2][k]; _t_59_ += c1 * _t_62_; _t_52_ = _t_58_ * _t_59_; _t_89_ = -u2[i+1][j-2][k]; _t_57_ += u2[i+1][j+2][k]; _t_54_ += c1 * _t_57_; _t_52_ += _t_53_ * _t_54_; _t_89_ += u2[i+1][j+2][k]; _t_87_ = c2 * _t_89_; _t_51_ = c2 * _t_52_; _t_79_ = -u2[i+2][j-1][k]; _t_72_ = u2[i+2][j-1][k]; _t_72_ -= u2[i-2][j-1][k]; _t_84_ = -u2[i-2][j-1][k]; _t_70_ = c2 * _t_72_; _t_84_ += u2[i-2][j+1][k]; _t_81_ += c1 * _t_84_; _t_67_ = -u2[i-2][j+1][k]; _t_67_ += u2[i+2][j+1][k]; _t_79_ += u2[i+2][j+1][k]; _t_76_ += c1 * _t_79_; _t_65_ = c2 * _t_67_; _t_68_ = -u2[i-1][j+1][k]; _t_95_ = u2[i-1][j+1][k]; _t_95_ -= u2[i-1][j-1][k]; _t_92_ += c1 * _t_95_; _t_73_ = -u2[i-1][j-1][k]; _t_73_ += u2[i+1][j-1][k]; _t_70_ += c1 * _t_73_; _t_63_ = _t_69_ * _t_70_; _t_90_ = -u2[i+1][j-1][k]; _t_68_ += u2[i+1][j+1][k]; _t_65_ += c1 * _t_68_; _t_63_ += _t_64_ * _t_65_; _t_51_ += c1 * _t_63_; _t_90_ += u2[i+1][j+1][k]; _t_87_ += c1 * _t_90_; _t_82_ = la[i-2][j][k] * met1[i-2][j][k]; _t_80_ = _t_82_ * met1[i-2][j][k]; _t_74_ = _t_80_ * _t_81_; _t_93_ = la[i-1][j][k] * met1[i-1][j][k]; _t_91_ = _t_93_ * met1[i-1][j][k]; _t_85_ = _t_91_ * _t_92_; _t_88_ = la[i+1][j][k] * met1[i+1][j][k]; _t_86_ = _t_88_ * met1[i+1][j][k]; _t_85_ += _t_86_ * _t_87_; _t_51_ += c1 * _t_85_; _t_77_ = 
la[i+2][j][k] * met1[i+2][j][k]; _t_75_ = _t_77_ * met1[i+2][j][k]; _t_74_ += _t_75_ * _t_76_; _t_51_ += c2 * _t_74_; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0;
}
}

/* Allocate a device buffer of `count` doubles, upload `host_buf` into it and
 * return the device pointer.  `err_msg` is forwarded to check_error() so the
 * original per-buffer error messages are preserved verbatim. */
static double *upload_field (const double *host_buf, size_t count, const char *err_msg)
{
	double *dev_buf;
	cudaMalloc (&dev_buf, sizeof(double)*count);
	check_error (err_msg);
	cudaMemcpy (dev_buf, host_buf, sizeof(double)*count, cudaMemcpyHostToDevice);
	return dev_buf;
}

/* Host driver for the curvilinear stencil kernels.
 *
 * Uploads all N^3 input fields and the two N-length stretch arrays, runs
 * curvi_1 (2-D launch) then curvi_2 (3-D launch), and copies r1 back into
 * h_r1.  `ceil(a, b)` is assumed to be an integer ceil-division macro defined
 * earlier in this file.
 *
 * Fixes vs. the original:
 *  - size computations use size_t so N^3 * sizeof(double) cannot overflow int;
 *  - check_error() is called after each kernel launch, not only after mallocs;
 *  - every device buffer is freed (the original leaked all twelve). */
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N)
{
	size_t vol = (size_t)N * N * N;

	double *r1   = upload_field (h_r1,   vol, "Failed to allocate device memory for r1\n");
	double *u1   = upload_field (h_u1,   vol, "Failed to allocate device memory for u1\n");
	double *u2   = upload_field (h_u2,   vol, "Failed to allocate device memory for u2\n");
	double *u3   = upload_field (h_u3,   vol, "Failed to allocate device memory for u3\n");
	double *mu   = upload_field (h_mu,   vol, "Failed to allocate device memory for mu\n");
	double *la   = upload_field (h_la,   vol, "Failed to allocate device memory for la\n");
	double *met1 = upload_field (h_met1, vol, "Failed to allocate device memory for met1\n");
	double *met2 = upload_field (h_met2, vol, "Failed to allocate device memory for met2\n");
	double *met3 = upload_field (h_met3, vol, "Failed to allocate device memory for met3\n");
	double *met4 = upload_field (h_met4, vol, "Failed to allocate device memory for met4\n");
	double *strx = upload_field (h_strx, (size_t)N, "Failed to allocate device memory for strx\n");
	double *stry = upload_field (h_stry, (size_t)N, "Failed to allocate device memory for stry\n");

	/* curvi_1 iterates a 2-D (i, j) slab; k is looped inside the kernel. */
	dim3 blockconfig (16, 8);
	dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
	curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
	check_error ("curvi_1 kernel launch failed\n");

	/* curvi_2 covers the full 3-D domain. */
	dim3 blockconfig_1 (16, 2, 2);
	dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
	curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
	check_error ("curvi_2 kernel launch failed\n");

	/* Blocking copy: also synchronizes with the kernels above. */
	cudaMemcpy (h_r1, r1, sizeof(double)*vol, cudaMemcpyDeviceToHost);

	/* Release everything (the original leaked all device allocations). */
	cudaFree (r1);   cudaFree (u1);   cudaFree (u2);   cudaFree (u3);
	cudaFree (mu);   cudaFree (la);   cudaFree (met1); cudaFree (met2);
	cudaFree (met3); cudaFree (met4); cudaFree (strx); cudaFree (stry);
}
985
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand_kernel.h"
#include <stdio.h>
#include <time.h>

// Initialize one cuRAND state per column: column x gets sequence number x of
// the shared seed, so columns produce independent streams.
// FIX: the original guard (x > width) let x == width through, writing one
// element past the last valid state.
__global__ void kernel_set_random(curandState *curand_states, int width, int height, long clock_for_rand)
{
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    if(x < 0 || x >= width)
    {
        return;
    }
    curand_init(clock_for_rand, x, 0, &curand_states[x]);
}

// Fill a width x height array column-by-column: thread x draws all `height`
// values for column x from its private state, so a second launch continues
// the same random sequence (demonstrated by the two rounds in main()).
// FIX: guard was x > width (off-by-one, see above).
__global__ void kernel_random(float *dev_random_array, int width, int height, curandState *curand_states)
{
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    if(x < 0 || x >= width)
    {
        return;
    }
    for(int y = 0; y < height; y++)
    {
        int pos = y*width + x;
        // curand_uniform returns a value in (0, 1]; the original wrapped it
        // in abs(), which is redundant and has been dropped.
        dev_random_array[pos] = curand_uniform(curand_states + x);
    }
}

// Demo: seed per-column generators once, then draw the same 10x10 array twice
// to show that the states persist between kernel launches.
int main()
{
    const int array_size_width = 10;
    const int array_size_height = 10;
    float random_array[array_size_width*array_size_height];
    for(int i = 0; i < array_size_width*array_size_height; i++)
    {
        random_array[i] = 0;
    }

    //error status
    cudaError_t cuda_status;

    //only chose one GPU
    cuda_status = cudaSetDevice(0);
    if(cuda_status != cudaSuccess)
    {
        fprintf(stderr,"cudaSetDevice failed! Do you have a CUDA-Capable GPU installed?");
        return 0;
    }

    float *dev_random_array;
    curandState *dev_states;

    //allocate memory on the GPU
    cuda_status = cudaMalloc((void**)&dev_random_array, sizeof(float)*array_size_width*array_size_height);
    if(cuda_status != cudaSuccess)
    {
        fprintf(stderr,"dev_random_array cudaMalloc Failed");
        exit( EXIT_FAILURE );
    }
    // Only one state per column is ever used; the original over-allocated
    // width*height states.
    cuda_status = cudaMalloc((void **)&dev_states, sizeof(curandState)*array_size_width);
    if(cuda_status != cudaSuccess)
    {
        fprintf(stderr,"dev_states cudaMalloc Failed");
        exit( EXIT_FAILURE );
    }

    long clock_for_rand = clock();   // seed: host clock at startup

    dim3 threads(16,1);
    dim3 grid((array_size_width+threads.x-1)/threads.x,1);
    kernel_set_random<<<grid,threads>>>(dev_states,array_size_width,array_size_height,clock_for_rand);
    // Launch-configuration errors only surface via cudaGetLastError().
    cuda_status = cudaGetLastError();
    if(cuda_status != cudaSuccess)
    {
        fprintf(stderr,"kernel_set_random launch Failed");
        exit( EXIT_FAILURE );
    }

    printf("The first time \n");
    {
        kernel_random<<<grid,threads>>>(dev_random_array,array_size_width,array_size_height,dev_states);
        //copy out the result (blocking copy also synchronizes with the kernel)
        cuda_status = cudaMemcpy(random_array,dev_random_array,sizeof(float)*array_size_width*array_size_height,cudaMemcpyDeviceToHost);
        if(cuda_status != cudaSuccess)
        {
            fprintf(stderr,"cudaMemcpy Failed");
            exit( EXIT_FAILURE );
        }
        for(int i = 0; i < array_size_width*array_size_height; i++)
        {
            printf("%f\n",random_array[i]);
        }
    }

    printf("------------------------------------------------------- \n");
    printf("The second time \n");
    {
        kernel_random<<<grid,threads>>>(dev_random_array,array_size_width,array_size_height,dev_states);
        //copy out the result
        cuda_status = cudaMemcpy(random_array,dev_random_array,sizeof(float)*array_size_width*array_size_height,cudaMemcpyDeviceToHost);
        if(cuda_status != cudaSuccess)
        {
            fprintf(stderr,"cudaMemcpy Failed");
            exit( EXIT_FAILURE );
        }
        for(int i = 0; i < array_size_width*array_size_height; i++)
        {
            printf("%f\n",random_array[i]);
        }
    }

    //free
    cudaFree(dev_random_array);
    cudaFree(dev_states);
    return 0;
}
986
#include "includes.h" __global__ void ComputeSpeQtyKernel (double *Label, double *Dens, double *ExtLabel, int nrad, int nsec) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = threadIdx.y + blockDim.y*blockIdx.y; if (i<nrad && j<nsec){ Label[i*nsec + j] = ExtLabel[i*nsec + j]/Dens[i*nsec + j]; /* Compressive flow if line commentarized Label[i*nsec + j] = ExtLabel[i*nsec + j] */ } }
987
#include <stdio.h>

#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)

// CUDA API error checking macro
#define cudaCheck(error) \
  if (error != cudaSuccess) { \
    printf("Fatal error: %s at %s:%d\n", \
           cudaGetErrorString(error), \
           __FILE__, __LINE__); \
    exit(1); \
  }

// 1-D stencil of radius RADIUS over `in`, writing sums of 2*RADIUS+1
// neighbours to `out`.  `in` must carry RADIUS halo elements on each side
// (NUM_ELEMENTS + 2*RADIUS total); each block stages its tile plus halos in
// shared memory.  Requires NUM_ELEMENTS to be a multiple of BLOCK_SIZE
// (asserted in main) — there is no tail guard.
// FIX: removed the debug printf that every thread executed on each launch.
__global__ void stencil_1d(int *in, int *out)
{
  __shared__ int temp[2*RADIUS + BLOCK_SIZE];
  int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
  int lindex = threadIdx.x + RADIUS;

  // Read input elements into shared memory; the first RADIUS threads also
  // load the left and right halos of this block's tile.
  temp[lindex] = in[gindex];
  if (threadIdx.x < RADIUS) {
    temp[lindex - RADIUS] = in[gindex - RADIUS];
    temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
  }
  // All tile/halo writes must land before any thread reads its neighbours.
  __syncthreads();

  // Apply the stencil
  int result = 0;
  for (int offset = -RADIUS; offset <= RADIUS; offset++)
    result += temp[lindex + offset];

  // Store the result
  out[gindex - RADIUS] = result;
}

int main()
{
  // The kernel has no tail guard, so the grid must tile the data exactly.
  static_assert(NUM_ELEMENTS % BLOCK_SIZE == 0, "NUM_ELEMENTS must be a multiple of BLOCK_SIZE");

  unsigned int i;
  int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
  int *d_in, *d_out;

  // Initialize host data: with a value of 1 and RADIUS of 3, all output
  // values should be 7.
  for (i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i)
    h_in[i] = 1;

  // Allocate space on the device
  cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
  cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );

  // Copy input data to device
  cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );

  stencil_1d<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
  // FIX: launch-configuration errors are only visible via cudaGetLastError().
  cudaCheck( cudaGetLastError() );

  cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );

  // Verify every out value is 7
  for (i = 0; i < NUM_ELEMENTS; ++i)
    if (h_out[i] != 7) {
      printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
      break;
    }
  if (i == NUM_ELEMENTS)
    printf("SUCCESS!\n");

  // Free out memory
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
988
#include <stdio.h> #define X 9 #define Y 8 #define THREAD_X 3 #define THREAD_Y 2 #define A(i, j) A[i*Y + j] __global__ void index(int *A){ int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //A(i, j) = threadIdx.x; A(i, j) = threadIdx.y; //A(i ,j) = blockIdx.y; } int main(){ int A[X][Y], *A_d; int i, j; dim3 dimBlock(THREAD_X, THREAD_Y); dim3 dimGrid(X/THREAD_X, Y/THREAD_Y); cudaMalloc((void**)&A_d, sizeof(int)*X*Y); for(i = 0; i < X; i++) for(j = 0; j < Y; j++) A[i][j] = -1; cudaMemcpy(A_d, A, sizeof(int)*X*Y, cudaMemcpyHostToDevice); index<<<dimGrid, dimBlock>>>(A_d); cudaMemcpy(A, A_d, sizeof(int)*X*Y, cudaMemcpyDeviceToHost); for(i = 0; i < X; i++){ for(j = 0; j < Y; j++){ printf("%d ", A[i][j]); } printf("\n"); } cudaFree(A_d); }
989
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<fstream>
#include<cstring>   // FIX: memset was used without including <cstring>
#include<vector>
using namespace std;

#define lli long long int

// Problem size: bipartite graph, vertices 1..num_vertices1 on the left and
// num_vertices1+1..num_vertices1+num_vertices2 on the right (1-indexed).
const lli num_edges = 700000;
const lli num_vertices1 = 100000;
const lli num_vertices2 = 100000;

// CSR-style adjacency: neighbours of vertex v live in
// h_flat_adj_list[h_list_ptr[v] .. h_list_ptr[v+1]).
int *h_flat_adj_list;
int *h_degree;         // degree of each vertex (1-indexed)
int *h_list_ptr;       // 1-indexed, extra slot at the end for easy size access
int *h_list_ptr_copy;  // scratch write cursors while building the adjacency
bool *h_is_matched_edge;    // dense (V+1)x(V+1) matrix of matched edges (1-indexed)
bool *h_is_matched_vertex;  // is the vertex currently matched
int *h_partner_vertex;      // matched partner, or -1
int *h_visited;             // BFS visitation marks
int *h_bfs_parent;          // BFS tree parent (self when root)
bool *h_is_parent_change;   // endpoint of a discovered augmenting path
int fc = num_vertices1;
int num_aug_paths = 0;
int *frontier;        // 0/1 membership arrays for current/next BFS level
int *next_frontier;

// Read the matched-edge flag for (i, j).  The debug print is retained from
// the original instrumentation to preserve program output.
// FIX: removed an unreachable cout after the return statement.
int get_is_matched_edge(int i, int j){
    cout << i << " " << j << " " << i*(num_vertices1 + num_vertices2+1) + j << endl;
    return h_is_matched_edge[i*(num_vertices1 + num_vertices2+1) + j];
}

void set_is_matched_edge(int i, int j, int value){
    h_is_matched_edge[i*(num_vertices1 + num_vertices2+1) + j] = value;
}

// Checks if the matching is correct and also returns the total number of
// matched pairs (each pair is seen from both endpoints, hence the /2).
int check_matching(){
    int total_matched = 0;
    for(int i=1;i<=num_vertices1+num_vertices2;i++){
        int vertex = i;
        int num_matched = 0;
        int start_edge = h_list_ptr[vertex];
        int end_edge = h_list_ptr[vertex+1];
        for(int j=start_edge;j<end_edge;j++){
            int neighbor = h_flat_adj_list[j];
            cout << "vertex-neighbor " << vertex << " " <<neighbor <<endl;
            if(get_is_matched_edge(vertex, neighbor)){
                num_matched++;
            }
        }
        if(num_matched==1){
            total_matched++;
        }
        // num_matched > 1 would mean "not a matching"; the original abort for
        // that case is intentionally disabled.
    }
    cout << "Matching is correct! " << endl;
    return total_matched/2;
}

void clear_visited(){
    for(int i=1;i<=num_vertices1+num_vertices2;i++){
        h_visited[i] = 0;
    }
}

// Every vertex starts as its own BFS parent (root marker).
void clear_bfs_parent(){
    for(int i=1;i<=num_vertices1+num_vertices2;i++){
        h_bfs_parent[i] = i;
    }
}

void initialise_partner_vertex(){
    for(int i=1;i<=num_vertices1+num_vertices2;i++){
        h_partner_vertex[i] = -1;
    }
}

void clear_is_parent_change(){
    for(int i=1;i<=num_vertices1+num_vertices2;i++){
        h_is_parent_change[i] = 0;
    }
}

void print_matchings(){
    cout << "Matchings: " << endl;
    for(int i=1;i<=num_vertices1+num_vertices2; i++){
        cout<< i << " " << h_partner_vertex[i] << endl;
    }
}

// Mark (u, v) matched in the edge matrix and both endpoints as matched.
void match_edges(int u, int v){
    set_is_matched_edge(u,v,1);
    set_is_matched_edge(v,u,1);
    h_is_matched_vertex[u] = 1;
    h_is_matched_vertex[v] = 1;
    h_partner_vertex[u] = v;
    h_partner_vertex[v] = u;
}

// Unmatching edges also unmatches the vertices since the graph is a matching.
void unmatch_edges(int u, int v){
    set_is_matched_edge(u,v,0);
    set_is_matched_edge(v,u,0);
    h_is_matched_vertex[u] = 0;
    h_is_matched_vertex[v] = 0;
    h_partner_vertex[u] = -1;
    h_partner_vertex[v] = -1;
}

// Walk every discovered augmenting path from its endpoint back to the root,
// flipping edge states: odd steps become matched, even steps unmatched.
// Augmenting paths always contain an odd number of edges.
void update_matchings(){
    for(int i=1;i<=num_vertices1+num_vertices2;i++){
        int vertex = i;
        if(h_is_parent_change[vertex] == true){
            int path_length = 1;
            int parent = h_bfs_parent[vertex];
            while(parent!=vertex){
                if(path_length%2==1){
                    match_edges(vertex, parent);
                }
                else{
                    unmatch_edges(vertex, parent);
                }
                vertex = h_bfs_parent[vertex];
                parent = h_bfs_parent[vertex];
                path_length++;
            }
        }
        // return here to stop after updating only one path : Important for experiments
    }
}

// Returns the smallest frontier index strictly greater than ele, or -1.
// FIX: the original loop bound was <= V+1, reading one element past the
// frontier allocation (valid indices are 0..V).
int get_frontier_element(int ele){
    for(int i=ele+1;i<=num_vertices1+num_vertices2;i++){
        if(frontier[i]){
            return i;
        }
    }
    return -1;
}

// Promote next_frontier to frontier and reset next_frontier.
void copy_frontier(){
    for(int i=0;i<=num_vertices1+num_vertices2;i++){
        frontier[i] = next_frontier[i];
        next_frontier[i] = 0;
    }
}

// Level-alternating BFS used to hunt augmenting paths:
//   binary_level == 0: expand along unmatched edges;
//   binary_level == 1: expand along matched edges.
// Reaching an unmatched vertex on a level-0 step ends an augmenting path
// (h_is_parent_change marks its endpoint) and the search returns.
// NOTE(review): recursion depth grows with path length — deep graphs could
// overflow the stack; confirm acceptable for the intended inputs.
void bfs(bool binary_level){
    int frontier_element = get_frontier_element(0);
    if(frontier_element!=-1){
        // Iterate all frontier elements.
        while(frontier_element!=-1){
            int vertex = frontier_element;
            h_visited[vertex] = true;
            cout << "Frontier: " << frontier_element << endl;
            bool found_path = false;
            int start_edge = h_list_ptr[vertex];
            int end_edge = h_list_ptr[vertex + 1];
            for(int j=start_edge;j<end_edge;j++){
                if(found_path) break;
                int neighbor = h_flat_adj_list[j];
                // We alternate between unmatched and matched edges;
                // everything else is ignored.
                if(!h_visited[neighbor]){
                    h_visited[neighbor] = true;
                    h_bfs_parent[neighbor] = vertex;
                    if( binary_level==0 && get_is_matched_edge(vertex, neighbor)==0 && h_is_matched_vertex[neighbor]==1 ){
                        next_frontier[neighbor] = 1;
                    }
                    // is_matched_vertex is implicitly true since the edge is
                    // matched; in level 1 we only follow matched edges.
                    else if( binary_level==1 && get_is_matched_edge(vertex, neighbor)==1 ){
                        next_frontier[neighbor] = 1;
                        // Found a path to the next level — stop here.
                        return;
                    }
                    else if(binary_level==0 && get_is_matched_edge(vertex, neighbor)==0 && h_is_matched_vertex[neighbor]==0){
                        // Augmenting path found, ending at `neighbor`.
                        h_is_parent_change[neighbor] = 1;
                        num_aug_paths++;
                        // remove this return so that multiple paths can be found
                        return;
                    }
                }
            }
            frontier_element = get_frontier_element(vertex);
        }
        copy_frontier();
        bfs(!binary_level);
    }
}

void clear_frontier(){
    for(int i=0;i<num_vertices1+num_vertices2+1;i++){
        frontier[i] = 0;
    }
}

// Seed a BFS from every unmatched, unvisited left-side vertex; if any
// augmenting paths were found, flip them into the matching.  Returns the
// number of augmenting paths discovered this round.
int bfs_util(){
    clear_visited();
    clear_bfs_parent();
    clear_is_parent_change();
    clear_frontier();
    num_aug_paths = 0;
    for(int i=1;i<=num_vertices1;i++){
        if(!h_visited[i] && !h_is_matched_vertex[i]){
            clear_frontier();
            frontier[i] = 1;
            bfs(0);
        }
    }
    if(num_aug_paths > 0){
        update_matchings();
    }
    return num_aug_paths;
}

int main(){
    // Dense matched-edge matrix: (V+1)^2 bools — roughly 40 GB at the
    // configured sizes, so the allocation can legitimately fail.
    // FIX: check it instead of segfaulting on first access.
    h_is_matched_edge = (bool *)calloc( (num_vertices1+ num_vertices2 + 1)*(num_vertices1 + num_vertices2+1), sizeof(bool));
    if(h_is_matched_edge == NULL){
        fprintf(stderr, "Failed to allocate the matched-edge matrix\n");
        return 1;
    }
    h_flat_adj_list = (int *)malloc(2*num_edges*sizeof(int));
    h_degree = (int *)malloc((num_vertices1+num_vertices2+1)*sizeof(int));
    h_list_ptr = (int *)malloc((num_vertices1+num_vertices2+2)*sizeof(int));
    h_list_ptr_copy = (int *)malloc((num_vertices1+num_vertices2+2)*sizeof(int));
    h_is_matched_vertex = (bool *)malloc((num_vertices1+num_vertices2+1)*sizeof(bool));
    h_partner_vertex = (int *)malloc((num_vertices1+num_vertices2+1)*sizeof(int));
    h_visited = (int *)malloc((num_vertices1+num_vertices2+1)*sizeof(int));
    h_bfs_parent = (int *)malloc((num_vertices1+num_vertices2+1)*sizeof(int));
    h_is_parent_change = (bool *)malloc((num_vertices1+num_vertices2+1)*sizeof(bool));
    frontier = (int *)malloc((num_vertices1+num_vertices2+1)*sizeof(int));
    next_frontier = (int *)malloc((num_vertices1+num_vertices2+1)*sizeof(int));

    // FIX: the original memsets passed element counts, not byte counts,
    // leaving most of each int array uninitialized (h_degree in particular
    // must be zero before the degree-counting pass below).
    memset(h_degree, 0, (num_vertices1 + num_vertices2 + 1)*sizeof(int));
    memset(h_is_matched_vertex, 0, (num_vertices1 + num_vertices2 + 1)*sizeof(bool));
    memset(h_visited, 0, (num_vertices1 + num_vertices2 + 1)*sizeof(int));
    memset(h_is_parent_change, 0, (num_vertices1 + num_vertices2 + 1)*sizeof(bool));
    memset(frontier, 0, (num_vertices1 + num_vertices2 + 1)*sizeof(int));
    memset(next_frontier, 0, (num_vertices1 + num_vertices2 + 1)*sizeof(int));

    // Edge endpoint lists — only needed for the two-pass adjacency build.
    int *h_edges_u, *h_edges_v;
    h_edges_u = (int *)malloc((num_edges)*sizeof(int));
    h_edges_v = (int *)malloc((num_edges)*sizeof(int));
    cout << " Hi" << endl;

    ifstream fin;
    fin.open("random_" + to_string(num_vertices1) + "_" + to_string(num_vertices2) + ".txt", ios::in);
    int u, v;
    // Vertices with 0 edges are implicitly ignored while reading the file.
    for(int i=0;i<num_edges;i++){
        fin >> u >> v;
        h_edges_u[i] = u;
        h_edges_v[i] = v;
        h_degree[u]++;
        h_degree[v]++;
    }
    cout << "Done reading edges" << endl;

    // Build list pointers via a prefix sum over degrees (index 0 unused;
    // the extra trailing slot simplifies end-of-list access).
    h_list_ptr[1] = 0;
    h_list_ptr_copy[1] = h_list_ptr[1];
    for(int i=2;i<=num_vertices1+num_vertices2;i++){
        h_list_ptr[i] = h_list_ptr[i-1] + h_degree[i-1];
        h_list_ptr_copy[i] = h_list_ptr[i];
    }
    h_list_ptr[num_vertices1+num_vertices2+1] = 2*num_edges;
    h_list_ptr_copy[num_vertices1+num_vertices2+1] = 2*num_edges;
    cout << "Pointer updated " << endl;

    // Second pass: scatter both directions of each edge; h_list_ptr_copy acts
    // as the per-vertex write cursor.
    for(int i=0;i<num_edges;i++){
        h_flat_adj_list[h_list_ptr_copy[h_edges_u[i]]] = h_edges_v[i];
        h_flat_adj_list[h_list_ptr_copy[h_edges_v[i]]] = h_edges_u[i];
        h_list_ptr_copy[h_edges_u[i]]++;
        h_list_ptr_copy[h_edges_v[i]]++;
    }
    cout << "Pointer updated 2 " << endl;

    // FIX: the raw edge lists are no longer needed — release them.
    free(h_edges_u);
    free(h_edges_v);

    initialise_partner_vertex();
    cout << "Partner vertex initialized " << endl;

    int x = check_matching();
    cout << "Matching checked " << endl;
    bfs_util();
    print_matchings();
    x = check_matching();
    cout << "Number of matchings: " << x << endl;
}
990
/* * ExUpdater.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include "ExUpdater.h" #include "SmartIndex.h" // indx - индекс вдоль правой или левой границы по y от firstY до lastY __host__ __device__ void ExUpdater::operator() (const int indx) { // correct Ex along the bottom /*nn = firstY; for (mm = firstX; mm < lastX; mm++) Ex(mm, nn) -= Cexh(mm, nn) * Hz1G(g1, mm); // correct Ex along the top nn = lastY; for (mm = firstX; mm < lastX; mm++) Ex(mm, nn) += Cexh(mm, nn) * Hz1G(g1, mm);*/ float Cexh = S*377.0; int n = firstY; Ex(indx, n) = Ex(indx, n) - Cexh * Hz1D[indx]; n = lastY; Ex(indx, n) = Ex(indx, n) + Cexh * Hz1D[indx]; }
991
#include "includes.h" __global__ void deviceAddVector(int *d_a, int *d_b, int *d_c, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { d_c[i] = d_a[i] + d_b[i]; // printf("Tread %d make sum %d + %d = %d", i, d_a[i], d_b[i], d_c[i]); } }
992
#include "includes.h" __global__ void A_for_lightning_estimation(float* rho, float* N, int npix, float* A) { int i = blockIdx.x*blockDim.x + threadIdx.x; // pixel index int c = blockIdx.y*blockDim.y + threadIdx.y; // channel index int h = blockIdx.z*blockDim.z + threadIdx.z; // harmonic index if (i < npix) { A[c*npix * 4 + h*npix + i] = rho[c*npix + i] * N[h*npix + i]; } }
993
#include <iostream> #include <math.h> #include <cuda_runtime.h> // #include <helper_cuda.h> // the __global__ keyword changes the function to a CUDA Kernel __global__ void add(int n, float *x, float *y, float *z) { // index of the current thread within it's block int index = blockIdx.x * blockDim.x + threadIdx.x; // number of threads in the block int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) z[i] = x[i] + y[i]; } int main(void) { int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n->%s\n", static_cast<int>(error_id), cudaGetErrorString(error_id)); exit(EXIT_FAILURE); } int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); int maxThreadsPerMultiProcessor = deviceProp.maxThreadsPerMultiProcessor; int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock; std::cout << "Max Threads Per Multi Processor: " << maxThreadsPerMultiProcessor << std::endl; std::cout << "Max Threads Per Block: " << maxThreadsPerBlock << std::endl; int N = 1<<20; // create and allocate the memory // this is called Unified Memory - accessible from CPU or GPU float *x, *y, *z; cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); cudaMallocManaged(&z, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 2.0f; y[i] = 5.0f; } // Run kernal of 1M elements on the gpu // 1. // 2. Number of threads in a thread block int blocksize = maxThreadsPerBlock; int numblocks = (N + blocksize - 1) / blocksize; add<<<numblocks, blocksize>>>(N, x, y, z); // wait for gpu to finish before accessing on host cudaDeviceSynchronize(); float maxError = 0.0f; for (int i = 0; i < N; i++) { // std::cout << "Amount: " << z[i] << std::endl; maxError = fmax(maxError, fabs(z[i] - 7.0f)); } std::cout << "Max Error: " << maxError << std::endl; // free memory cudaFree(x); cudaFree(y); return 0; }
994
#include<iostream>
#include<string>
#include<sstream>
#include<fstream>
#include <cstdio>
#include <vector>
#include <set>

#define SIZE 5        // Futoshiki board side length (5x5)
#define INPUTSIZE 2306451
// Input limits: at most 60 constraint items per instance; up to 144000 grids.

// Scans the 5x5 board row-major for the first unassigned cell (value -2).
// On success the row/col reference parameters hold its coordinates.
__device__ bool FindUnassignedLocation(int* matrix, int &row, int &col)
{
    for (row = 0; row < 5; row++)
        for (col = 0; col < 5; col++)
            if (matrix[row * 5 + col] == -2)
                return true;
    return false;
}

// Checks whether `num` may be placed at (row, col):
//  - no duplicate in the same column or row (the loop variables deliberately
//    shadow the row/col parameters; the cell under test is -2, so it never
//    matches),
//  - no violated "<" inequality constraint. Constraints are quads
//    (r1, c1, r2, c2) meaning cell(r1,c1) < cell(r2,c2).
// NOTE(review): `i` is unsigned but `constraint_size` is int — a
// signed/unsigned comparison; harmless for the sizes used here, but worth
// cleaning up.
__device__ bool isSafe(int* matrix, int row, int col, int num, int * constraints, int constraint_size)
{
    for (int row = 0; row < 5; row++)
        if (matrix[row * 5 + col] == num)
            return false;
    for (int col = 0; col < 5; col++)
        if (matrix[row * 5 + col] == num)
            return false;
    for(long unsigned int i = 0; i < constraint_size; i+=4)
    {
        // (row,col) is the smaller side of a constraint and would exceed the
        // already-assigned larger side.
        if(row == constraints[i] && col == constraints[i+1] && matrix[constraints[i+2] * 5 + constraints[i+3]] != -2 && num < matrix[constraints[i+2] * 5 + constraints[i+3]])
            return false;
        // (row,col) is the larger side and would fall below the assigned
        // smaller side.
        else if(row == constraints[i+2] && col == constraints[i+3] && matrix[constraints[i] * 5 + constraints[i+1]] != -2 && matrix[constraints[i] * 5 + constraints[i+1]] < num)
            return false;
    }
    return true;
}

// One block per puzzle. Threads 0..4 each run an iterative backtracking
// search seeded with a different candidate value for the first empty cell;
// the first thread to finish publishes its solution via shared memory and
// flips not_found_flag so the others stop. Launched with 96 threads/block.
__global__ void GPU_Futoshiki(int* grids, int* constraints, int* constraint_sizes, int* constraint_beginnings)
{
    int tid = threadIdx.x;
    int x_block = blockIdx.x;
    volatile __shared__ bool not_found_flag;  // volatile: solver threads poll it; cleared by the winner
    __shared__ int local_constraints[60];     // upper bound: large enough for every instance
    __shared__ int constraint_size;           // number of constraint items (quads * 4) for this puzzle
    __shared__ int constraint_start;          // offset of this puzzle's constraints in the global array
    __shared__ int local_final[25];           // staging buffer for the grid / final solution

    // Shared-variable initialization. NOTE(review): every thread writes the
    // same values to the scalar shared variables — a benign same-value race.
    not_found_flag = true;
    constraint_size = constraint_sizes[x_block] * 4;
    constraint_start = constraint_beginnings[x_block] * 4;
    if(tid < constraint_size)
        local_constraints[tid] = constraints[constraint_start + tid];

    /* debug dump of the constraints for block 3:
    if(blockIdx.x == 3 && tid == 0)
    {
        for(int yy = 0; yy < constraint_size; yy+=4)
        {
            printf("%d, %d, %d, %d\n", local_constraints[yy]+1, local_constraints[yy+1]+1, local_constraints[yy+2]+1, local_constraints[yy+3]+1);
        }
    }
    */

    // Stage the puzzle into shared memory.
    // NOTE(review): this __syncthreads() sits inside `if(tid < 25)` while the
    // block has 96 threads — a barrier in divergent control flow is undefined
    // behavior per the CUDA programming guide; it should be hoisted outside
    // the if. Left untouched here (documentation-only pass).
    if(tid < 25)
    {
        local_final[tid] = grids[x_block * 25 + tid];
        __syncthreads();
    }

    if(tid < 5) // 5 solver threads, one candidate seed value per thread
    {
        int futoshiki[25]; // private working copy of the board
        for(int q = 0; q < 25; q++)
            futoshiki[q] = local_final[q];
        // Only search if this thread's seed cell is actually empty.
        if(futoshiki[tid] == -2)
        {
            int row;
            int col;
            //futoshiki[row * 5 + col] = value; // (old direct-assignment experiment)
            int staque[50];        // explicit stack of moves, encoded row*100 + col*10 + num
            int stack_counter = 0; // stack top index
            int toStack;           // scratch for push/pop values
            //int kaputt = 0;      // (old no-solution flag)
            int allowed_value = 0; // after a roll-back, first value to retry at the popped cell
            bool dead_end;         // true => no value fits at the current cell, must backtrack
            while(not_found_flag)
            {
                if(!FindUnassignedLocation(futoshiki, row, col))
                {
                    // Board complete: publish the solution and stop everyone.
                    not_found_flag = false;
                    for(int x = 0; x < 25; x++)
                        local_final[x] = futoshiki[x];
                }
                else
                {
                    //printf("RETURNED row: %d, col: %d\n", row, col);
                    dead_end = true; // stays true if no candidate value fits below
                    //printf("banned value: %d, deadend: %d\n", banned_value, dead_end);
                    for (int num = allowed_value; num < 5 && dead_end; num++)
                    {
                        if (isSafe(futoshiki, row, col, num, local_constraints, constraint_size))
                        {
                            //printf("now chainging row: %d, col: %d, with num: %d\n", row, col, num);
                            dead_end = false;     // found a value; no roll-back needed
                            allowed_value = 0;    // next fresh cell starts trying from 0 again
                            futoshiki[row * 5 + col] = num;
                            toStack = row * 100 + col * 10 + num;
                            staque[stack_counter++] = toStack; // remember the move for backtracking
                        }
                    }
                    if(dead_end) // nothing fits here: undo the most recent move
                    {
                        toStack = staque[--stack_counter];
                        allowed_value = toStack % 10 + 1; // resume AFTER the value that failed
                        col = (toStack/10) % 10;
                        row = toStack / 100;
                        futoshiki[row * 5 + col] = -2;    // cell becomes unassigned again
                    }
                }
            }
        }
        // NOTE(review): another __syncthreads() inside a divergent branch
        // (only tid < 5 reach it) — same UB concern as above.
        __syncthreads();
    }
    // Write the solution back to global memory (25 threads, one cell each).
    if(not_found_flag == false)
        if(tid < 25) // kept separate from the solver branch for debugging
            grids[x_block * 25 + tid] = local_final[tid];
}

// Host driver: parses the puzzle file, deduplicates constraints, uploads
// everything, launches one block per puzzle, and writes solution.txt.
int main(int argc, char** argv)
{
    std::string filename(argv[1]);
    std::ifstream file(filename.c_str());
    // A second pass ("scout") over the same file just measures how many
    // constraint lines each puzzle has, by counting lines between separators.
    std::ifstream scout(filename.c_str());
    int no_grids;
    file >> no_grids;
    int dummy;
    scout >> dummy;
    int* grids = new int[no_grids * 25];
    int elem0, elem1, elem2, elem3, elem4;
    int pre_cursor = 0;
    int cursor = 0;
    int csize = 0;
    std::string file_line;
    std::string scout_line;
    int* constraint_sizes = new int[no_grids];
    int* constraint_beginnings = new int[no_grids];
    std::getline(scout, scout_line); // skip the count line remainder and header
    std::getline(scout, scout_line);
    for(int i = 0; i < INPUTSIZE; i++)
    {
        std::getline(scout, scout_line);
        if(scout_line == "-------")
        {
            // lines since last separator, minus the 5 grid rows = constraint count
            csize = i - pre_cursor - 5;
            constraint_sizes[cursor] = csize;
            cursor++;
            pre_cursor = i+1;
        }
    }
    int sum = 0;
    int temp_size;
    std::vector<int> constraint_vector; // flattened, deduplicated constraints for all puzzles
    std::set<int> constraint_set;       // the input repeats constraints; a set deduplicates them
    std::getline(file, file_line);
    for(int i = 0; i < no_grids; i++)
    {
        std::getline(file, file_line);
        // 5 rows of the grid; values stored 0-based (-1 shift), empties become -2.
        for(int j = 0; j < SIZE; j++)
        {
            std::getline(file, file_line);
            std::istringstream iss(file_line);
            iss >> elem0 >> elem1 >> elem2 >> elem3 >> elem4;
            grids[i*25 + j*5 + 0] = elem0 - 1;
            grids[i*25 + j*5 + 1] = elem1 - 1;
            grids[i*25 + j*5 + 2] = elem2 - 1;
            grids[i*25 + j*5 + 3] = elem3 - 1;
            grids[i*25 + j*5 + 4] = elem4 - 1;
        }
        // Pack each constraint quad into a single int (base-10 digits) so the
        // set can deduplicate whole quads at once.
        for(int c = 0; c < constraint_sizes[i]; c++)
        {
            std::getline(file, file_line);
            std::istringstream iss(file_line);
            iss >> elem0 >> elem1 >> elem2 >> elem3;
            elem4 = elem0 * 1000 + elem1 * 100 + elem2 * 10 + elem3;
            constraint_set.insert(elem4);
        }
        temp_size = constraint_set.size();
        constraint_sizes[i] = temp_size;      // now the deduplicated count
        constraint_beginnings[i] = sum;       // running offset into the flat array
        sum += temp_size;
        // Unpack the digits back into 0-based (r1, c1, r2, c2) quads.
        for (std::set<int>::iterator it=constraint_set.begin(); it!=constraint_set.end(); ++it)
        {
            elem4 = *it;
            constraint_vector.push_back(elem4 / 1000 - 1);      // r1
            constraint_vector.push_back((elem4 / 100) % 10 - 1); // c1
            constraint_vector.push_back((elem4 / 10) % 10 - 1);  // r2
            constraint_vector.push_back(elem4 % 10 - 1);         // c2
        }
        constraint_set.clear();
    }
    temp_size = constraint_vector.size();
    int * constraints = new int[temp_size];
    elem4 = 0; // reused as a write cursor
    for (std::vector<int>::iterator it = constraint_vector.begin(); it != constraint_vector.end(); it++)
    {
        constraints[elem4++] = *it;
    }
    int *grids_d, *constraints_d, *constraint_sizes_d, *constraint_beginnings_d;
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // --- host -> device upload, timed with events ---
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&grids_d, no_grids * 25 * sizeof(int));
    cudaMemcpy(grids_d, grids, no_grids * 25 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&constraints_d, temp_size * sizeof(int));
    cudaMemcpy(constraints_d, constraints, temp_size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&constraint_sizes_d, no_grids * sizeof(int));
    cudaMemcpy(constraint_sizes_d, constraint_sizes, no_grids * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&constraint_beginnings_d, no_grids * sizeof(int));
    cudaMemcpy(constraint_beginnings_d, constraint_beginnings, no_grids * sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("GPU Memory preparation duration: %f ms \n", time);
    // --- kernel: one block per puzzle, 96 threads per block, timed ---
    cudaEventRecord(start, 0);
    GPU_Futoshiki<<<no_grids, 96>>>(grids_d, constraints_d, constraint_sizes_d, constraint_beginnings_d);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Kernel Duration: %f ms \n", time);
    // --- device -> host download of the solved grids, timed ---
    cudaEventRecord(start, 0);
    cudaMemcpy(grids, grids_d, no_grids * 25 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("GPU to CPU Data Transfer Duration: %f ms \n", time);
    // free device memory
    cudaFree(grids_d);
    cudaFree(constraints_d);
    cudaFree(constraint_sizes_d);
    cudaFree(constraint_beginnings_d);
    // Write the solutions (shifted back to 1-based) in the input's format.
    std::ofstream myfile;
    myfile.open("solution.txt");
    myfile << no_grids << "\n" << "-------" << "\n";
    for(int i = 0; i < no_grids; i++)
    {
        for(int j = 0; j < SIZE; j++)
        {
            elem0 = grids[i*25 + j*5 + 0] + 1;
            elem1 = grids[i*25 + j*5 + 1] + 1;
            elem2 = grids[i*25 + j*5 + 2] + 1;
            elem3 = grids[i*25 + j*5 + 3] + 1;
            elem4 = grids[i*25 + j*5 + 4] + 1;
            myfile << elem0 << " " << elem1 << " " << elem2 << " " << elem3 << " " << elem4 << "\n";
        }
        myfile << "-------" << "\n";
    }
    myfile.close();
    /* (duplicate cudaFree block removed — the device buffers are already
       freed above) */
    // host memory
    delete[] grids;
    delete[] constraints;
    delete[] constraint_sizes;
    delete[] constraint_beginnings;
}
995
#include "includes.h"

// Converts a CSR-style offset array into per-node adjacency counts:
// output[i] = adjIndexes[i+1] - adjIndexes[i].
// Requires adjIndexes to have at least size+1 entries; 1D launch over `size`.
__global__ void findAdjacencySizesKernel(int size, int *adjIndexes, int *output)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;
    output[tid] = adjIndexes[tid + 1] - adjIndexes[tid];
}
996
#include "includes.h"

// Tiled square matrix multiply: c = a * b, all n x n, row-major.
// Each block computes one tile_size x tile_size tile of c using shared-memory
// staging. Requires blockDim == (tile_size, tile_size) and
// tile_size * tile_size <= Shared_Mem_Size.
// Fixes vs. the original: the output store and the shared-memory loads are
// now bounds-checked, and the tile loop ceil-divides, so the kernel is
// correct even when n is not a multiple of tile_size (the original read and
// wrote out of bounds in that case).
__global__ void tile_MatrixMul(int* a, int* b, int* c, int n, int tile_size)
{
    // Statically-sized shared staging tiles (Shared_Mem_Size from includes.h).
    __shared__ int A[Shared_Mem_Size];
    __shared__ int B[Shared_Mem_Size];

    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Global row/column of the c element this thread produces.
    int row = blockIdx.y * tile_size + ty;
    int col = blockIdx.x * tile_size + tx;

    // Accumulator for c[row][col].
    int temp_val = 0;

    // Ceil-divide so partial tiles at the matrix edge are included.
    int num_tiles = (n + tile_size - 1) / tile_size;
    for (int i = 0; i < num_tiles; i++)
    {
        // Each thread stages one element of a and one of b; out-of-range
        // elements are loaded as 0 so they contribute nothing to the sum.
        int a_col = i * tile_size + tx; // column of a this thread loads
        int b_row = i * tile_size + ty; // row of b this thread loads
        A[(ty * tile_size) + tx] = (row < n && a_col < n) ? a[row * n + a_col] : 0;
        B[(ty * tile_size) + tx] = (b_row < n && col < n) ? b[b_row * n + col] : 0;

        // All threads must finish staging before anyone reads the tiles.
        __syncthreads();

        for (int j = 0; j < tile_size; j++)
        {
            temp_val += A[(ty * tile_size) + j] * B[(j * tile_size) + tx];
        }

        // Prevent fast threads from overwriting tiles still being read.
        __syncthreads();
    }

    // Guarded store: threads mapped past the matrix edge write nothing.
    if (row < n && col < n)
        c[(row * n) + col] = temp_val;
}
997
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <iostream>
#include <iterator>

// Binary functor computing a*x + y (result truncated to int, since the
// element type is int but the scale factor is float).
struct saxpy_functor
{
    const float a;

    saxpy_functor(float _a) : a(_a) {}

    __host__ __device__ int operator()(const int& x, const int& y) const
    {
        return a * x + y;
    }
};

// Y <- A * X + Y.
// Fix vs. the original: Y was taken by value, so the transform wrote into a
// temporary copy and the caller's vector was never updated. It is now taken
// by reference (call sites are source-compatible).
void saxpy_fast(float A, thrust::device_vector<int>& X, thrust::device_vector<int>& Y)
{
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(A));
}

// Demo: Z = 2*X + Y with X = 0..9 and Y = all ones, printed one per line.
int main(int argc, char** argv)
{
    thrust::device_vector<int> X(10);
    thrust::device_vector<int> Y(10);
    thrust::device_vector<int> Z(10);
    thrust::sequence(X.begin(), X.end()); // X = 0, 1, ..., 9
    thrust::fill(Y.begin(), Y.end(), 1);  // Y = 1, 1, ..., 1

    //saxpy_fast(0.1,X,Y);
    thrust::transform(X.begin(), X.end(), Y.begin(), Z.begin(), saxpy_functor(2));

    thrust::copy(Z.begin(), Z.end(), std::ostream_iterator<int>(std::cout, "\n"));
    return 0;
}
998
#include "cuda.h"
#include <cassert>
#include <chrono>
#include <iostream>
#include <stdio.h>

#define N 800
#define TPB 16
#define RED_NB 50
#define BigN 4000
#define BigTPB 20
#define memN 200000000
#define memTPB 500

using namespace std;
using chrono_clock = std::chrono::high_resolution_clock;
using sec_dur = std::chrono::duration<double, std::ratio<1, 1>>;

// Fills every element with the constant 7 (used by the memcpy stress loop).
__global__ void bigstencil(int* a)
{
    unsigned long long i = blockIdx.x * blockDim.x + threadIdx.x;
    a[i] = 7;
}

// a[i] = i for every thread in the grid.
__global__ void indexfill(int* a)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    a[i] = i;
}

// Dot product of two N-vectors. Must be launched as <<<1, N>>>: the shared
// buffer is indexed by the global thread id and thread 0 sums serially.
__global__ void dotProduct(int* a, int* b, int* c)
{
    __shared__ int temp[N];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    temp[i] = a[i] * b[i];
    __syncthreads();
    if(i == 0)
    {
        int sum = 0;
        for(int i = 0; i < N; i++)
            sum += temp[i];
        *c = sum;
    }
}

// Per-block tree reduction of TPB (=16) elements; one partial sum per block.
// The hand-unrolled halving steps assume blockDim.x == 16.
__global__ void reduction(int* input, int* output)
{
    __shared__ int tmp[TPB];
    tmp[threadIdx.x] = input[threadIdx.x + blockIdx.x * blockDim.x];
    __syncthreads();
    if(threadIdx.x < blockDim.x / 2)
        tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 2];
    __syncthreads();
    if(threadIdx.x < blockDim.x / 4)
        tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 4];
    __syncthreads();
    if(threadIdx.x < blockDim.x / 8)
        tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 8];
    __syncthreads();
    if(threadIdx.x == 0)
    {
        tmp[threadIdx.x] += tmp[threadIdx.x + 1];
        output[blockIdx.x] = tmp[threadIdx.x];
    }
}

// Simple stencil: out[i] = in[i] + 2.
__global__ void bigstencil(int* in, int* out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    out[i] = in[i] + 2;
}

// Micro-benchmark driver: runs four small kernels, verifies their results,
// then stress-tests memcpy timing.
// Fixes vs. the original:
//  - the final cudaFree calls freed the HOST arrays h_out/h_in instead of the
//    device buffers d_in/d_out (and leaked d_input/d_output);
//  - the memcpy loop declared an ~800 MB array on the host STACK, passed a
//    host pointer to a kernel, and called cudaMemcpy with swapped
//    pointer/direction arguments (&mem_h instead of mem_h). The loop now uses
//    a heap host buffer and a proper cudaMalloc'd device buffer.
int main()
{
    // CSCS: Initialize CUDA runtime outside measurement region
    cudaFree(0);

    chrono_clock::time_point time_start, time_end;
    time_start = chrono_clock::now();

    // ===------------------------------------------------===
    //  Setup for the kernels
    // ===------------------------------------------------===

    // Indexing
    int h_indices[N];
    int* d_indices;
    cudaMalloc((void**)&d_indices, N * sizeof(int));

    // Dot Product
    int h_a[N], h_b[N], h_c;
    int *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, N * sizeof(int));
    cudaMalloc((void**)&d_b, N * sizeof(int));
    cudaMalloc((void**)&d_c, sizeof(int));
    for(int i = 0; i < N; i++)
    {
        h_a[i] = 1;
        h_b[i] = i;
    }
    cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);

    // Reduction
    int h_input[N], h_output[RED_NB];
    int *d_input, *d_output;
    for(int i = 0; i < N; i++)
        h_input[i] = 1;
    cudaMalloc((void**)&d_input, N * sizeof(int));
    cudaMalloc((void**)&d_output, RED_NB * sizeof(int));
    cudaMemcpy(d_input, h_input, N * sizeof(int), cudaMemcpyHostToDevice);

    // Time consuming Stencil
    int *d_in, *d_out;
    int h_in[BigN], h_out[BigN];
    for(int i = 0; i < BigN; ++i)
    {
        h_in[i] = 1;
    }
    cudaMalloc((void**)&d_in, BigN * sizeof(int));
    cudaMalloc((void**)&d_out, BigN * sizeof(int));
    cudaMemcpy(d_in, h_in, BigN * sizeof(int), cudaMemcpyHostToDevice);

    // ===------------------------------------------------===
    //  Running the kernels
    // ===------------------------------------------------===
    indexfill<<<N / TPB, TPB>>>(d_indices);
    dotProduct<<<1, N>>>(d_a, d_b, d_c);
    reduction<<<RED_NB, TPB>>>(d_input, d_output);
    bigstencil<<<BigN / BigTPB, BigTPB>>>(d_in, d_out);

    // ===------------------------------------------------===
    //  Retrieving GPU Data
    // ===------------------------------------------------===

    // Indexing
    cudaMemcpy(h_indices, d_indices, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_indices);

    // Dot Product
    cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Reduction
    cudaMemcpy(h_output, d_output, RED_NB * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_output);

    // Bigger stencil — free the DEVICE buffers (the original freed h_out/h_in).
    cudaMemcpy(h_out, d_out, BigN * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);

    // do a lot of memcpy to see timing stability
    // NOTE: memN ints are ~800 MB; the host buffer must live on the heap.
    int nIterMemCpy = 1000000;
    int* h_mem = new int[memN]; // host staging buffer
    int* d_mem;                 // device buffer the stencil runs on
    for(int i = 0; i < memN; ++i)
    {
        h_mem[i] = i;
    }
    cudaMalloc((void**)&d_mem, memN * sizeof(int));
    for(int i = 0; i < nIterMemCpy; ++i)
    {
        cudaMemcpy(d_mem, h_mem, memN * sizeof(int), cudaMemcpyHostToDevice);
        bigstencil<<<memN / memTPB, memTPB>>>(d_mem);
        cudaMemcpy(h_mem, d_mem, memN * sizeof(int), cudaMemcpyDeviceToHost);
        h_mem[i]++; // perturb the host data so each iteration differs
    }
    cudaFree(d_mem);
    delete[] h_mem;

    time_end = chrono_clock::now();

    // ===------------------------------------------------===
    //  Verify results
    // ===------------------------------------------------===
    bool testpass = true;

    // Indexing: sum of 0..N-1
    int sum = 0;
    for(int i = 0; i < N; ++i)
    {
        sum += h_indices[i];
    }
    int ref = N * (N - 1) / 2;
    testpass = testpass * (ref == sum);

    // Dot Product: ones dot 0..N-1 is the same sum
    testpass = testpass * (ref == h_c);

    // Reduction: each block sums 16 ones
    for(int i = 0; i < RED_NB; ++i)
    {
        testpass = testpass * (16 == h_output[i]);
    }

    // Stencil: every element is 1 + 2
    int res = 0;
    for(int i = 0; i < BigN; ++i)
    {
        res += h_out[i];
    }
    testpass = testpass * (3 * BigN == res);

    // ===-------------------------------------------------------===
    //  Output
    // ===-------------------------------------------------------===
    std::string result = (testpass) ? "OK" : "ERROR";
    auto secs = std::chrono::duration_cast<sec_dur>(time_end - time_start);
    std::cout << "Result: " << result << std::endl;
    std::cout << "Timing: " << secs.count() << std::endl;
    return 0;
}
999
#include "cuda_runtime.h"
#include"stdio.h"
#include <stdlib.h>
//#include "matrixmul.cuh"

#define BLOCK_SIZE 16
#define A_HEIGHT 128
#define A_WIDTH 128
#define B_HEIGHT 128
#define B_WIDTH 128
#define C_HEIGHT A_HEIGHT
#define C_WIDTH B_WIDTH

// Naive GPU matrix multiply: c[row][col] = sum_i a[row][i] * b[i][col].
// One thread per output element; out-of-range threads return early.
__global__ void matrix_mulKernel(int *c, int *a, int *b, int a_height, int a_width, int b_width, int c_width)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(row >= a_height || col >= b_width)
        return;
    int i, sum = 0;
    for(i = 0; i < a_width; ++i)
    {
        sum += a[row * a_width + i] * b[i * b_width + col];
    }
    c[row * c_width + col] = sum;
}

void matrix_multiplication(const int *a, const int *b, int *c, int a_hiehgt, int b_width, int b_height);
void print_matrix(int *matrix, int height, int width);

// Multiplies two 128x128 matrices on the GPU, checks against a CPU
// reference, and prints the top-left 10x10 corner of each result.
// Fixes vs. the original: all host and device buffers are now freed, and
// main returns a value.
int main()
{
    int *a = (int*) calloc(A_HEIGHT * A_WIDTH, sizeof(unsigned int));
    int *b = (int*) calloc(B_HEIGHT * B_WIDTH, sizeof(unsigned int));
    int *c = (int*) calloc(C_HEIGHT * C_WIDTH, sizeof(unsigned int)); // GPU result
    int *d = (int*) calloc(C_HEIGHT * C_WIDTH, sizeof(unsigned int)); // CPU reference

    int i;
    for (i = 0; i < A_HEIGHT * A_WIDTH; i++)
    {
        a[i] = i;
        b[i] = i;
    }

    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;

    // Allocate GPU buffers for three matrices (two input, one output).
    cudaMalloc((void**)&dev_c, C_HEIGHT * C_WIDTH * sizeof(unsigned int));
    cudaMalloc((void**)&dev_a, A_HEIGHT * A_WIDTH * sizeof(unsigned int));
    cudaMalloc((void**)&dev_b, B_HEIGHT * B_WIDTH * sizeof(unsigned int));

    // Copy input matrices from host memory to GPU buffers.
    cudaMemcpy(dev_a, a, A_HEIGHT * A_WIDTH * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, B_HEIGHT * B_WIDTH * sizeof(unsigned int), cudaMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-divide so the grid covers the whole output matrix.
    dim3 grid((B_WIDTH + dimBlock.x - 1) / dimBlock.x, (A_HEIGHT + dimBlock.y - 1) / dimBlock.y);
    matrix_mulKernel<<<grid, dimBlock>>>(dev_c, dev_a, dev_b, A_HEIGHT, A_WIDTH, B_WIDTH, C_WIDTH);

    // Copy output matrix from GPU buffer to host memory (blocking, so it also
    // synchronizes with the kernel).
    cudaMemcpy(c, dev_c, C_HEIGHT * C_WIDTH * sizeof(int), cudaMemcpyDeviceToHost);

    matrix_multiplication(a, b, d, A_HEIGHT, B_WIDTH, B_HEIGHT);

    bool flag = true;
    for(int i = 0; i < A_HEIGHT * B_WIDTH; i++)
    {
        if (c[i] != d[i])
        {
            printf("Verification fail\n");
            flag = false;
            break;
        }
    }
    if (flag)
        printf("Verification pass\n");

    //printf("Matrix A:\n");
    //print_matrix(a, size);
    //printf("Matrix B:\n");
    //print_matrix(b, size);
    printf("Matrix C:\n");
    print_matrix(c, 10, 10);
    printf("Matrix D:\n");
    print_matrix(d, 10, 10);

    // Release device and host memory (the original leaked all of it).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a);
    free(b);
    free(c);
    free(d);
    return 0;
}

// CPU reference multiply: c = a * b, with a being a_height x b_height and
// b being b_height x b_width, all row-major.
void matrix_multiplication(const int *a, const int *b, int *c, int a_height, int b_width, int b_height)
{
    for(int i = 0; i < a_height; i++)
    {
        for(int j = 0; j < b_width; j++)
        {
            int sum = 0;
            for(int k = 0; k < b_height; k++)
            {
                sum += a[i * b_height + k] * b[k * b_width + j];
            }
            c[i * b_width + j] = sum;
        }
    }
}

// Prints a height x width row-major matrix.
// Fix vs. the original: the loops iterated rows over `width` and columns
// over `height`, which was wrong for any non-square view (the 10x10 calls
// above happened to mask it). Rows now iterate over height, columns over
// width, indexed row * width + col.
void print_matrix(int *matrix, int height, int width)
{
    int row, col;
    for(row = 0; row < height; row++)
    {
        for(col = 0; col < width; col++)
            printf("%5d", matrix[row * width + col]);
        printf("\n");
    }
}
1,000
#include "includes.h"

__device__ int position; // index of the largest value
__device__ int largest;  // value of the largest value
int lenString = 593;
int maxNumStrings = 1000000;
int threshold = 2;

// Clears *remaining if any in-range element of d_c is zero (i.e. some item
// is still unprocessed). 1D launch; `size` is the length of d_c.
// Fix vs. the original: the condition was written
//   (d_c[my_id] == 0) && (my_id < size)
// so short-circuit evaluation dereferenced d_c[my_id] BEFORE the bounds
// check, reading out of bounds for tail threads. The bounds check now runs
// first. All in-range racing writes store the same value (0), so the write
// itself needs no atomic.
__global__ void anyLeft(int *d_c, int *remaining, int size)
{
    int my_id = blockDim.x * blockIdx.x + threadIdx.x;
    if((my_id < size) && (d_c[my_id] == 0))
    {
        *remaining = 0;
    }
}