serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
6,501
/* Using the CUDA framework for the array addition in the GPU.
 *
 * Flow: allocate device buffers, copy the host arrays to the device,
 * launch one block per element, copy the result back, print, and free
 * the device memory.  Every runtime call and the kernel launch are now
 * checked — previously any failure was silent. */
#include <stdio.h>
#include <stdlib.h>

#define N 10

/* Abort with file/line and the CUDA error string if a runtime call fails. */
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,  \
                    cudaGetErrorString(err_));                             \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

/* Kernel: launched <<<N,1>>>, so the block index selects the element.
 * The tid < N guard protects against an oversized grid. */
__global__ void add(int *a, int *b, int *c)
{
    int tid = blockIdx.x;   // Used to get the index of the thread kernel
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

int main()
{
    int a[N], b[N], c[N];
    int *devA, *devB, *devC;

    /* Enter the values in the arrays. */
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = 2 * i;
    }

    /* Allocate the memory in the device. */
    CUDA_CHECK(cudaMalloc(&devA, N * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&devB, N * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&devC, N * sizeof(int)));

    /* Copy the values to the device memory. */
    CUDA_CHECK(cudaMemcpy(devA, a, N * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(devB, b, N * sizeof(int), cudaMemcpyHostToDevice));

    /* Call the device function: N blocks of 1 thread. */
    add<<<N, 1>>>(devA, devB, devC);
    CUDA_CHECK(cudaGetLastError());   /* catch bad launch configuration */

    /* Copy the results into the host memory location.
     * (A blocking cudaMemcpy also synchronizes with the kernel.) */
    CUDA_CHECK(cudaMemcpy(c, devC, N * sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) {
        printf("A + B = C: %d + %d = %d \n", a[i], b[i], c[i]);
    }

    /* Free the memory alloted in the device. */
    CUDA_CHECK(cudaFree(devA));
    CUDA_CHECK(cudaFree(devB));
    CUDA_CHECK(cudaFree(devC));
    return 0;
}
6,502
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <sys/time.h>

using namespace std;

/* Randomized greedy graph colouring.
 *
 * Graph is read as an edge list and stored CSR-style:
 *   h_vertexArray[v]  = index into h_neighbourArray of v's first neighbour
 *   h_neighbourArray  = 1-based neighbour ids
 * Colours live in degreeCount (reused: first degrees, then colours).
 *
 * FIX: the grid is rounded up to a multiple of threadsPerBlock, so the
 * curand-seeding and colouring kernels previously wrote state[i] /
 * degreeCount[i] for i >= n — out-of-bounds device writes.  Both kernels
 * now take/honour n and bail out early. */

/* Seed one curand state per vertex.  Threads past n (grid padding) exit. */
__global__ void partition_step(curandState *state, unsigned long seed, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;   /* FIX: guard the padded tail of the grid */
    curand_init(seed, i, 0, &state[i]);
}

/* Assign each vertex a uniform random colour in [1, limit]. */
__global__ void randomColouring(curandState *globalState, int *degreeCount, int n, int limit)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;   /* FIX: guard the padded tail of the grid */
    curandState localState = globalState[i];
    float RANDOM = curand_uniform(&localState);
    globalState[i] = localState;
    RANDOM *= (limit - 1 + 0.999999);
    RANDOM += 1;
    degreeCount[i] = (int) RANDOM;
}

/* Mark detectConflict[i]=1 if vertex i shares its colour with a neighbour. */
__global__ void conflictDetection(int *vertexArray, int *neighbourArray, int *degreeCount,
                                  int n, int m, int *detectConflict)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    int myColour = degreeCount[i];
    int incoming = vertexArray[i];
    int stop = (i == n - 1) ? m : vertexArray[i + 1];   /* last vertex ends at m */
    for (int j = incoming; j < stop; j++) {
        if (degreeCount[neighbourArray[j] - 1] == myColour) {
            detectConflict[i] = 1;
            break;
        }
    }
}

/* Accumulate out-degree for i and in-degree for each of its neighbours. */
__global__ void degreeCalc(int *vertexArray, int *neighbourArray, int *degreeCount,
                           int n, int m)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    int incoming = vertexArray[i];
    int stop = (i == n - 1) ? m : vertexArray[i + 1];
    int diff = stop - incoming;
    atomicAdd(&degreeCount[i], diff);
    for (int j = incoming; j < stop; j++) {
        /* neighbour ids are 1-based, hence the -1 */
        atomicAdd(&degreeCount[neighbourArray[j] - 1], 1);
    }
}

int main(int argc, char const *argv[])
{
    int n, m;
    /* Enter number of vertices and edges */
    cin >> n >> m;

    int h_vertexArray[n];
    int h_neighbourArray[m];
    int h_degreeCount[n];
    int h_detectConflict[n];

    /* Cuda memory allocation */
    size_t bytes = n * sizeof(int);
    int *d_vertexArray = NULL;
    cudaMalloc((void **)&d_vertexArray, bytes);
    int *d_neighbourArray = NULL;
    cudaMalloc((void **)&d_neighbourArray, m * sizeof(int));
    int *d_detectConflict = NULL;
    cudaMalloc((void **)&d_detectConflict, bytes);
    cudaMemset((void *)d_detectConflict, 0, bytes);
    int *d_degreeCount = NULL;
    cudaMalloc((void **)&d_degreeCount, bytes);
    cudaMemset((void *)d_degreeCount, 0, bytes);
    curandState *partition_states;
    cudaMalloc(&partition_states, n * sizeof(curandState));

    /* Sentinel: vertices with no outgoing edges point at the end (m). */
    for (int i = 0; i < n; ++i) {
        h_vertexArray[i] = m;
    }

    int temp = 0;
    int current = 0;
    int mark = 1;

    /* Add the graph based on input file (edge list, converted to CSR). */
    for (int i = 0; i < m; ++i) {
        int incoming;
        int end;
        cin >> incoming >> end;
        incoming++;   /* input is 0-based; internal ids are 1-based */
        end++;
        if (incoming != mark) {
            if (incoming == mark + 1 && h_vertexArray[mark - 1] != m) {
                /* consecutive source with entries already written: nothing to patch */
            } else {
                /* back-fill skipped sources so their ranges are empty */
                for (int j = mark; j < incoming; j++) {
                    h_vertexArray[j - 1] = temp;
                }
            }
            mark = incoming;
        }
        if (incoming == current) {
            h_neighbourArray[temp] = end;
            temp++;
        } else {
            current = incoming;
            h_vertexArray[current - 1] = temp;
            h_neighbourArray[temp] = end;
            temp++;
        }
    }

    cudaMemcpy(d_vertexArray, h_vertexArray, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_neighbourArray, h_neighbourArray, m * sizeof(int), cudaMemcpyHostToDevice);

    int threadsPerBlock = 512;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;

    struct timeval startTime;
    struct timeval endTime;
    struct timezone startZone;
    struct timezone endZone;
    long startt, endt;
    double overhead;

    cout << threadsPerBlock << " " << blocksPerGrid << endl;

    gettimeofday(&startTime, &startZone);

    /* Step 0 : Calculate degree of each vertex */
    degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray,
                                                   d_degreeCount, n, m);

    /* Max degree bounds the number of colours needed. */
    thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
    int max = *(thrust::max_element(d_ptr, d_ptr + n));
    cout << "Max number of colours = " << max << endl;

    partition_step<<<blocksPerGrid, threadsPerBlock>>>(partition_states, time(NULL), n);

    /* Step 1 - Randomly assign colours (degreeCount now holds colours). */
    randomColouring<<<blocksPerGrid, threadsPerBlock>>>(partition_states, d_degreeCount, n, max);

    cudaMemcpy(h_degreeCount, d_degreeCount, n * sizeof(int), cudaMemcpyDeviceToHost);
    cout << "randomColouring" << endl;
    for (int i = 0; i < n; i++) {
        cout << "Color of" << i << ": " << h_degreeCount[i] << endl;
    }
    cout << endl;

    /* Step 2 - Find vertices whose random colour conflicts with a neighbour. */
    conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray,
                                                          d_degreeCount, n, m, d_detectConflict);

    thrust::device_ptr<int> d_detectConflict_ptr = thrust::device_pointer_cast(d_detectConflict);
    int count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
    cudaMemcpy(h_detectConflict, d_detectConflict, n * sizeof(int), cudaMemcpyDeviceToHost);

    /* Step 3 - Sequentially recolour each conflicted vertex with the
     * smallest colour unused by its neighbours (first-fit). */
    int countnew = 0;
    int old_colors[n];
    for (int i = 0; i < n; ++i) {
        old_colors[i] = -1;
    }
    for (int i = 0; i < n - 1; i++) {
        if (h_detectConflict[i] == 0) {
            continue;
        }
        countnew++;
        bool usedColours[n];
        fill(usedColours, usedColours + n, false);
        int incoming = h_vertexArray[i];
        int stop = h_vertexArray[i + 1];
        old_colors[i] = h_degreeCount[i];
        for (int j = incoming; j < stop; j++) {
            usedColours[h_degreeCount[h_neighbourArray[j] - 1] - 1] = true;
        }
        for (int j = 0; j < n; j++) {
            if (usedColours[j] == false) {
                h_degreeCount[i] = j + 1;
                break;
            }
        }
    }
    /* Last vertex: its neighbour range ends at m, not vertexArray[i+1]. */
    if (h_detectConflict[n - 1] != 0) {
        bool usedColours[n];
        countnew++;
        fill(usedColours, usedColours + n, false);
        int incoming = h_vertexArray[n - 1];
        int stop = m;
        for (int j = incoming; j < stop; j++) {
            usedColours[h_degreeCount[h_neighbourArray[j] - 1] - 1] = true;
        }
        for (int j = 0; j < n; j++) {
            if (usedColours[j] == false) {
                h_degreeCount[n - 1] = j + 1;
                break;
            }
        }
    }

    for (int i = 0; i < n; ++i) {
        cout << "Colour of i from" << i << " " << old_colors[i] << ":" << h_degreeCount[i] << endl;
    }

    /* Re-run conflict detection to verify the fix-up pass. */
    cudaMemset((void *)d_detectConflict, 0, (n) * sizeof(int));
    cudaMemcpy(d_degreeCount, h_degreeCount, n * sizeof(int), cudaMemcpyHostToDevice);
    conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray,
                                                          d_degreeCount, n, m, d_detectConflict);

    gettimeofday(&endTime, &endZone);
    startt = startTime.tv_sec * 1000000 + startTime.tv_usec;
    endt = endTime.tv_sec * 1000000 + endTime.tv_usec;
    overhead = (endt - startt) / 1000000.0;

    count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
    cout << "Count: " << count1 << " " << countnew << endl;
    cout << "time taken is" << overhead << endl;

    cudaFree(d_neighbourArray);
    cudaFree(d_vertexArray);
    cudaFree(d_degreeCount);
    cudaFree(d_detectConflict);
    cudaDeviceReset();
    return 0;
}
6,503
#include <stdio.h>
#include <malloc.h>

const int N = 1 << 20; // 2^20 elements

/* Device kernel: each thread writes one element, a[i] = 0.2f * i.
 * Launched with N/256 blocks of 256 threads, so the grid covers N exactly.
 * FIX: the literal was 0.2 (double), forcing a double multiply and
 * conversion per element; 0.2f keeps the math in single precision. */
__global__ void gTest(float* a)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    a[i] = 0.2f * (float)i;
}

int main()
{
    float *a_d, *a_h;

    a_h = (float*) calloc(N, sizeof(float));       // host buffer
    cudaMalloc((void**)&a_d, N * sizeof(float));   // allocate device memory

    gTest <<< N/256, 256 >>> (a_d);                // launch the kernel on the GPU
    cudaDeviceSynchronize();                       // wait for the kernel to finish

    // copy the result back to the host
    cudaMemcpy(a_h, a_d, N * sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
        fprintf(stderr, "%g\n", a_h[i]);

    free(a_h);
    cudaFree(a_d);
    return 0;
}
6,504
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/* Square matrix multiply C = A * B, one thread per output element.
 *
 * FIX: the kernel previously did `c[row*n+col] += ...` into device memory
 * that cudaMalloc never zeroes, and the timing loop launched it 300 times,
 * accumulating 300x garbage.  Each thread now sums into a register and
 * overwrites its output element, so repeated launches are idempotent. */
__global__ void matrixMult(double *a, double *b, double *c, int n)
{
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row < n && col < n) {
        double sum = 0.0;                       // FIX: local accumulator
        for (int i = 0; i < n; ++i) {
            sum += a[row*n + i] * b[i*n + col];
        }
        c[row*n + col] = sum;                   // overwrite, never +=
    }
}

int main(int argc, char **argv)
{
    int N = 32;
    int size = N*N;         // Matrix size
    int threads = 32;       // Threads per block (32x32 = 1024, the max)
    int iters = 300;        // Number of kernel executions to average over
    double *a, *b, *c;      // Host copies of matrix a, b, c
    double *d_a, *d_b, *d_c; // Device copies of a, b, c

    // Alloc memory in host for double array
    a = (double*) malloc(size*sizeof(double));
    b = (double*) malloc(size*sizeof(double));
    c = (double*) malloc(size*sizeof(double));

    // Init a simple matrix in vector format
    for(int i = 0; i < N; i++) {
        for(int j = 0; j < N; j++) {
            a[i*N + j] = i*N + j;
            b[i*N + j] = i*N + j;
        }
    }

    // Alloc memory in device for double array
    cudaMalloc(&d_a, size*sizeof(double));
    cudaMalloc(&d_b, size*sizeof(double));
    cudaMalloc(&d_c, size*sizeof(double));

    // Copy from host to device
    cudaMemcpy(d_a, a, size*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size*sizeof(double), cudaMemcpyHostToDevice);

    // Nº of threads and blocks to use (N is a multiple of `threads`)
    dim3 blocksPerGrid(N / threads, N / threads);
    dim3 threadsPerBlock(threads, threads);

    cudaEvent_t start, stop;
    float elapsedTime;

    // Start recording time elapsed
    cudaEventCreate(&start);
    cudaEventRecord(start, 0);

    // Launch kernel `iters` times; each launch recomputes C from scratch
    for(int i = 0; i < iters; i++)
        matrixMult<<<blocksPerGrid,threadsPerBlock>>>(d_a, d_b, d_c, N);

    // Wait until the kernel has finished its execution
    cudaDeviceSynchronize();

    // Stop recording time elapsed
    cudaEventCreate(&stop);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed time : %f ms\n", elapsedTime/iters);

    // Copy from device to host the result
    cudaMemcpy(c, d_c, size*sizeof(double), cudaMemcpyDeviceToHost);

    // Uncomment this block to visualize the result
    /* for(int i = 0; i < N; i++) { for(int j = 0; j < N; j++) { printf("%f \t", c[i*N + j]); } printf("\n"); } */

    // Free memory used in both host and device
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    printf("Done\n");
    return 0;
}
6,505
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Square matrix multiply on unified memory: d_c = d_a * d_b. */
__global__ void mult(int *d_a, int *d_b, int *d_c, int m)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if (col < m && row < m) {
        for (int i = 0; i < m; i++) {
            sum += d_a[row * m + i] * d_b[i * m + col];
        }
        d_c[row * m + col] = sum;
    }
}

int main(int argc, char* argv[])
{
    int m;
    printf("Ingresa el tamaño de la matrix cuadrada \n");
    scanf("%d", &m);

    int *d_a, *d_b, *d_c;
    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&d_a, sizeof(int)*m*m);
    cudaMallocManaged(&d_b, sizeof(int)*m*m);
    cudaMallocManaged(&d_c, sizeof(int)*m*m);

    int i, j;
    // initialize matrix A
    for (i = 0; i < m; ++i) {
        for (j = 0; j < m; ++j) {
            d_a[i * m + j] = 1;
        }
    }
    // initialize matrix B
    for (i = 0; i < m; ++i) {
        for (j = 0; j < m; ++j) {
            d_b[i * m + j] = 2;
        }
    }

    int N = m;
    // FIX: the original set threadsPerBlock to (512,512) = 262144 threads
    // per block, far over the 1024-thread hardware limit, so the kernel
    // never launched for N*N > 512.  Use a 32x32 block (exactly 1024
    // threads) and tile the matrix with as many blocks as needed.
    dim3 threadsPerBlock(N, N);
    dim3 blocksPerGrid(1, 1);
    if (N*N > 1024) {
        threadsPerBlock.x = 32;
        threadsPerBlock.y = 32;
        blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x));
        blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y));
    }

    mult<<<blocksPerGrid,threadsPerBlock>>>(d_a, d_b, d_c, m);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    printf("\n Resultado \n");
    /* Check for errors: every element should equal m*2 */
    int res = m*2;
    int maxError = 0;
    for (i = 0; i < m; ++i) {
        for (j = 0; j < m; ++j) {
            maxError = fmax(maxError, fabs( d_c[i * m + j]-res));
            printf("%d ", d_c[i * m + j] );
        }
        printf("\n");
    }
    printf("Max error: %d \n", maxError);

    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
6,506
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Square-matrix product C = A * B.
// The matrix order is derived from the launch configuration
// (blockDim.y * gridDim.y), so grid and block together must tile an
// N x N index space.  One thread computes one output element.
extern "C" __global__ void matrixMulkernel(float * A, float *B, float *C)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    int order = blockDim.y * gridDim.y;   // matrix dimension N

    // threads outside the matrix do nothing
    if (row >= order || col >= order)
        return;

    float acc = 0;
    for (unsigned int k = 0; k < order; k++) {
        acc += A[row * order + k] * B[k * order + col];
    }
    C[row * order + col] = acc;
}
6,507
#include "includes.h"

/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */

// Thread block size
#define BLOCK_SIZE 16

/* Matrices */
float A[MAXN][MAXN], B[MAXN][MAXN];

/* junk */
#define randm() 4|2[uid]&3

/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
 * defined in the beginning of this code. B[][] is initialized to zeros. */

/* Normalize each element: d_out = (d_in - mean) / sd, writing 0 where sd
 * is exactly 0 (the host side presumably stores 0.0 for degenerate stats —
 * TODO confirm, the producer of d_sd is not visible here).
 *
 * FIX: the zero test previously read d_sd[blockIdx.y] while the division
 * used d_sd[blockIdx.x]; both now index with blockIdx.x, consistent with
 * d_mean[blockIdx.x].  NOTE(review): stats are indexed per *block* column,
 * which is only one-stat-per-matrix-column if the launch uses one block
 * per column — verify against the (unseen) launch code.
 * Also added a bounds guard for grids padded past N. */
__global__ void matrixNorm(float* d_in, float* d_out, float* d_mean, float* d_sd, int N)
{
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_y = blockIdx.y * blockDim.y + threadIdx.y;

    if (idx_x >= N || idx_y >= N)   /* FIX: guard padded grid tail */
        return;

    unsigned int i = idx_y * N + idx_x;

    if (d_sd[blockIdx.x] == 0.0)
        d_out[i] = 0.0;
    else
        d_out[i] = (d_in[i] - d_mean[blockIdx.x]) / d_sd[blockIdx.x];
}
6,508
#include "includes.h" // helper for CUDA error handling

/* Single-thread kernel: counts how many trailing (largest-index) eigenvalues
 * are needed before their cumulative share of the total exceeds `epsilon`.
 *
 * Writes the count into *componentNum (always >= 1).  Eigenvalues appear to
 * be consumed from the back of the array — presumably sorted ascending so
 * the last entries are the largest; TODO confirm with the caller.
 *
 * NOTE: both numerator and denominator are scaled by eigenRows, so the
 * scaling cancels in explanatoryScore; it does not change the result.
 * NOTE(review): intended to be launched <<<1,1>>> — there is no thread
 * indexing, so multiple threads would race on *componentNum. */
__global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon)
{
    /* Total (scaled) variance: sum of all eigenvalues, times eigenRows. */
    double variance = 0;
    for(std::size_t i = 0; i < eigenRows; ++i) {
        variance += eigenvalues[i];
    }
    variance *= eigenRows;

    /* Start with one component: the last eigenvalue. */
    (*componentNum) = 1;
    double subVariance = eigenRows * eigenvalues[eigenRows - 1];
    double explanatoryScore = subVariance / variance;

    /* Keep adding components (from the back) while the explained share is
     * still at or below epsilon; *componentNum itself is the loop counter,
     * so it holds the final count when the loop exits. */
    for(; (*componentNum) < eigenRows && explanatoryScore <= epsilon; (*componentNum) += 1) {
        subVariance += eigenRows * eigenvalues[eigenRows - (*componentNum) - 1];
        explanatoryScore = subVariance / variance;
    }
}
6,509
/*****************************************************************************
 Example : cuda-vector-vector-multiplication-mGPU.cu
 Objective : Write a CUDA Program to perform Vector Vector multiplication
 using global memory implementation to be executed on multiple GPUs.
 Input : None
 Output : Execution time in seconds , Gflops achieved
 Created : Aug 2011
 E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<pthread.h>
#include<error.h>

#define EPS 1.0e-12
#define GRIDSIZE 10
#define BLOCKSIZE 16
#define SIZE 128

int blocksPerGrid;
int gridsPerBlock;

/* Per-GPU work description: half of each input vector plus result slots. */
struct Data
{
    int deviceId;
    int size;
    double* a;
    double* b;
    double retVal;  /* partial dot product computed by this device */
    double Tsec;    /* kernel time in seconds */
};

/* Element-wise product C[i] = A[i]*B[i]; the host sums C afterwards.
 * NOTE: the stride uses blockDim.x * gridDim.x only, so with a 2D block
 * threads with threadIdx.y > 0 revisit elements — they write the same
 * value, so the result is still correct. */
__global__ void vvmul(int len, double* A, double* B, double *C)
{
    int tid = blockIdx.x*blockDim.x*blockDim.y + threadIdx.x + threadIdx.y * blockDim.x;
    while (tid < len) {
        C[tid] = A[tid] * B[tid];
        tid += blockDim.x * gridDim.x;
    }
}

/* Check for safe return of all calls to the device. */
void CUDA_SAFE_CALL(cudaError_t call)
{
    cudaError_t ret = call;
    switch (ret) {
        case cudaSuccess:
            break;
        default:
        {
            printf(" ERROR at line :%i.%d' ' %s\n", __LINE__, ret, cudaGetErrorString(ret));
            exit(-1);
            break;
        }
    }
}

/* Get the number of GPU devices present on the host. */
int get_DeviceCount()
{
    int count;
    cudaGetDeviceCount(&count);
    return count;
}

/* Dot product on the host: accumulates into C.
 * Caller must pass C initialised to 0. */
void host_vvmul(double* A, double* B, int len, double &C)
{
    int i;
    for (i = 0; i < len; i++)
        C += A[i]*B[i];
}

/* Function to calulate Gflops. */
double calculate_gflops(double &Tsec)
{
    double gflops = (1.0e-9 * (( 2.0 * SIZE )/Tsec));
    return gflops;
}

/* Function to display output. */
void display(double* arr, int size)
{
    int i;
    for (i = 0; i < size; i++)
        printf("%f ", arr[i]);
    printf("\t%d\n", i);
}

/* Per-thread worker: runs the kernel on data->deviceId over data->size
 * elements, sums the element-wise products on the host, and stores the
 * partial dot product in data->retVal and the kernel time in data->Tsec. */
void* routine(void* givendata)
{
    Data *data = (Data*)givendata;
    int len = data->size;
    double *a, *b, *part_c;
    double *d_a, *d_b, *d_part_c;
    double c = 0.0;   /* FIX: accumulator was uninitialized before += */
    cudaEvent_t start, stop;
    float elapsedTime;

    a = data->a;
    b = data->b;
    part_c = (double*)malloc(len*sizeof(double));

    CUDA_SAFE_CALL(cudaSetDevice(data->deviceId));
    CUDA_SAFE_CALL(cudaEventCreate(&start));
    CUDA_SAFE_CALL(cudaEventCreate(&stop));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_a, len*sizeof(double)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_b, len*sizeof(double)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_part_c, len*sizeof(double)));
    CUDA_SAFE_CALL(cudaMemcpy(d_a, a, len*sizeof(double), cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(d_b, b, len*sizeof(double), cudaMemcpyHostToDevice));

    dim3 threadsPerBlock(16, 16);
    int numBlocks;
    if (len/256 == 0)
        numBlocks = 1;
    else
        numBlocks = len/100;
    dim3 blocksPerGrid(numBlocks, 1);

    CUDA_SAFE_CALL(cudaEventRecord(start, 0));
    vvmul<<<blocksPerGrid, threadsPerBlock>>>(len, d_a, d_b, d_part_c);
    if (cudaPeekAtLastError())
        printf("KERNEL ERROR: %s\t on device:%d\n", cudaGetErrorString(cudaPeekAtLastError()), data->deviceId);
    CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
    CUDA_SAFE_CALL(cudaEventSynchronize(stop));

    /* Copy partial products back and reduce them on the host. */
    CUDA_SAFE_CALL(cudaMemcpy(part_c, d_part_c, len*sizeof(double), cudaMemcpyDeviceToHost));
    int ind;
    for (ind = 0; ind < len; ind++)
        c += part_c[ind];

    CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    data->Tsec = elapsedTime*(1.0e-3);

    CUDA_SAFE_CALL(cudaFree(d_a));
    CUDA_SAFE_CALL(cudaFree(d_b));
    CUDA_SAFE_CALL(cudaFree(d_part_c));
    free(part_c);

    data->retVal = c;
    return 0;
}

/* Compare device and host results element-wise against EPS. */
void relError(double* dRes, double* hRes, int size)
{
    double relativeError = 0.0, errorNorm = 0.0;
    int flag = 0;
    int i;
    for (i = 0; i < size; ++i) {
        if (fabs(hRes[i]) > fabs(dRes[i]))
            relativeError = fabs((hRes[i] - dRes[i]) / hRes[i]);
        else
            relativeError = fabs((dRes[i] - hRes[i]) / dRes[i]);
        if (relativeError > EPS && relativeError != 0.0e+00) {
            if (errorNorm < relativeError) {
                errorNorm = relativeError;
                flag = 1;
            }
        }
    }
    if (flag == 1) {
        printf(" \n Results verfication : Failed");
        printf(" \n Considered machine precision : %e", EPS);
        printf(" \n Relative Error : %e\n", errorNorm);
    }
    else
        printf("\n Results verfication : Success\n");
}

/* prints the result in screen */
void print_on_screen(char * program_name, float tsec, double gflops, int size, int flag) // flag=1 if gflops has been calculated else flag=0
{
    printf("\n---------------%s----------------\n", program_name);
    printf("\tSIZE\t TIME_SEC\t Gflops\n");
    if (flag == 1)
        printf("\t%d\t%f\t%lf\t", size, tsec, gflops);
    else
        /* FIX: string literals were passed to %lf (undefined behaviour) */
        printf("\t%d\t%s\t%s\t", size, "---", "---");
}

int main(int argc, char** argv)
{
    int devCount;
    CUDA_SAFE_CALL(cudaGetDeviceCount(&devCount));
    if (devCount < 2) {
        printf("Atleast 2 GPU's are needed :%d\n", devCount);
        exit(0);
    }

    double *hVectA, *hVectB, hRes;
    double host_hRes = 0.0;   /* FIX: host_vvmul accumulates into this */
    int vlen = SIZE;
    int ind;

    hVectA = (double*)malloc(vlen*sizeof(double));
    hVectB = (double*)malloc(vlen*sizeof(double));
    for (ind = 0; ind < vlen; ind++) {
        hVectA[ind] = 2.00;
        hVectB[ind] = 2.00;
    }

    /* Split the vectors in half, one half per GPU. */
    Data vector[2];
    vector[0].deviceId = 0;
    vector[0].size = vlen/2;
    vector[0].a = hVectA;
    vector[0].b = hVectB;
    vector[1].deviceId = 1;
    vector[1].size = vlen/2;
    vector[1].a = hVectA + vlen/2;
    vector[1].b = hVectB + vlen/2;

    pthread_t thread;
    if (pthread_create(&thread, NULL, routine, (void*)&vector[0]) != 0)
        perror("Thread creation error\n");
    routine(&vector[1]);
    pthread_join(thread, NULL);

    hRes = vector[0].retVal + vector[1].retVal;

    /* ---------Check result with host CPU result ---------*/
    host_vvmul(hVectA, hVectB, vlen, host_hRes);
    relError(&hRes, &host_hRes, 1);

    print_on_screen("MatMatMult_mGPU", vector[0].Tsec, calculate_gflops(vector[0].Tsec), vlen, 1);
    print_on_screen("MatMatMult_mGPU", vector[1].Tsec, calculate_gflops(vector[1].Tsec), vlen, 1);

    free(hVectA);
    free(hVectB);
    return 0;
}
6,510
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define THREADS 128 // 2^7
#define BLOCKS 1024 // 2^10
#define NUM_VALS THREADS*BLOCKS

/* In-place exchange of two ints (statement macro, not an expression). */
#define swap(A,B) { int temp = A; A = B; B = temp;}

/* Print wall-clock difference between two clock() samples in seconds. */
void print_elapsed(clock_t start, clock_t stop)
{
  double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
  printf("Elapsed time: %.3fs\n", elapsed);
}

/* Odd-even transposition sort over c[0..*count-1].
 *
 * NOTE(review): this kernel only ever indexes with threadIdx.x, so each
 * block touches only c[0..blockDim.x-1].  With the launch below
 * (1024 blocks x 128 threads, *count = 131072) all 1024 blocks race on
 * the same first 128 elements and the remaining 130944 elements are
 * never compared — the array cannot come out sorted.  The algorithm is
 * only valid for a single block with count <= blockDim.x; a correct
 * multi-block version needs one kernel launch per phase (no inter-block
 * __syncthreads exists).  Flagged, not fixed: a fix requires an
 * algorithmic restructure.
 * The __syncthreads() calls themselves are reached uniformly (the phase
 * conditions depend only on *count), so they are well-formed. */
__global__ void odd_even_sort(int *c,int *count)
{
  int l;
  /* number of even/odd phase pairs: ceil(count/2) */
  if(*count%2==0)
    l=*count/2;
  else
    l=(*count/2)+1;

  for(int i=0;i<l;i++)
  {
    if((!(threadIdx.x&1)) && (threadIdx.x<(*count-1))) //even phase
    {
      if(c[threadIdx.x]>c[threadIdx.x+1])
        swap(c[threadIdx.x], c[threadIdx.x+1]);
    }
    __syncthreads();
    if((threadIdx.x&1) && (threadIdx.x<(*count-1))) //odd phase
    {
      if(c[threadIdx.x]>c[threadIdx.x+1])
        swap(c[threadIdx.x], c[threadIdx.x+1]);
    }
    __syncthreads();
  }
}

/* Host wrapper: copies `values` to the device, runs the sort kernel,
 * and copies the (supposedly sorted) data back.
 * NOTE(review): the `count` device allocation is never cudaFree'd. */
void odd_even_caller(int *values)
{
  int *dev_values, *count;
  size_t size = NUM_VALS * sizeof(int);
  int n = NUM_VALS;
  cudaMalloc((void**) &dev_values, size);
  cudaMalloc((void**)&count,sizeof(int));
  cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
  cudaMemcpy(count,&n,sizeof(int),cudaMemcpyHostToDevice);
  dim3 blocks(BLOCKS,1);
  dim3 threads(THREADS,1);
  odd_even_sort<<<blocks, threads>>>(dev_values, count);
  cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
  cudaFree(dev_values);
}

/* Reads NUM_VALS ints from reverse_dataset.txt, sorts them on the GPU,
 * prints the first 20, and reports elapsed time.
 * NOTE(review): fopen result is not checked, fscanf results are ignored,
 * `f` is never fclose'd, `values` is never free'd, and the timed region
 * includes the 20 printf calls. */
int main(int argc, char const *argv[])
{
  clock_t start, stop;
  int *values = (int*)malloc(NUM_VALS * sizeof(int));
  FILE *f = fopen("reverse_dataset.txt", "r");
  for(int i=0;i< NUM_VALS; i++)
  {
    fscanf(f, "%d\n", &values[i]);
  }
  printf("Hello\n");
  start = clock();
  odd_even_caller(values);
  for(int i=0; i < 20 ;i ++)
  {
    printf("%d\n", values[i]);
  }
  stop = clock();
  print_elapsed(start, stop);
  return 0;
}
6,511
#include "includes.h"

// Writes out[i] = in[i] * 2 for the first *size elements.
// One element per thread; threads in the padded tail of the grid exit early.
// blockDim.x is widened to long before the multiply so the global index
// cannot overflow int arithmetic.
__global__ void multiplyBy2_l(int *size, long *in, long *out)
{
    const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (idx >= *size)
        return;
    out[idx] = 2 * in[idx];
}
6,512
#include "includes.h"

/* Chebyshev (max-abs-difference) distance between two points laid out
 * column-wise: dimension d of a point sits at offset d*signallength. */
__device__ float maxMetricPoints(const float* g_uquery, const float* g_vpoint, int pointdim, int signallength){
 float r_u1;
 float r_v1;
 float r_d1,r_dim=0;
 r_dim=0;
 for(int d=0; d<pointdim; d++){
 r_u1 = *(g_uquery+d*signallength);
 r_v1 = *(g_vpoint+d*signallength);
 r_d1 = r_v1 - r_u1;
 r_d1 = r_d1 < 0? -r_d1: r_d1; //abs
 r_dim= r_dim < r_d1? r_d1: r_dim;
 }
 return r_dim;
}

/* Inserts (distance, indexv) into the ascending k-best lists kdistances /
 * kindexes (both of length kth), shifting larger entries right.
 * Returns the new k-th (largest retained) distance.
 * NOTE(review): assumes kdistances is pre-filled so that the scan always
 * terminates; the while-condition reads kdistances[k] before checking
 * k < kth-1, which is safe only because the first kth slots exist. */
__device__ float insertPointKlist(int kth, float distance, int indexv,float* kdistances, int* kindexes){
 int k=0;
 /* find insertion position: first slot whose distance >= `distance` */
 while( (distance>*(kdistances+k)) && (k<kth-1)){k++;}
 //Move value to the next
 for(int k2=kth-1;k2>k;k2--){
 *(kdistances+k2)=*(kdistances+k2-1);
 *(kindexes+k2)=*(kindexes+k2-1);
 }
 //Replace
 *(kdistances+k)=distance;
 *(kindexes+k)=indexv;
 //printf("\n -> Modificacion pila: %.f %.f. New max distance: %.f", *kdistances, *(kdistances+1), *(kdistances+kth-1));
 return *(kdistances+kth-1);
}

/* Brute-force k-nearest-neighbour search under the max metric.
 * One thread per query sample (tid); samples are grouped into trials of
 * `triallength`, and each query is compared against every point of its
 * own trial except those within +-`exclude` of its own in-trial index
 * (a Theiler-style exclusion window — presumably; confirm with caller).
 * Results: g_indexes / g_distances hold the k best per query, strided by
 * signallength per rank.
 *
 * NOTE(review): kindexes/kdistances are hard-coded to 10 entries, so the
 * kernel silently corrupts local memory if kth > 10 — there is no check.
 * NOTE(review): the __syncthreads() below sits inside the divergent
 * `if (tid < signallength)` branch; if signallength is not a multiple of
 * blockDim.x some threads skip the barrier (undefined behaviour).  It
 * also appears unnecessary — the kernel uses no shared memory — but it
 * is left untouched here. */
__global__ void kernelKNN(const float* g_uquery, const float* g_vpointset, int *g_indexes, float* g_distances, int pointdim, int triallength, int signallength, int kth, int exclude)
{
 const unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
 //const unsigned int tidim = tid*pointdim;
 const unsigned int itrial = tid / triallength; // indextrial
 int kindexes[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 float kdistances[]= {INFINITY, INFINITY, INFINITY, INFINITY, INFINITY, \
 INFINITY, INFINITY, INFINITY, INFINITY, INFINITY};
 if(tid<signallength){
 //int r_index;
 float r_kdist=INFINITY; /* current k-th best distance (pruning bound) */
 int indexi = tid-triallength*itrial; /* index of this query within its trial */
 for(int t=0; t<triallength; t++){
 int indexu = tid;
 int indexv = (t + itrial*triallength); /* candidate point, same trial */
 int condition1=indexi-exclude;
 int condition2=indexi+exclude;
 /* skip candidates inside the exclusion window around the query */
 if((t<condition1)||(t>condition2)){
 float temp_dist = maxMetricPoints(g_uquery+indexu, g_vpointset+indexv,pointdim, signallength);
 if(temp_dist <= r_kdist){
 r_kdist = insertPointKlist(kth,temp_dist,t,kdistances,kindexes);
 //printf("\nId: %d, Temp_dist: %.f. r_index: %d", tid, temp_dist, r_index);
 }
 }
 //printf("tid:%d indexes: %d, %d distances: %.f %.f\n",tid, *kindexes, *(kindexes+1), *kdistances, *(kdistances+1));
 }
 __syncthreads();
 //COPY TO GLOBAL MEMORY
 for(int k=0;k<kth;k++){
 g_indexes[tid+k*signallength] = *(kindexes+k);
 g_distances[tid+k*signallength]= *(kdistances+k);
 }
 }
}
6,513
#include "includes.h"

// Device-side helper: sum of two ints.
__device__ int addem( int a, int b )
{
    return a + b;
}

// Kernel: computes a + b via the addem helper and stores it in *c.
// Intended to be launched with a single thread; every launched thread
// writes the same value.
__global__ void add( int a, int b, int *c )
{
    int sum = addem( a, b );
    *c = sum;
}
6,514
/* Ex. 5
   1. implement matrix transpose in cuda with shared memory (use block algorithm/FAST TRANSPOSE).
   2. MATRIX SIZE = 8192X8192 and register the time of solution compared with naive one for 64,512, 1024 threads/block.
   3. Try to reach mem band = 100 Gb/s. */
#include <stdio.h>

// kernels transpose a tile of TILE_DIM x TILE_DIM elements
// using a TILE_DIM x BLOCK_ROWS thread block, so that each thread
// transposes TILE_DIM/BLOCK_ROWS elements. TILE_DIM is an
// integral multiple of BLOCK_ROWS
#define TILE_DIM 32
//#define BLOCK_ROWS 2 //64 threads/block
//#define BLOCK_ROWS 16 //512 threads/block
#define BLOCK_ROWS 32 //1024 threads/block

// Number of repetitions used for timing.
#define NUM_REPS 100

// Print the first `a` elements of `data` on a single line.
// NOTE(review): size_x, size_y and b are unused — only a flat prefix of the
// matrix is printed, not an a x b sub-matrix.
__host__ void printMatrix(const float* const data, const size_t size_x, const size_t size_y, const size_t a, const size_t b){
    for(size_t i=0; i<a; i++){
        printf("%5.2f ", data[i]);
    }
    printf("\n");
}

// Count the elements where odata differs from the reference gold.
// Returns 0 when the two msize-element buffers match exactly (bitwise float
// compare is fine here: the transpose only moves values, it never computes).
__host__ int compareRes(const float* const odata, const float* const gold, const size_t msize){
    int res = 0;
    for(size_t i = 0; i<msize; i++){
        if (odata[i] != gold[i]) ++res;
    }
    return res;
}

// Naive transpose: global loads are coalesced, but the stores to odata are
// strided by `height`, so a warp's 32 writes land on 32 different lines.
// Each thread handles TILE_DIM/BLOCK_ROWS rows of its tile.
__global__ void transposeNaive(float *odata, const float* const idata, const int width, const int height)
{
    int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
    int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
    int index_in = xIndex + width * yIndex;
    int index_out = yIndex + height * xIndex;
    for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
        odata[index_out+i] = idata[index_in+i*width];
    }
}

// Sequential transpose on the host, used as the reference ("gold") result.
__host__ void computeTransposeHost(float* odata, const float* const h_idata, const int size_x, const int size_y){
    for (size_t i = 0; i < size_x; i++) {
        for (size_t j = 0; j < size_y; j++) {
            odata[j+i*size_x] = h_idata[i + j*size_y];
        }
    }
}

// Tiled transpose: stage a TILE_DIM x TILE_DIM tile in shared memory so that
// both the global loads and the global stores are coalesced.
__global__ void transposeCoalesced(float *odata, const float *const idata, const int width, const int height){
    __shared__ float tile[TILE_DIM][TILE_DIM+1]; //padding so different threads access different shared memory banks and no bottleneck
    int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
    int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
    int index_in = xIndex + (yIndex)*width;
    // swap the block coordinates to address the transposed output tile
    xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
    yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
    int index_out = xIndex + (yIndex)*height;
    // each thread copy more than one memory location. The overhead of index calculation is amortized over threads
    for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
        // use a tile like a buffer to transpose blocks
        tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
    }
    __syncthreads(); // to ensure threads have finished to copy from idata to tile
    // access shared memory noncontiguously to access odata contiguously
    for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
        odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
    }
}

// Times both transpose kernels over NUM_REPS launches, validates each result
// against the host reference, and reports the effective bandwidth of each.
int main( int argc, char** argv)
{
    // set matrix size
    const int size_x = 8192;
    const int size_y = 8192;
    // execution configuration parameters
    dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
    // CUDA events for timing
    cudaEvent_t start, stop;
    // size of memory required to store the matrix
    const int mem_size = sizeof(float) * size_x*size_y;
    // allocate host memory
    float *h_idata = (float*) malloc(mem_size);
    float *h_odata = (float*) malloc(mem_size);
    float *transposeHost = (float *) malloc(mem_size);
    // allocate device memory
    float *d_idata, *d_odata;
    cudaMalloc( (void**) &d_idata, mem_size);
    cudaMalloc( (void**) &d_odata, mem_size);
    // initalize host data
    for(int i = 0; i < (size_x*size_y); ++i)
        h_idata[i] = (float) i;
    printf("Initial matrix:\n");
    printMatrix(h_idata, size_x, size_y, 10, 10);
    // copy host data to device
    cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice );
    // Compute reference transpose solution
    computeTransposeHost(transposeHost, h_idata, size_x, size_y);
    printf("Reference solution:\n");
    printMatrix(transposeHost, size_x, size_y, 10, 10);
    printf("\nMatrix size: %dx%d, tile: %dx%d, block: %dx%d\n\n", size_x, size_y, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
    printf("Kernel\t\t\tLoop over kernel\n");
    printf("------\t\t\t----------------\n");
    // initialize events, EC parameters
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //*********************************time transposeNaive*************************************
    cudaEventRecord(start, 0);
    // repeat the launch NUM_REPS times to obtain a more precise bandwidth figure
    for (int i=0; i < NUM_REPS; i++) {
        transposeNaive<<<grid, threads>>>(d_odata, d_idata,size_x,size_y);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float outerTime;
    cudaEventElapsedTime(&outerTime, start, stop);
    cudaMemcpy(h_odata,d_odata, mem_size, cudaMemcpyDeviceToHost);
    // check correctness of solution
    int res = compareRes(transposeHost, h_odata, size_x*size_y);
    if (res != 0)
        printf("*** transposeNaive kernel FAILED ***\n");
    else
        printf("***transposeNaive kernel OK***\n");
    // report effective bandwidth: 2x (one read + one write per element);
    // the event time is in milliseconds, hence the factor 1000
    float outerBandwidth = 2.*1000*mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
    printf("transposeNaive\t\t%5.2f GB/s\n", outerBandwidth);
    //printf("transposeNaive:\n");
    //printMatrix(h_odata, size_x, size_y, 10, 10);
    //*********************************time transposeCoalesced************************************
    cudaEventRecord(start, 0);
    for (int i=0; i < NUM_REPS; i++) {
        transposeCoalesced<<<grid, threads>>>(d_odata, d_idata,size_x,size_y);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    //float outerTime1;
    cudaEventElapsedTime(&outerTime, start, stop);
    cudaMemcpy(h_odata,d_odata, mem_size, cudaMemcpyDeviceToHost);
    // check correctness of result
    res = compareRes(transposeHost, h_odata, size_x*size_y);
    if (res != 0)
        printf("*** transposeCoalasced kernel FAILED ***\n");
    else
        printf("***transposeCoalasced kernel OK***\n");
    // report effective bandwidths
    outerBandwidth = 2.*1000*mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
    printf("transposeCoalesced\t%5.2f GB/s\n", outerBandwidth);
    //printf("transposeCoalesced\n");
    //printMatrix(h_odata, size_x, size_y, 10, 10);
    // cleanup memory
    free(h_idata);
    free(h_odata);
    free(transposeHost);
    cudaFree(d_idata);
    cudaFree(d_odata);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
6,515
/**
 * Nearest neighbor search
 * Store zones are placed in the map with 20% probability; for each residential
 * zone we compute the Manhattan distance to the nearest store zone.
 * With an algorithm such as a Kd-tree, the nearest-store distance for one
 * residential zone costs O(log M), so covering all residential zones is
 * O(N log M). This implementation instead propagates distances outward from
 * each store zone to its neighbors (a BFS flood fill), which is O(N) overall,
 * and the GPU parallelization was expected to shorten it further.
 *
 * Unfortunately, it is slower than the CPU version...
 * One cause is atomicMin: it apparently locks the memory location until the
 * operation completes, which serializes contended updates.
 */
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>

#define CITY_SIZE 100
#define NUM_GPU_BLOCKS 4
#define NUM_GPU_THREADS 32
#define NUM_FEATURES 1

// Abort with file/line context when a CUDA runtime call fails.
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
    printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
    printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
    exit(EXIT_FAILURE);}}

// One cell of the zoning plan. type 1 = store; other values are non-store
// (type 0 is treated as residential by the distance computation).
struct ZoneType {
    int type;
    int level;
};

// The whole CITY_SIZE x CITY_SIZE zoning grid.
struct ZoningPlan {
    ZoneType zones[CITY_SIZE][CITY_SIZE];
};

// Per-cell distances to the nearest store, one slot per feature
// (NUM_FEATURES == 1 here; 9999 acts as "infinity").
struct DistanceMap {
    int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES];
};

// Integer grid coordinate, usable from both host and device code.
struct Point2D {
    int x;
    int y;
    __host__ __device__
    Point2D() : x(0), y(0) {}
    __host__ __device__
    Point2D(int x, int y) : x(x), y(y) {}
};

// Linear congruential generator; the caller owns the state word.
__host__ __device__
unsigned int rand(unsigned int* randx) {
    *randx = *randx * 1103515245 + 12345;
    return (*randx)&2147483647;
}

// Uniform float in [0, 1).
__host__ __device__
float randf(unsigned int* randx) {
    return rand(randx) / (float(2147483647) + 1);
}

// Uniform float in [a, b).
__host__ __device__
float randf(unsigned int* randx, float a, float b) {
    return randf(randx) * (b - a) + a;
}

// Sample an index from an (unnormalized) CDF of `num` entries.
__host__ __device__
int sampleFromCdf(unsigned int* randx, float* cdf, int num) {
    float rnd = randf(randx, 0, cdf[num-1]);
    for (int i = 0; i < num; ++i) {
        if (rnd <= cdf[i]) return i;
    }
    return num - 1;
}

// Sample an index from an (unnormalized) PDF; negative weights contribute 0.
// NOTE(review): the local cdf buffer is fixed at 40 entries — assumes num <= 40.
__host__ __device__
int sampleFromPdf(unsigned int* randx, float* pdf, int num) {
    if (num == 0) return 0;
    float cdf[40];
    cdf[0] = pdf[0];
    for (int i = 1; i < num; ++i) {
        if (pdf[i] >= 0) {
            cdf[i] = cdf[i - 1] + pdf[i];
        } else {
            cdf[i] = cdf[i - 1];
        }
    }
    return sampleFromCdf(randx, cdf, num);
}

/**
 * Generate a zoning plan: each cell draws its type from the remaining quota
 * of each zone type, so the final counts approximate zoneTypeDistribution.
 * Deterministic (fixed seed 0).
 */
__host__
void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution) {
    std::vector<float> numRemainings(zoneTypeDistribution.size());
    for (int i = 0; i < zoneTypeDistribution.size(); ++i) {
        numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i];
    }
    unsigned int randx = 0;
    for (int r = 0; r < CITY_SIZE; ++r) {
        for (int c = 0; c < CITY_SIZE; ++c) {
            int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size());
            zoningPlan.zones[r][c].type = type;
            numRemainings[type] -= 1;
        }
    }
}

// Debug print of the plan (row CITY_SIZE-1 first); only for tiny maps.
__host__
void showPlan(ZoningPlan* zoningPlan) {
    if (CITY_SIZE > 8) return;
    for (int r = CITY_SIZE - 1; r >= 0; --r) {
        for (int c = 0; c < CITY_SIZE; ++c) {
            printf("%d, ", zoningPlan->zones[r][c].type);
        }
        printf("\n");
    }
    printf("\n");
}

/**
 * Compute the distance to the nearest store (multithreaded GPU version).
 * Each thread seeds its own queue with the stores found in its strip of the
 * grid, then flood-fills distances with atomicMin BFS steps.
 *
 * NOTE(review): the strip partitioning can run past the grid — with the given
 * launch config, stride*threads exceeds CITY_SIZE*CITY_SIZE, so the last
 * thread indexes rows >= CITY_SIZE; confirm whether this relies on struct
 * padding or is a genuine out-of-bounds access.
 * NOTE(review): the queue wraps its indices at queue_size, but the loop
 * condition `queue_begin < queue_end` does not account for wrap-around, so
 * the BFS can terminate early (or skip work) once either index wraps.
 */
__global__
void computeDistanceToStore(ZoningPlan* zoningPlan, DistanceMap* distanceMap) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // per-thread BFS queue (in local memory)
    const int queue_size = 1000;
    Point2D queue[queue_size];
    int queue_begin = 0;
    int queue_end = 0;

    int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS);

    // find the stores within this thread's strip of the grid
    for (int i = 0; i < stride; ++i) {
        int r = (idx * stride + i) / CITY_SIZE;
        int c = (idx * stride + i) % CITY_SIZE;

        if (zoningPlan->zones[r][c].type == 1) {
            queue[queue_end++] = Point2D(c, r);
            distanceMap->distances[r][c][0] = 0;
        } else {
            distanceMap->distances[r][c][0] = 9999;
        }
    }

    // flood-fill the distance map from the queued store cells
    while (queue_begin < queue_end) {
        Point2D pt = queue[queue_begin++];
        if (queue_begin >= queue_size) queue_begin = 0;

        int d = distanceMap->distances[pt.y][pt.x][0];

        if (pt.y > 0) {
            int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
            if (old > d + 1) {
                queue[queue_end++] = Point2D(pt.x, pt.y-1);
                if (queue_end >= queue_size) queue_end = 0;
            }
        }
        if (pt.y < CITY_SIZE - 1) {
            int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
            if (old > d + 1) {
                queue[queue_end++] = Point2D(pt.x, pt.y+1);
                if (queue_end >= queue_size) queue_end = 0;
            }
        }
        if (pt.x > 0) {
            int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
            if (old > d + 1) {
                queue[queue_end++] = Point2D(pt.x-1, pt.y);
                if (queue_end >= queue_size) queue_end = 0;
            }
        }
        if (pt.x < CITY_SIZE - 1) {
            int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
            if (old > d + 1) {
                queue[queue_end++] = Point2D(pt.x+1, pt.y);
                if (queue_end >= queue_size) queue_end = 0;
            }
        }
    }
}

/**
 * Compute the distance to the nearest store (single-threaded CPU version).
 * Standard BFS flood fill from all store cells; serves as the reference
 * result for the GPU kernel.
 */
__host__
void computeDistanceToStoreCPU(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
    std::list<Point2D> queue;

    for (int i = 0; i < CITY_SIZE * CITY_SIZE; ++i) {
        int r = i / CITY_SIZE;
        int c = i % CITY_SIZE;
        if (zoningPLan->zones[r][c].type == 1) {
            queue.push_back(Point2D(c, r));
            distanceMap->distances[r][c][0] = 0;
        } else {
            distanceMap->distances[r][c][0] = 9999;
        }
    }

    while (!queue.empty()) {
        Point2D pt = queue.front();
        queue.pop_front();

        int d = distanceMap->distances[pt.y][pt.x][0];

        if (pt.y > 0) {
            if (distanceMap->distances[pt.y-1][pt.x][0] > d + 1) {
                distanceMap->distances[pt.y-1][pt.x][0] = d + 1;
                queue.push_back(Point2D(pt.x, pt.y-1));
            }
        }
        if (pt.y < CITY_SIZE - 1) {
            if (distanceMap->distances[pt.y+1][pt.x][0] > d + 1) {
                distanceMap->distances[pt.y+1][pt.x][0] = d + 1;
                queue.push_back(Point2D(pt.x, pt.y+1));
            }
        }
        if (pt.x > 0) {
            if (distanceMap->distances[pt.y][pt.x-1][0] > d + 1) {
                distanceMap->distances[pt.y][pt.x-1][0] = d + 1;
                queue.push_back(Point2D(pt.x-1, pt.y));
            }
        }
        if (pt.x < CITY_SIZE - 1) {
            if (distanceMap->distances[pt.y][pt.x+1][0] > d + 1) {
                distanceMap->distances[pt.y][pt.x+1][0] = d + 1;
                queue.push_back(Point2D(pt.x+1, pt.y));
            }
        }
    }
}

// Generates a plan, runs 1000 iterations of both the CPU and GPU distance
// computations (timing each), and compares the two resulting distance maps.
int main()
{
    time_t start, end;

    ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan));
    DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap));
    DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap));

    // initialize the distances (byte-wise memset; the actual "infinity"
    // value is written per-cell inside the compute functions)
    memset(hostDistanceMap, 9999, sizeof(DistanceMap));
    memset(hostDistanceMap2, 9999, sizeof(DistanceMap));

    std::vector<float> zoneTypeDistribution(2);
    zoneTypeDistribution[0] = 0.8f;
    zoneTypeDistribution[1] = 0.2f;

    // generate the initial plan (this also determines the store locations)
    start = clock();
    generateZoningPlan(*hostZoningPlan, zoneTypeDistribution);
    end = clock();
    printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);

    if (CITY_SIZE <= 8) {
        for (int r = CITY_SIZE - 1; r >= 0; --r) {
            for (int c = 0; c < CITY_SIZE; ++c) {
                printf("%d, ", hostZoningPlan->zones[r][c].type);
            }
            printf("\n");
        }
        printf("\n");
    }

    // copy the initial plan to a device buffer
    ZoningPlan* devZoningPlan;
    CUDA_CALL(cudaMalloc((void**)&devZoningPlan, sizeof(ZoningPlan)));
    CUDA_CALL(cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice));

    // allocate device buffers for the distance maps
    DistanceMap* devDistanceMap;
    CUDA_CALL(cudaMalloc((void**)&devDistanceMap, sizeof(DistanceMap)));
    DistanceMap* devInitialDistanceMap;
    CUDA_CALL(cudaMalloc((void**)&devInitialDistanceMap, sizeof(DistanceMap)));

    ///////////////////////////////////////////////////////////////////////
    // CPU version: compute the distance to the nearest store
    start = clock();
    for (int iter = 0; iter < 1000; ++iter) {
        computeDistanceToStoreCPU(hostZoningPlan, hostDistanceMap2);
    }
    end = clock();
    printf("computeDistanceToStore CPU: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);

    ///////////////////////////////////////////////////////////////////////
    // GPU version: compute the distance to the nearest store
    cudaMemcpy(devInitialDistanceMap, hostDistanceMap, sizeof(DistanceMap), cudaMemcpyHostToDevice);

    // run the distance computation in parallel
    start = clock();
    for (int iter = 0; iter < 1000; ++iter) {
        computeDistanceToStore<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap);
        cudaDeviceSynchronize();
    }
    end = clock();
    printf("computeDistanceToStore: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);

    // copy the distances back to the CPU buffer
    CUDA_CALL(cudaMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost));

    // compare the CPU and GPU results
    {
        bool err = false;
        for (int r = CITY_SIZE - 1; r >= 0 && !err; --r) {
            for (int c = 0; c < CITY_SIZE && !err; ++c) {
                if (hostDistanceMap->distances[r][c][0] != hostDistanceMap2->distances[r][c][0]) {
                    err = true;
                    printf("ERROR! %d,%d\n", r, c);
                }
            }
        }
    }

    if (CITY_SIZE <= 8) {
        for (int r = CITY_SIZE - 1; r >= 0; --r) {
            for (int c = 0; c < CITY_SIZE; ++c) {
                printf("%d, ", hostDistanceMap->distances[r][c][0]);
            }
            printf("\n");
        }
        printf("\n");
        for (int r = CITY_SIZE - 1; r >= 0; --r) {
            for (int c = 0; c < CITY_SIZE; ++c) {
                printf("%d, ", hostDistanceMap2->distances[r][c][0]);
            }
            printf("\n");
        }
        printf("\n");
    }

    // free the device buffers
    cudaFree(devZoningPlan);
    cudaFree(devDistanceMap);

    // free the CPU buffers
    free(hostZoningPlan);
    free(hostDistanceMap);
    free(hostDistanceMap2);

    cudaDeviceReset();
}
6,516
// Vector Addition
#include <stdio.h>
#include <stdlib.h>

// Abort with a readable message when a CUDA runtime call fails.
// The original code checked no return values, so device errors (failed
// allocation, failed launch) were silently ignored.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                            \
        }                                                                   \
    } while (0)

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void vectorAdd(int* a, int* b, int* c, int n)
{
    // Calculate global thread index
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Make sure we stay in bounds (the grid may be larger than n)
    if (tid < n)
        // Vector Addition
        c[tid] = a[tid] + b[tid];
}

int main()
{
    // Number of elements
    int n = 16;

    // Host pointers
    int* h_a;
    int* h_b;
    int* h_c;

    // Device pointers
    int* d_a;
    int* d_b;
    int* d_c;

    size_t bytes = n * sizeof(int);

    // Allocating memory on the host side
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);

    // Initializing host vectors
    for (int i = 0; i < n; i++) {
        h_a[i] = 1;
        h_b[i] = 2;
    }

    printf("Matrix A: \n");
    for (int i = 0; i < n; i++) {
        printf("%d ", h_a[i]);
    }
    printf("\nMatrix B: \n");
    for (int i = 0; i < n; i++) {
        printf("%d ", h_b[i]);
    }

    // Allocating memory on the device side
    CUDA_CHECK(cudaMalloc(&d_a, bytes));
    CUDA_CHECK(cudaMalloc(&d_b, bytes));
    CUDA_CHECK(cudaMalloc(&d_c, bytes));

    // Init block and grid size; integer ceil-division avoids the
    // float round-trip of the original (int)ceil((float)n / block_size)
    int block_size = 4;
    int grid_size = (n + block_size - 1) / block_size;
    printf("Grid size is %d\n", grid_size);

    CUDA_CHECK(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice));

    vectorAdd<<<grid_size, block_size>>>(d_a, d_b, d_c, n);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration

    // blocking copy also synchronizes with the kernel
    CUDA_CHECK(cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost));

    printf("Matrix C: \n");
    for (int i = 0; i < n; i++) {
        printf("%d ", h_c[i]);
    }
    printf("Completed Successfully!\n");

    // Clean-Up
    free(h_a);
    free(h_b);
    free(h_c);
    CUDA_CHECK(cudaFree(d_a));
    CUDA_CHECK(cudaFree(d_b));
    CUDA_CHECK(cudaFree(d_c));

    return 0;
}
6,517
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define ARRAY_SIZE 1000000
#define TPB 256
#define MARGIN 1e-6

// Wall-clock time in seconds (microsecond resolution).
double cpuSecond() {
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

// CPU reference SAXPY: returns a newly malloc'd array res[i] = a*x[i] + y[i].
// Caller owns (and must free) the returned buffer.
float* saxpy(float* x, float* y, float a) {
    int i = 0;
    float* res = (float *)malloc(ARRAY_SIZE * sizeof(float));
    for (i = 0; i < ARRAY_SIZE; i++) {
        res[i] = a * x[i] + y[i];
    }
    return res;
}

// GPU SAXPY: one element per thread, guarded against the grid tail.
__global__ void saxpy_gpu(float* res, float* x, float* y, float a) {
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < ARRAY_SIZE) {
        res[id] = a * x[id] + y[id];
    }
}

// Runs SAXPY on CPU and GPU, times both, and verifies the results agree
// within MARGIN.
int main() {
    float* res;
    float* res2 = (float *)malloc(ARRAY_SIZE * sizeof(float));
    float* x = (float *)malloc(ARRAY_SIZE * sizeof(float));
    float* y = (float *)malloc(ARRAY_SIZE * sizeof(float));
    float* res_gpu;
    float* d_x;
    float* d_y;
    float a = 10;
    double t1;
    double t2;
    double timeCPU;
    double timeGPU;

    printf("Filling arrays...\n");
    for (int i = 0; i < ARRAY_SIZE; i++) {
        x[i] = i;
        y[i] = 2 * i;
    }
    printf("Done!\n");

    printf("Computing in CPU...\n");
    t1 = cpuSecond();
    res = saxpy(x, y, a);
    t2 = cpuSecond();
    timeCPU = t2 - t1;
    printf("Done! %f s\n", timeCPU);

    printf("Computing in GPU...\n");
    cudaMalloc(&res_gpu, ARRAY_SIZE * sizeof(float));
    cudaMalloc(&d_x, ARRAY_SIZE * sizeof(float));
    cudaMalloc(&d_y, ARRAY_SIZE * sizeof(float));
    cudaMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    t1 = cpuSecond();
    saxpy_gpu<<<(ARRAY_SIZE + TPB - 1) / TPB, TPB>>>(res_gpu, d_x, d_y, a);
    cudaDeviceSynchronize();  // kernel launch is async; wait before stopping the clock
    t2 = cpuSecond();
    timeGPU = t2 - t1;
    printf("Done! %f s\n", timeGPU);

    cudaMemcpy(res2, res_gpu, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(res_gpu);
    cudaFree(d_x);
    cudaFree(d_y);

    for (int i = 0; i < ARRAY_SIZE; i++) {
        // Compare by magnitude. The original tested `res[i] - res2[i] > MARGIN`,
        // which silently accepted arbitrarily large NEGATIVE differences.
        float diff = res[i] - res2[i];
        if (diff > MARGIN || diff < -MARGIN) {
            printf("This is bad, %d\n", i);
            exit(0);
        }
    }
    printf("Hurray!\n");

    // release host buffers (the original leaked all four)
    free(res);
    free(res2);
    free(x);
    free(y);
}
6,518
#include <thrust/sort.h>
#include <thrust/execution_policy.h>

extern "C" {

// In-kernel ascending sort of arr[0 .. num_component) using Thrust's device
// execution policy. Intended to be launched with a single thread; every
// launched thread would otherwise sort the same range concurrently.
__global__ void sort_thrust(int num_component, int *arr){
    thrust::sort(thrust::device, arr, (arr + num_component));
}

// In-kernel key/value sort: sorts key[0 .. num_component) ascending and
// applies the same permutation to `value`. Same single-thread caveat as above.
__global__ void sort_by_key_thrust( int num_component, int *key, int *value){
    thrust::sort_by_key(thrust::device, key, (key + num_component), value);
}

}
6,519
#include "includes.h"

// For each (sample, polynom) pair, computes the probability that the sample
// satisfies ALL splits of the polynom under a sigmoid relaxation:
//   p(split true) = 1 / (1 + exp(-lambda * (x - c)))
// accumulated in log-space for numerical stability, then exponentiated.
//
// Layout (as used by the indexing below):
//   features: column-major per feature, f-th feature of sample t at
//             features[f * batchSize + t]
//   splits/conditions: flat arrays indexed by polynomOffsets[polynomId] .. 
//             polynomOffsets[polynomId + 1)
//   probs:    probs[polynomId * batchSize + t]
// Grid-stride over polynoms via blockIdx.x/gridDim.x; threadIdx.x selects the
// sample, so blockDim.x must be >= batchSize for full coverage.
__global__ void SigmoidProbPolynomProbsImpl(const float* features, int batchSize, const int* splits, const float* conditions, const int* polynomOffsets, int polynomCount, float lambda, float* probs) {
    if (threadIdx.x < batchSize) {
        int polynomId = blockIdx.x;

        features += threadIdx.x;
        probs += threadIdx.x;

        while (polynomId < polynomCount) {
            int offset = polynomOffsets[polynomId];
            int nextOffset = polynomOffsets[polynomId + 1];
            const int depth = nextOffset - offset;

            float logProb = 0;
            for (int i = 0; i < depth; ++i) {
                const int f = __ldg(splits + offset + i);
                const float c = __ldg(conditions + offset + i);
                const float x = __ldg(features + f * batchSize);
                const float val = -lambda * (x - c);

                // p(split = 1) = 1 / (1 + exp(-lambda*(x - c)))
                // log p = -log(1 + exp(val)); when exp(val) overflows to inf,
                // log(1 + exp(val)) ~= val, so fall back to val directly.
                const float expVal = 1.0f + expf(val);
                // use logf, not log: the original called the double-precision
                // overload, forcing a float->double->float round trip per split
                const float isTrueLogProb = isfinite(expVal) ? logf(expVal) : val;
                logProb -= isTrueLogProb;
            }
            const float prob = expf(logProb);
            probs[polynomId * batchSize] = prob;

            polynomId += gridDim.x;
        }
    }
}
6,520
/*
 * Sergio Isaac Mercado Silvano
 * A01020382
 * CUDA - Lab 2
 * 19/11/2019
 */
#include "cuda_runtime.h"
#include <stdio.h>

#define N 10000
// 16x16 = 256 threads per block. The original launched dim3(N,N) threads in a
// single block, which exceeds the 1024-threads-per-block hardware limit and
// can never launch.
#define BLOCK 16

// C = A * B for square n x n row-major matrices; one thread per output element.
// The original used `while (row < n && col < n)` without ever modifying
// row/col — an infinite loop — and then stored c[row*n+col] unconditionally,
// out of bounds for threads past the matrix edge. Both fixed with an `if` guard.
__global__ void kernel(float * a, float * b, float * c, int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n && col < n) {
        float temp = 0.0f;
        for (int i = 0; i < n; i++) {
            temp += a[row * n + i] * b[i * n + col];
        }
        c[row * n + col] = temp;
    }
}

// Fill arr[0..n) with val/(i+1).
// The original computed val/(float)i, which is inf at i == 0 and poisons the
// whole product with inf/nan; dividing by i+1 keeps the decreasing sequence
// while staying finite.
void fill(float * arr, int n, float val){
    for (int i = 0; i < n; i++) {
        arr[i] = val / (float)(i + 1);
    }
}

// Print all n elements, space-separated.
void display(float * arr, int n){
    for (int i = 0; i < n; i++) {
        printf("%5f ", arr[i]);
    }
}

int main(void){
    float * a, *b, *c;
    float *d_a, *d_b, *d_c;

    a = (float *)malloc(sizeof(float) * N * N);
    b = (float *)malloc(sizeof(float) * N * N);
    c = (float *)malloc(sizeof(float) * N * N);

    cudaMalloc((void**) &d_a, sizeof(float) * N * N);
    cudaMalloc((void**) &d_b, sizeof(float) * N * N);
    cudaMalloc((void**) &d_c, sizeof(float) * N * N);

    fill(a, N * N, 1.0);
    fill(b, N * N, 2.0);

    // Tile the N x N output with 16x16 blocks; ceil-division covers the edges.
    dim3 threadsPerBlock(BLOCK, BLOCK);
    dim3 blocksPerGrid((N + BLOCK - 1) / BLOCK, (N + BLOCK - 1) / BLOCK);

    // Copy the array contents; the original passed &a / &b (the addresses of
    // the host POINTERS), copying garbage instead of the matrices.
    cudaMemcpy(d_a, a, N * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, N * N * sizeof(float), cudaMemcpyHostToDevice);

    // Pass the matrix dimension N — the original passed N*N, which the kernel
    // would have interpreted as a (N*N) x (N*N) matrix.
    kernel <<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, N);

    // Same &d_c pointer-address bug fixed on the way back.
    cudaMemcpy(c, d_c, N * N * sizeof(float), cudaMemcpyDeviceToHost);

    display(c, N * N);

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
6,521
#include <math.h>
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
#define NTAB 32
#define NDIV (1+(IM-1)/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)

// Uniform random deviate in (0, 1), excluding the endpoints.
// Numerical Recipes "ran1": a Park-Miller minimal-standard generator
// (IA = 16807, IM = 2^31 - 1, computed via Schrage's trick with IQ/IR to
// avoid overflow) combined with a Bays-Durham shuffle table of NTAB entries.
// Initialize/reseed by calling with *idum negative; *idum must not be
// altered between successive calls of a sequence.
// NOTE: the static state (iy, iv) makes this non-reentrant / not thread-safe.
float ran1(long *idum)
{
    int j;
    long k;
    static long iy=0;
    static long iv[NTAB];
    float temp;

    // (re)initialize the shuffle table on first use or negative seed
    if (*idum <= 0 || !iy) {
        if (-(*idum) < 1) *idum=1;
        else *idum = -(*idum);
        // warm up the generator, then fill the table
        for (j=NTAB+7;j>=0;j--) {
            k=(*idum)/IQ;
            *idum=IA*(*idum-k*IQ)-IR*k;   // Schrage: IA*idum mod IM without overflow
            if (*idum < 0) *idum += IM;
            if (j < NTAB) iv[j] = *idum;
        }
        iy=iv[0];
    }
    // one LCG step
    k=(*idum)/IQ;
    *idum=IA*(*idum-k*IQ)-IR*k;
    if (*idum < 0) *idum += IM;
    // shuffle: pick a table slot from the previous output, refill it
    j=iy/NDIV;
    iy=iv[j];
    iv[j] = *idum;
    // scale to (0,1) and clamp away from 1.0
    temp=(float)AM*iy;
    if (temp > RNMX) return (float)RNMX;
    else return temp;
}
#undef IA
#undef IM
#undef AM
#undef IQ
#undef IR
#undef NTAB
#undef NDIV
#undef EPS
#undef RNMX

// Normally distributed deviate (zero mean, unit variance) using ran1 as the
// uniform source — the Marsaglia polar (Box-Muller) method: sample (v1, v2)
// uniformly in the unit circle, transform to two independent Gaussians, return
// one and cache the other in static `gset` for the next call.
// NOTE: the static cache (iset, gset) makes this non-reentrant / not thread-safe.
float gasdev(long *idum)
{
    float ran1(long *idum);
    static int iset=0;
    static double gset;
    double fac,rsq,v1,v2;

    if (iset == 0) {
        // rejection-sample a point strictly inside the unit circle (not origin)
        do {
            v1=2.0*ran1(idum)-1.0;
            v2=2.0*ran1(idum)-1.0;
            rsq=v1*v1+v2*v2;
        } while (rsq >= 1.0 || rsq == 0.0);
        fac=sqrt(-2.0*log(rsq)/rsq);
        // keep one deviate for the next call, return the other
        gset=v1*fac;
        iset=1;
        return (float)(v2*fac);
    } else {
        iset=0;
        return (float)gset;
    }
}
6,522
#include "includes.h"

// Component-wise sum of two double2 values.
__device__ double2 add(double2 a, double2 b){
    double2 sum;
    sum.x = a.x + b.x;
    sum.y = a.y + b.y;
    return sum;
}

// Smoke-test kernel: writes a[0] + b[0] into c[0].
__global__ void add_test(double2 *a, double2 *b, double2 *c){
    double2 lhs = a[0];
    double2 rhs = b[0];
    c[0] = add(lhs, rhs);
}
6,523
// Integer floor(log2(m)) by repeated halving.
// The original looped `while (m != 1)`, which never terminates for m <= 0
// (0 stays 0 forever; negatives converge to 0 under integer division), so a
// single bad input hung the whole kernel. Non-positive inputs now return 0.
__device__ int _logarithm_2(int m) {
    if (m <= 0)
        return 0;  // log2 undefined here; returning 0 instead of spinning
    int count = 0;
    while (m > 1) {   // equivalent to the original `m != 1` for positive m
        m = m/2;
        count++;
    }
    return count;
}

// For each of the n elements, iterate nb_iters times:
//   T[id] = floor(log2(T[id]^2 + T[id] + iter))
// NOTE(review): T[id]*T[id] can overflow int for large values; the guard in
// _logarithm_2 keeps a resulting non-positive argument from hanging the GPU.
__global__ void kernel_compute_gpu(int n, int nb_iters, int *T) {
    int iter;
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        for(iter=0; iter<nb_iters; iter++)
            T[id] = _logarithm_2(T[id]*T[id]+T[id]+iter);
}
6,524
/* This is a automatically generated test. Do not modify */
/* Left functionally untouched per the header above; comments only. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Generated stress kernel: threads a single float accumulator `comp` through
// a fixed tree of transcendental/float operations and prints the result.
__global__
void compute(float comp, float var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
if (comp >= cosf(+1.5867E-42f + var_1)) {
  comp = (+1.8348E35f / fmodf(var_3 - var_4 / atan2f((var_5 * (-1.0175E-37f / var_6)), sqrtf(var_7 * +1.8963E-42f)), (-0.0f * (var_8 / var_9))));
  comp = var_10 + (+1.1630E-35f * (var_11 + (-1.7856E34f - var_12)));
  for (int i=0; i < var_2; ++i) {
    comp += (var_13 - (-1.4030E36f / +1.1043E35f));
    comp += var_14 * -1.3204E-44f - (+0.0f - var_15);
    comp += sqrtf((-1.1641E-42f - var_16));
  }
  if (comp <= cosf(var_17 / -1.8231E-36f)) {
    float tmp_1 = (-1.1561E-37f - (+0.0f + fabsf(coshf(-1.3657E36f - var_18 + logf(var_19 / +0.0f)))));
    float tmp_2 = var_20 - -1.3905E-35f;
    comp = tmp_2 - tmp_1 - -1.8159E19f - -1.4234E-41f;
    comp += (var_21 / var_22);
  }
}
// result is emitted from the device; flushed at cudaDeviceSynchronize()
printf("%.17g\n", comp);
}

// Helper kept by the generator; allocates a 10-element array filled with v.
// NOTE(review): currently unused by main and never freed by callers.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Reads 23 numeric arguments from argv (no arity check — the generator
// guarantees the invocation) and launches the kernel with a single thread.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  cudaDeviceSynchronize();

  return 0;
}
6,525
/*
Copyright 2018 - The OPRECOMP Project Consortium, Alma Mater Studiorum
Università di Bologna. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <cuda_fp16.h>

#define SIZE 1000000
#define nTPB 256
#define FLOAT half

// Initialize both x and y on-device: element idx gets (idx % 15) replicated
// into both halves of a half2. n is the number of half2 PAIRS.
__global__ void init(int n, half2 *x, half2 *y) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        x[idx] = y[idx] = __float2half2_rn((float)(idx % 15));
    }
}

// Half-precision SAXPY on half2 pairs: y = a*x + y via fused multiply-add.
__global__ void saxpy(int n, float a, half2 *x, half2 *y) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        half2 a2 = __float2half2_rn(a);
        y[idx] = __hfma2(a2, x[idx], y[idx]);
    }
}

int main() {
    FLOAT *hin, *hout, *din, *dout;
    hin = (FLOAT *)malloc(SIZE * sizeof(FLOAT));
    hout = (FLOAT *)malloc(SIZE * sizeof(FLOAT));
    // for (int i = 0; i < SIZE; i++) hin[i] = i%15;
    // for (int i = 0; i < SIZE; i++) hout[i] = i%15;
    cudaMalloc(&din, SIZE * sizeof(FLOAT));
    cudaMalloc(&dout, SIZE * sizeof(FLOAT));
    // cudaMemcpy(din, hin, SIZE*sizeof(FLOAT), cudaMemcpyHostToDevice);
    // cudaMemcpy(dout, hout, SIZE*sizeof(FLOAT), cudaMemcpyHostToDevice);

    // SIZE halves are processed as SIZE/2 half2 pairs
    init<<<(SIZE/2 + nTPB - 1)/nTPB, nTPB>>>(SIZE/2, (half2 *)din, (half2 *)dout);
    int k;
    for (k = 0; k < 5; ++k)
        saxpy<<<(SIZE/2 + nTPB - 1)/nTPB, nTPB>>>(SIZE/2, 0.5, (half2 *)din, (half2 *)dout);
    cudaMemcpy(hout, dout, SIZE * sizeof(FLOAT), cudaMemcpyDeviceToHost);
    // for (int i = 0; i < DSIZE; i++)
    printf("%f ... %f\n", hout[0], hout[SIZE-1]);

    // release host and device buffers — the original leaked all four
    free(hin);
    free(hout);
    cudaFree(din);
    cudaFree(dout);
    return 0;
}
6,526
#include "includes.h"

// Sparse matrix-vector product for a COO matrix: for each stored entry k,
// accumulates val[k] * w[col_ind[k]] into ret[row_ind[k]]. Rows may receive
// contributions from many threads, hence the atomicAdd.
__global__ void dot(float * val, int *row_ind, int *col_ind, int nnz, float * ret, float * w)
{
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nnz)
        return;  // grid tail: nothing to do

    const int row = row_ind[tid];
    const int col = col_ind[tid];
    atomicAdd(&ret[row], val[tid] * w[col]);
}
6,527
#include<stdio.h>

// Enumerates all CUDA devices and prints their key properties.
int main()
{
    int n_devices;
    cudaGetDeviceCount(&n_devices);

    for (int i = 0; i < n_devices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("  Device number: %d\n", i);
        printf("  Device name: %s\n", prop.name);
        printf("  Memory clock rate (KHz): %d\n", prop.memoryClockRate);
        printf("  Memory bus width (bits): %d\n", prop.memoryBusWidth);
        // DDR: 2 transfers per clock; clock is in kHz, bus width in bits
        printf("  Peak memory bandwidth (GB/s): %f\n",
               2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
        printf("  Maximum number of grid dimensions: (%d, %d, %d)\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        // totalConstMem is a size_t; the original printed it with %d, which is
        // undefined behavior on LP64 platforms (64-bit size_t vs 32-bit int)
        printf("  Total constant memory: %zu\n", (size_t)prop.totalConstMem);
        printf("  Warp size: %d\n", prop.warpSize);
    }
    return 0;
}
6,528
#include <stdio.h>

// Busy-wait kernel: spins for `kernel_time` intervals of `clockRate` device
// clock ticks each. clock() returns the per-SM clock counter, which can wrap;
// `wrapped` tracks a wrapped deadline (finish_clock overflowed past the
// current counter) and keeps spinning until clock() itself wraps back around.
// NOTE(review): clockRate comes from cudaDeviceProp::clockRate, which is in
// kHz — so one interval is presumably ~1 ms and kernel_time is presumably a
// duration in milliseconds; confirm against callers.
//__global__ void clock_block(int kernel_time, int clockRate, int *d_result)
__global__ void clock_block(int kernel_time, int clockRate)
{
    int finish_clock;
    int start_time;
    for(int temp=0; temp<kernel_time; temp++){
        start_time = clock();
        finish_clock = start_time + clockRate;
        // deadline overflowed the signed counter: wait out the wrap first
        bool wrapped = finish_clock < start_time;
        while( clock() < finish_clock || wrapped)
            wrapped = clock()>0 && wrapped;
    }
    // (*d_result)= kernel_tim;e
}

// Enqueue a busy-wait of `kernel_time` intervals on `stream` (asynchronous —
// returns immediately after the launch).
// NOTE(review): cuda_device is initialized to 7 but immediately overwritten
// by cudaGetDevice(), so the kernel always targets the CURRENT device; the
// literal 7 is dead. The third launch parameter (1) is the dynamic
// shared-memory byte count, not a dimension.
//void sleep(cudaStream_t stream, int kernel_time, int *d_result){
void sleep(cudaStream_t stream, int kernel_time){
    int cuda_device = 7;
    cudaDeviceProp deviceProp;
    cudaGetDevice(&cuda_device);
    cudaGetDeviceProperties(&deviceProp, cuda_device);
    int clockRate = deviceProp.clockRate;
    // clock_block<<<1,1,1,stream>>>(kernel_time, clockRate, d_result);
    clock_block<<<1,1,1,stream>>>(kernel_time, clockRate);
}
6,529
#include <stdio.h>

#define NUM_BLOCKS 32
#define BLOCK_WIDTH 5

// Each launched thread reports its thread index and block index.
__global__ void hello()
{
    printf("\nThread %d in block %d", threadIdx.x, blockIdx.x);
}

int main(int argc, char** argv)
{
    // NUM_BLOCKS blocks of BLOCK_WIDTH threads each
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();

    // device-side printf output is only flushed once the kernel completes,
    // so block here before printing the final message
    cudaDeviceSynchronize();

    printf("\nDone\n");

    return 0;
}
6,530
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <time.h>
#include <unistd.h>

void importData(const char *file, float *X, float *Y, float *data, const int n);
float getDelta(const float *data, const int n);
void getR(float *result, const float *data, const float delta, const int n);
float getError(const float *data, const float *result, const int n);
void exportData(const char *file, const float *X, const float *Y, const float *Z, const int n);

// Loads two n x n scalar fields (r and u) from disk, reconstructs r from u by
// a 5-point finite-difference Laplacian, reports the mean absolute error
// against the stored r, and writes the reconstruction out.
int main(void){
    // Declaration
    int N = 2048;
    float delta, error;
    char *rFName = (char *)"r_data.dat";
    char *uFName = (char *)"u_data.dat";
    char *RFName = (char *)"R_data.dat";

    // Initialization
    printf("Input the sample size:\n");
    scanf("%d", &N);
    printf("Allocating memory for the data...\n");
    float *rData = (float *)malloc(sizeof(float) * N * N);
    float *uData = (float *)malloc(sizeof(float) * N * N);
    float *r = (float *)malloc(sizeof(float) * N * N);
    float *X = (float *)malloc(sizeof(float) * N);
    float *Y = (float *)malloc(sizeof(float) * N);
    printf("Finish!\n");

    // Retrieve Data
    printf("Retriving data from \"%s\"...\n", rFName);
    importData(rFName, X, Y, rData, N);
    printf("Finish!\n");
    printf("Retriving data from \"%s\"...\n", uFName);
    importData(uFName, X, Y, uData, N);
    printf("Finish!\n");

    // Evaluate error
    printf("Evaluating the average error...\n");
    delta = getDelta(X, N);
    printf("delta = %f\n", delta);
    getR(r, uData, delta, N);
    error = getError(rData, r, N);
    printf("Finish!\n");
    printf("Average error = %f\n", error);
    printf("Exporting R data...\n");
    exportData(RFName, X, Y, r, N);

    // Clean-up
    free(rData);
    free(uData);
    free(r);
    free(X);
    free(Y);
    return 0;
}

// Reads n*n lines of "X\tY\tvalue" from `file` into `data` (row-major,
// j-major outer loop), and extracts the 1-D grid axes into X and Y.
// On a missing file the buffers are left untouched.
void importData(const char *file, float *X, float *Y, float *data, const int n){
    FILE *dataFile = fopen(file, "r");
    float *Xtmp = (float *)malloc(sizeof(float) * n * n);
    float *Ytmp = (float *)malloc(sizeof(float) * n * n);
    if(dataFile != NULL){
        for(int j = 0; j < n; j++){
            for(int i = 0; i < n; i++){
                fscanf(dataFile, "%f\t%f\t%f", &Xtmp[i+j*n], &Ytmp[i+j*n], &data[i+j*n]);
            }
        }
        printf("All data from \"%s\" have been retrieved.\n", file);
        fclose(dataFile);
    }else{
        printf("File not found!\n");
    }
    // X varies fastest (first row), Y varies per row (first column)
    for(int i = 0; i < n; i++){
        X[i] = Xtmp[i];
        Y[i] = Ytmp[i*n];
    }
    free(Xtmp);
    free(Ytmp);
}

// Mean grid spacing of the axis X (average |X[i] - X[i-1]|).
// Uses fabsf: the original called abs(), whose int overload truncates the
// float difference toward zero — for sub-unit spacings that made delta 0.
float getDelta(const float *X, const int n){
    float delta = 0.0f;
    for(int i = 1; i < n; i++){
        delta += fabsf(X[i] - X[i-1]);
    }
    delta /= (float)(n-1);
    return delta;
}

// 5-point finite-difference Laplacian of `data`, scaled by EPSILON/delta^2
// and by n*n, with the outermost ring of cells forced to zero.
void getR(float *result, const float *data, const float delta, const int n){
    float iDelta2 = 1.0f / (delta * delta);
    // vacuum permittivity; a float literal instead of the original
    // 8.85418782 * pow(10, -12), which evaluated in double at runtime
    const float EPSILON = 8.85418782e-12f;

    // Fix boundary to be zero
    for(int i = 0; i < n; i++){
        result[i] = 0.0f;
        result[i+(n-1)*n] = 0.0f;
        result[0+i*n] = 0.0f;
        result[(n-1)+i*n] = 0.0f;
    }

    // Finite Difference on the interior points
    for(int j = 1; j < n - 1; j++){
        for(int i = 1; i < n - 1; i++){
            result[i+j*n] = EPSILON * iDelta2 * (data[(i-1)+j*n] + data[(i+1)+j*n]
                          + data[i+(j-1)*n] + data[i+(j+1)*n] - 4 * data[i+j*n]);
            result[i+j*n] *= n * n;
        }
    }
}

// Mean absolute difference between `data` and `result` over all n*n cells.
// fabsf for the same int-truncation reason as getDelta.
float getError(const float *data, const float *result, const int n){
    float totalError = 0.0f;
    float averageError = 0.0f;
    for(int j = 0; j < n; j++){
        for(int i = 0; i < n; i++){
            totalError += fabsf(data[i+j*n] - result[i+j*n]);
        }
    }
    averageError = totalError / (float)(n * n);
    return averageError;
}

// Writes n*n lines of "X\tY\tZ" to `file`.
void exportData(const char *file, const float *X, const float *Y, const float *Z, const int n){
    FILE *dataFile = fopen(file, "w");
    if(dataFile != NULL){
        for(int j = 0; j < n ; j++){
            for(int i = 0; i < n; i++){
                fprintf(dataFile, "%f\t%f\t%f\n", X[i], Y[j], Z[i+j*n]);
            }
        }
        printf("All data have been stored in \"%s\".\n", file);
        fclose(dataFile);
    }else{
        printf("File not found!");
    }
}
6,531
#include "includes.h"

// Bilinearly resamples a square sub-window of `input` into a fixed-size
// output image, for many sub-window proposals at once.
//
// Launch layout: one thread per output pixel across all sub-images; the flat
// id is decoded as (sub-image, row, column). `subImageDefs` holds per-proposal
// records of `subImageDefsDim` floats: [0]=center x in <-1,1>, [1]=center y in
// <-1,1>, [2]=diameter in <0,1> (fraction of the smaller input dimension).
// When `safeBounds` is set the window is clamped fully inside the input.
//
// Fix: the bilinear taps at (x+1, y+1) were read without a bounds check, so a
// window touching the right/bottom edge (possible with safeBounds == false)
// read out of bounds. Neighbor indices are now clamped to the last row/column,
// which degrades to nearest-edge replication there.
__global__ void BilinearResampleSubImageKernel_ForManyProposals(const float *input, float *output, const float* subImageDefs, bool safeBounds, int subImageDefsDim, int inputWidth, int inputHeight, int outputWidth, int outputHeight, int numberSubImages, int outputSize)
{
    int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;

    int px = id % outputWidth;                       // column in the single output image
    int subim_id = id / outputWidth / outputHeight;  // which sub-image it is
    int py = (id / outputWidth) % outputHeight;      // row in the single output image

    if (id < outputSize)
    {
        float subImgCX = subImageDefs[0 + subim_id*subImageDefsDim];       // <-1, 1>
        float subImgCY = subImageDefs[1 + subim_id*subImageDefsDim];       // <-1, 1>
        float subImgDiameter = subImageDefs[2 + subim_id*subImageDefsDim]; // <0, 1>

        // Diameter in pixels, clamped to [1, min(W,H)-1].
        int maxDiameter = min(inputWidth - 1, inputHeight - 1);
        int diameterPix = (int)(subImgDiameter * maxDiameter);
        diameterPix = max(1, diameterPix);
        diameterPix = min(maxDiameter, diameterPix);

        // Top-left corner of the sub-window in input pixel coordinates.
        int subImgX = (int)(inputWidth  * (subImgCX + 1) * 0.5f) - diameterPix / 2;
        int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;

        if (safeBounds)
        {
            subImgX = max(subImgX, 1);
            subImgY = max(subImgY, 1);
            subImgX = min(subImgX, inputWidth  - diameterPix - 1);
            subImgY = min(subImgY, inputHeight - diameterPix - 1);
        }

        // NOTE(review): outputWidth/outputHeight == 1 divides by zero here — confirm callers never pass 1.
        float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
        float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);

        int x = (int)(xRatio * px);
        int y = (int)(yRatio * py);

        int x0 = x + subImgX;
        int y0 = y + subImgY;

        if (x0 >= 0 && y0 >= 0 && x0 < inputWidth && y0 < inputHeight)
        {
            // Clamp the +1 neighbors so edge pixels never read out of bounds.
            int x1 = min(x0 + 1, inputWidth - 1);
            int y1 = min(y0 + 1, inputHeight - 1);

            //--- X and Y distance difference (fractional part of the source position)
            float xDist = (xRatio * px) - x;
            float yDist = (yRatio * py) - y;

            //--- Points
            float topLeft     = input[y0 * inputWidth + x0];
            float topRight    = input[y0 * inputWidth + x1];
            float bottomLeft  = input[y1 * inputWidth + x0];
            float bottomRight = input[y1 * inputWidth + x1];

            float result = topLeft * (1 - xDist) * (1 - yDist)
                         + topRight * xDist * (1 - yDist)
                         + bottomLeft * yDist * (1 - xDist)
                         + bottomRight * xDist * yDist;

            output[py * outputWidth + px + subim_id*outputWidth*outputHeight] = result;
        }
    }
}
6,532
/* ********************************************** * CS314 Principles of Programming Languages * * Spring 2020 * ********************************************** */ #include <stdio.h> #include <stdlib.h> __global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) { /*YOUR CODE HERE*/ int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index while( tid < numEdges )//as long as thread id is in bounds of the array { if( tid - distance >= 0 )//then we can do something, it's in bounds of the array { if( src[tid - distance] == src[tid] )//check if the two are in the same segment { if( oldWeight[tid - distance] >= oldWeight[tid] )//check the max, and update accordingly { newDst[tid] = oldDst[tid - distance]; newWeight[tid] = oldWeight[tid - distance]; (*madeChanges) = 1; } else { newDst[tid] = oldDst[tid]; newWeight[tid] = oldWeight[tid]; } } else { newDst[tid] = oldDst[tid]; newWeight[tid] = oldWeight[tid]; } } else { newDst[tid] = oldDst[tid]; newWeight[tid] = oldWeight[tid]; } tid += ( blockDim.x * gridDim.x ); } return; }
6,533
#include <stdio.h> __global__ void kernel() { printf("foo\n"); } void cuda_stuff() { kernel<<<1, 1>>>(); cudaDeviceSynchronize(); }
6,534
#include <iostream> #include <cstdlib> #include <curand_kernel.h> __global__ void init_stuff(curandState *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; curand_init(1337, idx, 0, &state[idx]); } __global__ void make_rand(curandState *state, float *randArray) { int idx = blockIdx.x * blockDim.x + threadIdx.x; randArray[idx] = curand_normal(&state[idx]); } void SaveData( int npts, float *x, char *filename) { FILE *fp = fopen(filename,"w"); for (int i=0;i<npts;i++) { fprintf(fp,"%f\n",x[i]); } fclose(fp); } int main( int argc, char* argv[]) { curandState *d_state; float* randArray; float* host_randArray; int nThreads = 512; int nBlocks = 1000; host_randArray = (float*) malloc( nThreads*nBlocks*sizeof(float)); cudaMalloc (&d_state, nThreads*nBlocks*sizeof(curandState)); cudaMalloc( &randArray, nThreads*nBlocks*sizeof(float)); init_stuff<<<nBlocks, nThreads>>>(d_state); make_rand<<<nBlocks, nThreads>>>(d_state, randArray); cudaMemcpy( host_randArray, randArray, nThreads*nBlocks*sizeof(float), cudaMemcpyDeviceToHost); char filename[] = "testPRNG.dat"; SaveData(nThreads*nBlocks,host_randArray,filename); free(host_randArray); cudaFree(randArray); cudaFree(d_state); return 0; }
6,535
#include <pthread.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <malloc.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>

#define REPS 1

// Host-side reference sorters (one per element type).
void SortChunkI(long ChunkSize, int Chunk_ID, signed int *ary);
void SortChunkL(long ChunkSize, int Chunk_ID, signed long long *ary);
void SortChunkF(long ChunkSize, int Chunk_ID, float *ary);
void SortChunkD(long ChunkSize, int Chunk_ID, double *ary);
void MergeSortI(signed int *list, long length);
void MergeSortL(signed long long *list, long length);
void MergeSortF(float *list, long length);
void MergeSortD(double *list, long length);

// GPU bubble sort of one contiguous chunk per thread (int elements).
// Each thread sorts ary[sp .. sp+ChunkSize-1] where sp is derived from its
// flat thread id.
__global__ void SortChunkGI(signed int ary[], long ChunkSize)
{
	unsigned long i, j;
	unsigned long sp;
	int temp;
	sp = (blockIdx.x * blockDim.x + threadIdx.x) * ChunkSize;
	// %lu for the unsigned long indices (was %li).
	printf("\n start:%lu end:%lu Chunksize: %li S_ary: %i E_ary:%i\n", sp, ChunkSize+sp-1, ChunkSize, ary[sp], ary[ChunkSize+sp-1]);
	for(i = 0; i < ChunkSize; i++) {
		for(j = sp; j < (ChunkSize+sp-1-i); j++) {
			if( (ary[j]) > (ary[j+1])) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
}

// GPU bubble sort of one chunk per thread (long long elements).
__global__ void SortChunkGL(signed long long ary[], long ChunkSize)
{
	unsigned long i, j;
	unsigned long sp;
	long long temp;
	sp = (blockIdx.x * blockDim.x + threadIdx.x) * ChunkSize;
	//printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
	for(i = 0; i < ChunkSize; i++) {
		for(j = sp; j < (ChunkSize+sp-1-i); j++) {
			if( (ary[j]) > (ary[j+1])) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
}

// GPU bubble sort of one chunk per thread (float elements).
__global__ void SortChunkGF(float ary[], long ChunkSize)
{
	unsigned long i, j;
	unsigned long sp;
	float temp;
	sp = (blockIdx.x * blockDim.x + threadIdx.x) * ChunkSize;
	// %f for the float samples (was %i, which is undefined behavior).
	printf("\n start:%lu end:%lu Chunksize: %li S_ary: %f E_ary:%f\n", sp, ChunkSize+sp-1, ChunkSize, ary[sp], ary[ChunkSize+sp-1]);
	for(i = 0; i < ChunkSize; i++) {
		for(j = sp; j < (ChunkSize+sp-1-i); j++) {
			if( (ary[j]) > (ary[j+1])) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
}

// GPU bubble sort of one chunk per thread (double elements).
__global__ void SortChunkGD(double ary[], long ChunkSize)
{
	unsigned long i, j;
	unsigned long sp;
	double temp;
	sp = (blockIdx.x * blockDim.x + threadIdx.x) * ChunkSize;
	// %f for the double samples (was %i, which is undefined behavior).
	printf("\n start:%lu end:%lu Chunksize: %li S_ary: %f E_ary:%f\n", sp, ChunkSize+sp-1, ChunkSize, ary[sp], ary[ChunkSize+sp-1]);
	for(i = 0; i < ChunkSize; i++) {
		for(j = sp; j < (ChunkSize+sp-1-i); j++) {
			if( (ary[j]) > (ary[j+1])) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
}

// Usage: prog <HowMany> <Type I|L|F|D> <ChunkSize> <BlockSize>
// Fills an array with random values of the chosen type, bubble-sorts it in
// ChunkSize pieces on the GPU (results to BubbleSortResult.txt), then merge-
// sorts the whole array on the host (results to MergeSortResult.txt).
int main(int argc, char** argv)
{
	//**************** HOST variables ******************
	struct timeval t;
	double StartTime, EndTime;
	double TimeElapsed;
	long a, i;
	long NumOfChunk;
	long HowMany;
	long ChunkSize;
	char Type;
	signed int *InputArrayI, *SortedArrayI;
	signed long long *InputArrayL, *SortedArrayL;
	float *InputArrayF, *SortedArrayF;
	double *InputArrayD, *SortedArrayD;
	long BlockSize;
	long NumOfBlock;
	//**************** GPU variables ******************
	signed int *InputArrayG_I, *SortedArrayG_I;
	signed long long *InputArrayG_L, *SortedArrayG_L;
	float *InputArrayG_F, *SortedArrayG_F;
	double *InputArrayG_D, *SortedArrayG_D;
	FILE *ff = fopen("BubbleSortResult.txt", "w");
	FILE *fp = fopen("MergeSortResult.txt", "w");

	if(argc != 5) {
		printf("\n Argument is not correct \n\n");
		printf("Nothing executed ... Exiting ...\n\n");
		exit(EXIT_FAILURE);
	}
	HowMany   = atoi(argv[1]);
	ChunkSize = atoi(argv[3]);
	Type      = toupper(argv[2][0]);
	BlockSize = atoi(argv[4]);
	NumOfChunk = HowMany/(ChunkSize);
	NumOfBlock = HowMany/((ChunkSize * BlockSize));
	printf("\nElement type : %c\n", Type);
	printf("BlockSize : %li\n", BlockSize);
	printf("\nNumberOfChunk : %li\n", NumOfChunk);
	printf("Total Block : %li\n", NumOfBlock);
	printf("Total Element : %li\n\n\n\n", NumOfBlock*ChunkSize*BlockSize);

	srand(time(NULL));

	// HOST ************* initial random numbers
	// NOTE(review): ((-1)^i) is bitwise XOR (i.e. ~i), not alternating sign —
	// preserved as-is since it only shapes the random test data; confirm intent.
	switch(Type) {
	case 'I':
		InputArrayI  = (signed int *)malloc(HowMany * sizeof(signed int));
		SortedArrayI = (signed int *)malloc(HowMany * sizeof(signed int));
		for(i=0;i<HowMany;i++) {
			InputArrayI[i] = ( ((-1)^i)*rand() );
		}
		break;
	//*******************************************
	case 'L':
		InputArrayL  = (signed long long *)malloc(HowMany * sizeof(signed long long ));
		SortedArrayL = (signed long long *)malloc(HowMany * sizeof(signed long long ));
		for(i=0;i<HowMany;i++) {
			// Cast to long long BEFORE shifting: shifting an int by 32 is UB.
			InputArrayL[i] = (((long long)(((-1)^i)*rand())) << 32) | rand();
		}
		break;
	//*******************************************
	case 'F':
		InputArrayF  = (float *)malloc(HowMany * sizeof(float));
		SortedArrayF = (float *)malloc(HowMany * sizeof(float));
		int my_random_int;
		for(i=0;i<HowMany;i++) {
			my_random_int = ((-1)^i)*rand();
			InputArrayF[i] = *(float*)&my_random_int;   // bit-reinterpret as float
			if(isnan(InputArrayF[i])){i--;}             // reroll NaN patterns
		}
		break;
	//*******************************************
	case 'D':
		InputArrayD  = (double *)malloc(HowMany * sizeof(double));
		SortedArrayD = (double *)malloc(HowMany * sizeof(double));
		long long int my_random_long;
		for(i=0;i<HowMany;i++) {
			// Cast before shift (see 'L' case).
			my_random_long = (((long long)(((-1)^i)*rand())) << 32) | rand();
			InputArrayD[i] = *(double*)&my_random_long;
			if(isnan(InputArrayD[i])){i--;}
		}
		break;
	}

	// GPU *********** allocate and transfer data HtoD.
	// All sizes are HowMany * sizeof(element); the original passed raw element
	// counts (and sizeof(signed int) for long/float/double), corrupting the
	// transfers for every non-int type.
	switch(Type) {
	case 'I':
		cudaMalloc ((signed int **)&InputArrayG_I,  HowMany*sizeof(signed int));
		cudaMalloc ((signed int **)&SortedArrayG_I, HowMany*sizeof(signed int));
		cudaMemcpy (InputArrayG_I,  InputArrayI, HowMany*sizeof(signed int), cudaMemcpyHostToDevice);
		cudaMemcpy (SortedArrayG_I, InputArrayI, HowMany*sizeof(signed int), cudaMemcpyHostToDevice);
		break;
	//*******************************************
	case 'L':
		cudaMalloc ((signed long long **)&InputArrayG_L,  HowMany*sizeof(signed long long ));
		cudaMalloc ((signed long long **)&SortedArrayG_L, HowMany*sizeof(signed long long ));
		cudaMemcpy (InputArrayG_L,  InputArrayL, HowMany*sizeof(signed long long ), cudaMemcpyHostToDevice);
		cudaMemcpy (SortedArrayG_L, InputArrayL, HowMany*sizeof(signed long long ), cudaMemcpyHostToDevice);
		break;
	//*******************************************
	case 'F':
		cudaMalloc ((float **)&InputArrayG_F,  HowMany*sizeof(float));
		cudaMalloc ((float **)&SortedArrayG_F, HowMany*sizeof(float));
		cudaMemcpy (InputArrayG_F,  InputArrayF, HowMany*sizeof(float), cudaMemcpyHostToDevice);
		cudaMemcpy (SortedArrayG_F, InputArrayF, HowMany*sizeof(float), cudaMemcpyHostToDevice);
		break;
	//*******************************************
	case 'D':
		cudaMalloc ((double **)&InputArrayG_D,  HowMany*sizeof(double));
		cudaMalloc ((double **)&SortedArrayG_D, HowMany*sizeof(double));
		cudaMemcpy (InputArrayG_D,  InputArrayD, HowMany*sizeof(double), cudaMemcpyHostToDevice);
		cudaMemcpy (SortedArrayG_D, InputArrayD, HowMany*sizeof(double), cudaMemcpyHostToDevice);
		break;
	}

	gettimeofday(&t, NULL);
	StartTime = (double)t.tv_sec*1000000.0 + ((double)t.tv_usec);

	//******************* sort ***************
	for(a=0; a<REPS; a++) {
		switch(Type) {
		case 'I':
			//              tot block   element per block
			SortChunkGI<<< NumOfBlock, BlockSize>>> (SortedArrayG_I, ChunkSize);
			cudaMemcpy (SortedArrayI, SortedArrayG_I, HowMany*sizeof(signed int), cudaMemcpyDeviceToHost);
			fprintf(ff, "Bubble sorted result of int done by GPU\n***********************************\n");
			for(i=0;i<HowMany;i++) {
				fprintf(ff, "%i \n", SortedArrayI[i]);
				if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
			}
			MergeSortI(SortedArrayI, HowMany);
			break;
		//*******************************************
		case 'L':
			//              tot block   element per block
			SortChunkGL<<< NumOfBlock, BlockSize>>> (SortedArrayG_L, ChunkSize);
			cudaMemcpy (SortedArrayL, SortedArrayG_L, HowMany*sizeof(signed long long), cudaMemcpyDeviceToHost);
			fprintf(ff, "Bubble sorted result of long done by GPU\n***********************************\n");
			for(i=0;i<HowMany;i++) {
				fprintf(ff, "%lli \n", SortedArrayL[i]);
				if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
			}
			MergeSortL(SortedArrayL, HowMany);
			break;
		//*******************************************
		case 'F':
			//              tot block   element per block
			SortChunkGF<<< NumOfBlock, BlockSize>>> (SortedArrayG_F, ChunkSize);
			cudaMemcpy (SortedArrayF, SortedArrayG_F, HowMany*sizeof(float), cudaMemcpyDeviceToHost);
			fprintf(ff, "Bubble sorted result of float done by GPU\n***********************************\n");
			for(i=0;i<HowMany;i++) {
				fprintf(ff, "%f \n", SortedArrayF[i]);
				if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
			}
			MergeSortF(SortedArrayF, HowMany);
			break;
		//*******************************************
		case 'D':
			//              tot block   element per block
			SortChunkGD<<< NumOfBlock, BlockSize>>> (SortedArrayG_D, ChunkSize);
			cudaMemcpy (SortedArrayD, SortedArrayG_D, HowMany*sizeof(double), cudaMemcpyDeviceToHost);
			fprintf(ff, "Bubble sorted result of double done by GPU\n***********************************\n");
			for(i=0;i<HowMany;i++) {
				// Dump the GPU-sorted data (the original printed InputArrayD).
				fprintf(ff, "%lf \n", SortedArrayD[i]);
				if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
			}
			MergeSortD(SortedArrayD, HowMany);
			break;
		}
	}
	gettimeofday(&t, NULL);
	EndTime = (double)t.tv_sec*1000000.0 + ((double)t.tv_usec);
	TimeElapsed = (EndTime-StartTime)/1000.00;
	TimeElapsed /= (double)REPS;
	printf("\n\nExecution time:%10.4f ms ", TimeElapsed);

	// print merge-sorted result
	switch(Type) {
	case 'I':
		fprintf(fp, "Merge sorted result of int done by GPU\n***********************************\n");
		for(i=0;i<HowMany;i++) { fprintf(fp, "%i \n", SortedArrayI[i]); }
		break;
	//*******************************************
	case 'L':
		fprintf(fp, "Merge sorted result of long done by GPU\n***********************************\n");
		for(i=0;i<HowMany;i++) { fprintf(fp, "%lli \n", SortedArrayL[i]); }
		break;
	//*******************************************
	case 'F':
		fprintf(fp, "Merge sorted result of float done by GPU\n***********************************\n");
		for(i=0;i<HowMany;i++) { fprintf(fp, "%f \n", SortedArrayF[i]); }
		break;
	//******************************************
	case 'D':
		fprintf(fp, "Merge sorted result of double done by GPU\n***********************************\n");
		for(i=0;i<HowMany;i++) { fprintf(fp, "%lf \n", SortedArrayD[i]); }
		break;
	}

	// free memory
	switch(Type) {
	case 'I':
		free(InputArrayI); free(SortedArrayI);
		cudaFree(InputArrayG_I); cudaFree(SortedArrayG_I);
		break;
	//*******************************************
	case 'L':
		free(InputArrayL); free(SortedArrayL);
		cudaFree(InputArrayG_L); cudaFree(SortedArrayG_L);
		break;
	//*******************************************
	case 'F':
		free(InputArrayF); free(SortedArrayF);
		cudaFree(InputArrayG_F); cudaFree(SortedArrayG_F);
		break;
	//*******************************************
	case 'D':
		free(InputArrayD); free(SortedArrayD);
		cudaFree(InputArrayG_D); cudaFree(SortedArrayG_D);
		break;
	}
	fclose(ff);
	fclose(fp);
	return (EXIT_SUCCESS);
}

//************** bubble sort HOST ****************
// Host bubble sort of chunk Chunk_ID inside ary, then prints that chunk.
void SortChunkI(long ChunkSize, int Chunk_ID, signed int *ary)
{
	long i, j;
	long sp;
	int temp;
	sp = Chunk_ID * ChunkSize;
	for(i = 0; i < (ChunkSize-1); i++) {
		for(j = sp; j < ((ChunkSize+sp)-1-i); j++) {
			if(ary[j] > ary[j+1]) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
	for(i = sp; i < (ChunkSize+sp); i++) { printf("\n %i", ary[i]); }
	printf("\n \n");
	return;
}

// Host bubble sort of one chunk (long long elements).
void SortChunkL(long ChunkSize, int Chunk_ID, signed long long *ary)
{
	long i, j;
	long sp;
	long long temp;
	sp = Chunk_ID * ChunkSize;
	for(i = 0; i < (ChunkSize-1); i++) {
		for(j = sp; j < ((ChunkSize+sp)-1-i); j++) {
			if(ary[j] > ary[j+1]) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
	for(i = sp; i < (ChunkSize+sp); i++) { printf("\n %lli", ary[i]); }
	printf("\n \n");
	return;
}

// Host bubble sort of one chunk (float elements).
void SortChunkF(long ChunkSize, int Chunk_ID, float *ary)
{
	long i, j;
	long sp;
	float temp;
	sp = Chunk_ID * ChunkSize;
	for(i = 0; i < (ChunkSize-1); i++) {
		for(j = sp; j < ((ChunkSize+sp)-1-i); j++) {
			if(ary[j] > ary[j+1]) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
	for(i = sp; i < (ChunkSize+sp); i++) { printf("\n %f", ary[i]); }
	printf("\n \n");
	return;
}

// Host bubble sort of one chunk (double elements).
void SortChunkD(long ChunkSize, int Chunk_ID, double *ary)
{
	long i, j;
	long sp;
	double temp;
	sp = Chunk_ID * ChunkSize;
	for(i = 0; i < (ChunkSize-1); i++) {
		for(j = sp; j < ((ChunkSize+sp)-1-i); j++) {
			if(ary[j] > ary[j+1]) {
				temp = ary[j+1];
				ary[j+1] = ary[j];
				ary[j] = temp;
			}
		}
	}
	for(i = sp; i < (ChunkSize+sp); i++) { printf("\n %f", ary[i]); }
	printf("\n \n");
	return;
}

//************** merge sort HOST *****************
// Bottom-up iterative merge sort (int elements), O(n log n), O(n) scratch.
void MergeSortI(signed int *list, long length)
{
	long i;
	long left_min, left_max, right_min, right_max, next;
	signed int *tmp = (int*)malloc(sizeof(int) * length);
	if (tmp == NULL) {
		fputs("Error: out of memory\n", stderr);
		abort();
	}
	for (i = 1; i < length; i *= 2) {
		for (left_min = 0; left_min < length - i; left_min = right_max) {
			right_min = left_max = left_min + i;
			right_max = left_max + i;
			if (right_max > length) right_max = length;
			next = 0;
			while (left_min < left_max && right_min < right_max)
				tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
			while (left_min < left_max)
				list[--right_min] = list[--left_max];
			while (next > 0)
				list[--right_min] = tmp[--next];
		}
	}
	free(tmp);
	return;
}

// Bottom-up iterative merge sort (long long elements).
void MergeSortL(signed long long *list, long length)
{
	long i;
	long left_min, left_max, right_min, right_max, next;
	signed long long *tmp = (long long *)malloc(sizeof(long long ) * length);
	if (tmp == NULL) {
		fputs("Error: out of memory\n", stderr);
		abort();
	}
	for (i = 1; i < length; i *= 2) {
		for (left_min = 0; left_min < length - i; left_min = right_max) {
			right_min = left_max = left_min + i;
			right_max = left_max + i;
			if (right_max > length) right_max = length;
			next = 0;
			while (left_min < left_max && right_min < right_max)
				tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
			while (left_min < left_max)
				list[--right_min] = list[--left_max];
			while (next > 0)
				list[--right_min] = tmp[--next];
		}
	}
	free(tmp);
	return;
}

// Bottom-up iterative merge sort (float elements).
void MergeSortF(float *list, long length)
{
	long i;
	long left_min, left_max, right_min, right_max, next;
	float *tmp = (float *)malloc(sizeof(float ) * length);
	if (tmp == NULL) {
		fputs("Error: out of memory\n", stderr);
		abort();
	}
	for (i = 1; i < length; i *= 2) {
		for (left_min = 0; left_min < length - i; left_min = right_max) {
			right_min = left_max = left_min + i;
			right_max = left_max + i;
			if (right_max > length) right_max = length;
			next = 0;
			while (left_min < left_max && right_min < right_max)
				tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
			while (left_min < left_max)
				list[--right_min] = list[--left_max];
			while (next > 0)
				list[--right_min] = tmp[--next];
		}
	}
	free(tmp);
	return;
}

// Bottom-up iterative merge sort (double elements).
void MergeSortD(double *list, long length)
{
	long i;
	long left_min, left_max, right_min, right_max, next;
	double *tmp = (double *)malloc(sizeof(double ) * length);
	if (tmp == NULL) {
		fputs("Error: out of memory\n", stderr);
		abort();
	}
	for (i = 1; i < length; i *= 2) {
		for (left_min = 0; left_min < length - i; left_min = right_max) {
			right_min = left_max = left_min + i;
			right_max = left_max + i;
			if (right_max > length) right_max = length;
			next = 0;
			while (left_min < left_max && right_min < right_max)
				tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
			while (left_min < left_max)
				list[--right_min] = list[--left_max];
			while (next > 0)
				list[--right_min] = tmp[--next];
		}
	}
	free(tmp);
	return;
}
6,536
#include <stdio.h> #include <cuda.h> // from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void test() { __shared__ int i, mutex; if (threadIdx.x == 0) { i = 0; mutex = 0; } __syncthreads(); while( atomicCAS(&mutex, 0, 1) != 0); i++; printf("thread %d: %d\n", threadIdx.x, i); atomicExch(&mutex,0); } int main(void) { // run GPU code test<<<2, 32>>>(); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); }
6,537
#include <stdio.h> #include <stdlib.h> #include <cuda.h> __global__ void addVectors(int N, float *a, float *b, float *c) { int n = threadIdx.x + blockIdx.x*blockDim.x; if(n<N) { c[n] = a[n] + b[n]; } } int main(int argc, char **argv) { int N = 100; //Host memory allocation float *h_a = (float*) malloc(N*sizeof(float)); float *h_b = (float*) malloc(N*sizeof(float)); float *h_c = (float*) malloc(N*sizeof(float)); int n; for(n=0;n<N;n++) { h_a[n] = 1+n; h_b[n] = 1-n; } // Device memory allocation float *d_a, *d_b, *d_c; cudaMalloc(&d_a, N*sizeof(float)); cudaMalloc(&d_b, N*sizeof(float)); cudaMalloc(&d_c, N*sizeof(float)); // Copy data from host to device cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice); //save this for later int NthreadsPerBlock = 10; int NthreadBlocks = (N+NthreadsPerBlock-1)/NthreadsPerBlock ; addVectors<<<NthreadBlocks, NthreadsPerBlock>>>(N,d_a,d_b,d_c); //copy result from device to host cudaMemcpy(h_c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost); for(n=0;n<5;++n) { printf("h_c[%d] = %g\n",n,h_c[n]); } free(h_a); free(h_b); free(h_b); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
6,538
#include "includes.h" __global__ void kernel_image2D1C_ConvolveRow(float* img, int n_x, int n_y, short k, float *kernel, float* out) { // Find index of current thread int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; if (idx_x>=n_x) return; if (idx_y>=n_y) return; float sum=0; for (short i=-k;i<=k;i++) { short x=idx_x+i; if (x<0) x=0; if (x>=n_x) x=n_x-1; sum+=kernel[i+k]*img[idx_y*n_x+x]; } out[idx_y*n_x+idx_x]=sum; }
6,539
#pragma once

#include <thrust/tuple.h>
#include <thrust/functional.h>

// Fixed-arity tuple utilities for nearptd: homogeneous thrust::tuple aliases
// of size N (1..9, the thrust::tuple maximum) plus element-wise apply/reduce
// helpers. Every specialization is written out by hand because thrust::tuple
// and thrust::get<> require compile-time indices.
namespace nearptd {

// Typedef for tuple of size N and type T.
// Each specialization exposes:
//   tuple      - thrust::tuple of N elements of type T
//   make_tuple - build a tuple from the first N entries of a C array
//   make_array - unpack a tuple back into a C array (inverse of make_tuple)
// The primary template is intentionally empty; unsupported N fails to compile.
template<typename T, size_t N> struct ntuple {};

template<typename T> struct ntuple<T, 1> {
    typedef thrust::tuple<T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); }
};

template<typename T> struct ntuple<T, 2> {
    typedef thrust::tuple<T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); }
};

template<typename T> struct ntuple<T, 3> {
    typedef thrust::tuple<T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); }
};

template<typename T> struct ntuple<T, 4> {
    typedef thrust::tuple<T, T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2],a[3]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); b[3] = thrust::get<3>(a); }
};

template<typename T> struct ntuple<T, 5> {
    typedef thrust::tuple<T, T, T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2],a[3],a[4]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); b[3] = thrust::get<3>(a); b[4] = thrust::get<4>(a); }
};

template<typename T> struct ntuple<T, 6> {
    typedef thrust::tuple<T, T, T, T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2],a[3],a[4],a[5]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); b[3] = thrust::get<3>(a); b[4] = thrust::get<4>(a); b[5] = thrust::get<5>(a); }
};

template<typename T> struct ntuple<T, 7> {
    typedef thrust::tuple<T, T, T, T, T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2],a[3],a[4],a[5],a[6]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); b[3] = thrust::get<3>(a); b[4] = thrust::get<4>(a); b[5] = thrust::get<5>(a); b[6] = thrust::get<6>(a); }
};

template<typename T> struct ntuple<T, 8> {
    typedef thrust::tuple<T, T, T, T, T, T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); b[3] = thrust::get<3>(a); b[4] = thrust::get<4>(a); b[5] = thrust::get<5>(a); b[6] = thrust::get<6>(a); b[7] = thrust::get<7>(a); }
};

template<typename T> struct ntuple<T, 9> {
    typedef thrust::tuple<T, T, T, T, T, T, T, T, T> tuple;
    __host__ __device__ tuple make_tuple(T* a) {return thrust::make_tuple(a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7],a[8]);}
    __host__ __device__ void make_array(const tuple& a, T* b) { b[0] = thrust::get<0>(a); b[1] = thrust::get<1>(a); b[2] = thrust::get<2>(a); b[3] = thrust::get<3>(a); b[4] = thrust::get<4>(a); b[5] = thrust::get<5>(a); b[6] = thrust::get<6>(a); b[7] = thrust::get<7>(a); b[8] = thrust::get<8>(a); }
};

// Apply function across a tuple of T1s, returning a new tuple of T2s.
// T1/T2 are the full tuple types; UnFunc maps one element to one element.
template<typename T1, typename T2, typename UnFunc, size_t N> struct tuple_unary_apply {};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 1> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 2> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 3> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 4> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a)),func(thrust::get<3>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 5> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a)),func(thrust::get<3>(a)), func(thrust::get<4>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 6> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a)),func(thrust::get<3>(a)), func(thrust::get<4>(a)),func(thrust::get<5>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 7> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a)),func(thrust::get<3>(a)), func(thrust::get<4>(a)),func(thrust::get<5>(a)), func(thrust::get<6>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 8> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a)),func(thrust::get<3>(a)), func(thrust::get<4>(a)),func(thrust::get<5>(a)), func(thrust::get<6>(a)),func(thrust::get<7>(a))); }
};

template<typename T1, typename T2, typename UnFunc> struct tuple_unary_apply<T1, T2, UnFunc, 9> {
    __host__ __device__ T2 operator()(const T1&a, UnFunc func) { return thrust::make_tuple(func(thrust::get<0>(a)),func(thrust::get<1>(a)), func(thrust::get<2>(a)),func(thrust::get<3>(a)), func(thrust::get<4>(a)),func(thrust::get<5>(a)), func(thrust::get<6>(a)),func(thrust::get<7>(a)), func(thrust::get<8>(a))); }
};

// Apply function across two tuples of T1s and T2s element-wise, returning a
// tuple of T3s (zip-with). BinFunc combines one element from each input.
template<typename T1, typename T2, typename T3, typename BinFunc, size_t N> struct tuple_binary_apply {};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 1> {
    __host__ __device__ T3 operator()(const T1& a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 2> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 3> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 4> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b)), func(thrust::get<3>(a), thrust::get<3>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 5> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b)), func(thrust::get<3>(a), thrust::get<3>(b)), func(thrust::get<4>(a), thrust::get<4>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 6> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b)), func(thrust::get<3>(a), thrust::get<3>(b)), func(thrust::get<4>(a), thrust::get<4>(b)), func(thrust::get<5>(a), thrust::get<5>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 7> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b)), func(thrust::get<3>(a), thrust::get<3>(b)), func(thrust::get<4>(a), thrust::get<4>(b)), func(thrust::get<5>(a), thrust::get<5>(b)), func(thrust::get<6>(a), thrust::get<6>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 8> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b)), func(thrust::get<3>(a), thrust::get<3>(b)), func(thrust::get<4>(a), thrust::get<4>(b)), func(thrust::get<5>(a), thrust::get<5>(b)), func(thrust::get<6>(a), thrust::get<6>(b)), func(thrust::get<7>(a), thrust::get<7>(b))); }
};

template<typename T1, typename T2, typename T3, typename BinFunc> struct tuple_binary_apply<T1, T2, T3, BinFunc, 9> {
    __host__ __device__ T3 operator()(const T1&a, const T2& b, BinFunc func) const { return thrust::make_tuple(func(thrust::get<0>(a), thrust::get<0>(b)), func(thrust::get<1>(a), thrust::get<1>(b)), func(thrust::get<2>(a), thrust::get<2>(b)), func(thrust::get<3>(a), thrust::get<3>(b)), func(thrust::get<4>(a), thrust::get<4>(b)), func(thrust::get<5>(a), thrust::get<5>(b)), func(thrust::get<6>(a), thrust::get<6>(b)), func(thrust::get<7>(a), thrust::get<7>(b)), func(thrust::get<8>(a), thrust::get<8>(b))); }
};

// Apply function across a tuple of T1s as a right-fold reduction, returning a
// single T2: func(get<N-1>, func(get<N-2>, ... get<0>)). Recurses on N at
// compile time; the N==1 base case just casts element 0 to T2 (func is unused
// there, so an empty tuple is not representable).
template<typename T1, typename T2, typename Function, size_t N>
struct tuple_reduce {
    __host__ __device__ T2 operator()(const T1& a, Function func) {
        tuple_reduce<T1, T2, Function, N-1> op;
        return func(thrust::get<N-1>(a), op(a, func));
    }
};

template<typename T1, typename T2, typename Function>
struct tuple_reduce<T1, T2, Function, 1> {
    __host__ __device__ T2 operator()(const T1& a, Function func) {
        return static_cast<T2>(thrust::get<0>(a));
    }
};

};
6,540
#include "includes.h" const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// // THIS UPDATE DOES NOT UPDATE ELOSS? ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void timeFilter(const double *Params, const float *data, const float *W,float *conv_sig){ volatile __shared__ float sW2[81*NrankMax], sW[81*NrankMax], sdata[(Nthreads+81)*NrankMax]; float x; int tid, tid0, bid, i, nid, Nrank, NT, Nfilt, nt0; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nfilt = (int) Params[1]; Nrank = (int) Params[6]; nt0 = (int) Params[4]; if(tid<nt0*Nrank) sW[tid] = W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0]; __syncthreads(); tid0 = 0; while (tid0<NT-Nthreads-nt0+1){ if (tid<nt0*NrankMax) sdata[tid%nt0 + (tid/nt0)*(Nthreads+nt0)] = data[tid0 + tid%nt0+ NT*(bid + Nfilt*(tid/nt0))]; #pragma unroll 3 for(nid=0;nid<Nrank;nid++){ sdata[tid + nt0+nid*(Nthreads+nt0)] = data[nt0+tid0 + tid+ NT*(bid +nid*Nfilt)]; 
} __syncthreads(); x = 0.0f; for(nid=0;nid<Nrank;nid++){ #pragma unroll 4 for(i=0;i<nt0;i++) x += sW[i + nid*nt0] * sdata[i+tid + nid*(Nthreads+nt0)]; } conv_sig[tid0 + tid + NT*bid] = x; tid0+=Nthreads; __syncthreads(); } }
6,541
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16

enum NEIGH_TYPE {NEIGH_FOUR = 0, NEIGH_EIGHT = 1};

__device__ int getNeighboursLocalIndexes(int neighbours[], int nType);
__device__ int getLocalIndex(int localRow, int localCol);
__device__ bool inLocalBorder();
__device__ int findRoot(int equivalenceMatrix[], int elementIndex);
__device__ bool threadInImage(int height, int width);
__device__ int localAddrToGlobal(int label, int imHeight);

// Per-tile connected-component labeling (label-equivalence iteration).
// One thread per pixel; block = BLOCK_WIDTH x BLOCK_HEIGHT tile. The global
// image is addressed column-major: globalIndex = col*height + row.
// Fixes vs. original:
//  * threadInImage used '||' and was always true; out-of-image threads read
//    and wrote global memory out of bounds. Loads/stores are now guarded.
//  * All threads (in-image or not) stay in the iteration loop so that every
//    __syncthreads() is reached block-uniformly.
__global__ void localCCL(const int* input, int* output, const int height, const int width){

    __shared__ int segments[BLOCK_WIDTH * BLOCK_HEIGHT];
    __shared__ int labels[BLOCK_WIDTH * BLOCK_HEIGHT];
    __shared__ int changed;        // set when any label shrank this sweep

    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;

    int localRow = threadIdx.x;
    int localCol = threadIdx.y;
    int localIndex = localCol * blockDim.y + localRow;
    int globalIndex = col * height + row;

    int nType = NEIGH_EIGHT;

    bool inImage = threadInImage(height, width);

    // Load the tile; out-of-image cells get a sentinel that should not match
    // any real segment value (assumes input segments are non-negative --
    // TODO confirm against caller).
    segments[localIndex] = inImage ? input[globalIndex] : -1;
    __syncthreads();

    int label = localIndex;
    int neighboursIndexes[8];
    int numOfNeighbours = getNeighboursLocalIndexes(neighboursIndexes, nType);

    while(1){
        labels[localIndex] = label;
        if(localRow == 0 && localCol == 0)
            changed = 0;
        __syncthreads();

        // Take the minimum label among same-segment neighbours.
        int newLabel = label;
        if(inImage){
            for(int n = 0; n < numOfNeighbours; n++)
                if(segments[localIndex] == segments[neighboursIndexes[n]])
                    newLabel = min(newLabel, labels[neighboursIndexes[n]]);
        }
        __syncthreads();

        if(newLabel < label){
            atomicMin(labels + label, newLabel);
            changed = 1;
        }
        __syncthreads();

        if(changed == 0)
            break;

        // Path-compress through the equivalence chain before the next sweep.
        label = findRoot(labels, label);
        __syncthreads();
    }

    if(inImage)
        output[globalIndex] = localAddrToGlobal(label, height);
}

// Convert a tile-local label (index into the shared arrays) back to the
// column-major global pixel index of the label's representative cell.
__device__ int localAddrToGlobal(int label, int imHeight){
    // localIndex = localCol*BLOCK_WIDTH + localRow, so /,% recover the pair.
    // (Was 'label%16' -- replaced the magic constant with BLOCK_WIDTH.)
    int row = blockIdx.y*blockDim.y + label/BLOCK_WIDTH;
    int col = blockIdx.x*blockDim.x + label%BLOCK_WIDTH;
    return col * imHeight + row;
}

// Fill 'neighbours' with the local indexes of the in-tile 4- or 8-neighbours
// of this thread's cell; returns how many were written.
// Rewritten as a bounded offset scan: the original's hand-unrolled NEIGH_FOUR
// top-edge case wrote neighbours[3] but reported length 3, leaving
// neighbours[2] uninitialized (read later as a shared-memory index).
__device__ int getNeighboursLocalIndexes(int neighbours[], int nType){
    int localRow = threadIdx.x;
    int localCol = threadIdx.y;
    int length = 0;

    for(int dr = -1; dr <= 1; dr++){
        for(int dc = -1; dc <= 1; dc++){
            if(dr == 0 && dc == 0)
                continue;                      // the cell itself
            if(nType == NEIGH_FOUR && dr != 0 && dc != 0)
                continue;                      // skip diagonals for 4-connectivity
            int r = localRow + dr;
            int c = localCol + dc;
            // BLOCK_HEIGHT == BLOCK_WIDTH here, matching the original bounds.
            if(r < 0 || r >= BLOCK_HEIGHT || c < 0 || c >= BLOCK_WIDTH)
                continue;                      // outside the tile
            neighbours[length++] = getLocalIndex(r, c);
        }
    }
    return length;
}

// Flatten a (localRow, localCol) pair into the shared-array index.
__device__ int getLocalIndex(int localRow, int localCol){
    return localCol * blockDim.y + localRow;
}

// True when this thread sits on the tile's outer ring.
__device__ bool inLocalBorder(){
    return (threadIdx.x == 0 || threadIdx.x == BLOCK_WIDTH-1 ||
            threadIdx.y == 0 || threadIdx.y == BLOCK_HEIGHT-1);
}

// True when this thread maps to a pixel inside the image.
// BUG FIX: the original joined the range checks with '||', which is always
// true; '&&' is required for a meaningful bounds test.
__device__ bool threadInImage(int height, int width){
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    return (row >= 0 && row <= height-1 && col >= 0 && col <= width-1);
}

// Follow the equivalence chain until a self-referential root is found.
__device__ int findRoot(int equivalenceMatrix[], int elementIndex){
    while(equivalenceMatrix[elementIndex] != elementIndex)
        elementIndex = equivalenceMatrix[elementIndex];
    return elementIndex;
}
6,542
/* Implement your CUDA kernel in this file */
#define TILE_DIM 32

// Mirror the ghost cells of an (m+2) x (n+2) grid stored row-major with a
// one-cell halo: column 0/n+1 mirror columns 2/n-1, row 0/m+1 mirror rows
// 2/m-1. Launch with a 2D grid covering the m x n interior.
// BUG FIX: the original wrote the ghost cells without checking that (row,col)
// lies inside the interior, so an overshooting grid wrote out of bounds.
__global__ void mirror_boundaries(double *E_prev, const int n, const int m)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y + 1;   // 1..m interior rows
    int col = blockIdx.x*blockDim.x + threadIdx.x + 1;   // 1..n interior cols

    // One thread per interior row fills that row's left/right ghost cells.
    if (col == 1 && row <= m)
    {
        E_prev[row*(n+2)] = E_prev[row*(n+2) + 2];
        E_prev[row*(n+2) + n + 1] = E_prev[row*(n+2) + n - 1];
    }
    // One thread per interior column fills that column's top/bottom ghosts.
    if (row == 1 && col <= n)
    {
        E_prev[col] = E_prev[2*(n+2) + col];
        E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
    }
}

// One time step of the Aliev-Panfilov reaction-diffusion model on the
// interior of the haloed grid: 5-point diffusion stencil on E_prev, then the
// ODE updates for excitation E and recovery R.
__global__ void simulate (double *E, double *E_prev, double *R,
                          const double alpha, const int n, const int m, const double kk,
                          const double dt, const double a, const double epsilon,
                          const double M1, const double M2, const double b)
{
    // Per-thread staging of E and R (each element is read several times below).
    __shared__ double E_Block[TILE_DIM][TILE_DIM];
    __shared__ double R_Block[TILE_DIM][TILE_DIM];

    int ty = threadIdx.y;
    int tx = threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
    int col = blockIdx.x*blockDim.x + threadIdx.x + 1;

    if ((row - 1 < m) && (col - 1 < n))
    {
        // Diffusion: explicit 5-point Laplacian on the previous field.
        E[row*(n+2)+col] = E_prev[row*(n+2)+col] +
            alpha*(E_prev[row*(n+2)+col+1] + E_prev[row*(n+2)+col-1]
                   - 4*E_prev[row*(n+2)+col]
                   + E_prev[(row+1)*(n+2)+col] + E_prev[(row-1)*(n+2)+col]);

        E_Block[ty][tx] = E[row*(n+2) + col];
        R_Block[ty][tx] = R[row*(n+2) + col];

        // Reaction term for the excitation variable.
        E[row*(n+2)+col] = E_Block[ty][tx] =
            E_Block[ty][tx] - dt*(kk*E_Block[ty][tx]*(E_Block[ty][tx] - a)*(E_Block[ty][tx] - 1)
                                  + E_Block[ty][tx]*R_Block[ty][tx]);

        // Recovery variable update (uses the just-updated E).
        R[row*(n+2)+col] = R_Block[ty][tx] +
            dt*(epsilon + M1*R_Block[ty][tx]/(E_Block[ty][tx] + M2))
              *(-R_Block[ty][tx] - kk*E_Block[ty][tx]*(E_Block[ty][tx] - b - 1));
    }
}
6,543
#include <stdio.h>

// Device-side greeting via the device printf buffer.
__global__ void helloKernel()
{
    printf("Hello world from GPU!\n");
}

// Launches a one-thread grid and waits for the device so that the
// device-side printf output is flushed before returning.
void hello()
{
    const dim3 grid(1);
    const dim3 block(1);
    helloKernel<<<grid, block>>>();
    cudaDeviceSynchronize();
}
6,544
#include "stdio.h" #include <iostream> #include <cuda.h> #include <cuda_runtime.h> class A { public: int * x; }; A var = A(); __global__ void test(A & dvar){ //dvar.x = y; dvar.x[0] = 10; dvar.x[1] = 20; } int main(int argc, char* argv[]) { cudaMalloc(&(var.x),sizeof(int)*2); test<<<1,1>>>(var); int p[2]; cudaMemcpy(p,var.x,sizeof(int)*2,cudaMemcpyDefault); printf("Result: %i, %i\n",p[0], p[1]); }
6,545
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> __global__ void changeTable(int *input, int *output, size_t pitch) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; output[i,j] = blockIdx.x; } int main() { //allocation of matrix dim3 sizeOfDim(10,10); int *h_array; int *d_array; int *output_array; size_t size = sizeOfDim.x * sizeOfDim.y * sizeof(int); size_t pitch; //allocation memory to host array h_array = (int*)malloc(size); for (int i = 0; i < sizeOfDim.x; i++) { for(int j = 0; j < sizeOfDim.y; j++) { h_array[i,j] = i+j; } } //allocation device array /*cudaMalloc(&d_array,size); cudaMalloc(&output_array, size);*/ cudaMallocPitch(&d_array,&pitch, sizeOfDim.x * sizeof(int), sizeOfDim.y); cudaMallocPitch(&output_array, &pitch, sizeOfDim.x * sizeof(int), sizeOfDim.y); //copy data from host array to device array //cudaMemcpy(d_array, h_array, size, cudaMemcpyHostToDevice); cudaMemcpy2D(d_array,pitch,h_array,sizeOfDim.x * sizeof(int),sizeOfDim.x * sizeof(int),sizeOfDim.y,cudaMemcpyHostToDevice); //initialize block and threads dim3 threadsPerBlock(1,1); dim3 numberOfBlock(sizeOfDim.x / threadsPerBlock.x, sizeOfDim.y / threadsPerBlock.y); //do some cuda things changeTable<<<numberOfBlock,threadsPerBlock>>>(d_array, output_array, pitch); //copy result data from device to host //cudaMemcpy(h_array, output_array, size, cudaMemcpyDeviceToHost); cudaMemcpy2D(h_array, sizeOfDim.x * sizeof(int), output_array, pitch, sizeOfDim.x * sizeof(int), sizeOfDim.y, cudaMemcpyDeviceToHost); printf("Changed array \n"); for (int i = 0; i < sizeOfDim.x; i++) { for (int j = 0; j < sizeOfDim.y; j++) { printf("%d \n",h_array[i,j]); } } /*cudaFree(d_array); cudaFree(output_array); free(h_array);*/ getchar(); }
6,546
#include <stdio.h>

const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;

/**
 * Non-square coalesced matrix transpose.
 * Written by Amir Hossein Bakhtiary, use as you wish. Shouldn't have any
 * copyright problems.
 * http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html
 *
 * Launch: grid of TILE_DIM x TILE_DIM tiles, blocks of (TILE_DIM, BLOCK_ROWS);
 * each thread copies TILE_DIM/BLOCK_ROWS elements. The +1 pad on the shared
 * tile avoids shared-memory bank conflicts on the transposed read.
 */
__global__ void transposeCoalesced(double *odata, const double *idata, int rows, int cols)
{
    __shared__ double tile[TILE_DIM][TILE_DIM+1];

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;

    // Clamp the per-thread row loop for partial tiles at the matrix edge.
    int maxJ = TILE_DIM;
    int maxJ2 = TILE_DIM;
    int otherMaxJ = rows - y;
    if (maxJ > otherMaxJ)
        maxJ = otherMaxJ;

    if (x < cols)
    {
        for (int j = 0; j < maxJ; j += BLOCK_ROWS)
            tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x];
    }
    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
    y = blockIdx.x * TILE_DIM + threadIdx.y;

    int otherMaxJ2 = cols - y;
    if (maxJ2 > otherMaxJ2)
        maxJ2 = otherMaxJ2;

    if (x < rows)
    {
        for (int j = 0; j < maxJ2; j += BLOCK_ROWS)
            odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j];
    }
}

// Forward (causal) pass of Young's recursive Gaussian filter, one thread per
// column: out[r] = B*in[r] + bf[0]*out[r-3] + bf[1]*out[r-2] + bf[2]*out[r-1].
__global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf)
{
    unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if(idx<columns)
    {
        /* Compute first 3 output elements */
        out[idx] = B*in[idx];
        out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx];
        out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]);

        /* Recursive computation of output in forward direction using filter parameters bf and B */
        for(int i=3; i<rows; i++)
        {
            out[idx+i*columns] = B*in[idx+i*columns];
            for(int j=0; j<3; j++)
            {
                out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns];
            }
        }
    }
}

// Backward (anticausal) pass, mirror image of the causal pass.
__global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb)
{
    unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int total = columns*(rows-1);
    if(idx<columns)
    {
        /* Compute last 3 output elements */
        out[total + idx] = B*in[total + idx];
        out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx];
        out[total + idx - 2*columns] = B*in[total + idx - 2*columns]
            + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]);

        /* Recursive computation of output in backward direction using filter parameters bb and B */
        // BUG FIX: the loop bound was `i < rows-1`, which stopped at row 1 and
        // left row 0 of the output uncomputed; `i < rows` reaches row 0
        // (index total + idx - (rows-1)*columns == idx).
        for (int i=3; i<rows; i++)
        {
            out[total + idx - i*columns] = B*in[total + idx - i*columns];
            for (int j=0; j<3; j++)
            {
                out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns];
            }
        }
    }
}

// Separable 2D recursive Gaussian: transpose, filter rows causally then
// anticausally, transpose back, filter columns. The result is written back
// into `in` (the input buffer is overwritten); d_output is scratch.
// bf/bb are the 3 forward/backward recursion coefficients, B the gain.
extern "C" void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    /** \brief Array to store output of Causal filter convolution */
    double *d_input, *d_output, *d_bf, *d_bb;
    cudaMalloc((void**) &d_input, rows*columns*sizeof(double));
    cudaMalloc((void**) &d_output, rows*columns*sizeof(double));
    // BUG FIX: the coefficient buffers were allocated rows*columns doubles;
    // only 3 coefficients are ever stored/used.
    cudaMalloc((void**) &d_bf, 3*sizeof(double));
    cudaMalloc((void**) &d_bb, 3*sizeof(double));

    cudaMemcpy(d_input, in, rows*columns*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_bf, bf, 3*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_bb, bb, 3*sizeof(double), cudaMemcpyHostToDevice);

    dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1);
    dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1);
    dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);

    // -------- Convolve Rows (operate on the transposed matrix) ----------
    transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns);
    cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf);
    cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb);

    // -------- Convolve Columns (transpose back, filter again) ----------
    transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows);
    cuconvolve_youngCausal<<<columns/256 + 1, 256>>>(d_input, d_output, rows, columns, B, d_bf);
    cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb);

    cudaMemcpy(in, d_input, rows*columns*sizeof(double), cudaMemcpyDeviceToHost);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Execution time elapsed: %f ms\n", milliseconds);

    cudaFree(d_input);
    cudaFree(d_output);
    cudaFree(d_bf);
    cudaFree(d_bb);
}
6,547
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <cassert>
#include <iostream>
using namespace std;

const int Tile_size = 2;

//*************************************************************
// B = A * X for a column-major A (rows x cols): one thread per output row,
// B[tid] = sum_i X[i] * A[i*rows + tid].
__global__ void matrixMul(double *A, double *X, double *B, long rows, long cols){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    double sum = 0;
    if(tid < rows){
        for(int i=0; i < cols; i++){
            sum += X[i] * A[(i * rows) + tid];
        }
        B[tid] = sum;
    }
}
//*************************************************************
// Print a Row x Col matrix stored row-major.
// BUG FIX: the original indexed Mat[i*Row + j]; the row stride of a
// row-major Row x Col matrix is Col.
void Print_Mat(int Row, int Col, double * Mat)
{
    for(int i=0; i < Row; ++i){
        for(int j=0; j < Col; ++j){
            cout << Mat[i * Col + j] << " ";
        }
        cout << endl;
    }
}
//*************************************************************
// Reference CPU matrix multiply (currently unused by generate_b_gpu).
// BUG FIX: the inner accumulation loop ran over numCColumns; the shared
// dimension is numAColumns (== numBRows).
void matMultiplyOnHost(float * A, float * B, float * C,
                       int numARows, int numAColumns,
                       int numBRows, int numBColumns,
                       int numCRows, int numCColumns)
{
    for (int i=0; i < numARows; i ++){
        for (int j= 0; j < numAColumns; j++){
            C[i*numCColumns + j ] = 0.0;
            for (int k = 0; k < numAColumns; k++){
                C[i*numCColumns + j ] += A[i*numAColumns + k] * B[k*numBColumns + j];
            }
        }
    }
}
//*************************************************************
// Computes hostB = hostA * hostX on the GPU (hostA column-major rows x cols).
extern "C++" void generate_b_gpu(double *hostA, double *hostX, double *hostB, long cols, long rows)
{
    double *deviceA;
    double *deviceB;
    double *deviceX;

    // Allocating GPU memory
    assert(cudaSuccess == cudaMalloc((void **)&deviceA, sizeof(double)*cols*rows));
    assert(cudaSuccess == cudaMalloc((void **)&deviceB, sizeof(double)*rows));
    assert(cudaSuccess == cudaMalloc((void **)&deviceX, sizeof(double)*rows));

    // Copy memory to the GPU
    assert(cudaSuccess == cudaMemcpy(deviceA, hostA, sizeof(double)*cols*rows, cudaMemcpyHostToDevice));
    // BUG FIX: this copy (and the result copy below) used sizeof(float) for
    // double data, transferring only half of each buffer.
    assert(cudaSuccess == cudaMemcpy(deviceX, hostX, sizeof(double)*rows, cudaMemcpyHostToDevice));

    // BUG FIX: the original 2D grid ((1/Tile_size)+1, (rows/Tile_size)+1) with
    // the kernel's purely 1D indexing covered only blockDim.x * gridDim.x = 2
    // output rows. A 1D launch covering all rows is required.
    int threadsPerBlock = 256;
    int blocks = (int)((rows + threadsPerBlock - 1) / threadsPerBlock);
    matrixMul<<<blocks, threadsPerBlock>>>(deviceA, deviceX, deviceB, rows, cols);

    assert(cudaSuccess == cudaPeekAtLastError());   // launch-configuration errors
    cudaDeviceSynchronize();                         // wait for the kernel

    // Copy the results in GPU memory back to the CPU
    assert(cudaSuccess == cudaMemcpy(hostB, deviceB, sizeof(double)*rows, cudaMemcpyDeviceToHost));

    cout << "GPU A" << endl;
    Print_Mat(rows, cols, hostA);

    // Free the GPU memory
    assert(cudaSuccess == cudaFree(deviceA));
    assert(cudaSuccess == cudaFree(deviceB));
    assert(cudaSuccess == cudaFree(deviceX));
}
6,548
#include <cstdlib>
#include <cassert>
#include <iostream>
#include <cuda_runtime.h>

// __global__ indicates it will be called from the host and run on the device
// __device__ is for device/device and __host__ for host/host
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
__global__ void vectorAdd (float* a, float* b, float* c, int N)
{
    // get the global thread ID
    int TID = blockIdx.x * blockDim.x + threadIdx.x;

    // predication: the grid may be larger than N
    if (TID < N)
    {
        c[TID] = a[TID] + b[TID];
    }
}

int main ()
{
    int N = 1 << 20;
    size_t bytes = N * sizeof(float);

    // Allocate memory on the host side
    float *host_a = new float[N];
    float *host_b = new float[N];
    float *host_c = new float[N];

    // Allocate memory on the device side.
    // BUG FIX: the originals were first assigned `new float[N]` and then
    // overwritten by cudaMalloc, leaking three host arrays.
    float *dev_a = nullptr;
    float *dev_b = nullptr;
    float *dev_c = nullptr;
    cudaMalloc(&dev_a, bytes);
    cudaMalloc(&dev_b, bytes);
    cudaMalloc(&dev_c, bytes);

    for (int i = 0; i < N; i++)
    {
        host_a[i] = rand() % 100;
        host_b[i] = rand() % 100;
    }

    cudaMemcpy(dev_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, bytes, cudaMemcpyHostToDevice);

    // number of thread blocks and threads per block
    const int THREADS = 256;
    const int BLOCKS = (N + THREADS - 1) / THREADS;

    // Triple-chevron launch replaces the error-prone manual cudaLaunchKernel
    // argument-array plumbing; semantics are identical.
    vectorAdd<<<BLOCKS, THREADS>>>(dev_a, dev_b, dev_c, N);
    cudaDeviceSynchronize();

    cudaMemcpy(host_c, dev_c, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
    {
        assert(host_c[i] == host_a[i] + host_b[i]);
    }

    // Release device and host memory (leaked in the original).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;

    std::cout << "Program completed!" << std::endl;
    return 0;
}
6,549
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<cuda.h>

#define TRIALS_PER_THREAD 4096
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535989

#define CUDA_CALL(x) do { if(x!= cudaSuccess) {\
    printf("Error at %s:%d -- %s \n", __FILE__,__LINE__, cudaGetErrorString(x)); \
    return EXIT_FAILURE;}} while(0)

// Monte-Carlo pi: each thread throws TRIALS_PER_THREAD darts into [-1,1]^2
// and writes its own 4 * (hits / trials) estimate to estimate[id].
__global__ void monteWithGPU( curandState *states, float * estimate)
{
    double x, y;
    int toss, number_in_circle = 0;
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int seed = id;   // per-thread sequence via the curand subsequence

    curand_init(1234, seed, 0, &states[id]);
    for(toss = 0; toss < TRIALS_PER_THREAD; toss++)
    {
        /* curand_uniform() range 0~1 */
        x = curand_uniform(&states[id])*2 - 1;
        y = curand_uniform(&states[id])*2 - 1;
        if (x*x + y*y <= 1.0f)
            number_in_circle++;
    }
    estimate[id] = 4.0f * number_in_circle / (TRIALS_PER_THREAD);
}

int main(void)
{
    double pi_estimate;
    // time variables
    clock_t start, end;
    double cpu_time_used;

    float number_in_circle = 0;   // accumulates per-thread pi estimates
    curandState *devStates;
    float *dev, *host;

    start = clock();

    host = (float*)malloc(sizeof(float) * THREADS * BLOCKS);
    // CUDA_CALL was defined but unused in the original; use it consistently.
    CUDA_CALL(cudaMalloc((void**) &dev, BLOCKS * THREADS * sizeof(float)));
    CUDA_CALL(cudaMalloc((void**)&devStates, THREADS * BLOCKS * sizeof(curandState)));

    monteWithGPU<<<BLOCKS, THREADS>>>(devStates, dev);

    CUDA_CALL(cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost));

    // Average the per-thread estimates.
    for (int i = 0; i < BLOCKS * THREADS; i++){
        number_in_circle += host[i];
    }
    pi_estimate = number_in_circle / (BLOCKS * THREADS);

    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;

    printf("pi_estimate = %f \n", pi_estimate);
    printf("Elapsed time = %f seconds \n", cpu_time_used);

    // Release resources (leaked in the original).
    cudaFree(dev);
    cudaFree(devStates);
    free(host);
    return 0;
}
6,550
#include<iostream>
using namespace std;

// Empty placeholder kernel; exists only so the program exercises a launch.
__global__ void kernel(void)
{
}

int main()
{
    // Fire a single-thread grid, then greet from the host.
    kernel<<<1, 1>>>();
    cout << "Hello world" << endl;
    return 0;
}
6,551
#include "CopyToOpenMM_kernel.cu" extern "C" void TestCopyTo( const int n, float* input, float4* output ) { float *out_gpu_positions; cudaMalloc( ( void ** ) &out_gpu_positions, n * sizeof( float4 ) ); cudaMemcpy( out_gpu_positions, output, n * sizeof( float4 ), cudaMemcpyHostToDevice ); float *in_gpu_positions; cudaMalloc( ( void ** ) &in_gpu_positions, 3 * n * sizeof( float ) ); cudaMemcpy( in_gpu_positions, input, 3 * n * sizeof( float ), cudaMemcpyHostToDevice ); copyToOpenMM<<<3 * n, 1>>>( out_gpu_positions, in_gpu_positions, 3 * n ); cudaMemcpy( output, out_gpu_positions, n * sizeof( float4 ), cudaMemcpyDeviceToHost ); cudaFree( in_gpu_positions ); cudaFree( out_gpu_positions ); }
6,552
#include "shared.cuh" template <class T> struct View { int size; T *ptr; }; struct SizedParticleView { View<double> x, y, z, u, v, w, nextdist; __device__ int size() const { return x.size; } __device__ Point get_pos(int i) const { return {x.ptr + i, y.ptr + i, z.ptr + i}; } __device__ Point get_dir(int i) const { return {u.ptr + i, v.ptr + i, w.ptr + i}; } __device__ double get_nextdist(int i) const { return nextdist.ptr[i]; } }; __device__ inline void move_impl(const SizedParticleView &view, int i) { *view.get_pos(i).x += *view.get_dir(i).x * view.get_nextdist(i); *view.get_pos(i).y += *view.get_dir(i).y * view.get_nextdist(i); *view.get_pos(i).z += *view.get_dir(i).z * view.get_nextdist(i); } __global__ void move(SizedParticleView view) { int i = thread_id(); if (i >= view.size()) return; move_impl(view, i); }
6,553
// Matrix multiplication by parts
// Elements stored in row-major order
using namespace std;
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cuda.h>
#include <math.h>
#include <cuda_runtime.h>

#define BLOCK_SIZE 16

typedef struct {
    int width;
    int height;
    float *elements;
} Matrix;

// Forward declaration of matrix mult
__global__ void MatMulKernel (const Matrix, const Matrix, Matrix);

// Host code: C = A * B on the GPU (all matrices row-major).
void GpuMatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load matrices A and B to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc((void**) &d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc((void**) &d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // allocate C in device
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = d_C.width * d_C.height * sizeof(float);
    cudaMalloc((void**) &d_C.elements, size);

    // FIX: the grid was sized from A.width in both dimensions; size it from
    // the output C (x covers columns, y covers rows) so non-square shapes
    // are covered too. Identical for the square matrices used in main.
    dim3 thredsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 numBlock((C.width + BLOCK_SIZE - 1) / BLOCK_SIZE,
                  (C.height + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    MatMulKernel<<<numBlock, thredsPerBlock>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated

    // copy C to host
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    // free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// matrix multiplication kernel: each thread computes one element of C.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((row >= A.height) || (col >= B.width)){
        return;
    }
    for (int e = 0; e < A.width; e++)
    {
        Cvalue += A.elements[row*A.width + e] * B.elements[e*B.width + col];
    }
    C.elements[row*C.width + col] = Cvalue;
}

// Reference CPU multiply; assumes square N x N matrices (N = A.width).
void CpuMatMul(const Matrix A, const Matrix B, Matrix C)
{
    float sum;
    int N = A.width;
    for (int row = 0; row < N; ++row)
    {
        for (int column = 0; column < N; ++column)
        {
            sum = 0.0f;
            for (int k = 0; k < N; ++k)
            {
                sum += A.elements[row * N + k] * B.elements[k * N + column];
            }
            C.elements[row * N + column] = sum;
        }
    }
}

int main(int argc, char ** argv)
{
    int tests[] = {452};
    for (int test = 0; test < sizeof(tests)/sizeof(int); ++test)
    {
        int Width = tests[test];
        Matrix A;
        Matrix B;
        Matrix C;
        A.width = Width;  B.width = Width;  C.width = Width;
        A.height = Width; B.height = Width; C.height = Width;
        A.elements = new float[Width*Width];
        B.elements = new float[Width*Width];
        C.elements = new float[Width*Width];

        // fill matrices with random data
        for (int i = 0, max = Width * Width; i < max; ++i)
        {
            A.elements[i] = rand();
            B.elements[i] = rand();
        }

        const int N = 100;
        float gpuTime, cpuTime;

        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        for (int i = 0; i < N; ++i)
        {
            GpuMatMul(A, B, C);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&gpuTime, start, stop);
        gpuTime /= N;

        // NOTE: the CPU loop is commented out, so cpuTime measures an empty
        // span and the printed CPU figure is not meaningful.
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        /*for (int i = 0; i < N; ++i)
        {
            CpuMatMul(A, B, C);
        }*/
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&cpuTime, start, stop);
        cpuTime /= N;

        cout << "matrixSize: " << Width << " gpuTime: " << gpuTime
             << "[ms] cpuTime: " << cpuTime << "[ms]\n";

        // release host matrices (leaked in the original)
        delete[] A.elements;
        delete[] B.elements;
        delete[] C.elements;
    }
}
6,554
#include "includes.h"

// ERROR CHECKING MACROS //////////////////////////////////////////////////////

// Solve the dims x dims linear system A*X = B in place using Gaussian
// elimination with partial pivoting; the solution is written into C.
// A is stored as A[col*dims + row]; A and C are clobbered. Uses device-side
// malloc for the pivot-swap scratch row.
__device__ void solveLinearSystem(int dims, float *A, float *B, float *C) {
    // First generate upper triangular matrix for the augmented matrix
    float *swapRow;
    swapRow = (float*)malloc((dims+1)*sizeof(float));

    for (int ii = 0; ii < dims; ii++) {
        C[ii] = B[ii];
    }

    for (int ii = 0; ii < dims; ii++) {
        // Search for maximum in this column
        float maxElem = fabsf(A[ii*dims+ii]);
        int maxRow = ii;

        for (int jj = (ii+1); jj < dims; jj++) {
            // BUGFIX: was fabsf(A[ii*dims+jj] > maxElem) -- fabsf applied to a
            // boolean comparison -- which disabled the pivot search entirely.
            if (fabsf(A[ii*dims+jj]) > maxElem) {
                maxElem = fabsf(A[ii*dims+jj]);
                maxRow = jj;
            }
        }

        // Swap maximum row with current row if needed
        if (maxRow != ii) {
            for (int jj = ii; jj < dims; jj++) {
                swapRow[jj] = A[jj*dims+ii];
                A[jj*dims+ii] = A[jj*dims+maxRow];
                A[jj*dims+maxRow] = swapRow[jj];
            }
            swapRow[dims] = C[ii];
            C[ii] = C[maxRow];
            C[maxRow] = swapRow[dims];
        }

        // Make all rows below this one 0 in current column
        for (int jj = (ii+1); jj < dims; jj++) {
            float factor = -A[ii*dims+jj]/A[ii*dims+ii];
            // Work across columns
            for (int kk = ii; kk < dims; kk++) {
                if (kk == ii) {
                    A[kk*dims+jj] = 0.0;
                } else {
                    A[kk*dims+jj] += factor*A[kk*dims+ii];
                }
            }
            // Results vector
            C[jj] += factor*C[ii];
        }
    }

    free(swapRow);

    // Solve equation for an upper triangular matrix (back substitution)
    for (int ii = dims-1; ii >= 0; ii--) {
        C[ii] = C[ii]/A[ii*dims+ii];
        for (int jj = ii-1; jj >= 0; jj--) {
            C[jj] -= C[ii]*A[ii*dims+jj];
        }
    }
}

// Locally-weighted linear regression evaluated on a regular grid of query
// points: one thread per grid point idx. Uses the k nearest neighbours
// (precomputed distances `dist` and 1-based indices `ind`, laid out
// [k][noPoints]) with a Gaussian kernel whose bandwidth is the k-th
// neighbour's distance, and stores the fitted intercept into `regression`.
__global__ void multiLocLinReg(int noPoints, int noDims, int dimRes, int nYears,
        int noControls, int year, int control, int k, int* dataPoints,
        float *xvals, float *yvals, float *regression, float* xmins,
        float* xmaxes, float *dist, int *ind) {

    // Global thread index
    int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx < noPoints) {
        if (dataPoints[control] < 3) {
            // Not enough data points to fit: record zero for this query point.
            regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,noDims)*2)
                    + control*(dimRes*noDims + (int)pow(dimRes,noDims)*2)
                    + dimRes*noDims + idx] = 0.0;
        } else {
            // First, deconstruct the index into the index along each dimension
            int *dimIdx;
            dimIdx = (int*)malloc(noDims*sizeof(int));
            int rem = idx;
            for (int ii = 0; ii < noDims; ii++) {
                int div = (int)(rem/pow(dimRes,noDims-ii-1));
                dimIdx[ii] = div;
                rem = rem - div*pow(dimRes,noDims-ii-1);
            }

            // Get the query point coordinates
            float *xQ;
            xQ = (float*)malloc(noDims*sizeof(float));
            for (int ii = 0; ii < noDims; ii++) {
                xQ[ii] = ((float)dimIdx[ii])*(xmaxes[ii] - xmins[ii])
                        /(float)(dimRes - 1) + xmins[ii];
            }

            // 1. The k nearest neighbours of the query point were computed
            //    prior to this kernel (dist/ind).
            // 2. Build the weighted least-squares matrices:
            //    A - design matrix, B - right-hand side, X - coefficients
            float *A, *B, *X;
            A = (float*)malloc(pow(noDims+1,2)*sizeof(float));
            B = (float*)malloc((noDims+1)*sizeof(float));
            X = (float*)malloc((noDims+1)*sizeof(float));

            // Bandwidth for kernel: distance of the furthest of the k neighbours
            float h = dist[noPoints*(k-1) + idx];

            for (int ii = 0; ii <= noDims; ii++) {
                B[ii] = 0.0;
                for (int kk = 0; kk < k; kk++) {
                    float d = dist[noPoints*kk + idx];
                    // Gaussian kernel weight
                    float z = exp(-(d/h)*(d/h)/2)/sqrt(2*M_PI);
                    // Epanechnikov kernel (alternative, unused):
                    // float z = 0.75*(1-pow(d/h,2));
                    if (ii == 0) {
                        B[ii] += yvals[ind[noPoints*kk + idx] - 1]*z;
                    } else {
                        B[ii] += yvals[ind[noPoints*kk + idx] - 1]
                                *(xvals[(ind[noPoints*kk + idx] - 1)*noDims + ii - 1]
                                        - xQ[ii-1])*z;
                    }
                }

                for (int jj = 0; jj <= noDims; jj++) {
                    A[jj*(noDims+1)+ii] = 0.0;
                    for (int kk = 0; kk < k; kk++) {
                        float d = dist[noPoints*kk + idx];
                        float z = exp(-(d/h)*(d/h)/2)/sqrt(2*M_PI);
                        if ((ii == 0) && (jj == 0)) {
                            A[jj*(noDims+1)+ii] += 1.0*z;
                        } else if (ii == 0) {
                            A[jj*(noDims+1)+ii] +=
                                (xvals[(ind[noPoints*kk + idx] - 1)*noDims + jj - 1]
                                        - xQ[jj - 1])*z;
                        } else if (jj == 0) {
                            A[jj*(noDims+1)+ii] +=
                                (xvals[(ind[noPoints*kk + idx] - 1)*noDims + ii - 1]
                                        - xQ[ii - 1])*z;
                        } else {
                            A[jj*(noDims+1)+ii] +=
                                (xvals[(ind[noPoints*kk + idx] - 1)*noDims + jj - 1]
                                        - xQ[jj-1])
                                *(xvals[(ind[noPoints*kk + idx] - 1)*noDims + ii - 1]
                                        - xQ[ii - 1])*z;
                        }
                    }
                }
            }

            // 3. Solve the linear system (Gaussian elimination with pivoting).
            solveLinearSystem(noDims+1,A,B,X);

            // 4. The fitted value at the query point is simply the intercept
            //    X[0]; save it to the regression matrix.
            regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,noDims)*2)
                    + control*(dimRes*noDims + (int)pow(dimRes,noDims)*2)
                    + dimRes*noDims + idx] = X[0];

            // Free the per-thread device-heap allocations
            free(A);
            free(B);
            free(X);
            free(xQ);
            free(dimIdx);
        }
    }
}
6,555
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Each thread copies one character of the constant greeting into the output
// buffer; threads whose index is past N do nothing.
__global__ void print(char *a, int N)
{
    char p[11] = "Hello CUDA";
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        a[idx] = p[idx];
}

int main(void)
{
    const int N = 11;                 // strlen("Hello CUDA") plus the NUL
    size_t size = N * sizeof(char);

    // Host- and device-side copies of the buffer.
    char *a_h = (char *)malloc(size);
    char *a_d;
    cudaMalloc((void **)&a_d, size);

    // Zero the host array, then mirror it on the device.
    for (int i = 0; i < N; i++)
        a_h[i] = 0;
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);

    // Ceiling division so a partial block covers the tail elements.
    int blocksize = 4;
    int nblock = (N + blocksize - 1) / blocksize;
    print<<<nblock, blocksize>>>(a_d, N);   // run the kernel on the device

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));

    // Bring the result back and print it.
    cudaMemcpy(a_h, a_d, sizeof(char) * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        printf("%c", a_h[i]);
    printf("\n");

    free(a_h);
    cudaFree(a_d);
}
6,556
//#include "Globals.h" //#include "Pickcuda.cuh" //__global__ void AnimateCU(float timedelta, int cnt, int MiddleSpeed) //{ // int i = threadIdx.x; // if (i < cnt) // { // //Bird[i].Vel += ((MiddleSpeed - D3DXVec3Length(&Bird[i].Vel))*0.001F)*Bird[i].Vel; // Bird[i].Vel += Bird[i].Acc * timedelta; // Bird[i].Pos += Bird[i].Vel * timedelta; // } //} // //cudaError CudaAnimate(Globs *pGlbs, float timeDelta) //{ // int NoBrds = pGlbs->BirdsCount; // int AveSpeed = (pGlbs->BirdTopVel + pGlbs->BirdBottomVel) / 2; // int NBlks = NoBrds / ThdsPerBlk; // AnimateCU <<< NBlks, ThdsPerBlk >>> (timeDelta, NoBrds, AveSpeed); // return cudaGetLastError(); //}
6,557
//pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <cuda.h>
#include <stdio.h>

#define N 2 //1024

// Exercises the atomicExch overloads for each supported scalar type
// (int, unsigned int, unsigned long long int, float): every thread
// unconditionally stores a fixed value into the pointed-to location.
// The "//pass" / "//--blockDim..." lines above are verification-tool
// annotations (expected verdict and launch configuration); keep them as-is.
__global__ void definitions (int* A, unsigned int* B, unsigned long long int* C, float* D)
{
    atomicExch(A,10);
    atomicExch(B,100);
    atomicExch(C,20);
    atomicExch(D,200.0);
}
6,558
#include <stdio.h>

#define N 2048

// Per-block partial sum. Each block loads blockDim.x elements into dynamic
// shared memory (0 for out-of-range threads) and halves the active range each
// step; thread 0 writes the block's total to per_block_results[blockIdx.x].
// NOTE: the contiguous-range reduction requires blockDim.x to be a power of
// two (true for both launches below: 512 and 4).
__global__ void block_sum(const int *input, int *per_block_results, const size_t n)
{
    extern __shared__ int sdata[];

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

    // load input into __shared__ memory
    int x = 0;
    if (i < n)
    {
        x = input[i];
    }
    sdata[threadIdx.x] = x;
    __syncthreads();

    // contiguous range pattern
    for (int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (threadIdx.x < offset)
        {
            // add a partial sum upstream to our own
            sdata[threadIdx.x] += sdata[threadIdx.x + offset];
        }
        // wait until all threads in the block have updated their partial sums
        __syncthreads();
    }

    // thread 0 writes the final result
    if (threadIdx.x == 0)
    {
        per_block_results[blockIdx.x] = sdata[0];
    }
}

int main( void )
{
    int host_a[N];
    for (int i = 0; i < N; i++)
    {
        host_a[i] = 2;
    }

    const size_t block_size = 512;
    const size_t num_blocks = (N/block_size) + ((N%block_size) ? 1 : 0);

    int *dev_a;
    cudaMalloc(&dev_a, sizeof(int) * N);

    // num_blocks partial sums plus one extra slot for the grand total
    int *d_partial_sums_and_total = 0;
    cudaMalloc((void**)&d_partial_sums_and_total, sizeof(int) * (num_blocks + 1));

    cudaMemcpy(dev_a, host_a, sizeof(int) * N, cudaMemcpyHostToDevice);

    // First pass: one partial sum per block.
    block_sum<<<num_blocks, block_size, block_size * sizeof(int)>>>(
        dev_a, d_partial_sums_and_total, N);
    // Second pass: a single block reduces the partial sums into the last slot.
    block_sum<<<1, num_blocks, num_blocks * sizeof(int)>>>(
        d_partial_sums_and_total, d_partial_sums_and_total + num_blocks, num_blocks);

    int device_result = 0;
    cudaMemcpy(&device_result, d_partial_sums_and_total + num_blocks,
               sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", device_result);

    // BUGFIX: the device allocations were leaked.
    cudaFree(dev_a);
    cudaFree(d_partial_sums_and_total);
}
6,559
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// BUGFIX: added the bounds guard -- grids are normally rounded up to a whole
// number of blocks, so threads with i >= n would read and write out of bounds.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
6,560
#include <stdlib.h>
#include <iostream>

// Demonstrates the shell exit status of `mkdir`: the first invocation may
// succeed, the second fails because the directory already exists, so the
// two printed statuses can differ.
int main(){
    for (int attempt = 0; attempt < 2; ++attempt) {
        int status = system("mkdir data/output/test1");
        std::cout << "status = " << status << std::endl;
    }
    return 0;
}
6,561
#include <stdio.h>
#include <stdlib.h>
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_dbg.h"
#include "cs_interpolate.h"

// #define CUDA_DBG
// #define CUDA_DBG1

// Pass 1 of YUV420 spatial interpolation: for every even row of the 2x-larger
// output frame, copy the corresponding source sample (row/2, col/2) into the
// output; odd output rows are left untouched for pass 2. Grid-stride style
// loop with a fixed CUDA_MAX_THREADS stride.
__global__ void d_make_interpolate_420_1 ( int *input, int *output,
	int xdim, int ydim, int zdim, int frsize,
	int nxdim, int nydim, int nfrsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / nfrsize ;	// which temporal frame
		i = t_idx % nfrsize ;		// offset inside the new frame
		row_idx = i / nxdim ;

		if (!( row_idx & 1 ))	// even output rows come from the source frame
		{
			row_idx >>= 1 ;
			i %= nxdim ;
			column_idx = ( i >> 1 ) ;

			i = frame_n * frsize + row_idx * xdim + column_idx ;
			output[ t_idx ] = input [ i ] ;
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// Pass 2: fill the odd rows by averaging the rows above and below (in place
// in the output buffer); the last row, which has no row below, is copied
// from the row above.
// xdim/ydim/frsize are all for the new interpolated data
__global__ void d_make_interpolate_420_2 ( int *input,
	int xdim, int ydim, int frsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int from_row_1, from_row_2, i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / frsize ;
		i = t_idx % frsize ;
		row_idx = i / xdim ;

		if ( row_idx & 1 )	// only odd rows are synthesized
		{
			from_row_1 = row_idx - 1 ;
			from_row_2 = row_idx + 1 ;
			column_idx = i % xdim ;

			if ( from_row_2 == ydim )
			{
				// bottom edge: replicate the row above
				input[ t_idx ] = input [ frame_n * frsize +
					from_row_1 * xdim + column_idx ] ;
			}
			else
			{
				input[ t_idx ] = ( input [ frame_n * frsize +
					from_row_1 * xdim + column_idx ] +
					input [ frame_n * frsize +
					from_row_2 * xdim + column_idx ] ) / 2 ;
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

// Pass 3: take care of the columns -- average odd columns from their left and
// right neighbours, in place; the rightmost odd column (no right neighbour)
// keeps the value written by the earlier passes.
__global__ void d_make_interpolate_420_3 ( int *input,
	int xdim, int frsize, int size
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int from_col_1, from_col_2, i, row_idx, column_idx, frame_n ;
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;

	// the size is the total size on device
	while ( t_idx < size )
	{
		frame_n = t_idx / frsize ;
		i = t_idx % frsize ;
		row_idx = i / xdim ;
		column_idx = i % xdim ;

		if ( column_idx & 1 )
		{
			from_col_1 = column_idx - 1 ;
			from_col_2 = column_idx + 1 ;

			if ( from_col_2 != xdim )
			{
				input[ t_idx ] = ( input [ frame_n * frsize +
					row_idx * xdim + from_col_1 ] +
					input [ frame_n * frsize +
					row_idx * xdim + from_col_2 ] ) / 2 ;
			}
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}

/*
input : device addr ... also the output addr ... pls note
output : device addr
xdim : x dimension of frame
ydim : y dimension of frame
zdim : z dimension of frame, i.e. temporal
scheme : INT_YUV420 currently

Returns 1 on success, 0 for an unsupported scheme. Runs the three passes
above in sequence; each pass is followed by cudaThreadSynchronize()
(deprecated API -- cudaDeviceSynchronize() is the modern equivalent).
*/
int
h_make_interpolate ( int *d_input, int *d_output, int xdim, int ydim, int zdim,
	int scheme
#ifdef CUDA_OBS
	, int *cudadbgp
#endif
	)
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int oframe_size, nframe_size, nn, nBlocks ;

	switch ( scheme ) {
	case INT_YUV420 :
		oframe_size = xdim * ydim ;
		nframe_size = oframe_size * 4 ;	// YUV420: output frame is 2x in each axis
		nn = nframe_size * zdim ;

		// nBlocks = ( nn + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
		h_block_adj ( nn, nThreadsPerBlock, &nBlocks ) ;

#ifdef CUDA_DBG
		fprintf( stderr, "%s: din %p dout %p x/y/z %d %d %d sche %d\n",
			__func__, d_input, d_output, xdim, ydim, zdim, scheme ) ;
#endif

		d_make_interpolate_420_1 <<< nBlocks, nThreadsPerBlock >>> (
			d_input, d_output, xdim, ydim, zdim, oframe_size,
			xdim << 1, ydim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		cudaThreadSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_1", d_output, nn ) ;
#endif

		d_make_interpolate_420_2 <<< nBlocks, nThreadsPerBlock >>> (
			d_output, xdim << 1, ydim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		cudaThreadSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_2", d_output, nn ) ;
#endif

		d_make_interpolate_420_3 <<< nBlocks, nThreadsPerBlock >>> (
			d_output, xdim << 1, nframe_size, nn
#ifdef CUDA_OBS
			, cudadbgp
#endif
			) ;

		cudaThreadSynchronize() ;

#ifdef CUDA_DBG
		dbg_p_d_data_i ( "make_interpolate_3", d_output, nn ) ;
#endif
		break ;

	default :
		return ( 0 ) ;
	}
	return ( 1 ) ;
}
6,562
#include "includes.h"

// Element-wise sum of two N x N matrices stored in linear device memory:
// C = A + B. One thread per element of a 2-D launch; threads outside the
// matrix do nothing.
__global__ void sumMat(double *A, double *B, double *C, int N)
{
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    int row = blockDim.y*blockIdx.y + threadIdx.y;

    if (col >= N || row >= N)
        return;

    int at = col*N + row;
    C[at] = A[at] + B[at];
}
6,563
#include <complex>
#include <iostream>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <thrust/complex.h>
#include <math.h>

using namespace std;

typedef thrust::complex<float> complex_f;

// One thread per character cell: iterate z <- z^2 + c until |z| >= 2 or
// max_n iterations, then store '#' (never escaped) or '.' (escaped).
__global__ void mandelbrot_CUDA(char *mat, int max_row, int max_column, int max_n)
{
    // BUGFIX: the in-block flat index must scale threadIdx.x by blockDim.y
    // (the y extent), and the per-block offset is blockDim.x*blockDim.y.
    // The old formula used blockDim.x in both places, which was only correct
    // for square blocks.
    int pos = (blockDim.y * threadIdx.x) + threadIdx.y;
    pos = pos + (blockDim.x * blockDim.y) * blockIdx.x;

    // BUGFIX: exact integer division for the row index; the previous
    // float round-trip loses precision for large pixel indices.
    int r = pos / max_column;
    int c = pos - (max_column * r);

    if (pos < max_row * max_column) {
        complex_f z;
        int n = 0;
        while (abs(z) < 2 && ++n < max_n)
            z = z*z + complex_f(
                (float)c * 2 / max_column - 1.5,
                (float)r * 2 / max_row - 1
            );
        mat[pos] = (n == max_n ? '#' : '.');
    }
}

int main(){
    // Image size and iteration cap read from stdin.
    int max_row, max_column, max_n;
    cin >> max_row;
    cin >> max_column;
    cin >> max_n;

    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);

    // start time
    timeval start, end;
    gettimeofday(&start, NULL);

    char *mat;
    cudaMallocManaged(&mat, max_row * max_column * sizeof(char));

    if (max_row * max_column <= devProp.maxThreadsDim[0]) {
        // Small image: a single (max_row x max_column) block covers it.
        dim3 thr_per_block(max_row, max_column);
        mandelbrot_CUDA<<<1, thr_per_block>>>(mat, max_row, max_column, max_n);
    } else {
        // Large image: square blocks, enough of them to cover every pixel.
        // NOTE(review): maxThreadsDim[1] is the per-dimension limit, not
        // maxThreadsPerBlock -- confirm this is the intended budget.
        float Blocks = (float(max_row) * float(max_column)) / float(devProp.maxThreadsDim[1]);
        float threads = sqrt(devProp.maxThreadsDim[1]);
        int iblocks, ithreads;
        iblocks = ceil(Blocks);
        ithreads = round(threads);
        dim3 thr_per_block(ithreads, ithreads);
        mandelbrot_CUDA<<<iblocks, thr_per_block>>>(mat, max_row, max_column, max_n);
    }

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // end time
    gettimeofday(&end, NULL);
    double runtime = end.tv_sec + end.tv_usec / 1000000.0
                   - start.tv_sec - start.tv_usec / 1000000.0;
    printf("%.4f\n", runtime);

    // for(int r = 0; r < max_row; ++r){
    //     for(int c = 0; c < max_column; ++c)
    //         std::cout << mat[r * max_column + c];
    //     cout << '\n';
    // }
}
6,564
#include "includes.h"

// Element-wise vector addition c = a + b over N elements, one thread per
// element; threads past the end of the array return immediately.
__global__ void MyKernel(int *a, int *b, int *c, int N)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
6,565
//
// (C) 2021, E. Wes Bethel
// sobel_gpu.cpp
// usage:
// sobel_gpu [no args, all is hard coded]
//

#include <iostream>
#include <vector>
#include <chrono>
#include <unistd.h>
#include <string.h>
#include <math.h>

// see https://en.wikipedia.org/wiki/Sobel_operator

// easy-to-find and change variables for the input.
// specify the name of a file containing data to be read in as bytes, along with
// dimensions [columns, rows]

// this is the original laughing zebra image
//static char input_fname[] = "../data/zebra-gray-int8";
//static int data_dims[2] = {3556, 2573}; // width=ncols, height=nrows
//char output_fname[] = "../data/processed-raw-int8-cpu.dat";

// this one is a 4x augmentation of the laughing zebra
static char input_fname[] = "../data/zebra-gray-int8-4x";
static int data_dims[2] = {7112, 5146}; // width=ncols, height=nrows
char output_fname[] = "../data/processed-raw-int8-4x-cpu.dat";

// see https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
// macro to check for cuda errors. basic idea: wrap this macro around every cuda call
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

//
// this function is callable only from device code
//
// perform the sobel filtering at a given i,j location: computes Gx = gx*s and
// Gy = gy*s centered at (i,j) and returns G = sqrt(Gx^2 + Gy^2). Border
// pixels (no full 3x3 neighbourhood) return 0.
//
// input: float *s - the source data
// input: int i,j - pixel location (i = row, j = column)
// input: int nrows, ncols - dimensions of the image buffers
// input: float *gx, gy - 9-element (3x3) sobel filter weights
//
__device__ float sobel_filtered_pixel(float *s, int i, int j, int ncols, int nrows, float *gx, float *gy)
{
    float gradX = 0.0;
    float gradY = 0.0;

    if ((i > 0 && i < nrows - 1) && (j > 0 && j < ncols - 1))
    {
        int c = i * ncols + j;  // flat offset of the center pixel

        // BUGFIX: the accumulators were re-declared inside this branch
        // ("float gradX = gradX + ..."), shadowing the outer variables, so
        // the function always returned sqrt(0) = 0.
        // BUGFIX: the Gx top-right tap read s[c - (ncols+1)] (the top-left
        // neighbour again) instead of s[c - ncols + 1].
        gradX = gx[0] * s[c - ncols - 1] + gx[1] * s[c - ncols] + gx[2] * s[c - ncols + 1]
              + gx[3] * s[c - 1]         + gx[4] * s[c]         + gx[5] * s[c + 1]
              + gx[6] * s[c + ncols - 1] + gx[7] * s[c + ncols] + gx[8] * s[c + ncols + 1];

        gradY = gy[0] * s[c - ncols - 1] + gy[1] * s[c - ncols] + gy[2] * s[c - ncols + 1]
              + gy[3] * s[c - 1]         + gy[4] * s[c]         + gy[5] * s[c + 1]
              + gy[6] * s[c + ncols - 1] + gy[7] * s[c + ncols] + gy[8] * s[c + ncols + 1];
    }

    float gradXsquared = gradX * gradX;
    float gradYsquared = gradY * gradY;
    return sqrt(gradXsquared + gradYsquared);
}

//
// kernel: strides through all n = nrows*ncols pixels using the usual
// index/stride pattern, calling sobel_filtered_pixel() at each location.
//
// input: float *s - source pixels, size = rows*cols
// output: float *d - destination pixels, size = rows*cols
// input: int n - total pixel count; nrows, ncols - image dimensions
// input: float *gx, gy - stencil weights for the sobel filter
//
__global__ void sobel_kernel_gpu(float *s,  // source image pixels
        float *d,                           // dst image pixels
        int n,                              // size of image cols*rows
        int nrows, int ncols,
        float *gx, float *gy)               // sobel stencil weights
{
    int dim = blockDim.x * gridDim.x;
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    for (int i = index; i < n; i += dim) {
        int row = i / ncols;
        int col = i % ncols;
        d[i] = sobel_filtered_pixel(s, row, col, ncols, nrows, gx, gy);
    }
}

int main (int ac, char *av[])
{
    // input, output file names hard coded at top of file

    // load the input file
    off_t nvalues = data_dims[0]*data_dims[1];
    unsigned char *in_data_bytes = (unsigned char *)malloc(sizeof(unsigned char)*nvalues);

    FILE *f = fopen(input_fname,"r");
    // fread returns the number of items read (element size is 1 byte here)
    if (fread((void *)in_data_bytes, sizeof(unsigned char), nvalues, f) != (size_t)nvalues)
    {
        printf("Error reading input file. \n");
        fclose(f);
        return 1;
    }
    else
        printf(" Read data from the file %s \n", input_fname);
    fclose(f);

#define ONE_OVER_255 0.003921568627451

    // now convert input from byte, in range 0..255, to float, in range 0..1
    float *in_data_floats;
    gpuErrchk( cudaMallocManaged(&in_data_floats, sizeof(float)*nvalues) );
    for (off_t i=0; i<nvalues; i++)
        in_data_floats[i] = (float)in_data_bytes[i] * ONE_OVER_255;

    // now, create a buffer for output
    float *out_data_floats;
    gpuErrchk( cudaMallocManaged(&out_data_floats, sizeof(float)*nvalues) );
    for (int i=0;i<nvalues;i++)
        out_data_floats[i] = 1.0;  // assign "white" to all output values for debug

    // define sobel filter weights, copy to a device accessible buffer
    float Gx[9] = {1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0};
    float Gy[9] = {1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0};
    float *device_gx, *device_gy;
    // BUGFIX: was sizeof(float)*sizeof(Gx) (over-allocation); sizeof(Gx) is
    // already the byte count of the 9-float array.
    gpuErrchk( cudaMallocManaged(&device_gx, sizeof(Gx)) );
    gpuErrchk( cudaMallocManaged(&device_gy, sizeof(Gy)) );
    for (int i=0;i<9;i++) // copy from Gx/Gy to device_gx/device_gy
    {
        device_gx[i] = Gx[i];
        device_gy[i] = Gy[i];
    }

    // now, induce memory movement to the GPU of the data in unified memory buffers
    int deviceID = 0; // assume GPU#0, always. OK assumption for this program
    cudaMemPrefetchAsync((void *)in_data_floats, nvalues*sizeof(float), deviceID);
    cudaMemPrefetchAsync((void *)out_data_floats, nvalues*sizeof(float), deviceID);
    cudaMemPrefetchAsync((void *)device_gx, sizeof(Gx), deviceID);
    cudaMemPrefetchAsync((void *)device_gy, sizeof(Gy), deviceID);

    // set up to run the kernel
    int nBlocks=1, nThreadsPerBlock=256;
    // optional command-line override: argv[1]=threads per block, argv[2]=blocks
    // BUGFIX: was `ac > 1`, which read av[2] out of bounds when exactly one
    // argument was given.
    if (ac > 2) {
        nThreadsPerBlock = atoi(av[1]);
        nBlocks = atoi(av[2]);
    }

    printf(" GPU configuration: %d blocks, %d threads per block \n", nBlocks, nThreadsPerBlock);

    // invoke the kernel on the device
    sobel_kernel_gpu<<<nBlocks, nThreadsPerBlock>>>(in_data_floats, out_data_floats,
            nvalues, data_dims[1], data_dims[0], device_gx, device_gy);

    // wait for it to finish, check errors
    gpuErrchk ( cudaDeviceSynchronize() );

    // write output after converting from floats in range 0..1 to bytes in range 0..255
    unsigned char *out_data_bytes = in_data_bytes;  // just reuse the buffer from before
    for (off_t i=0; i<nvalues; i++)
        out_data_bytes[i] = (unsigned char)(out_data_floats[i] * 255.0);

    f = fopen(output_fname,"w");
    if (fwrite((void *)out_data_bytes, sizeof(unsigned char), nvalues, f) != (size_t)nvalues)
    {
        printf("Error writing output file. \n");
        fclose(f);
        return 1;
    }
    else
        printf(" Wrote the output file %s \n", output_fname);
    fclose(f);
} // eof
6,566
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Threads per block for the 1-D launch in hostFE.
int thread_num=800;

// Iterate z <- z^2 + c; return the iteration at which |z|^2 exceeds 4,
// or `count` if the point never escapes.
__device__ int mandel(float c_re, float c_im, int count)
{
    float z_re = c_re, z_im = c_im;
    int i;
    for (i = 0; i < count; ++i)
    {
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        float new_re = z_re * z_re - z_im * z_im;
        float new_im = 2.f * z_re * z_im;
        z_re = c_re + new_re;
        z_im = c_im + new_im;
    }
    return i;
}

// One thread per pixel of the pitched image; `tot` is the flat pixel index
// reconstructed from the 1-D launch.
// NOTE(review): no bounds guard -- assumes the grid covers exactly
// width*height pixels (resX*resY divisible by thread_num); confirm callers.
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY,
        int *cu_img, int width, int maxIterations, int thread_num, size_t pitch_img)
{
    int x_idx, y_idx, tot;
    tot = blockIdx.x*thread_num + threadIdx.x;
    x_idx = tot % (width);
    y_idx = tot / (width);
    // Row base address honours the pitch returned by cudaMallocPitch.
    *((int*)((char*)cu_img + (y_idx*pitch_img)) + x_idx) =
        mandel(lowerX + x_idx * stepX, lowerY + y_idx * stepY, maxIterations);
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY,
        int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    int *cu_img;
    size_t pitch_img;
    cudaMallocPitch((void**)&cu_img, &pitch_img, sizeof(int)*resX, resY);

    mandelKernel<<<resX*resY/thread_num, thread_num>>>(lowerX, lowerY, stepX, stepY,
            cu_img, resX, maxIterations, thread_num, pitch_img);

    // Copy the pitched device image back into the tightly-packed host buffer.
    cudaMemcpy2D(img, resX*sizeof(int), cu_img, pitch_img,
            sizeof(int)*resX, resY, cudaMemcpyDeviceToHost);

    // BUGFIX: the pitched device buffer was leaked on every call.
    cudaFree(cu_img);
}
6,567
#include "includes.h"

/*Title: Vector addition and subtraction in CUDA. A simple way to understand
how CUDA can be used to perform arithmetic operations. */

using namespace std;

# define size 5

//Global functions
//********************************************************

// Element-wise subtraction computed in place: b[tid] = a[tid] - b[tid].
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover at most `size` (5) elements of a and b -- confirm against the caller.
__global__ void SubIntsCUDA(int *a, int *b)
{
    int tid=blockIdx.x*blockDim.x+threadIdx.x;
    b[tid] = a[tid] - b[tid];
}
6,568
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <cstring>  // memset (previously used without an explicit include)

using namespace std;

// Enumerate every CUDA device and print its properties; then describe a
// desired compute capability (1.3) and ask the runtime to choose the closest
// device, making it current.
int main()
{
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);

    for (int i = 0; i < count; i++)
    {
        cudaGetDeviceProperties(&prop, i);
        cout << "information for device " << i << endl;
        cout << "name:" << prop.name << endl;
        cout << "capability:" << prop.major << "." << prop.minor << endl;
        cout << "clock rate:" << prop.clockRate << endl;

        cudaDeviceProp sDevProp = prop;
        printf("%d \n", i);
        printf("Device name: %s\n", sDevProp.name);
        // BUGFIX: the size_t-typed fields were printed with %d (undefined
        // behavior on LP64 platforms); use %zu.
        printf("Device memory: %zu\n", sDevProp.totalGlobalMem);
        printf("Memory per-block: %zu\n", sDevProp.sharedMemPerBlock);
        printf("Register per-block: %d\n", sDevProp.regsPerBlock);
        printf("Warp size: %d\n", sDevProp.warpSize);
        printf("Memory pitch: %zu\n", sDevProp.memPitch);
        printf("Constant Memory: %zu\n", sDevProp.totalConstMem);
        printf("Max thread per-block: %d\n", sDevProp.maxThreadsPerBlock);
        printf("Max thread dim: ( %d, %d, %d )\n",
            sDevProp.maxThreadsDim[0], sDevProp.maxThreadsDim[1], sDevProp.maxThreadsDim[2]);
        printf("Max grid size: ( %d, %d, %d )\n",
            sDevProp.maxGridSize[0], sDevProp.maxGridSize[1], sDevProp.maxGridSize[2]);
        printf("Ver: %d.%d\n", sDevProp.major, sDevProp.minor);
        printf("Clock: %d\n", sDevProp.clockRate);
        printf("textureAlignment: %zu\n", sDevProp.textureAlignment);
    }

    cout << "next" << endl;

    int dev;
    cudaGetDevice(&dev);
    cout << "id of current cuda device:" << dev << endl;

    // Build the selection criteria: only major/minor matter, rest zeroed.
    cudaDeviceProp temp_prop;
    memset(&temp_prop, 0, sizeof(cudaDeviceProp));
    temp_prop.major = 1;
    temp_prop.minor = 3;
    // BUGFIX: was cudaChooseDevice(&dev, &prop) -- the properties of the last
    // enumerated device -- so the 1.3 criteria in temp_prop were ignored.
    cudaChooseDevice(&dev, &temp_prop);
    cout << "id of cuda device closest to revision 1.3:" << dev << endl;
    cudaSetDevice(dev);
}
6,569
#include "includes.h"

// Fill idata[0..length-1] with Hamming-window coefficients:
//   w(n) = 0.54 - 0.46 * cos(2*pi*n / (length - 1))
// One thread per sample; out-of-range threads do nothing.
// PI_F is expected to be provided by includes.h.
__global__ void windowHamming(float* idata, int length)
{
    int tidx = threadIdx.x + blockIdx.x*blockDim.x;
    if (tidx < length)
    {
        idata[tidx] = 0.54 - 0.46 * cos(2*tidx*PI_F / (length - 1));
    }
}
6,570
#include "includes.h"

// Grid-stride loop over the band: ho = rn - soilHeat, element-wise, for the
// first width_band entries. Works for any launch configuration.
__global__ void hoCalc(double* rn, double* soilHeat, double* ho, int width_band)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < width_band; i += stride)
    {
        ho[i] = rn[i] - soilHeat[i];
    }
}
6,571
#include "includes.h"

// No-op kernel; useful for measuring bare launch overhead or forcing CUDA
// context creation.
__global__ void emptyKernel() {}
6,572
#include <cuda.h> #include <cuda_runtime.h>
6,573
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

// Transfer sizes tested: powers of two from 2^0 to 2^27 elements.
#define BYTE_MAX 28
#define ATTEMPTS 100
#define PINNED 0

const uint32_t BYTES[BYTE_MAX] = {1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6,
    1<<7, 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15, 1<<16, 1<<17,
    1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23, 1<<24, 1<<25, 1<<26, 1<<27};
const uint32_t BYTE_TOTAL[4] = {28, 27, 26, 25};

// Benchmark: for each size in BYTES, time one host-to-device cudaMemcpy with
// CUDA events and append "elapsed_ms,size_index" to rawData_bytes_1a_<i>.txt.
// Repeats `iterations` times. argv: [1] = array count (reported only),
// [2] = iteration count.
int main(int argc, char *argv[])
{
    if (argc != 3)
    {
        printf("Usage: HtoD_TimeByte_1a array iteration\n");
        printf("Example: HtoD_TimeByte_1a 1 1000\n");
        return 0;
    }

    int array = atoi(argv[1]);
    int iterations = atoi(argv[2]);

    printf("Starting HostToDevice Bytes vs. Time Data Collection.......\n\n");
    printf("Number of Arrays: %d\n............\n", array);

    for (int run=0; run<iterations; run++)
    {
        // Simple progress indicator.
        printf("\r%d/%d", run+1, iterations);
        fflush(stdout);

        // Initialize parameters
        FILE *outFile;

        for (int i=0; i<28; i++)
        {
            size_t size = BYTES[i]*sizeof(float);

            // Declare Host variables
            float *a;
            // Declare Device variables
            float *d_a;

            // Allocate Host variables (pageable; pinned path is commented out)
            a = (float*)malloc(size);
            // Allocate Device variables
            cudaMalloc(&d_a, size);
            // Get the GPU Pointers
            // cudaHostGetDevicePointer(&d_a, a, 0);

            // Set Host variables
            // NOTE(review): rand() is never seeded, so every run transfers
            // identical data -- presumably irrelevant for bandwidth timing.
            for (uint32_t j=0; j<BYTES[i]; j++)
            {
                int n0 = rand()%1000000;
                int n1 = rand()%1000000;
                a[j] = n0*n1;
            }

            // Initialize Device Event Handeling
            // (cudaFree(0) forces lazy context creation before timing)
            cudaFree(0);
            float milli = 0;
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);

            /////////////////////////////////////////////////////////
            /////////////////////////////////////////////////////////

            // Set Device variables: time the single H2D copy with events
            cudaEventRecord(start, 0);
            cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&milli, start, stop);

            cudaEventDestroy(start);
            cudaEventDestroy(stop);

            // Write Data to File: appends "milli,i" to rawData_bytes_1a_<i>.txt
            int iBuf = i;
            char fn[50] = "rawData_bytes_1a_";
            char s_byte[50];
            snprintf(s_byte, 50, "%i", iBuf);
            strcat(fn, s_byte);
            strcat(fn, ".txt");
            outFile = fopen(fn, "a");
            char buffer_0[50];
            snprintf(buffer_0, 50, "%f", milli);
            strcat(buffer_0, ",");
            fputs(buffer_0, outFile);
            char buffer_1[50];
            snprintf(buffer_1, 50, "%i", iBuf);
            strcat(buffer_1, "\n");
            fputs(buffer_1, outFile);
            fclose(outFile);

            // Deallocate Host variables
            free(a);
            // Deallocate Device variables
            cudaFree(d_a);
        }
    }

    printf("\n............\n\n");
    printf("Complete!!\n");
    return 0;
}
6,574
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define VECT_SIZE 10000000
#define THRESHOLD 1.e-7

// Serial vector addition executed by one device thread (launched <<<1,1>>>):
// an intentionally unparallelized GPU baseline.
__global__ void VectorAdd(float *w, float *u, float *v, int n)
{
    for (int i = 0; i < n; i++)
    {
        w[i] = u[i] + v[i];
    }
}

// Fill v with v[i] = i.
void fill(float *v)
{
    for (int i = 0; i < VECT_SIZE; i++)
    {
        v[i] = (float)i;
    }
}

int main()
{
    // Host pointers
    float *u, *v, *w;
    // Device pointers
    float *u_device, *v_device, *w_device;

    // Allocate host memory
    u = (float *) malloc(sizeof(float)*VECT_SIZE);
    v = (float *) malloc(sizeof(float)*VECT_SIZE);
    w = (float *) malloc(sizeof(float)*VECT_SIZE);
    fill(u);
    fill(v);

    // Allocate device memory
    cudaMalloc((void **) &u_device, sizeof(float)*VECT_SIZE);
    cudaMalloc((void **) &v_device, sizeof(float)*VECT_SIZE);
    cudaMalloc((void **) &w_device, sizeof(float)*VECT_SIZE);

    // H --> D
    cudaMemcpy(u_device, u, sizeof(float)*VECT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(v_device, v, sizeof(float)*VECT_SIZE, cudaMemcpyHostToDevice);

    // Kernel call (single thread, see VectorAdd)
    VectorAdd<<<1,1>>>(w_device, u_device, v_device, VECT_SIZE);

    // D --> H (blocking copy also synchronizes with the kernel)
    cudaMemcpy(w, w_device, sizeof(float)*VECT_SIZE, cudaMemcpyDeviceToHost);

    // Check the result.
    // BUGFIX: the old test `!((w-u-v) < THRESHOLD)` passed any negative
    // error, however large; compare the absolute difference instead.
    for (int i = 0; i < VECT_SIZE; i++)
    {
        if (fabsf(w[i]-u[i]-v[i]) >= THRESHOLD)
        {
            fprintf(stderr,"Got mistake!\n");
        }
    }

    cudaFree(u_device);
    cudaFree(v_device);
    cudaFree(w_device);
    free(u);
    free(v);
    free(w);
}
6,575
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15) { if (comp <= var_1 - (var_2 + asinf(ceilf((var_3 / var_4))))) { comp += (+1.0969E-25f * asinf(var_5 / var_6)); float tmp_1 = asinf((var_7 + var_8 / (-1.6537E-16f * (-1.7489E-43f - -1.1432E-35f)))); comp += tmp_1 / var_9 * (-1.5681E-42f - (var_10 / +1.9191E35f - (-0.0f / +1.2688E-36f))); if (comp < -1.1507E-35f / cosf((var_11 / (-0.0f + (var_12 * (-1.7008E-17f * +1.6368E-37f / var_13)))))) { comp += (var_14 - (-1.2497E-44f - (-1.9869E-27f / var_15 / -1.7224E-43f))); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16); cudaDeviceSynchronize(); return 0; }
6,576
#ifdef __cplusplus
extern "C" {
#endif

// Hysteresis thresholding step of an edge detector: gradient magnitudes at or
// above highThresh become edges (EDGE), those at or below lowThresh are
// suppressed (0), and in-between values are kept only if they reach the
// midpoint of the two thresholds. One thread per pixel of a 2-D launch.
__global__ void hys_kernel( int* data, int* out, int rows, int cols)
{
    float lowThresh = 60;
    float highThresh = 170;

    int g_row = threadIdx.y + (blockIdx.y * blockDim.y);
    int g_col = threadIdx.x + (blockIdx.x * blockDim.x);

    // BUGFIX: guard against the rounded-up grid tail (previously absent).
    if (g_row >= rows || g_col >= cols)
        return;

    // BUGFIX: was g_col * cols + g_row, a transposed index that reads/writes
    // out of bounds whenever cols > rows; use row-major row*cols + col.
    int pos = g_row * cols + g_col;

    const int EDGE = 16777215;
    int magnitude = data[pos];

    if (magnitude >= highThresh)
        out[pos] = EDGE;
    else if (magnitude <= lowThresh)
        // BUGFIX: below the low threshold the pixel must be suppressed;
        // it was previously set to EDGE, defeating the low cutoff entirely.
        out[pos] = 0;
    else
    {
        float med = (highThresh + lowThresh)/2;
        if (magnitude >= med)
            out[pos] = EDGE;
        else
            out[pos] = 0;
    }
}

#ifdef __cplusplus
}
#endif
6,577
#include "cuda_runtime.h"

// Device- and host-side single-int buffers shared by the helpers below.
int *d_a,*d_b,*d_c;
int *h_a,*h_b,*h_c;

// Allocate the host/device scalars and zero the device accumulator d_c.
// NOTE(review): allocation results are unchecked and nothing is ever freed;
// callers must invoke init() exactly once before assign()/process()/get().
__host__ void init(){
    h_a = (int*)malloc(sizeof(int));
    h_b = (int*)malloc(sizeof(int));
    h_c = (int*)malloc(sizeof(int));
    *h_c=0;
    cudaMalloc((void**)&d_a,sizeof(int));
    cudaMalloc((void**)&d_b,sizeof(int));
    cudaMalloc((void**)&d_c,sizeof(int));
    cudaMemcpy(d_c,h_c,sizeof(int),cudaMemcpyHostToDevice);
}

// Stage the two operands on the device.
__host__ void assign(int a,int b){
    *h_a = a;
    *h_b = b;
    cudaMemcpy(d_a,h_a,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b,sizeof(int),cudaMemcpyHostToDevice);
}

// Accumulate on the device: *c += *a + *b (single thread).
__global__ void add(int *a,int *b,int *c){
    *c += *a + *b;
}

// Launch one accumulation step (asynchronous; get() synchronizes via memcpy).
__host__ void process(){
    add<<<1,1>>>(d_a,d_b,d_c);
}

// Copy the accumulator back to the host and return its value.
__host__ int get(){
    cudaMemcpy(h_c,d_c,sizeof(int),cudaMemcpyDeviceToHost);
    return *h_c;
}
6,578
/* Name: Paul Talaga
   Date: Nov 27, 2017
   Desc: Program to add two arrays using the GPU
         Only 1 thread in 1 block, so this will NOT be fast, but it is using the GPU
   To compile this, do:  nvcc  add-single.cu
*/

#include <iostream>

using namespace std;

// CUDA kernel function to add two arrays element by element.
// A single thread walks the whole array, so one <<<1,1>>> launch adds
// all elements in one call.
__global__ void add(int size, int* x, int* y, int* z){
    for(int i = 0; i < size; i++){
        z[i] = x[i] + y[i];
    }
}

int main(){
    // Size of the arrays we'll be adding
    const unsigned N = 100;

    // To use unified memory (CUDA takes care of data movement)
    // all memory must be allocated via the cudaMallocManaged call below.
    int* x;
    int* y;
    int* z;
    cudaMallocManaged(&x, N * sizeof(int));
    cudaMallocManaged(&y, N * sizeof(int));
    cudaMallocManaged(&z, N * sizeof(int));

    // Fill the arrays with numbers
    for(int i = 0; i < N; i++){
        x[i] = i;
        y[i] = 2 * i;
    }

    // Call the add function, with 1 block, and 1 thread
    add<<<1,1>>>(N, x, y, z);

    // Wait until the device is done before proceeding, otherwise we'd be
    // accessing x, y, and z in the loop below before the add function
    // completes on the device.
    cudaDeviceSynchronize();

    // Check to see if the math is correct
    int errors = 0;
    for(int i = 0; i < N; i++){
        if(z[i] != x[i] + y[i]){
            cout << i << " did not add correctly!" << endl;
            errors++;
        }
    }
    if(!errors)cout << "All good!" << endl;

    // BUGFIX: release the managed allocations before exit.
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    return 0;
}
6,579
#include "radonusfft.cuh" #include "kernels_radonusfft.cuh" #include <stdio.h> radonusfft::radonusfft(size_t N_, size_t Ntheta_, size_t Nz_) { N = N_; Ntheta = Ntheta_; Nz = Nz_; float eps = 1e-3; mu = -log(eps)/(2*N*N); M = ceil(2*N*1/PI*sqrt(-mu*log(eps)+(mu*N)*(mu*N)/4)); cudaMalloc((void**)&f,N*N*Nz*sizeof(float2)); cudaMalloc((void**)&g,N*Ntheta*Nz*sizeof(float2)); cudaMalloc((void**)&fde,(2*N+2*M)*(2*N+2*M)*Nz*sizeof(float2)); cudaMalloc((void**)&x,N*Ntheta*sizeof(float)); cudaMalloc((void**)&y,N*Ntheta*sizeof(float)); cudaMalloc((void**)&theta,Ntheta*sizeof(float)); int ffts[2]; int idist;int odist; int inembed[2];int onembed[2]; //fft 2d ffts[0] = 2*N; ffts[1] = 2*N; idist = (2*N+2*M)*(2*N+2*M);odist = (2*N+2*M)*(2*N+2*M); inembed[0] = 2*N+2*M; inembed[1] = 2*N+2*M; onembed[0] = 2*N+2*M; onembed[1] = 2*N+2*M; cufftPlanMany(&plan2d, 2, ffts, inembed, 1, idist, onembed, 1, odist, CUFFT_C2C, Nz); //fft 1d ffts[0] = N; idist = N;odist = N; inembed[0] = N;onembed[0] = N; cufftPlanMany(&plan1d, 1, ffts, inembed, 1, idist, onembed, 1, odist, CUFFT_C2C, Ntheta*Nz); } radonusfft::~radonusfft() { cudaFree(f); cudaFree(g); cudaFree(fde); cudaFree(x); cudaFree(y); cudaFree(theta); cufftDestroy(plan2d); cufftDestroy(plan1d); } void radonusfft::fwdR(float2* g_, float2* f_, float* theta_, cudaStream_t s) { dim3 BS2d(32,32); dim3 BS3d(32,32,1); dim3 GS2d0(ceil(N/(float)BS2d.x),ceil(Ntheta/(float)BS2d.y)); dim3 GS3d0(ceil(N/(float)BS3d.x),ceil(N/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); dim3 GS3d1(ceil(2*N/(float)BS3d.x),ceil(2*N/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); dim3 GS3d2(ceil((2*N+2*M)/(float)BS3d.x),ceil((2*N+2*M)/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); dim3 GS3d3(ceil(N/(float)BS3d.x),ceil(Ntheta/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); cudaMemcpyAsync(f,f_,N*N*Nz*sizeof(float2),cudaMemcpyDefault,s); cudaMemcpyAsync(theta,theta_,Ntheta*sizeof(float),cudaMemcpyDefault,s); cudaMemsetAsync(fde,0,(2*N+2*M)*(2*N+2*M)*Nz*sizeof(float2),s); takexy<<<GS2d0, 
BS2d,0,s>>>(x,y,theta,N,Ntheta); divphi<<<GS3d0, BS3d,0,s>>>(fde,f,mu,M,N,Nz); fftshiftc<<<GS3d2, BS3d,0,s>>>(fde,2*N+2*M,Nz); cufftSetStream(plan2d,s); cufftExecC2C(plan2d, (cufftComplex*)&fde[M+M*(2*N+2*M)],(cufftComplex*)&fde[M+M*(2*N+2*M)],CUFFT_FORWARD); fftshiftc<<<GS3d2, BS3d,0,s>>>(fde,2*N+2*M,Nz); wrap<<<GS3d2, BS3d,0,s>>>(fde,N,Nz,M); gather<<<GS3d3, BS3d,0,s>>>(g,fde,x,y,M,mu,N,Ntheta,Nz); fftshift1c<<<GS3d3, BS3d,0,s>>>(g,N,Ntheta,Nz); cufftSetStream(plan1d,s); cufftExecC2C(plan1d, (cufftComplex*)g,(cufftComplex*)g,CUFFT_INVERSE); fftshift1c<<<GS3d3, BS3d,0,s>>>(g,N,Ntheta,Nz); mulr<<<GS3d3,BS3d,0,s>>>(g,1.0f/(4*N*N*N*sqrt(N*Ntheta)),N,Ntheta,Nz); cudaMemcpyAsync(g_,g,N*Ntheta*Nz*sizeof(float2),cudaMemcpyDefault,s); } void radonusfft::adjR(float2* f_, float2* g_, float* theta_, bool filter, cudaStream_t s) { dim3 BS2d(32,32); dim3 BS3d(32,32,1); dim3 GS2d0(ceil(N/(float)BS2d.x),ceil(Ntheta/(float)BS2d.y)); dim3 GS3d0(ceil(N/(float)BS3d.x),ceil(N/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); dim3 GS3d1(ceil(2*N/(float)BS3d.x),ceil(2*N/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); dim3 GS3d2(ceil((2*N+2*M)/(float)BS3d.x),ceil((2*N+2*M)/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); dim3 GS3d3(ceil(N/(float)BS3d.x),ceil(Ntheta/(float)BS3d.y),ceil(Nz/(float)BS3d.z)); cudaMemcpyAsync(g,g_,N*Ntheta*Nz*sizeof(float2),cudaMemcpyDefault,s); cudaMemcpyAsync(theta,theta_,Ntheta*sizeof(float),cudaMemcpyDefault,s); cudaMemsetAsync(fde,0,(2*N+2*M)*(2*N+2*M)*Nz*sizeof(float2),s); takexy<<<GS2d0, BS2d,0,s>>>(x,y,theta,N,Ntheta); fftshift1c<<<GS3d3, BS3d,0,s>>>(g,N,Ntheta,Nz); cufftSetStream(plan1d,s); cufftExecC2C(plan1d, (cufftComplex*)g,(cufftComplex*)g,CUFFT_FORWARD); fftshift1c<<<GS3d3, BS3d,0,s>>>(g,N,Ntheta,Nz); if(filter) applyfilter<<<GS3d3, BS3d,0,s>>>(g,N,Ntheta,Nz); scatter<<<GS3d3, BS3d,0,s>>>(fde,g,x,y,M,mu,N,Ntheta,Nz); wrapadj<<<GS3d2, BS3d,0,s>>>(fde,N,Nz,M); fftshiftc<<<GS3d2, BS3d,0,s>>>(fde,2*N+2*M,Nz); cufftSetStream(plan2d,s); cufftExecC2C(plan2d, 
(cufftComplex*)&fde[M+M*(2*N+2*M)],(cufftComplex*)&fde[M+M*(2*N+2*M)],CUFFT_INVERSE); fftshiftc<<<GS3d2, BS3d,0,s>>>(fde,2*N+2*M,Nz); unpaddivphi<<<GS3d0, BS3d,0,s>>>(f,fde,mu,M,N,Nz); mulr<<<GS3d0,BS3d,0,s>>>(f,1.0f/(4*N*N*N*sqrt(N*Ntheta)),N,N,Nz); cudaMemcpyAsync(f_,f,N*N*Nz*sizeof(float2),cudaMemcpyDefault,s); }
6,580
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/*******************************************************************
 IPv4 Lookup with DIR-24-8-BASIC algorithm from Infocom'98 paper:
 <Routing Lookups in Hardware at Memory Access Speeds>
******************************************************************/

/* One lookup per job: the top 24 bits of each destination address index
 * tbl24 directly, and the low byte of the table entry is stored as the
 * result. Threads cover the job array with a grid-stride loop.
 * FIXME: only the tbl24 stage is implemented here. */
extern "C" __global__ void ipv4lookup(const uint32_t *input_buf,
                                      const uint64_t job_num,
                                      uint8_t *output_buf,
                                      const uint16_t *tbl24)
{
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    for (int job = first; job < job_num; job += stride) {
        const uint32_t prefix24 = input_buf[job] >> 8; /* top 24 bits */
        const uint16_t entry    = tbl24[prefix24];
        output_buf[job] = (uint8_t)entry;
    }
    return;
}

/**************************************************************************
 Exported C++ function wrapper function for CUDA kernel
***************************************************************************/
extern "C" void IPv4_Lookup(const uint32_t *input_buf, const uint32_t job_num,
                            uint8_t *output_buf, const uint16_t *tbl24,
                            const unsigned int threads_per_blk,
                            const unsigned int num_cuda_blks,
                            cudaStream_t stream)
{
    /* Stream 0 means "use the default stream": launch without the extra
     * stream argument; otherwise launch on the caller's stream. */
    if (stream == 0) {
        ipv4lookup<<<num_cuda_blks, threads_per_blk>>>(
            input_buf, job_num, output_buf, tbl24);
    } else {
        ipv4lookup<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
            input_buf, job_num, output_buf, tbl24);
    }
}
6,581
#include "FbpClass_Agent.cuh" #include <stdio.h> #define PI 3.1415926536f __global__ void InitU(float* u, const int N, const float du, const float offcenter) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < N) { u[tid] = (tid - (N - 1) / 2.0f) * du + offcenter; } } __global__ void InitBeta(float* beta, const int V, const float rotation) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid<V) { beta[tid] = (360.0f / V * tid + rotation) * PI / 180; } } __global__ void InitReconKernel_Hamming(float* reconKernel, const int N, const float du, const float t) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < 2* N - 1) { // the center element index is N-1 int n = tid - (N - 1); // ramp part if (n==0) reconKernel[tid] = t / (4 * du*du); else if (n%2 ==0) reconKernel[tid] = 0; else reconKernel[tid] = -t / (n*n * PI*PI * du*du); // cosine part int sgn = n % 2 == 0 ? 1 : -1; reconKernel[tid] += (1 - t)* (sgn / (2 * PI*du*du) * (1.0f / (1 + 2 * n) + 1.0f / (1 - 2 * n)) - 1 / (PI*PI*du*du) * (1.0f / (1 + 2 * n) / (1 + 2 * n) + 1.0f / (1 - 2 * n) / (1 - 2 * n))); } } __global__ void InitReconKernel_Quadratic(float* reconKernel, const int N, const float du, const int paramNum, const float p1, const float p2, const float p3) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < 2*N -1 ) { float a, b, c; float kn = 1 / (2 * du); if (paramNum==2) { // p1 = t, p2 = h, p3 is ignored a = (p2 - 1) / (kn*kn * (1 - 2 * p1)); b = -2 * a*p1*kn; c = 1.0f; } else { a = p1; b = p2; c = p3; } reconKernel[idx] = 0.0f; float du2 = du * du; float du3 = du2 * du; float du4 = du3 * du; int n = idx - (N - 1); if (n==0) { // H3(x) reconKernel[idx] += a / 32 / du4; // H2(x) reconKernel[idx] += b / 12 / du3; // H1(x) reconKernel[idx] += c / 4 / du2; } else if (n%2==0) { // H3(x) reconKernel[idx] += a * 3 / (8 * n*n * PI*PI * du4); // H2(x) reconKernel[idx] += b / (2 * n*n * PI*PI * du3); // H1(x) // do nothing, H1(even) is zero } else { // H3(x) reconKernel[idx] += a * 
3 / (8 * n*n * PI*PI * du4) * (4 /(n*n*PI*PI) - 1); // H2(x) reconKernel[idx] += -b / (2 * n*n * PI*PI * du3); // H1(x) reconKernel[idx] += -c / (n*n * PI*PI * du2); } } } __global__ void InitReconKernel_Polynomial(float* reconKernel, const int N, const float du, const float p6, const float p5, const float p4, const float p3, const float p2, const float p1, const float p0) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < 2 * N -1) { int n = idx - (N - 1); reconKernel[idx] = 0.0f; float kn = 1 / (2 * du); float du2 = du * du; float du3 = du2 * du; float du4 = du3 * du; if (n==0) { // H7(x) reconKernel[idx] += p6 * powf(kn, 8) / 4; // H6(x) reconKernel[idx] += p5 * powf(kn, 7) * 2 / 7; // H5(x) reconKernel[idx] += p4 * powf(kn, 6) / 3; // H4(x) reconKernel[idx] += p3 * powf(kn, 5) * 2 /5; // H3(x) reconKernel[idx] += p2 * powf(kn,4) / 2; // H2(x) reconKernel[idx] += p1 * 2 * kn*kn*kn / 3; // H1(x) reconKernel[idx] += p0 * kn*kn; } else if (n%2==0) { // H7(x) reconKernel[idx] += p6 * 7 * (360 - 30 * n*n*PI*PI + powf(n*PI, 4)) / (128 * du2* powf(du*n*PI, 6)); // H6(x) reconKernel[idx] += p5 * 3 * (120 - 20 * n*n*PI*PI + powf(n*PI, 4)) / (32 * du*powf(du*n*PI, 6)); // H5(x) reconKernel[idx] += p4 * 5 * (n*n*PI*PI - 12) / (32 * du2 *powf(du*n*PI, 4)); // H4(x) reconKernel[idx] += p3 * (n*n*PI*PI - 6) / (4 * du * powf(du*n*PI, 4)); // H3(x) reconKernel[idx] += p2 * 3 / (8 * du4 * n*n * PI*PI); // H2(x) reconKernel[idx] += p1 / (2 * n*n *PI*PI * du3); // H1(x) // do nothing, H1(even) is zero } else { // H7(x) reconKernel[idx] += p6 * 7 * (1440 - 360 * n*n*PI*PI + 30 * powf(n*PI, 4) - powf(n*PI, 6)) / (128 * powf(du*n*PI, 8)); // H6(x) reconKernel[idx] += -p5 * 3 * (120 - 20 * n*n*PI*PI + powf(n*PI, 4)) / (32 * du*powf(du*n*PI, 6)); // H5(x) reconKernel[idx] += -p4 * 5 * (48 - 12 * n*n*PI*PI + powf(n*PI, 4)) / (32 * powf(du*n*PI, 6)); // H4(x) reconKernel[idx] += p3 * (6 - n*n*PI*PI) / (4 * du * powf(du*n*PI, 4)); // H3(x) reconKernel[idx] += p2 * (4 - n * 
n*PI*PI) * 3 / (8 * powf(du*n*PI, 4)); // H2(x) reconKernel[idx] += -p1 / (2 * n*n *PI*PI * du3); // H1(x) reconKernel[idx] += -p0 / (n*n *PI*PI * du2); } } } // weight the sinogram data // sgm: sinogram (width x height x slice) // N: width // V: height (views) // S: slice // sdd: source to detector distance __global__ void WeightSinogram_device(float* sgm, const float* u, const int N, const int V, const int S, float sdd) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if (col<N && row <V) { for (int i = 0; i < S; i++) { sgm[row*N + col + i * N*V] *= sdd * sdd / sqrtf(u[col] * u[col] + sdd * sdd); } } } // convolve the sinogram data // sgm_flt: sinogram data after convolving // sgm: initial sinogram data // reconKernel: reconstruction kernel // N: sinogram width // H: sinogram height // V: number of views // S: number of slices // u: the position (coordinate) of each detector element // du: detector element size [mm] __global__ void ConvolveSinogram_device(float* sgm_flt, const float* sgm, float* reconKernel, const int N, const int H, const int V, const int S, const float* u, const float du) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if (col < N && row<V) { for (int slice = 0; slice < S; slice++) { sgm_flt[row*N + col + slice * N*V] = 0; for (int i = 0; i < N; i++) { sgm_flt[row*N + col + slice * N*V] += sgm[row*N + i + slice * N*H] * reconKernel[N - 1 - col + i]; } sgm_flt[row*N + col + slice * N*V] *= du; } } } // backproject the image using pixel-driven method // sgm: sinogram data // img: image data // U: each detector element position [mm] // beta: view angle [radius] // N: number of detector elements // V: number of views // S: number of slices // M: image dimension // sdd: source to detector distance [mm] // sid: source to isocenter distance [mm] // du: detector element size [mm] // dx: image pixel size [mm] // (xc, yc): image center position [mm, mm] 
__global__ void BackprojectPixelDriven_device(float* sgm, float* img, float* u, float* beta, const int N, const int V, const int S, const int M, const float sdd, const float sid, const float du, const float dx, const float xc, const float yc) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if (col<M && row<M) { float x = (col - (M - 1) / 2.0f)*dx + xc; float y = ((M - 1) / 2.0f - row)*dx + yc; float U, u0; float w; int k; for (int slice = 0; slice < S; slice++) { img[row*M + col + slice * M*M] = 0; for (int view = 0; view < V; view++) { U = sid - x * cosf(beta[view]) - y * sinf(beta[view]); u0 = sdd * (x*sinf(beta[view]) - y * cosf(beta[view])) / U; k = floorf((u0 - u[0]) / du); if (k<0 || k+1>N-1) { img[row*M + col + slice * M*M] = 0; break; } w = (u0 - u[k]) / du; img[row*M + col + slice * M*M] += sid / U / U * (w*sgm[view*N + k + 1 + slice * N*V] + (1 - w)*sgm[view*N + k + slice * N*V]); } img[row*M + col + slice * M*M] *= PI / V; } } } void InitializeU_Agent(float* &u, const int N, const float du, const float offcenter) { if (u!=nullptr) cudaFree(u); cudaMalloc((void**)&u, N * sizeof(float)); InitU <<<(N + 511) / 512, 512 >>> (u, N, du, offcenter); } void InitializeBeta_Agent(float* &beta, const int V, const float rotation) { if (beta!=nullptr) cudaFree(beta); cudaMalloc((void**)&beta, V * sizeof(float)); InitBeta<<< (V+511)/512, 512>>> (beta, V, rotation); } void InitializeReconKernel_Agent(float* &reconKernel, const int N, const float du, const std::string& kernelName, const std::vector<float>& kernelParam) { if (reconKernel!=nullptr) cudaFree(reconKernel); cudaMalloc((void**)&reconKernel, (2 * N - 1) * sizeof(float)); if (kernelName=="HammingFilter") { InitReconKernel_Hamming << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, kernelParam[0]); } else if (kernelName=="QuadraticFilter") { float lastParam = 0.0f; if (kernelParam.size() == 3) lastParam = kernelParam[2]; InitReconKernel_Quadratic << <(2 * 
N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, int(kernelParam.size()), kernelParam[0], kernelParam[1], lastParam); } else if (kernelName=="Polynomial") { // TODO: // InitReconKernel_Polynomial <<<...>>> (...); float p[7] = { 0 }; for (size_t i = 0; i < kernelParam.size(); i++) { p[i] = kernelParam[kernelParam.size() -1 - i]; } //InitReconKernel_Polynomial << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, p[0], p[1], p[2], p[3], p[4], p[5], p[6]); InitReconKernel_Polynomial << <(2 * N - 1 + 511) / 512, 512 >> > (reconKernel, N, du, p[6], p[5], p[4], p[3], p[2], p[1], p[0]); } } void MallocManaged_Agent(float * &p, const int size) { cudaMallocManaged((void**)&p, size); } void FilterSinogram_Agent(float * sgm, float* sgm_flt, float* reconKernel, float* u, mango::Config & config) { // Step 1: weight the sinogram dim3 grid((config.sgmWidth + 15) / 16, (config.sgmHeight + 15) / 16); dim3 block(16, 16); WeightSinogram_device << <grid, block >> >(sgm, u, config.sgmWidth, config.sgmHeight, config.sliceCount, config.sdd); // Step 2: convolve the sinogram ConvolveSinogram_device << <grid, block >> > (sgm_flt, sgm, reconKernel, config.sgmWidth, config.sgmHeight, config.views, config.sliceCount, u, config.detEltSize); cudaDeviceSynchronize(); } void BackprojectPixelDriven_Agent(float * sgm_flt, float * img, float * u, float* beta, mango::Config & config) { dim3 grid((config.imgDim + 15) / 16, (config.imgDim + 15) / 16); dim3 block(16, 16); BackprojectPixelDriven_device<<<grid,block>>>(sgm_flt, img, u, beta, config.sgmWidth, config.views, config.sliceCount, config.imgDim, config.sdd, config.sid, config.detEltSize, config.pixelSize, config.xCenter, config.yCenter); cudaDeviceSynchronize(); } void FreeMemory_Agent(float* &p) { cudaFree(p); p = nullptr; }
6,582
/*
 * Copyright 2014 Nervana Systems Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// nvcc -arch sm_50 -cubin spool_avg.cu

// Average-pooling kernel shell for cubin generation.
// NOTE(review): the body is a placeholder — it ignores every input
// parameter and merely writes each thread's index into param_O. The
// parameter list defines the ABI the runtime assembles against; the real
// pooling implementation is not in this file.
extern "C"
__global__ void spool_avg (
    const float* param_I,
    float*       param_O,
    float*       param_B,
    int param_mode,
    int param_N,
    int param_W,
    int param_H,
    int param_D,
    int param_C,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_P,
    int param_magic_P,
    int param_shift_P,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_pad_j,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_j,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_S,
    int param_RS,
    int param_RST,
    int param_JRST,
    int param_magic_S,
    int param_shift_S,
    int param_magic_RS,
    int param_shift_RS,
    int param_magic_RST,
    int param_shift_RST,
    int param_overlap
    )
{
    // placeholder body only
    param_O[threadIdx.x] = threadIdx.x;
}
6,583
//pass
//--blockDim=2048 --gridDim=64

// GPUVerify regression test: verifies that a member inherited through a
// chain of empty derived structs (u -> t -> s) is handled correctly.
// The two comment lines above are tool directives (expected outcome and
// launch configuration) — keep them as the first lines of the file.

struct s {
  int x;
};

struct t : s {
};

struct u : t {
};

// Each thread copies q.x into p.x; both are by-value parameters, so there
// is no shared state and no race to report.
__global__ void foo(u p, u q) {
  p.x = q.x;
}
6,584
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

#define BLOCK_SIZE 512

// Block-level sum reduction.
// Each thread loads up to two input elements (at vec_pos and at
// vec_pos + gridDim.x*BLOCK_SIZE, matching the original two-element load
// scheme), the block reduces them in shared memory, and thread 0 writes the
// block's partial sum to out[blockIdx.x]. Out-of-range slots contribute 0.
//
// Fixes over the original:
//  - tree step used `threadIdx.x + stride < BLOCK_SIZE`, which is not a
//    reduction tree (it raced and summed the wrong elements);
//  - __syncthreads() was inside divergent branches (undefined behavior);
//  - shared slots for tail threads were never initialized;
//  - every thread wrote out[blockIdx.x] instead of just thread 0.
__global__ void reduction(float *out, float *in, unsigned size)
{
    __shared__ float temp[BLOCK_SIZE];

    unsigned tid = threadIdx.x;
    unsigned vec_pos = blockIdx.x * blockDim.x + tid;
    unsigned offset = gridDim.x * BLOCK_SIZE;

    // Load phase: zero-pad past the end so every shared slot is defined.
    float sum = 0.0f;
    if (vec_pos < size)
        sum = in[vec_pos];
    if (vec_pos + offset < size)
        sum += in[vec_pos + offset];
    temp[tid] = sum;
    __syncthreads();

    // Tree phase: halve the active range each step; the barrier is reached
    // by ALL threads of the block on every iteration.
    for (unsigned stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            temp[tid] += temp[tid + stride];
        __syncthreads();
    }

    // One writer per block.
    if (tid == 0)
        out[blockIdx.x] = temp[0];
}
6,585
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda.h>

#define THREADS 64
#define BLOCKS 16
#define SIZE 512

// Element-wise vector addition: c[idx] = a[idx] + b[idx].
// Fix: the original guard was `idx > SIZE`, which let idx == SIZE through
// and wrote one element past the end of each array (the launch spawns
// BLOCKS*THREADS = 1024 threads for SIZE = 512 elements).
__global__ void add(int* a, int* b, int* c)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= SIZE)
        return;
    c[idx] = a[idx] + b[idx];
}

// Fill all SIZE entries of h_v with the value numb.
void init(int* h_v, int numb)
{
    for (int i = 0; i < SIZE; i++) {
        h_v[i] = numb;
    }
}

int main( void )
{
    int *result, *h_a, *h_b;
    int *dev_a, *dev_b, *dev_c;
    int size = SIZE * sizeof(int);

    result = (int*) malloc( size );
    h_a = (int*) malloc( size );
    h_b = (int*) malloc( size );

    init(h_a, 5);
    init(h_b, 5);
    memset(result, 0, size);

    cudaMalloc( &dev_a, size );
    cudaMalloc( &dev_b, size );
    cudaMalloc( &dev_c, size );

    // copy the inputs to device memory
    cudaMemcpy( dev_a, h_a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, h_b, size, cudaMemcpyHostToDevice );
    cudaMemset( dev_c, 0, size );

    add<<<BLOCKS, THREADS>>>( dev_a, dev_b, dev_c );

    // copy the result back to host memory (blocking, so it also
    // synchronizes with the kernel)
    cudaMemcpy( result, dev_c, size, cudaMemcpyDeviceToHost );

    for (int i = 0; i < SIZE; i++) {
        fprintf(stdout, " %d ", result[i]);
        if ((i + 1) % 10 == 0)
            fprintf(stdout, "%s\n", "");
    }
    fprintf(stdout, "%s\n", "");

    free(h_a), free(h_b), free(result);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
6,586
#include <stdio.h>

/* Minimal host-only program: prints a greeting from the CPU. */
int main(void)
{
    printf("Hello World from CPU!\n");
    return 0;
}
6,587
#include <stdio.h>
#include <iostream>

// Write blockIdx.x + threadIdx.x into each thread's slot of arr.
// threadSize is the number of threads per block, i.e. the row width of
// the flattened (block, thread) layout.
__global__ void sum(int* arr, int threadSize)
{
    arr[(blockIdx.x * threadSize) + threadIdx.x] = blockIdx.x + threadIdx.x;
}

int main()
{
    const size_t blockSize = 2;   // blocks in the grid
    const size_t threadSize = 8;  // threads per block

    int* dArray;
    int* hostArray = (int*)malloc(blockSize * threadSize * sizeof(int));
    cudaMalloc(&dArray, blockSize * threadSize * sizeof(int));

    // Launch with the same geometry the constants above describe.
    sum<<<blockSize, threadSize>>>(dArray, threadSize);
    cudaDeviceSynchronize();

    cudaMemcpy(hostArray, dArray, blockSize * threadSize * sizeof(int),
               cudaMemcpyDeviceToHost);

    for (int i = 0; i < threadSize * blockSize; i++) {
        printf("%d\n", hostArray[i]);
    }

    // Fix: release both allocations (the original leaked hostArray and dArray).
    cudaFree(dArray);
    free(hostArray);
    return 0;
}
6,588
#include <stdio.h>
#include <cuda_runtime.h>

#define M_SIZE 32
#define BLOCK_SIZE 16

//Calculate the multiplication of two 32*32 matrices A and B in gpu and store the result in C.
//Each block calculate 16*16 submaxtrix of C.
__global__ void Mul(int *A,int *B,int *C,int width_A,int width_B)
{
	// Block index
	int bx = blockIdx.x;
	int by = blockIdx.y;

	// Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	//Index of the first sub-matrix of A and B processed by the block
	int aBegin=width_A*BLOCK_SIZE*by;
	int bBegin=BLOCK_SIZE*bx;

	//Csub of each thread stores corresponding element of the block sub-matrix.
	int Csub=0;
	int a=aBegin,b=bBegin;

	// Shared memory for the sub-matrix of A
	__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
	// Shared memory for the sub-matrix of B
	__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];

	// March the A-tile right across its block row and the B-tile down its
	// block column, one BLOCK_SIZE-wide tile per iteration.
	// NOTE(review): the `a<aBegin+width_A-1` bound visits the right number
	// of tiles only because width_A is a multiple of BLOCK_SIZE here.
	for (;a<aBegin+width_A-1;a+=BLOCK_SIZE,b+=BLOCK_SIZE*width_B)
	{
		// Load the matrices from global memory to shared memory;
		// each thread loads one element of each matrix
		As[ty][tx] = A[a+width_A*ty + tx];
		Bs[ty][tx] = B[b+width_B*ty + tx];

		// Synchronize to make sure the matrices are loaded
		__syncthreads();

		// Accumulate the partial dot product for this tile pair
		for (int k=0;k<BLOCK_SIZE;k++)
			Csub+=As[ty][k]*Bs[k][tx];

		//Make sure computation is done before loading new matrices for global memory.
		__syncthreads();
	}

	//Write the block sub-matrix to global memory.
	int c=width_B*BLOCK_SIZE*by+BLOCK_SIZE*bx;
	C[c+width_B*ty+tx] = Csub;
}

int main()
{
	int size = M_SIZE*M_SIZE*sizeof(int);
	int *h_A=(int*)malloc(size);
	int *h_B=(int*)malloc(size);
	int *h_C=(int*)malloc(size);
	int *d_A,*d_B,*d_C;
	int i, j;

	//Intialize A,B,C
	for(i=0;i<M_SIZE*M_SIZE;i++)
	{
		h_A[i]=1;
		h_B[i]=1;
		h_C[i]=0;
	}

	//Allocate the memory in GPU to store the content of A,B,C
	cudaMalloc((void **)&d_A, size);
	cudaMalloc((void **)&d_B, size);
	cudaMalloc((void **)&d_C, size);

	//Copy h_A, h_B to d_A,d_B
	cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

	//Allocate 4 blocks and 256 threads per block.
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(2,2);
	Mul<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, M_SIZE, M_SIZE);

	// blocking copy back to the host (also synchronizes with the kernel)
	cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

	//print the result
	for(i=0; i<M_SIZE; i++)
	{
		for(j=0; j<M_SIZE; j++)
		{
			printf("%d ", h_C[i*M_SIZE+j]);
		}
		printf("\n");
	}
	printf("Press Enter to stop\n");
	getchar();

	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	free(h_A);
	free(h_B);
	free(h_C);
	return 0;
}
6,589
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>

// Build a histogram of x[0..n) into hist; values are bin indices.
// Threads cover the input with a stride of blks*blockDim.x.
// Fix: the original incremented hist[x[thread_id]] with a plain ++, a data
// race across threads that undercounts bins; the increment must be atomic.
__global__ void soma1(int x[], int hist[], int n, int blks)
{
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    while (thread_id < n) {
        atomicAdd(&hist[x[thread_id]], 1);
        thread_id += blks * blockDim.x;
    }
}

/*
 * arguments
 * 1 - number of elements
 * 2 - threads per block
 * 3 - number of blocks
 * 4 - print flag
 */
int main(int argc, char* argv[])
{
    int n, th_p_blk;
    int *h_x;
    int *d_x;
    int *d_hist, *h_hist, *h_hist_res;
    size_t size, size_hist;
    int range = 10;      // number of histogram bins
    int print = 0;

    th_p_blk = 1024;
    n = 1024;
    if (argc > 1) n = atoi(argv[1]);
    if (argc > 2) th_p_blk = atoi(argv[2]);
    int blks = ceil((float)n/(float)th_p_blk);
    if (argc > 3) blks = atoi(argv[3]);
    if (argc > 4) print = atoi(argv[4]);

    size = n*sizeof(int);
    size_hist = n*sizeof(int);

    // Allocate memory for the vectors on host memory.
    h_x = (int*) malloc(size);
    h_hist_res = (int*) malloc(size_hist);
    h_hist = (int*) malloc(size_hist);

    // Zero the bins and build the CPU reference histogram.
    for (int i = 0; i < range; i++) {
        h_hist[i] = 0;
        h_hist_res[i] = 0;
    }
    for (int i = 0; i < n; i++) {
        h_x[i] = (int)rand()%range;
        h_hist_res[h_x[i]] += 1;
    }

    /* Allocate vectors in device memory */
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_hist, size_hist);

    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_hist, h_hist, size_hist, cudaMemcpyHostToDevice);

    clock_t Ticks[2];
    Ticks[0] = clock();

    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    cudaEventRecord (start, 0); // 0 is the stream number

    /* Kernel Call */
    soma1<<<blks,th_p_blk>>>(d_x, d_hist, n, blks);

    cudaEventRecord (stop, 0);
    cudaEventSynchronize (stop);
    float elapsedTime;
    cudaEventElapsedTime (&elapsedTime, start, stop);
    //printf ("Total GPU Time: %.5f ms \n", elapsedTime);
    printf ("[%d,%.5f],\n", n, elapsedTime);
    cudaEventDestroy(start);

    // cudaThreadSynchronize() is deprecated; same semantics.
    cudaDeviceSynchronize();
    cudaMemcpy(h_hist, d_hist, size, cudaMemcpyDeviceToHost);

    // Verify the GPU histogram against the CPU reference.
    bool certo = true;
    for (int i = 0; i < range; i++) {
        if (h_hist[i] != h_hist_res[i])
            certo = false;
    }
    // printf("\n*****\n certo = %s\n*****\n", certo ? "true" : "false");

    /* Free device memory */
    cudaFree(d_x);
    cudaFree(d_hist);

    /* Free host memory */
    free(h_x);
    free(h_hist);
    free(h_hist_res);

    return 0;
} /* main */
6,590
// Auto-generated device code (CUDAfy.NET, decompiled from MyCudafy.CudafyMulti).
// The numN/numeric local names come from the decompiler; treat this file as
// generated output rather than hand-written code.

// MyCudafy.CudafyMulti
extern "C" __global__ void LaplaceSolver( double* prev, int prevLen0, double* next, int nextLen0, int* sizes, int sizesLen0, int* extV, int extVLen0, int* intV, int intVLen0, double* w, int wLen0, double* b, int bLen0, double* c, int cLen0);
// MyCudafy.CudafyMulti
extern "C" __global__ void LaplaceSolverWithRelax( double* array, int arrayLen0, int* sizes, int sizesLen0, int* extV, int extVLen0, int* intV, int intVLen0, double* w, int wLen0, double* b, int bLen0, double* c, int cLen0, int p);
// MyCudafy.CudafyMulti
extern "C" __global__ void Copy( double* prev, int prevLen0, double* next, int nextLen0);
// MyCudafy.CudafyMulti
extern "C" __global__ void Clear( double* array, int arrayLen0);
// MyCudafy.CudafyMulti
extern "C" __global__ void Square( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0);
// MyCudafy.CudafyMulti
extern "C" __global__ void Delta( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0);
// MyCudafy.CudafyMulti
extern "C" __global__ void Max( double* prev, int prevLen0, double* next, int nextLen0);
// MyCudafy.CudafyMulti
extern "C" __global__ void Sum( double* prev, int prevLen0, double* next, int nextLen0);

// Constant-memory buffers emitted by CUDAfy; the *Len0 macros mirror the
// .NET array lengths.
// MyCudafy.CudafyMulti
__constant__ double _a[100];
#define _aLen0 100
// MyCudafy.CudafyMulti
__constant__ double _b[1];
#define _bLen0 1
// MyCudafy.CudafyMulti
__constant__ double _c[1];
#define _cLen0 1
// MyCudafy.CudafyMulti
__constant__ int _sizes[2];
#define _sizesLen0 2
// MyCudafy.CudafyMulti
__constant__ double _lengths[2];
#define _lengthsLen0 2
// MyCudafy.CudafyMulti
__constant__ int _intV[3];
#define _intVLen0 3
// MyCudafy.CudafyMulti
__constant__ int _extV[3];
#define _extVLen0 3
// MyCudafy.CudafyMulti
__constant__ double _w[3];
#define _wLen0 3

// Jacobi-style Laplace sweep over the interior points: reads prev, writes
// the stencil result to next, and accumulates per-thread sums of squared
// differences (b) and squared sums (c) for convergence checks.
// MyCudafy.CudafyMulti
extern "C" __global__ void LaplaceSolver( double* prev, int prevLen0, double* next, int nextLen0, int* sizes, int sizesLen0, int* extV, int extVLen0, int* intV, int intVLen0, double* w, int wLen0, double* b, int bLen0, double* c, int cLen0)
{
	double num = 0.0;
	double num2 = 0.0;
	// grid-stride loop over interior points; intV[sizesLen0] holds the count
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < intV[(sizesLen0)]; i += blockDim.x * gridDim.x)
	{
		// decode the flat interior index i into a linear offset num3 into
		// the padded grid (one border layer per dimension)
		int num3 = 0;
		int j = 0;
		int num4 = i;
		while (j < sizesLen0)
		{
			int num5 = 1 + num4 % (sizes[(j)] - 2);
			num3 += num5 * extV[(j)];
			num4 /= sizes[(j)] - 2;
			j++;
		}
		double num6 = prev[(num3)];
		// weighted stencil: center weight w[sizesLen0], neighbor weights w[k]
		double num7 = num6 * w[(sizesLen0)];
		for (int k = 0; k < sizesLen0; k++)
		{
			num7 += (prev[(num3 - extV[(k)])] + prev[(num3 + extV[(k)])]) * w[(k)];
		}
		next[(num3)] = num7;
		double num8 = num6 - num7;
		double num9 = num6 + num7;
		num8 *= num8;
		num9 *= num9;
		num += num8;
		num2 += num9;
	}
	b[(blockDim.x * blockIdx.x + threadIdx.x)] = num;
	c[(blockDim.x * blockIdx.x + threadIdx.x)] = num2;
}

// Red-black (checkerboard) Gauss-Seidel sweep in place over array: only
// points whose coordinate-sum parity equals p are updated this call.
// Convergence accumulators in b/c are read-modify-written so both colors
// contribute across the two calls.
// MyCudafy.CudafyMulti
extern "C" __global__ void LaplaceSolverWithRelax( double* array, int arrayLen0, int* sizes, int sizesLen0, int* extV, int extVLen0, int* intV, int intVLen0, double* w, int wLen0, double* b, int bLen0, double* c, int cLen0, int p)
{
	double num = b[(blockDim.x * blockIdx.x + threadIdx.x)];
	double num2 = c[(blockDim.x * blockIdx.x + threadIdx.x)];
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < intV[(sizesLen0)]; i += blockDim.x * gridDim.x)
	{
		int num3 = 0;
		int num4 = 0;   // coordinate sum, used for the parity test
		int j = 0;
		int num5 = i;
		while (j < sizesLen0)
		{
			int num6 = 1 + num5 % (sizes[(j)] - 2);
			num4 += num6;
			num3 += num6 * extV[(j)];
			num5 /= sizes[(j)] - 2;
			j++;
		}
		if (num4 % 2 == p)
		{
			double num7 = array[(num3)];
			double num8 = num7 * w[(sizesLen0)];
			for (int k = 0; k < sizesLen0; k++)
			{
				num8 += (array[(num3 - extV[(k)])] + array[(num3 + extV[(k)])]) * w[(k)];
			}
			array[(num3)] = num8;
			double num9 = num7 - num8;
			double num10 = num7 + num8;
			num9 *= num9;
			num10 *= num10;
			num += num9;
			num2 += num10;
		}
	}
	b[(blockDim.x * blockIdx.x + threadIdx.x)] = num;
	c[(blockDim.x * blockIdx.x + threadIdx.x)] = num2;
}

// Element-wise copy prev -> next (grid-stride).
// MyCudafy.CudafyMulti
extern "C" __global__ void Copy( double* prev, int prevLen0, double* next, int nextLen0)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < prevLen0; i += blockDim.x * gridDim.x)
	{
		next[(i)] = prev[(i)];
	}
}

// Zero every element of array (grid-stride).
// MyCudafy.CudafyMulti
extern "C" __global__ void Clear( double* array, int arrayLen0)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < arrayLen0; i += blockDim.x * gridDim.x)
	{
		array[(i)] = 0.0;
	}
}

// delta[i] = next[i]^2 (grid-stride). prev is unused by the body.
// MyCudafy.CudafyMulti
extern "C" __global__ void Square( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < prevLen0; i += blockDim.x * gridDim.x)
	{
		double num = next[(i)];
		num *= num;
		delta[(i)] = num;
	}
}

// delta[i] = (next[i] * (prev[i] - next[i]))^2 (grid-stride).
// MyCudafy.CudafyMulti
extern "C" __global__ void Delta( double* prev, int prevLen0, double* next, int nextLen0, double* delta, int deltaLen0)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < prevLen0; i += blockDim.x * gridDim.x)
	{
		double num = next[(i)] * (prev[(i)] - next[(i)]);
		num *= num;
		delta[(i)] = num;
	}
}

// Strided partial maximum: next[i] = max over prev[i], prev[i+nextLen0], ...
// NOTE(review): starts from 0.0, so this yields 0 if all inputs are negative;
// upstream usage presumably feeds non-negative values (squares).
// MyCudafy.CudafyMulti
extern "C" __global__ void Max( double* prev, int prevLen0, double* next, int nextLen0)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < nextLen0; i += blockDim.x * gridDim.x)
	{
		next[(i)] = 0.0;
		int num = 0;
		while (num * nextLen0 + i < prevLen0)
		{
			int num2 = num * nextLen0 + i;
			if (prev[(num2)] > next[(i)])
			{
				next[(i)] = prev[(num2)];
			}
			num++;
		}
	}
}

// Strided partial sum: next[i] = prev[i] + prev[i+nextLen0] + ...
// MyCudafy.CudafyMulti
extern "C" __global__ void Sum( double* prev, int prevLen0, double* next, int nextLen0)
{
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < nextLen0; i += blockDim.x * gridDim.x)
	{
		next[(i)] = 0.0;
		int num = 0;
		while (num * nextLen0 + i < prevLen0)
		{
			int num2 = num * nextLen0 + i;
			next[(i)] += prev[(num2)];
			num++;
		}
	}
}
6,591
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

/* Number of integration bins, shared by the CPU and GPU versions. */
const int N = 1024 * 1024 * 64;

#define NBIN N           // Number of bins
#define NUM_BLOCK 64     // Number of thread blocks
#define NUM_THREAD 256   // Number of threads per block

/* Abort with file/line context if a CUDA runtime call fails.  Kernel launch
 * errors do not surface through the launch itself, so the launch below is
 * followed by an explicit cudaGetLastError() check. */
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

/**
 * CPU reference: midpoint-rule integration of 4/(1+x^2) over [a, b] with
 * N bins.  With a = 0, b = 1 the integral approximates pi.
 *
 * @param a        lower integration bound
 * @param b        upper integration bound
 * @param integral out-parameter receiving the integral value
 */
void pi_by_cpu(double a, double b, double *integral)
{
    double x, acc = 0.0;
    for (int i = 0; i < N; i++) {
        x = a + (double)(b - a) / N * (i + 0.5);
        acc += 4 / (1 + x * x);
    }
    acc *= (double)(b - a) / N;
    *integral = acc;
}

/**
 * GPU partial sums of the pi integrand over [0, 1].
 * Grid-stride loop: each thread accumulates its share of bins into sum[idx].
 * Precondition: sum[] must be zero-initialised by the caller (cudaMemset),
 * because the kernel accumulates with += rather than assigning.
 */
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks)
{
    double x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // sequential index across all blocks
    for (int i = idx; i < nbin; i += nthreads * nblocks) {
        x = (i + 0.5) * step;
        sum[idx] += 4.0 / (1.0 + x * x);
    }
}

/* Host driver: compute pi on the CPU, then on the GPU, and report timings. */
int main(void)
{
    /* ---- CPU version ---- */
    double a = 0, b = 1;
    double integral;
    clock_t clockBegin = clock();
    pi_by_cpu(a, b, &integral);
    clock_t clockEnd = clock();
    double duration = (double)1000 * (clockEnd - clockBegin) / CLOCKS_PER_SEC;
    printf("CPU Result: %.11lf\n", integral);
    printf("CPU Elapsed time: %.6lfms\n\n", duration);

    /* ---- GPU version ---- */
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start, 0));
    float tm;

    dim3 dimGrid(NUM_BLOCK, 1, 1);    // Grid dimensions
    dim3 dimBlock(NUM_THREAD, 1, 1);  // Block dimensions
    double step = 1.0 / NBIN;         // Bin width
    size_t size = NUM_BLOCK * NUM_THREAD * sizeof(double);  // one partial sum per thread

    double *sumHost = (double *)malloc(size);       // host copy of the partial sums
    double *sumDev;
    CUDA_CHECK(cudaMalloc((void **)&sumDev, size));
    CUDA_CHECK(cudaMemset(sumDev, 0, size));        // kernel accumulates, so start at 0

    cal_pi<<<dimGrid, dimBlock>>>(sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK);
    CUDA_CHECK(cudaGetLastError());                 // catch launch-configuration errors

    /* Blocking copy; also synchronises with the kernel. */
    CUDA_CHECK(cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost));

    /* Reduce the per-thread partial sums on the host. */
    double pi = 0;
    for (int tid = 0; tid < NUM_THREAD * NUM_BLOCK; tid++)
        pi += sumHost[tid];
    pi *= step;

    printf("GPU Result: %.11lf\n", pi);
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&tm, start, stop));
    printf("GPU Elapsed time:%.6f ms.\n\n", tm);

    /* Cleanup */
    free(sumHost);
    CUDA_CHECK(cudaFree(sumDev));
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));

    printf("Press to exit.\n");
    getchar();
    return 0;
}
6,592
#include "includes.h" __global__ void Subsample_Bilinear_ushort2(cudaTextureObject_t ushort2_tex, ushort2 *dst, int dst_width, int dst_height, int dst_pitch2, int src_width, int src_height) { int xo = blockIdx.x * blockDim.x + threadIdx.x; int yo = blockIdx.y * blockDim.y + threadIdx.y; if (yo < dst_height && xo < dst_width) { float hscale = (float)src_width / (float)dst_width; float vscale = (float)src_height / (float)dst_height; float xi = (xo + 0.5f) * hscale; float yi = (yo + 0.5f) * vscale; // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv} float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f); float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f); // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh} float dx = wh / (0.5f + wh); float dy = wv / (0.5f + wv); ushort2 c0 = tex2D<ushort2>(ushort2_tex, xi-dx, yi-dy); ushort2 c1 = tex2D<ushort2>(ushort2_tex, xi+dx, yi-dy); ushort2 c2 = tex2D<ushort2>(ushort2_tex, xi-dx, yi+dy); ushort2 c3 = tex2D<ushort2>(ushort2_tex, xi+dx, yi+dy); int2 uv; uv.x = ((int)c0.x+(int)c1.x+(int)c2.x+(int)c3.x+2) >> 2; uv.y = ((int)c0.y+(int)c1.y+(int)c2.y+(int)c3.y+2) >> 2; dst[yo*dst_pitch2+xo] = make_ushort2((unsigned short)uv.x, (unsigned short)uv.y); } }
6,593
#include <cuda.h>
#include <iostream>
#include <cstdlib>
#include <sys/time.h>

#define TILE_SX 32
#define TILE_SY 32
#define TILES_X 10
#define TILES_Y 10

/* Central difference along x: neighbours are at v[+1] / v[-1].
 * (Float literals: the originals used 0.5 / 0.25 double literals, forcing
 * double promotion inside float device code.) */
float __device__ dx(const float *v)
{
    return 0.5f * (v[+1] - v[-1]);
}

/* Central difference along y; dj is the row stride of the array. */
float __device__ dy(const float *v, const int dj)
{
    return 0.5f * (v[dj] - v[-dj]);
}

/* Mixed derivative d2/dxdy via the 4-point cross-difference stencil. */
float __device__ dxy(const float *v, const int dj)
{
    return 0.25f * ((v[dj + 1] - v[dj - 1]) - (v[-dj + 1] - v[-dj - 1]));
}

#define CUDA_CHECK do { \
    cudaError res = cudaGetLastError(); \
    if (res != cudaSuccess) { \
        std::cerr << "CUDA Failure at " << __LINE__ << " " << cudaGetErrorString(res) << "\n"; \
        exit(1); \
    } \
} while (0)

/* Tiled derivative kernel: one block per TILE_SX x TILE_SY tile.  The data
 * is laid out tile-contiguously (each tile is a dense TILE_SX*TILE_SY chunk),
 * so the row stride within a tile is TILE_SX.  Border threads of a tile
 * produce no output.
 *
 * Fixes vs. the original:
 *  - The row stride passed to dy() was TILE_SY; that only worked because the
 *    tile happens to be square.  It is now TILE_SX, the actual row stride.
 *  - tile_x is written only for interior x columns, but v_xy was computed for
 *    every x, reading uninitialised shared memory at x == 0 and x == TILE_SX-1.
 *    v_xy is now guarded to interior x as well. */
void __global__ derivs(const float *v, float *v_x, float *v_y, float *v_xy)
{
    float __shared__ tile[TILE_SX * TILE_SY];    // cached input tile
    float __shared__ tile_x[TILE_SX * TILE_SY];  // x-derivative of the tile

    int tidx = threadIdx.x + threadIdx.y * TILE_SX;
    int idx = tidx + blockIdx.x * TILE_SX * TILE_SY
                   + blockIdx.y * TILES_X * TILE_SX * TILE_SY;

    tile[tidx] = v[idx];
    __syncthreads();  // tile fully loaded before any neighbour reads

    const bool interior_x = threadIdx.x > 0 && threadIdx.x < TILE_SX - 1;
    if (interior_x) {
        v_x[idx] = tile_x[tidx] = dx(&tile[tidx]);
    }
    __syncthreads();  // tile_x ready before dy() reads it below

    if (threadIdx.y > 0 && threadIdx.y < TILE_SY - 1) {
        v_y[idx] = dy(&tile[tidx], TILE_SX);
        if (interior_x) {
            v_xy[idx] = dy(&tile_x[tidx], TILE_SX);
        }
    }
}

/* Reference kernel: same derivatives computed directly from global memory,
 * interior points only. */
void __global__ derivs_naive(const float *v, float *v_x, float *v_y, float *v_xy)
{
    int idx = threadIdx.x + threadIdx.y * TILE_SX
            + blockIdx.x * TILE_SX * TILE_SY
            + blockIdx.y * TILES_X * TILE_SX * TILE_SY;
    if (threadIdx.x > 0 && threadIdx.x < TILE_SX - 1 &&
        threadIdx.y > 0 && threadIdx.y < TILE_SY - 1) {
        v_x[idx]  = dx(&v[idx]);
        v_y[idx]  = dy(&v[idx], TILE_SX);   // row stride is TILE_SX (was TILE_SY)
        v_xy[idx] = dxy(&v[idx], TILE_SX);
    }
}

/* Wall-clock time in seconds (microsecond resolution). */
float now()
{
    timeval t;
    gettimeofday(&t, NULL);
    return t.tv_sec + t.tv_usec * 1e-6;
}

int main(void)
{
    const size_t count = TILE_SX * TILE_SY * TILES_X * TILES_Y;

    float *v    = (float *)calloc(count, sizeof(*v));
    float *v_x  = (float *)calloc(count, sizeof(*v_x));
    float *v_y  = (float *)calloc(count, sizeof(*v_y));
    float *v_xy = (float *)calloc(count, sizeof(*v_xy));

    float *d_v, *d_v_x, *d_v_y, *d_v_xy;
    cudaMalloc(&d_v,    count * sizeof(*v));    CUDA_CHECK;
    cudaMalloc(&d_v_x,  count * sizeof(*v_x));  CUDA_CHECK;
    cudaMalloc(&d_v_y,  count * sizeof(*v_y));  CUDA_CHECK;
    cudaMalloc(&d_v_xy, count * sizeof(*v_xy)); CUDA_CHECK;

    /* Bias the on-chip split toward shared memory for the tiled kernel. */
    cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);

    for (int j = 0; j < 2; j++) {
        {
            cudaMemcpy(d_v, v, count * sizeof(*v), cudaMemcpyHostToDevice);
            CUDA_CHECK;
            dim3 dimBlock(TILE_SX, TILE_SY, 1);
            dim3 dimGrid(TILES_X, TILES_Y, 1);
            double start = now();
            // TODO: run once without timing to exclude initialization cost
            for (int i = 0; i < 1000; i++) {
                derivs<<<dimGrid, dimBlock>>>(d_v, d_v_x, d_v_y, d_v_xy);
            }
            double end = now();
            std::cout << "tiled took " << (end - start) << " s\n";
            CUDA_CHECK;
            cudaMemcpy(v_x,  d_v_x,  count * sizeof(*v_x),  cudaMemcpyDeviceToHost); CUDA_CHECK;
            cudaMemcpy(v_y,  d_v_y,  count * sizeof(*v_y),  cudaMemcpyDeviceToHost); CUDA_CHECK;
            cudaMemcpy(v_xy, d_v_xy, count * sizeof(*v_xy), cudaMemcpyDeviceToHost); CUDA_CHECK;
        }
        {
            cudaMemcpy(d_v, v, count * sizeof(*v), cudaMemcpyHostToDevice);
            CUDA_CHECK;
            dim3 dimBlock(TILE_SX, TILE_SY);
            dim3 dimGrid(TILES_X, TILES_Y);
            double start = now();
            // TODO: run once without timing to exclude initialization cost
            for (int i = 0; i < 1000; i++) {
                derivs_naive<<<dimGrid, dimBlock>>>(d_v, d_v_x, d_v_y, d_v_xy);
            }
            double end = now();
            std::cout << "naive took " << (end - start) << " s\n";
            CUDA_CHECK;
            cudaMemcpy(v_x,  d_v_x,  count * sizeof(*v_x),  cudaMemcpyDeviceToHost); CUDA_CHECK;
            cudaMemcpy(v_y,  d_v_y,  count * sizeof(*v_y),  cudaMemcpyDeviceToHost); CUDA_CHECK;
            cudaMemcpy(v_xy, d_v_xy, count * sizeof(*v_xy), cudaMemcpyDeviceToHost); CUDA_CHECK;
        }
    }

    /* Cleanup (the original leaked all eight allocations). */
    cudaFree(d_v);
    cudaFree(d_v_x);
    cudaFree(d_v_y);
    cudaFree(d_v_xy);
    free(v);
    free(v_x);
    free(v_y);
    free(v_xy);
    return 0;
}
6,594
/* Introduction code to CUDA * Final version, each block 256 threads, * each grid having n/256 blocks in it. * * Compile: nvcc -g -o vec_add vecAdd2.cu -lm */ #include <stdio.h> #include <stdlib.h> #include <math.h> // Kernel function to add the elements of two arrays __global__ void vecAdd(double *a, double *b, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < n; i += stride) b[i] = a[i] + b[i]; } int main( int argc, char* argv[] ) { // Size of vectors int n = 1<<20; // Device input vectors double *d_a; double *d_b; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&d_a, bytes); cudaMallocManaged(&d_b, bytes); // Initialize vectors on host for(int i = 0; i < n; i++ ) { d_a[i] = sin(i)*sin(i); d_b[i] = cos(i)*cos(i); } // Number of threads in each thread block int blockSize = 256; // Number of thread blocks in grid int gridSize = (int)ceil((float)n/blockSize); // Execute the kernel vecAdd<<<gridSize, blockSize>>>(d_a, d_b, n); // Wait for the GPU to finish cudaDeviceSynchronize(); /* Sum up vector d_b and print result divided by n, this should equal 1 within error */ double sum = 0.0; for(int i=0; i<n; i++) sum += d_b[i]; printf("final result: %f\n", sum/n); // Release Unified Memory cudaFree(d_a); cudaFree(d_b); return 0; }
6,595
// Type your code here, or load an example. __global__ void square(int* array, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < n) array[tid] = array[tid] * array[tid]; }
6,596
#include <chrono>
#include <cstdio>
#include <cuda.h>
#include <curand_kernel.h>
#include <iostream>

typedef unsigned long long int uint64;

using namespace std::chrono;

/* Image and complex-plane geometry.  NOTE(review): throughout this program
 * the real part is treated as the vertical (TOP/BOTTOM) axis and the
 * imaginary part as the horizontal (LEFT/RIGHT) axis. */
const uint64 SIDE_PX = 8000;
const double LEFT = -5.0 / 3;
const double TOP = -6.5 / 3;
const double SIDE = 10.0 / 3;
const double RIGHT = LEFT + SIDE;
const double BOTTOM = TOP + SIDE;

/* Coarse grid used to locate cells on the Mandelbrot-set boundary. */
const int CELLS_PER_SIDE = 100;
const double CELL_SIZE = SIDE / CELLS_PER_SIDE;
const int TOTAL_CELLS = CELLS_PER_SIDE * CELLS_PER_SIDE;
const int ITERATIONS_PER_CELL = 20;
const int POINTS_PER_CELL = 10000;

/* Main render workload. */
const uint64 TOTAL_POINTS = 100 * 1000 * 1000;
const uint64 ITERATIONS_PER_POINT = 1000 * 1000;
const int THREADS = 50;
const int BLOCKS = 2500;
const int ITERATIONS_PER_BLOCK = TOTAL_POINTS / BLOCKS / THREADS;

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* Minimal device-side complex number. */
struct Complex {
    __device__ Complex() : re(0.0), im(0.0) {}
    __device__ Complex(double real, double imag) : re(real), im(imag) {}

    /* NOTE: returns |z|^2, not |z| — exactly what operator/ needs below. */
    __device__ double abs() { return re * re + im * im; }

    __device__ Complex operator+(Complex other) {
        return Complex(re + other.re, im + other.im);
    }
    __device__ Complex operator-(Complex other) {
        return Complex(re - other.re, im - other.im);
    }
    __device__ Complex operator*(Complex other) {
        return Complex(re * other.re - im * other.im,
                       re * other.im + im * other.re);
    }
    __device__ Complex operator/(Complex other) {
        double new_re = (re * other.re + im * other.im) / other.abs();
        double new_im = (im * other.re - re * other.im) / other.abs();
        return Complex(new_re, new_im);
    }
    __device__ bool operator==(Complex other) {
        return re == other.re && im == other.im;
    }
    __device__ Complex& operator=(Complex other) {
        re = other.re;
        im = other.im;
        return *this;
    }

    /* One Mandelbrot step x^2 + c, written with fma for accuracy/speed. */
    __device__ static Complex iterate(Complex x, Complex c) {
        double re = fma(x.re, x.re, c.re) - x.im * x.im;
        double im = fma(2 * x.re, x.im, c.im);
        return Complex(re, im);
    }

    double re, im;
};

/* Seed one curand state per thread. */
__global__ void init_rand(curandState_t* states)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(1337, idx, 0, &states[idx]);
}

/* Uniform random point inside grid cell cell_num. */
__device__ Complex rand_complex_from_cell(curandState_t* rand_state, unsigned int cell_num)
{
    double cell_top = TOP + CELL_SIZE * (cell_num / CELLS_PER_SIDE);
    double cell_left = LEFT + CELL_SIZE * (cell_num % CELLS_PER_SIDE);
    double d1 = curand_uniform_double(rand_state);
    double d2 = curand_uniform_double(rand_state);
    double re = cell_top + CELL_SIZE * d1;
    double im = cell_left + CELL_SIZE * d2;
    return Complex(re, im);
}

/* True once an orbit has left the viewed region (divergence proxy). */
__device__ bool outside(Complex x)
{
    return x.im < LEFT || x.im > RIGHT || x.re < TOP || x.re > BOTTOM;
}

/* Device array + device-resident element count. */
struct Array {
    unsigned int *data;
    size_t *size;
};

/* Mark cells that contain both converging and diverging sample points —
 * i.e. cells straddling the set boundary, where Buddhabrot orbits live. */
__global__ void find_edge_cells(curandState_t* rand_states, bool* on_edge)
{
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= TOTAL_CELLS) {
        return;
    }

    bool has_converging = false;
    bool has_diverging = false;
    for (int point = 0;
         point < POINTS_PER_CELL && !(has_converging && has_diverging);
         point++) {
        Complex c, x;
        x = c = rand_complex_from_cell(&rand_states[idx], idx);
        int it;
        for (it = 0; it < ITERATIONS_PER_CELL; it++) {
            x = x * x + c;
            if (outside(x)) {
                has_diverging = true;
                break;
            }
        }
        if (it == ITERATIONS_PER_CELL) {
            has_converging = true;
        }
    }
    on_edge[idx] = has_converging && has_diverging;
}

/* Random point drawn from a uniformly chosen edge cell. */
__device__ Complex rand_complex(curandState_t* rand_state, Array edge_cells)
{
    unsigned int cell = edge_cells.data[curand(rand_state) % *edge_cells.size];
    return rand_complex_from_cell(rand_state, cell);
}

/* Add amount to the pixel under point and its 4-neighbourhood.
 * NOTE(review): despite the original "circle of diameter 3" comment this is
 * a 5-pixel plus-shaped stamp; behaviour preserved. */
__device__ void inc3(uint64* pic, Complex point, uint64 amount)
{
    size_t y = (point.re - TOP) / SIDE * SIDE_PX;
    size_t x = (point.im - LEFT) / SIDE * SIDE_PX;
    if (x >= SIDE_PX || y >= SIDE_PX) {
        return;
    }
    atomicAdd(&pic[y * SIDE_PX + x], amount);
    if (x > 0)
        atomicAdd(&pic[y * SIDE_PX + (x - 1)], amount);
    if (y > 0)
        atomicAdd(&pic[(y - 1) * SIDE_PX + x], amount);
    if (x < SIDE_PX - 1)
        atomicAdd(&pic[y * SIDE_PX + (x + 1)], amount);
    if (y < SIDE_PX - 1)
        atomicAdd(&pic[(y + 1) * SIDE_PX + x], amount);
}

__device__ bool is_power_of_two(uint64 x)
{
    return ((x != 0) && !(x & (x - 1)));
}

/* Main Buddhabrot kernel: sample points from edge cells, iterate them, and
 * for points that escape replay the orbit, accumulating it into pic.  Cycle
 * detection uses Brent-style checkpoints at power-of-two iteration counts. */
__global__ void generate(uint64* pic, curandState_t* rand_states, Array edge_cells)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    Complex init, c, x, old_x;
    curandState_t rand_state = rand_states[idx];

    for (int block_it = 0; block_it < ITERATIONS_PER_BLOCK; block_it++) {
        old_x = x = c = init = rand_complex(&rand_state, edge_cells);
        bool has_diverged = false;
        uint64 it;
        for (it = 0; it < ITERATIONS_PER_POINT; it++) {
            x = Complex::iterate(x, c);
            if (outside(x)) {
                has_diverged = true;
                break;
            }
            if (x == old_x) {
                break;  // entered a cycle: the point will never escape
            }
            if (is_power_of_two(it)) {
                old_x = x;
            }
        }
        if (has_diverged) {
            /* Replay the escaping orbit and splat every step, weighted by
             * the orbit length. */
            x = c = init;
            inc3(pic, x, it);
            for (uint64 i = 0; i < it; i++) {
                x = Complex::iterate(x, c);
                inc3(pic, x, it);
            }
        }
    }

    /* Persist the advanced RNG state (the original discarded it). */
    rand_states[idx] = rand_state;
}

/* Build the device-side list of edge-cell indices. */
Array calc_edge_cells(curandState_t* rand_states)
{
    bool *on_edge;
    size_t on_edge_size = TOTAL_CELLS * sizeof(*on_edge);
    gpuErrchk(cudaMalloc(&on_edge, on_edge_size));

    find_edge_cells<<<BLOCKS, THREADS>>>(rand_states, on_edge);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    bool *on_edge_host;
    gpuErrchk(cudaMallocHost(&on_edge_host, on_edge_size));
    gpuErrchk(cudaMemcpy(on_edge_host, on_edge, on_edge_size, cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(on_edge));

    /* First pass: count edge cells so the exact list size can be allocated. */
    size_t edge_count = 0;
    for (int i = 0; i < TOTAL_CELLS; i++) {
        if (on_edge_host[i]) {
            edge_count++;
        }
    }

    /* Second pass: collect the edge-cell indices. */
    unsigned int *edge_cells_host;
    size_t edge_cells_size = edge_count * sizeof(*edge_cells_host);
    gpuErrchk(cudaMallocHost(&edge_cells_host, edge_cells_size));
    edge_count = 0;
    for (int i = 0; i < TOTAL_CELLS; i++) {
        if (on_edge_host[i]) {
            edge_cells_host[edge_count] = i;
            edge_count++;
        }
    }
    gpuErrchk(cudaFreeHost(on_edge_host));

    Array edge_cells;
    gpuErrchk(cudaMalloc(&edge_cells.data, edge_cells_size));
    /* Fix: the original allocated/copied sizeof(edge_cells.size) — the size
     * of the *pointer*, not of the pointed-to size_t.  That is only
     * accidentally correct on LP64 platforms. */
    gpuErrchk(cudaMalloc(&edge_cells.size, sizeof(*edge_cells.size)));
    gpuErrchk(cudaMemcpy(edge_cells.data, edge_cells_host, edge_cells_size,
                         cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(edge_cells.size, &edge_count, sizeof(*edge_cells.size),
                         cudaMemcpyHostToDevice));
    gpuErrchk(cudaFreeHost(edge_cells_host));
    return edge_cells;
}

int main()
{
    /* NOTE(review): hard-coded to device 1 (the second GPU) — confirm this
     * index is right for the target machine. */
    gpuErrchk(cudaSetDevice(1));

    uint64 *pic;
    size_t pic_size = SIDE_PX * SIDE_PX * sizeof(*pic);
    gpuErrchk(cudaMalloc(&pic, pic_size));
    gpuErrchk(cudaMemset(pic, 0, pic_size));

    curandState_t *rand_states;
    size_t rand_states_size = BLOCKS * THREADS * sizeof(*rand_states);
    gpuErrchk(cudaMalloc(&rand_states, rand_states_size));
    init_rand<<<BLOCKS, THREADS>>>(rand_states);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    Array edge_cells = calc_edge_cells(rand_states);

    auto start = steady_clock::now();
    generate<<<BLOCKS, THREADS>>>(pic, rand_states, edge_cells);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    auto finish = steady_clock::now();
    auto elapsed_time = duration_cast<duration<double>>(finish - start);
    std::cout << "Elapsed time: " << elapsed_time.count() << "s" << std::endl;

    /* Pull the accumulation buffer back and dump it as raw uint64 counts. */
    uint64 *host_pic;
    gpuErrchk(cudaMallocHost(&host_pic, pic_size));
    gpuErrchk(cudaMemcpy(host_pic, pic, pic_size, cudaMemcpyDeviceToHost));
    FILE *output = fopen("pic.bin", "wb");
    fwrite(host_pic, sizeof(*host_pic), SIDE_PX * SIDE_PX, output);
    fclose(output);

    gpuErrchk(cudaFreeHost(host_pic));
    gpuErrchk(cudaFree(edge_cells.data));
    gpuErrchk(cudaFree(edge_cells.size));
    gpuErrchk(cudaFree(rand_states));
    gpuErrchk(cudaFree(pic));
}
6,597
#include <algorithm> #include <cassert> #include <cstdlib> #include <iostream> #include <numeric> #include <vector> using std::accumulate; using std::cout; using std::generate; using std::vector; #define SHMEM_SIZE 256 __global__ void sumReduction(int *v, int *v_r) { // Allocate shared memory __shared__ int partial_sum[SHMEM_SIZE]; // Calculate thread ID int tid = blockIdx.x * blockDim.x + threadIdx.x; // Load elements into shared memory partial_sum[threadIdx.x] = v[tid]; __syncthreads(); // Increase the stride of the access until we exceed the CTA dimensions for (int s = 1; s < blockDim.x; s *= 2) { // Change the indexing to be sequential threads int index = 2 * s * threadIdx.x; // Each thread does work unless the index goes off the block if (index < blockDim.x) { partial_sum[index] += partial_sum[index + s]; } __syncthreads(); } // Let the thread 0 for this block write it's result to main memory // Result is inexed by this block if (threadIdx.x == 0) { v_r[blockIdx.x] = partial_sum[0]; } } int main() { // Vector size int N = 1 << 16; size_t bytes = N * sizeof(int); // Host data vector<int> h_v(N); vector<int> h_v_r(N); // Initialize the input data generate(begin(h_v), end(h_v), []() { return rand() % 10; }); // Allocate device memory int *d_v, *d_v_r; cudaMalloc(&d_v, bytes); cudaMalloc(&d_v_r, bytes); // Copy to device cudaMemcpy(d_v, h_v.data(), bytes, cudaMemcpyHostToDevice); // TB Size const int TB_SIZE = 256; // Grid Size (No padding) int GRID_SIZE = N / TB_SIZE; // Call kernels sumReduction<<<GRID_SIZE, TB_SIZE>>>(d_v, d_v_r); sumReduction<<<1, TB_SIZE>>>(d_v_r, d_v_r); // Copy to host; cudaMemcpy(h_v_r.data(), d_v_r, bytes, cudaMemcpyDeviceToHost); // Print the result assert(h_v_r[0] == std::accumulate(begin(h_v), end(h_v), 0)); cout << "COMPLETED SUCCESSFULLY\n"; return 0; }
6,598
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void sumArraysOnHost(float *A, float *B, float *C, const int N) { for(int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnDevice(float *a, float *b, float *c, const int N) { int tID = blockIdx.x; printf("blockIdx.x = %d\n", blockIdx.x); // if (tID < N) { c[tID] = a[tID] + b[tID]; printf("tID = %d\n", tID); // } } void initialData(float *ip, int size) { time_t t; srand((unsigned int) time(&t)); for(int i = 0; i < size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } } int main(int argc, char **argv) { int nElem = 1024; size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); float *d_A, *d_B, *d_C; cudaMalloc((float **) &d_A, nBytes); cudaMalloc((float **) &d_B, nBytes); cudaMalloc((float **) &d_C, nBytes); initialData(h_A, nElem); initialData(h_B, nElem); cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); // sumArraysOnHost(h_A, h_B, h_C, nElem); sumArraysOnDevice<<<32,32>>>(d_A, d_B, d_C, nElem); cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return 0; }
6,599
// // Sample Program to test cmake build system // settings for CUDA // // Author : Siddhant Wadhwa // Creation Date : Thursday, April 21st, 2016 // Modification history : <none> // #include <iostream> #include <stdio.h> #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // handle the data at this index if (tid < N) c[tid] = a[tid] + b[tid]; } int main (void) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU cudaMalloc( (void**)&dev_a, N * sizeof(int) ); cudaMalloc( (void**)&dev_b, N * sizeof(int) ); cudaMalloc( (void**)&dev_c, N * sizeof(int) ); // fill the arrays 'a' and 'b' on the CPU for (int i=0; i<N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ); // Execute cuda kernel add<<<N,1>>>( dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ); // display the results for (int i=0; i<N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); return 0; }
6,600
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>

#define N 512000
#define HIST 256

using namespace std;

/* CPU reference histogram: counter[v]++ for every value in data.
 * Precondition: every data[i] lies in [0, HIST). */
void histograma_cpu(int * data, int * counter){
    int i;
    for(i=0; i<N; i++)
        counter[data[i]]++;
}

/* Naive GPU histogram: one global atomicAdd per element.  Correct, but
 * heavily contended on popular bins. */
__global__ void histograma_gpu_global(int * data, int * counter){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid < N)
        atomicAdd(&(counter[data[tid]]),1);
}

/* Shared-memory GPU histogram: each block accumulates into a private
 * HIST-bin shared array, then flushes it to the global histogram with one
 * atomicAdd per bin.
 *
 * Robustness fix: the original wrapped both the shared-bin initialisation
 * and the final flush in `if (tid < N)`, which is only correct when N is an
 * exact multiple of the block size (it is for N=512000 / 512 threads, but
 * any other N would read uninitialised shared memory and drop counts).
 * Both steps now run in every block regardless of tid; only the per-element
 * accumulation stays guarded.  The redundant trailing __syncthreads() was
 * removed. */
__global__ void histograma_gpu_shared(int * data, int * counter){
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    __shared__ int h_sh[HIST];

    /* Zero this block's bins (blockDim.x >= HIST is assumed, as launched). */
    if(threadIdx.x < HIST)
        h_sh[threadIdx.x] = 0;
    __syncthreads();

    /* Accumulate this block's elements into shared bins. */
    if(tid < N)
        atomicAdd(&(h_sh[data[tid]]),1);
    __syncthreads();

    /* Flush: one global atomic per bin per block. */
    if(threadIdx.x < HIST)
        atomicAdd(&(counter[threadIdx.x]),h_sh[threadIdx.x]);
}

/* Print a few device limits (debug helper, not called by default). */
void showDeviceProperties(){
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop,0);
    cout << "Max Threads: " << prop.maxThreadsPerBlock << endl;
    cout << "Max Grid Size: " << prop.maxGridSize[0] << " " << prop.maxGridSize[1] << " " << prop.maxGridSize[2] << endl;
    cout << "Max Threads Dim: " << prop.maxThreadsDim[0] << " " << prop.maxThreadsDim[1] << " " << prop.maxThreadsDim[2] << endl;
}

int main(){
    //showDeviceProperties();

    /* Variables */
    int i;
    int * data;
    int * dev_data;
    int * counter_cpu;
    int * counter_global;
    int * dev_global_counter;
    int * counter_shared;
    int * dev_shared_counter;

    cudaEvent_t start, stop;
    float time_global, time_shared, time_cpu;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* Allocate host and device buffers. */
    data = (int *)malloc(N * sizeof(int));
    cudaMalloc((void**)&dev_data, N * sizeof(int));
    counter_global = (int *)malloc(HIST * sizeof(int));
    counter_shared = (int *)malloc(HIST * sizeof(int));
    counter_cpu = (int *)malloc(HIST * sizeof(int));
    cudaMalloc((void**)&dev_global_counter, HIST * sizeof(int));
    cudaMalloc((void**)&dev_shared_counter, HIST * sizeof(int));

    /* Read the N input values from stdin and zero all three histograms. */
    for(i=0; i<N; i++)
        cin >> data[i];
    for(i=0; i<HIST; i++){
        counter_global[i] = 0;
        counter_shared[i] = 0;
        counter_cpu[i] = 0;
    }

    /* Copy inputs and zeroed counters to the device. */
    cudaMemcpy(dev_global_counter, counter_global, HIST * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_shared_counter, counter_shared, HIST * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_data, data, N * sizeof(int), cudaMemcpyHostToDevice);

    /* CPU run (timed with CUDA events for a uniform clock). */
    cout << "Resultados histograma" << endl;
    cudaEventRecord(start, 0);
    histograma_cpu(data, counter_cpu);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_cpu, start, stop);

    /* GPU run, global-memory atomics. */
    cudaEventRecord(start, 0);
    histograma_gpu_global<<< ceil(N/512.0) , 512 >>> (dev_data, dev_global_counter);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_global, start, stop);

    /* GPU run, shared-memory histogram. */
    cudaEventRecord(start, 0);
    histograma_gpu_shared<<< ceil(N/512.0) , 512 >>> (dev_data, dev_shared_counter);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_shared, start, stop);

    /* Copy both GPU histograms back to the host. */
    cudaMemcpy(counter_global, dev_global_counter, HIST * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(counter_shared, dev_shared_counter, HIST * sizeof(int), cudaMemcpyDeviceToHost);

    /* Report timings. */
    cout << "Tiempo para Histograma CPU \t" << time_cpu << "[ms]" << endl;
    cout << "Tiempo para Histograma Global \t" << time_global << "[ms]" << endl;
    cout << "Tiempo para Histograma Shared \t" << time_shared << "[ms]" << endl;

    /* Optional per-bin dump for debugging:
    cout << "i\tGLOBAL\tSHARED\tCPU" << endl;
    for(i=0; i<HIST; i++)
        cout << i << "\t" << counter_global[i] << "\t" << counter_shared[i] << "\t" << counter_cpu[i] << endl;
    */

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_data);
    cudaFree(dev_global_counter);
    cudaFree(dev_shared_counter);
    free(data);
    free(counter_cpu);
    free(counter_global);
    free(counter_shared);
    return 0;
}