serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
6,001
#include <bits/stdc++.h> #include <chrono> using namespace std::chrono; using namespace std; typedef complex<float> base; int n = 4,m = 4,k = 3; template <typename T> ostream &operator<<(ostream &o, vector<T> v) { if (v.size() > 0) o << v[0]; for (unsigned i = 1; i < v.size(); i++) o << " " << v[i]; return o << endl; } /** * Parallel Functions for performing various tasks */ /** * Dividing by constant for inverse fft transform */ __global__ void inplace_divide_invert(float *A_r,float *A_i, int n, int threads) { int i = blockIdx.x * threads + threadIdx.x; if (i < n) { // printf("in divide"); A_r[i] /= n; A_i[i] /= n; } } /** * Reorders array by bit-reversing the indexes. */ __global__ void bitrev_reorder(float *__restrict__ r_r,float *__restrict__ r_i, float *__restrict__ d_r, float *__restrict__ d_i, int s, size_t nthr, int n) { int id = blockIdx.x * nthr + threadIdx.x; if (id < n and __brev(id) >> (32 - s) < n) { r_r[__brev(id) >> (32 - s)] = d_r[id]; r_i[__brev(id) >> (32 - s)] = d_i[id]; } } /** * Inner part of the for loop */ __device__ void inplace_fft_inner(float *__restrict__ A_r, float *__restrict__ A_i, int i, int j, int len, int n, bool invert) { if (i + j + len / 2 < n and j < len / 2) { float u_r, v_r; float u_i, v_i; float angle = (2 * 3.14 * j) / (len * (invert ? 1.0 : -1.0)); v_r = cos(angle); v_i = sin(angle); u_r = A_r[i + j]; u_i = A_i[i + j]; float temp_vr = v_r,temp_vi = v_i; v_r = A_r[i + j + len / 2]*temp_vr - A_i[i + j + len / 2]*temp_vi; v_i = A_i[i + j + len / 2]*temp_vr + A_r[i + j + len / 2]*temp_vi; A_r[i + j] = u_r + v_r; A_i[i + j] = u_i + v_i; A_r[i + j + len / 2] = u_r - v_r; A_i[i + j + len / 2] = u_i - v_i; } } /** * FFT if number of threads are sufficient. */ __global__ void inplace_fft(float *__restrict__ A_r, float *__restrict__ A_i, int i, int len, int n, int threads, bool invert) { int j = blockIdx.x * threads + threadIdx.x; inplace_fft_inner(A_r, A_i, i, j, len, n, invert); } /** * FFt if number of threads are not sufficient. 
*/ __global__ void inplace_fft_outer(float *__restrict__ A_r, float *__restrict__ A_i, int len, int n, int threads, bool invert) { int i = (blockIdx.x * threads + threadIdx.x)*len; for (int j = 0; j < len / 2; j++) { inplace_fft_inner(A_r, A_i, i, j, len, n, invert); } } /** * parallel FFT transform and inverse transform * Arguments vector of complex numbers, invert, balance, number of threads * Perform inplace transform */ void fft(vector<base> &a, bool invert, int balance = 10, int threads = 32) { // Creating array from vector int n = (int)a.size(); int data_size = n * sizeof(float); // cout<<data_size<<endl; float *data_array_r = (float *)malloc(data_size); float *data_array_i = (float *)malloc(data_size); for (int i = 0; i < n; i++) { data_array_r[i] = a[i].real(); data_array_i[i] = a[i].imag(); } float *A_r, *dn_r; float *A_i, *dn_i; cudaMalloc((void **)&A_r, data_size); cudaMalloc((void **)&A_i, data_size); cudaMalloc((void **)&dn_r, data_size); cudaMalloc((void **)&dn_i, data_size); cudaMemcpy(dn_r, data_array_r, data_size, cudaMemcpyHostToDevice); cudaMemcpy(dn_i, data_array_i, data_size, cudaMemcpyHostToDevice); // Bit reversal reordering int s = log2(n); bitrev_reorder<<<ceil(float(n) / threads), threads>>>(A_r,A_i, dn_r, dn_i, s, threads, n); float *result_r; float *result_i; result_r = (float *)malloc(data_size); result_i = (float *)malloc(data_size); // Synchronize cudaDeviceSynchronize(); // Iterative FFT with loop parallelism balancing for (int len = 2; len <= n; len <<= 1) { if (n / len > balance) { inplace_fft_outer<<<ceil((float)n / threads / len), threads>>>(A_r,A_i, len, n, threads, invert); } else { for (int i = 0; i < n; i += len) { float repeats = len / 2; inplace_fft<<<ceil(repeats / threads), threads>>>(A_r,A_i, i, len, n, threads, invert); } } } if (invert) inplace_divide_invert<<<ceil(n * 1.00 / threads), threads>>>(A_r,A_i, n, threads); cudaMemcpy(result_r, A_r, data_size, cudaMemcpyDeviceToHost); cudaMemcpy(result_i, A_i, data_size, 
cudaMemcpyDeviceToHost); // Saving data to vector<complex> in input. for (int i = 0; i < n; i++) { a[i] = base(result_r[i], result_i[i]); } // Free the memory blocks free(data_array_r); free(data_array_i); cudaFree(A_r); cudaFree(A_i); cudaFree(dn_r); cudaFree(dn_i); return; } /** * Performs 2D FFT * takes vector of complex vectors, invert and verbose as argument * performs inplace FFT transform on input vector */ void fft2D(vector<vector<base>> &a, bool invert, int balance, int threads) { auto matrix = a; for (auto i = 0; i < matrix.size(); i++) { fft(matrix[i], invert, balance, threads); } a = matrix; matrix.resize(a[0].size()); for (int i = 0; i < matrix.size(); i++) matrix[i].resize(a.size()); // Transposing matrix for (int i = 0; i < a.size(); i++) { for (int j = 0; j < a[0].size(); j++) { matrix[j][i] = a[i][j]; } } for (auto i = 0; i < matrix.size(); i++) fft(matrix[i], invert, balance, threads); for (int i = 0; i < a.size(); i++) { for (int j = 0; j < a[0].size(); j++) { a[j][i] = matrix[i][j]; } } } #define N 100000 #define BALANCE 1024 int nextPowerOf2(int n) { unsigned count = 0; if (n && !(n & (n - 1))) return n; while( n != 0) { n >>= 1; count += 1; } return 1 << count; } int main() { int l = n+k-1; int new_n = nextPowerOf2(l); int new_m = nextPowerOf2(l); int old_n = n; int old_m = m; int old_k = k; vector<vector<int>> image(new_n, vector<int>(new_m)); vector<vector<int>> kernel(new_n, vector<int>(new_m)); // for (int i = 0; i < new_n; ++i) // { // for (int j = 0; j < new_m; ++j) // { // if(i < k && j < k) // kernel[i][j] = ke[i][j]; // else // kernel[i][j] = 0; // if(i < n && j < m) // image[i][j] = mat[i][j]; // else // image[i][j] = 0; // } // } for (int i = 0; i < new_n; ++i) { for (int j = 0; j < new_m; ++j) { if(i < k && j < k) kernel[i][j] = i+j+2; else kernel[i][j] = 0; if(i < n && j < m) image[i][j] = (i+1)*(j+1); else image[i][j] = 0; } } n = new_n; m = new_m; for(int i=0;i<n;i++) { for(int j=0;j<m;j++) { cout<<image[i][j]<<" "; } 
cout<<endl; } cout<<endl; cout<<endl; for(int i=0;i<n;i++) { for(int j=0;j<m;j++) { cout<<kernel[i][j]<<" "; } cout<<endl; } cout<<endl; cout<<endl; vector<vector<base>> complex_image(image.size(), vector<base>(image[0].size())); vector<vector<base>> complex_kernel(image.size(), vector<base>(image[0].size())); for (auto i = 0; i < image.size(); i++) { for (auto j = 0; j < image[0].size(); j++) { complex_image[i][j] = image[i][j]; } } for (auto i = 0; i < kernel.size(); i++) { for (auto j = 0; j < kernel[0].size(); j++) { complex_kernel[i][j] = kernel[i][j]; } } fft2D(complex_image, false, BALANCE, 10); for(auto i=0;i<n;i++) { for(auto j=0;j<m;j++) { cout<<real(complex_image[i][j])<<"\t"; } cout<<endl; } cout<<endl; cout<<endl; fft2D(complex_kernel, false, BALANCE, 10); for(auto i=0;i<n;i++) { for(auto j=0;j<m;j++) { cout<<real(complex_kernel[i][j])<<"\t"; } cout<<endl; } cout<<endl; cout<<endl; vector<vector<base>> complex_out(image.size(), vector<base>(image[0].size())); for(auto i=0;i<n;i++) { for(auto j=0;j<m;j++) { complex_out[i][j] = complex_image[i][j]*complex_kernel[i][j]; } } for(auto i=0;i<n;i++) { for(auto j=0;j<m;j++) { cout<<real(complex_out[i][j])<<"\t"; } cout<<endl; } cout<<endl; cout<<endl; cout<<"bt"<<endl; fft2D(complex_out, true, BALANCE, 10); vector<vector<base>> final(old_n+old_k-1, vector<base>(old_m+old_k-1)); for(auto i=0;i<complex_out.size();i++) { for(auto j=0;j<complex_out[0].size();j++) { cout<<real(complex_out[i][j])<<"\t"; // final[i][j] = complex_out[i][j]; } cout<<endl; } cout<<endl; return 0; }
6,002
// Simple demo: fill an array with its indices on the GPU and print it.
// The original deliberately skipped error checking; a minimal check
// macro is added so failures are no longer silent.
#include <cstdio>
#include <cstdlib>
#include <cmath>

// Abort with a readable message if a CUDA API call fails.
#define CHECK(call)                                                     \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess) {                                      \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__,         \
                    __LINE__, cudaGetErrorString(err_));                \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

// Kernel: A[idx] = idx for idx in [0, N).
// One thread per element, bounds-checked so any launch >= N is valid.
__global__ void set(int *A, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        A[idx] = idx;
}

int main(void)
{
    const int N = 128;
    int *d_A;
    int *h_A = (int *) malloc(N * sizeof(int));

    CHECK(cudaMalloc((void **)&d_A, N * sizeof(int)));

    // Ceil-divide so the launch covers N for any block size
    // (the original hard-coded <<<2, 64>>>, which only works for N == 128).
    const int threads = 64;
    const int blocks = (N + threads - 1) / threads;
    set<<<blocks, threads>>>(d_A, N);
    CHECK(cudaGetLastError());   // catch launch-configuration errors

    CHECK(cudaMemcpy(h_A, d_A, N * sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++)
        printf("%i ", h_A[i]);
    printf("\n");

    free(h_A);
    CHECK(cudaFree((void*)d_A));
    return 0;
}
6,003
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Maximum number of blocks/threads that do not exceed hardware capacity.
// (Not the real hardware limit of the target machine; estimated empirically.)
int MAX_N_BLOCK = 1024;
int MAX_N_THREADS = 1024;

/* GPU kernel: element-wise matrix sum C = A + B.
 * Each thread processes `n_process` consecutive elements, starting at an
 * offset computed from its block and thread index; the end index is
 * clamped to n so the final chunk may be partial. */
__global__ void sum_matrix(int *A, int *B, int *C, int n_process, int n)
{
    int index_begin = threadIdx.x * n_process + blockIdx.x * blockDim.x * n_process;
    int n_end = index_begin + n_process;
    if (n_end > n)
        n_end = n;
    for (int i = index_begin; i < n_end; ++i)
        C[i] = A[i] + B[i];
}

int main()
{
    int *A, *B, *C;
    int *d_A, *d_B, *d_C;
    int i, j;

    // Input: matrix dimensions (rows, columns).
    // Robustness: the original did not check scanf's return value.
    int linhas, colunas;
    if (scanf("%d", &linhas) != 1 || scanf("%d", &colunas) != 1 ||
        linhas <= 0 || colunas <= 0) {
        fprintf(stderr, "invalid matrix dimensions\n");
        return 1;
    }

    // Buffer size in bytes and total element count.
    int size = sizeof(int) * linhas * colunas;
    long n = (long)linhas * colunas;

    // Host allocations.
    A = (int *)malloc(size);
    B = (int *)malloc(size);
    C = (int *)malloc(size);

    // Device allocations.
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);

    // Initialize inputs: A[i][j] = B[i][j] = i + j.
    for (i = 0; i < linhas; i++) {
        for (j = 0; j < colunas; j++) {
            A[i*colunas + j] = B[i*colunas + j] = i + j;
        }
    }

    // Copy inputs to device.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    // Launch configuration: cap threads and blocks at the empirical limits,
    // and assign each thread `n_process` elements when the matrix is larger
    // than MAX_N_BLOCK * MAX_N_THREADS.
    int n_threads = MAX_N_THREADS;  // threads per block actually used
    int n_blocks  = MAX_N_BLOCK;    // number of blocks actually used
    int n_process = 1;              // elements processed per thread
    if (n < MAX_N_THREADS)
        n_threads = n;              // very small matrix
    else if (n > ((long)MAX_N_BLOCK * MAX_N_THREADS))
        n_process = ceil(double(n) / ((double)MAX_N_BLOCK * MAX_N_THREADS)); // very large matrix
    // Number of blocks needed for this (threads, per-thread work) choice.
    n_blocks = ceil(double(n) / ((double)n_threads * n_process));

    // Run the kernel on the GPU.
    sum_matrix <<< n_blocks, n_threads >>> (d_A, d_B, d_C, n_process, n);

    // Copy the result back to the host (synchronizes with the kernel).
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    // Keep this reduction on the CPU (per the original's note).
    long long int somador = 0;
    for (i = 0; i < linhas; i++) {
        for (j = 0; j < colunas; j++) {
            somador += C[i*colunas + j];
        }
    }
    printf("%lli\n", somador);

    free(A); free(B); free(C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}
6,004
#include <stdio.h>
#include <stdlib.h>

/* CUDA error wrapper: abort with a readable message on failure. */
static void CUDA_ERROR(cudaError_t err)
{
    if (err != cudaSuccess) {
        printf("CUDA ERROR: %s, exiting\n", cudaGetErrorString(err));
        exit(-1);
    }
}

/* Two parallel int arrays of length N; passed to the kernel BY VALUE,
 * so the embedded device pointers are what the kernel dereferences. */
struct Arrays {
    int* i;
    int* j;
    int N;
};

/* Kernel: scale each element of .i by 2 and each element of .j by -3. */
__global__ void increment(Arrays d_arrays)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < d_arrays.N) {
        d_arrays.i[i] *= 2;
        d_arrays.j[i] *= -3;
    }
}

int main()
{
    int N = 10;

    Arrays arrays;
    arrays.i = new int[N];
    arrays.j = new int[N];
    arrays.N = N;
    for (int i = 0; i < N; i++) {
        arrays.i[i] = i;
        arrays.j[i] = -i;
        printf("%d %d \n", arrays.i[i], arrays.j[i]);
    }

    Arrays d_arrays;
    d_arrays.N = N;
    CUDA_ERROR(cudaMalloc((void**)&(d_arrays.i), N*sizeof(int)));
    CUDA_ERROR(cudaMalloc((void**)&(d_arrays.j), N*sizeof(int)));
    printf("d_i mallocated\n");

    CUDA_ERROR(cudaMemcpy(d_arrays.i, arrays.i, N*sizeof(int), cudaMemcpyHostToDevice));
    CUDA_ERROR(cudaMemcpy(d_arrays.j, arrays.j, N*sizeof(int), cudaMemcpyHostToDevice));
    printf("d_i memcpied\n");

    // Ceil-divide so any N is covered (the original hard-coded <<<1,16>>>,
    // which silently under-covers N > 16).
    int threads = 16;
    int blocks = (N + threads - 1) / threads;
    increment<<<blocks, threads>>>(d_arrays);
    CUDA_ERROR(cudaGetLastError());
    printf("incremented\n");

    CUDA_ERROR(cudaMemcpy(arrays.i, d_arrays.i, N*sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_ERROR(cudaMemcpy(arrays.j, d_arrays.j, N*sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) {
        printf("%d %d \n", arrays.i[i], arrays.j[i]);
    }

    CUDA_ERROR(cudaFree(d_arrays.i));
    CUDA_ERROR(cudaFree(d_arrays.j));

    // BUG FIX: the host arrays allocated with new[] were leaked.
    delete[] arrays.i;
    delete[] arrays.j;
}
6,005
#include <stdio.h>
#include <stdlib.h>

/* Pair of host/device buffer pointers (placeholder; currently unused). */
typedef struct hib {
    int *h_a;  /* host-side buffer   */
    int *d_a;  /* device-side buffer */
} hib;

/* Program intentionally does nothing. */
int main()
{
    return 0;
}
6,006
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define N 65535
#define T 1024 // max threads per block

/* Elapsed seconds between two gettimeofday() samples. */
double myDiffTime(struct timeval &start, struct timeval &end)
{
    double d_start, d_end;
    d_start = (double)(start.tv_sec + start.tv_usec/1000000.0);
    d_end = (double)(end.tv_sec + end.tv_usec/1000000.0);
    return (d_end - d_start);
}

__global__ void vecAdd (int *a, int *b, int *c);
void vecAddCPU(int *a, int *b, int *c);

int main()
{
    // NOTE(review): ~768 KB of stack arrays; within typical 8 MB stack
    // limits for this N, but would not scale — confirm if N grows.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    timeval start, end;

    // Initialize a and b with real values.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = N-i;
        c[i] = 0;
    }

    int size = N * sizeof(int);
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);

    gettimeofday(&start, NULL);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);

    // BUG FIX: the original used (int)ceil(N/T). N/T is INTEGER division
    // (65535/1024 == 63), so only 63*1024 = 64512 threads were launched
    // and elements [64512, 65534] were never computed on the GPU.
    // Integer ceil-divide launches the required 64 blocks.
    vecAdd<<<(N + T - 1) / T, T>>>(dev_a, dev_b, dev_c);

    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); // blocking: syncs with kernel
    gettimeofday(&end, NULL);
    printf("GPU Time for %i additions: %f\n", N, myDiffTime(start, end));

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    gettimeofday(&start, NULL);
    vecAddCPU(a, b, c);
    gettimeofday(&end, NULL);
    printf("CPU Time for %i additions: %f\n", N, myDiffTime(start, end));

    exit (0);
}

/* GPU: c[i] = a[i] + b[i], one element per thread, bounds-checked. */
__global__ void vecAdd (int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}

/* CPU reference implementation of the same addition. */
void vecAddCPU(int *a, int *b, int *c)
{
    for (int i = 0; i < N; i++)
        c[i] = a[i] + b[i];
}
6,007
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#ifndef ARRAY_SIZE
#define ARRAY_SIZE 256
#endif // !ARRAY_SIZE

#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif // !BLOCK_SIZE

/* Declare statically two arrays of ARRAY_SIZE each, receiving the
 * per-thread block index and thread index from the kernel. */
unsigned int cpu_block[ARRAY_SIZE];
unsigned int cpu_thread[ARRAY_SIZE];

/* Kernel: record each thread's block index and thread index at its
 * flat global index. Assumes the grid launches >= ARRAY_SIZE threads. */
__global__ void what_is_my_id(unsigned int* block, unsigned int* thread)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    block[thread_idx] = blockIdx.x;
    thread[thread_idx] = threadIdx.x;
}

void main_sub0()
{
    /* Declare pointers for GPU based params */
    unsigned int* gpu_block;
    unsigned int* gpu_thread;

    cudaMalloc((void**)&gpu_block, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void**)&gpu_thread, ARRAY_SIZE_IN_BYTES);

    // BUG FIX: the original issued two pre-kernel copies
    //   cudaMemcpy(cpu_block, gpu_block, ..., cudaMemcpyHostToDevice)
    // where the dst/src pointer order contradicted the direction flag and
    // the device buffers were still uninitialized. They are removed: the
    // kernel overwrites every element before the real D2H copies below.

    const unsigned int threads_per_block = BLOCK_SIZE;
    const unsigned int num_blocks = ceil((double) ARRAY_SIZE / threads_per_block);

    /* Execute our kernel */
    what_is_my_id<<<num_blocks, threads_per_block>>>(gpu_block, gpu_thread);

    /* Copy results back, then free the GPU arrays as we're done with them. */
    cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaFree(gpu_block);
    cudaFree(gpu_thread);

    /* Iterate through the arrays and print */
    for (unsigned int i = 0; i < ARRAY_SIZE; i++) {
        printf("i: %4u, Thread: %2u - Block: %2u\n", i, cpu_thread[i], cpu_block[i]);
    }
}

int main()
{
    main_sub0();
    return EXIT_SUCCESS;
}
6,008
/*Ron Pyka CS 553 Assignment 1 GPU Benchmark */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>

#define BLOCK_SIZE 16

/* Host-side transfer/compute buffers. Kept volatile, as in the original,
 * so the repeated-copy loops cannot be optimized away. */
volatile float A[1], B[1000], C[1000000], D[10000][10000];
volatile int E[10000][10000];

/* Initialize D and E with pseudo-random values. */
void initialize_inputs()
{
    int row, col;
    printf("\nInitializing...\n");
    for (col = 0; col < 10000; col++) {
        for (row = 0; row < 10000; row++) {
            D[row][col] = (float)rand() / 32768.0;
            E[row][col] = (int)rand() / 32768;
        }
    }
}

/* One float multiply per element of the 10000x10000 matrix. */
__global__ void gpuFlopTest(float *X)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    X[row*10000 + col] = X[row*10000 + col] * 2.2;
}

/* One int add per element of the 10000x10000 matrix. */
__global__ void gpuIopTest(int *X)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    X[row*10000 + col] = X[row*10000 + col] + 2;
}

/* Shared stopwatch used by every benchmark section (factors out the
 * timing boilerplate that the original repeated eight times). */
static struct timeval bench_start, bench_stop;

static void clock_begin(void)
{
    printf("\nStarting clock.\n");
    gettimeofday(&bench_start, NULL);
}

static void clock_end(void)
{
    gettimeofday(&bench_stop, NULL);
    unsigned long long usecstart =
        (unsigned long long)bench_start.tv_sec * 1000000 + bench_start.tv_usec;
    unsigned long long usecstop =
        (unsigned long long)bench_stop.tv_sec * 1000000 + bench_stop.tv_usec;
    printf("\nElapsed time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
}

int main(void)
{
    int sizeA = 1*sizeof(float);
    int sizeB = 1000*sizeof(float);
    int sizeC = 1000000*sizeof(float);
    int sizeD = 10000*10000*sizeof(float);
    int sizeE = 10000*10000*sizeof(int);
    float *X;
    int *Y;
    int K = ceil((float)10000/((float)BLOCK_SIZE));
    dim3 threadBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(K, K);

    printf("Testing memory speed\n\n");

    cudaMalloc(&X, sizeA);
    printf("4B write.\n");
    clock_begin();
    for (int i = 0; i < 500000; i++) {
        cudaMemcpy(X, (const void *)A, sizeA, cudaMemcpyHostToDevice);
    }
    clock_end();

    printf("4B read.\n");
    clock_begin();
    for (int i = 0; i < 500000; i++) {
        cudaMemcpy((void *)A, X, sizeA, cudaMemcpyDeviceToHost);
    }
    clock_end();
    cudaFree(X); /* BUG FIX: original called cudaFree(&X) — the address of
                  * the pointer VARIABLE, not the device allocation — which
                  * failed and leaked every buffer. Same fix applied below. */

    cudaMalloc(&X, sizeB);
    printf("4KB write.\n");
    clock_begin();
    for (int i = 0; i < 500000; i++) {
        cudaMemcpy(X, (const void *)B, sizeB, cudaMemcpyHostToDevice);
    }
    clock_end();

    printf("4KB read.\n");
    clock_begin();
    for (int i = 0; i < 500000; i++) {
        cudaMemcpy((void *)B, X, sizeB, cudaMemcpyDeviceToHost);
    }
    clock_end();
    cudaFree(X);

    cudaMalloc(&X, sizeC);
    printf("4MB write.\n");
    clock_begin();
    for (int i = 0; i < 50000; i++) {
        cudaMemcpy(X, (const void *)C, sizeC, cudaMemcpyHostToDevice);
    }
    clock_end();

    printf("4MB read.\n");
    clock_begin();
    for (int i = 0; i < 50000; i++) {
        cudaMemcpy((void *)C, X, sizeC, cudaMemcpyDeviceToHost);
    }
    clock_end();
    cudaFree(X);

    printf("\n\nTesting Flops and Iops\n\n");
    initialize_inputs();

    printf("Flops.\n");
    cudaMalloc(&X, sizeD);
    cudaMemcpy(X, (const void *)D, sizeD, cudaMemcpyHostToDevice);
    clock_begin();
    gpuFlopTest<<<grid, threadBlock>>>(X);
    cudaDeviceSynchronize(); /* BUG FIX: kernel launches are asynchronous;
                              * without a sync the clock measured only the
                              * launch, not the computation. */
    clock_end();
    cudaFree(X);

    printf("Iops.\n");
    cudaMalloc(&Y, sizeE);
    cudaMemcpy(Y, (const void *)E, sizeE, cudaMemcpyHostToDevice);
    clock_begin();
    gpuIopTest<<<grid, threadBlock>>>(Y);
    cudaDeviceSynchronize();
    clock_end();
    cudaFree(Y);
}
6,009
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda.h> #include <curand_kernel.h> // Random Gamma variates in CUDA... // Surprisingly, there doesn't seem to be any standard way to // generate these from the SDK. This is the CUDA port of the // rgamma code as used by R. extern "C" { // __constants__ (actually slower than keeping within kernel): /* __const__ float sqrt32 = 5.656854; __const__ float exp_m1 = 0.36787944117144232159; __const__ float q1 = 0.04166669; __const__ float q2 = 0.02083148; __const__ float q3 = 0.00801191; __const__ float q4 = 0.00144121; __const__ float q5 = -7.388e-5; __const__ float q6 = 2.4511e-4; __const__ float q7 = 2.424e-4; __const__ float a1 = 0.3333333; __const__ float a2 = -0.250003; __const__ float a3 = 0.2000062; __const__ float a4 = -0.1662921; __const__ float a5 = 0.1423657; __const__ float a6 = -0.1367177; __const__ float a7 = 0.1233795; */ __global__ void setup_kernel(curandState *state, int *seeds, int n) { // Usual block/thread indexing... int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; if (idx < n){ curand_init(seeds[idx], idx, 0, &state[idx]); } return; } __global__ void rgamma_kernel(curandState *state, int state_len, float *vals, int n, float a, float scale) { float sqrt32 = 5.656854; float exp_m1 = 0.36787944117144232159; float q1 = 0.04166669; float q2 = 0.02083148; float q3 = 0.00801191; float q4 = 0.00144121; float q5 = -7.388e-5; float q6 = 2.4511e-4; float q7 = 2.424e-4; float a1 = 0.3333333; float a2 = -0.250003; float a3 = 0.2000062; float a4 = -0.1662921; float a5 = 0.1423657; float a6 = -0.1367177; float a7 = 0.1233795; // Maybe want each thread to generate multiple... 
// -- Would cut down on bloat from curandState *state, // -- Would increase computation per thread + scalability // // state_len :: length of rng states (one thread per rng_state) // if (idx >= state_len){ do nothing... } // // n_per_thread :: number of rng's to generate per thread // heurestically, each thread computes // n_per_thread = n / state_len // variates. However, if n_per_thread is // not an integer, then some do one less, // some one more. // // n(idx) :: number of rng's that thread idx will generate // // n_lo :: minimum number of rng's that a (used) thread // will generate // // n_hi :: maximum number of rng's that a (used) thread // will generate // // n_extra :: (= n % state_len) the number of threads that // will generate n_hi rng's // // n_regular :: (= state_len - n_extra) the number of threads that // will generate n_lo rng's // // Note: // // n = (n_lo * n_regular) + (n_hi * n_extra) // // e.g., // // n = 108, state_len = 40 // => n_lo = 2, n_hi = 3 // => n_extra = 28, n_regular = 12 // => threads 1-28 generate 3 rng's // => threads 29-40 generate 2 rngs' // // thread idx writes to: // // if (idx < n_extra){ // // 'extra' thread: // n_hi*idx : ((n_hi*idx) + (n_hi-1)) // } // if (idx >= n_extra){ // // 'regular' thread: // (n_hi*n_extra) + (n_lo*(idx-n_extra)):((n_lo*(idx-n_extra)) + (n_lo-1)) // } // // e.g., (cont...) // // thread 0 writes to: 0,1,2 // thread 1 writes to: 3,4,5 // ... // thread 27 writes to: 81,82,83 // thread 28 writes to: 84,85 // thread 29 writes to: 86,87 // ... // thread 40 writes to: 118,119 // // Usual block/thread indexing... 
int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; if (idx >= state_len) { return; } // Number of regular and extra threads: int n_extra = (n % state_len); //int n_regular = n - n_extra; int n_lo = (int) (n/state_len); int n_hi = (n_lo+1); int out_idx_start, n_per_thread; if (idx < n_extra){ n_per_thread = n_hi; out_idx_start = n_hi*idx; } else { n_per_thread = n_lo; out_idx_start = n_hi*n_extra + n_lo*(idx-n_extra); } int gen_num; // State variables: float aa = 0.0; float aaa = 0.0; float s, s2, d; float q0, b, si, c; float e, p, q, r, t, u, v, w, x, ret_val; // Generate RNG's for this thread... for (gen_num=0; gen_num<n_per_thread; gen_num++){ // Note: Error checks on arguments removed! if (a < 1.0) { // GS algorithm for parameters a < 1 if (a == 0){ vals[out_idx_start+gen_num] = 0.0; continue; } e = 1.0 + exp_m1 * a; while (1){ p = e * curand_normal(&state[idx]); if (p >= 1.0) { x = -logf((e - p)/a); if (logf(curand_uniform(&state[idx])) <= (1.0 - a) * logf(x)){ break; } } else { x = expf(logf(p) / a); if (logf(curand_uniform(&state[idx])) <= x){ break; } } } vals[out_idx_start+gen_num] = scale * x; continue; } // --- a >= 1 : GD algorithm --- // Step 1: Recalculations of s2, s, d if a has changed if (a != aa) { aa = a; s2 = a - 0.5; s = sqrtf(s2); d = sqrt32 - s * 12.0; } // Step 2: t = standard normal deviate, // x = (s,1/2) -normal deviate. //printf("Step 2...\n"); // immediate acceptance (i) t = curand_normal(&state[idx]); x = s + 0.5 * t; ret_val = x * x; if (t >= 0.0){ vals[out_idx_start+gen_num] = scale * ret_val; continue; } //printf("Step 3...\n"); // Step 3: u = 0,1 - uniform sample. 
squeeze acceptance (s) u = curand_uniform(&state[idx]); if ((d*u) <= (t*t*t)){ vals[out_idx_start+gen_num] = scale * ret_val; continue; } //printf("Step 4...\n"); // Step 4: recalculations of q0, b, si, c if necessary if (a != aaa){ aaa = a; r = 1.0 / a; q0 = ((((((q7 * r + q6) * r + q5) * r + q4) * r + q3) * r + q2) * r + q1) * r; // Approximation depending on size of parameter a // The constants in the expressions for b, si and c // were established by numerical experiments if (a <= 3.686) { b = 0.463 + s + 0.178 * s2; si = 1.235; c = 0.195 / s - 0.079 + 0.16 * s; } else if (a <= 13.022) { b = 1.654 + 0.0076 * s2; si = 1.68 / s + 0.275; c = 0.062 / s + 0.024; } else { b = 1.77; si = 0.75; c = 0.1515 / s; } } //printf("Step 5...\n"); // Step 5: no quotient test if x not positive if (x > 0.0) { // Step 6: calculation of v and quotient q v = t / (s + s); if (fabs(v) <= 0.25){ q = q0 + 0.5 * t * t * ((((((a7 * v + a6) * v + a5) * v + a4) * v + a3) * v + a2) * v + a1) * v; } else { q = q0 - s * t + 0.25 * t * t + (s2 + s2) * log(1.0 + v); } // Step 7: quotient acceptance (q) if (logf(1.0 - u) <= q){ vals[out_idx_start+gen_num] = scale * ret_val; continue; } } //printf("Step 8...\n"); while (1){ // Step 8: e = standard exponential deviate // u = 0,1 -uniform deviate // t = (b,si)-double exponential (laplace) sample e = -logf(curand_uniform(&state[idx])); // Expo(1) u = curand_uniform(&state[idx]); u = u + u - 1.0; if (u < 0.0){ t = b - si * e; } else { t = b + si * e; } // Step 9: rejection if t < tau(1) = -0.71874483771719 if (t >= -0.71874483771719) { // Step 10: calculation of v and quotient q v = t / (s + s); if (fabs(v) <= 0.25){ q = q0 + 0.5 * t * t * ((((((a7 * v + a6) * v + a5) * v + a4) * v + a3) * v + a2) * v + a1) * v; } else { q = q0 - s * t + 0.25 * t * t + (s2 + s2) * log(1.0 + v); } // Step 11: hat acceptance (h) // (if q not positive go to step 8) if (q > 0.0) { w = expm1f(q); // expm1 is double prec... 
// ^^^^^ original code had approximation with rel.err < 2e-7 // if t is rejected sample again at step 8 if (c * fabs(u) <= w * expf(e - 0.5 * t * t)){ break; } } } } // repeat .. until `t' is accepted //printf("Returning at end...\n"); x = s + 0.5 * t; vals[out_idx_start+gen_num] = scale * x * x; continue; } // end gen_num loop return; } } /* // Full C wrapper for debugging... int main() { int n = 100000; int n_states = 1000; int threads_per_block = 512; int h_seeds[n_states]; int i; for (i=0; i<n_states; i++){ h_seeds[i] = 198+6*i; } int *d_seeds; cudaMalloc((void **)&d_seeds, n_states*sizeof(int)); cudaMemcpy(d_seeds,h_seeds,n_states*sizeof(int),cudaMemcpyHostToDevice); curandState *state; cudaMalloc((void**)&state, n_states*sizeof(curandState)); dim3 block_dims(threads_per_block,1,1); int grid_d1 = (int)floor(sqrt((float)n_states/(float)threads_per_block)); int grid_d2 = (int)ceil((float)n_states/(float)(grid_d1*threads_per_block)); dim3 grid_dims(grid_d1, grid_d2, 1); printf("Calling setup kernel...\n"); setup_kernel<<<grid_dims,block_dims>>>(state,d_seeds,n_states); printf("done with setup kernel. Calling rgamma kernel...\n"); float a = 10.0; float b = 1.2; float oob = 1.0/b; float h_vals[n]; for (i=0; i<n; i++){ h_vals[i] = 0.0; } float *d_vals; cudaMalloc((void**)&d_vals,n*sizeof(float)); cudaMemcpy(d_vals,h_vals,n_states*sizeof(int),cudaMemcpyHostToDevice); rgamma_kernel<<<grid_dims,block_dims>>>(state,n_states,d_vals,n,a,oob); printf("done with call. Copying results back...\n"); cudaMemcpy(h_vals,d_vals,n*sizeof(int),cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("done.\n"); printf("output:\n[1]"); for (i=0; i<n; i++){ printf(" %4.3f",h_vals[i]); if ((i+1)%8 == 0) printf("\n[%d]",i+2); } printf("\n"); return 0; } */
6,010
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define NUM_THREADS 64

typedef double MYTYPE;

/* Kernel: out-of-place transpose of a size x size matrix.
 * One thread per element; the flat index is decomposed into (row, col)
 * and bounds-checked against size*size. */
__global__ void mat_trans(MYTYPE* a, MYTYPE* at, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row = idx / size;
    int col = idx % size;
    if (idx < size * size) {
        at[col*size + row] = a[row*size + col];
    }
}

/* Print a dim x dim matrix to stdout. */
void mat_print(MYTYPE *m, int dim)
{
    int i, j;
    printf("\n");
    for (i = 0; i < dim; i++) {
        for (j = 0; j < dim; j++)
            printf("%.3lf\t", m[i*dim + j]);
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char *argv[])
{
    MYTYPE *h_a, *h_at;
    MYTYPE *d_a, *d_at;
    int dim, i;
    size_t m_size;

    if (argc != 2) {
        printf("\n");
        printf("usage: a.out MAT_DIM\n\n");
        exit(1);
    }

    dim = atoi(argv[1]);
    // Robustness: reject non-numeric or non-positive sizes (atoi returns 0
    // on parse failure; dim <= 0 would break the kernel's idx/size math).
    if (dim <= 0) {
        printf("\n");
        printf("usage: a.out MAT_DIM\n\n");
        exit(1);
    }
    m_size = dim*dim * sizeof(MYTYPE);

    h_a = (MYTYPE*) malloc(m_size);
    h_at = (MYTYPE*) malloc(m_size);
    for (i = 0; i < dim*dim; i++)
        h_a[i] = (MYTYPE)(rand() % 10);
    if (dim <= 10)
        mat_print(h_a, dim);

    cudaMalloc((void**)&d_a, m_size);
    cudaMalloc((void**)&d_at, m_size);
    cudaMemcpy(d_a, h_a, m_size, cudaMemcpyHostToDevice);

    // Proper integer ceil-divide: the original (dim*dim + NUM_THREADS)
    // / NUM_THREADS launched one extra, fully idle block whenever
    // dim*dim was an exact multiple of NUM_THREADS.
    mat_trans<<< (dim*dim + NUM_THREADS - 1)/NUM_THREADS, NUM_THREADS >>>(d_a, d_at, dim);

    cudaMemcpy(h_at, d_at, m_size, cudaMemcpyDeviceToHost);
    if (dim <= 10)
        mat_print(h_at, dim);

    free(h_a);
    free(h_at);
    cudaFree(d_a);
    cudaFree(d_at);
    return 0;
}
6,011
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/random.h>
#include <thrust/sort.h>
#include <time.h>

// Abort with file/line and the CUDA error string on any failed runtime call.
#define CUDA_CALL(x) \
  { \
    if ((x) != cudaSuccess) { \
      printf("CUDA error at %s:%d\n", __FILE__, __LINE__); \
      printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
      exit(EXIT_FAILURE); \
    } \
  }

// Default grid resolution per axis (overridden by a local `d` in main()).
int d = 1 << 5;

// For every corpus point (Cx,Cy,Cz)[i], compute the linear key of the grid
// cell it falls in. Coordinates are assumed in [0,1); each axis is split
// into d cells, and key = d*d*ix + d*iy + iz.
// Grid-stride loop, so any launch configuration covers all lenghtC points.
__global__ void generatekey(float *Cx, float *Cy, float *Cz, int lenghtC,
                            int *keys, int d) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < lenghtC;
       i = i + gridDim.x * blockDim.x) {
    // float -> int conversion truncates toward zero: cell index per axis.
    int digit1 = Cx[i] * d;
    int digit2 = Cy[i] * d;
    int digit3 = Cz[i] * d;
    keys[i] = d * d * digit1 + d * digit2 + digit3;
  }
}

// Given the (sorted) key array, record for every occupied cell the first and
// last index of its run: starts[key] / ends[key]. Cells with no points keep
// whatever value the arrays were initialized with (-1 via cudaMemset below).
__global__ void findCellStartends(int *keys, int len, int *starts, int *ends) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len;
       i = i + gridDim.x * blockDim.x) {
    // A run starts where the key differs from its predecessor.
    if (i > 0) {
      if (keys[i] != keys[i - 1]) {
        starts[keys[i]] = i;
      }
    } else {
      starts[keys[0]] = 0;
    }
    // A run ends where the key differs from its successor.
    if (i != len - 1) {
      if (keys[i] != keys[i + 1]) {
        ends[keys[i]] = i;
      }
    } else {
      ends[keys[len - 1]] = len - 1;
    }
  }
}

// Distance from the query point to the nearest face of the ring of cells at
// radius s around its home cell (digit1,digit2,digit3). Used as the stopping
// criterion of the expanding-ring search: once the best match is closer than
// this border, no farther cell can contain a closer point.
// Faces that coincide with the domain boundary are excluded (sentinel 100).
__device__ float findBoarderdistance(float pointx, float pointy, float pointz,
                                     int d, int s, int digit1, int digit2,
                                     int digit3) {
  float bdist = 100;
  if (digit1 + s < d - 1) {
    bdist = -pointx + (float)(digit1 + s + 1) / d;
  }
  if (digit2 + s < d - 1) {
    bdist = fminf(-pointy + (float)(digit2 + s + 1) / d, bdist);
  }
  if (digit3 + s < d - 1) {
    bdist = fminf(-pointz + (float)(digit3 + s + 1) / d, bdist);
  }
  if (digit1 - s > 0) {
    bdist = fminf(+pointx + (float)(-digit1 + s) / d, bdist);
  }
  if (digit2 - s > 0) {
    bdist = fminf(+pointy + (float)(-digit2 + s) / d, bdist);
  }
  if (digit3 - s > 0) {
    bdist = fminf(+pointz + (float)(-digit3 + s) / d, bdist);
  }
  return bdist;
}

// Linear scan of one cell's run [start, end] in the sorted corpus arrays;
// updates *minq (best distance so far) and the best point (*x,*y,*z).
// start < 0 means the cell is empty (the -1 sentinel), so nothing is done.
__device__ void searchCell(float pointx, float pointy, float pointz, int start,
                           int end, float *Cx, float *Cy, float *Cz,
                           float *minq, float *x, float *y, float *z) {
  float dist;
  if (start >= 0) {
    for (int j = start; j <= end; j++) {
      dist = (Cx[j] - pointx) * (Cx[j] - pointx) +
             (Cy[j] - pointy) * (Cy[j] - pointy) +
             (Cz[j] - pointz) * (Cz[j] - pointz);
      dist = sqrt(dist);
      if (dist < *minq) {
        *minq = dist;
        *x = Cx[j];
        *y = Cy[j];
        *z = Cz[j];
      }
    }
  }
}

// Nearest-neighbor search for each query point: first scan the home cell,
// then scan successive rings (shells) of cells at radius s = 1, 2, ... until
// the best distance found is smaller than the distance to the border of the
// last completed ring (bdist), at which point no unexplored cell can win.
// NOTE(review): the `key >= 0 && key < d*d*d` check does not clamp digx/digy/
// digz individually, so an out-of-range digit on one axis can alias into a
// valid cell on a different row/plane — verify this is acceptable.
__global__ void searchGrid(float *Qx, float *Qy, float *Qz, int *starts,
                           int *ends, float *Cx, float *Cy, float *Cz,
                           float *Resx, float *Resy, float *Resz, int d,
                           int lenghtQ) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < lenghtQ;
       i = i + gridDim.x * blockDim.x) {
    float pointx = Qx[i];
    float pointy = Qy[i];
    float pointz = Qz[i];
    int digit1 = pointx * d;
    int digit2 = pointy * d;
    int digit3 = pointz * d;
    int key = d * d * digit1 + d * digit2 + digit3;
    float x = 0, y = 0, z = 0;
    // 100 is a safe "infinity": points live in the unit cube, so any real
    // distance is at most sqrt(3).
    float minq = 100;
    int s = 0;
    float bdist =
        findBoarderdistance(pointx, pointy, pointz, d, s, digit1, digit2, digit3);
    // Home cell first.
    searchCell(pointx, pointy, pointz, starts[key], ends[key], Cx, Cy, Cz,
               &minq, &x, &y, &z);
    int digx, digy, digz;
    while (1) {
      s++;
      // Stop once the best hit is provably closer than anything outside the
      // already-searched region.
      if (minq < bdist) {
        break;
      }
      // Scan only the shell at radius s (any index on the ring boundary).
      for (digx = digit1 - s; digx <= digit1 + s; digx++) {
        for (digy = digit2 - s; digy <= digit2 + s; digy++) {
          for (digz = digit3 - s; digz <= digit3 + s; digz++) {
            if (digy == digit2 - s || digy == digit2 + s ||
                digz == digit3 - s || digz == digit3 + s ||
                digx == digit1 - s || digx == digit1 + s) {
              key = d * d * digx + d * digy + digz;
              if (key >= 0 && key < d * d * d) {
                searchCell(pointx, pointy, pointz, starts[key], ends[key], Cx,
                           Cy, Cz, &minq, &x, &y, &z);
              }
            }
          }
        }
      }
      bdist = findBoarderdistance(pointx, pointy, pointz, d, s, digit1, digit2,
                                  digit3);
    }
    Resx[i] = x;
    Resy[i] = y;
    Resz[i] = z;
  }
}

// Brute-force O(lenghtC) nearest neighbor per query; used only to validate
// the grid search results. tempx/y/z are always assigned because every real
// distance (<= sqrt(3)) beats the initial minq of 100 when lenghtC >= 1.
__global__ void distsQmin(float *Qx, float *Qy, float *Qz, float *Cx,
                          float *Cy, float *Cz, int lenghtC, float *x,
                          float *y, float *z, int lenghtQ) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < lenghtQ;
       i = i + gridDim.x * blockDim.x) {
    float minq = 100;
    float dist;
    float tempx, tempy, tempz;
    for (int j = 0; j < lenghtC; j++) {
      dist = (Cx[j] - Qx[i]) * (Cx[j] - Qx[i]) +
             (Cy[j] - Qy[i]) * (Cy[j] - Qy[i]) +
             (Cz[j] - Qz[i]) * (Cz[j] - Qz[i]);
      dist = sqrt(dist);
      if (dist < minq) {
        minq = dist;
        tempx = Cx[j];
        tempy = Cy[j];
        tempz = Cz[j];
      }
    }
    x[i] = tempx;
    y[i] = tempy;
    z[i] = tempz;
  }
}

// Sanity check of the grid construction: every point must fall inside its
// cell's [starts, ends] run, and keys must be non-decreasing.
// NOTE(review): no bounds check on i — the launch configuration must cover
// exactly lenghtC threads (holds here because lenghtC is a power of two and
// threadsPerBlock is 32, provided argv[1] >= 5).
__global__ void gridval(int *starts, int *ends, float *Cx, float *Cy,
                        float *Cz, int d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int digit1 = Cx[i] * d;
  int digit2 = Cy[i] * d;
  int digit3 = Cz[i] * d;
  int key = d * d * digit1 + d * digit2 + digit3;
  if (i > ends[key] || i < starts[key]) {
    printf("Error in grid construction \n");
  }
  if (i > 0) {
    digit1 = Cx[i - 1] * d;
    digit2 = Cy[i - 1] * d;
    digit3 = Cz[i - 1] * d;
    int key2 = d * d * digit1 + d * digit2 + digit3;
    if (key2 > key) {
      printf("Error not sorted\n");
    }
  }
}

// Fill p with pseudo-random coordinates.
// NOTE(review): (rand() - 1000) / RAND_MAX yields values in roughly
// [-1000/RAND_MAX, 1), i.e. slightly negative values are possible; they
// truncate to cell index 0 in the kernels above — confirm this is intended.
void init_rand_points(float *p, int n) {
  int i;
  for (i = 0; i < n; i++) {
    p[i] = (float)(rand() - 1000) / (float)RAND_MAX;
  }
}

int main(int argc, char **argv) {
  /* Take the input lengths: argv[1]/argv[2] are log2 of the corpus/query
     sizes, argv[3] is log2 of the grid resolution per axis. */
  if (argc != 4) {
    printf("Enter size of the set and queries as arguments and the number of "
           "grid cells as arguments");
    exit(0);
  }
  srand(time(NULL));
  int lenghtC = 1 << atoi(argv[1]);
  int lenghtQ = 1 << atoi(argv[2]);
  int d = 1 << atoi(argv[3]);  // shadows the file-scope d
  int numblocks = 1 << 10;
  int threadsPerBlock = 32;
  printf("Size of set 2 to %d size of quiry set 2 to %d grid dimentions %d x %d x %d\n",
         atoi(argv[1]), atoi(argv[2]), d, d, d);
  printf("%d Threadblocks with %d threads per block \n", numblocks,
         threadsPerBlock);

  // Host-side corpus (Cs*) and query (Qs*) coordinate arrays.
  float *Csx, *Csy, *Csz, *Qsx, *Qsy, *Qsz;
  Csx = (float *)malloc(sizeof(float) * lenghtC);
  Csy = (float *)malloc(sizeof(float) * lenghtC);
  Csz = (float *)malloc(sizeof(float) * lenghtC);
  Qsx = (float *)malloc(sizeof(float) * lenghtQ);
  Qsy = (float *)malloc(sizeof(float) * lenghtQ);
  Qsz = (float *)malloc(sizeof(float) * lenghtQ);

  /* Put numbers in the arrays */
  init_rand_points(Csx, lenghtC);
  init_rand_points(Csy, lenghtC);
  init_rand_points(Csz, lenghtC);
  init_rand_points(Qsx, lenghtQ);
  init_rand_points(Qsy, lenghtQ);
  init_rand_points(Qsz, lenghtQ);

  /* Allocate space in device memory */
  float *Cx, *Cy, *Cz, *Qx, *Qy, *Qz;
  float *Resx, *Resy, *Resz;  // grid-search results
  CUDA_CALL(cudaMalloc(&Cx, lenghtC * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Cy, lenghtC * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Cz, lenghtC * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Qx, lenghtQ * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Qy, lenghtQ * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Qz, lenghtQ * sizeof(float)));

  /* Copy the numbers in the device */
  CUDA_CALL(cudaMemcpy(Cx, Csx, lenghtC * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(Cy, Csy, lenghtC * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(Cz, Csz, lenghtC * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(Qx, Qsx, lenghtQ * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(Qy, Qsy, lenghtQ * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(Qz, Qsz, lenghtQ * sizeof(float), cudaMemcpyHostToDevice));

  /* Make pointers so we can use the thrust libraty */
  thrust ::device_ptr<float> Cx_ptr(Cx);
  thrust ::device_ptr<float> Cy_ptr(Cy);
  thrust ::device_ptr<float> Cz_ptr(Cz);
  thrust ::device_ptr<float> Qx_ptr(Qx);
  thrust ::device_ptr<float> Qy_ptr(Qy);
  thrust ::device_ptr<float> Qz_ptr(Qz);

  /* Find the grid node for each point in C (timed with CUDA events) */
  float gridMakeTime;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  int *keysC;
  CUDA_CALL(cudaMalloc(&keysC, lenghtC * sizeof(int)));
  generatekey<<<numblocks, threadsPerBlock>>>(Cx, Cy, Cz, lenghtC, keysC, d);
  cudaDeviceSynchronize();

  /* Sort by grid node: the zip iterator keeps x/y/z rows aligned while the
     keys are sorted, so each cell's points end up contiguous. */
  thrust ::device_ptr<int> kc(keysC);
  thrust ::stable_sort_by_key(
      kc, kc + lenghtC,
      make_zip_iterator(make_tuple(Cx_ptr, Cy_ptr, Cz_ptr)));

  // Per-cell run boundaries; -1 (via byte-wise memset) marks empty cells.
  int *starts;
  CUDA_CALL(cudaMalloc(&starts, d * d * d * (sizeof(int))));
  CUDA_CALL(cudaMemset(starts, -1, d * d * d * (sizeof(int))));
  int *ends;
  CUDA_CALL(cudaMalloc(&ends, d * d * d * (sizeof(int))));
  CUDA_CALL(cudaMemset(ends, -1, d * d * d * (sizeof(int))));

  /* Find where its node starts and ends */
  findCellStartends<<<numblocks, threadsPerBlock>>>(keysC, lenghtC, starts, ends);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&gridMakeTime, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  printf("Time for grid creation %f ms \n", gridMakeTime);

  /* Validate the Grid */
  gridval<<<lenghtC / threadsPerBlock, threadsPerBlock>>>(starts, ends, Cx, Cy,
                                                          Cz, d);

  CUDA_CALL(cudaMalloc(&Resx, lenghtQ * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Resy, lenghtQ * sizeof(float)));
  CUDA_CALL(cudaMalloc(&Resz, lenghtQ * sizeof(float)));

  float elapsedTime;
  cudaEvent_t startse, stopse;
  cudaEventCreate(&startse);
  cudaEventCreate(&stopse);
  cudaEventRecord(startse, 0);
  /* Searrch for each query point */
  searchGrid<<<numblocks, threadsPerBlock>>>(Qx, Qy, Qz, starts, ends, Cx, Cy,
                                             Cz, Resx, Resy, Resz, d, lenghtQ);
  cudaEventRecord(stopse, 0);
  cudaEventSynchronize(stopse);
  cudaEventElapsedTime(&elapsedTime, startse, stopse);
  cudaEventDestroy(startse);
  cudaEventDestroy(stopse);
  printf("Search Time %f ms \n", elapsedTime);

  // Reference results from the brute-force kernel.
  float *x;
  float *z;
  float *y;
  CUDA_CALL(cudaMalloc(&x, lenghtQ * sizeof(float)));
  CUDA_CALL(cudaMalloc(&y, lenghtQ * sizeof(float)));
  CUDA_CALL(cudaMalloc(&z, lenghtQ * sizeof(float)));
  /* Validation */
  distsQmin<<<numblocks, threadsPerBlock>>>(Qx, Qy, Qz, Cx, Cy, Cz, lenghtC, x,
                                            y, z, lenghtQ);
  cudaDeviceSynchronize();

  // Copy both result sets back and compare on the host.
  float *gridx, *gridy, *gridz, *minx, *miny, *minz;
  gridx = (float *)malloc(lenghtQ * sizeof(float));
  gridy = (float *)malloc(lenghtQ * sizeof(float));
  gridz = (float *)malloc(lenghtQ * sizeof(float));
  minx = (float *)malloc(lenghtQ * sizeof(float));
  miny = (float *)malloc(lenghtQ * sizeof(float));
  minz = (float *)malloc(lenghtQ * sizeof(float));
  cudaMemcpy(gridx, Resx, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(gridy, Resy, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(gridz, Resz, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(minx, x, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(miny, y, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(minz, z, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);

  float *Qhx, *Qhy, *Qhz;
  Qhx = (float *)malloc(lenghtQ * sizeof(float));
  Qhy = (float *)malloc(lenghtQ * sizeof(float));
  Qhz = (float *)malloc(lenghtQ * sizeof(float));
  cudaMemcpy(Qhx, Qx, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(Qhy, Qy, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(Qhz, Qz, lenghtQ * sizeof(float), cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();

  // Count mismatches between the grid search and the brute-force reference.
  int s = 0;
  int c = 0;
  for (int i = 0; i < lenghtQ; i++) {
    if (minx[i] != gridx[i] || miny[i] != gridy[i] || minz[i] != gridz[i]) {
      s++;
    } else {
      c++;
    }
  }
  printf("Wrong number of points %d ", s);
  printf("Ritght number of points %d \n", c);

  CUDA_CALL(cudaFree(Cx));
  CUDA_CALL(cudaFree(Cy));
  CUDA_CALL(cudaFree(Cz));
  CUDA_CALL(cudaFree(Qx));
  CUDA_CALL(cudaFree(Qy));
  CUDA_CALL(cudaFree(Qz));
  CUDA_CALL(cudaFree(Resx));
  CUDA_CALL(cudaFree(Resy));
  CUDA_CALL(cudaFree(Resz));
  CUDA_CALL(cudaFree(keysC));
  CUDA_CALL(cudaFree(ends));
  CUDA_CALL(cudaFree(starts));
  CUDA_CALL(cudaFree(x));
  CUDA_CALL(cudaFree(y));
  CUDA_CALL(cudaFree(z));
  return 0;
}
6,012
// CUDA programming
// Exercise n. 00

#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h> /* EXIT_SUCCESS */

#define BLOCKS 1
#define THREADS 32

// Prototypes
void cpu_hello_world(void);
__global__ void gpu_hello_world(void);

int main(void)
{
    // Call the CPU version
    cpu_hello_world();

    // Call the GPU version
    gpu_hello_world<<< BLOCKS, THREADS >>>();

    // Kernel launches are asynchronous and device-side printf is buffered:
    // without this barrier the process can exit before the GPU output is
    // flushed to stdout.
    cudaDeviceSynchronize();

    return(EXIT_SUCCESS);
}

// CPU version of hello world!
void cpu_hello_world(void)
{
    printf("Hello from the CPU!\n");
}

// GPU version of hello world! One line of output per thread.
__global__ void gpu_hello_world(void)
{
    int threadId = threadIdx.x;
    printf("Hello from the GPU! My threadId is %d\n", threadId);
}
6,013
// fermi
/*
 * Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Regularizer so the gradient normalization never divides by zero.
#define EPS (1.0f)

extern "C" {
__global__ void fastnoise1Kernel(const int h, const int w, float* dxsdys, const float* input);
}

// For every pixel (i, j) of the h x w image `input`, compute the image
// gradient (dx, dy) — central differences in the interior, one-sided at the
// borders — normalize it by 1/(EPS + |grad|), and store dx in plane 0 and dy
// in plane 1 of `dxsdys` (layout [2][h][w]).
// Launch layout: blockIdx.y = row i; blockIdx.x tiles the columns, with
// threadIdx.y / threadIdx.x jointly indexing a column inside the tile.
// All literals are single-precision (0.5f, 1.0f) to avoid implicit promotion
// to double arithmetic inside the kernel.
__global__ void fastnoise1Kernel(const int h, const int w, float* dxsdys, const float* input) {
    const int i = blockIdx.y;
    const int bj = blockIdx.x;
    const int wtj = threadIdx.y;
    const int ttj = threadIdx.x;
    const int nrThreadsW = min(1024, w);
    const int nrThreadsNrThreadsW = min(32, nrThreadsW);
    const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
    if (tj < nrThreadsW) {
        const int j = bj * (1 * nrThreadsW) + tj;
        if (j < w) {
            // Horizontal derivative: forward/backward difference at the
            // left/right edge, central difference otherwise.
            float dx;
            if (j == 0)
                dx = input[j + 1 + i * (1 * w)] - input[j + i * (1 * w)];
            else if (j == w - 1)
                dx = input[j + i * (1 * w)] - input[j - 1 + i * (1 * w)];
            else
                dx = 0.5f * (input[j + 1 + i * (1 * w)] - input[j - 1 + i * (1 * w)]);
            // Vertical derivative, same scheme across rows.
            float dy;
            if (i == 0)
                dy = input[j + (i + 1) * (1 * w)] - input[j + i * (1 * w)];
            else if (i == h - 1)
                dy = input[j + i * (1 * w)] - input[j + (i - 1) * (1 * w)];
            else
                dy = 0.5f * (input[j + (i + 1) * (1 * w)] - input[j + (i - 1) * (1 * w)]);
            const float norm = sqrtf(dx * dx + dy * dy);
            const float scale = 1.0f / (EPS + norm);
            dxsdys[j + 0 * (1 * h * w) + i * (1 * w)] = dx * scale;
            dxsdys[j + 1 * (1 * h * w) + i * (1 * w)] = dy * scale;
        }
    }
}
6,014
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

// Element-wise vector addition: c[i] = a[i] + b[i].
// Each thread handles one element. The global index must combine the block
// and thread ids (the original used blockIdx.x alone, which — with a
// <<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> launch — computed only the
// first N/THREADS_PER_BLOCK elements, each redundantly 512 times).
__global__ void add( int *a, int *b, int *c )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        c[i] = a[i] + b[i];
}

int main( void )
{
    int *a, *b, *c;              // host copies of a, b, c
    int *dev_a, *dev_b, *dev_c;  // device copies of a, b, c
    int size = N * sizeof( int );  // bytes per vector
    clock_t start, end;
    double result;

    // allocate device copies of a, b, c
    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_c, size );

    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );

    // fill the inputs with small random values
    for(int i=0;i<N;i++) {
        a[i] = rand()%100;
        b[i] = rand()%100;
    }

    // copy inputs to device
    cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );

    start = clock();
    // launch add() kernel: N/THREADS_PER_BLOCK blocks of THREADS_PER_BLOCK
    // threads cover all N elements (N is a multiple of THREADS_PER_BLOCK)
    add<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c );
    // the launch is asynchronous — wait for completion so the timing below
    // measures the kernel, not just the launch call
    cudaDeviceSynchronize();
    end = clock();

    // copy device result back to host copy of c
    cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );

    /*
    for(int i=0; i<N; i++)
        printf("[%d] : %d + %d = %d\n",i,a[i],b[i],c[i]);
    */

    free(a); free(b); free(c);
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );

    result = (double)(end - start)/CLOCKS_PER_SEC;
    printf("processing time: %lfs\n",result);
    return 0;
}
6,015
#include "includes.h"

// Adds the per-block offset d_total[block] onto every element of that
// block's segment of d_array — presumably the uniform-add phase of a
// block-wise scan, where d_total holds the scanned block sums (verify
// against the caller). Segments are BLOCK_WIDTH elements wide; the guard
// handles a final partial segment when n is not a multiple of BLOCK_WIDTH.
__global__ void mapScan(unsigned int *d_array, unsigned int *d_total, size_t n)
{
    const int block = blockIdx.x;
    const int idx = block * BLOCK_WIDTH + threadIdx.x;
    if (idx >= n)
        return;
    d_array[idx] += d_total[block];
}
6,016
#include "gpu_automata_cuda.cuh"

// TODO Experiment with this function
#if 1
// 2D-GAME OF LIFE: the cell's own state is read from neighbors[13] (the
// center of the 3x3x3 stencil); the 8 in-plane neighbors are indices 9..17
// excluding 13.
__device__ bool update_fun(bool *neighbors) {
  int count = 0;
  for (int i= 9; i < 18; i++) {
    if (i != 13) {
      count += neighbors[i] ? 1 : 0;
    }
  }
  if (neighbors[13])
    return count == 2 || count == 3;
  else
    return count == 3;
}
#else
// 3D variant: survive/spawn when the populated count of the full 3x3x3
// stencil (center included) is strictly between 3 and 9.
__device__ bool update_fun(bool *neighbors) {
  int count = 0;
  for (int i = 0; i < 27; i++) {
    count += neighbors[i] ? 1 : 0;
  }
  return count > 3 && count < 9;
}
#endif

// Row-major linear index into an xdim x ydim x zdim volume.
__device__ int get_index(int x, int y, int z, int xdim, int ydim, int zdim) {
  return (x * ydim * zdim) + (y * zdim) + z;
}

// Put the values of the 3x3x3 stencil around cell `idx` (center included,
// at neighbors[13]) into the given array. It is the responsibility of the
// caller to set initial values of the neighbors, which is relevant for the
// edge cases which are missing some neighbors. Each neighbor has a specific
// location in the neighbors array (see the "n_idx = get_index(...)" line),
// in case some update functions want to abandon symmetry.
//
// Fix: the original skipped an entry when `i == 0 && y == 0 && k == 0`,
// which mixed an offset (i, k) with an absolute coordinate (y). Since
// update_fun reads neighbors[13] as the cell's own state, the center must
// always be written — the buggy skip left cells on the y == 0 plane with a
// permanently-false center. The skip is removed entirely.
__device__ void get_neighbors(bool *neighbors, int idx, int xdim, int ydim,
                              int zdim, bool *field) {
  int x = idx / (ydim * zdim);
  int y = (idx % (ydim * zdim)) / zdim;
  int z = (idx % (ydim * zdim)) % zdim;
  // Clamp the stencil at the volume boundary; out-of-range entries keep the
  // caller's initial value.
  int i_start = x == 0 ? 0 : -1;
  int j_start = y == 0 ? 0 : -1;
  int k_start = z == 0 ? 0 : -1;
  int i_end = x == xdim - 1 ? 0 : 1;
  int j_end = y == ydim - 1 ? 0 : 1;
  int k_end = z == zdim - 1 ? 0 : 1;
  for (int i = i_start; i <= i_end; i++) {
    for (int j = j_start; j <= j_end; j++) {
      for (int k = k_start; k <= k_end; k++) {
        // Find the index within the neighbors array
        int n_idx = get_index(i + 1, j + 1, k + 1, 3, 3, 3);
        neighbors[n_idx] =
            field[get_index(x + i, y + j, z + k, xdim, ydim, zdim)];
      }
    }
  }
}

// Randomly populate the field: a cell is alive iff its random draw < 0.1.
// Grid-stride loop, so any launch configuration covers all `length` cells.
__global__ void cuda_init_field_kernel(float *rands, bool *field, int length) {
  // TODO Have more interesting field initialization
  int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
  for (int i = thread_idx; i < length; i += gridDim.x * blockDim.x) {
    field[i] = rands[i] < 0.1f ? true : false;
  }
}

// One automaton generation: for every cell, gather its 3x3x3 stencil from
// old_field and write update_fun's verdict into new_field.
__global__ void cuda_automaton_step_kernel(bool *old_field, bool *new_field,
                                           int xdim, int ydim, int zdim) {
  // I'm not using shared memory for two reasons here: First is that it
  // starts overflowing at the uninterestingly low seeming size of 36x36x36,
  // and the first couple pages of google say nothing about what overflowing
  // shared mem does, so I don't want to do it in case it produces garbage
  // data.
  // The other reason (which should perhaps just be counted as part of the
  // first reason), is that the 36x36x36 bound comes from assuming that each
  // block can freely use all the shared memory of it's SMP, which will be
  // false so the shared mem would overflow even earlier.
  int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
  for (int i = thread_idx; i < xdim * ydim * zdim;
       i += gridDim.x * blockDim.x) {
    bool neighbors[27] = {};  // zero-initialized: missing edge entries stay false
    get_neighbors(neighbors, i, xdim, ydim, zdim, old_field);
    new_field[i] = update_fun(neighbors);
  }
}

// Host-callable wrapper around cuda_init_field_kernel.
void cuda_call_init_field_kernel(const unsigned int blocks,
                                 const unsigned int threadsPerBlock,
                                 float *rands, bool *field, int length) {
  cuda_init_field_kernel<<<blocks, threadsPerBlock>>>(rands, field, length);
}

// Host-callable wrapper around cuda_automaton_step_kernel.
void cuda_call_automaton_step_kernel(const unsigned int blocks,
                                     const unsigned int threadsPerBlock,
                                     bool *old_field, bool *new_field,
                                     int xdim, int ydim, int zdim) {
  cuda_automaton_step_kernel<<<blocks, threadsPerBlock>>>
      (old_field, new_field, xdim, ydim, zdim);
}
6,017
/* ============================================================================ Name : Isolated_SW.cu Author : Vuong Pham Duy Version : Copyright : Your copyright notice Description : debugging Smith-Waterman Score Matrix Kernel ============================================================================ */ #include <cuda.h> #include <cuda_runtime.h> #include <stdlib.h> #include <stdio.h> #include <inttypes.h> #include <assert.h> #include <getopt.h> #define WARP 1024 #define LIKELY(x) __builtin_expect((x),1) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } #define DEBUG 1 #define A 1 #define B 4 #define MATH_SIZE 5 #define O_DEL 6 #define E_DEL 1 #define O_INS 6 #define E_INS 1 #define W 100 #define END_BONUS 5 #define ZDROP 100 #define H0 200 #define THREAD_CHECK 0 typedef struct { int32_t h, e; } eh_t; void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", \ cudaGetErrorString(code), file, line); if (abort) exit(code); } } void bwa_fill_scmat(int a, int b, int8_t mat[25]); int ksw_extend2(int qlen, const uint8_t *query, int tlen, const uint8_t *target, int m, const int8_t *mat, \ int o_del, int e_del, int o_ins, int e_ins, int w, int end_bonus, int zdrop, int h0); __device__ bool check_active(int32_t h, int32_t e, int *wait_cnt, int beg, int *out_h, int *out_e) { if(h != -1 && e != -1) { if(*wait_cnt == beg) return true; else { *wait_cnt += 1; *out_h = 0; *out_e = 0; return false; } } else return false; } __device__ void reset(int32_t *h, int32_t *e) { *h = -1; *e = -1; } __device__ int mLock = 0; extern __shared__ int32_t container[]; __global__ void sw_kernel(int *d_max, int *d_max_i, int *d_max_j, int *d_max_ie, int *d_gscore, int *d_max_off, \ int tcheck, int w, int oe_ins, int e_ins, int o_del, int e_del, int oe_del, int m, \ int tlen, int qlen, int passes, int t_lastp, int h0, int zdrop, \ int32_t *h, int8_t *qp, const uint8_t *target) { __shared__ 
int break_cnt; __shared__ int max; __shared__ int max_i; __shared__ int max_j; __shared__ int max_ie; __shared__ int gscore; __shared__ int max_off; __shared__ int out_h[WARP]; __shared__ int out_e[WARP]; bool blocked = true; int in_h, in_e; int i, k, wait_cnt; int active_ts, beg, end; int32_t *se, *sh; int8_t *sqp; /* Initialize */ if(threadIdx.x == 0) { max = h0; max_i = -1; max_j = -1; max_ie = -1; gscore = -1; max_off = 0; break_cnt = 0; } i = threadIdx.x; sh = container; se = (int32_t*)&sh[qlen + 1]; sqp = (int8_t*)&se[qlen + 1]; for(;;) { if(i < qlen + 1) { sh[i] = h[i]; se[i] = 0; } // qlen > 1, m = 5, qlen * m always bigger than qlen + 1 if(i < qlen * m) { sqp[i] = qp[i]; } else break; i += WARP; } __syncthreads(); for(int i = 0; i < passes; i++) { if(i == passes - 1) { if(threadIdx.x >= t_lastp) break; else active_ts = t_lastp; } else active_ts = WARP; reset(&in_h, &in_e); reset(&out_h[threadIdx.x], &out_e[threadIdx.x]); beg = 0; end = qlen; wait_cnt = 0; int t, row_i, f = 0, h1, local_m = 0, mj = -1; row_i = i * WARP + threadIdx.x; int8_t *q = &sqp[target[row_i] * qlen]; // apply the band and the constraint (if provided) if (beg < row_i - w) beg = row_i - w; if (end > row_i + w + 1) end = row_i + w + 1; if (end > qlen) end = qlen; // reset input, output if (beg == 0) { h1 = h0 - (o_del + e_del * (row_i + 1)); if (h1 < 0) h1 = 0; } else h1 = 0; __syncthreads(); for(k = beg; k <= end;) { if(k < end) { if(threadIdx.x == 0) { in_h = sh[k]; in_e = se[k]; } else { in_h = out_h[threadIdx.x - 1]; in_e = out_e[threadIdx.x - 1]; } } __syncthreads(); if(k == end) { out_h[threadIdx.x] = h1; out_e[threadIdx.x] = 0; if(threadIdx.x == active_ts - 1) { sh[end] = h1; se[end] = 0; } } __syncthreads(); if(k == end) break; if(check_active(in_h, in_e, &wait_cnt, beg, &out_h[threadIdx.x], &out_e[threadIdx.x])) { int local_h; if(threadIdx.x == tcheck && DEBUG == 1) printf("i = %d, j = %d, M = %d, h1 = %d\n", row_i, k, in_h, h1); out_h[threadIdx.x] = h1; if(threadIdx.x == 
active_ts - 1) sh[k] = h1; //in_h = in_h? in_h + q[beg] : 0; if(in_h) in_h = in_h + q[k]; else in_h = 0; // local_h = in_h > in_e? in_h : in_e; if(in_h > in_e) local_h = in_h; else local_h = in_e; // local_h = local_h > f? local_h : f; if(local_h < f) local_h = f; if(threadIdx.x == tcheck && DEBUG == 1) printf("i = %d, j = %d, h = %d\n", row_i, k, local_h); h1 = local_h; // mj = local_m > local_h? mj : beg; if(local_m <= local_h) mj = k; //local_m = local_m > local_h? local_m : local_h; if(local_m < local_h) local_m = local_h; t = in_h - oe_del; //t = t > 0? t : 0; if(t < 0) t = 0; in_e -= e_del; //in_e = in_e > t? in_e : t; if(in_e < t) in_e = t; out_e[threadIdx.x] = in_e; if(threadIdx.x == active_ts - 1) se[k] = in_e; t = in_h - oe_ins; //t = t > 0? t : 0; if(t < 0) t = 0; f -= e_ins; //f = f > t? f : t; if(f < t) f = t; if(threadIdx.x == tcheck && DEBUG == 1) printf("i = %d, j = %d, M = %d, h = %d, h1 = %d, e = %d, f = %d, t = %d\n", \ row_i, k, in_h, local_h, h1, in_e, f, t); reset(&in_h, &in_e); k += 1; } __syncthreads(); } blocked = true; while(blocked) { if(0 == atomicCAS(&mLock, 0, 1)) { // critical section if(k == qlen) { if(gscore < h1) { max_ie = row_i; gscore = h1; } else if(gscore == h1 && max_ie < row_i) { max_ie = row_i; } } atomicExch(&mLock, 0); blocked = false; } } blocked = true; while(blocked) { if(0 == atomicCAS(&mLock, 0, 1)) { if(local_m > max) { max = local_m, max_i = row_i, max_j = mj; max_off = max_off > abs(mj - row_i)? 
max_off : abs(mj - row_i); } // else if (zdrop > 0) { // if (row_i - max_i > mj - max_j) { // if (max - local_m - ((row_i - max_i) - (mj - max_j)) * e_del > zdrop) break_cnt += 1; // } else { // if (max - local_m - ((mj - max_j) - (row_i - max_i)) * e_ins > zdrop) break_cnt += 1; // } // } atomicExch(&mLock, 0); blocked = false; } } __syncthreads(); } __syncthreads(); *d_max = max; *d_max_i = max_i; *d_max_j = max_j; *d_max_ie = max_ie; *d_gscore = gscore; *d_max_off = max_off; } #define QLEN 1000 #define TLEN 1500 int main(int argc, char *argv[]) { int c, GPU = 1, tcheck = THREAD_CHECK, pr = 0; while ((c = getopt(argc, argv, "t:g:p:")) >= 0) { if (c == 't') tcheck = atoi(optarg); else if (c == 'g') GPU = atoi(optarg); else if (c == 'p') pr = atoi(optarg); else return 1; } int8_t mat[MATH_SIZE * MATH_SIZE]; uint8_t cquery[QLEN] = {2, 2, 1, 2, 0, 0, 3, 0, 2, 1, 2, 1, 3, 0, 1, 1, 0, 0, 3, 2, 0, 1, 3, 1, 3, 0, 1, 0, 0, 2, 0, 3, 0, 2, 1, 0, 2, 0, 1, 0, 2, 3, 2, 1, 3, 3, 2, 3, 3, 1, 1, 3, 2, 1, 0, 2, 1, 2, 2, 2, 0, 3, 1, 0, 1, 2, 0, 3, 2, 1, 3, 0, 1, 1, 1, 0, 0, 0, 0, 3, 1, 1, 3, 0, 2, 3, 2, 0, 1, 0, 2, 1, 3, 3, 1, 0, 1, 2, 3, 3, 3, 3, 0, 0, 0, 1, 1, 1, 1, 1, 0, 3, 2, 3, 3, 1, 3, 1, 1, 0, 1, 3, 2, 1, 2, 3, 1, 3, 1, 1, 2, 1, 0, 2, 1, 0, 0, 2, 1, 1, 3, 2, 0, 2, 1, 3, 3, 0, 0, 0, 1, 2, 3, 3, 3, 1, 2, 0, 0, 0, 1, 2, 1, 1, 1, 2, 2, 1, 1, 3, 2, 0, 1, 3, 2, 3, 2, 1, 3, 3, 1, 0, 1, 0, 3, 0, 1, 2, 0, 1, 2, 2, 0, 3, 3, 1, 1, 1, 2, 2, 1, 0, 3, 2, 3, 1, 1, 2, 3, 1, 1, 0, 1, 2, 1, 1, 2, 2, 3, 2, 0, 1, 0, 0, 0, 0, 1, 1, 1, 3, 0, 2, 3, 3, 1, 3, 0, 2, 1, 3, 3, 2, 0, 1, 0, 1, 2, 2, 3, 1, 0, 3, 2, 1, 3, 2, 1, 0, 3, 2, 3, 3, 1, 3, 2, 2, 2, 3, 0, 3, 2, 0, 1, 2, 1, 1, 3, 3, 3, 3, 0, 3, 2, 2, 0, 2, 0, 1, 2, 3, 0, 2, 3, 1, 1, 1, 3, 3, 0, 3, 2, 3, 3, 3, 1, 0, 0, 1, 3, 3, 0, 3, 2, 2, 1, 3, 0, 1, 0, 3, 1, 0, 1, 0, 1, 2, 1, 0, 1, 2, 0, 3, 1, 3, 2, 2, 0, 2, 3, 3, 1, 3, 3, 3, 2, 0, 2, 2, 2, 3, 1, 3, 3, 2, 3, 1, 0, 0, 1, 1, 2, 1, 0, 3, 1, 2, 2, 1, 0, 1, 0, 1, 1, 3, 1, 3, 0, 3, 1, 2, 2, 3, 1, 2, 1, 
0, 3, 2, 0, 0, 3, 3, 2, 0, 2, 3, 2, 0, 0, 3, 2, 0, 0, 3, 0, 1, 2, 0, 1, 3, 2, 3, 2, 3, 1, 0, 3, 0, 2, 3, 1, 1, 2, 3, 1, 1, 2, 0, 1, 2, 3, 3, 2, 3, 2, 2, 1, 0, 2, 2, 0, 0, 1, 2, 3, 3, 2, 2, 3, 0, 2, 0, 2, 0, 3, 3, 1, 1, 3, 3, 3, 2, 2, 2, 2, 1, 0, 3, 1, 3, 1, 1, 3, 2, 0, 3, 1, 2, 1, 1, 3, 3, 1, 1, 0, 1, 0, 1, 2, 0, 0, 2, 2, 3, 0, 0, 0, 0, 3, 1, 3, 0, 3, 3, 3, 3, 2, 0, 1, 3, 1, 0, 3, 3, 1, 3, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 0, 3, 1, 0, 1, 1, 2, 1, 0, 1, 3, 3, 1, 1, 3, 3, 3, 0, 0, 3, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 2, 2, 3, 2, 3, 0, 0, 0, 0, 1, 3, 3, 2, 0, 2, 2, 3, 3, 2, 3, 3, 2, 3, 0, 0, 0, 2, 2, 2, 0, 3, 0, 2, 2, 3, 1, 2, 3, 2, 2, 1, 1, 1, 3, 2, 0, 1, 1, 3, 3, 0, 2, 2, 0, 2, 2, 0, 0, 0, 3, 1, 3, 3, 3, 2, 2, 1, 0, 2, 3, 3, 3, 0, 0, 2, 2, 0, 0, 0, 3, 3, 0, 2, 1, 0, 0, 3, 1, 1, 3, 0, 2, 3, 3, 1, 1, 2, 2, 1, 0, 1, 0, 3, 2, 1, 1, 0, 1, 1, 0, 1, 1, 1, 3, 2, 1, 3, 2, 2, 0, 1, 2, 2, 0, 2, 0, 1, 0, 2, 3, 0, 0, 3, 3, 2, 0, 0, 2, 2, 2, 3, 3, 3, 0, 2, 1, 1, 1, 3, 0, 2, 1, 2, 0, 1, 0, 0, 3, 0, 3, 2, 0, 3, 1, 3, 1, 2, 0, 3, 0, 2, 2, 3, 1, 2, 1, 2, 0, 2, 2, 0, 0, 3, 2, 1, 0, 3, 1, 3, 3, 0, 1, 0, 3, 3, 3, 0, 1, 3, 0, 1, 1, 2, 0, 2, 1, 1, 1, 1, 3, 3, 1, 0, 2, 3, 1, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 1, 3, 3, 1, 3, 0, 2, 1, 0, 1, 2, 1, 2, 3, 1, 1, 0, 1, 3, 0, 2, 1, 2, 0, 3, 0, 3, 3, 2, 2, 1, 0, 1, 1, 1, 0, 1, 3, 1, 2, 0, 0, 3, 2, 3, 0, 3, 0, 1, 2, 0, 3, 0, 2, 0, 3, 3, 3, 2, 1, 1, 0, 1, 2, 1, 2, 2, 2, 2, 0, 0, 2, 0, 0, 1, 3, 0, 0, 3, 2, 3, 3, 1, 3, 2, 1, 2, 1, 1, 1, 2, 2, 1, 0, 1, 2, 2, 3, 0, 0, 3, 1, 3, 3, 1, 0, 3, 1, 0, 2, 3, 3, 2, 1, 2, 0, 2, 1, 1, 3, 2, 3, 2, 3, 3, 3, 1, 2, 2, 1, 2, 2, 2, 1, 1, 3, 1, 0, 1, 2, 3, 0, 1, 1, 1, 0, 1, 0, 1, 2, 3, 3, 1, 1, 2, 1, 0, 3, 3, 3, 0, 1, 1, 3, 3, 2, 2, 0, 3, 3, 2, 2, 0, 0, 3, 1, 0, 0, 1, 1, 2, 1, 0, 3, 2, 2, 0, 3, 1, 3, 2, 1, 1, 3, 0, 0, 1, 3, 0, 0, 2, 3, 2, 2, 3, 1, 0, 3}; uint8_t ctarget[TLEN] = {1, 1, 0, 3, 2, 0, 3, 1, 2, 3, 0, 3, 3, 2, 0, 0, 1, 1, 0, 2, 0, 0, 3, 2, 3, 1, 1, 2, 3, 1, 1, 0, 2, 1, 0, 1, 1, 3, 2, 3, 2, 2, 2, 
1, 0, 3, 1, 1, 0, 1, 3, 0, 2, 2, 2, 1, 0, 3, 0, 3, 0, 1, 3, 3, 3, 3, 0, 0, 2, 2, 0, 1, 0, 2, 2, 0, 1, 0, 1, 1, 1, 0, 1, 3, 3, 0, 1, 3, 3, 1, 2, 0, 2, 1, 3, 1, 1, 3, 2, 3, 1, 2, 0, 1, 0, 3, 1, 2, 3, 2, 3, 0, 2, 1, 0, 1, 1, 1, 0, 0, 2, 2, 0, 0, 0, 3, 2, 1, 2, 0, 0, 3, 2, 1, 0, 2, 0, 1, 0, 3, 3, 0, 3, 2, 1, 3, 3, 2, 0, 0, 2, 2, 2, 3, 3, 2, 2, 1, 3, 1, 1, 0, 0, 3, 1, 1, 1, 1, 2, 2, 0, 2, 2, 3, 0, 3, 3, 3, 1, 3, 3, 3, 2, 2, 2, 1, 0, 1, 2, 0, 2, 3, 0, 2, 2, 1, 3, 3, 2, 2, 1, 2, 0, 3, 1, 0, 2, 0, 3, 3, 0, 3, 3, 2, 1, 1, 3, 1, 2, 1, 1, 0, 0, 1, 3, 2, 2, 2, 1, 0, 0, 3, 2, 0, 2, 0, 0, 1, 0, 0, 0, 0, 3, 3, 2, 0, 1, 1, 1, 3, 2, 3, 0, 2, 0, 3, 0, 3, 1, 2, 3, 2, 1, 2, 2, 3, 2, 3, 0, 2, 3, 1, 3, 2, 0, 1, 2, 1, 3, 3, 1, 1, 2, 1, 0, 3, 0, 0, 2, 1, 2, 1, 3, 3, 3, 2, 3, 1, 1, 3, 0, 0, 0, 3, 2, 1, 0, 0, 2, 3, 3, 3, 1, 2, 0, 1, 1, 0, 1, 3, 2, 0, 0, 1, 3, 0, 3, 2, 1, 0, 2, 1, 0, 2, 0, 2, 3, 1, 2, 2, 0, 2, 1, 1, 0, 2, 2, 1, 2, 0, 0, 0, 0, 0, 2, 3, 0, 1, 2, 2, 2, 0, 3, 2, 2, 0, 1, 2, 1, 3, 0, 1, 1, 1, 3, 1, 3, 1, 2, 2, 1, 2, 2, 1, 3, 0, 1, 3, 2, 3, 1, 0, 3, 1, 2, 1, 1, 3, 3, 2, 3, 3, 3, 0, 1, 2, 2, 0, 0, 0, 2, 1, 3, 1, 3, 2, 1, 0, 1, 3, 3, 3, 3, 2, 0, 2, 3, 1, 1, 3, 3, 0, 2, 2, 1, 3, 1, 3, 0, 1, 3, 2, 2, 2, 3, 1, 0, 1, 1, 2, 0, 0, 1, 0, 2, 1, 2, 2, 2, 3, 1, 1, 0, 3, 3, 1, 3, 0, 0, 3, 1, 3, 1, 0, 2, 1, 1, 2, 2, 3, 0, 2, 3, 1, 2, 2, 2, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 3, 1, 1, 2, 3, 0, 0, 3, 2, 1, 0, 1, 3, 3, 1, 1, 3, 3, 0, 1, 1, 0, 1, 2, 0, 2, 3, 0, 2, 0, 1, 2, 2, 2, 0, 1, 3, 0, 0, 1, 1, 0, 2, 0, 0, 0, 2, 3, 3, 2, 0, 0, 2, 1, 2, 3, 3, 2, 3, 1, 2, 1, 3, 0, 3, 0, 1, 2, 0, 1, 0, 2, 2, 2, 2, 2, 2, 0, 1, 1, 2, 1, 2, 1, 2, 0, 0, 1, 2, 3, 2, 1, 0, 2, 1, 0, 2, 3, 2, 2, 0, 2, 0, 2, 1, 3, 0, 3, 3, 1, 1, 2, 2, 3, 3, 0, 3, 3, 1, 2, 2, 0, 3, 3, 2, 0, 3, 0, 3, 1, 2, 0, 0, 3, 2, 1, 2, 3, 0, 1, 0, 1, 3, 3, 0, 2, 3, 0, 1, 1, 2, 0, 1, 1, 3, 3, 1, 2, 3, 1, 3, 1, 1, 3, 0, 3, 0, 2, 2, 1, 0, 3, 2, 3, 2, 3, 2, 1, 3, 3, 2, 1, 3, 3, 2, 2, 2, 3, 0, 1, 0, 0, 3, 1, 3, 3, 1, 0, 2, 3, 1, 2, 2, 3, 1, 0, 2, 3, 2, 1, 3, 0, 
2, 2, 0, 0, 1, 2, 0, 1, 0, 0, 1, 3, 2, 1, 2, 3, 1, 0, 2, 2, 2, 1, 1, 0, 1, 0, 3, 3, 1, 2, 0, 0, 1, 0, 0, 2, 2, 0, 3, 2, 1, 1, 1, 3, 2, 0, 2, 3, 0, 0, 1, 3, 1, 2, 3, 3, 2, 2, 2, 0, 1, 2, 0, 2, 2, 0, 0, 1, 1, 3, 3, 2, 0, 1, 1, 2, 1, 3, 1, 1, 3, 2, 0, 1, 1, 3, 0, 3, 2, 2, 3, 3, 1, 3, 1, 3, 0, 1, 0, 1, 0, 0, 3, 1, 1, 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 0, 3, 3, 0, 1, 2, 3, 0, 3, 3, 1, 2, 3, 2, 3, 0, 3, 3, 3, 0, 0, 3, 3, 2, 2, 0, 1, 0, 0, 1, 0, 0, 1, 3, 0, 2, 1, 0, 3, 0, 3, 0, 3, 2, 3, 2, 2, 2, 1, 1, 2, 1, 0, 1, 3, 2, 2, 0, 2, 2, 2, 2, 2, 3, 2, 3, 1, 3, 3, 0, 0, 2, 1, 3, 0, 0, 1, 2, 2, 2, 3, 0, 3, 3, 1, 2, 1, 3, 2, 3, 1, 0, 2, 0, 3, 0, 3, 1, 3, 2, 1, 3, 0, 2, 2, 0, 2, 3, 2, 0, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 0, 3, 1, 2, 3, 1, 2, 2, 2, 2, 0, 3, 1, 0, 2, 0, 0, 0, 3, 2, 1, 1, 3, 1, 1, 3, 3, 0, 0, 1, 1, 0, 0, 2, 3, 3, 3, 1, 1, 1, 3, 1, 1, 1, 1, 3, 1, 1, 3, 0, 3, 0, 1, 2, 2, 3, 1, 1, 3, 1, 2, 0, 1, 2, 2, 0, 1, 2, 2, 2, 3, 1, 3, 0, 2, 0, 3, 3, 1, 3, 0, 0, 3, 1, 2, 1, 0, 3, 3, 3, 0, 1, 3, 2, 0, 2, 2, 1, 0, 0, 0, 3, 2, 3, 0, 0, 0, 3, 0, 1, 2, 0, 2, 2, 1, 0, 3, 2, 0, 2, 1, 0, 0, 1, 2, 0, 3, 1, 1, 3, 1, 1, 2, 3, 1, 2, 0, 1, 2, 0, 2, 0, 0, 0, 2, 1, 1, 2, 3, 1, 0, 1, 1, 0, 2, 0, 0, 1, 1, 2, 0, 2, 3, 2, 2, 0, 1, 2, 1, 3, 2, 0, 3, 2, 0, 2, 3, 1, 0, 3, 2, 0, 0, 0, 1, 2, 0, 1, 3, 1, 3, 3, 3, 3, 1, 1, 3, 2, 3, 1, 1, 1, 1, 1, 3, 1, 3, 3, 3, 3, 2, 1, 3, 2, 1, 0, 0, 1, 2, 3, 2, 1, 2, 2, 0, 3, 3, 0, 2, 3, 1, 3, 0, 2, 0, 0, 3, 3, 3, 2, 2, 1, 0, 2, 3, 1, 2, 3, 3, 0, 2, 1, 2, 0, 3, 2, 3, 3, 2, 1, 2, 3, 1, 2, 1, 1, 2, 1, 1, 1, 3, 3, 2, 3, 1, 1, 1, 0, 0, 0, 0, 2, 1, 2, 2, 1, 1, 2, 0, 3, 3, 2, 3, 0, 0, 0, 2, 3, 1, 3, 0, 1, 2, 3, 0, 0, 0, 1, 0, 1, 1, 0, 3, 3, 3, 2, 0, 0, 0, 0, 3, 3, 2, 2, 0, 2, 3, 2, 1, 0, 1, 2, 1, 3, 1, 2, 3, 1, 3, 3, 2, 1, 0, 2, 0, 3, 0, 0, 3, 0, 0, 2, 3, 2, 1, 3, 0, 0, 1, 2, 0, 2, 0, 2, 2, 1, 0, 1, 2, 3, 1, 1, 0, 1, 3, 0, 0, 3, 0, 3, 3, 0, 1, 2, 2, 2, 2, 3, 2, 3, 1, 3, 2, 1, 1, 0, 2, 1, 1, 0, 0, 2, 1, 1, 3, 0, 1, 3, 3, 2, 2, 2, 2, 0, 1, 1, 2, 3, 0, 1, 2, 1, 0, 0, 2, 1, 0, 0, 2, 2, 0, 
2, 0, 2, 3, 0, 2, 1, 3, 2, 3, 2, 0, 1, 2, 1, 2, 0, 0, 2, 1, 3, 3, 1, 3, 1, 2, 0, 1, 0, 2, 2, 3, 2, 0, 2, 2, 2, 3, 2, 0, 2, 0, 1, 0, 2, 2, 2, 2, 3, 1, 0, 2, 0, 1, 1, 2, 0, 1, 3, 0, 3, 1, 3, 2, 1, 2, 0, 0, 1, 2, 0, 0, 2, 1, 0, 0, 0, 2, 3, 3, 3, 3, 1, 0, 0, 2, 2, 0, 0, 1, 1, 3, 3, 0, 1, 0, 2, 2, 0, 0, 0, 1, 0, 3, 2, 0, 3, 2, 2, 2, 1, 2, 1, 2, 2, 2, 1, 0, 2, 1, 1, 3, 0, 0, 0}; uint8_t *query = &cquery[0]; uint8_t* target = &ctarget[0]; bwa_fill_scmat(A, B, mat); if(GPU) { int h0 = H0; int w = W; int32_t *h; int8_t *qp; // query profile int i, j, k; int oe_del = O_DEL + E_DEL; // opening and ending deletion int oe_ins = O_INS + E_INS; // opening and ending insertion int max, max_i, max_j, max_ins, max_del, max_ie, gscore, max_off; int passes, t_lastp; // number of passes and number of thread active in the last pass // allocate memory qp = (int8_t*)malloc(QLEN * MATH_SIZE); h = (int32_t*)calloc(QLEN + 1, sizeof(int32_t)); // generate the query profile for (k = i = 0; k < MATH_SIZE; ++k) { const int8_t *p = &mat[k * MATH_SIZE]; for (j = 0; j < QLEN; ++j) { qp[i++] = p[query[j]]; } } // fill the first row h[0] = h0; h[1] = h0 > oe_ins? h0 - oe_ins : 0; for (j = 2; j <= QLEN && h[j-1] > E_INS; ++j) { h[j] = h[j - 1] - E_INS; } // adjust $w if it is too large k = MATH_SIZE * MATH_SIZE; for (i = 0, max = 0; i < k; ++i) // get the max score max = max > mat[i]? max : mat[i]; max_ins = (int)((double)(QLEN * max + END_BONUS - O_INS) / E_INS + 1.); max_ins = max_ins > 1? max_ins : 1; w = w < max_ins? w : max_ins; max_del = (int)((double)(QLEN * max + END_BONUS - O_DEL) / E_DEL + 1.); max_del = max_del > 1? max_del : 1; w = w < max_del? w : max_del; // TODO: is this necessary? 
// DP loop max = h0, max_i = max_j = -1; max_ie = -1, gscore = -1; max_off = 0; // Initialize // memset: max, max_j, max_i, max_ie, gscore, max_off -> GPU // kernel parameters: // value: w, oe_ins, e_ins, o_del, e_del, oe_del, tlen, qlen, passes, t_lastp, h0, zdrop // memcpy: e[...], h[...], qp[...], target[...] int *d_max, *d_max_j, *d_max_i, *d_max_ie, *d_gscore, *d_max_off; int32_t *d_h; int8_t *d_qp; uint8_t *d_target; passes = (int)((double)TLEN / (double)WARP + 1.); t_lastp = TLEN - (TLEN / WARP) * WARP; // gpuErrchk(cudaDeviceSetLimit(cudaLimitMallocHeapSize, FIXED_HEAP * ONE_MBYTE)); // Allocate device memory gpuErrchk(cudaMalloc(&d_max, sizeof(int))); gpuErrchk(cudaMalloc(&d_max_j, sizeof(int))); gpuErrchk(cudaMalloc(&d_max_i, sizeof(int))); gpuErrchk(cudaMalloc(&d_max_ie, sizeof(int))); gpuErrchk(cudaMalloc(&d_gscore, sizeof(int))); gpuErrchk(cudaMalloc(&d_max_off, sizeof(int))); gpuErrchk(cudaMalloc(&d_h, sizeof(int32_t) * (QLEN + 1))); gpuErrchk(cudaMalloc(&d_qp, sizeof(int8_t) * QLEN * MATH_SIZE)); gpuErrchk(cudaMalloc(&d_target, sizeof(uint8_t) * TLEN)); // Transfer data to GPU gpuErrchk(cudaMemcpy(d_h, h, sizeof(int32_t) * (QLEN + 1), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_qp, qp, sizeof(int8_t) * QLEN * MATH_SIZE, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_target, target, sizeof(uint8_t) * TLEN, cudaMemcpyHostToDevice)); // The kernel // if(DEBUG) printf("Passes = %d, t_lastp = %d\n", passes, t_lastp); sw_kernel<<<1, WARP, 2 * (QLEN + 1) * sizeof(int32_t) + QLEN * MATH_SIZE * sizeof(int8_t)>>>\ (d_max, d_max_i, d_max_j, d_max_ie, d_gscore, d_max_off, \ tcheck, w, oe_ins, E_INS, O_DEL, E_DEL, oe_del, MATH_SIZE, \ TLEN, QLEN, passes, t_lastp, h0, ZDROP, \ d_h, d_qp, d_target); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Deallocate host variables free(h); free(qp); // Get the result back from kernel gpuErrchk(cudaMemcpy(&max, d_max, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&max_i, d_max_i, 
sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&max_j, d_max_j, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&max_ie, d_max_ie, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&gscore, d_gscore, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&max_off, d_max_off, sizeof(int), cudaMemcpyDeviceToHost)); // Deallocate CUDA variables gpuErrchk(cudaFree(d_max_j)); gpuErrchk(cudaFree(d_max_i)); gpuErrchk(cudaFree(d_max_ie)); gpuErrchk(cudaFree(d_gscore)); gpuErrchk(cudaFree(d_max_off)); gpuErrchk(cudaFree(d_max)); gpuErrchk(cudaFree(d_h)); gpuErrchk(cudaFree(d_qp)); gpuErrchk(cudaFree(d_target)); if(DEBUG && pr) printf("max = %d, max_i = %d, max_j = %d, max_ie = %d, gscore = %d, max_off = %d\n",\ max, max_i, max_j, max_ie, gscore, max_off); } else ksw_extend2(QLEN, &query[0], TLEN, &target[0], MATH_SIZE, mat, \ O_DEL, E_DEL, O_INS, E_INS, W, END_BONUS, ZDROP, H0); return 0; } /******************** *** SW extension *** ********************/ int ksw_extend2(int qlen, const uint8_t *query, int tlen, const uint8_t *target, int m, const int8_t *mat, \ int o_del, int e_del, int o_ins, int e_ins, int w, int end_bonus, int zdrop, int h0) { eh_t *eh; // score array int8_t *qp; // query profile int i, j, k, \ oe_del = o_del + e_del, \ oe_ins = o_ins + e_ins, \ beg, end, max, max_i, max_j, max_ins, max_del, max_ie, gscore, max_off; assert(h0 > 0); // allocate memory qp = (int8_t*)malloc(qlen * m); eh = (eh_t*)calloc(qlen + 1, 8); // generate the query profile for (k = i = 0; k < m; ++k) { const int8_t *p = &mat[k * m]; for (j = 0; j < qlen; ++j) qp[i++] = p[query[j]]; } // fill the first row eh[0].h = h0; eh[1].h = h0 > oe_ins? h0 - oe_ins : 0; for (j = 2; j <= qlen && eh[j-1].h > e_ins; ++j) eh[j].h = eh[j-1].h - e_ins; // adjust $w if it is too large k = m * m; for (i = 0, max = 0; i < k; ++i) // get the max score max = max > mat[i]? 
max : mat[i]; max_ins = (int)((double)(qlen * max + end_bonus - o_ins) / e_ins + 1.); max_ins = max_ins > 1? max_ins : 1; w = w < max_ins? w : max_ins; max_del = (int)((double)(qlen * max + end_bonus - o_del) / e_del + 1.); max_del = max_del > 1? max_del : 1; w = w < max_del? w : max_del; // TODO: is this necessary? // DP loop max = h0, max_i = max_j = -1; max_ie = -1, gscore = -1; max_off = 0; beg = 0, end = qlen; for (i = 0; LIKELY(i < tlen); ++i) { int t, f = 0, h1, m = 0, mj = -1; int8_t *q = &qp[target[i] * qlen]; // apply the band and the constraint (if provided) if (beg < i - w) beg = i - w; if (end > i + w + 1) end = i + w + 1; if (end > qlen) end = qlen; // compute the first column if (beg == 0) { h1 = h0 - (o_del + e_del * (i + 1)); if (h1 < 0) h1 = 0; } else h1 = 0; for (j = beg; LIKELY(j < end); ++j) { // At the beginning of the loop: eh[j] = { H(i-1,j-1), E(i,j) }, f = F(i,j) and h1 = H(i,j-1) // Similar to SSE2-SW, cells are computed in the following order: // H(i,j) = max{H(i-1,j-1)+S(i,j), E(i,j), F(i,j)} // E(i+1,j) = max{H(i,j)-gapo, E(i,j)} - gape // F(i,j+1) = max{H(i,j)-gapo, F(i,j)} - gape eh_t *p = &eh[j]; int h, M = p->h, e = p->e; // get H(i-1,j-1) and E(i-1,j) if(DEBUG) printf("i = %d, j = %d, M = %d, h1 = %d\n", i, j, M, h1); p->h = h1; // set H(i,j-1) for the next row M = M? M + q[j] : 0;// separating H and M to disallow a cigar like "100M3I3D20M" h = M > e? M : e; // e and f are guaranteed to be non-negative, so h>=0 even if M<0 h = h > f? h : f; if(DEBUG) printf("i = %d, j = %d, h = %d\n", i, j, h); h1 = h; // save H(i,j) to h1 for the next column mj = m > h? mj : j; // record the position where max score is achieved m = m > h? m : h; // m is stored at eh[mj+1] t = M - oe_del; t = t > 0? t : 0; e -= e_del; e = e > t? e : t; // computed E(i+1,j) p->e = e; // save E(i+1,j) for the next row t = M - oe_ins; t = t > 0? t : 0; f -= e_ins; f = f > t? 
f : t; // computed F(i,j+1) if(DEBUG) printf("i = %d, j = %d, M = %d, h = %d, h1 = %d, e = %d, f = %d, t = %d\n", \ i, j, M, h, h1, e, f, t); } eh[end].h = h1; eh[end].e = 0; if (j == qlen) { max_ie = gscore > h1? max_ie : i; gscore = gscore > h1? gscore : h1; } if (m == 0) break; if (m > max) { max = m, max_i = i, max_j = mj; max_off = max_off > abs(mj - i)? max_off : abs(mj - i); } else if (zdrop > 0) { if (i - max_i > mj - max_j) { if (max - m - ((i - max_i) - (mj - max_j)) * e_del > zdrop) break; } else { if (max - m - ((mj - max_j) - (i - max_i)) * e_ins > zdrop) break; } } // update beg and end for the next round for (j = beg; LIKELY(j < end) && eh[j].h == 0 && eh[j].e == 0; ++j); beg = j; for (j = end; LIKELY(j >= beg) && eh[j].h == 0 && eh[j].e == 0; --j); end = j + 2 < qlen? j + 2 : qlen; //beg = 0; end = qlen; // uncomment this line for debugging } free(eh); free(qp); if(DEBUG) printf("max = %d, max_i = %d, max_j = %d, max_ie = %d, " "gscore = %d, max_off = %d\n", max, max_i, max_j, max_ie, gscore, max_off); return max; } /***************** * CIGAR related * *****************/ void bwa_fill_scmat(int a, int b, int8_t mat[25]) { int i, j, k; for (i = k = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) mat[k++] = i == j? a : -b; mat[k++] = -1; // ambiguous base } for (j = 0; j < 5; ++j) mat[k++] = -1; }
6,018
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 8
#define THREAD_PER_BLOCK 2

/*
 * Transpose of a size x size row-major matrix:
 *   out[r*size + c] = in[c*size + r].
 * Expects a 1-D launch covering at least size*size threads; surplus
 * threads in the grid tail are guarded out.
 */
__global__ void transpose(int *in, int *out, int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size * size) {  /* guard: grid rarely divides data exactly */
        out[index] = in[(index / size) + size * (index % size)];
    }
}

/* Report-and-exit wrapper for CUDA runtime calls. */
static void checkCuda(cudaError_t err, const char *msg)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", msg, cudaGetErrorString(err));
        exit(1);
    }
}

int main()
{
    int *in, *out;
    int *d_in, *d_out;
    int size = N * N * sizeof(int);
    int i;

    checkCuda(cudaMalloc((void **)&d_in, size), "cudaMalloc d_in");
    checkCuda(cudaMalloc((void **)&d_out, size), "cudaMalloc d_out");

    in = (int *)malloc(size);
    out = (int *)malloc(size);

    for (i = 0; i < N * N; ++i) {
        in[i] = i;
    }

    checkCuda(cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice),
              "cudaMemcpy H2D");

    transpose<<<N * N / THREAD_PER_BLOCK, THREAD_PER_BLOCK>>>(d_in, d_out, N);
    /* Launches are asynchronous; this catches bad-configuration errors. */
    checkCuda(cudaGetLastError(), "kernel launch");

    checkCuda(cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost),
              "cudaMemcpy D2H");

    /* Print the input matrix, a separator, then the transposed result. */
    for (i = 0; i < N * N; ++i) {
        printf("%2d ", in[i]);
        if ((i + 1) % N == 0) {
            printf("\n");
        }
    }
    printf("--------\n");
    for (i = 0; i < N * N; ++i) {
        printf("%2d ", out[i]);
        if ((i + 1) % N == 0) {
            printf("\n");
        }
    }

    free(in);
    free(out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
6,019
#include <iostream>
#include <cstdio>   // printf is used in device code below
using namespace std;

__device__ double counter = 0.5;

/*
 * Software double-precision atomic add built on 64-bit atomicCAS.
 * Returns the old value, matching the atomicAdd convention.
 * NOTE: on SM60+ the native atomicAdd(double*, double) is available
 * and should be preferred; this emulation is for older devices.
 */
__device__ double myAtomicAdd(double *address, double val)
{
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Retry until no other thread modified the word between our
        // read and the compare-and-swap.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

/* Every thread atomically adds `by` to the device-global counter. */
__global__ void increment(double by)
{
    myAtomicAdd(&counter, by);
}

/* Prints the accumulated counter from a single thread. */
__global__ void print()
{
    printf("counter = %f\n", counter);
}

int main()
{
    // The original code passed these with the names swapped; the first
    // launch argument is the grid (block count), the second the block
    // (thread count). Values are identical, so behavior is unchanged.
    const int gridSize = 1024;
    const int blockSize = 1024;
    const double by = 1.7;

    increment<<<gridSize, blockSize>>>(by);
    print<<<1, 1>>>();
    cudaDeviceSynchronize();

    printf("The correct answer is %f\n", (0.5 + by * blockSize * gridSize));
}
6,020
////#include<math.h> ////#include<cuda.h> ////#include<helper_math.h> //#include<device_launch_parameters.h> //#include<cutil_math.h> //#include<cutil_inline.h> //#include<cutil_gl_inline.h> //#include<cuda_gl_interop.h> //////////////////////////////////for __syncthreads() //#ifndef __CUDACC__ // #define __CUDACC__ //#endif // //#include<device_functions.h> // // //float gain, xStart, yStart, zOffset, octaves, lacunarity; //#define Z_PLANE 50.0f // //__constant__ unsigned char c_perm[256]; //__shared__ unsigned char s_perm[256]; ///shared memory copy of permutation array //unsigned char * d_perm = NULL; ///global memory copy of permutation array ////host version of permutation array //const static unsigned char h_perm[] = { 151,160,137,91,90,15, // 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, // 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, // 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, // 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, // 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, // 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, // 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, // 223,183,170,213,119,248,152,2,44,154,163, 70,221,153,101,155,167, 43,172,9, // 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, // 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, // 49,192,214, 31,181,199,106,157,184,84,204,176,115,121,50,45,127, 4,150,254, // 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 //}; // //__device__ inline int perm(int i) //{ // return(s_perm[i&0xff]); //} // //__device__ inline float fade(float t) //{ // return t * t*t*(t*(t*6.0f - 15.0f) + 10.0f); //} // //__device__ inline float lerpP(float t, float a, float b) //{ // return a + t * (b - a); //} // 
//__device__ inline float grad(int hash, float x, float y, float z) //{ // int h = hash & 15; //convert LO 4 bits of Hash code // float u = h < 8 ? x : y, //into 12 gradient directions // v = h < 4 ? y : h == 12 || h == 14 ? x : z; // return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v); //} // // // //__device__ float inoise(float x, float y, float z) //{ // int X = ((int)floorf(x)) & 255, //Find unit cube // Y = ((int)floorf(y)) & 255, //contains Point // Z = ((int)floorf(z)) & 255; // // // x -= floorf(x); //Find relative X,Y,Z // y -= floorf(y); //of that point in cube // z -= floorf(z); // // float u = fade(x), //compute fade curves // v = fade(y), // w = fade(z); // // int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, //HASH coordinates of // B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; //the 8 cube corners // // return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z), // grad(perm(BA), x - 1.0f, y, z)), // lerpP(u, grad(perm(AB), x, y - 1.0, z), // grad(perm(BB), x - 1.0, y - 1.0, z))), // lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.0f), // grad(perm(BA + 1), x - 1.0f, y, z - 1.0f)), // lerpP(u, grad(perm(AB + 1), x, y - 1.0f, z - 1.0f), // grad(perm(BB + 1), x - 1.0, y - 1.0, z - 1.0)))); // // return(perm(X)); // //} // // // //__device__ float fBm(float x, float y, int octaves, float lacunarity = 2.0f, float gain = 0.5f) //{ // float freq = 1.0f, amp = 0.5f; // float sum = 0.0f; // for (int i = 0; i < octaves; i++) // { // sum += inoise(x*freq, y*freq, Z_PLANE)*amp; // freq *= lacunarity; // amp *= gain; // } // return sum; //} // // // //__device__ inline uchar4 colorElevation(float texHeight) //{ // uchar4 pos; // // //color texel (r,g,b,a) // if (texHeight < -1.000f) pos = make_uchar4(000, 000, 124, 255); //deeps // else if (texHeight < -0.2500f) pos = make_uchar4(000, 000, 255, 255); //shallow // else if (texHeight < 0.0000f) pos = make_uchar4(000, 128, 255, 255); //shore // else if (texHeight < 0.0125f) pos = 
make_uchar4(240, 240, 064, 255); //sand // else if (texHeight < 0.0125f) pos = make_uchar4(032, 160, 000, 255); //grass // else if (texHeight < 0.3750f) pos = make_uchar4(224, 224, 000, 255); //dirt // else if (texHeight < 0.7500f) pos = make_uchar4(128, 128, 128, 255);//rock // else pos = make_uchar4(255, 255, 255, 255); //snow // // return(pos); // // //} // //void checkCUDAError(const char *msg) //{ // cudaError_t err = cudaGetLastError(); // if (cudaSuccess != err) // { // fprintf(stderr, "Cuda error : %s:%s", msg, cudaGetErrorString(err)); // exit(EXIT_FAILURE); // } //} // // // // // /////Simple Kernel fills an array with perlin noise //__global__ void k_perlin(float4 *pos, uchar4 *colorPos, // unsigned int width, unsigned int height, // float2 start,float2 delta, float gain, // float zOffset, unsigned char* d_perm, // float ocataves, float lacunarity) //{ // int idx = blockIdx.x * blockDim.x + threadIdx.x; // float xCur = start.x + ((float)(idx%width)) * delta.x; // float yCur = start.x + ((float)(idx / width)) * delta.y; // // if (threadIdx.x < 256) // //optimization:this causes bank conflicts // s_perm[threadIdx.x] = d_perm[threadIdx.x]; // //this synchronization can be imp.if there are more than 256 threads // __syncthreads(); // // //Each thread creates one pixel location in the texture (textel) // if (idx < width*height) // { // float w = fBm(xCur, yCur, ocataves, lacunarity, gain) + zOffset; // // colorPos[idx] = colorElevation(w); // float u = ((float)(idx%width)) / (float)width; // float v = ((float)(idx / width)) / (float)height; // u = u * 2.0f-1.0f; // v = v * 2.0f - 1.0f; // w = (w > 0.0f) ? 
w : 0.0f; //dont show region underwater // pos[idx] = make_float4(u, w, v, 1.0f); // // } //} // // // //uchar4 *eColor = NULL; ////Wrapper for __global__ call that setups the kernel call //extern "C" void launch_kernel(float4 *pos, uchar4 *posColor, // unsigned int image_width, unsigned int image_height, float time) //{ // int nThreads = 256; //must be equal or larger than 256! // int totalThreads = image_height * image_width; // int nBlocks = totalThreads / nThreads; // nBlocks += ((totalThreads%nThreads) > 0) ? 1 : 0; // // float xExtent = 10.0f; // float yExtent = 10.0f; // float xDelta = xExtent / (float)image_width; // float yDelta = yExtent / (float)image_height; // // // if (!d_perm) // { // //for convenience allocate and copy d_perm here // cudaMalloc((void**)&d_perm, sizeof(h_perm)); // cudaMemcpy(d_perm, h_perm, sizeof(h_perm), cudaMemcpyHostToDevice); // checkCUDAError("d_perm malloc or copy failed!!"); // } // // k_perlin << <nBlocks, nThreads >> > (pos, posColor, image_width, image_height, // make_float2(xStart, yStart), // make_float2(xDelta, yDelta), // gain, zOffset, d_perm, // octaves, lacunarity); // // //make certain the kernel has completed // cudaThreadSynchronize(); // checkCUDAError("kernel failed!!"); // //} // // // // //
6,021
/****************************************
 Fill a device array of doubles with a
 constant value.
*****************************************/
#include <cuda.h>

/*
 * Grid-stride fill kernel: sets every element of B[0..size) to val.
 * Correct for any launch configuration; size_t indexing avoids the
 * int overflow the original had for size > INT_MAX.
 */
__global__ void setVal(double * B, size_t size, double val)
{
    size_t tid = threadIdx.x + (size_t)blockDim.x * blockIdx.x;
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (; tid < size; tid += stride)
        B[tid] = val;
}

extern "C" {
/*
 * Host wrapper: fills B (a device pointer) with `val`.
 * Sizes the grid to cover `size` instead of launching a single block;
 * the cap keeps us inside 1-D grid limits while the grid-stride loop
 * in the kernel covers any remainder.
 */
void cudaSet(double * B, size_t size, double val)
{
    const int threads = 128;
    size_t blocks = (size + threads - 1) / threads;  // ceil-div
    if (blocks < 1) blocks = 1;                      // size == 0 still launches legally
    if (blocks > 65535) blocks = 65535;              // conservative 1-D grid cap
    setVal<<<(unsigned int)blocks, threads>>>(B, size, val);
}
}
6,022
/*
 * Stores the grid dimension into a[gridDim.x].
 * NOTE(review): gridDim.x is uniform across the whole grid, so every
 * launched thread computes the same i and writes the identical value to
 * the same element — likely the intent was a per-thread index such as
 * blockIdx.x * blockDim.x + threadIdx.x; confirm with the author.
 * Also assumes a has at least gridDim.x + 1 elements — verify at the
 * call site.
 */
__global__ void kernel(float *a)
{
    int i = gridDim.x;
    a[i] = i;
}
6,023
#include "includes.h"

// Copies per-atom velocity components from the double-precision input
// buffers into the single-precision output buffers, one thread per atom.
// Threads beyond num_atoms do nothing.
__global__ void gpu_copy_velocity(
    const int num_atoms,
    const double* g_vx_i,
    const double* g_vy_i,
    const double* g_vz_i,
    float* g_vx_o,
    float* g_vy_o,
    float* g_vz_o)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_atoms)
        return;  // grid tail: nothing to copy
    g_vx_o[idx] = g_vx_i[idx];
    g_vy_o[idx] = g_vy_i[idx];
    g_vz_o[idx] = g_vz_i[idx];
}
6,024
/*
 Code adapted from book "CUDA by Example: An Introduction to
 General-Purpose GPU Programming"

 This code computes a visualization of the Julia set. Two-dimensional
 "bitmap" data which can be plotted is computed by the function kernel.
 The data can be viewed with gnuplot.

 The Julia set iteration is:  z = z**2 + C
 If it converges, then the initial point z is in the Julia set.
*/
#include <stdio.h>
#include <stdlib.h>

#define DIM 1000

/* Returns 1 if pixel (x,y), mapped into the complex plane, stays
   bounded under 200 iterations of z = z^2 + C; 0 if it diverges. */
__device__ int julia( int x, int y )
{
    const float scale = 1.5f;
    float jx = scale * (float)(DIM/2 - x)/(DIM/2);
    float jy = scale * (float)(DIM/2 - y)/(DIM/2);

    float cr = -0.8f;   /* real part of C */
    float ci = 0.156f;  /* imaginary part of C */
    float ar = jx;
    float ai = jy;
    float artmp;

    for (int i = 0; i < 200; i++) {
        artmp = ar;
        ar = (ar*ar - ai*ai) + cr;   /* Re(z^2 + C) */
        ai = 2.0f*artmp*ai + ci;     /* Im(z^2 + C) */
        if ( (ar*ar + ai*ai) > 1000 )
            return 0;                /* diverged: not in the set */
    }
    return 1;
}

/* One block per pixel (grid is DIM x DIM, a single thread per block). */
__global__ void kernel(int *arr )
{
    // map from blockIdx to pixel position
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;

    // now calculate the value at that position
    arr[offset] = julia( x, y );
}

// globals needed by the update routine (unused in this standalone demo)
struct DataBlock {
    unsigned char *dev_bitmap;
};

int main( void )
{
    FILE *out;
    int *arr;
    int *arr_dev;
    size_t memsize;
    int error;

    memsize = DIM * DIM * sizeof(int);

    /* DIM*DIM ints is ~4 MB — too large for a typical stack frame, so
       the host buffer is heap-allocated instead of a local array. */
    arr = (int *)malloc(memsize);
    if (arr == NULL) {
        printf ("Error allocating host buffer\n");
        exit (1);
    }

    if(error = cudaMalloc( (void **) &arr_dev, memsize ) ) {
        printf ("Error in cudaMalloc %d\n", error);
        exit (error);
    }

    dim3 grid(DIM,DIM);
    kernel<<<grid,1>>>( arr_dev );
    /* Launches are asynchronous; catch configuration errors here. */
    if(error = cudaGetLastError()) {
        printf ("Error in kernel launch %d\n", error);
        exit (error);
    }

    /* cudaMemcpy blocks until the kernel has finished, so no extra
       synchronization is needed before reading the result. */
    if(error = cudaMemcpy(arr, arr_dev, memsize, cudaMemcpyDeviceToHost ) ) {
        printf ("Error in cudaMemcpy %d\n", error);
        exit (error);
    }

    cudaFree( arr_dev );

    /* Emit one "x y" line per in-set pixel for gnuplot. */
    out = fopen( "julia.dat", "w" );
    for (int y=0; y<DIM; y++) {
        for (int x=0; x<DIM; x++) {
            int offset = x + y * DIM;
            if(arr[offset]==1){
                fprintf(out,"%d %d \n",x,y);
            }
        }
    }
    fclose(out);
    free(arr);
}
6,025
#include <stdio.h> #include <math.h> #define BLOCK_DIM_X 32 #define BLOCK_DIM_Y 16 #define VECTOR_DIM 300 #define PARTITION_DIM 32 __global__ void vectorAdd(float *A, const float *B,unsigned int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { A[i] = B[i] - A[i]; } } __global__ void L2_norm(float *g_idata, float *g_odata) { __shared__ float sdata[256]; unsigned int tid = threadIdx.x; sdata[tid] = 0; float A = g_idata[tid + VECTOR_DIM * blockIdx.x]; sdata[tid] += A * A; if(tid + blockDim.x < 300){ // blockDimx.x = 256 A = g_idata[tid + blockDim.x + VECTOR_DIM * blockIdx.x]; sdata[tid] += A * A; } __syncthreads(); if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); if (tid < 32) { sdata[tid] += sdata[tid + 32]; } __syncthreads(); if(tid < 16){ sdata[tid] += sdata[tid + 16]; } __syncthreads(); if(tid < 8){ sdata[tid] += sdata[tid + 8]; } __syncthreads(); if(tid < 4){ sdata[tid] += sdata[tid + 4]; } __syncthreads(); if(tid < 2){ sdata[tid] += sdata[tid + 2]; } __syncthreads(); if(tid < 1){ sdata[tid] += sdata[tid + 1]; } __syncthreads(); if (tid == 0){ // printf("%f",sdata[0]); g_odata[blockIdx.x] = sqrtf(sdata[0]); } } __global__ void Dot_product(float *A_input, float *B_input,float * output) { __shared__ float sdata[256]; unsigned int tid = threadIdx.x; sdata[tid] = 0; float A = A_input[tid + VECTOR_DIM * blockIdx.x]; float B = B_input[tid + VECTOR_DIM * blockIdx.x]; sdata[tid] += A * B; if(tid + blockDim.x < 300){ A = A_input[tid + blockDim.x + VECTOR_DIM * blockIdx.x]; B = B_input[tid + blockDim.x + VECTOR_DIM * blockIdx.x]; sdata[tid] += A * B; } __syncthreads(); if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); if (tid < 32) { sdata[tid] += sdata[tid + 32]; } __syncthreads(); if(tid < 16){ sdata[tid] += sdata[tid + 16]; } __syncthreads(); if(tid < 8){ sdata[tid] += 
sdata[tid + 8]; } __syncthreads(); if(tid < 4){ sdata[tid] += sdata[tid + 4]; } __syncthreads(); if(tid < 2){ sdata[tid] += sdata[tid + 2]; } __syncthreads(); if(tid < 1){ sdata[tid] += sdata[tid + 1]; } __syncthreads(); if (tid == 0){ // printf("%f",sdata[0]); output[blockIdx.x] = (sdata[0]); } } __global__ void Normalize_vector(float *g_idata) { __shared__ float sdata[256]; __shared__ float norm; unsigned int tid = threadIdx.x; sdata[tid] = 0; float A = g_idata[tid + VECTOR_DIM * blockIdx.x]; sdata[tid] += A * A; if(tid + blockDim.x < 300){ A = g_idata[tid + blockDim.x + VECTOR_DIM * blockIdx.x]; sdata[tid] += A * A; } __syncthreads(); if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); if (tid < 32) { sdata[tid] += sdata[tid + 32]; } __syncthreads(); if(tid < 16){ sdata[tid] += sdata[tid + 16]; } __syncthreads(); if(tid < 8){ sdata[tid] += sdata[tid + 8]; } __syncthreads(); if(tid < 4){ sdata[tid] += sdata[tid + 4]; } __syncthreads(); if(tid < 2){ sdata[tid] += sdata[tid + 2]; } __syncthreads(); if(tid < 1){ sdata[tid] += sdata[tid + 1]; } __syncthreads(); if (tid == 0){ // printf("%f",sdata[0]); norm = sqrtf(sdata[0]); } __syncthreads(); g_idata[tid + VECTOR_DIM * blockIdx.x] = g_idata[tid + VECTOR_DIM * blockIdx.x]/norm; if(tid + blockDim.x < 300){ g_idata[tid + blockDim.x + VECTOR_DIM * blockIdx.x] = g_idata[tid + blockDim.x + VECTOR_DIM * blockIdx.x]/ norm; } } __global__ void cos_distance(float *A, float *B_L2_NORM, int* Count, unsigned int num_entries) { __shared__ float B_L2_NORM_SHARED; __shared__ float B[300]; __shared__ float AB[16][32]; int curr_entry = blockDim.y * blockIdx.x + threadIdx.y; // printf("%d\n",curr_entry); float A_L2_NORM = B_L2_NORM[curr_entry]; float A_LOCAL[10]; unsigned int Local_Count = 0; //Load the current entry if(curr_entry < num_entries){ for(int i = 0; i < VECTOR_DIM;i += PARTITION_DIM){ if(i+threadIdx.x < VECTOR_DIM){ A_LOCAL[i / PARTITION_DIM] 
= A[VECTOR_DIM * curr_entry+i+threadIdx.x]; } } } __syncthreads(); //loop through all the entries for(unsigned int entry = 0 ; entry < num_entries; entry += 1){ AB[threadIdx.y][threadIdx.x] = 0; if (entry < num_entries){ if(threadIdx.y == 0 && threadIdx.x == 0){ B_L2_NORM_SHARED = B_L2_NORM[entry]; } int B_index = threadIdx.y * BLOCK_DIM_X + threadIdx.x; if( B_index < VECTOR_DIM){ B[B_index] = A[entry * VECTOR_DIM + B_index]; } __syncthreads(); for(unsigned int partition = 0; partition < VECTOR_DIM; partition += PARTITION_DIM){ if(partition + threadIdx.x < VECTOR_DIM){ AB[threadIdx.y][threadIdx.x] += A_LOCAL[partition/PARTITION_DIM] * B[partition + threadIdx.x]; } } __syncthreads(); if(threadIdx.x < 16){ AB[threadIdx.y][threadIdx.x] += AB[threadIdx.y][threadIdx.x+16]; } __syncthreads(); if(threadIdx.x < 8){ AB[threadIdx.y][threadIdx.x] += AB[threadIdx.y][threadIdx.x+8]; } __syncthreads(); if(threadIdx.x < 4){ AB[threadIdx.y][threadIdx.x] += AB[threadIdx.y][threadIdx.x+4]; } __syncthreads(); if(threadIdx.x < 2){ AB[threadIdx.y][threadIdx.x] += AB[threadIdx.y][threadIdx.x+2]; } __syncthreads(); if(threadIdx.x < 1){ AB[threadIdx.y][threadIdx.x] += AB[threadIdx.y][threadIdx.x+1]; } __syncthreads(); if (threadIdx.x == 0 and curr_entry < num_entries){ // printf("curr_entry %d,%d, %f\n", curr_entry,entry,AB[threadIdx.y][0] /(A_L2_NORM * B_L2_NORM_SHARED)); if( AB[threadIdx.y][0] / (A_L2_NORM * B_L2_NORM_SHARED) > 0){ Local_Count += 1; } } } } if (threadIdx.x == 0 and curr_entry < num_entries){ Count[curr_entry] = Local_Count-1; } } __global__ void cos_distance2(float *A, float* B, float *B_L2_NORM, int* Count, unsigned int offset, unsigned int num_entries) { __shared__ float W_2[300]; __shared__ float W_1[300]; __shared__ float W_4_W_2[16][32]; __shared__ float W_4_W_1[16][32]; int curr_entry = blockDim.y * blockIdx.x + threadIdx.y + offset; float A_L2_NORM = B_L2_NORM[curr_entry]; float W_4_LOCAL[10]; unsigned int Local_Count = 0; //Load the current entry if(curr_entry < 
num_entries){ for(int i = 0; i < VECTOR_DIM;i += PARTITION_DIM){ if(i+threadIdx.x < VECTOR_DIM){ W_4_LOCAL[i / PARTITION_DIM] = B[VECTOR_DIM * curr_entry+i+threadIdx.x]; } } } __syncthreads(); //loop through all the entries for(unsigned int entry = 0 ; entry < num_entries; entry += 1){ W_4_W_2[threadIdx.y][threadIdx.x] = 0; W_4_W_1[threadIdx.y][threadIdx.x] = 0; if (entry < num_entries){ int B_index = threadIdx.y * BLOCK_DIM_X + threadIdx.x; if( B_index < VECTOR_DIM){ W_1[B_index] = A[entry * VECTOR_DIM + B_index]; W_2[B_index] = B[entry * VECTOR_DIM + B_index]; } __syncthreads(); for(unsigned int partition = 0; partition < VECTOR_DIM; partition += PARTITION_DIM){ if(partition + threadIdx.x < VECTOR_DIM){ W_4_W_2[threadIdx.y][threadIdx.x] += W_4_LOCAL[partition/PARTITION_DIM] * W_2[partition + threadIdx.x]; W_4_W_1[threadIdx.y][threadIdx.x] += W_4_LOCAL[partition/PARTITION_DIM] * W_1[partition + threadIdx.x]; } } __syncthreads(); if(threadIdx.x < 16){ W_4_W_2[threadIdx.y][threadIdx.x] += W_4_W_2[threadIdx.y][threadIdx.x+16]; W_4_W_1[threadIdx.y][threadIdx.x] += W_4_W_1[threadIdx.y][threadIdx.x+16]; } __syncthreads(); if(threadIdx.x < 8){ W_4_W_2[threadIdx.y][threadIdx.x] += W_4_W_2[threadIdx.y][threadIdx.x+8]; W_4_W_1[threadIdx.y][threadIdx.x] += W_4_W_1[threadIdx.y][threadIdx.x+8]; } __syncthreads(); if(threadIdx.x < 4){ W_4_W_2[threadIdx.y][threadIdx.x] += W_4_W_2[threadIdx.y][threadIdx.x+4]; W_4_W_1[threadIdx.y][threadIdx.x] += W_4_W_1[threadIdx.y][threadIdx.x+4]; } __syncthreads(); if(threadIdx.x < 2){ W_4_W_2[threadIdx.y][threadIdx.x] += W_4_W_2[threadIdx.y][threadIdx.x+2]; W_4_W_1[threadIdx.y][threadIdx.x] += W_4_W_1[threadIdx.y][threadIdx.x+2]; } __syncthreads(); if(threadIdx.x < 1){ W_4_W_2[threadIdx.y][threadIdx.x] += W_4_W_2[threadIdx.y][threadIdx.x+1]; W_4_W_1[threadIdx.y][threadIdx.x] += W_4_W_1[threadIdx.y][threadIdx.x+1]; } __syncthreads(); if (threadIdx.x == 0 and curr_entry < num_entries){ // printf("curr_entry %d, %f\n", 
curr_entry,W_4_W_2[threadIdx.y][0] * A_L2_NORM / (W_4_W_2[threadIdx.y][0]) ); if( W_4_W_2[threadIdx.y][0] * A_L2_NORM / (W_4_W_1[threadIdx.y][0]) > .50){ Local_Count += 1; } } } } if (threadIdx.x == 0 and curr_entry < num_entries){ Count[curr_entry] = Local_Count; } }
6,026
#include "time.cuh"

// Returns the current wall-clock time in seconds, with microsecond
// resolution, as reported by gettimeofday().
double gettime(){
    struct timeval now;
    gettimeofday(&now, (struct timezone*)0);
    return (double)(now.tv_sec + now.tv_usec * 1.0e-6);
}
6,027
/*
 * Vector addition example using CUDA.
 * The main kernel launch configuration is derived from the number of
 * streaming multiprocessors on the device, so that the grid contains
 * a multiple of the SM count (this addresses the adaptation suggested
 * in the original header comment).
 */

#include <stdio.h>
#include <stdlib.h>

/*
 * Host function to initialize input vector elements. This
 * function simply initializes each element in the vector to
 * a constant number 'num'.
 */
void initWith(float num, float *a, int N)
{
  for(int i = 0; i < N; ++i)
  {
    a[i] = num;
  }
}

/*
 * Device kernel stores into `result` the sum of corresponding
 * elements in input vectors `a` and `b` (both of size N).
 * Grid-stride loop: correct for any launch configuration.
 */
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;

  for(int i = index; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}

/*
 * Host function to confirm values in `vector`. This function
 * assumes all values are the same `target` value.
 */
void checkElementsAre(float target, float *vector, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(vector[i] != target)
    {
      printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
      exit(1);
    }
  }
  printf("All values were calculated correctly. Well done.\n");
}

int main()
{
  const int N = 50000000;
  size_t size = N * sizeof(float);

  float *a;
  float *b;
  float *c;

  /* Unified-memory allocations can fail for large N; check each one. */
  cudaError_t allocErr;
  allocErr = cudaMallocManaged(&a, size);
  if(allocErr != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(allocErr)); return 1; }
  allocErr = cudaMallocManaged(&b, size);
  if(allocErr != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(allocErr)); return 1; }
  allocErr = cudaMallocManaged(&c, size);
  if(allocErr != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(allocErr)); return 1; }

  initWith(3, a, N);
  initWith(7, b, N);
  initWith(0, c, N);

  /* Size the grid as a multiple of the SM count so that every
     multiprocessor on the device receives work. */
  int deviceId;
  int numberOfSMs;
  cudaGetDevice(&deviceId);
  cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);

  size_t threadsPerBlock = 256;
  size_t numberOfBlocks = 32 * (size_t)numberOfSMs;

  cudaError_t addVectorsErr;
  cudaError_t asyncErr;

  addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);

  /* Catch launch-configuration errors (launches are asynchronous). */
  addVectorsErr = cudaGetLastError();
  if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));

  /* Catch asynchronous execution errors from the kernel itself. */
  asyncErr = cudaDeviceSynchronize();
  if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));

  checkElementsAre(10, c, N);

  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
6,028
#include<iostream>
#include<cstdlib>
#include<cuda.h>
using namespace std;

#define N 40*1024

/* Grid-stride element-wise addition: c[i] = a[i] + b[i] for all i < N. */
__global__ void add(int *a,int *b,int *c){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while(tid < N){
        c[tid] = a[tid] + b[tid];
        tid += blockDim.x * gridDim.x;
    }
}

/* Abort with a message if a CUDA runtime call failed. */
static void check(cudaError_t err, const char *what){
    if(err != cudaSuccess){
        cerr << what << ": " << cudaGetErrorString(err) << endl;
        exit(1);
    }
}

int main(){
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    check(cudaMalloc(&dev_a, N * sizeof(int)), "cudaMalloc dev_a");
    check(cudaMalloc(&dev_b, N * sizeof(int)), "cudaMalloc dev_b");
    check(cudaMalloc(&dev_c, N * sizeof(int)), "cudaMalloc dev_c");

    for(int i = 0; i < N; i++){
        a[i] = i;
        b[i] = i * i;
    }

    check(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice), "copy a");
    check(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice), "copy b");

    add<<<128,128>>>(dev_a, dev_b, dev_c);
    /* Launches are asynchronous; catch configuration errors here. */
    check(cudaGetLastError(), "kernel launch");

    check(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost), "copy c");

    /* Print the results, 20 values per line (same format as before). */
    for(int i = 0; i < N; i++){
        if(i != 0 && i % 20 == 0)
            cout << c[i] << endl;
        else
            cout << c[i] << " ";
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
6,029
#include <stdio.h>
// code from mixbench

// Error-checking wrapper: aborts with file/line context on any CUDA API failure.
#define CUDA_SAFE_CALL( call) {                                         \
    cudaError err = call;                                               \
    if( cudaSuccess != err) {                                           \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",   \
            __FILE__, __LINE__, cudaGetErrorString( err) );             \
        exit(EXIT_FAILURE);                                             \
    } }

// SM version to 32 FP cores
// Maps a compute capability (major.minor) to FP32 cores per SM.
// NOTE(review): unlisted architectures fall through to 128 — verify against
// the current _ConvertSMVer2Cores table in the CUDA samples for new GPUs.
static inline int _ConvertSMVer2Cores(int major, int minor)
{
    switch(major){
        case 1:  return 8;
        case 2:  switch(minor){
                     case 1:  return 48;
                     default: return 32;
                 }
        case 3:  return 192;
        case 6:  switch(minor){
                     case 0:  return 64;
                     default: return 128;
                 }
        case 7:  switch(minor){
                     case 0:  return 64;
                     default: return 128;
                 }
        default: return 128;
    }
}

// Computes theoretical peak instruction throughput (*aGIPS, in giga
// instructions/sec) and memory bandwidth (*aGBPS, in bytes/sec) for the
// given device properties, or for the current device when aDeviceProp is NULL.
static inline void GetDevicePeakInfo(double *aGIPS, double *aGBPS, cudaDeviceProp *aDeviceProp = NULL){
    cudaDeviceProp deviceProp;
    int current_device;
    if( aDeviceProp )
        deviceProp = *aDeviceProp;
    else{
        CUDA_SAFE_CALL( cudaGetDevice(&current_device) );
        CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, current_device) );
    }
    const int TotalSPs = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor)*deviceProp.multiProcessorCount;
    *aGIPS = 1000.0 * deviceProp.clockRate * TotalSPs / (1000.0 * 1000.0 * 1000.0); // Giga instructions/sec
//	*aGIPS64 = 1000.0 * deviceProp.clockRate * TotalSPs / (1000.0 * 1000.0 * 1000.0); // Giga instructions/sec
    // DDR: 2 transfers per clock; memoryClockRate is in kHz, bus width in bits.
    *aGBPS = 2.0 * (double)deviceProp.memoryClockRate * 1000.0 * (double)deviceProp.memoryBusWidth / 8.0;
}

#if 0
// Unused helper: fetches the properties of the current device.
static inline cudaDeviceProp GetDeviceProperties(void){
    cudaDeviceProp deviceProp;
    int current_device;
    CUDA_SAFE_CALL( cudaGetDevice(&current_device) );
    CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, current_device) );
    return deviceProp;
}
#endif

// Print basic device information
static void StoreDeviceInfo(FILE *fout){
    cudaDeviceProp deviceProp;
    int current_device, driver_version;
    CUDA_SAFE_CALL( cudaGetDevice(&current_device) );
    CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, current_device) );
    CUDA_SAFE_CALL( cudaDriverGetVersion(&driver_version) );
    fprintf(fout, "------------------------ Device specifications ------------------------\n");
    fprintf(fout, "Device:              %s\n", deviceProp.name);
    fprintf(fout, "CUDA driver version: %d.%d\n", driver_version/1000, driver_version%1000);
    fprintf(fout, "GPU clock rate:      %d MHz\n", deviceProp.clockRate/1000);
    fprintf(fout, "Memory clock rate:   %d MHz\n", deviceProp.memoryClockRate/1000/2); // TODO: why divide by 2 here??
    fprintf(fout, "Memory bus width:    %d bits\n", deviceProp.memoryBusWidth);
    fprintf(fout, "WarpSize:            %d\n", deviceProp.warpSize);
    fprintf(fout, "L2 cache size:       %d KB\n", deviceProp.l2CacheSize/1024);
    fprintf(fout, "Total global mem:    %d MB\n", (int)(deviceProp.totalGlobalMem/1024/1024));
    fprintf(fout, "ECC enabled:         %s\n", deviceProp.ECCEnabled?"Yes":"No");
    fprintf(fout, "Compute Capability:  %d.%d\n", deviceProp.major, deviceProp.minor);
    const int TotalSPs = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor)*deviceProp.multiProcessorCount;
    fprintf(fout, "Total SPs:           %d (%d MPs x %d SPs/MP)\n", TotalSPs, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor));
    double InstrThroughput, MemBandwidth;
    GetDevicePeakInfo(&InstrThroughput, &MemBandwidth, &deviceProp);
    // Factor of 2: one FMA counts as two floating-point operations.
    fprintf(fout, "Compute throughput:  %.2f GFlops (theoretical single precision FMAs)\n", 2.0*InstrThroughput);
    fprintf(fout, "Memory bandwidth:    %.2f GB/sec\n", MemBandwidth/(1000.0*1000.0*1000.0));
    fprintf(fout, "-----------------------------------------------------------------------\n");
}

// Enumerates devices and dumps (most of) cudaDeviceProp for device 0, then
// prints the mixbench-style summary via StoreDeviceInfo.
int main()
{
    int count;
    cudaGetDeviceCount (&count);
    printf ("Total GPU device count =%d\n", count);
#if 1
    // NOTE(review): loop bound is hard-coded to 1, so only device 0 is dumped
    // even when `count` > 1.
    for (int i =0; i< 1; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties (&prop, i);
        printf ("Name: %s\n", prop.name);
        printf ("Global Mem (GB): %zu\n", prop.totalGlobalMem/1024/1024/1024);
        printf ("Shared Mem per Block: %zd\n", prop.sharedMemPerBlock);
        printf ("regs per block: %d\n", prop.regsPerBlock);
        printf ("warpSize: %d\n", prop.warpSize); //5
        printf ("memPitch: %zd\n", prop.memPitch);
        printf ("maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
        printf ("maxThreadsDim[0]: %d\n", prop.maxThreadsDim[0]);
        printf ("maxThreadsDim[1]: %d\n", prop.maxThreadsDim[1]);
        printf ("maxThreadsDim[2]: %d\n", prop.maxThreadsDim[2]);
        printf ("maxGridSize[0]: %d\n", prop.maxGridSize[0]);
        printf ("maxGridSize[1]: %d\n", prop.maxGridSize[1]);
        printf ("maxGridSize[2]: %d\n", prop.maxGridSize[2]);
        printf ("clockRate: %d\n", prop.clockRate); //10
        printf ("totalConstMem: %zd\n", prop.totalConstMem);
        printf ("major: %d\n", prop.major);
        printf ("minor: %d\n", prop.minor);
        printf ("textureAlignment: %zd\n", prop.textureAlignment);
        printf ("texturePitchAlignment: %zd\n", prop.texturePitchAlignment); //15
        printf ("deviceOverlap: %d\n", prop.deviceOverlap);
        printf ("multiProcessorCount: %d\n", prop.multiProcessorCount);
        printf ("kernelExecTimeoutEnabled: %d\n", prop.kernelExecTimeoutEnabled);
        printf ("integrated: %d\n", prop.integrated);
        printf ("canMapHostMemory: %d\n", prop.canMapHostMemory); // 20
        printf ("computeMode: %d\n", prop.computeMode);
        printf ("maxTexture1D: %d\n", prop.maxTexture1D);
        printf ("maxTexture1DMipmap: %d\n", prop.maxTexture1DMipmap);
        printf ("maxTexture1DLinear: %d\n", prop.maxTexture1DLinear);
        printf ("maxTexture2D[0]: %d\n", prop.maxTexture2D[0]);
        printf ("maxTexture2D[1]: %d\n", prop.maxTexture2D[1]); //25
        printf ("maxTexture3D[0]: %d\n", prop.maxTexture3D[0]);
        printf ("maxTexture3D[1]: %d\n", prop.maxTexture3D[1]);
        printf ("maxTexture3D[2]: %d\n", prop.maxTexture3D[2]);
        printf ("maxTexture1DLayered[0]: %d\n", prop.maxTexture1DLayered[0]);
        printf ("maxTexture1DLayered[1]: %d\n", prop.maxTexture1DLayered[1]);
        printf ("maxTexture2DLayered[0]: %d\n", prop.maxTexture2DLayered[0]);
        printf ("maxTexture2DLayered[1]: %d\n", prop.maxTexture2DLayered[1]);
        printf ("maxTexture2DLayered[2]: %d\n", prop.maxTexture2DLayered[2]);
        printf ("maxSurfaceCubemap: %d\n", prop.maxSurfaceCubemap); //40
        printf ("maxSurfaceCubemapLayered[0]: %d\n", prop.maxSurfaceCubemapLayered[0]);
        printf ("maxSurfaceCubemapLayered[1]: %d\n", prop.maxSurfaceCubemapLayered[1]);
        printf ("surfaceAlignment: %zd\n", prop.surfaceAlignment);
        printf ("concurrentKernels: %d\n", prop.concurrentKernels);
        printf ("ECCEnabled: %d\n", prop.ECCEnabled);
        printf ("pciBusID: %d\n", prop.pciBusID); //45
        printf ("pciDeviceID: %d\n", prop.pciDeviceID);
        printf ("pciDomainID: %d\n", prop.pciDomainID);
        printf ("tccDriver: %d\n", prop.tccDriver);
        printf ("asyncEngineCount: %d\n", prop.asyncEngineCount);
        printf ("unifiedAddressing: %d\n", prop.unifiedAddressing); // 50
        printf ("memoryClockRate: %d\n", prop.memoryClockRate);
        printf ("memoryBusWidth: %d\n", prop.memoryBusWidth);
        printf ("l2CacheSize: %d\n", prop.l2CacheSize);
        printf ("maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
        printf ("streamPrioritiesSupported: %d\n", prop.streamPrioritiesSupported); // 55
        printf ("globalL1CacheSupported: %d\n", prop.globalL1CacheSupported);
        printf ("localL1CacheSupported: %d\n", prop.localL1CacheSupported);
        printf ("sharedMemPerMultiprocessor: %zd\n", prop.sharedMemPerMultiprocessor);
        printf ("regsPerMultiprocessor: %d\n", prop.regsPerMultiprocessor);
//        printf ("managedMemSupported: %d\n", prop.managedMemSupported); //60
        printf ("isMultiGpuBoard: %d\n", prop.isMultiGpuBoard);
        printf ("multiGpuBoardGroupID: %d\n", prop.multiGpuBoardGroupID);
        printf ("singleToDoublePrecisionPerfRatio: %d\n", prop.singleToDoublePrecisionPerfRatio);
        printf ("pageableMemoryAccess: %d\n", prop.pageableMemoryAccess);
        printf ("concurrentManagedAccess: %d\n", prop.concurrentManagedAccess); //65
        printf ("computePreemptionSupported: %d\n", prop.computePreemptionSupported);
        printf ("canUseHostPointerForRegisteredMem: %d\n", prop.canUseHostPointerForRegisteredMem);
        printf ("cooperativeLaunch: %d\n", prop.cooperativeLaunch);
        printf ("cooperativeMultiDeviceLaunch: %d\n", prop.cooperativeMultiDeviceLaunch);
    }
#endif
    cudaSetDevice(0);
    StoreDeviceInfo(stdout);
    return 0;
}
6,030
/* This is a automatically generated test. Do not modify */
/* NOTE(review): generated fuzz-test kernel; the arithmetic below is
 * intentionally arbitrary and should not be hand-edited. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Single-thread kernel: chains float math on the inputs and prints the
 * final accumulator with full precision for output comparison. */
__global__
void compute(float comp, float var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29) {
    if (comp > -1.9157E-41f + ceilf((+1.5242E-36f - var_1 - var_2 + -1.3343E11f))) {
        float tmp_1 = -1.0025E-20f;
        comp += tmp_1 * acosf((var_4 - var_5 / (var_6 / var_7)));
        comp += (var_8 / (var_9 - (-1.2082E-1f - -1.2852E-42f)));
        for (int i=0; i < var_3; ++i) {
            comp = var_10 - -1.7545E-44f;
            comp = acosf((var_11 * -1.7718E-41f * +1.5120E36f / var_12));
            comp = (var_13 - (-1.3659E13f / var_14 / fmodf(var_15 + -1.1530E-41f + +1.4933E-6f, (var_16 * var_17 * +1.0793E-43f - var_18))));
            comp = expf(-1.4044E-37f);
        }
        if (comp <= atan2f((var_19 / var_20 * (-1.8581E-42f + (var_21 + var_22 + var_23))), log10f((-0.0f / var_24 / (var_25 + var_26 + sqrtf(+1.2291E23f)))))) {
            comp += (var_27 + (var_28 * var_29));
            comp = -1.3685E36f * acosf(cosf((-1.0995E-41f / +1.4802E17f)));
            comp = -1.5914E36f - +1.2207E-29f;
        }
    }
    printf("%.17g\n", comp);
}

/* Allocates a 10-element float array filled with v; caller owns it
 * (never freed here — acceptable for this throwaway test harness). */
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}

/* Parses 30 command-line values (argv[1..30]; argv[4] is the loop count)
 * and launches the kernel with a single thread.
 * NOTE(review): argc is never validated — missing arguments dereference
 * NULL in atof/atoi. */
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    int tmp_4 = atoi(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
    cudaDeviceSynchronize();
    return 0;
}
6,031
/*
 nvcc flagg_low.cu -o flagg_low
 ./flagg_low -h

 MAD-based RFI flagger: reads one time sample of cross-correlations from
 disk, extracts the autocorrelations, and flags channels whose modified
 z-score exceeds a threshold (one block per antenna, one thread per pol).
*/
#include <iostream>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>

#define NCHANS 8 // # of channels -- assume no more than 2048 channels for now, see blinest call in main{}
#define NANTS 2048 // # of antennas
#define NPOLS 2 // # of pols

/* Pulls the autocorrelation real parts out of the full cross-correlation
 * buffer `readdata` into `h_data`, ordered [antenna][channel][pol].
 * idx is the triangular-packing offset of antenna na's autocorrelation
 * within one channel; the trailing *2 strides skip imaginary parts.
 * NOTE(review): layout assumed from the index arithmetic — confirm
 * against the writer of xcorr_in.bin. */
void extract_autocorrs(int32_t *readdata, float *h_data)
{
    int nc, na, np;
    int nChanLen = (NANTS*(NANTS-1)/2+NANTS) * NPOLS * 2;
    int idx;
    for(nc = 0; nc < NCHANS; nc++){
        for(na = 0; na < NANTS; na++){
            for(np = 0; np < NPOLS; np++){
                idx = (int)((NANTS*(NANTS+1)/2) - ((NANTS-na)*((NANTS-na)+1)/2));
                h_data[na*(NCHANS*NPOLS)+nc*NPOLS+np] = (float)readdata[nc*nChanLen + idx*(NPOLS*2)+np*2];
            }
        }
    }
}

/* Swaps the two floats in place (helper for the bubble sort below). */
__device__ void swap(float *p, float *q)
{
    float t;
    t=*p;
    *p=*q;
    *q=t;
}

/*median value for NCHANS-long array*/
/* cont == 0: spectrum with interleaved polarizations (read every 2nd
 * element); cont == 1: contiguous array. Sorts a local copy (bubble
 * sort — fine for NCHANS == 8) and returns the median. */
__device__ float medchans(float *a, int cont)
{
    // cont controls wether the array is contiguous or interleaved
    int i,j;
    float tmp[NCHANS] = {0};
    if (cont == 0){
        for (i = 0; i < NCHANS; i++) tmp[i] = a[2*i]; // polarizations are interleaved
    } else {
        for (i = 0; i < NCHANS; i++) tmp[i] = a[i]; // samples are not interleaved
    }
    for(i = 0; i < NCHANS-1; i++) {
        for(j = 0; j < NCHANS-i-1; j++) {
            if(tmp[j] > tmp[j+1])
                swap(&tmp[j],&tmp[j+1]);
        }
    }
    /*CHECK IF NCHANS IS EVEN ; IF EVEN: TAKE AVERAGE BETWEEN TWO MIDDLE VALUES*/
    if (NCHANS%2 == 0){
        return (tmp[(int)(NCHANS/2-1)]+tmp[(int)(NCHANS/2)])/2.;
    } else {
        return tmp[(int)((NCHANS+1)/2-1)];
    }
}

/*computes MAD for one spectrum*/
/* Writes the median of `a` to *medval and the median absolute deviation
 * to *madval; `a` is a pol-interleaved spectrum. */
__device__ void mad(float *a, float *madval, float *medval)
{
    int i;
    float dev[NCHANS];
    float me = medchans(a,0); // interleaved array
    *medval = me;
    for (i = 0; i < NCHANS; i++) dev[i] = abs(a[2*i]-me); // polarizations are interleaved
    *madval = medchans(dev,1); // contiguous array
}

/* One block per antenna (blockIdx.x), one thread per polarization
 * (threadIdx.x < 2). Sets d_flags[...] = 1 where the modified z-score
 * M = 0.6745*(x - median)/MAD exceeds dThres (Iglewicz & Hoaglin).
 * d_madmed stores [mad, median] per antenna/pol for test output.
 * NOTE(review): no guard against madval == 0 — M becomes inf/nan then. */
__global__ void flagg(float *d_data, int32_t *d_flags, float dThres, float *d_madmed)
{
    int nAnt = blockIdx.x;
    int nPol = threadIdx.x;
    int nChanLen = NCHANS*NPOLS;
    if (nPol < 2){
        int i;
        float madval = 0;
        float medval = 0;
        float M;
        mad(&d_data[nAnt*nChanLen+nPol], &madval, &medval);
        d_madmed[nAnt*NPOLS*2+nPol*2] = madval; // TO TEST
        d_madmed[nAnt*NPOLS*2+nPol*2+1] = medval; // TO TEST
        for(i = 0; i < NCHANS; i++){
            M = 0.6745*((float)d_data[nAnt*nChanLen + nPol + 2*i]-medval) / madval;
            if (abs(M) > dThres)
                d_flags[nAnt*nChanLen + nPol + 2*i] = 1;
        }
    }
}

/* Prints command-line usage. */
void usage()
{
    fprintf (stdout,
        "flagg_low [options]\n"
        " -t threshold flagger threshold in # of sigma [default : 3.5]\n"
        " -h print usage\n");
}

/* Reads xcorr_in.bin, flags the autocorrelations on the GPU, and writes
 * output.bin / flags.bin / madmed.bin.
 * NOTE(review): the 't'-without-argument branch falls through into the
 * 'h' case (prints usage twice, exits with success) — confirm intended. */
int main(int argc, char**argv)
{
    int arg = 0;
    int nXcorr = NCHANS*(NANTS*(NANTS-1)/2 + NANTS)*NPOLS*2;
    int N = NANTS*NCHANS*NPOLS; // size of 1 time sample, autocorrelations only
    float dThres = 3.5; // modified z-score threshold recommended by Iglewicz and Hoaglin

    int32_t *readdata = (int32_t *)malloc(nXcorr*sizeof(int32_t)); // data in (all xcorr incl auto corr)
    float *h_data = (float *)malloc(N*sizeof(float)); // data in (autocorrelations)
    float *fl_data = (float *)malloc(N*sizeof(float)); // data out (corrected data)
    float *d_data; // input data on device
    cudaMalloc((void **)&d_data, N*sizeof(float));
    int32_t *h_flags = (int32_t *)malloc(N*sizeof(int32_t)); // data out (corrected data)
    int32_t *d_flags; // flags on device
    cudaMalloc((void **)&d_flags, N*sizeof(int32_t));
    cudaMemset(d_flags, 0., N*sizeof(int32_t));

    while ((arg=getopt(argc,argv,"t:h")) != -1) {
        switch (arg) {
            case 't':
                if (optarg) {
                    dThres = atof(optarg);
                    break;
                } else {
                    printf("-t flag requires argument");
                    usage();
                }
            case 'h':
                usage();
                return EXIT_SUCCESS;
        }
    }

    /*TO TEST*/
    float *h_madmed = (float *)malloc(NANTS*NPOLS*2*sizeof(float));
    float *d_madmed; // flags on device
    cudaMalloc((void **)&d_madmed, NANTS*NPOLS*2*sizeof(float));
    /*TO TEST*/

    /*disk files management*/
    FILE *ptr;
    FILE *write_ptr;
    FILE *write_flg;
    FILE *write_madmed; // TO TEST
    ptr = fopen("xcorr_in.bin","rb"); // simulates 1 time sample of all xcorr (incl auto corr)
    write_ptr = fopen("output.bin","wb");
    write_flg = fopen("flags.bin","wb");
    write_madmed = fopen("madmed.bin","wb"); // TO TEST
    int rd;
    rd = fread(readdata,nXcorr,sizeof(int32_t),ptr);

    /*extract autocorr and write them to h_data*/
    extract_autocorrs(readdata, h_data);

    /*copy data onto GPU*/
    cudaMemcpy(d_data, h_data, N*sizeof(float), cudaMemcpyHostToDevice);

    /*FLAG DATA*/
    flagg<<<NANTS, 2>>>(d_data, d_flags, dThres, d_madmed);
    cudaDeviceSynchronize();

    /*copy back to CPU and write to disk*/
    cudaMemcpy(fl_data, d_data, N*sizeof(float), cudaMemcpyDeviceToHost);
    fwrite(fl_data,N,sizeof(float),write_ptr);
    cudaMemcpy(h_flags, d_flags, N*sizeof(int32_t), cudaMemcpyDeviceToHost);
    fwrite(h_flags,N,sizeof(int32_t),write_flg);
    cudaMemcpy(h_madmed, d_madmed, NANTS*NPOLS*2*sizeof(float), cudaMemcpyDeviceToHost); // TO TEST
    fwrite(h_madmed,NANTS*NPOLS*2,sizeof(float),write_madmed); // TO TEST

    /*Free memory*/
    free(readdata);
    free(h_data);
    free(fl_data);
    free(h_flags);
    /*TO TEST*/
    free(h_madmed);
    cudaFree(d_madmed);
    fclose(write_madmed);
    /*TO TEST*/
    cudaFree(d_data);
    cudaFree(d_flags);
    fclose(ptr);
    fclose(write_ptr);
    fclose(write_flg);
    return 0;
}
6,032
#include <cstdio>

/* Empty kernel: exists only to demonstrate a kernel launch. */
__global__ void mykernel(void) {
}

int main() {
    mykernel<<<1,1>>>();
    /* Bug fix: launches are asynchronous and do not report errors
     * themselves. Check for a launch-configuration error and wait for
     * the kernel before exiting. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    printf("Hello CPU\n");
    return 0;
}
6,033
/* #include "engine.h" struct Point point_init(struct Point pt, double x, double y, double z){ pt.x = (double *)malloc(sizeof(double)); pt.y = (double *)malloc(sizeof(double)); pt.z = (double *)malloc(sizeof(double)); pt->x = x; pt->y = y; pt->z = z; pt->cons = 1; return pt; } */
6,034
#include <stdio.h>
#include <cuda.h>

#define N 4096
#define G 4
#define B 1024

/* One thread per element: c[i] = a[i] + b[i].
 * No bounds guard — the launch must cover exactly N threads (G*B == N). */
__global__ void vectorAddKernel(int * a, int * b, int * c){
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    c[index] = a[index] + b[index];
}

int main(){
    dim3 grid(G, 1, 1);   //e.g. dim3 grid(4,1,1)
    dim3 block(B, 1, 1);  //e.g. dim3 bock(128,1,1)

    int a_h[N];
    int b_h[N];
    int c_h[N];
    int *a_d;
    int *b_d;
    int *c_d;

    /* Inputs: a[i] = i, b[i] = 2i. */
    for(int i=0; i<N; i++) {
        a_h[i] = i;
        b_h[i] = i*2;
    }

    cudaMalloc((void**)&a_d, N*sizeof(int));
    cudaMalloc((void**)&b_d, N*sizeof(int));
    cudaMalloc((void**)&c_d, N*sizeof(int));

    cudaMemcpy(a_d, a_h, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, N*sizeof(int), cudaMemcpyHostToDevice);
    /* Bug fix: the original also copied the uninitialized c_h to the
     * device; the kernel overwrites every element of c_d, so that copy
     * only read indeterminate host memory. It has been removed. */

    /* Time the kernel with CUDA events. */
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    vectorAddKernel<<<grid, block>>>(a_d, b_d, c_d);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    cudaMemcpy(c_h, c_d, N*sizeof(int), cudaMemcpyDeviceToHost);

    for(int i=0; i<N; i++) {
        printf("%i+%i = %i\n", a_h[i], b_h[i], c_h[i]);
    }
    printf("Time to calculate results: %f ms.\n", elapsedTime);

    /* Bug fix: the original called cudaFree on the host stack arrays
     * (a_h, b_h, c_h) — invalid — and leaked all three device
     * allocations. Free the device pointers instead. */
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
6,035
#include "includes.h"

/* Gathers rows of `source` into `target` according to `indices`
 * (row i of target = row int(indices[i]) of source).
 * Launch: one block per 32 target rows; 32 threads per block (the loads
 * of 32 indices and the 32-stride column loop both assume blockDim.x == 32).
 * Negative indices wrap from the end (Python-style); indices still out of
 * [0, nSourceRows) after wrapping mark the target row as NaN. */
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
    __shared__ int sourceRowIndices[32];
    const int startTargetRowI = blockIdx.x * 32;
    const int tid = threadIdx.x;
    const int localNRowIs = min(32, nRowIs-startTargetRowI);

    // cooperatively load 32 row indices
    if (tid < localNRowIs){
        sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
        if (sourceRowIndices[tid]<0)
            sourceRowIndices[tid] += nSourceRows;
        if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
            sourceRowIndices[tid] = -1;  // sentinel: invalid index -> NaN row
    }
    // Barrier before any thread reads indices written by its neighbors.
    __syncthreads();

    // copy 32 rows
    for (int i=0; i<localNRowIs; i++){
        const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
        for (int colI=tid; colI<nCols; colI+=32)
            // (1.0/0.0 - 1.0/0.0) == inf - inf deliberately produces NaN
            // for rows whose index was invalid.
            target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
    }
}
6,036
#include <stdio.h>
#include <cuda.h>

#define THREADS_PER_BLOCK 1024

/* CPU reference: element-wise sum of two N x N row-major matrices.
 * Bug fix: the original declared `int index;` and never assigned it, so
 * every iteration read/wrote through an uninitialized index. Compute it
 * from (row, col). */
void matrixAdd(int *a, int *b, int *c, int N)
{
    for (int col = 0; col < N; col++) {
        for (int row = 0; row < N; row++) {
            int index = row * N + col;
            c[index] = a[index] + b[index];
        }
    }
}

/* GPU kernel: one thread per matrix element, 2D launch.
 * Assumes the grid covers exactly N x N threads (no bounds guard), which
 * holds for N = 4096 with the launch configuration in main(). */
__global__ void matrixAddKernel(int *a, int *b, int *c, int N)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int index = row * N + col;
    c[index] = a[index] + b[index];
}

int main()
{
    int N = 4096; // Define size of 1 side of square matrix

    /* Bug fix: the original used a 1D launch — grid(N/1024) x block(1024)
     * — i.e. N total threads for an N*N matrix, so the GPU computed only
     * row 0. The kernel (and the "(%ix%i)" prints below) expect a 2D
     * launch: 32x32 = THREADS_PER_BLOCK threads per block. */
    dim3 block(32, 32, 1);
    dim3 grid(N / block.x, N / block.y, 1);

    // Host pointers (dynamically allocated) and device pointers
    int *a_h;
    int *b_h;
    int *c_h; // GPU results
    int *d_h; // CPU results
    int *a_d;
    int *b_d;
    int *c_d;
    int size; // Number of bytes required by arrays

    // Timing via CUDA events
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;

    printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
    printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);

    // Dynamically allocate host memory
    size = N * N * sizeof(int);
    a_h = (int*) malloc(size);
    b_h = (int*) malloc(size);
    c_h = (int*) malloc(size);
    d_h = (int*) malloc(size);

    // Load host arrays with data
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a_h[i * N + j] = i;
            b_h[i * N + j] = i;
        }
    }

    // Allocate device memory
    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);

    // Copy inputs to the device.
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    /* Note: the original also copied the uninitialized c_h buffer to the
     * device; the kernel overwrites every element, so that copy has been
     * removed. */

    // Start timer for GPU
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Launch kernel
    matrixAddKernel<<<grid, block>>>(a_d, b_d, c_d, N);

    // Stop timer
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    printf("Time to calculate results on GPU: %f ms\n", elapsedTime);

    // Copy results back to the host
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Start timer for CPU
    cudaEventRecord(start, 0);

    // Launch CPU code
    matrixAdd(a_h, b_h, d_h, N);

    // Stop timer
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    printf("Time to calculate results on CPU: %f ms\n", elapsedTime);

    // Compare results
    for (int i = 0; i < N*N; i++) {
        if (c_h[i] != d_h[i]) {
            printf("Error: CPU and GPU results do not match\n");
            break;
        }
    }

    // Free memory
    free(a_h);
    free(b_h);
    free(c_h);
    free(d_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
6,037
/* Matrix normalization using CUDA
 * Compile with "nvcc matrixNorm.cu"
 *
 * Normalizes each column of A: B[r][c] = (A[r][c] - mu_c) / sigma_c,
 * one thread per column on the GPU.
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>

/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N;            /* Matrix size */
float BLOCK_SIZE; /* Size of blocks */
float GRID_DIM;   /* Size of the grid (ceil(N / BLOCK_SIZE)) */

/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];

/* junk */
#define randm() 4|2[uid]&3

/* Prototype */
void matrixNorm();

/* Returns a seed for srand based on the current microsecond count. */
unsigned int time_seed() {
    struct timeval t;
    struct timezone tzdummy;
    gettimeofday(&t, &tzdummy);
    return (unsigned int)(t.tv_usec);
}

/* Set the program parameters from the command-line arguments:
 * argv[1] = N (required), argv[2] = seed, argv[3] = block size. */
void parameters(int argc, char **argv) {
    int seed = 0; /* Random seed */

    srand(time_seed()); /* Randomize */

    if (argc >= 2) {
        N = atoi(argv[1]);
        if (N < 1 || N > MAXN) {
            printf("N = %i is out of range.\n", N);
            exit(0);
        }
    } else {
        printf("Usage: %s <matrix_dimension> [random seed] [grid_dimension] [blocks size]\n", argv[0]);
        exit(0);
    }
    if (argc >= 3) {
        seed = atoi(argv[2]);
        srand(seed);
    }
    BLOCK_SIZE = ceil(argc >= 4 ? atof(argv[3]) : 8.0);
    if (!BLOCK_SIZE) {
        printf("Blocks need to be of a size greater than zero!\n");
        exit(0);
    }
    /* Enough blocks to cover all N columns (last block may be partial). */
    GRID_DIM = ceil(N / (float)BLOCK_SIZE);

    /* Print parameters */
    printf("\nRandom seed = %i\n", seed);
    printf("Matrix dimension N = %i\n", N);
    printf("Grid dim = %d\n", (int)GRID_DIM);
    printf("Blocks size = %d\n", (int)BLOCK_SIZE);
}

/* Fill A with random values in [0, ~2) and zero B. */
void initialize_inputs() {
    int row, col;

    printf("\nInitializing...\n");
    for (col = 0; col < N; col++) {
        for (row = 0; row < N; row++) {
            A[row][col] = (float)rand() / 32768.0;
            B[row][col] = 0.0;
        }
    }
}

/* Print matrix A (only for small N). */
void print_inputs() {
    int row, col;

    if (N < 10) {
        printf("\nA =\n\t");
        for (row = 0; row < N; row++) {
            for (col = 0; col < N; col++) {
                printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
            }
        }
    }
}

/* Print matrix B (only for small N). */
void print_B() {
    int row, col;

    if (N < 10) {
        printf("\nB =\n\t");
        for (row = 0; row < N; row++) {
            for (col = 0; col < N; col++) {
                printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
            }
        }
    }
}

/* Append the elapsed time to elapsed_times.csv (creating it with a
 * header row if it does not exist).
 * Bug fix: the file handle of the final append was never closed. */
void print_time(char * seed, float time, char * prog) {
    char time_file[20] = "elapsed_times.csv";
    FILE * file = fopen(time_file, "r");
    if (file == NULL) {
        /* file doesn't exist: create it with headers */
        file = fopen(time_file, "a");
        fprintf(file, "program;size_matrix;seed;dim_grid;dim_block;time\n");
    }
    fclose(file);
    file = fopen(time_file, "a");
    fprintf(file, "%s;%d;%s;%d;%d;%g\n", prog, N, seed, (int)GRID_DIM, (int)BLOCK_SIZE, time);
    fclose(file);
}

/* Prototype of the Kernel function */
__global__ void matrixNormKernel(float * d_A, float * d_B, int size);

int main(int argc, char **argv) {
    /* Timing variables */
    struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
    struct timezone tzdummy;
    unsigned long long usecstart, usecstop;
    struct tms cputstart, cputstop; /* CPU times for my processes */

    parameters(argc, argv);
    initialize_inputs();
    print_inputs();

    /* Start Clock */
    printf("\nStarting clock.\n");
    gettimeofday(&etstart, &tzdummy);
    times(&cputstart);

    /****************** Matrix normalization ******************/
    float *d_A, *d_B;
    cudaMalloc((void**)&d_A, (N * N) * sizeof(float));
    cudaMalloc((void**)&d_B, (N * N) * sizeof(float));
    /* Copy row by row: A is MAXN-wide but only N columns are used. */
    for (int i = 0; i < N; i++) {
        cudaMemcpy(d_A + i * N, (float*)A[i], N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(d_B + i * N, (float*)B[i], N * sizeof(float), cudaMemcpyHostToDevice);
    }

    dim3 dimGrid(GRID_DIM, 1);
    dim3 dimBlock(BLOCK_SIZE, 1);
    printf("Computing in parallel.\n");
    matrixNormKernel<<<dimGrid, dimBlock>>>(d_A, d_B, N);

    for (int i = 0; i < N; i++) {
        cudaMemcpy((float*)B[i], d_B + i * N, N * sizeof(float), cudaMemcpyDeviceToHost);
    }
    cudaFree(d_A);
    cudaFree(d_B);
    /***********************************************************/

    /* Stop Clock */
    gettimeofday(&etstop, &tzdummy);
    times(&cputstop);
    printf("Stopped clock.\n");
    usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
    usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;

    /* Display output */
    print_B();

    /* Display timing results */
    printf("\nElapsed time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
    printf("(CPU times are accurate to the nearest %g ms)\n", 1.0/(float)CLOCKS_PER_SEC * 1000.0);
    printf("My total CPU time for parent = %g ms.\n",
           (float)( (cputstop.tms_utime + cputstop.tms_stime) -
                    (cputstart.tms_utime + cputstart.tms_stime) ) / (float)CLOCKS_PER_SEC * 1000);
    printf("My system CPU time for parent = %g ms.\n",
           (float)(cputstop.tms_stime - cputstart.tms_stime) / (float)CLOCKS_PER_SEC * 1000);
    printf("My total CPU time for child processes = %g ms.\n",
           (float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
                    (cputstart.tms_cutime + cputstart.tms_cstime) ) / (float)CLOCKS_PER_SEC * 1000);
    /* Contrary to the man pages, this appears not to include the parent */
    printf("--------------------------------------------\n");

    /* Bug fix: argv[2] (the seed) is optional; the original dereferenced
     * it unconditionally, crashing when only N was supplied. */
    print_time(argc >= 3 ? argv[2] : (char*)"0",
               (float)(usecstop - usecstart)/(float)1000, argv[0] + 2);

    exit(0);
}

/* ------------------ Above Was Provided --------------------- */

/* One thread per column: computes the column mean and (biased) variance,
 * then writes the normalized column into d_B.
 * Bug fix: the original guarded the two read loops with
 * `if (bx*bd+tx < size)` but NOT the final write loop, so threads of the
 * last (partial) block wrote d_B out of bounds whenever size was not a
 * multiple of the block size. A single early-exit guard now covers all
 * three loops (safe: this kernel uses no __syncthreads()). */
__global__ void matrixNormKernel(float * d_A, float * d_B, int size) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= size) return;

    int row;
    float mu, sigma;

    mu = 0.0;
    for (row = 0; row < size; row++) {
        mu += d_A[(row * size) + col];
    }
    mu /= (float) size;

    sigma = 0.0;
    for (row = 0; row < size; row++) {
        sigma += powf(d_A[(row * size) + col] - mu, 2.0);
    }
    sigma /= (float) size;
    /* NOTE(review): `sigma` here is the variance, not the standard
     * deviation (no sqrtf). Kept as-is to preserve the original output;
     * confirm against the assignment's definition of "normalization". */

    for (row = 0; row < size; row++) {
        if (sigma == 0.0) {
            d_B[(row * size) + col] = 0.0;
        } else {
            d_B[(row * size) + col] = (d_A[(row * size) + col] - mu) / sigma;
        }
    }
}
6,038
//
// Created by Cheevarit Rodnuson on 11/21/17.
//
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <string.h>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>

/* Allocates a `size`-element array of longs, each set to `inivalue`.
 * Caller owns the returned buffer and must free() it. */
long* createVector (long size, long inivalue) {
    long* vector = (long*) malloc(sizeof(long)*size);
    for (long i = 0; i < size; ++i) {
        vector[i] = inivalue;
    }
    return vector;
}

/* Prints the vector's elements space-separated, followed by a newline. */
void readVector (long* vector, long size) {
    for (long i = 0; i < size; ++i) {
        printf("%ld ",vector[i]);
    }
    printf("\n");
}

/* CPU reference: sum of the vector's elements. */
long sumVector (long* vector, long size) {
    long sum = 0;
    for (long i = 0; i < size; ++i) {
        sum+= vector[i];
    }
    return sum;
}

int main(void) {
    /*
     const char raw_input[] = " But the raven, sitting lonely on the placid bust, spoke only,\n"
     " That one word, as if his soul in that one word he did outpour.\n"
     " Nothing further then he uttered - not a feather then he fluttered -\n"
     " Till I scarcely more than muttered `Other friends have flown before -\n"
     " On the morrow he will leave me, as my hopes have flown before.'\n"
     " Then the bird said, `Nevermore.'\n";
     thrust::device_vector<char> input(raw_input, raw_input + sizeof(raw_input));
    */
    long* test = createVector(10, 2);
    readVector(test, 10);

    /* Improvement: construct the device vector directly from the host
     * range instead of assigning 10 elements one at a time (each such
     * assignment is a separate host->device transfer). */
    thrust::device_vector<long> d_test(test, test + 10);

    /* Bug fix: the original reduced with an int initial value and
     * thrust::plus<int>, silently truncating the long accumulation to
     * int; accumulate in long. */
    long sum = thrust::reduce(d_test.begin(), d_test.end(), 0L, thrust::plus<long>());

    /* Bug fix: %lu is for unsigned long; sum is a signed long. */
    printf("sum : %ld\n", sum);

    free(test); /* bug fix: the original leaked the host buffer */
    return 0;
}
6,039
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>
//#include <ctime>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define true 1
#define false 0
//#define M_PI 3.141592653589793
//#define INFINITY 1e8
#define MAX_RAY_DEPTH 5
#define NO_OF_SPHERES 5
#define NO_OF_LIGHTS 1
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))

typedef struct vec {
    float x;
    float y;
    float z;
} vec3;

/* Scene primitive: spheres with a non-zero emission_color act as lights. */
typedef struct Sphere_t {
    vec3 center;
    float radius;
    float radius2;        /* radius squared, cached for intersection tests */
    vec3 surface_color;
    float reflection;     /* > 0 enables mirror bounces */
    float transparency;   /* stored but unused by this renderer */
    vec3 emission_color;
} sphere_t;

/* Set the three components of *name in place. */
__device__ void vec3_init(vec3 *name, float a, float b, float c)
{
    name->x = a;
    name->y = b;
    name->z = c;
}

/* Return name scaled to unit length (no zero-length guard). */
__device__ vec3 vec3_normalize(vec3 name)
{
    float sq, invsqrt;
    vec3 op;
    sq = name.x * name.x + name.y * name.y + name.z * name.z;
    invsqrt = 1 / sqrt(sq);
    op.x = name.x * invsqrt;
    op.y = name.y * invsqrt;
    op.z = name.z * invsqrt;
    return op;
}

__device__ vec3 vec3_add(vec3 op1, vec3 op2)
{
    vec3 dest;
    dest.x = op1.x + op2.x;
    dest.y = op1.y + op2.y;
    dest.z = op1.z + op2.z;
    return dest;
}

__device__ vec3 vec3_sub(vec3 op1, vec3 op2)
{
    vec3 dest;
    dest.x = op1.x - op2.x;
    dest.y = op1.y - op2.y;
    dest.z = op1.z - op2.z;
    return dest;
}

/* Component-wise (Hadamard) product. */
__device__ vec3 vec3_mul(vec3 op1, vec3 op2)
{
    vec3 dest;
    dest.x = op1.x * op2.x;
    dest.y = op1.y * op2.y;
    dest.z = op1.z * op2.z;
    return dest;
}

__device__ vec3 vec3_const_mul(vec3 ip, float value)
{
    vec3 op;
    op.x = ip.x * value;
    op.y = ip.y * value;
    op.z = ip.z * value;
    return op;
}

__device__ float vec3_dot(vec3 *op1, vec3 *op2)
{
    return (((op1->x)*(op2->x)) + ((op1->y)*(op2->y)) + ((op1->z)*(op2->z)));
}

__device__ vec3 vec3_negate(vec3 ip)
{
    vec3 op;
    op.x = -ip.x;
    op.y = -ip.y;
    op.z = -ip.z;
    return op;
}

__device__ void vec3_copy(vec3 *dest, vec3 *source)
{
    dest->x = source->x;
    dest->y = source->y;
    dest->z = source->z;
}

/*
 * Geometric ray/sphere intersection. On a hit, *t0 and *t1 receive the two
 * intersection distances along ray_dir (t0 <= t1; t0 may be negative when
 * the origin is inside the sphere). Returns false when the ray misses or
 * the sphere is behind the origin.
 */
__device__ bool intersect(vec3 ray_origin, vec3 ray_dir, sphere_t s,
                          float *t0, float *t1)
{
    vec3 l;
    l = vec3_sub(s.center, ray_origin);
    float tca = vec3_dot(&l, &ray_dir);
    if (tca < 0) return false;
    float d2 = vec3_dot(&l, &l);
    d2 -= (tca * tca);
    if (d2 > s.radius2) return false;
    float thc = sqrt(s.radius2 - d2);
    *t0 = tca - thc;
    *t1 = tca + thc;
    return true;
}

/* Linear interpolation: mix==0 -> a, mix==1 -> b. */
__device__ float mix(float a, float b, float mix)
{
    return (b * mix + a * (1 - mix));
}

/*
 * One thread per pixel; the outer `count` loop just re-renders the same
 * image `count` times for benchmarking. Rays start at the origin looking
 * down -z; reflective hits loop (iteratively, up to ray_depth bounces),
 * diffuse hits gather direct lighting with a shadow test.
 * Expected launch: 2-D grid of 2-D blocks covering width x height.
 */
__global__ void render(sphere_t *spheres, int no_of_spheres, int no_of_lights,
                       float invWidth, float invHeight, float aspectratio,
                       float angle, vec3 *image_output, unsigned int width,
                       unsigned int height, int count, int ray_depth)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * width + ix;
    unsigned int y;
    unsigned int x;
    float xx;
    float yy;
    int ik = 0;
    for (ik = 0; ik < count; ik++) {
        if (ix < width && iy < height) {
            y = idx / width;
            x = idx % width;
            /* map pixel center to camera-space direction */
            xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
            yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
            vec3 raydir;
            vec3_init(&raydir, xx, yy, -1);
            raydir = vec3_normalize(raydir);
            vec3 rayorig;
            vec3_init(&rayorig, 0, 0, 0);
            vec3 final_color;
            vec3 last_reflection;
            vec3 scale;         /* accumulated reflectance along the bounce chain */
            vec3 nhit;
            vec3 phit;
            vec3 surfaceColor;
            float bias = 1e-4;  /* offset hit points to avoid self-intersection */
            float tnear;
            sphere_t *sphere = NULL;
            int depth = 0;
            vec3_init(&scale, 1, 1, 1);
            vec3_init(&final_color, 0, 0, 0);
            vec3_init(&surfaceColor, 0, 0, 0);
            vec3_init(&last_reflection, 0, 0, 0);
            do {
                sphere = NULL;
                /* BUG FIX: tnear was initialized once before the bounce loop,
                 * so later bounces could only register hits closer than the
                 * previous one. Reset it for every bounce. */
                tnear = INFINITY;
                /* find nearest intersection along the current ray */
                for (unsigned i = 0; i < no_of_spheres + no_of_lights; ++i) {
                    float t0 = INFINITY, t1 = INFINITY;
                    if (intersect(rayorig, raydir, spheres[i], &t0, &t1)) {
                        if (t0 < 0) t0 = t1;  /* origin inside sphere */
                        if (t0 < tnear) {
                            tnear = t0;
                            sphere = &spheres[i];
                        }
                    }
                }
                /* no intersection: background color */
                if (!sphere) {
                    vec3_init(&last_reflection, 2, 2, 2);
                    break;
                }
                vec3 temp2;
                temp2 = vec3_const_mul(raydir, tnear);
                phit = vec3_add(rayorig, temp2);
                nhit = vec3_sub(phit, sphere->center);
                nhit = vec3_normalize(nhit);
                /* If the normal and the view direction are not opposite,
                 * we are inside the sphere: flip the normal. */
                if (vec3_dot(&raydir, &nhit) > 0) {
                    nhit = vec3_negate(nhit);
                }
                if (sphere->reflection > 0 && depth < ray_depth) {
                    float facingratio = -(vec3_dot(&raydir, &nhit));
                    /* change the mix value to tweak the effect */
                    float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);
                    /* reflection direction (inputs already normalized) */
                    vec3 refldir = vec3_sub(raydir,
                        vec3_const_mul(nhit, 2 * vec3_dot(&raydir, &nhit)));
                    refldir = vec3_normalize(refldir);
                    vec3 temp10, temp11;
                    temp10 = vec3_const_mul(nhit, bias);
                    temp11 = vec3_add(phit, temp10);
                    vec3_copy(&rayorig, &temp11);
                    vec3_copy(&raydir, &refldir);
                    final_color = vec3_add(final_color,
                        vec3_mul(sphere->emission_color, scale));
                    scale = vec3_mul(scale,
                        vec3_const_mul(sphere->surface_color, fresneleffect));
                } else {
                    /* diffuse surface: gather direct light with shadow test */
                    for (unsigned i = no_of_spheres;
                         i < no_of_spheres + no_of_lights; ++i) {
                        vec3 transmission;
                        vec3_init(&transmission, 1, 1, 1);
                        vec3 lightDirection = vec3_sub(spheres[i].center, phit);
                        lightDirection = vec3_normalize(lightDirection);
                        for (unsigned j = 0;
                             j < no_of_spheres + no_of_lights; ++j) {
                            /* BUG FIX: a shadow ray aimed at the light's center
                             * always hits the light sphere itself; skip it or
                             * every diffuse point is shadowed. */
                            if (j == i) continue;
                            float t0 = 0, t1 = 0;
                            vec3 temp3;
                            temp3 = vec3_add(phit, vec3_const_mul(nhit, bias));
                            if (intersect(temp3, lightDirection, spheres[j],
                                          &t0, &t1)) {
                                vec3_init(&transmission, 0, 0, 0);
                                break;
                            }
                        }
                        vec3 temp8, temp9;
                        float ftemp1;
                        temp8 = vec3_mul(sphere->surface_color, transmission);
                        ftemp1 = MAX((float)0,
                                     vec3_dot(&nhit, &lightDirection));
                        temp9 = vec3_const_mul(spheres[i].emission_color, ftemp1);
                        temp9 = vec3_mul(temp8, temp9);
                        surfaceColor = vec3_add(surfaceColor, temp9);
                    }
                    last_reflection = vec3_add(surfaceColor,
                                               sphere->emission_color);
                }
                depth++;
            } while (depth < ray_depth + 1);
            final_color = vec3_add(final_color,
                                   vec3_mul(last_reflection, scale));
            image_output[idx].x = final_color.x;
            image_output[idx].y = final_color.y;
            image_output[idx].z = final_color.z;
        }
    }
}

/* Populate one sphere_t, caching radius^2. */
void init_spheres(sphere_t *sphere, float posx, float posy, float posz,
                  float radius, float surfx, float surfy, float surfz,
                  float reflection, float transparency,
                  float emisx, float emisy, float emisz)
{
    sphere->center.x = posx;
    sphere->center.y = posy;
    sphere->center.z = posz;
    sphere->radius = radius;
    sphere->radius2 = sphere->radius * sphere->radius;
    sphere->surface_color.x = surfx;
    sphere->surface_color.y = surfy;
    sphere->surface_color.z = surfz;
    sphere->reflection = reflection;
    sphere->transparency = transparency;
    sphere->emission_color.x = emisx;
    sphere->emission_color.y = emisy;
    sphere->emission_color.z = emisz;
}

int main(int argc, char **argv)
{
    sphere_t *spheres;
    sphere_t *d_spheres;
    vec3 *h_image_output;
    vec3 *d_image_output;
    int no_of_spheres;
    int no_of_lights;
    clock_t begin, end;
    double time_spent;
    int ray_depth;
    int count;

    /* BUG FIX: argv[1..4] were dereferenced without checking argc. */
    if (argc < 5) {
        printf("usage: %s <count> <ray_depth> <width> <height>\n", argv[0]);
        return 1;
    }
    count = atoi(argv[1]);
    ray_depth = atoi(argv[2]);
    if (!ray_depth) {
        ray_depth = 5;
        printf("ray depth is default value i.e., 5 \n");
    }
    unsigned int width = atoi(argv[3]);
    unsigned int height = atoi(argv[4]);
    float invWidth = 1 / (float)width;
    float invHeight = 1 / (float)height;
    float fov = 30;
    float aspectratio = width / (float)height;
    float angle = tan(M_PI * 0.5 * fov / 180.);
    no_of_spheres = NO_OF_SPHERES;
    no_of_lights = NO_OF_LIGHTS;
    spheres = (sphere_t *)calloc((no_of_spheres + no_of_lights),
                                 sizeof(sphere_t));
    h_image_output = (vec3 *)calloc(width * height, sizeof(vec3));
    if (cudaMalloc(&d_spheres,
                   sizeof(sphere_t) * (no_of_spheres + no_of_lights))
        != cudaSuccess) {
        printf("Memory allocation failed for d_spheres\n");
        return 0;
    }
    if (cudaMalloc(&d_image_output, sizeof(vec3) * width * height)
        != cudaSuccess) {
        printf("Memory allocation failed during d_image_output \n");
        return 0;
    }

    /* scene: ground sphere + 4 spheres + one emissive light sphere */
    init_spheres(&spheres[0], 0.0, -10004, -20, 10000, 0.20, 0.20, 0.20, 0, 0.0, 0.0, 0.0, 0.0);
    init_spheres(&spheres[1], 0.0, 0, -20, 4, 1.00, 0.32, 0.36, 1, 0.5, 0.0, 0.0, 0.0);
    init_spheres(&spheres[2], 5.0, -1, -15, 2, 0.90, 0.76, 0.46, 1, 0.0, 0.0, 0.0, 0.0);
    init_spheres(&spheres[3], 5.0, 0, -25, 3, 0.65, 0.77, 0.97, 1, 0.0, 0.0, 0.0, 0.0);
    init_spheres(&spheres[4], -5.5, 0, -15, 3, 0.90, 0.90, 0.90, 1, 0.0, 0.0, 0.0, 0.0);
    // light
    init_spheres(&spheres[5], 0.0, 20, -30, 3, 0.00, 0.00, 0.00, 1, 0.0, 3, 0.0, 0.0);

    if (cudaMemcpy(d_spheres, spheres,
                   sizeof(sphere_t) * (no_of_spheres + no_of_lights),
                   cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("Data transfer of d_a from host to device failed");
        free(spheres);
        cudaFree(d_spheres);
        return 0;
    }

    begin = clock();
    dim3 block(32, 32);
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y);
    render<<<grid, block>>>(d_spheres, no_of_spheres, no_of_lights, invWidth,
                            invHeight, aspectratio, angle, d_image_output,
                            width, height, count, ray_depth);
    cudaDeviceSynchronize();
    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("the execution time of kernel is %f \n", (time_spent));

    if (cudaMemcpy(h_image_output, d_image_output,
                   sizeof(vec3) * width * height,
                   cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("Data transfer of d_image_output from device to host failed \n");
        free(h_image_output);
        cudaFree(d_image_output);
        return 0;
    }

    FILE *fp = fopen("first.ppm", "wb"); /* b - binary mode */
    FILE *fp1 = fopen("first1.txt", "w+"); /* b - binary mode */
    fprintf(fp, "P6\n%d %d\n255\n", width, height);
    for (unsigned i = 0; i < width * height; ++i) {
        static unsigned char color[3];
        /* clamp to [0,1] before quantizing to bytes */
        color[0] = MIN((float)1, h_image_output[i].x) * 255;
        color[1] = MIN((float)1, h_image_output[i].y) * 255;
        color[2] = MIN((float)1, h_image_output[i].z) * 255;
        (void)fwrite(color, 1, 3, fp);
        fprintf(fp1, "%f, %f, %f\n", h_image_output[i].x,
                h_image_output[i].y, h_image_output[i].z);
    }
    fclose(fp);
    fclose(fp1);
    free(h_image_output);
    free(spheres);
    cudaFree(d_image_output);
    cudaFree(d_spheres);
    return 0;
}
6,040
/*
 * Marks a[i] = 1 for every prime index i in (1, n). Entries for composite
 * indices (and for 0 and 1) are left untouched, so callers must zero-init
 * the array before the launch — the original had the same write contract.
 * Expected launch: 1-D grid covering at least n threads.
 */
__global__ void find_primes(int *a, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx <= 1 || idx >= n) return;
    /* Trial division only up to sqrt(idx): a composite number always has a
     * factor no larger than its square root. The original scanned up to
     * idx/2 with a redundant `j != idx` test. */
    for (int j = 2; j * j <= idx; ++j) {
        if (idx % j == 0) return;  /* composite: leave a[idx] as-is */
    }
    a[idx] = 1;
}
6,041
/*
 * Declare a 1d array and find the maximum of each chunk using the reduce
 * method. No shared memory is used; the reduction is done in place in
 * global memory (each block owns exactly one chunk, since chunkSize ==
 * blockDim.x). chunkSize must be a power of 2.
 *
 * Incomplete last chunk: out-of-range partners fall back to the thread's
 * own value, the identity for max.
 */
#include <stdio.h>
#include <cuda.h>

float * serial_max_each_chunk(float maxarr[], float arr[], int chunkSize, int n);
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int chunkSize, int n);

int main(int argc, char **argv)
{
    // generate a 1d array
    int n = atoi(argv[1]);
    float *arr = (float*) malloc(n * sizeof(float));
    int i;
    for (i = 0; i < n; i++) {
        arr[i] = (float)i / 2.0f;
    }
    const int chunkSize = 512;
    int numChunk = (n + chunkSize - 1) / chunkSize;
    float *maxarr = (float *) malloc(numChunk * sizeof(float));

    // declare GPU memory pointers
    float *darr, *dmaxarr;
    cudaMalloc((void **)&darr, n * sizeof(float));
    cudaMalloc((void **)&dmaxarr, numChunk * sizeof(float));
    cudaMemcpy(darr, arr, n * sizeof(float), cudaMemcpyHostToDevice);

    dim3 dimGrid(numChunk, 1);
    dim3 dimBlock(chunkSize, 1, 1);
    parallel_max_each_chunk<<<dimGrid, dimBlock>>>(dmaxarr, darr, chunkSize, n);
    // BUG FIX: cudaThreadSynchronize() is deprecated.
    cudaDeviceSynchronize();
    cudaMemcpy(maxarr, dmaxarr, numChunk * sizeof(float), cudaMemcpyDeviceToHost);
    for (i = 0; i < numChunk; i++) {
        printf("%d maximum: %f\n", i, maxarr[i]);
    }

    float *smaxarr = (float *) malloc(numChunk * sizeof(float));
    printf("\nserial solution\n");
    serial_max_each_chunk(smaxarr, arr, chunkSize, n);
    bool judge = true;
    for (i = 0; i < numChunk; i++) {
        printf("%d maximum: %f\n", i, smaxarr[i]);
        judge = judge && (smaxarr[i] == maxarr[i]);
    }
    printf("\n--------correct or wrong---------\n");
    printf(judge ? "right\n" : "wrong\n");

    // check the exit state of CUDA code
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }

    // BUG FIX: host and device buffers were never released.
    free(arr);
    free(maxarr);
    free(smaxarr);
    cudaFree(darr);
    cudaFree(dmaxarr);
    return 0;
}

/* CPU reference: maxarr[i] = max of chunk i of arr. */
float * serial_max_each_chunk(float maxarr[], float arr[], int chunkSize, int n)
{
    int numChunk = (n + chunkSize - 1) / chunkSize;
    int i, j;
    for (i = 0; i < numChunk; i++) {
        /* BUG FIX: seed with the chunk's first element instead of the -3.0
         * sentinel, which was wrong for data below -3. Every chunk has at
         * least one element by construction of numChunk. */
        maxarr[i] = arr[i * chunkSize];
        for (j = i * chunkSize; j < (i + 1) * chunkSize; j++) {
            if (j >= n) {
                break;
            } else {
                if (maxarr[i] < arr[j]) {
                    maxarr[i] = arr[j];
                }
            }
        }
    }
    return maxarr;
}

/*
 * In-place tree reduction over global memory; block b leaves the max of its
 * chunk in dmaxarr[b]. Destroys darr's contents.
 */
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int chunkSize, int n)
{
    int myId = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s && myId < n) {
            /* BUG FIX: an out-of-range partner previously contributed 0,
             * which is wrong for negative data; the thread's own value is
             * the identity for max. */
            float right_counterpart = (myId + s) >= n ? darr[myId] : darr[myId + s];
            darr[myId] = right_counterpart > darr[myId] ? right_counterpart : darr[myId];
        }
        __syncthreads();
    }
    if (tid == 0) {
        dmaxarr[blockIdx.x] = darr[myId];
    }
}
6,042
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <time.h>

#define N 512
#define THREADS_PER_BLOCK 512

//
// kernel routine
//
// Dot product of two N-element int vectors into *c.
//
// BUG FIX / generalization: the original indexed with threadIdx.x only and
// had every block's thread 0 overwrite *c, so it was only correct when
// launched with exactly one block even though main() computes a generic
// grid. Now each block reduces its slice in shared memory (tree reduction,
// blockDim.x must be a power of two) and thread 0 accumulates into *c with
// atomicAdd. *c must be zeroed by the host before the launch.
__global__ void dot_product(const int *a, const int *b, int *c)
{
    __shared__ int temp[THREADS_PER_BLOCK];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // tail guard: out-of-range threads contribute the additive identity
    temp[threadIdx.x] = (i < N) ? a[i] * b[i] : 0;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            temp[threadIdx.x] += temp[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(c, temp[0]);
}

//
// main code
//
int main(int argc, char **argv)
{
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = N * sizeof(int);
    int result = 0;
    time_t t;

    // initialise card - legacy code
    //cutilDeviceInit(argc, argv);
    srand((unsigned) time(&t));
    printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
    printf("DEBUG: Total footprint size: %d bytes\n", size);

    // allocate device copies of a, b, c
    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_c, sizeof(int) );

    a = (int*)malloc( size );
    b = (int*)malloc( size );
    c = (int*)malloc( sizeof(int) );

    for (int i = 0; i < N; i++) {
#if 0
        a[i] = rand() % N;
        b[i] = rand() % N;
#else
        a[i] = 5;
        b[i] = 5;
#endif
    }
    printf("DEBUG: a[%d]=%d, b[%d]=%d\n", 0, a[0], 0, b[0]);
    printf("DEBUG: a[%d]=%d, b[%d]=%d\n", 1, a[1], 1, b[1]);

    // copy inputs to device
    cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
    // the kernel accumulates into *c with atomicAdd, so start from 0
    cudaMemset( dev_c, 0, sizeof(int) );

    int threadsPerBlock = THREADS_PER_BLOCK;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

    // launch dot_product() kernel
    printf("INFO: Launching CUDA kernel: dot product with blocks=%d, threads=%d...",
           blocksPerGrid, THREADS_PER_BLOCK);
    dot_product<<< blocksPerGrid, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c );
    printf(" Done\n");

    // copy device result back to host copy of c (cudaMemcpy synchronizes)
    cudaMemcpy( c, dev_c, sizeof(int), cudaMemcpyDeviceToHost );

#if 1
    // CPU reference sum; exact integer compare (fabs on ints was pointless)
    for (int i = 0; i < N; i++) {
        result += a[i] * b[i];
    }
    if (result == *c)
        printf("INFO: PASS\n");
    else
        printf("ERROR: *** FAILED *** sum=%d\n", result);
#endif
#if 1
    printf("DEBUG: a[0]=%d, b[0]=%d\n", a[0], b[0]);
    printf("DEBUG: a[%d]=%d, b[%d]=%d, c=%d\n", 1, a[1], 1, b[1], *c);
    //printf("Checking results %d\n", a[0]+b[0]-c[0]);
#endif

    free( a );
    free( b );
    free( c );
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    cudaDeviceReset();
    return 0;
}
6,043
#include <stdio.h>
#include <cassert>

#define ALLOC_SIZE 128

// Thread 0 of each block mallocs an ALLOC_SIZE int buffer on the device
// heap, publishes it via shared memory and controller[block], then all
// threads cooperatively fill it: slot idx gets the writing thread's
// threadIdx.x, i.e. idx % blockDim.x.
__global__ void test_malloc(int **controller)
{
    __shared__ int *ptr;
    int bx = blockIdx.x;
    if (threadIdx.x == 0) {
        ptr = (int*)malloc(ALLOC_SIZE * sizeof(int));
        controller[bx] = ptr;
        // BUG FIX: %d on a pointer; use %p.
        printf("allocate GPU memory at %p\n", ptr);
    }
    __syncthreads();
    // robustness: device malloc returns NULL when the device heap is exhausted
    if (ptr == NULL) return;
    for (int idx = threadIdx.x; idx < ALLOC_SIZE; idx += blockDim.x) {
        ptr[idx] = threadIdx.x;
    }
}

// Thread 0 of each block frees the buffer that test_malloc allocated.
__global__ void test_free(int **controller)
{
    int bx = blockIdx.x;
    if (threadIdx.x == 0) {
        free(controller[bx]);
        printf("free controller of %d\n", bx);
    }
}

int main()
{
    int block_num = 64;
    int block_size = 32;
    int **g_controller;
    cudaMalloc(&g_controller, sizeof(int*) * block_num);
    test_malloc<<<block_num, block_size>>>(g_controller);

    int *h_controller[block_num];
    // cudaMemcpy synchronizes, so test_malloc has completed here
    cudaMemcpy(h_controller, g_controller, sizeof(int*) * block_num,
               cudaMemcpyDeviceToHost);

    // BUG FIX: the original iterated block_size (32) of the block_num (64)
    // allocations, and verified block_size of the ALLOC_SIZE elements.
    for (int i = 0; i != block_num; i++) {
        printf("allocated pointer %p. \n", h_controller[i]);
        int buffer[ALLOC_SIZE];
        cudaMemcpy(buffer, h_controller[i], sizeof(int) * ALLOC_SIZE,
                   cudaMemcpyDeviceToHost);
        // BUG FIX: the (disabled) assert expected `index`, but the kernel
        // writes the thread's threadIdx.x = index % block_size.
        for (int index = 0; index != ALLOC_SIZE; index++)
            assert(buffer[index] == index % block_size);
    }

    test_free<<<block_num, block_size>>>(g_controller);
    // let test_free finish before the process tears the context down
    cudaDeviceSynchronize();
    cudaFree(g_controller);
}
6,044
#include "includes.h"

// Naive dense multiply d_z = d_x * d_y for Width x Width row-major matrices,
// one thread per output element.
//
// Generalization: the original used threadIdx alone, so only a single-block
// launch covered the matrix. Using the full grid index is backward compatible
// (blockIdx == 0 reproduces the old behavior) and supports multi-block grids.
__global__ void MatrixMulKernel(float *d_x, float *d_y, float *d_z, int Width)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    int idy = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    if ((idx < Width) && (idy < Width)) {
        float kernelSum = 0;
        for (int k = 0; k < Width; ++k) {
            kernelSum += d_x[idy * Width + k] * d_y[k * Width + idx];
        }
        d_z[idy * Width + idx] = kernelSum;
    }
}
6,045
/*
 * main.cu
 *
 *  Created on: Nov 14, 2019
 *      Author: cuda-s01
 */
#include <stdio.h>

const int TILE_WIDTH = 2;

/*
 * Tiled matrix multiplication P = M * N for square Width x Width row-major
 * matrices. Each block computes one TILE_WIDTH x TILE_WIDTH output tile,
 * staging tiles of M and N through shared memory; out-of-range loads are
 * padded with 0 so Width need not be a multiple of TILE_WIDTH.
 * Expected launch: 2-D blocks of exactly TILE_WIDTH x TILE_WIDTH threads.
 */
__global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width)
{
    // Row/column of the P element this thread owns
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;

    __shared__ float sum_M[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sum_N[TILE_WIDTH][TILE_WIDTH];
    // (the original zero-initialized both tiles here; redundant — every slot
    // is written below before it is read)

    float Pval = 0;
    for (int k = 0; k < ((Width - 1) / TILE_WIDTH + 1); k++) {
        // stage one tile of M and N, padding with 0 outside the matrices
        if (k * TILE_WIDTH + threadIdx.x < Width && Row < Width)
            sum_M[threadIdx.y][threadIdx.x] = M[Row * Width + k * TILE_WIDTH + threadIdx.x];
        else
            sum_M[threadIdx.y][threadIdx.x] = 0.0;
        if (k * TILE_WIDTH + threadIdx.y < Width && Col < Width)
            sum_N[threadIdx.y][threadIdx.x] = N[(k * TILE_WIDTH + threadIdx.y) * Width + Col];
        else
            sum_N[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();  // tiles fully loaded before use
        for (int n = 0; n < TILE_WIDTH; ++n)
            Pval += sum_M[threadIdx.y][n] * sum_N[n][threadIdx.x];
        __syncthreads();  // all reads done before the next load overwrites
    }
    if (Row < Width && Col < Width) {
        P[Row * Width + Col] = Pval;
        //printf("(%d,%d)=%f\n",Row,Col,P[Row*Width+Col]);
    }
}

/* Host wrapper: picks the launch configuration and launches the kernel
 * asynchronously (caller must synchronize before reading P). */
void matrixMultiplication(float *M, float *N, float *P, int Width)
{
    int th = TILE_WIDTH;
    // BUG FIX: Width/TILE_WIDTH + 1 launched a full extra block row/column
    // whenever Width was divisible by TILE_WIDTH; use ceiling division.
    int bl = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
    dim3 threadsPerBlock(th, th, 1);
    dim3 blocksPerGrid(bl, bl, 1);
    printf("Kernel started: (%d,%d,1) grid, (%d,%d,1) blocks.\n", bl, bl, th, th);
    matrixMultiplicationKernel<<<blocksPerGrid, threadsPerBlock>>>(M, N, P, Width);
}

/* Print a Width x Width row-major matrix. */
void PrintMatrix(float* M, int Width)
{
    for (int i = 0; i < Width; i++) {
        for (int j = 0; j < Width; j++)
            printf("%f ", M[i * Width + j]);
        printf("\n");
    }
    printf("\n");
}

int main(void)
{
    printf("Starting the program:\n");
    cudaError_t err = cudaSuccess;
    int matrix_size = 8;
    int num_of_elements = matrix_size * matrix_size;
    size_t size = num_of_elements * sizeof(float);
    printf("matrix [%d x %d] multiplication.\n", matrix_size, matrix_size);

    //==========================Shared Memory============================================
    // allocate matrices in managed (unified) memory:
    printf("Started variables allocation for the device.\n");
    printf("First matrix.\n");
    float *M;
    err = cudaMallocManaged((void**)&M, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate M matrix!\n");
        exit(EXIT_FAILURE);
    }
    else printf("Allocation successful.\n");

    printf("Second matrix.\n");
    float *N;
    err = cudaMallocManaged((void**)&N, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate N matrix!\n");
        exit(EXIT_FAILURE);
    }
    else printf("Allocation successful.\n");

    printf("Third matrix.\n");
    float *P;
    err = cudaMallocManaged((void**)&P, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate P matrix!\n");
        exit(EXIT_FAILURE);
    }
    else printf("Allocation successful.\n");

    // initialisation:
    for (int i = 0; i < num_of_elements; i++) {
        M[i] = rand() / (float)RAND_MAX;
        N[i] = rand() / (float)RAND_MAX;
    }
    printf("Initialisation finished.\n");

    // calculations:
    matrixMultiplication(M, N, P, matrix_size);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch kernel. Error: %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else printf("Kerel operations successful.\n");

    // BUG FIX: kernel launches are asynchronous; the host must synchronize
    // before reading the managed P buffer (printing/verification below).
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel execution failed. Error: %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //==========================TEST===============================================
    PrintMatrix(M, matrix_size);
    PrintMatrix(N, matrix_size);
    PrintMatrix(P, matrix_size);
    for (int i = 0; i < matrix_size; i++) {
        for (int j = 0; j < matrix_size; j++) {
            float tmp = 0;
            for (int k = 0; k < matrix_size; k++)
                tmp += M[i * matrix_size + k] * N[k * matrix_size + j];
            //debug line:
            //printf("%f ",tmp);
            if (fabs(tmp - P[i * matrix_size + j]) > 1e-3) {
                fprintf(stderr, "Verification test failed.!\nElement at index (%d, %d) should be %f, but is %f. \n",
                        i, j, tmp, P[i * matrix_size + j]);
                exit(EXIT_FAILURE);
            }
        }
    }
    printf("Test PASSED\n");

    // Free device global memory
    err = cudaFree(M);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(N);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(P);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Done\n");
    return 0;
}
6,046
/**********************************************************************
 * DESCRIPTION:
 *   Serial Concurrent Wave Equation - C Version
 *   This program implements the concurrent wave equation
 *********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);

int nsteps,                     /* number of time steps */
    tpoints,                    /* total points along string */
    rcode;                      /* generic return code */
float values[MAXPOINTS+2],      /* values at time t */
      oldval[MAXPOINTS+2],      /* values at time (t-dt) */
      newval[MAXPOINTS+2];      /* values at time (t+dt) */

int maxThreadsPerBlock;
/* device buffers: current, previous and next time step + point count */
float* valptr;
float* oldptr;
float* newptr;
int* datasizeptr;
float cuda_val[MAXPOINTS+2];    /* final result copied back from the GPU */

/*
 * One wave-equation time step: each thread advances one point.
 * Expected launch: 1-D grid covering at least *datasize threads.
 */
__global__ void cuda_do_math(float* values, float* oldval, float* newval, int* datasize)
{
    float dtime, c, dx, tau, sqtau;
    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < (*datasize)) {
        newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0) * values[i]);
    }
}

/**********************************************************************
 *   Checks input values from parameters
 *********************************************************************/
void check_param(void)
{
    char tchar[20];

    /* check number of points, number of iterations */
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: ",
               MINPOINTS, MAXPOINTS);
        scanf("%s", tchar);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/**********************************************************************
 *   Initialize points on line (sine curve); oldval starts equal to values
 *********************************************************************/
void init_line(void)
{
    int i, j;
    float x, fac, k, tmp;

    /* Calculate initial values based on sine curve */
    fac = 2.0 * PI;
    k = 0.0;
    tmp = tpoints - 1;
    for (j = 1; j <= tpoints; j++) {
        x = k / tmp;
        values[j] = sin(fac * x);
        k = k + 1.0;
    }
    /* Initialize old values array */
    for (i = 1; i <= tpoints; i++)
        oldval[i] = values[i];
}

/**********************************************************************
 *   Calculate new values using wave equation (serial reference; unused
 *   by the CUDA path but kept for comparison)
 *********************************************************************/
void do_math(int i)
{
    float dtime, c, dx, tau, sqtau;

    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0) * values[i]);
}

/**********************************************************************
 *   Update all values along line a specified number of times (on the GPU).
 *   The three device buffers are rotated between steps instead of copied.
 *********************************************************************/
void update()
{
    int i;
    int numBlocks = tpoints / maxThreadsPerBlock;
    if (tpoints % maxThreadsPerBlock != 0) {
        numBlocks++;
    }

    /* Update values for each time step */
    for (i = 1; i <= nsteps; i++) {
        cuda_do_math<<<numBlocks, maxThreadsPerBlock>>>(valptr, oldptr, newptr, datasizeptr);
        /* rotate buffers: new -> current -> old */
        float* temp;
        temp = oldptr;
        oldptr = valptr;
        valptr = newptr;
        newptr = temp;
        /* NOTE: the original also shuffled the host-side values/oldval/newval
         * arrays here; that work never fed the printed CUDA result (newval is
         * never recomputed on the host), so it has been removed. */
    }
    /* cudaMemcpy synchronizes with the outstanding kernels */
    cudaMemcpy(cuda_val, valptr, sizeof(float) * tpoints, cudaMemcpyDeviceToHost);
}

/**********************************************************************
 *   Print final results
 *********************************************************************/
void printfinal_cuda()
{
    int i;
    for (i = 1; i <= tpoints; i++) {
        printf("%6.4f ", cuda_val[i]);
        if (i % 10 == 0)
            printf("\n");
    }
}

/**********************************************************************
 *   Main program
 *********************************************************************/
int main(int argc, char *argv[])
{
    sscanf(argv[1], "%d", &tpoints);
    sscanf(argv[2], "%d", &nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    init_line();

    int count;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "Here is no cuda device\n");
        return 1;
    }
    int i;
    int dev = 0;  /* last device whose properties we could read */
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            maxThreadsPerBlock = prop.maxThreadsPerBlock;
            dev = i;
        }
    }
    /* BUG FIX: the original called cudaSetDevice(i) after the loop, where
     * i == count — an invalid device ordinal. */
    cudaSetDevice(dev);
    /* robustness: never divide by zero in update() if no query succeeded */
    if (maxThreadsPerBlock <= 0)
        maxThreadsPerBlock = 256;

    /* Allocate global memory on device */
    cudaMalloc((void**)&valptr, sizeof(float) * tpoints);
    cudaMalloc((void**)&oldptr, sizeof(float) * tpoints);
    cudaMalloc((void**)&newptr, sizeof(float) * tpoints);
    cudaMalloc((void**)&datasizeptr, sizeof(int));
    cudaMemcpy(valptr, values, sizeof(float) * tpoints, cudaMemcpyHostToDevice);
    cudaMemcpy(oldptr, oldval, sizeof(float) * tpoints, cudaMemcpyHostToDevice);
    cudaMemcpy(newptr, newval, sizeof(float) * tpoints, cudaMemcpyHostToDevice);
    cudaMemcpy(datasizeptr, &tpoints, sizeof(int), cudaMemcpyHostToDevice);

    printf("Updating all points for all time steps...\n");
    update();
    printf("Printing final results...\n");
    printfinal_cuda();
    printf("\nDone.\n\n");

    cudaFree(valptr);
    cudaFree(oldptr);
    cudaFree(newptr);
    cudaFree(datasizeptr);  /* BUG FIX: was leaked */
    return 0;
}
6,047
#include <stdio.h>
#include "curand.h"
#include "curand_kernel.h"
#include "math.h"
#include <thrust/device_vector.h>

// Number of Monte Carlo samples drawn by each GPU thread.
#define TRIALS_PER_THREAD 4192

/**
 * Monte Carlo estimation of pi: each thread draws TRIALS_PER_THREAD points
 * uniformly in the unit square and counts how many land inside the circle
 * of radius r. Per-thread hit counts are written to dev[idx].
 *
 * Launch: 1-D grid with one slot in `dev` per thread
 * (gridDim.x * blockDim.x elements).
 */
__global__ void calc_pi(int *dev, long num_trials, double r)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_trials)
        return;

    curandState st;
    // Fixed seed with a distinct subsequence per thread: reproducible,
    // statistically independent streams.
    curand_init(0, idx, 0, &st);

    int Ncirc = 0;
    for (int i = 0; i < TRIALS_PER_THREAD; i++) {
        double x = curand_uniform(&st);
        double y = curand_uniform(&st);
        if (x * x + y * y <= r * r)
            Ncirc++;
    }
    dev[idx] = Ncirc;
}

int main()
{
    static long num_trials = 1000000000;
    static int gpu_threads = 1024;
    double r = 1.0; // radius of circle. Side of squrare is 2*r

    // BUG FIX: the original applied ceil() to an already-truncated integer
    // division, silently dropping the remainder and launching one block too
    // few. Use integer ceiling division instead.
    long per_block = (long)gpu_threads * TRIALS_PER_THREAD;
    long nblocks = (num_trials + per_block - 1) / per_block;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);

    thrust::device_vector<int> dev(nblocks * gpu_threads);
    calc_pi<<<nblocks, gpu_threads>>>(thrust::raw_pointer_cast(dev.data()), num_trials, r);

    double Ncirc = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>());

    // BUG FIX: normalize by the number of samples actually drawn (nblocks is
    // rounded up, so slightly more than num_trials points are sampled); the
    // original divided by the requested count, biasing the estimate.
    double total_samples = (double)nblocks * gpu_threads * TRIALS_PER_THREAD;
    double pi = 4.0 * (Ncirc / total_samples);

    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);

    printf("\n%ld trials, pi is %lf \n", num_trials, pi);
    printf("%.2f milisegundo(s). \n", msecTotal);
    return 0;
}
6,048
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. 
*/ #ifdef _WIN32 # define NOMINMAX #endif #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include <assert.h> using namespace std; #define errCheck(stmt) do {\ cudaError_t err = stmt;\ if (err != cudaSuccess) {\ printf("%s in %s at line %d\n",cudaGetErrorString(err), __FILE__, __LINE__);\ exit(EXIT_FAILURE);\ }\ } while(0) // You can use any other block size you wish. #define BLOCK_SIZE 1024 #define DEFAULT_NUM_ELEMENTS 16777216 //16000000 #define MAX_RAND 2 // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. __global__ void scan_hs(float *d_odata, float *d_idata, int length) { volatile extern __shared__ float temp[]; int tid = threadIdx.x; int tx = threadIdx.x; volatile __shared__ float acc; acc = 0; for(int i=0; i< (length-1)/blockDim.x+1; i++ ){ tid = i*blockDim.x + threadIdx.x; temp[tx] = (tx == 0)?acc:d_idata[tid-1]; __syncthreads(); int pout = 0; int pin = 1; //if(tid < length) { for (int offset = 1; offset < blockDim.x; offset <<= 1) { pout = 1 - pout; pin = 1 - pin; if (tx >= offset) temp[pout * blockDim.x + tx] = temp[pin * blockDim.x + tx] + temp[pin * blockDim.x + tx - offset]; else temp[pout * blockDim.x + tx] = temp[pin * blockDim.x + tx]; __syncthreads(); } d_odata[tid] = temp[pout * blockDim.x + tx]; if (tx == blockDim.x - 1) acc = d_idata[tid] + d_odata[tid]; } __syncthreads(); } } void prescanArray(float *outArray, float *inArray, int numElements) { scan_hs<<<1, BLOCK_SIZE, 2*sizeof(float)*BLOCK_SIZE>>>(outArray, inArray, numElements); } // **===-----------------------------------------------------------===** //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name, int size); void WriteFile(float*, char* file_name, int size); extern "C" unsigned int compare( const float* reference, const float* 
data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { cudaEvent_t cpuStart, cpuStop, gpuIncluStart, gpuIncluStop; errCheck(cudaEventCreate(&cpuStart)); errCheck(cudaEventCreate(&gpuIncluStart)); errCheck(cudaEventCreate(&cpuStop)); errCheck(cudaEventCreate(&gpuIncluStop)); float cpuTime, gpuIncluTime; int errorM = 0; int size = 0; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof(float) * num_elements; //float* h_data = (float*) malloc( mem_size); float * h_data; errCheck(cudaMallocHost((void**) &h_data, mem_size)); // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicates the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. 
switch(argc-1) { case 2: // Determine size of array size = atoi(argv[1]); if(size < 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; //h_data = (float*) malloc( mem_size); errCheck(cudaMallocHost((void**) &h_data, mem_size)); for(unsigned int i = 0; i < num_elements; ++i) { h_data[i] = (int)(rand() % MAX_RAND); } WriteFile(h_data, argv[2], num_elements); break; case 3: // Three Arguments size = atoi(argv[1]); if(size < 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; //h_data = (float*) malloc( mem_size); errCheck(cudaMallocHost((void**) &h_data, mem_size)); errorM = ReadFile(h_data, argv[2], size); if(errorM != 0) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; //h_data = (float*) malloc(mem_size); errCheck(cudaMallocHost((void**) &h_data, mem_size)); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } // compute reference solution float* reference = (float*) malloc(mem_size); errCheck(cudaEventRecord(cpuStart, NULL)); computeGold( reference, h_data, num_elements); errCheck(cudaEventRecord(cpuStop, NULL)); errCheck(cudaEventSynchronize(cpuStop)); errCheck(cudaEventElapsedTime(&cpuTime, cpuStart, cpuStop)); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", cpuTime); // allocate device memory input and 
output arrays float* d_idata = NULL; float* d_odata = NULL; errCheck( cudaMalloc( (void**) &d_idata, mem_size)); errCheck( cudaMalloc( (void**) &d_odata, mem_size)); // copy host memory to device input array errCheck( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) ); // initialize all the other device arrays to be safe errCheck( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) ); // **===-------- Allocate data structure here -----------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_odata, d_idata, 16); errCheck(cudaEventRecord(gpuIncluStart, NULL)); // copy host memory to device input array errCheck( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) ); // initialize all the other device arrays to be safe errCheck( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) ); // **===-------- Modify the body of this function -----------===** prescanArray(d_odata, d_idata, num_elements); // **===-----------------------------------------------------------===** //errCheck( cudaThreadSynchronize() ); // **===-------- Deallocate data structure here -----------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // copy result from device to host errCheck(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost)); errCheck(cudaEventRecord(gpuIncluStop, NULL)); errCheck(cudaEventSynchronize(gpuIncluStop)); errCheck(cudaEventElapsedTime(&gpuIncluTime, gpuIncluStart, gpuIncluStop)); printf("GPU inclusive time: %f (ms)\n", gpuIncluTime); printf("Speedup: %fX\n", cpuTime/gpuIncluTime); if ((argc - 1) == 3) // Three Arguments, write result to file { WriteFile(h_data, argv[3], num_elements); } else if ((argc - 1) == 1) // One Argument, write result to file { WriteFile(h_data, argv[1], 
num_elements); } // Check if the result is equivalent to the expected soluion unsigned int result_regtest = compare(reference, h_data, num_elements); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); // cleanup memory //free(h_data); free(reference); cudaFree(h_data); cudaFree(d_odata); cudaFree(d_idata); } int ReadFile(float* M, char* file_name, int data_read) { unsigned int i = 0; std::ifstream ifile(file_name); for(; i < data_read; i++){ ifile>>M[i]; } ifile.close(); return (i==data_read)? 0:1; } void WriteFile(float* M, char* file_name, int data_write) { std::ofstream ofile(file_name); for(unsigned int i = 0; i < data_write; i++){ ofile<<M[i]<<" "; } ofile.close(); } unsigned int compare(const float* reference, const float* data, const unsigned int len) { for(unsigned int i = 0; i < len; i++){ /* if(i > 1020 && i <1028){ cout<<"i = "<<i<<endl; cout<<"Host = "<<reference[i]<<endl; cout<<"Device = "<<data[i]<<endl; //return false; } */ float error = fabs(reference[i]-data[i]); if(error > 0.0001f){ return false; } } return true; } unsigned int compare( const float* reference, const float* data, const unsigned int len);
6,049
#include <stdio.h>

// Device-global state shared between sub1_ and sub2_:
// sub1_ publishes arrptr/x, sub2_ reads them (so sub1_ must run first).
__device__ static int *arrptr;
__device__ static int x;

extern "C"
{
    // Allocates a 10-element int array on the device heap, sets x, and
    // prints both for inspection.
    __device__ void sub1_()
    {
        // BUG FIX: the original allocated 10 *bytes* (room for only two
        // ints) behind an int* — allocate 10 ints as the pointer type and
        // the name "arrptr" imply.
        arrptr = (int *) malloc (10 * sizeof(int));
        x = 11;
        printf ("sub1: arrptr=%p\n", arrptr);
        printf ("sub1: x=%d\n", x);
    }

    // Prints the state published by sub1_.
    __device__ void sub2_()
    {
        printf ("sub2: arrptr=%p\n", arrptr);
        printf ("sub2: x=%d\n", x);
    }
}
6,050
#include <stdint.h>
#include <cuda.h>

// Element-wise addition of two n x n matrices: c = a + b.
// Expects a 2-D launch whose x/y thread coordinates cover the n x n
// index space; threads of a padded grid that fall outside do nothing.
__global__ void add(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t n)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard clause: skip out-of-range threads.
    if (row >= n || col >= n)
        return;

    int flat = row * n + col;
    c[flat] = a[flat] + b[flat];
}
6,051
// This example introduces __device__ functions, which are special functions
// which may be called from code executing on the device.

#include <stdlib.h>
#include <stdio.h>

// __device__ functions may only be called from __global__ functions or other
// __device__ functions. Unlike __global__ functions, __device__ functions are
// not configured, and have no restriction on return type.

// Just return the constant 7.
__device__ int get_constant(void)
{
  return 7;
}

// Return the index of the current thread's block.
__device__ int get_block_index(void)
{
  return blockIdx.x;
}

// Return the index of the current thread within its block.
__device__ int get_thread_index(void)
{
  return threadIdx.x;
}

// Return the index of the current thread across the entire grid launch.
__device__ int get_global_index(void)
{
  return blockIdx.x * blockDim.x + threadIdx.x;
}

// kernel1 stores the result of calling the __device__ function get_constant():
__global__ void kernel1(int *array)
{
  int index = get_global_index();
  array[index] = get_constant();
}

// kernel2 stores the result of calling the __device__ function get_block_index():
__global__ void kernel2(int *array)
{
  int index = get_global_index();
  array[index] = get_block_index();
}

// kernel3 stores the result of calling the __device__ function get_thread_index():
__global__ void kernel3(int *array)
{
  int index = get_global_index();
  array[index] = get_thread_index();
}

// kernel4 stores the result of calling the __device__ function get_global_index():
__global__ void kernel4(int *array)
{
  int index = get_global_index();
  array[index] = get_global_index();
}

// Helper: copy the device results back to the host and print them under the
// given label (factored out of four identical copy-paste stanzas).
static void print_results(const char *label, int *host_array, int *device_array,
                          int num_elements, int num_bytes)
{
  cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
  printf("%s results:\n", label);
  for(int i = 0; i < num_elements; ++i)
  {
    printf("%d ", host_array[i]);
  }
  printf("\n\n");
}

int main(void)
{
  int num_elements = 256;
  int num_bytes = num_elements * sizeof(int);

  int *device_array = 0;
  int *host_array = 0;

  // malloc a host array
  host_array = (int*)malloc(num_bytes);

  // cudaMalloc a device array
  cudaMalloc((void**)&device_array, num_bytes);

  // if either memory allocation failed, report an error message
  // BUG FIX: release whichever allocation DID succeed before bailing out —
  // the original leaked it. free(NULL)/cudaFree(NULL) are no-ops, so this
  // is safe regardless of which side failed.
  if(host_array == 0 || device_array == 0)
  {
    printf("couldn't allocate memory\n");
    free(host_array);
    cudaFree(device_array);
    return 1;
  }

  // choose a launch configuration (256 is an exact multiple of 128, so no
  // tail guard is needed in the kernels)
  int block_size = 128;
  int grid_size = num_elements / block_size;

  // launch each kernel and print out the results
  kernel1<<<grid_size,block_size>>>(device_array);
  print_results("kernel1", host_array, device_array, num_elements, num_bytes);

  kernel2<<<grid_size,block_size>>>(device_array);
  print_results("kernel2", host_array, device_array, num_elements, num_bytes);

  kernel3<<<grid_size,block_size>>>(device_array);
  print_results("kernel3", host_array, device_array, num_elements, num_bytes);

  kernel4<<<grid_size,block_size>>>(device_array);
  print_results("kernel4", host_array, device_array, num_elements, num_bytes);

  // deallocate memory
  free(host_array);
  cudaFree(device_array);
}
6,052
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>

using namespace std;

// Device-global accumulator, read back on the host via cudaMemcpyFromSymbol.
// BUG FIX: start at 0 — the original initialized it to 1, biasing the total.
__device__ int sum = 0;

// Adds array[0..999999] into the device-global `sum`.
// BUG FIX: the original used a plain `sum += array[i]`, a data race across a
// million threads that loses almost every update; atomicAdd makes each
// increment race-free.
__global__ void degreeCalc (int *array)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= 1000000)
    {
        return;
    }
    atomicAdd(&sum, array[i]);
}

int main(int argc, char const *argv[])
{
    int n = 1000000;
    int *h_array = new int [n];
    int *h_sum = new int;
    int *d_array = NULL;
    cudaMalloc((void **)&d_array, n * sizeof(int));

    // Every element is 1, so the expected total is exactly n.
    for (int i = 0; i < n; ++i)
    {
        h_array[i] = 1;
    }
    cudaMemcpy(d_array, h_array, n * sizeof(int), cudaMemcpyHostToDevice);

    int threadsPerBlock = 512;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;

    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_array);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    // Retrieve result from device and store it in host variable
    cudaMemcpyFromSymbol(h_sum, sum, sizeof(int), 0, cudaMemcpyDeviceToHost);
    cout<<*h_sum<<endl;

    cudaEventElapsedTime(&time, start, stop);
    cout<<"Time for the kernel: "<<time<<" ms"<<endl;

    delete[] h_array;
    delete h_sum;          // BUG FIX: the original leaked h_sum
    cudaFree(d_array);
    cudaDeviceReset();
    return 0;
}
6,053
#include "update.hh"
#include <cassert>
#include <stdexcept>
#include "graph.hh"
#include "mse-grad.hh"
#include "ops-builder.hh"
#include "variable.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"

namespace ops
{

    // Graph op that applies an in-place update to a Variable's storage.
    // Predecessors are registered as {var, dt, coeff}; the op's declared
    // output shape mirrors the variable's shape.
    // NOTE(review): `dt` presumably carries the delta tensor and `coeff` a
    // scaling coefficient — confirm against rt::Node::op_update's contract.
    Update::Update(Variable* var, Op* dt, Op* coeff)
        : Op("update", var->shape_get(), {var, dt, coeff})
        , var_(var)
    {}

    // Lowers this op into a runtime node and registers the result with the
    // graph's compilation cache.
    void Update::compile()
    {
        auto& g = Graph::instance();
        // Compiled results of the dt (preds()[1]) and coeff (preds()[2])
        // predecessor ops. preds()[0] is the variable itself and needs no
        // compiled input here — its buffer is accessed directly below.
        auto& cdt = g.compiled(preds()[1]);
        auto& ccoeff = g.compiled(preds()[2]);

        // Element count is taken from the delta tensor's compiled shape.
        Shape out_shape = cdt.out_shape;
        std::size_t len = out_shape.total();

        // The update writes straight into the variable's backing storage;
        // the same pointer doubles as this op's output buffer.
        dbl_t* ptr = var_->data_begin();

        // Runtime node performing the update over `len` elements, ordered
        // after both predecessor nodes.
        auto out_node = rt::Node::op_update(ptr, cdt.out_data, ccoeff.out_data, len, {cdt.out_node, ccoeff.out_node});
        g.add_compiled(this, {out_node}, {}, out_node, out_shape, ptr);
    }

}
6,054
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>

using namespace std;

#define BLOCK_SIZE 16
#define BASE_TYPE double

// For each output element (row, col), stores the dot product of row `row` of
// A with itself (the squared Euclidean norm of the row).
// NOTE(review): the result does not depend on `col`, so every column of C
// repeats the row norm. The commented-out iAT index in the original suggests
// A*A^T may have been intended — confirm before trusting the orthogonality
// test in main().
__global__ void matrixMult(const BASE_TYPE *A, BASE_TYPE *C, int Acols, int Arows)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;

    // Padded-grid guard (the original had none and relied on the grid
    // dividing the matrix exactly, which 100x200 with 16x16 tiles does not).
    if (row >= Arows || col >= Acols)
        return;

    int i0 = Acols * row;

    // BUG FIX: the original wrote `sum = +A[...] * A[...]`, assigning only
    // the last term instead of accumulating with `+=`.
    BASE_TYPE sum = 0;
    for (int k = 0; k < Acols; k++)
        sum += A[i0 + k] * A[i0 + k];

    C[row * Acols + col] = sum;
}

int main()
{
    int Arows = 100;
    int Acols = 200;
    size_t Asize = Arows * Acols * sizeof(BASE_TYPE);

    BASE_TYPE *h_A = (BASE_TYPE *)malloc(Asize);
    BASE_TYPE *h_C = (BASE_TYPE *)malloc(Asize);

    for (int i = 0; i < Arows * Acols; i++)
    {
        h_A[i] = rand() / (BASE_TYPE)RAND_MAX;
    }
    for (int i = 0; i < Arows * Acols; i++)
    {
        // BUG FIX: %d is undefined behavior for a double argument; use %f.
        printf("h_A[%d]=%f ", i, h_A[i]);
    }

    BASE_TYPE *d_A = NULL;
    cudaMalloc((void **)&d_A, Asize);
    BASE_TYPE *d_C = NULL;
    cudaMalloc((void **)&d_C, Asize);

    cudaMemcpy(d_A, h_A, Asize, cudaMemcpyHostToDevice);

    dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
    // BUG FIX: ceiling division so the grid covers dimensions that are not
    // multiples of BLOCK_SIZE; the original truncated (100/16=6, 200/16=12)
    // and left the last partial tiles uncomputed.
    dim3 blocksPerGrid = dim3((Acols + BLOCK_SIZE - 1) / BLOCK_SIZE,
                              (Arows + BLOCK_SIZE - 1) / BLOCK_SIZE);

    matrixMult <<<blocksPerGrid, threadsPerBlock >>> (d_A, d_C, Acols, Arows);
    cudaMemcpy(h_C, d_C, Asize, cudaMemcpyDeviceToHost);

    printf("Test Started\n");
    bool t = false;
    for (int i = 0; i < Arows; i++)
    {
        for (int j = 0; j < Arows; j++)
        {
            // NOTE(review): exact double comparison against 1 — with the
            // random A above this will essentially always differ; a
            // tolerance-based check is likely what was wanted.
            if (h_C[i * Arows + j] != 1)
            {
                t = true;
                printf("Matrix A is not orthogonal\n");
            }
            if (t) break;
        }
        if (t) break;
    }
    // BUG FIX: only report success when no mismatch was found; the original
    // printed "Test Passed" unconditionally.
    if (!t)
        printf("Test Passed\n");

    cudaFree(d_A);
    cudaFree(d_C);
    free(h_A);
    free(h_C);

    getchar();
    system("pause");
}
6,055
#ifndef _EXP_KERNEL_
#define _EXP_KERNEL_

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>

/*
 * Element-wise exponential: out[index] = exp(in[index]) for index in [0, n).
 * One thread per element; threads beyond n do nothing. No synchronization is
 * required because each thread touches only its own element (the original
 * had a pointless trailing __syncthreads()).
 */
template <class T>
__global__ void expKernel(T * in, T * out, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        out[index] = exp(in[index]);
}

/* Wrapper function for expKernel
   n               - array size
   threadsPerBlock - 1-D block size used for the launch */
template <class T>
void exp(T * in, T * out, int n, int threadsPerBlock)
{
    dim3 grid(ceil(n/(float)threadsPerBlock), 1, 1);
    dim3 block(threadsPerBlock, 1, 1);
    expKernel<T><<<grid, block>>>(in, out, n);

    cudaError_t cudaerr = cudaDeviceSynchronize();
    // BUG FIX: compare against the runtime-API enumerator cudaSuccess — the
    // original used the driver-API CUDA_SUCCESS (a CUresult value). Also
    // report the correct kernel name: the message said "sigmoid".
    if (cudaerr != cudaSuccess)
        printf("exp kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr));
}

template void exp<float>(float * in, float * out, int n, int threadsPerBlock);
template void exp<double>(double * in, double * out, int n, int threadsPerBlock);

#endif
6,056
#include "includes.h"

// Element-wise safe division: C[i] = A[i] / B[i], with C[i] = 0 whenever the
// divisor is zero. Assumes a 3-D grid of 1-D blocks; the grid/block
// coordinates are flattened so that each thread handles exactly one element.
__global__ void FloatDiv(float *A, float *B, float *C)
{
    // Linearize (blockIdx.x, blockIdx.y, blockIdx.z) into a single block
    // rank, then append the thread offset within the block.
    unsigned int blockRank = (blockIdx.x * gridDim.y + blockIdx.y) * gridDim.z + blockIdx.z;
    unsigned int i = blockRank * blockDim.x + threadIdx.x;

    float divisor = B[i];
    C[i] = (divisor != 0) ? A[i] / divisor : 0;
}
6,057
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>

// Tile edge for the 2-D transpose launch: 16*16 = 256 threads per block,
// well under the 1024-threads-per-block hardware limit.
#define TILE_DIM 16

/**
 * Out-of-place matrix transpose: result[c][r] = matrix[r][c].
 *
 * BUG FIX: the original was launched as <<<row, col>>> with col = 16384
 * threads per block — far beyond the 1024-thread limit — so the launch
 * failed and the kernel never ran. The kernel now expects a 2-D grid of
 * TILE_DIM x TILE_DIM blocks and bounds-checks the padded tail; its
 * parameter list is unchanged.
 */
__global__ void matTran(int result_row_size, int result_col_size, float* result,
                        int input_row_size, int input_col_size, float* matrix){
    // 2-D global coordinates of the input element this thread moves.
    int current_row = blockIdx.y * blockDim.y + threadIdx.y;
    int current_col = blockIdx.x * blockDim.x + threadIdx.x;

    // Threads of the padded grid outside the matrix do nothing.
    if (current_row >= input_row_size || current_col >= input_col_size)
        return;

    // NOTE(review): like the original, *_row_size is used as the row stride,
    // which is only correct for square matrices — confirm before using with
    // row != col.
    int src = current_row * input_row_size + current_col;
    int dst = current_col * result_row_size + current_row;
    result[dst] = matrix[src];
}

typedef unsigned long long timestamp_t;
static timestamp_t get_timestamp();

int main(){
    int row = 16384;
    int col = 16384;

    float *matrix, *result;
    float *d_matrix, *d_result;

    matrix = (float*) malloc(row * col * sizeof(float));
    result = (float*) malloc(row * col * sizeof(float));
    cudaMalloc( &d_matrix, row * col * sizeof(float));
    cudaMalloc( &d_result, row * col * sizeof(float));

    // Zero everything, then mark two bands so the transpose is observable.
    for(int i = 0; i < row*col; i++){
        matrix[i] = 0;
        result[i] = 0;
    }
    for(int i = 0, j = row*2; i < row; j++, i++){
        matrix[i] = 2.5;
        matrix[j] = 2.6;
        result[i] = 0;
    }

    timestamp_t t0 = get_timestamp();

    cudaMemcpy(d_matrix, matrix, col * row * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_result, result, col * row * sizeof(float), cudaMemcpyHostToDevice);

    // 2-D launch covering the full matrix with ceiling division on both axes.
    dim3 block(TILE_DIM, TILE_DIM);
    dim3 grid((col + TILE_DIM - 1) / TILE_DIM, (row + TILE_DIM - 1) / TILE_DIM);
    matTran <<<grid, block>>> (row, col, d_result, row, col, d_matrix);

    cudaMemcpy(result, d_result, col * row * sizeof(float), cudaMemcpyDeviceToHost);

    timestamp_t t1 = get_timestamp();
    double diff = (double)t1 - (double)t0;
    printf("RUNNING TIME: %f microsecond\n", diff);

    // BUG FIX: the original leaked all four buffers (~4 GiB total).
    free(matrix);
    free(result);
    cudaFree(d_matrix);
    cudaFree(d_result);
    return 0;
}

// Current wall-clock time in microseconds.
static timestamp_t get_timestamp(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_usec + (timestamp_t)now.tv_sec * 1000000;
}
6,058
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define N 1000
#define THREADS_PER_BLOCK 10
// One int counter per ASCII code point (0..127).
#define NUM_BINS 128

// Builds a character histogram of buffer[0..N) in frequencies[].
// BUG FIX: many threads increment the same bin concurrently, so the update
// must be atomic — the original's frequencies[c]++ was a data race that
// lost counts.
__global__ void histogram(char *buffer, int *frequencies){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < N)
        atomicAdd(&frequencies[(int) buffer[i]], 1);
}

int main(void){
    cudaError_t err = cudaSuccess;
    // BUG FIX: the buffer holds chars, not ints; the original allocated
    // N * sizeof(int) bytes for it (harmless over-allocation, but wrong).
    size_t size = N * sizeof(char);

    char *buffer;
    cudaMallocManaged(&buffer, size);
    // Use `buffer` on the CPU and/or on any GPU in the accelerated system.

    // BUG FIX: the original allocated 127 *bytes* for the counters; each bin
    // is an int, so the kernel wrote far past the end of the allocation.
    int *frequencies;
    cudaMallocManaged(&frequencies, NUM_BINS * sizeof(int));
    // BUG FIX: the bins were never zeroed before counting.
    cudaMemset(frequencies, 0, NUM_BINS * sizeof(int));

    // Printable ASCII, codes 32..126.
    for(int i = 0; i < N; i++)
        buffer[i] = 32 + rand() % 95;

    histogram<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(buffer, frequencies);

    if ((err = cudaGetLastError()) != cudaSuccess){
        fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();

    for(int i = 32; i < 127; i++)
        printf("%c: %d\n", i, frequencies[i]);

    cudaFree(buffer);
    cudaFree(frequencies);  // BUG FIX: the original never freed this
    return 0;
}
6,059
#include <stdio.h>
#include <time.h>

#define PerThread 1024*16   // number of series terms each thread accumulates
#define N 64*256*1024*16    // total number of terms summed for pi
#define BlockNum 64         // number of blocks in the launch
#define ThreadNum 256       // threads per block

// Computes partial sums of the Leibniz series pi/4 = 1 - 1/3 + 1/5 - ...
// Each thread accumulates PerThread consecutive terms, a shared-memory tree
// reduction collapses the block, and thread 0 writes its block's total to
// Gpu_list[blockIdx.x].
__global__ void Gpu_calPI(double* Gpu_list)
{
    __shared__ double cache[ThreadNum]; // one slot per thread in this block

    int lane = threadIdx.x;
    int tid = blockIdx.x * blockDim.x * blockDim.y + threadIdx.x;
    int first = tid * PerThread + 1;
    int last = first + PerThread;

    // Accumulate this thread's slice of the alternating series.
    double partial = 0;
    int sign = 1;
    for (int term = first; term < last; term++) {
        partial += sign * (1.0 / (2 * term - 1));
        sign = -sign;
    }
    cache[lane] = partial;
    __syncthreads();

    // Pairwise tree reduction within the block. The barrier sits outside
    // the divergent branch so every thread reaches it.
    for (int stride = blockDim.x / 2; stride != 0; stride /= 2) {
        if (lane < stride)
            cache[lane] += cache[lane + stride];
        __syncthreads();
    }

    if (lane == 0) {
        Gpu_list[blockIdx.x] = cache[0];
    }
}

int main(void)
{
    double *cpu_list;
    double *Gpu_list;
    double outcome = 0;

    cpu_list = (double*)malloc(sizeof(double) * BlockNum);
    cudaMalloc((void**)&Gpu_list, sizeof(double) * BlockNum);

    double begin = clock();
    Gpu_calPI<<<BlockNum, ThreadNum>>>(Gpu_list);
    cudaMemcpy(cpu_list, Gpu_list, sizeof(double) * BlockNum, cudaMemcpyDeviceToHost);

    // Final reduction of the per-block partial sums on the host.
    for (int b = 0; b < BlockNum; b++) {
        outcome += cpu_list[b];
    }
    outcome = 4 * outcome;

    double end = clock();
    printf("Scu2: N=%d,outcome=%.10f,time spend %.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC));
}
6,060
#include "includes.h"

// Computes Mandelbrot escape iterations for a strip of pixels.
// Each thread handles one pixel: results[globalIndex] receives the first
// iteration (1..M) at which |z| exceeded 2, or -1 if the point did not
// escape within M iterations.
// `inicial` is the linear pixel offset of this strip; `n` is the number of
// pixels to process in this launch.
__global__ void gpu_calculation(float c0r, float c0i, float float_step, float imag_step, int *results, unsigned n, int W, int H, int inicial){
    // index = m*x + y
    const long unsigned globalIndex = blockDim.x*blockIdx.x + threadIdx.x;
    // printf("%d %d\n", blockIdx.x, threadIdx.x);
    if (globalIndex < n) {
        // Compute the complex coordinates by hand.
        // NOTE(review): the linear index is split with /W but %H — these are
        // only mutually consistent when W == H. If the image is not square,
        // one of the two is wrong; confirm against the caller's layout.
        int x = (globalIndex + inicial)/W;
        int y = (globalIndex + inicial)%H;
        // printf("%d %d %d\n", x, y, n);
        // Map the pixel to a point in the complex plane, offset from the
        // corner (c0r, c0i) by one step per pixel on each axis.
        float point_r = c0r+x*float_step;
        float point_i = c0i+y*imag_step;
        // printf("%f %f\n", point_r, point_i);
        // Maximum number of iterations before declaring the point inside.
        const int M = 1000;
        // Iteration number at which the point escaped; -1 if it never did.
        int j = -1;
        // Value of z from the previous iteration (z starts at 0).
        float old_r = 0;
        float old_i = 0;
        float aux = 0;
        // Mandelbrot iteration: z <- z^2 + c.
        for(int i = 1; i <= M; i++){
            // Complex square-and-add computed by hand (aux holds the new
            // real part so the old value is still available for old_i).
            aux = (old_r * old_r) - (old_i * old_i) + point_r;
            old_i = (2 * old_r * old_i) + point_i;
            old_r = aux;
            // abs(complex) = sqrt(a*a + b*b); the square root is avoided by
            // comparing the squared magnitude against 2^2 = 4.
            if( ((old_r * old_r + old_i * old_i) > 4 )){
                j = i;
                break;
            }
        }
        // printf("%d\n", j);
        results[globalIndex] = j;
    }
    // else printf("oh boy\n");
}
6,061
#include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include <math.h> #include <random> #include <cuda_runtime.h> #include <device_launch_parameters.h> using namespace std; #define BLOCKSIZE 1024 #define FLOAT_MIN 10 #define FLOAT_MAX 100 #define GPU_ERR_CHK(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } /** * @brief Comprueba el codigo de error de una llamada Cuda * @param code Codigo del error * @param file Archivo donde se produjo el error * @param line Linea que ha dado el error * @param abort Indica si debe abortar el programa ante el error. True por defecto */ inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /** * @brief Desenrrollado de bucle del ultimo warp de cada bloque * @param sdata Puntero a memoria compartida de device * @param tid Identificador de hebra de GPU * @post sdata es modificado */ __device__ void warpReduce(volatile float *sdata, int tid) { sdata[tid] = (sdata[tid] > sdata[tid + 32]) ? sdata[tid] : sdata[tid + 32]; sdata[tid] = (sdata[tid] > sdata[tid + 16]) ? sdata[tid] : sdata[tid + 16]; sdata[tid] = (sdata[tid] > sdata[tid + 8]) ? sdata[tid] : sdata[tid + 8]; sdata[tid] = (sdata[tid] > sdata[tid + 4]) ? sdata[tid] : sdata[tid + 4]; sdata[tid] = (sdata[tid] > sdata[tid + 2]) ? sdata[tid] : sdata[tid + 2]; sdata[tid] = (sdata[tid] > sdata[tid + 1]) ? 
sdata[tid] : sdata[tid + 1]; }

/**
 * @brief Block-wise max-reduction kernel.
 * @param Min    Input vector to reduce (nverts valid elements).
 * @param Mout   One partial maximum per block (gridDim.x elements).
 * @param nverts Number of valid elements in Min.
 * @note The reduction is only block-wise; the host finishes it over the
 *       per-block results.  Requires blockDim.x to be a power of two >= 64
 *       (the loop hands the last 32 lanes to warpReduce).
 */
__global__ void reduce_max(float *Min, float *Mout, const int nverts) {
  extern __shared__ float sdata[];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // First reduction step while loading into shared memory.  Both loads are
  // now bounds-checked: main() launches one thread per element, so i and
  // i + blockDim.x can point past the end of Min (the original read out of
  // bounds here).  Falling back to Min[0] never changes a maximum.
  float v = (i < nverts) ? Min[i] : Min[0];
  int partner = i + blockDim.x;
  if (partner < nverts && Min[partner] > v)
    v = Min[partner];
  sdata[tid] = v;
  __syncthreads();
  // Tree reduction in shared memory down to the last warp.
  for (int s = blockDim.x / 2; s > 32; s >>= 1) {
    if (tid < s)
      sdata[tid] = (sdata[tid] > sdata[tid + s]) ? sdata[tid] : sdata[tid + s];
    __syncthreads();
  }
  // Final 32 lanes are reduced without further __syncthreads().
  if (tid < 32)
    warpReduce(sdata, tid);
  if (tid == 0)
    Mout[blockIdx.x] = sdata[0];
}

/**
 * @brief Computes vector B of the problem, in place, using shared memory.
 * @param B_in Input values on entry; result values on exit.
 * @param N    Number of elements.
 * @note Neighbour accesses outside [0, N) are treated as 0.  The original
 *       guards used "> N", which read B_in[N] (one past the end); the
 *       correct guard is ">= N".
 * @note NOTE(review): blocks write their results back into the same array
 *       they read neighbours from, so values near block boundaries may be
 *       computed from already-updated data - confirm this race is intended.
 */
__global__ void calcula_B_shared(float *B_in, int N) {
  extern __shared__ float sdata[];
  float A_im2, A_im1, A_i, A_ip1, A_ip2;
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N) {
    sdata[tid] = B_in[i];
    A_im2 = (i - 2 < 0) ? 0.0 : B_in[i - 2];
    A_im1 = (i - 1 < 0) ? 0.0 : B_in[i - 1];
    A_i = B_in[i];
    A_ip1 = (i + 1 >= N) ? 0.0 : B_in[i + 1];
    A_ip2 = (i + 2 >= N) ? 0.0 : B_in[i + 2];
    sdata[tid] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) -
                  3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0;
  }
  // Thread 0 copies the whole block's results from shared memory back out.
  if (tid == 0) {
    int offset = blockIdx.x * blockDim.x;
    int posicion;
    for (int i = 0; i < blockDim.x; i++) {
      posicion = offset + i;
      if (posicion < N) // needed for the surplus threads of the last block
        B_in[posicion] = sdata[i];
    }
  }
}

/**
 * @brief Computes vector B of the problem without shared memory.
 * @param B_in  Input values.
 * @param B_out Output values (out-of-place, so no inter-block races).
 * @param N     Number of elements.
 * @note Neighbour guards fixed from "> N" to ">= N" (see calcula_B_shared).
 */
__global__ void calcula_B(float *B_in, float *B_out, int N) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  float A_im2, A_im1, A_i, A_ip1, A_ip2;
  if (i < N) {
    A_im2 = (i - 2 < 0) ? 0.0 : B_in[i - 2];
    A_im1 = (i - 1 < 0) ? 0.0 : B_in[i - 1];
    A_i = B_in[i];
    A_ip1 = (i + 1 >= N) ? 0.0 : B_in[i + 1];
    A_ip2 = (i + 2 >= N) ? 0.0 : B_in[i + 2];
    B_out[i] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) -
                3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0;
  }
}

/**
 * @brief Returns a pseudo-random float in [FLOAT_MIN, FLOAT_MAX).
 * @note Engine and distribution are static and unseeded, so the sequence
 *       is identical on every run.
 */
float generate_random_float() {
  static default_random_engine generador;
  static uniform_real_distribution<float> distribucion_uniforme(FLOAT_MIN, FLOAT_MAX);
  return distribucion_uniforme(generador);
}

/**
 * @brief Wall-clock time in seconds (gettimeofday based).
 */
double cpuSecond() {
  struct timeval tp;
  gettimeofday(&tp, NULL);
  return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}

/**
 * Driver: builds a random vector A, computes B from it on the CPU and on
 * the GPU (with and without shared memory), reduces B to its maximum on
 * CPU and GPU, checks the results and reports timings.
 */
int main(int argc, char const *argv[]) {
  // Get GPU information
  int dev_id;
  int num_val;
  cudaDeviceProp props;
  GPU_ERR_CHK(cudaGetDevice(&dev_id));
  cudaGetDeviceProperties(&props, dev_id);
  printf("Device %d: \"%s\" with Compute %d.%d capability\n",
         dev_id, props.name, props.major, props.minor);
  cout << "Introduce numero de valores: ";
  cin >> num_val;

  //****************************
  // Initialise input vector A
  //****************************
  float *A = new float[num_val];
  for (int i = 0; i < num_val; i++)
    A[i] = generate_random_float();

  //****************************
  // Compute vector B on the CPU
  //****************************
  float *B = new float[num_val];
  float A_im2, A_im1, A_i, A_ip1, A_ip2;
  double t_cpu_inicial = cpuSecond();
  for (int i = 0; i < num_val; i++) {
    // ">= num_val" (not "> num_val") so A[num_val] is never read,
    // matching the fixed GPU kernels.
    A_im2 = (i - 2 < 0) ? 0.0 : A[i - 2];
    A_im1 = (i - 1 < 0) ? 0.0 : A[i - 1];
    A_i = A[i];
    A_ip1 = (i + 1 >= num_val) ? 0.0 : A[i + 1];
    A_ip2 = (i + 2 >= num_val) ? 0.0 : A[i + 2];
    B[i] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) -
            3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0;
  }
  double t_cpu_final = cpuSecond();
  double t_cpu = t_cpu_final - t_cpu_inicial;

  //*****************************************************
  // Compute vector B on the GPU (without shared memory)
  //*****************************************************
  float *d_A, *d_b;
  float *h_b = new float[num_val];
  GPU_ERR_CHK(cudaMalloc((void **)&d_A, num_val * sizeof(float)));
  GPU_ERR_CHK(cudaMalloc((void **)&d_b, num_val * sizeof(float)));
  GPU_ERR_CHK(cudaMemcpy(d_A, A, num_val * sizeof(float), cudaMemcpyHostToDevice));
  int blocks_per_grid = ceil((float)num_val / (float)BLOCKSIZE);
  double t_gpu_inicial_1 = cpuSecond();
  calcula_B<<<blocks_per_grid, BLOCKSIZE>>>(d_A, d_b, num_val);
  GPU_ERR_CHK(cudaDeviceSynchronize());
  double t_gpu_final_1 = cpuSecond();
  GPU_ERR_CHK(cudaGetLastError());
  GPU_ERR_CHK(cudaMemcpy(h_b, d_b, num_val * sizeof(float), cudaMemcpyDeviceToHost));
  GPU_ERR_CHK(cudaDeviceSynchronize());
  double t_gpu_1 = t_gpu_final_1 - t_gpu_inicial_1;

  //*****************************************************
  // Compute vector B on the GPU (with shared memory)
  //*****************************************************
  float *d_c;
  float *h_c = new float[num_val];
  int shared_mem_size = BLOCKSIZE * sizeof(float);
  GPU_ERR_CHK(cudaMalloc((void **)&d_c, num_val * sizeof(float)));
  GPU_ERR_CHK(cudaMemcpy(d_c, A, num_val * sizeof(float), cudaMemcpyHostToDevice));
  double t_gpu_inicial_2 = cpuSecond();
  calcula_B_shared<<<blocks_per_grid, BLOCKSIZE, shared_mem_size>>>(d_c, num_val);
  GPU_ERR_CHK(cudaDeviceSynchronize());
  double t_gpu_final_2 = cpuSecond();
  GPU_ERR_CHK(cudaGetLastError());
  GPU_ERR_CHK(cudaMemcpy(h_c, d_c, num_val * sizeof(float), cudaMemcpyDeviceToHost));
  GPU_ERR_CHK(cudaDeviceSynchronize());
  double t_gpu_2 = t_gpu_final_2 - t_gpu_inicial_2;

  //******************
  // CPU reduction
  //******************
  double t_red_cpu_ini = cpuSecond();
  float mayor_cpu = B[0];
  for (int i = 1; i < num_val; i++) {
    mayor_cpu = (B[i] > mayor_cpu) ? B[i] : mayor_cpu;
  }
  double t_red_cpu_fin = cpuSecond();
  double t_red_cpu = t_red_cpu_fin - t_red_cpu_ini;

  //******************
  // GPU reduction
  //******************
  float *d_d, *d_e;                        // kernel input / per-block output
  float *h_d = new float[blocks_per_grid]; // per-block results on the host
  GPU_ERR_CHK(cudaMalloc((void **)&d_d, num_val * sizeof(float)));
  GPU_ERR_CHK(cudaMalloc((void **)&d_e, blocks_per_grid * sizeof(float)));
  GPU_ERR_CHK(cudaMemcpy(d_d, B, num_val * sizeof(float), cudaMemcpyHostToDevice));
  double t_gpu_inicial_3 = cpuSecond();
  reduce_max<<<blocks_per_grid, BLOCKSIZE, shared_mem_size>>>(d_d, d_e, num_val);
  GPU_ERR_CHK(cudaDeviceSynchronize());
  double t_gpu_final_3 = cpuSecond();
  GPU_ERR_CHK(cudaGetLastError());
  GPU_ERR_CHK(cudaMemcpy(h_d, d_e, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost));
  GPU_ERR_CHK(cudaDeviceSynchronize());
  // Finish the reduction over the per-block maxima on the host.
  float mayor_gpu = h_d[0];
  for (int i = 1; i < blocks_per_grid; i++) {
    mayor_gpu = (h_d[i] > mayor_gpu) ? h_d[i] : mayor_gpu;
  }
  double t_red_gpu = t_gpu_final_3 - t_gpu_inicial_3;

  //****************************
  // CPU-GPU check
  //****************************
  bool passed = true;
  int i = 0;
  while (passed && i < num_val) {
    if (B[i] != h_b[i] && B[i] != h_c[i]) {
      cout << "ERR B[" << i << "] = " << B[i] << " h_b[" << i
           << "] = " << h_b[i] << " h_c[" << i << "] = " << h_c[i] << endl;
      passed = false;
    }
    i++;
  }
  if (passed)
    cout << "PASSED TEST" << endl;
  else
    cout << "ERROR IN TEST" << endl;

  //********************
  // Report results
  //********************
  cout << "Tiempo en CPU = " << t_cpu << endl
       << "Tiempo en GPU (sin memoria compartida) = " << t_gpu_1 << endl
       << "Tiempo en GPU (con memoria compartida) = " << t_gpu_2 << endl
       << "Ganancia (sin memoria compartida) = " << t_cpu / t_gpu_1 << endl
       << "Ganancia (con memoria compartida) = " << t_cpu / t_gpu_2 << endl
       << "Tiempo de reduccion en CPU = " << t_red_cpu << endl
       << "Tiempo de reduccion en GPU = " << t_red_gpu << endl
       << "Valor de reduccion en CPU = " << mayor_cpu << endl
       << "Valor de reduccion en GPU = " << mayor_gpu << endl;

  // Release host memory.  Arrays allocated with new[] need delete[]
  // (the original used "delete (ptr)", which is undefined behaviour).
  delete[] A;
  delete[] B;
  delete[] h_b;
  delete[] h_c;
  delete[] h_d;

  // Release device memory
  cudaFree(d_A);
  cudaFree(d_b);
  cudaFree(d_c);
  cudaFree(d_d);
  cudaFree(d_e);
  return 0;
}
6,062
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strcmp() is used in main(); it was previously undeclared */

#define HOST_TO_DEVICE 0
#define DEVICE_TO_HOST 1

// Print the usage of the program
inline void usage(char *program)
{
    fprintf(stderr, "usage: %s memsize iters [-r]\n", program);
    fprintf(stderr, "    memsize : memory transferred in bytes (>0)\n");
    fprintf(stderr, "    iters : number of iterations (>0)\n");
    fprintf(stderr, "    -r : re-allocate memory for each iteration\n");
}

// Allocate 'size' worth of bytes to host memory '*ptr'.
// 'pinned' selects page-locked (cudaMallocHost) vs pageable (malloc) memory.
inline void alloc_host_mem(void **ptr, int size, bool pinned)
{
    if (pinned) {
        cudaMallocHost(ptr, size);
    } else {
        *ptr = malloc(size);
    }
}

// Free host memory pointed by 'ptr' (must match how it was allocated)
inline void free_host_mem(void *ptr, bool pinned)
{
    if (pinned) {
        cudaFreeHost(ptr);
    } else {
        free(ptr);
    }
}

// Profile memory copy performance between GPU and CPU
// For each iteration, we copy 'size' worth of bytes in a direction
//   size       : memory copy size for each iteration
//   iters      : number of iterations
//   direction  : HOST_TO_DEVICE / DEVICE_TO_HOST
//   pinned     : whether enable pinned memory allocation at host
//   reallocate : whether re-allocate memory for each time
// This function prints the average throughput and transfer time result
void profile_memcpy(int size, int iters, int direction, bool pinned, bool reallocate)
{
    void *h = NULL, *d = NULL; // host and device memory
    cudaEvent_t start, stop;
    float time, avg_time, total_time, throughput;
    int i;
    cudaError_t result;

    // Allocate host and device memory only one time
    if (!reallocate) {
        alloc_host_mem(&h, size, pinned);
        cudaMalloc(&d, size);
    }
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    total_time = 0;
    for (i = 0; i < iters; i++) {
        // Re-allocate host and device memory for each time
        if (reallocate) {
            alloc_host_mem(&h, size, pinned);
            cudaMalloc((void**)&d, size);
        }
        // Time the (synchronous) copy with CUDA events
        cudaEventRecord(start, 0);
        if (direction == HOST_TO_DEVICE) {
            result = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
        } else {
            result = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        total_time += time;
        if (result != cudaSuccess) {
            fprintf(stderr, "Error: memory copy\n");
        }
        if (reallocate) {
            free_host_mem(h, pinned);
            cudaFree(d);
        }
    }

    // Calculate host to device information
    avg_time = total_time / iters / 1000;              // time in second
    throughput = (float)size / avg_time / 1000000000;  // throughput in GB/s
    if (direction == HOST_TO_DEVICE) {
        printf("  Host to Device Time: %.6f s\n", avg_time);
        printf("  Host to Device Throughput: %.6f GB/s\n", throughput);
    } else {
        printf("  Device to Host Time: %.6f s\n", avg_time);
        printf("  Device to Host Throughput: %.6f GB/s\n", throughput);
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    if (!reallocate) {
        free_host_mem(h, pinned);
        cudaFree(d);
    }
}

int main(int argc, char **argv)
{
    int size, iters;
    bool reallocate = false; // By default, we don't re-allocate memory for each test

    if (!(argc == 3 || (argc == 4 && strcmp(argv[3], "-r") == 0))) {
        usage(argv[0]);
        return EXIT_FAILURE;
    }
    size = atoi(argv[1]);
    iters = atoi(argv[2]);
    if (size <= 0 || iters <= 0) {
        usage(argv[0]);
        return EXIT_FAILURE;
    }
    if (argc == 4) {
        reallocate = true;
    }

    // warm up: forces lazy CUDA context creation out of the timed region
    cudaFree(0);

    // Profile memory copy
    printf("Transfer size (MB): %f\n\n", (float)size / (1024 * 1024));
    printf("Pageable transfers\n");
    profile_memcpy(size, iters, HOST_TO_DEVICE, false, reallocate);
    profile_memcpy(size, iters, DEVICE_TO_HOST, false, reallocate);
    printf("\n");
    printf("Pinned transfers\n");
    profile_memcpy(size, iters, HOST_TO_DEVICE, true, reallocate);
    profile_memcpy(size, iters, DEVICE_TO_HOST, true, reallocate);
    return EXIT_SUCCESS;
}
6,063
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include "device_launch_parameters.h"
#include "openglcuda.cuh"
#include <time.h>

// Element counts for the random-number, matrix and best-cost buffers.
int numElementsRand = 10, numElementsMat = 100, numElementsBestCost = 100;
// Buffer sizes in bytes.
// NOTE(review): sizeRand is computed from numElementsMat (100) rather than
// numElementsRand (10) - confirm whether that is intentional.
int sizeRand = numElementsMat * sizeof(int);
int sizeMat = numElementsMat * sizeof(int);
int sizeBestCost = numElementsBestCost * sizeof(int);
6,064
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>

// CUDA runtime
#include <cuda_runtime.h>

/* Problem size */
#define NI 4096
#define NJ 4096

/**
 * 3x3 convolution of A into B.
 * Thread (x, y) computes B[i][j] with i = row (y dimension), j = column
 * (x dimension); only interior points 1 <= i < NI-1, 1 <= j < NJ-1 are
 * written - the border of B is left untouched.
 */
__global__ void Convolution(double* A, double* B)
{
    int i, j;
    double c11, c12, c13, c21, c22, c23, c31, c32, c33;

    // Fixed convolution coefficients.
    c11 = +0.2; c21 = +0.5; c31 = -0.8;
    c12 = -0.3; c22 = +0.6; c32 = -0.9;
    c13 = +0.4; c23 = +0.7; c33 = +0.10;

    i = blockIdx.y * blockDim.y + threadIdx.y + 1; // row
    j = blockIdx.x * blockDim.x + threadIdx.x + 1; // column

    if (i < NI - 1 && j < NJ - 1) {
        B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
                    + c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
                    + c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
    }
}

// Fill A with uniform random values in [0, 1].
void init(double* A)
{
    int i, j;
    for (i = 0; i < NI; ++i) {
        for (j = 0; j < NJ; ++j) {
            A[i*NJ + j] = (double)rand()/RAND_MAX;
        }
    }
}

int main(int argc, char *argv[])
{
    double *A_h, *B_h;
    double *A_d, *B_d;
    struct timeval cpu_start, cpu_end;

    A_h = (double*)malloc(NI*NJ*sizeof(double));
    B_h = (double*)malloc(NI*NJ*sizeof(double));

    // Allocate device memory for the arrays
    cudaMalloc((void **) &A_d, NI*NJ*sizeof(double));
    cudaMalloc((void **) &B_d, NI*NJ*sizeof(double));

    // Initialize the arrays
    init(A_h);

    // Copy A to the device
    cudaMemcpy(A_d, A_h, NI*NJ*sizeof(double), cudaMemcpyHostToDevice);

    //----------------------------------------------------------------------
    // Each block is 32x32 threads
    unsigned int BLOCK_SIZE_PER_DIM = 32;
    // Round up the number of blocks in each dimension.
    // Fixed: the grid x dimension must cover the columns (NJ) and the
    // y dimension the rows (NI); the original swapped them, which was
    // masked only because NI == NJ.
    unsigned int numBlocksX = (NJ - 1) / BLOCK_SIZE_PER_DIM + 1;
    unsigned int numBlocksY = (NI - 1) / BLOCK_SIZE_PER_DIM + 1;
    // Grid dimensions
    dim3 dimGrid(numBlocksX, numBlocksY, 1);
    // Block dimensions
    dim3 dimBlock(BLOCK_SIZE_PER_DIM, BLOCK_SIZE_PER_DIM, 1);
    //----------------------------------------------------------------------

    // Timed region includes the kernel and the D2H copy (the blocking
    // cudaMemcpy also acts as the synchronization point for the launch).
    gettimeofday(&cpu_start, NULL);
    Convolution<<<dimGrid, dimBlock>>>(A_d, B_d);
    cudaMemcpy(B_h, B_d, NI*NJ*sizeof(double), cudaMemcpyDeviceToHost);
    gettimeofday(&cpu_end, NULL);
    fprintf(stdout, "GPU Runtime: %0.6lfs\n",
            ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 +
             (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);

    // Dump the interior of the result to a file
    printf("================================\n");
    FILE *f = fopen("ask1_cuda_output.txt", "w+");
    if (f == NULL) {
        printf("Error opening ask1_cuda_output.txt!\n");
        exit(1);
    }
    for (int i = 1; i < NI - 1; ++i) {
        for (int j = 1; j < NJ - 1; ++j) {
            fprintf(f, "%f\n", B_h[i*NJ + j]);
        }
    }
    if(f) {
        printf("Results saved in ask1_cuda_output.txt!\n");
    }
    fclose(f);

    // Free host memory
    free(A_h);
    free(B_h);

    // Free device memory
    cudaFree(A_d);
    cudaFree(B_d);
    return 0;
}
6,065
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>
#include <iomanip>
#include <iostream>
#include <vector>

// Abort with a readable message when a CUDA runtime call fails.
inline void check(cudaError_t err, const char* context)
{
    if (err == cudaSuccess)
        return;
    std::cerr << "CUDA error: " << context << ": " << cudaGetErrorString(err)
              << std::endl;
    std::exit(EXIT_FAILURE);
}

#define CHECK(x) check(x, #x)

// One row of the report: a human-readable label, the unit of the value,
// the runtime attribute to query, and the device to query it on.
struct DeviceStat {
    const std::string label;
    const std::string attr_unit;
    const cudaDeviceAttr attr_enum;
    int device;
};

// Queries and prints a fixed set of attributes for one CUDA device.
class DeviceAnalyzer {
    const std::vector<DeviceStat> device_stats;

public:
    DeviceAnalyzer(int);
    void print_stats() const;
};

DeviceAnalyzer::DeviceAnalyzer(int device_num)
    : device_stats({
          { "compute capability", "", cudaDevAttrComputeCapabilityMajor, device_num },
          { "global memory bus width", "bits", cudaDevAttrGlobalMemoryBusWidth, device_num },
          { "streaming multiprocessors", "", cudaDevAttrMultiProcessorCount, device_num },
          { "maximum threads per SM", "", cudaDevAttrMaxThreadsPerMultiProcessor, device_num },
          { "L2 cache size", "bytes", cudaDevAttrL2CacheSize, device_num },
      })
{
}

// Look each attribute up through the runtime API and print one line per stat.
void DeviceAnalyzer::print_stats() const
{
    for (const DeviceStat& entry : device_stats) {
        int value;
        CHECK(cudaDeviceGetAttribute(&value, entry.attr_enum, entry.device));
        std::cout << std::left << std::setw(30) << entry.label + ": "
                  << value << ' ' << entry.attr_unit << std::endl;
    }
}

int main(int argc, char** argv)
{
    const bool wants_help = argc >= 2 && std::strcmp(argv[1], "-h") == 0;
    if (argc > 2 || wants_help) {
        std::cerr << "usage: ./analyze [-h] [device_num]\n";
        std::exit(2);
    }

    const int device_num = (argc == 2) ? std::stoi(argv[1]) : 0;
    const DeviceAnalyzer analyzer(device_num);
    std::cout << "Device " << device_num << " stats:\n";
    analyzer.print_stats();
}
6,066
#include <stdio.h>

// Each thread prints its global (flattened) index.
__global__ void loop()
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    printf("This is iteration number %d\n", idx);
}

int main()
{
    // 2 blocks of 5 threads -> indices 0..9 (print order is unspecified).
    loop<<<2, 5>>>();
    // Device printf output is flushed when the host synchronizes.
    cudaDeviceSynchronize();
}
6,067
#include <stdlib.h>
#include <stdio.h>

/* Abort on any CUDA runtime error, reporting file/line and the message.
 * Fixed to exit with a non-zero status: the original exit(0) reported
 * success to the shell even when a CUDA call had failed. */
#define CSC(call)                                               \
do {                                                            \
    cudaError_t res = call;                                     \
    if (res != cudaSuccess) {                                   \
        fprintf(stderr, "ERROR in %s:%d. Message: %s\n",        \
                __FILE__, __LINE__, cudaGetErrorString(res));   \
        exit(1);                                                \
    }                                                           \
} while(0)

/* In-place absolute value: vector[i] <- |vector[i]| for all i < n.
 * Grid-stride loop, so any launch configuration covers all n elements. */
__global__ void kernel(double *vector, int n) {
    int offset = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += offset) {
        vector[i] *= vector[i] < 0 ? -1 : 1;
    }
}

int main() {
    /* Read n and the n input values from stdin. */
    int n;
    scanf("%d", &n);
    int size = n * sizeof(double);
    double *vector = (double *) malloc(size);
    for (int i = 0; i < n; ++i) {
        scanf("%lf", &vector[i]);
    }

    double *device_vector;
    CSC(cudaMalloc(&device_vector, size));
    CSC(cudaMemcpy(device_vector, vector, size, cudaMemcpyHostToDevice));

    /* Time the kernel with CUDA events. */
    cudaEvent_t start, end;
    CSC(cudaEventCreate(&start));
    CSC(cudaEventCreate(&end));
    CSC(cudaEventRecord(start));
    kernel<<<1024, 1024>>>(device_vector, n);
    CSC(cudaGetLastError());
    CSC(cudaEventRecord(end));
    CSC(cudaEventSynchronize(end));
    float time;
    CSC(cudaEventElapsedTime(&time, start, end));
    CSC(cudaEventDestroy(start));
    CSC(cudaEventDestroy(end));
    printf("Time = %f ms\n", time);

    CSC(cudaMemcpy(vector, device_vector, size, cudaMemcpyDeviceToHost));
    CSC(cudaFree(device_vector));
    /*
    for (int i = 0; i < n; ++i) {
        printf("%.10e ", vector[i]);
    }
    */
    printf("\n");
    free(vector);
    return 0;
}
6,068
#include <cstdio>

// Every thread stores its thread index to *out through a volatile pointer;
// the host then reads back whichever store landed last (the order of the
// racing stores is not defined).
__global__ void iwarp(int* out)
{
    volatile int* sink = out;
    *sink = threadIdx.x;
}

int main()
{
    int* d_val;
    cudaMalloc((void**)&d_val, sizeof(int));

    int zero = 0;
    cudaMemcpy(d_val, &zero, sizeof(int), cudaMemcpyHostToDevice);

    // One block of 16 threads, all writing to the same word.
    iwarp<<<1, 16>>>(d_val);

    int result;
    cudaMemcpy(&result, d_val, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", result);
}
6,069
#include "includes.h"

/**
 * Single-precision y = alpha * A * x (no transpose), Fermi-era variant.
 *
 * Launch layout: 1-D grid, blockDim.x == num_threads (a compile-time macro
 * expected from includes.h).  Each thread accumulates one row of A.
 *
 * n  - number of rows written to y
 * m  - full length of x (the m > n1 tail is currently disabled, see below)
 * n1 - length of x processed in multiples of num_threads
 */
__global__ void sgemvn_kernel2_fermi(int n, int m, int n1, float alpha, float* A, int lda, float *x, float *y)
{
    // Global row handled by this thread; A and x are advanced so that
    // A[0] walks down this thread's row column by column.
    int ind = blockIdx.x*num_threads + threadIdx.x;
    A += ind;
    x += threadIdx.x;
    float res = 0.f;
    __shared__ float buff[num_threads];
    // Stage x through shared memory in tiles of num_threads elements;
    // the leading __syncthreads() keeps the previous tile alive until
    // every thread has consumed it.
    for(int i=0; i<n1; i += num_threads ){
        __syncthreads();
        buff[threadIdx.x] = x[i];
        __syncthreads();
        #pragma unroll
        for(int j=0; j < num_threads ; j++){
            res+=A[0]*buff[j];
            A+=lda;
        }
    }
    // Tail handling for m > n1, deliberately compiled out.
#if 0
    __syncthreads();
    if (m>n1){
        buff[threadIdx.x] = x[n1];
        __syncthreads();
        for(int j=0; j<(m-n1); j++){
            res += A[0]*buff[j];
            A+=lda;
        }
    }
#endif
    // Out-of-range rows still ran the loop (reading past their row is
    // confined to A's allocation only if A is padded) but never write y.
    if (ind<n)
        y[ind] = alpha * res;
}
6,070
/* Taken from gputools with the purpose of showing how we can . */

#define NUM_THREADS 32

/*
   vg_a and vg_b are two matrices.
   n_a, n_b are the number of rows/observations in the respective matrices.
   pitch_a, pitch_b are the number of bytes (not elements) between
   observations in a row, i.e. the stride
   k - number of variables in each observation, i.e. ncol for each of vg_a
   and vg_b.
   d - the storage for the answers
   pitch_d - the stride for d giving the offset between elements
   p - ignored by this metric.

   The kernel calls are arranged in a grid of n_a x n_b. So the kernel looks
   at the block indices and only bothers to compute the lower diagonal block
   of the result matrix. It checks if we are on the diagonal (x==y) and if
   we are in the first thread of the block and sets the result to 0. If we
   are not on the diagonal and x < y,
*/
extern "C"
__global__ void euclidean_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a,
                                      const float * vg_b, size_t pitch_b, size_t n_b,
                                      size_t k,
                                      float * d, size_t pitch_d,
                                      float p)
{
    // One block per (x, y) pair of observations; threads within the block
    // cooperate on the k-element squared-difference sum.
    size_t x = blockIdx.x, y = blockIdx.y;

    // Diagonal: distance of an observation to itself is 0.
    if((x == y) && (x < n_a) && (threadIdx.x == 0))
        d[y * pitch_d + x] = 0.0;

    // Strictly-lower-triangle blocks do the real work.  The condition is
    // uniform per block (it depends only on blockIdx), so the
    // __syncthreads() calls below are reached by all threads of the block.
    if(y < n_a && x < y) {
        __shared__ float temp[NUM_THREADS];
        temp[threadIdx.x] = 0.0;
        // Each thread accumulates a strided partial sum over the k columns.
        for(size_t offset = threadIdx.x; offset < k; offset += NUM_THREADS) {
            float t = vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset];
            temp[threadIdx.x] += (t * t);
        }

        // Sync with other threads
        __syncthreads();

        // Reduce the NUM_THREADS partial sums in shared memory.
        // NOTE(review): assumes blockDim.x == NUM_THREADS and a power of
        // two - confirm the launch configuration matches.
        for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
            if(threadIdx.x < stride)
                temp[threadIdx.x] += temp[threadIdx.x + stride];
            __syncthreads();
        }

        // Write to global memory: the result matrix is symmetric, so fill
        // both (x, y) and (y, x).
        if(threadIdx.x == 0) {
            float s = sqrt(temp[0]);
            d[y * pitch_d + x] = s;
            d[x * pitch_d + y] = s;
        }
    }
}
6,071
#include "includes.h"

// Collapses a 2-D field (nrad rows of nsec samples, row-major) into a 1-D
// profile: axifield[i] = mean of the nsec entries of row i of gridfield.
// One thread per row; 1-D launch.
__global__ void Make1DprofileKernel (double *gridfield, double *axifield, int nsec, int nrad)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= nrad)
        return;

    double acc = 0.0;
    for (int col = 0; col < nsec; ++col)
        acc += gridfield[row * nsec + col];

    axifield[row] = acc / (double)nsec;
}
6,072
#include "includes.h"

// Empty kernel: performs no work.  Useful only as a launch smoke test
// (verifying that the toolchain, driver and device setup can run a kernel).
__global__ void helloWorld(){
}
6,073
/* objective
 * C = A*B // A[m][k], B[k][n], C[m][n]
 * compile: nvcc --gpu-architecture=compute_60 --gpu-code=sm_60 -O3 matmul_double.cu -o matmul_double

 Using nvprof for this lab
 nvprof -- query-metrics
 nvprof dram_read_transactions ./test 1024 1024 128
 nvprof ./test 1024 1024 128
 second line of result shows time for GPU kernel
 GFlop ( 2MNK * 10^-9 ) / time (second)
 */
#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>

#define TILE_WIDTH 16

// Abort with file/line information on any CUDA runtime error.
#define EC(ans) { chkerr((ans), __FILE__, __LINE__); }
inline void chkerr(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        std::cerr << "ERROR!!!:" << cudaGetErrorString(code)
                  << " File: " << file << " Line: " << line << '\n';
        exit(-1);
    }
}

// Fill A (M x K) and B (K x N) with deterministic test values.
void init (double *A, double *B, int M , int N, int K)
{
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < K; ++j) {
            A[i * K + j] = i * K + j;
        }
    }
    for (int i = 0; i < K; ++i) {
        for (int j = 0; j < N; ++j) {
            B[i * N + j] = i * N + j + 1;
        }
    }
}

// Reference CPU implementation: C = A * B with A (M x K), B (K x N).
void matmul_double_host(double* A, double* B, double* C, int M, int N, int K)
{
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            double tmp = 0;
            for (int k = 0; k < K; ++k) {
                tmp += A[i * K + k] * B[k * N + j];
            }
            C[i * N + j] = tmp;
        }
    }
}

/**
 * Tiled matrix multiply: C = A * B with A (M x K), B (K x N), C (M x N).
 * Launch with TILE_WIDTH x TILE_WIDTH blocks; each thread produces one
 * element of C, staging TILE_WIDTH-wide tiles of A and B through shared
 * memory.  Out-of-range tile elements are loaded as 0 so arbitrary
 * (non-multiple-of-TILE_WIDTH) sizes work.
 */
__global__ void matmul_double(double* A, double* B , double* C, int M, int N, int K)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;

    __shared__ double SA[TILE_WIDTH][TILE_WIDTH];
    __shared__ double SB[TILE_WIDTH][TILE_WIDTH];

    double Csub = 0;
    // Walk the K dimension one tile at a time.
    for (int i = 0; i < (K-1)/TILE_WIDTH + 1; ++i) {
        // Guarded loads: zero-pad tiles that stick out of A or B.
        if ((row < M) && (i * TILE_WIDTH + tx < K)) {
            SA[ty][tx] = A[row*K + i * TILE_WIDTH + tx];
        } else {
            SA[ty][tx] = 0;
        }
        if ((col < N) && (i * TILE_WIDTH + ty < K)) {
            SB[ty][tx] = B[(i*TILE_WIDTH + ty)*N + col];
        } else {
            SB[ty][tx] = 0;
        }
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; ++k) {
            Csub += SA[ty][k] * SB[k][tx];
        }
        // Keep the tiles alive until every thread has consumed them.
        __syncthreads();
    }

    if ((row < M) && (col < N)) {
        C[row * N + col] = Csub;
    }
}

// Compare host and device results element-wise with an absolute tolerance.
void validate (double *host, double *gpu, int M, int N)
{
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            if(std::abs(host[i * N + j] - gpu[i * N + j]) > 1e-3) {
                std::cerr << "possible error at position " << i << ',' << j
                          << " host: " << host[i * N + j]
                          << " device " << gpu[i * N + j] << '\n';
            }
        }
    }
}

int main(int argc, char *argv[])
{
    // Three arguments (M N K) are required -> argc must be at least 4.
    // The original tested "argc < 3", which let "./matmul_double M N"
    // read argv[3] out of bounds.
    if(argc < 4) {
        std::cerr << "Usage: ./matmul_double M N K\n";
        exit(-1);
    }
    int M = std::atoi(argv[1]);
    int N = std::atoi(argv[2]);
    int K = std::atoi(argv[3]);

    /* Host alloc */
    double *hA = (double*) malloc (M * K * sizeof(double));
    double *hB = (double*) malloc (K * N * sizeof(double));
    double *hC = (double*) malloc (M * N * sizeof(double));
    double *dtohC = (double*) malloc (M * N * sizeof(double));

    /* Device alloc */
    double *dA;
    double *dB;
    double *dC;
    EC(cudaMalloc((void**) &dA, M*K * sizeof(double)));
    EC(cudaMalloc((void**) &dB, K*N * sizeof(double)));
    EC(cudaMalloc((void**) &dC, M*N * sizeof(double)));

    /* Initialize host memory */
    init(hA, hB, M, N, K);

    /* host compute */
    matmul_double_host(hA, hB, hC, M, N, K);

    /* Copy from host to device */
    EC(cudaMemcpy(dA, hA, M*K * sizeof(double), cudaMemcpyHostToDevice));
    EC(cudaMemcpy(dB, hB, K*N * sizeof(double), cudaMemcpyHostToDevice));

    /* Launch: grid x covers columns (N), grid y covers rows (M). */
    dim3 dimGrid((N - 1) / TILE_WIDTH + 1, (M - 1) / TILE_WIDTH + 1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    matmul_double<<<dimGrid, dimBlock>>>(dA, dB, dC, M, N, K);
    EC(cudaGetLastError());

    /* Copy from device to host (dC -> dtohC); cudaMemcpy synchronizes. */
    EC(cudaMemcpy(dtohC, dC, sizeof(double)*M*N, cudaMemcpyDeviceToHost));

    /* host vs device validation */
    validate(hC, dtohC, M, N);

    /* be clean */
    free(hA);
    free(hB);
    free(hC);
    free(dtohC);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
6,074
#include "includes.h"

// Naive dense multiply, one output element per thread:
// C[row][col] = sum_k A[row][k] * B[k][col], with A (height x width),
// B (width x width) and C (height x width), all row-major.
__global__ void matrixMulKernel(float *C, float *A, float *B, int width, int height){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Threads past the matrix edge do nothing.
    if(col >= width || row >= height)
        return;

    float acc = 0;
    for(int k = 0; k < width; ++k)
        acc += A[row * width + k] * B[k * width + col];

    C[row * width + col] = acc;
}
6,075
#include <stdio.h>
// #include <cutil.h>

#define MAX 1000000
#define MAX_ITERATIONS 1000
#define CUDA_SAFE_CALL(x) x

/* For every idx in [1, MAX], count the Collatz steps needed to reach 1
 * (capped at MAX_ITERATIONS) and store the count in a[idx].  Threads with
 * idx == 0 or idx > MAX store 0 (their loop never runs).  A count of
 * 69696969 marks a sequence that did not converge within the cap. */
__global__ void kernel(int* a)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i = 0;
    unsigned int answer = idx;
    if (idx != 0 && idx <= MAX) {
        while (answer != 1 && i < MAX_ITERATIONS) {
            if ((answer & 1) == 0) {
                answer = answer >> 1;   // even: halve
            } else {
                answer = 3 * answer + 1; // odd: 3n + 1
            }
            i++;
        }
    }
    if (i == MAX_ITERATIONS) {
        a[idx] = 69696969; // sentinel: did not reach 1 in time
    } else {
        a[idx] = i;
    }
}

int main()
{
    /* Number of 256-thread blocks needed to cover 1..MAX.  The original
     * misnamed this "threads_per_block" and printed it as such. */
    int num_blocks = ceil(MAX / 256.0);
    printf("number of blocks = %d\n", num_blocks);

    int dimx = num_blocks * 256;
    int num_bytes = dimx * sizeof(int);
    int *d_a = 0, *h_a = 0;
    h_a = (int*)malloc(num_bytes);
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_a, num_bytes));
    if (0==h_a || 0==d_a) {
        /* Bail out instead of dereferencing a null buffer below. */
        printf("can't allocate memory");
        return 1;
    }
    CUDA_SAFE_CALL(cudaMemset(d_a, 0, num_bytes));
    CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, num_bytes, cudaMemcpyHostToDevice));

    /* Time the kernel with CUDA events (one API call per macro use —
     * the original packed two calls into one CUDA_SAFE_CALL). */
    cudaEvent_t start, stop;
    CUDA_SAFE_CALL(cudaEventCreate(&start));
    CUDA_SAFE_CALL(cudaEventCreate(&stop));
    CUDA_SAFE_CALL(cudaEventRecord(start, 0));
    kernel<<<num_blocks, 256>>>(d_a);
    CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
    CUDA_SAFE_CALL(cudaEventSynchronize(stop));
    float et;
    CUDA_SAFE_CALL(cudaEventElapsedTime(&et, start, stop));
    CUDA_SAFE_CALL(cudaEventDestroy(start));
    CUDA_SAFE_CALL(cudaEventDestroy(stop));
    printf("kernel execution time: %8.6fms\n", et);

    CUDA_SAFE_CALL(cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost));

    /* Find the longest (capped) sequence length. */
    int max = 0;
    for(int i=0; i<dimx; i++) {
        // printf("%d ", h_a[i]);
        if (h_a[i] > max) max = h_a[i];
    }
    // printf("\n");
    printf("max is %d\n", max);

    free(h_a);
    CUDA_SAFE_CALL(cudaFree(d_a));
    return 0;
}
6,076
#define SQRT_TWO_PI 2.506628274631000

// Normal(mu, sigma) density evaluated in place:
// vals[idx] <- pdf(vals[idx]) for every idx < N.
// Works for any 2-D grid of 3-D blocks (no third grid dimension).
extern "C"
__global__ void calc_loglik(double* vals, int N, double mu, double sigma)
{
    // Flatten (2-D grid) x (3-D block) coordinates into one linear index.
    int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    int thread_in_block = threadIdx.x
                        + threadIdx.y * blockDim.x
                        + threadIdx.z * (blockDim.x * blockDim.y);
    int idx = block_id * threads_per_block + thread_in_block;

    if(idx < N) {
        double z = (vals[idx] - mu) / sigma;
        vals[idx] = exp(-0.5 * z * z) / (sigma * SQRT_TWO_PI);
    }
}
6,077
/*
 * @author Connie Shi
 * Lab 3: Write a reduction program in CUDA that finds the maximum
 * of an array of M integers.
 * Part 1: Write a CUDA version that does not take thread divergence
 * into account. Uses interleaved addressing.
 *
 * Should be run on cuda1 machine with 1024 max threads per block.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 2048

/** Function Declarations **/
void generate_random(int random[], int num_elements);
__global__ void maximum(int random[], int num_elements);

/* Generates M random numbers from 1 to 100000 and put in array
 * Multiply by 100000 because rand()/RAND_MAX is [0, 100000]
 */
void generate_random(int random[], int num_elements)
{
    int i;
    time_t t;

    srand((unsigned)time(&t)); //randomizes seeds

    for (i = 0; i < num_elements; i++) {
        random[i] = (int)(((double)rand()/RAND_MAX)*100000);
    }
}

/* global function called from host and executed on device
 * to do the parallel max reduction, using a tree-like
 * structure branching using nearest neighbors.
 * Does NOT avoid branch divergence.
 *
 * In-place: after the kernel, the maximum of each block's slice sits at
 * random[blockIdx.x * blockDim.x]; the host finishes the reduction.
 * Note: tid is the GLOBAL index, yet the stride test uses blockDim.x.
 * This lines up only because every block starts at a multiple of
 * blockDim.x, so tid % (2*stride) == 0 selects the same threads as the
 * block-local index would.
 */
__global__ void maximum(int random[], int num_elements)
{
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int stride;

    __syncthreads();

    // Interleaved-addressing tree reduction: at each level, every
    // 2*stride-th element absorbs the larger of itself and its neighbor
    // stride away.  The bounds check guards the last (partial) block.
    for (stride = 1; stride < blockDim.x; stride *= 2) {
        __syncthreads();
        if (tid % (2 * stride) == 0 && tid + stride < num_elements) {
            if (random[tid] < random[tid + stride]) {
                random[tid] = random[tid + stride];
            }
        }
    }
}

/**************************************************************/
int main(int argc, char* argv[])
{
    int* h_random;  // host copy of the array
    int* d_random;  // device copy of the array
    clock_t start, end;

    if (argc != 2) {
        printf("Invalid number of commands: usage ./cudabasic M\n");
        exit(1);
    }

    int num_elements = atoi(argv[1]);

    // Create array of M random elements
    h_random = (int*) malloc(sizeof(int) * num_elements);
    generate_random(h_random, num_elements);

    // Work in finding max starts (timing covers copies + kernel + host pass)
    start = clock();

    // Allocate space on device and copy over elements
    cudaError_t err = cudaMalloc((void**)&d_random, sizeof(int) * num_elements);
    if (err != cudaSuccess) {
        printf("cudaMalloc failure\n");
    }

    err = cudaMemcpy(d_random, h_random, sizeof(int) * num_elements, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("cudaMemcpy failure\n");
    }

    // Calculation for device dimensions, one element per thread
    int n_blocks = (int)ceil((double)num_elements/THREADS_PER_BLOCK);
    int n_threads = (num_elements > THREADS_PER_BLOCK) ? THREADS_PER_BLOCK : num_elements;

    // Execute kernel using calculated dimensions
    maximum<<<n_blocks, n_threads>>>(d_random, num_elements);

    // Copy back reduction results (blocking copy also synchronizes)
    cudaMemcpy(h_random, d_random, sizeof(int) * num_elements, cudaMemcpyDeviceToHost);

    // Reduction results are in random[blockIdx.x * n_threads] for each block
    // Iterate over first element per block to find the max
    int i;
    int largest = h_random[0];
    for (i = 0; i < num_elements; i += n_threads) {
        if (largest < h_random[i]) {
            largest = h_random[i];
        }
    }

    end = clock();
    printf("Time to find max %f\n", (double)(end-start)/CLOCKS_PER_SEC);
    printf("Largest: %d\n", largest);

    // Clean up resources
    cudaFree(d_random);
    free(h_random);
}
6,078
#define t_max 1
#define t 1

/* Auto-generated stencil code (the derivation comments below were emitted by
   the code generator).  With t fixed to 1, the index expressions bake the
   time-step-dependent halo offsets into compile-time arithmetic.
   Original stencil specification:
   (T[0][0][0][1][0]=((((T[0][0][0][0][0]*((c[0][0][0][0][1]*T[0][0][0][0][0])+c[0][0][0][0][2]))+c[0][0][0][0][3])+((c[0][0][0][0][4]*T[-1][0][0][0][0])+(c[0][0][0][0][5]*T[1][0][0][0][0])))+(((c[0][0][0][0][6]*T[0][-1][0][0][0])+(c[0][0][0][0][7]*T[0][1][0][0][0]))+((c[0][0][0][0][8]*T[0][0][-1][0][0])+(c[0][0][0][0][9]*T[0][0][1][0][0]))))) */

/**
 * One time-step of a 7-point 3D stencil: reads T_0_0 and the nine
 * coefficient planes c_1_0..c_9_0, writes the updated field into T_0_1.
 * Each thread handles a run of cbx consecutive x-points at one (y, z).
 * The grid's y dimension is folded to cover both y and z
 * (blockIdx.y = z-tile * size_1_2 + y-tile).
 * NOTE(review): T_0_1_out is unused in this body — presumably a
 * generator artifact; confirm against the caller.
 * NOTE(review): the flattened indices assume arrays padded by
 * t-dependent halos in every dimension — TODO confirm allocation sizes.
 */
__global__ void hyperthermia(float * * T_0_1_out, float * T_0_0, float * T_0_1, float * c_1_0, float * c_2_0, float * c_3_0, float * c_4_0, float * c_5_0, float * c_6_0, float * c_7_0, float * c_8_0, float * c_9_0, int x_max, int y_max, int z_max, int cbx)
{
    /* const float * const u__c_1[16] = { c_1_0 } ; const float * const u__c_2[16] = { c_2_0 } ; const float * const u__c_3[16] = { c_3_0 } ; const float * const u__c_4[16] = { c_4_0 } ; const float * const u__c_5[16] = { c_5_0 } ; const float * const u__c_6[16] = { c_6_0 } ; const float * const u__c_7[16] = { c_7_0 } ; const float * const u__c_8[16] = { c_8_0 } ; const float * const u__c_9[16] = { c_9_0 } ; float * const u__T_0[16] = { T_0_0, T_0_1 } ; */
    int _idx0;
    int _idx1;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int _idx6;
    int _idx7;
    int idx_1_2;
    int pt_idx_x;
    int pt_idx_y;
    int pt_idx_z;
    int size_1_1;
    int size_1_2;
    //int t;
    int tmp;
    int v_idx_x;
    int v_idx_x_max;
    int v_idx_y;
    int v_idx_y_max;
    int v_idx_z;
    int v_idx_z_max;

    /* Initializations: decode the folded blockIdx.y into (y-tile, z-tile)
       and derive this thread's (x-run, y, z) coordinates. */
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
    v_idx_x_max=(v_idx_x+cbx);
    v_idx_y=(threadIdx.y+(tmp*blockDim.y));
    v_idx_y_max=(v_idx_y+1);
    v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    v_idx_z_max=(v_idx_z+1);

    /* Implementation */
    /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
        /* for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... } */
        {
            /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
            pt_idx_z=v_idx_z;
            pt_idx_y=v_idx_y;
            for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
            {
                /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
                /* v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0]) */
                /* _idx0: center point of the stencil (flattened 3D index with halos).
                   _idx0 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
                _idx0=(((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1);
                /* _idx1: coefficient index (no halo).
                   _idx1 = ((((pt_idx_z*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
                _idx1=(((((_idx0+(((((-2*pt_idx_z)-2)*t)-x_max)*y_max))+(((((-2*pt_idx_z)-2)*t)-1)*x_max))+(((-4*pt_idx_z)-4)*(t*t)))+(((-2*pt_idx_y)-2)*t))-1);
                /* _idx2: x-1 neighbour.
                   _idx2 = ((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x) */
                _idx2=(_idx0-1);
                /* _idx3: x+1 neighbour.
                   _idx3 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+2) */
                _idx3=(_idx2+2);
                /* _idx4: y-1 neighbour.
                   _idx4 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+(((((2*pt_idx_z)+2)*t)+pt_idx_y)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+((2*pt_idx_y)*t))+pt_idx_x)+1) */
                _idx4=((_idx0-x_max)-(2*t));
                /* _idx5: y+1 neighbour.
                   _idx5 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+2)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+4)*t))+pt_idx_x)+1) */
                _idx5=((_idx0+x_max)+(2*t));
                /* _idx6: z-1 neighbour.
                   _idx6 = ((((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(((((2*pt_idx_z)*t)+pt_idx_y)+1)*x_max))+((4*pt_idx_z)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
                _idx6=((((_idx2+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
                /* _idx7: z+1 neighbour.
                   _idx7 = (((((((((pt_idx_z+2)*x_max)+(((2*pt_idx_z)+4)*t))*y_max)+((((((2*pt_idx_z)+4)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+8)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
                _idx7=(((_idx0+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)));
                /* The stencil update (generator's original array-of-timesteps form kept for reference): */
                // u__T_0[t][_idx0]=((((u__T_0[(t-1)][_idx0]*((u__c_1[(t-1)][_idx1]*u__T_0[(t-1)][_idx0])+u__c_2[(t-1)][_idx1]))+u__c_3[(t-1)][_idx1])+((u__c_4[(t-1)][_idx1]*u__T_0[(t-1)][_idx2])+(u__c_5[(t-1)][_idx1]*u__T_0[(t-1)][_idx3])))+(((u__c_6[(t-1)][_idx1]*u__T_0[(t-1)][_idx4])+(u__c_7[(t-1)][_idx1]*u__T_0[(t-1)][_idx5]))+((u__c_8[(t-1)][_idx1]*u__T_0[(t-1)][_idx6])+(u__c_9[(t-1)][_idx1]*u__T_0[(t-1)][_idx7]))));
                T_0_1[_idx0]=((((T_0_0[_idx0]*((c_1_0[_idx1]*T_0_0[_idx0])+c_2_0[_idx1]))+c_3_0[_idx1])+((c_4_0[_idx1]*T_0_0[_idx2])+(c_5_0[_idx1]*T_0_0[_idx3])))+(((c_6_0[_idx1]*T_0_0[_idx4])+(c_7_0[_idx1]*T_0_0[_idx5]))+((c_8_0[_idx1]*T_0_0[_idx6])+(c_9_0[_idx1]*T_0_0[_idx7]))));
            }
        }
    }
}

/**
 * Companion initialization kernel: seeds the stencil's center and all six
 * neighbours of each point with 0.1, the nine coefficient planes with
 * 0.2..1.0, and the t=1 field's x+1 point with 1.1.
 * Thread/grid decomposition mirrors hyperthermia() above.
 * NOTE(review): the literals are doubles narrowed to float on assignment
 * (e.g. 0.30000000000000004) — generator artifact, behavior unchanged.
 */
__global__ void initialize(float * T_0_0, float * T_0_1, float * c_1_0, float * c_2_0, float * c_3_0, float * c_4_0, float * c_5_0, float * c_6_0, float * c_7_0, float * c_8_0, float * c_9_0, int x_max, int y_max, int z_max, int cbx)
{
    /* Per-timestep views: with t==1, index (t-1)==0 selects the input field. */
    float * const u__c_1[16] = { c_1_0 } ;
    float * const u__c_2[16] = { c_2_0 } ;
    float * const u__c_3[16] = { c_3_0 } ;
    float * const u__c_4[16] = { c_4_0 } ;
    float * const u__c_5[16] = { c_5_0 } ;
    float * const u__c_6[16] = { c_6_0 } ;
    float * const u__c_7[16] = { c_7_0 } ;
    float * const u__c_8[16] = { c_8_0 } ;
    float * const u__c_9[16] = { c_9_0 } ;
    float * const u__T_0[16] = { T_0_0, T_0_1 } ;
    int _idx0;
    int _idx1;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int _idx6;
    int _idx7;
    int idx_1_2;
    int pt_idx_x;
    int pt_idx_y;
    int pt_idx_z;
    int size_1_1;
    int size_1_2;
    //int t;
    int tmp;
    int v_idx_x;
    int v_idx_x_max;
    int v_idx_y;
    int v_idx_y_max;
    int v_idx_z;
    int v_idx_z_max;

    /* Initializations (same decomposition as hyperthermia) */
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
    v_idx_x_max=(v_idx_x+cbx);
    v_idx_y=(threadIdx.y+(tmp*blockDim.y));
    v_idx_y_max=(v_idx_y+1);
    v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    v_idx_z_max=(v_idx_z+1);

    /* Implementation */
    /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
        /* for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... } */
        {
            /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
            pt_idx_z=v_idx_z;
            pt_idx_y=v_idx_y;
            for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
            {
                /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
                /* v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0]) */
                /* _idx0: x-1 point of the stencil footprint.
                   _idx0 = ((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x) */
                _idx0=((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x);
                u__T_0[(t-1)][_idx0]=0.1;
                /* _idx1 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+(((((2*pt_idx_z)+2)*t)+pt_idx_y)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+((2*pt_idx_y)*t))+pt_idx_x)+1) */
                _idx1=(((_idx0-x_max)-(2*t))+1);
                u__T_0[(t-1)][_idx1]=0.1;
                /* _idx2 = ((((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(((((2*pt_idx_z)*t)+pt_idx_y)+1)*x_max))+((4*pt_idx_z)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
                _idx2=((((_idx0+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
                u__T_0[(t-1)][_idx2]=0.1;
                /* _idx3: center point.
                   _idx3 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
                _idx3=(_idx0+1);
                u__T_0[(t-1)][_idx3]=0.1;
                /* _idx4: coefficient index (no halo).
                   _idx4 = ((((pt_idx_z*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
                _idx4=(((((_idx2-(((2*pt_idx_z)*t)*y_max))+((((-2*pt_idx_z)*t)-1)*x_max))-((4*pt_idx_z)*(t*t)))+(((-2*pt_idx_y)-2)*t))-1);
                u__c_1[(t-1)][_idx4]=0.2;
                u__c_2[(t-1)][_idx4]=0.30000000000000004;
                u__c_3[(t-1)][_idx4]=0.4;
                u__c_4[(t-1)][_idx4]=0.5;
                u__c_5[(t-1)][_idx4]=0.6000000000000001;
                u__c_6[(t-1)][_idx4]=0.7000000000000001;
                u__c_7[(t-1)][_idx4]=0.8;
                u__c_8[(t-1)][_idx4]=0.9;
                u__c_9[(t-1)][_idx4]=1.0;
                /* _idx5 = (((((((((pt_idx_z+2)*x_max)+(((2*pt_idx_z)+4)*t))*y_max)+((((((2*pt_idx_z)+4)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+8)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
                _idx5=(((_idx3+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)));
                u__T_0[(t-1)][_idx5]=0.1;
                /* _idx6 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+2)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+4)*t))+pt_idx_x)+1) */
                _idx6=((_idx3+x_max)+(2*t));
                u__T_0[(t-1)][_idx6]=0.1;
                /* _idx7 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+2) */
                _idx7=(_idx0+2);
                u__T_0[(t-1)][_idx7]=0.1;
                /* Seed the output (t=1) field at the center's x+1 point. */
                u__T_0[t][_idx3]=1.1;
            }
        }
    }
}
6,079
//#include <math.h> //#include <stdio.h> //#include <time.h> //#include <vector_functions.h> //#include "stereo_cuda_shared.h" // // //#define USE_NCC 1 //#define USE_SQRT_APPROX 1 // //#define BLOCK_SIZE 32 //#define NCC_HEIGHT 3 //#define NCC_WIDTH 7 //#define HF_NCC_HEIGHT (NCC_HEIGHT / 2) //#define HF_NCC_WIDTH (NCC_WIDTH / 2) //#define WIDE_PATCH_H (BLOCK_SIZE + NCC_HEIGHT - 1) //#define WIDE_PATCH_W (BLOCK_SIZE + NCC_WIDTH - 1) //#define WIDE_PATCH_ELM (WIDE_PATCH_H * WIDE_PATCH_W) //#define MAX_DISP (BLOCK_SIZE * 4) //#define INFTY (1 << 29) // //#define NUM_CHNL 3 // ////////////////////////// //__device__ //Float3 operator*(Float3 &a, float b) //{ // // TODO: use make_float as in (https://stackoverflow.com/questions/26676806/efficiency-of-cuda-vector-types-float2-float3-float4) // Float3 res; // res.arr[0] = a.arr[0] * b; // res.arr[1] = a.arr[1] * b; // res.arr[2] = a.arr[2] * b; // return res; //} // // //__device__ //Float3 operator-(Float3 &a, Float3 &b) //{ // Float3 res; // res.arr[0] = a.arr[0] - b.arr[0]; // res.arr[1] = a.arr[1] - b.arr[1]; // res.arr[2] = a.arr[2] - b.arr[2]; // return res; //} // // //__device__ //Float3 operator*(Float3 a, Float3 b) //{ // Float3 res; // res.arr[0] = a.arr[0] * b.arr[0]; // res.arr[1] = a.arr[1] * b.arr[1]; // res.arr[2] = a.arr[2] * b.arr[2]; // return res; //} // //__device__ //float reduceSum(Float3 &a) { // return a.arr[0] + a.arr[1] + a.arr[2]; //} // //__device__ //void resetValue(Float3 &a, float val) { // a.arr[0] = val; // a.arr[1] = val; // a.arr[2] = val; //} // //__device__ //Float3& operator+=(Float3 &first, const Float3& sec) { // first.arr[0] += sec.arr[0]; // first.arr[1] += sec.arr[1]; // first.arr[2] += sec.arr[2]; // return first; //} ////////////////////////// // // ///* Internally synced */ //__device__ //void loadIntoBuffer(Float3* arr, Float3 buffer[][WIDE_PATCH_W], int offsetRow, int offsetCol, int imgHeight, int imgWidth) { // int respAlongRow = (WIDE_PATCH_H + BLOCK_SIZE - 1) / 
BLOCK_SIZE; // int respAlongCol = (WIDE_PATCH_W + BLOCK_SIZE - 1) / BLOCK_SIZE; // int buffIdxRow, buffIdxCol; // int arrIdxRow, arrIdxCol; // bool flag1, flag2; // for (int i = 0; i < respAlongRow; i++) { // buffIdxRow = i * BLOCK_SIZE + threadIdx.y; // if (buffIdxRow >= WIDE_PATCH_H) // break; // arrIdxRow = buffIdxRow + offsetRow; // flag1 = arrIdxRow < 0 || arrIdxRow >= imgHeight; // for (int j = 0; j < respAlongCol; j++) { // buffIdxCol = j * BLOCK_SIZE + threadIdx.x; // if (buffIdxCol >= WIDE_PATCH_W) // break; // arrIdxCol = buffIdxCol + offsetCol; // flag2 = arrIdxCol < 0 || arrIdxCol >= imgWidth; // if (flag1 || flag2) // resetValue(buffer[buffIdxRow][buffIdxCol], -1); // else // buffer[buffIdxRow][buffIdxCol] = arr[arrIdxRow * imgWidth + arrIdxCol]; // } // } // // __syncthreads(); //} // // //__device__ //void flushBuffer(Float3 buffer[][WIDE_PATCH_W]) { // for (int i = 0; i < WIDE_PATCH_H; i++) { // for (int j = 0; j < WIDE_PATCH_W; j++) // printf("(%.2f, %.2f, %.2f)\t", buffer[i][j].arr[0], buffer[i][j].arr[1], buffer[i][j].arr[2]); // printf("\n"); // } //} // // //__device__ //float inverseSqrt(float n, int iter) { // if (n < 1e-5) // return 1e-5; // #if (USE_PARALLEL_DIRECTIVES) // float x = 0.5; // for (int i = 0; i < iter; i++) // x -= (x * x - 1.0 / n) / (2.0 * x); // return x; // #else // return sqrt(1.0 / n); // #endif //} // //__device__ //void loadMeanStdShared(Float3 buf[][WIDE_PATCH_W], int rCenter, int cCenter, Float3 &mean, Float3 &invStd) { // Float3 sum; // resetValue(sum, 0.0); // int cnt = 0; // for (int r = rCenter - HF_NCC_HEIGHT; r <= rCenter + HF_NCC_HEIGHT; r++) { // for (int c = cCenter - HF_NCC_WIDTH; c <= cCenter + HF_NCC_WIDTH; c++) { // if (buf[r][c].arr[0] >= 0) { // sum += buf[r][c]; // cnt++; // } // } // } // // mean.f = (sum * (1.0 / cnt)).f; // // Float3 varSum, diff; // resetValue(varSum, 0.0); // for (int r = rCenter - HF_NCC_HEIGHT; r <= rCenter + HF_NCC_HEIGHT; r++) { // for (int c = cCenter - HF_NCC_WIDTH; c <= 
cCenter + HF_NCC_WIDTH; c++) { // if (buf[r][c].arr[0] >= 0) { // diff = buf[r][c] - mean; // varSum += (diff * diff); // } // } // } // // varSum.f = (varSum * (1.0 / cnt)).f; // // for (int channel = 0; channel < NUM_CHNL; channel++) // invStd.arr[channel] = inverseSqrt(varSum.arr[channel], 4); //} // // //__device__ //float computeNCCShared(Float3 buf1[][WIDE_PATCH_W], Float3 buf2[][WIDE_PATCH_W], int r, int c1, int c2) { // Float3 mean1, mean2, invStd1, invStd2; // loadMeanStdShared(buf1, r, c1, mean1, invStd1); // loadMeanStdShared(buf2, r, c2, mean2, invStd2); // Float3 invStdMult = invStd1 * invStd2; // // float nccSum = 0.0; // short itemCount = 0; // Float3 nccTerm; // // for (int dr = -HF_NCC_HEIGHT; dr <= HF_NCC_HEIGHT; dr++) { // for (int dc = -HF_NCC_WIDTH; dc <= HF_NCC_WIDTH; dc++) { // if (buf1[r + dr][c1 + dc].arr[0] >= 0 && buf2[r + dr][c2 + dc].arr[0] >= 0) { // nccTerm = (buf1[r + dr][c1 + dc] - mean1) * (buf2[r + dr][c2 + dc] - mean2) * invStdMult; // nccSum += reduceSum(nccTerm); // itemCount += NUM_CHNL; // } // } // } // // float ncc = nccSum / itemCount; // return ncc; //} // // //__device__ //float computeSSDShared(Float3 buf1[][WIDE_PATCH_W], Float3 buf2[][WIDE_PATCH_W], int r, int c1, int c2) { // float ssdSum = 0.0; // short cnt = 0; // Float3 diff; // for (int dr = -HF_NCC_HEIGHT; dr <= HF_NCC_HEIGHT; dr++) { // for (int dc = -HF_NCC_WIDTH; dc <= HF_NCC_WIDTH; dc++) { // if (buf1[r + dr][c1 + dc].arr[0] >= 0 && buf2[r + dr][c2 + dc].arr[0] >= 0) { // diff = buf1[r + dr][c1 + dc] - buf2[r + dr][c2 + dc]; // diff = diff * diff; // ssdSum += reduceSum(diff); // cnt++; // } // } // } // return -ssdSum / (cnt * NUM_CHNL); //} // // //__global__ //void disparityKernel(Problem* problem) { // int br = blockIdx.y; // int bc = blockIdx.x; // int tr = threadIdx.y; // int tc = threadIdx.x; // int imgHeight = problem->height; // int imgWidth = problem->width; // int blockLeaderRow = br * blockDim.y; // int blockLeaderCol = bc * blockDim.x; // int 
row = blockLeaderRow + tr; // int col = blockLeaderCol + tc; // bool cellInvalid = row >= imgHeight || col >= imgWidth; // // __shared__ Float3 buffer1[WIDE_PATCH_H][WIDE_PATCH_W]; // __shared__ Float3 buffer2[WIDE_PATCH_H][WIDE_PATCH_W]; // // Load into shared memory // if (cellInvalid) // __syncthreads(); // else // loadIntoBuffer(problem->img1, buffer1, blockLeaderRow - HF_NCC_HEIGHT, blockLeaderCol - HF_NCC_WIDTH, imgHeight, imgWidth); // // float bestSimilarity = -1e5; // int dispBest = INFTY; // for (int disparityBlock = MAX_DISP / BLOCK_SIZE; disparityBlock >= 0; disparityBlock--) { // int dispStart = disparityBlock * BLOCK_SIZE; // int disparityBlockLeaderCol = blockLeaderCol - dispStart; // if (cellInvalid || disparityBlockLeaderCol < 0) { // __syncthreads(); // __syncthreads(); // continue; // } // loadIntoBuffer(problem->img2, buffer2, blockLeaderRow - HF_NCC_HEIGHT, disparityBlockLeaderCol - HF_NCC_WIDTH, imgHeight, imgWidth); // // int dispEnd = dispStart - BLOCK_SIZE; // float similarity; // for (int disp = dispStart, dispDel = 0; disp > dispEnd; disp--, dispDel++) { // if (col >= disp && disp + tc >= 0) { // #if (USE_NCC) // similarity = computeNCCShared(buffer1, buffer2, tr + HF_NCC_HEIGHT, tc + HF_NCC_WIDTH, dispDel + HF_NCC_WIDTH); // #else // similarity = computeSSDShared(buffer1, buffer2, tr + HF_NCC_HEIGHT, tc + HF_NCC_WIDTH, dispDel + HF_NCC_WIDTH); // #endif // if (similarity > bestSimilarity) { // bestSimilarity = similarity; // dispBest = disp + tc; // } // } // } // __syncthreads(); // } // // if (cellInvalid) // return; // // if (dispBest == INFTY) // problem->res[row * imgWidth + col] = 0.0; // else { // if (dispBest < 0) // dispBest = 0; // problem->res[row * imgWidth + col] = dispBest; // } //} // // //float* computeDisparityMapShared(Float3* img1, Float3* img2, int height, int width) { // float* res; // cudaMalloc(&res, sizeof(float) * height * width); // Problem* problemGPU; // Problem problemCPU(img1, img2, height, width, res); // 
cudaMalloc(&problemGPU, sizeof(Problem)); // cudaMemcpy(problemGPU, &problemCPU, sizeof(Problem), cudaMemcpyHostToDevice); // double tStart = clock(); // // dim3 threadDim(BLOCK_SIZE, BLOCK_SIZE); // int blockCountX = (width + BLOCK_SIZE - 1) / BLOCK_SIZE; // int blockCountY = (height + BLOCK_SIZE - 1) / BLOCK_SIZE; // dim3 blockDim(blockCountX, blockCountY); // disparityKernel<<<blockDim, threadDim>>>(problemGPU); // cudaDeviceSynchronize(); // // double tEnd = clock(); // printf("Kernel call took %.2lf ms.\n", (tEnd - tStart) / CLOCKS_PER_SEC * 1000.0); // float* resCPU = new float[height * width]; // cudaMemcpy(resCPU, res, sizeof(float) * height * width, cudaMemcpyDeviceToHost); // cudaFree(res); // cudaFree(problemGPU); // return resCPU; //} //
6,080
#include "includes.h" __device__ int GPUKernel_Position(int i,int j) { if (i<j){ return j*(j+1)/2+i; } return i*(i+1)/2+j; } __global__ void GPUKernel_VpVm_v2(int a, int b,int v,double * in,double * outp,double * outm) { int blockid = blockIdx.x*gridDim.y + blockIdx.y; int id = blockid*blockDim.x + threadIdx.x; int v2 = v*v; if ( id >= v2 ) return; int d = id%v; int c = (id-d)/v; if ( d > c ) return; int cd = GPUKernel_Position(c,d); outp[cd] = in[d*v+c] + in[c*v+d]; outm[cd] = in[d*v+c] - in[c*v+d]; }
6,081
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> int main(void) { // H has storage for 4 integers thrust::host_vector<int> H(4); // initialize individual elements H[0] = 14; H[1] = 20; H[2] = 38; H[3] = 46; // H.size() returns the size of vector H std::cout << "H has size " << H.size() << std::endl; // print contents of H for(int i = 0; i < H.size(); i++) std::cout << "H[" << i << "] = " << H[i] << std::endl; // resize H H.resize(2); std::cout << "H now has size " << H.size() << std::endl; // Copy host_vector H to device_vector D thrust::device_vector<int> D = H; // elements of D can be modified D[0] = 99; D[1] = 88; // print contents of D for(int i = 0; i < D.size(); i++) std::cout << "D[" << i << "] = " << D[i] << std::endl; // H and D are automatically deleted when the function returns return 0; }
6,082
/**
 *
 * GPU/CUDA port of the C version of the bash carry-chain N-Queens solver.
 * For a detailed explanation see
 * https://suzukiiichiro.github.io/search/?keyword=N-Queens problem
 *
 * Specifying the architecture (optional; faster when given): -arch=sm_13 or -arch=sm_61
 * Recursive CPU run:
 *   $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -r
 * Non-recursive CPU run:
 *   $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -c
 * GPU single thread:
 *   $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -g
 * GPU multi thread:
 *   $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -n
 */
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
// Uncomment the following macro if your system needs it.
//#define UINT64_C(c) c ## ULL
//
// Global variables
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
// Carry chain, non-recursive version
// Struct: global solver state shared by all search functions.
typedef struct
{
  unsigned int size;          // board size N
  unsigned int pres_a[930];   // first coordinate of each candidate pair
  unsigned int pres_b[930];   // second coordinate of each candidate pair
  // uint64_t COUNTER[3];
  // // counter array
  // unsigned int COUNT2;
  // unsigned int COUNT4;
  // unsigned int COUNT8;
}Global;
Global g;
// Struct: one partial board (occupancy masks plus per-row queen columns).
typedef struct Board
{
  uint64_t row;
  uint64_t down;
  uint64_t left;
  uint64_t right;
  long long x[MAX];   // x[i] = column of the queen in row i, -1 if unset
}Board ;
// Struct: per-worker search state.
typedef struct Local
{
  unsigned int size;
  struct Board B;    // current working board
  struct Board nB;   // snapshot before the "north" loop
  struct Board eB;   // snapshot before the "east" loop
  struct Board sB;   // snapshot before the "south" loop
  struct Board wB;   // snapshot before the "west" loop
  unsigned n;
  unsigned e;
  unsigned s;
  unsigned w;
  uint64_t dimx;
  uint64_t dimy;
  uint64_t COUNTER[3];
  // counter array
  unsigned int COUNT2;
  unsigned int COUNT4;
  unsigned int COUNT8;
  unsigned int STEPS;
}Local;
/** Shared by the recursive and non-recursive CPU versions */
// Build the list of candidate coordinate pairs ("chains"): all (a, b)
// with |a - b| > 1, stored in g.pres_a / g.pres_b.
void listChain()
{
  unsigned int idx=0;
  for(unsigned int a=0;a<(unsigned)g.size;++a){
    for(unsigned int b=0;b<(unsigned)g.size;++b){
      if(((a>=b)&&(a-b)<=1)||((b>a)&&(b-a)<=1)){ continue; }
      g.pres_a[idx]=a;
      g.pres_b[idx]=b;
      ++idx;
    }
  }
}
/** CPU non-recursive */
// Try to place a queen at (dimx, dimy) on l->B; returns true when the
// square is attack-free (or already holds that queen) and updates the
// occupancy masks.  Returns false (0) when the placement is pruned.
bool placement(void* args)
{
  Local *l=(Local *)args;
  if(l->B.x[l->dimx]==l->dimy){ return true; }
  if (l->B.x[0]==0){
    if (l->B.x[1]!=(uint64_t)-1){
      if((l->B.x[1]>=l->dimx)&&(l->dimy==1)){ return false; }
    }
  }else{
    if( (l->B.x[0]!=(uint64_t)-1) ){
      if(( (l->dimx<l->B.x[0]||l->dimx>=g.size-l->B.x[0])
        && (l->dimy==0 || l->dimy==g.size-1)
      )){ return 0; }
      if ((  (l->dimx==g.size-1)&&((l->dimy<=l->B.x[0])||
          l->dimy>=g.size-l->B.x[0]))){
        return 0;
      }
    }
  }
  l->B.x[l->dimx]=l->dimy;                   // x is the row, y is the column
  uint64_t row=UINT64_C(1)<<l->dimx;
  uint64_t down=UINT64_C(1)<<l->dimy;
  uint64_t left=UINT64_C(1)<<(g.size-1-l->dimx+l->dimy);  // top-right to bottom-left diagonal
  uint64_t right=UINT64_C(1)<<(l->dimx+l->dimy);          // top-left to bottom-right diagonal
  if((l->B.row&row)||(l->B.down&down)||(l->B.left&left)||(l->B.right&right)){ return false; }
  l->B.row|=row;
  l->B.down|=down;
  l->B.left|=left;
  l->B.right|=right;
  return true;
}
// Non-recursive backtracking count of completions for the inner board
// (outer two rows/columns already fixed).  Explicit stacks in *_a[]
// replace the recursion of solveR().
uint64_t solve(int size,int current,uint64_t row,uint64_t left,uint64_t down,uint64_t right)
{
  uint64_t row_a[MAX];
  uint64_t right_a[MAX];
  uint64_t left_a[MAX];
  uint64_t down_a[MAX];
  uint64_t bitmap_a[MAX];
  for (int i=0;i<size;i++){
    row_a[i]=0;
    left_a[i]=0;
    down_a[i]=0;
    right_a[i]=0;
    bitmap_a[i]=0;
  }
  row_a[current]=row;
  left_a[current]=left;
  down_a[current]=down;
  right_a[current]=right;
  uint64_t bitmap=bitmap_a[current]=~(left_a[current]|down_a[current]|right_a[current]);
  uint64_t total=0;
  uint64_t bit;
  while(current>-1){
    if((bitmap!=0||row&1)&&current<size){
      if(!(down+1)){
        // down is all ones: every column filled -> one solution found.
        total++;
        current--;
        row=row_a[current];
        left=left_a[current];
        right=right_a[current];
        down=down_a[current];
        bitmap=bitmap_a[current];
        continue;
      }else if(row&1){
        // Skip over rows whose queen was pre-placed by the carry chain.
        while( row&1 ){
          row>>=1;
          left<<=1;
          right>>=1;
        }
        bitmap=~(left|down|right);
        // variables needed by the "recursion" must be redefined here.
        continue;
      }else{
        bit=-bitmap&bitmap;   // lowest set bit = next column to try
        bitmap=bitmap^bit;
        if(current<size){
          row_a[current]=row;
          left_a[current]=left;
          down_a[current]=down;
          right_a[current]=right;
          bitmap_a[current]=bitmap;
          current++;
        }
        row>>=1;              // move down one row
        left=(left|bit)<<1;
        down=down|bit;
        right=(right|bit)>>1;
        bitmap=~(left|down|right);
        // variables needed by the "recursion" must be redefined here.
      }
    }else{
      // Dead end: pop the explicit stack.
      current--;
      row=row_a[current];
      left=left_a[current];
      right=right_a[current];
      down=down_a[current];
      bitmap=bitmap_a[current];
    }
  }
  return total;
}
// Non-recursive symmetry elimination: classifies the border placement
// (n/e/s/w indices) against its rotations/reflections and, when this
// orientation is the canonical one, counts completions into the matching
// COUNT2/COUNT4/COUNT8 bucket.
void carryChain_symmetry(void* args)
{
  Local *l=(Local *)args;
  // symmetry elimination
  unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w;
  unsigned const int w2=(g.size-2)*(g.size-1)-1;
  // check whether the diagonal reflection is smaller
  if((l->s==ww)&&(l->n<(w2-l->e))){ return ; }
  // check whether the reflection about the vertical center is smaller
  if((l->e==ww)&&(l->n>(w2-l->n))){ return; }
  // check whether the downward-diagonal reflection is smaller
  if((l->n==ww)&&(l->e>(w2-l->s))){ return; }
  // Pruning: queen in the corner of row 0 -> no rotation check needed, always COUNT8.
  if(l->B.x[0]==0){
    l->COUNTER[l->COUNT8]+=solve(g.size,0,l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return ;
  }
  // When s==w, check the minimum: unless w==n==e==s a rotation is smaller, so skip;
  // if w==n==e==s the board may coincide with its own 90-degree rotation -> COUNT2.
  if(l->s==l->w){
    if((l->n!=l->w)||(l->e!=l->w)){ return; }
    l->COUNTER[l->COUNT2]+=solve(g.size,0,l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return;
  }
  // e==w means identical under 180-degree rotation; when n>=s keep only n==s -> COUNT4.
  if((l->e==l->w)&&(l->n>=l->s)){
    if(l->n>l->s){ return; }
    l->COUNTER[l->COUNT4]+=solve(g.size,0,l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return;
  }
  l->COUNTER[l->COUNT8]+=solve(g.size,0,l->B.row>>2,
    l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
  return;
}
// Non-recursive pthread-style run(): for one "west" pair index l->w,
// enumerates the n/e/s border pairs, placing queens on the four borders
// and delegating canonical boards to carryChain_symmetry().
void thread_run(void* args)
{
  Local *l=(Local *)args;
  // memcpy(&l->B,&l->wB,sizeof(Board));
  // B=wB;
  l->B=l->wB;
  l->dimx=0;
  l->dimy=g.pres_a[l->w];
  //if(!placement(l)){ continue; }
  if(!placement(l)){ return; }
  l->dimx=1;
  l->dimy=g.pres_b[l->w];
  // if(!placement(l)){ continue; }
  if(!placement(l)){ return; }
  //2 place on the left two rows
  // memcpy(&l->nB,&l->B,sizeof(Board));
  // nB=B;
  l->nB=l->B;
  for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){
    // memcpy(&l->B,&l->nB,sizeof(Board));
    // B=nB;
    l->B=l->nB;
    l->dimx=g.pres_a[l->n];
    l->dimy=g.size-1;
    if(!placement(l)){ continue; }
    l->dimx=g.pres_b[l->n];
    l->dimy=g.size-2;
    if(!placement(l)){ continue; }
    // 3 place on the bottom two rows
    // memcpy(&l->eB,&l->B,sizeof(Board));
    // eB=B;
    l->eB=l->B;
    for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){
      // memcpy(&l->B,&l->eB,sizeof(Board));
      // B=eB;
      l->B=l->eB;
      l->dimx=g.size-1;
      l->dimy=g.size-1-g.pres_a[l->e];
      if(!placement(l)){ continue; }
      l->dimx=g.size-2;
      l->dimy=g.size-1-g.pres_b[l->e];
      if(!placement(l)){ continue; }
      // 4 place on the right two columns
      // memcpy(&l->sB,&l->B,sizeof(Board));
      // sB=B;
      l->sB=l->B;
      for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){
        // memcpy(&l->B,&l->sB,sizeof(Board));
        // B=sB;
        l->B=l->sB;
        l->dimx=g.size-1-g.pres_a[l->s];
        l->dimy=0;
        if(!placement(l)){ continue; }
        l->dimx=g.size-1-g.pres_b[l->s];
        l->dimy=1;
        if(!placement(l)){ continue; }
        // symmetry elimination
        carryChain_symmetry(l);
      } //w
    } //e
  } //n
}
// Non-recursive chain build: drives thread_run() over every "west" pair
// and tallies UNIQUE/TOTAL from the symmetry-class counters.
// NOTE(review): `l` is declared as an array but only l[0] is ever used
// (l-> is l[0]); `thread_run(&l)` passes a pointer-to-array that only
// works because it round-trips through void* — confirm intent.
void buildChain()
{
  Local l[(g.size/2)*(g.size-3)];
  // initialize the counters
  l->COUNT2=0;
  l->COUNT4=1;
  l->COUNT8=2;
  l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0;
  // initialize the Board: nB,eB,sB,wB;
  l->B.row=l->B.down=l->B.left=l->B.right=0;
  // initialize Board x[]
  for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; }
  //1 place on the top two rows
  // memcpy(&l->wB,&l->B,sizeof(Board));
  // wB=B;
  l->wB=l->B;
  for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){
    thread_run(&l);
  } //w
  /**
   * Tally
   */
  UNIQUE= l->COUNTER[l->COUNT2]+
          l->COUNTER[l->COUNT4]+
          l->COUNTER[l->COUNT8];
  TOTAL= l->COUNTER[l->COUNT2]*2+
         l->COUNTER[l->COUNT4]*4+
         l->COUNTER[l->COUNT8]*8;
}
// Non-recursive carry-chain entry point.
void carryChain()
{
  listChain();  // build the list of chains
  buildChain(); // build the chains
  // calcChain(&l); // tally
}
/** Recursive CPU version */
// Recursive placement for the inner board, excluding the outer two
// rows/columns; returns the number of completions.
uint64_t solveR(uint64_t row,uint64_t left,uint64_t down,uint64_t right)
{
  if(down+1==0){ return 1; }   // all columns filled -> one solution
  // Skip rows whose queen was pre-placed by the carry chain.
  while((row&1)!=0) {
    row>>=1;
    left<<=1;
    right>>=1;
  }
  row>>=1;
  uint64_t total=0;
  for(uint64_t carryChain=~(left|down|right);carryChain!=0;){
    uint64_t const bit=carryChain&-carryChain;  // lowest free column
    total+=solveR(row,(left|bit)<<1,down|bit,(right|bit)>>1);
    carryChain^=bit;
  }
  return total;
}
// Recursive symmetry elimination (same classification as
// carryChain_symmetry, but counting with solveR).
void carryChain_symmetryR(void* args)
{
  Local *l=(Local *)args;
  // symmetry elimination
  unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w;
  unsigned const int w2=(g.size-2)*(g.size-1)-1;
  // check whether the diagonal reflection is smaller
  if((l->s==ww)&&(l->n<(w2-l->e))){ return ; }
  // check whether the reflection about the vertical center is smaller
  if((l->e==ww)&&(l->n>(w2-l->n))){ return; }
  // check whether the downward-diagonal reflection is smaller
  if((l->n==ww)&&(l->e>(w2-l->s))){ return; }
  // Pruning: queen in the corner of row 0 -> no rotation check needed, always COUNT8.
  if(l->B.x[0]==0){
    l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return ;
  }
  // When s==w, check the minimum: unless w==n==e==s a rotation is smaller, so skip;
  // if w==n==e==s the board may coincide with its own 90-degree rotation -> COUNT2.
  if(l->s==l->w){
    if((l->n!=l->w)||(l->e!=l->w)){ return; }
    l->COUNTER[l->COUNT2]+=solveR(l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return;
  }
  // e==w means identical under 180-degree rotation; when n>=s keep only n==s -> COUNT4.
  if((l->e==l->w)&&(l->n>=l->s)){
    if(l->n>l->s){ return; }
    l->COUNTER[l->COUNT4]+=solveR(l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return;
  }
  l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2,
    l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
  return;
}
// Recursive pthread-style run(): identical border enumeration to
// thread_run(), delegating to carryChain_symmetryR().
void thread_runR(void* args)
{
  Local *l=(Local *)args;
  // memcpy(&l->B,&l->wB,sizeof(Board));
  // B=wB;
  l->B=l->wB;
  l->dimx=0;
  l->dimy=g.pres_a[l->w];
  //if(!placement(l)){ continue; }
  if(!placement(l)){ return; }
  l->dimx=1;
  l->dimy=g.pres_b[l->w];
  // if(!placement(l)){ continue; }
  if(!placement(l)){ return; }
  //2 place on the left two rows
  // memcpy(&l->nB,&l->B,sizeof(Board));
  // nB=B;
  l->nB=l->B;
  for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){
    // memcpy(&l->B,&l->nB,sizeof(Board));
    // B=nB;
    l->B=l->nB;
    l->dimx=g.pres_a[l->n];
    l->dimy=g.size-1;
    if(!placement(l)){ continue; }
    l->dimx=g.pres_b[l->n];
    l->dimy=g.size-2;
    if(!placement(l)){ continue; }
    // 3 place on the bottom two rows
    // memcpy(&l->eB,&l->B,sizeof(Board));
    // eB=B;
    l->eB=l->B;
    for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){
      // memcpy(&l->B,&l->eB,sizeof(Board));
      // B=eB;
      l->B=l->eB;
      l->dimx=g.size-1;
      l->dimy=g.size-1-g.pres_a[l->e];
      if(!placement(l)){ continue; }
      l->dimx=g.size-2;
      l->dimy=g.size-1-g.pres_b[l->e];
      if(!placement(l)){ continue; }
      // 4 place on the right two columns
      // memcpy(&l->sB,&l->B,sizeof(Board));
      // sB=B;
      l->sB=l->B;
      for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){
        // memcpy(&l->B,&l->sB,sizeof(Board));
        // B=sB;
        l->B=l->sB;
        l->dimx=g.size-1-g.pres_a[l->s];
        l->dimy=0;
        if(!placement(l)){ continue; }
        l->dimx=g.size-1-g.pres_b[l->s];
        l->dimy=1;
        if(!placement(l)){ continue; }
        // symmetry elimination
        carryChain_symmetryR(l);
      } //w
    } //e
  } //n
}
// Recursive chain build (same structure and caveats as buildChain()).
void buildChainR()
{
  Local l[(g.size/2)*(g.size-3)];
  // initialize the counters
  l->COUNT2=0;
  l->COUNT4=1;
  l->COUNT8=2;
  l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0;
  // initialize the Board: nB,eB,sB,wB;
  l->B.row=l->B.down=l->B.left=l->B.right=0;
  // initialize Board x[]
  for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; }
  //1 place on the top two rows
  // memcpy(&l->wB,&l->B,sizeof(Board));
  // wB=B;
  l->wB=l->B;
  for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){
    thread_runR(&l);
  } //w
  /**
   * Tally
   */
  UNIQUE= l->COUNTER[l->COUNT2]+
          l->COUNTER[l->COUNT4]+
          l->COUNTER[l->COUNT8];
  TOTAL= l->COUNTER[l->COUNT2]*2+
         l->COUNTER[l->COUNT4]*4+
         l->COUNTER[l->COUNT8]*8;
}
// Recursive carry-chain entry point.
void carryChainR()
{
  listChain();   // build the list of chains
  buildChainR(); // build the chains
  // calcChain(&l); // tally
}
/** GPU version */
// GPU: check queen attacks.
// NOTE(review): despite the name this is a plain host function (no
// __device__/__global__ qualifier) and duplicates placement() exactly.
bool GPU_placement(void* args)
{
  Local *l=(Local *)args;
  if(l->B.x[l->dimx]==l->dimy){ return true; }
  if (l->B.x[0]==0){
    if (l->B.x[1]!=(uint64_t)-1){
      if((l->B.x[1]>=l->dimx)&&(l->dimy==1)){ return false; }
    }
  }else{
    if( (l->B.x[0]!=(uint64_t)-1) ){
      if(( (l->dimx<l->B.x[0]||l->dimx>=g.size-l->B.x[0])
        && (l->dimy==0 || l->dimy==g.size-1)
      )){ return 0; }
      if ((  (l->dimx==g.size-1)&&((l->dimy<=l->B.x[0])||
          l->dimy>=g.size-l->B.x[0]))){
        return 0;
      }
    }
  }
  l->B.x[l->dimx]=l->dimy;                   // x is the row, y is the column
  uint64_t row=UINT64_C(1)<<l->dimx;
  uint64_t down=UINT64_C(1)<<l->dimy;
  uint64_t left=UINT64_C(1)<<(g.size-1-l->dimx+l->dimy);  // top-right to bottom-left diagonal
  uint64_t right=UINT64_C(1)<<(l->dimx+l->dimy);          // top-left to bottom-right diagonal
  if((l->B.row&row)||(l->B.down&down)||(l->B.left&left)||(l->B.right&right)){ return false; }
  l->B.row|=row;
  l->B.down|=down;
  l->B.left|=left;
  l->B.right|=right;
  return true;
}
// GPU recursive inner-board placement.
// NOTE(review): recurses into solveR(), not GPU_solveR(), and carries no
// __device__ qualifier — this "GPU" variant runs on the host as written.
uint64_t GPU_solveR(uint64_t row,uint64_t left,uint64_t down,uint64_t right)
{
  if(down+1==0){ return 1; }
  while((row&1)!=0) {
    row>>=1;
    left<<=1;
    right>>=1;
  }
  row>>=1;
  uint64_t total=0;
  for(uint64_t carryChain=~(left|down|right);carryChain!=0;){
    uint64_t const bit=carryChain&-carryChain;
    total+=solveR(row,(left|bit)<<1,down|bit,(right|bit)>>1);
    carryChain^=bit;
  }
  return total;
}
// GPU recursive symmetry elimination.
// NOTE(review): counts via solveR() (the CPU routine), not GPU_solveR().
void GPU_carryChain_symmetryR(void* args)
{
  Local *l=(Local *)args;
  // symmetry elimination
  unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w;
  unsigned const int w2=(g.size-2)*(g.size-1)-1;
  // check whether the diagonal reflection is smaller
  if((l->s==ww)&&(l->n<(w2-l->e))){ return ; }
  // check whether the reflection about the vertical center is smaller
  if((l->e==ww)&&(l->n>(w2-l->n))){ return; }
  // check whether the downward-diagonal reflection is smaller
  if((l->n==ww)&&(l->e>(w2-l->s))){ return; }
  // Pruning: queen in the corner of row 0 -> no rotation check needed, always COUNT8.
  if(l->B.x[0]==0){
    l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return ;
  }
  // When s==w, check the minimum: unless w==n==e==s a rotation is smaller, so skip;
  // if w==n==e==s the board may coincide with its own 90-degree rotation -> COUNT2.
  if(l->s==l->w){
    if((l->n!=l->w)||(l->e!=l->w)){ return; }
    l->COUNTER[l->COUNT2]+=solveR(l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return;
  }
  // e==w means identical under 180-degree rotation; when n>=s keep only n==s -> COUNT4.
  if((l->e==l->w)&&(l->n>=l->s)){
    if(l->n>l->s){ return; }
    l->COUNTER[l->COUNT4]+=solveR(l->B.row>>2,
      l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
    return;
  }
  l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2,
    l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5));
  return;
}
// GPU recursive pthread-style run().
// NOTE(review): delegates to carryChain_symmetryR() (CPU variant), not
// GPU_carryChain_symmetryR() — confirm whether that is intentional.
void GPU_thread_runR(void* args)
{
  Local *l=(Local *)args;
  // memcpy(&l->B,&l->wB,sizeof(Board));
  // B=wB;
  l->B=l->wB;
  l->dimx=0;
  l->dimy=g.pres_a[l->w];
  //if(!GPU_placement(l)){ continue; }
  if(!GPU_placement(l)){ return; }
  l->dimx=1;
  l->dimy=g.pres_b[l->w];
  // if(!GPU_placement(l)){ continue; }
  if(!GPU_placement(l)){ return; }
  //2 place on the left two rows
  // memcpy(&l->nB,&l->B,sizeof(Board));
  // nB=B;
  l->nB=l->B;
  for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){
    // memcpy(&l->B,&l->nB,sizeof(Board));
    // B=nB;
    l->B=l->nB;
    l->dimx=g.pres_a[l->n];
    l->dimy=g.size-1;
    if(!GPU_placement(l)){ continue; }
    l->dimx=g.pres_b[l->n];
    l->dimy=g.size-2;
    if(!GPU_placement(l)){ continue; }
    // 3 place on the bottom two rows
    // memcpy(&l->eB,&l->B,sizeof(Board));
    // eB=B;
    l->eB=l->B;
    for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){
      // memcpy(&l->B,&l->eB,sizeof(Board));
      // B=eB;
      l->B=l->eB;
      l->dimx=g.size-1;
      l->dimy=g.size-1-g.pres_a[l->e];
      if(!GPU_placement(l)){ continue; }
      l->dimx=g.size-2;
      l->dimy=g.size-1-g.pres_b[l->e];
      if(!GPU_placement(l)){ continue; }
      // 4 place on the right two columns
      // memcpy(&l->sB,&l->B,sizeof(Board));
      // sB=B;
      l->sB=l->B;
      for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){
        // memcpy(&l->B,&l->sB,sizeof(Board));
        // B=sB;
        l->B=l->sB;
        l->dimx=g.size-1-g.pres_a[l->s];
        l->dimy=0;
        if(!GPU_placement(l)){ continue; }
        l->dimx=g.size-1-g.pres_b[l->s];
        l->dimy=1;
        if(!GPU_placement(l)){ continue; }
        // symmetry elimination
        carryChain_symmetryR(l);
      } //w
    } //e
  } //n
}
// GPU recursive chain build.
// NOTE(review): several problems here — cudaMallocHost/cudaMalloc are
// called with the address of stack ARRAYS (`&l`, `&lDevice`), which
// overwrites the first bytes of those arrays with an allocation pointer
// instead of reseating them; the kernel launch is commented out and the
// loop runs thread_runR() on the host instead; and the final
// device-to-host cudaMemcpy copies the pre-loop state back over `l`,
// clobbering the computed counters.  Confirm against a working revision.
void GPU_buildChainR(const unsigned int size,unsigned int STEPS)
{
  Local l[(g.size/2)*(g.size-3)];
  l->STEPS=STEPS;
  l->size=size;
  Local lDevice[(g.size/2)*(g.size-3)];
  cudaMallocHost((void**) &l, sizeof(struct Local)*l->STEPS);
  cudaMalloc((void**) &lDevice, sizeof(struct Local)*l->STEPS);
  // initialize the counters
  l->COUNT2=0;
  l->COUNT4=1;
  l->COUNT8=2;
  l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0;
  // initialize the Board: nB,eB,sB,wB;
  l->B.row=l->B.down=l->B.left=l->B.right=0;
  // initialize Board x[]
  for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; }
  //1 place on the top two rows
  // memcpy(&l->wB,&l->B,sizeof(Board));
  // wB=B;
  l->wB=l->B;
  unsigned int limit=(unsigned)(g.size/2)*(g.size-3);
  cudaMemcpy(lDevice,l, sizeof(struct Local)*limit,cudaMemcpyHostToDevice);
  for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){
    thread_runR(&l);
    //GPU_thread_runR<<<l->STEPS/THREAD_NUM,THREAD_NUM>>>(&l);
    //GPU_thread_runR<<<l->STEPS/THREAD_NUM,THREAD_NUM>>>(&l);
  } //w
  cudaMemcpy(l,lDevice, sizeof(struct Local)*limit,cudaMemcpyDeviceToHost);
  /**
   * Tally
   */
  UNIQUE= l->COUNTER[l->COUNT2]+
          l->COUNTER[l->COUNT4]+
          l->COUNTER[l->COUNT8];
  TOTAL= l->COUNTER[l->COUNT2]*2+
         l->COUNTER[l->COUNT4]*4+
         l->COUNTER[l->COUNT8]*8;
}
// GPU recursive carry chain (definition continues past this chunk).
void GPU_carryChainR(const unsigned int size,unsigned int STEPS)
{
  listChain(); // build the list of chains
  GPU_buildChainR(size,STEPS); // build the chains
  // 
calcChain(&l); // 集計 } // CUDA 初期化 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} int i; for(i=0;i<count;i++){ struct cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} cudaSetDevice(i); return true; } //メイン int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false; int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;} else{ gpuNodeLayer=true; } //デフォルトをgpuとする argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPU 再帰\n"); printf(" -c: CPU 非再帰\n"); printf(" -g: GPU 再帰\n"); printf(" -n: GPU キャリーチェーン\n"); } if(cpur){ printf("\n\nCPU キャリーチェーン 再帰 \n"); } else if(cpu){ printf("\n\nCPU キャリーチェーン 非再帰 \n"); } else if(gpu){ printf("\n\nGPU キャリーチェーン シングルスレッド\n"); } else if(gpuNodeLayer){ printf("\n\nGPU キャリーチェーン マルチスレッド\n"); } if(cpu||cpur) { int min=4; int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ TOTAL=UNIQUE=0; gettimeofday(&t0, NULL);//計測開始 if(cpur){ //再帰 g.size=size; carryChainR(); } if(cpu){ //非再帰 g.size=size; carryChain(); } // gettimeofday(&t1, NULL);//計測終了 int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; 
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuNodeLayer) { if(!InitCUDA()){return 0;} int STEPS=24576; int min=4; int targetN=21; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); // 計測開始 if(gpu){ TOTAL=UNIQUE=0; g.size=size; GPU_carryChainR(size,STEPS); //キャリーチェーン }else if(gpuNodeLayer){ TOTAL=UNIQUE=0; g.size=size; GPU_carryChainR(size,STEPS); // キャリーチェーン } gettimeofday(&t1,NULL); // 計測終了 int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
6,083
// Work-efficient (Blelloch-style) exclusive scan on the GPU over long long
// integers, supporting four operators (add, subtract, max, min), with a CPU
// reference implementation and result verification.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define checkCudaError(o, l) _checkCudaError(o, l, __func__)
#define SHARED_MEMORY_BANKS 32
#define LOG_MEM_BANKS 5
// Padding offset used to avoid shared-memory bank conflicts.
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS)
#include<limits.h>
long long int THREADS_PER_BLOCK = 512; // This is the number of threads per block used, and 512 gave the best results
long long int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; // As each thread takes care of two elements, the number of elements is twice the threads
float sequential_scan(long long int* output, long long int* input, long long int length,long long int operation);
float scan(long long int *output, long long int *input, long long int length,long long int operation);
void scanMultiBlock(long long int *output, long long int *input, long long int length,long long int operation,long long int identity);
void scanSingleBlock(long long int *device_output, long long int *device_input, long long int length, long long int operation,long long int identity);
void scanBlockSizedArray(long long int *output, long long int *input, long long int length, long long int operation,long long int identity);
void check(long long int* CPU_Vector,long long int* GPU_Vector, long long int start, long long int end);
__global__ void prescan_SingleBlock(long long int *output, long long int *input, long long int n, long long int nextPowerOfTwo, long long int operation, long long int identity);
__global__ void prescan_MultiBlock(long long int *output, long long int *input, long long int n, long long int* sums, long long int operation,long long int identity);
__global__ void add_two(long long int *output, long long int length, long long int *n1);
__global__ void add_three(long long int *output, long long int length, long long int *n1, long long int *n2);
__global__ void max_two(long long int *output, long long int length, long long int *n1);
__global__ void max_three(long long int *output, long long int length, long long int *n1, long long int *n2);
__global__ void min_two(long long int *output, long long int length, long long int *n1);
__global__ void min_three(long long int *output, long long int length, long long int *n1, long long int *n2);
// NOTE(review): declared but never defined or called in this file; the
// by-value `long long` parameters look like they were meant to be pointers.
__global__ void exc_to_inc(long long output, long long input,long long int operations);
void _checkCudaError(const char *message, cudaError_t err, const char *caller);
void printResult(const char* prefix, long long int result, float milliseconds);
void printArrayInFile (const char* prefix ,long long int Output[], long long int start, long long int end);
// Driver: builds the input (random or user-typed), runs the sequential and
// parallel scans, prints timings, optionally dumps vectors to output.txt,
// and verifies GPU output against the CPU reference.
// NOTE(review): LONG_LONG_MAX/LONG_LONG_MIN are GNU extensions; the portable
// spellings are LLONG_MAX/LLONG_MIN.
void Scan(long long int N, long long int Option, long long int operation, long long int printing)
{
time_t t;
srand((unsigned)time(&t));
long long int *in =(long long int *) malloc (sizeof(long long int) * (N+1)); //input array
long long int *mod_input =(long long int *) malloc (sizeof(long long int) * (N+1));// modified input array in case of Subtraction being the operation
if(Option == 1 ) // IF THE ARRAY NEEDS TO BE RANDOMLY GENERATED
{
printf("Generating Random Numbers...\n");
in[0] = rand()%1000000;
mod_input[0] = in[0];
for (long long int i = 1; i < N; i++)
{
in[i] = rand() % 1000000;
mod_input[i] = -in[i];
}
// Slot N is a sentinel: the operator's identity, so output[N] holds the
// total reduction after an exclusive scan of length N+1.
if(operation==4) { in[N] = LONG_LONG_MAX; }
else if (operation==3) { in[N] = LONG_LONG_MIN; }
else { in[N] = 0; mod_input[N] = 0; }
printf("Finished Generating Random Numbers...\n\n");
}
else // IF THE ARRAY IS FED AS INPUT TO THE PROGRAM
{
printf("Please type the desired %lld values of the vector each seperated by an ENTER KEY or WHITESPACE\n",N);
scanf("%lld",&in[0]);
mod_input[0] = in[0];
for (long long int i = 1; i < N; i++)
{
scanf("%lld",&in[i]);
mod_input[i] = -in[i];
}
if(operation==4) { in[N] = LONG_LONG_MAX; }
else if (operation==3) { in[N] = LONG_LONG_MIN; }
else { in[N] = 0; mod_input[N] = 0; }
printf("Finished Taking Input...\n\n");
}
if(printing) //PRINTING THE OUTPUT ARRAY TO output.txt
{
printf("Printing the Input Vector...\n");
// NOTE(review): this "w" open truncates output.txt; printArrayInFile then
// reopens it in append mode while fp is still open — works, but fragile.
FILE* fp = fopen("output.txt", "w");
printArrayInFile("Original Array",in, 0 , N);
fclose(fp);
printf("Finished Printing the Input Vector \n");
}
long long int *output_CPU = (long long int *) malloc (sizeof(long long int) * (N+1));
printf("Doing the sequential Exclusive scan...\n");
float time_host = sequential_scan(output_CPU, in, (N+1), operation);
printf("Finished the sequential Exclusive scan...\n\n");
//Printing The Result and Time
printResult("Host Time ", output_CPU[N], time_host);
if(printing) //PRINTING THE OUTPUT ARRAY TO output.txt
{
printf("Printing the Scanned Vector formed by the CPU...\n");
printArrayInFile ("HOST RESULT", output_CPU, 1, N+1);
printf("Finished Printing the Scanned Vector formed by the CPU\n\n");
}
// Parallel scan on GPU
printf("Doing the Parallel Exclusive scan...\n");
// NOTE(review): output_GPU holds N elements but scan() copies and writes
// N+1 (and output_GPU[N] is read below) — a one-element heap overrun;
// should be malloc(sizeof(long long int) * (N+1)).
long long int *output_GPU = (long long int *) malloc (sizeof(long long int) * N);
printf("Finished the Parallel Exclusive scan...\n");
if(operation == 2) // Special consideration for subtraction because the operation is not associative
{
// Subtraction is scanned as addition of negated inputs (mod_input).
float time_gpu = scan(output_GPU, mod_input, N+1, operation);
printResult("GPU time ", output_GPU[N], time_gpu);
}
else // for all the other operators
{
float time_gpu = scan(output_GPU, in, N + 1, operation);
printResult("GPU time ", output_GPU[N], time_gpu);
}
if(printing)
{
printf("Printing the Scanned Vector formed by the GPU...\n");
printArrayInFile ("GPU RESULT", output_GPU,1, N + 1);
printf("Finished Printing the Scanned Vector formed by the GPU\n\n");
printf("Please look at the output.txt to see the scanned vectors and input vector\n");
}
// For checking correctness of solution
check(output_CPU,output_GPU,1,N + 1);
//clean up of all memory used up
free(in);
free(mod_input);
// NOTE(review): output_CPU is leaked — the free below is commented out.
//free(output_CPU);
free(output_GPU);
}
// Interactive entry point: reads size, input mode, operator and print flag,
// validates them, then delegates to Scan().
int main(){
long long int N=0, options=0, operation=0;
char printing = 0;
printf("Please input a proper size of the array or vector\n");
scanf("%lld",&N);
if(N <= 0)
{
printf("Please input a proper number which is greater than zero for the size\n");
printf("The application would terminate now\n");
return 0;
}
printf("Please select one of the given options \n");
printf("\t1)Randomize the Elements input array of size %lld\n", N);
printf("\t2)Proived the Elements of input array of size %lld\n", N);
printf("Type 1 or 2 depending upon the option you want to select\n");
scanf("%lld", &options);
printf("\n");
if(options!=1&&options!=2)
{
printf("Please type either 1 or 2 only next time for selecting the Options\n");
printf("The application would terminate now\n");
return 0;
}
printf("Please select one of the given operations\n");
printf("1)Addition\t2)Subtraction\t3)Maximum\t4)Minimum\n");
printf("Type 1,2,3 or 4 depending upon the operation you want to select\n");
scanf("%lld", &operation);
printf("\n");
if(operation!=1 && operation!=2 && operation!=3 && operation!=4)
{
printf("Please type either 1,2,3 OR 4 only next time for selecting the Operator\n");
printf("The application would terminate now\n");
return 0;
}
getchar(); // to eat the enter key;
printf("Do you wish to print the input and scanned vector in an output.txt file?\n");
printf("Type y for Yes or n for No\n");
scanf("%c", &printing);
printf("\n");
if(printing!='y'&& printing!='n')
{
printf("Please type either character 'y' OR 'n' only next time for choosing to print or not the vectors\n");
printf("The application would terminate now\n");
return 0;
}
if(printing=='y')
{
Scan(N,options,operation,1);
}
else
{
Scan(N,options,operation,0);
}
return 0;}
// CPU reference: exclusive scan of `input` into `output` under the chosen
// operator; returns elapsed wall-clock time in milliseconds.
float sequential_scan(long long int* output, long long int* input, long long int length, long long int operation)
{
struct timeval start, end;
gettimeofday(&start, NULL);
switch(operation)
{
case 1: // addition
{
output[0] = 0; // since this is an exclusive scan
output[1] = input[0];
for (long long int j = 2; j < length; ++j)
{
output[j] = input[j - 1] + output[j - 1];
}
break;
}
case 2: // subtraction
{
output[0] = 0; // since this is an exclusive scan
output[1] = input[0];
for (long long int j = 2; j < length; ++j)
{
output[j] = output[j - 1] - input[j - 1];
}
break;
}
case 3: // maximum
{
output[0] = LONG_LONG_MIN;
output[1] = input[0];
for (long long int j = 2; j < length; ++j)
{
if(input[j-1] > output[j - 1]) output[j] = input[j-1];
else output[j] = output[j-1];
}
break;
}
case 4: // minimum
{
output[0] = LONG_LONG_MAX; //since in exclusive scan the first element is the identity of the operator, in this case the max //number possible
output[1] = input[0];
for (long long int j = 2; j < length; ++j)
{
if(input[j-1] < output[j - 1]) output[j] = input[j-1];
else output[j] = output[j-1];
}
break;
}
}
gettimeofday(&end, NULL);
float seconds = (end.tv_sec - start.tv_sec);
float micros = ((seconds * 1000000)+ (end.tv_usec - start.tv_usec));
return (float)(micros/1000);}
// GPU front door: allocates device buffers, picks the operator identity,
// dispatches single- or multi-block scan, and returns the CUDA-event time
// in milliseconds.
float scan(long long int *output, long long int *input, long long int length,long long int operation)
{
long long int *device_input,*device_output;
long long int arraySize = length * sizeof(long long int);
cudaMalloc((void **)&device_output, arraySize);
cudaMalloc((void **)&device_input, arraySize);
// NOTE(review): copying the (uninitialized) host `output` to the device is
// unnecessary; it also reads length elements from a buffer the caller sized
// at length-1 (see Scan()).
cudaMemcpy(device_output, output, arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(device_input, input, arraySize, cudaMemcpyHostToDevice);
cudaEvent_t initial, final;
cudaEventCreate(&initial);
cudaEventCreate(&final);
// starting the timer given in CUDA Library
cudaEventRecord(initial);
long long int identity = 0;
if(operation==4) { identity = LONG_LONG_MAX; }
else if (operation==3) { identity = LONG_LONG_MIN; }
if (length <= ELEMENTS_PER_BLOCK)
{
/*float elapsed = */ scanSingleBlock(device_output, device_input, length, operation, identity);
//printf("The parallelizable part took %lf ms of time", elapsed);//needed for finding parallelizable part
}
else
{
/*float elpased = */scanMultiBlock(device_output, device_input, length, operation , identity);
//printf("The parallelizable part took %lf ms of time", elapsed); //needed for finding parallelizable part
}
// end timer
cudaEventRecord(final);
cudaEventSynchronize(final);
float elapsedTime = 0;
cudaEventElapsedTime(&elapsedTime, initial, final);
cudaMemcpy(output, device_output, arraySize, cudaMemcpyDeviceToHost);
//clean up
cudaFree(device_input);
cudaFree(device_output);
cudaEventDestroy(final);
cudaEventDestroy(initial);
return elapsedTime;}
/*float*/ //needed return type for finding parallelizable part
// Scans arrays longer than one block: the block-aligned prefix is scanned by
// scanBlockSizedArray, the remainder by a single-block scan, and the
// remainder is then combined with the last inclusive value of the prefix.
void scanMultiBlock(long long int *device_output, long long int *device_input, long long int length,long long int operation, long long int identity)
{
long long int reminder = length % (ELEMENTS_PER_BLOCK);
//float temp_time =0, elapsed_time = 0; //needed for finding parallelizable part
if (reminder != 0)
{
// perform a large scan on a compatible multiple of elements
long long int blockMultiple = length - reminder;
/*elapsed_time += */scanBlockSizedArray(device_output, device_input, blockMultiple, operation, identity); // needed varaible for finding parallelizable part
// scan the remaining elements and add the (inclusive) last element of the large scan to this
long long int *startOfOutputArray = &(device_output[blockMultiple]);
long long int *startOfInputArray = &(device_input[blockMultiple]);
/*elapsed_time +=*/ scanSingleBlock(startOfOutputArray, startOfInputArray, reminder, operation, identity);// needed varaible for finding parallelizable part
/* // needed for finding parallelizable part
cudaEvent_t initial, final;
cudaEventCreate(&initial);
cudaEventCreate(&final);
// starting the timer given in CUDA Library
cudaEventRecord(initial);
*/
switch(operation)
{
case 1:
{
add_three<<<1, reminder>>>(startOfOutputArray, reminder, &(device_input[blockMultiple - 1]), &(device_output[blockMultiple - 1]));
break;
}
case 2:
{
add_three<<<1, reminder>>>(startOfOutputArray, reminder, &(device_input[blockMultiple - 1]), &(device_output[blockMultiple - 1]));
break;
}
case 3:
{
max_three<<<1, reminder>>>(startOfOutputArray, reminder, &(device_input[blockMultiple - 1]), &(device_output[blockMultiple - 1]));
break;
}
case 4:
{
min_three<<<1, reminder>>>(startOfOutputArray, reminder, &(device_input[blockMultiple - 1]), &(device_output[blockMultiple - 1]));
break;
}
}
/* Needed for finding parallelizable part
cudaEventRecord(final);
cudaEventSynchronize(final);
cudaEventElapsedTime(&temp_time, initial, final);
elapsed_time += temp_time
cudaFree(device_input);
cudaFree(device_output);
cudaEventDestroy(final);
cudaEventDestroy(initial);
return elapsed_time;
*/
}
else
{
// Both the comments are needed for finding parallelizable part
/*float elapsed_time = */ scanBlockSizedArray(device_output, device_input, length, operation, identity);
/*return elapsed_time;*/
}
}
/*float*/
// Scans up to ELEMENTS_PER_BLOCK elements with one prescan_SingleBlock launch;
// the shared-memory size is rounded up to the next power of two.
void scanSingleBlock(long long int *device_output, long long int *device_input, long long int length,long long int operation, long long int identity)
{
long long int nextPowerOfTwo = 1;
while (nextPowerOfTwo < length)
{
nextPowerOfTwo *= 2;
}
/* Needed for finding parallelizable part
float elapsed_time = 0;
cudaEvent_t initial, final;
cudaEventCreate(&initial);
cudaEventCreate(&final);
// starting the timer given in CUDA Library
cudaEventRecord(initial);
*/
prescan_SingleBlock<<<1, (length + 1) / 2, 2 * nextPowerOfTwo * sizeof(long long int)>>>(device_output, device_input, length, nextPowerOfTwo, operation, identity);
/* Needed for finding parallelizable part
cudaEventRecord(final);
cudaEventSynchronize(final);
cudaEventElapsedTime(&elapsed_time, initial, final);
cudaFree(device_input);
cudaFree(device_output);
cudaEventDestroy(final);
cudaEventDestroy(initial);
return elapsed_time;
*/
}
/*float*/
// Scans a length that is an exact multiple of ELEMENTS_PER_BLOCK: per-block
// prescan, recursive scan of the per-block sums, then a combine pass that
// folds each block's scanned sum into its elements.
// NOTE(review): device_blocks and device_inputcr are never freed in the live
// code path (the frees exist only inside the commented timing block).
void scanBlockSizedArray(long long int *device_output, long long int *device_input, long long int length, long long int operation, long long int identity)
{
long long int num_blocks = length / ELEMENTS_PER_BLOCK;
long long int sharedMemBlockSize = ELEMENTS_PER_BLOCK * sizeof(long long int);
long long int *device_blocks, *device_inputcr;
cudaMalloc((void **)&device_blocks, num_blocks * sizeof(long long int));
cudaMalloc((void **)&device_inputcr, num_blocks * sizeof(long long int));
/* Needed for finding parallelizable part
float elapsed_time = 0, temp_time;
cudaEvent_t initial, final;
cudaEventCreate(&initial);
cudaEventCreate(&final);
// starting the timer given in CUDA Library
cudaEventRecord(initial);
*/
prescan_MultiBlock<<<num_blocks, THREADS_PER_BLOCK, 2*sharedMemBlockSize>>>(device_output, device_input, ELEMENTS_PER_BLOCK, device_blocks, operation, identity);
/* Needed for finding parallelizable part
cudaEventRecord(final);
cudaEventSynchronize(final);
cudaEventElapsedTime(&elapsed_time, initial, final);
cudaFree(device_input);
cudaFree(device_output);
cudaEventDestroy(final);
cudaEventDestroy(initial);
*/
if ((num_blocks + 1) / 2 < THREADS_PER_BLOCK)
{
//Needed for finding parallelizable part
/*elapsed_time+=*/scanSingleBlock(device_inputcr, device_blocks, num_blocks, operation, identity);
}
else
{
// Needed for finding parallelizable part
/*elapsed_time+= */scanMultiBlock(device_inputcr, device_blocks, num_blocks, operation, identity);
}
/* Needed for finding parallelizable part
cudaEventCreate(&initial);
cudaEventCreate(&final);
// starting the timer given in CUDA Library
cudaEventRecord(initial);
*/
switch(operation)
{
case 1:
{
add_two<<<num_blocks, ELEMENTS_PER_BLOCK>>>(device_output, ELEMENTS_PER_BLOCK, device_inputcr);
break;
}
case 2:
{
add_two<<<num_blocks, ELEMENTS_PER_BLOCK>>>(device_output, ELEMENTS_PER_BLOCK, device_inputcr);
break;
}
case 3:
{
max_two<<<num_blocks, ELEMENTS_PER_BLOCK>>>(device_output, ELEMENTS_PER_BLOCK, device_inputcr);
break;
}
case 4:
{
min_two<<<num_blocks, ELEMENTS_PER_BLOCK>>>(device_output, ELEMENTS_PER_BLOCK, device_inputcr);
break;
}
}
/* Needed for finding parallelizable part
cudaEventRecord(final);
cudaEventSynchronize(final);
cudaEventElapsedTime(&temp_time, initial, final);
cudaFree(device_input);
cudaFree(device_output);
cudaEventDestroy(final);
cudaEventDestroy(initial);
elapsed_time += temp_time
cudaFree(device_inputcr);
cudaFree(device_blocks);
return elapsed_time;
*/
}
// Kernel: exclusive scan of up to `n` elements inside one block, padded to
// nextPowerOfTwo, using the up-sweep/down-sweep tree in shared memory with
// bank-conflict-free offsets. Launch: 1 block, (n+1)/2 threads,
// 2*nextPowerOfTwo*sizeof(long long) dynamic shared memory.
__global__ void prescan_SingleBlock(long long int *output, long long int *input, long long int n, long long int nextPowerOfTwo, long long int operation, long long int identity)
{
extern __shared__ long long int temp[];
long long int threadID = threadIdx.x;
long long int offset = 1;
long long int index1 = threadID;
long long int index2 = threadID + (n / 2);
long long int bankOffsetB = CONFLICT_FREE_OFFSET(index2);
long long int bankOffsetA = CONFLICT_FREE_OFFSET(index1);
if (threadID >= n)
{
temp[index1 + bankOffsetA] = 0;
temp[index2 + bankOffsetB] = 0;
}
else
{
temp[index1 + bankOffsetA] = input[index1];
temp[index2 + bankOffsetB] = input[index2];
}
for (long long int d = nextPowerOfTwo/2; d > 0; d= d/2) // Do the reduction by building a operation(like sum) tree in place
{
__syncthreads();
if (threadID < d)
{
long long int index1 = offset * (2 * threadID + 1) - 1;
long long int index2 = offset * (2 * threadID + 2) - 1;
index1 += CONFLICT_FREE_OFFSET(index1);
index2 += CONFLICT_FREE_OFFSET(index2);
switch(operation)
{
case 1: { temp[index2] += temp[index1]; break; }
case 2: { temp[index2] += temp[index1]; break; }
case 3: { if(temp[index2] < temp[index1]) temp[index2] = temp[index1]; break; }
case 4: { if(temp[index2] > temp[index1]) temp[index2] = temp[index1]; break; }}
}
offset *= 2;
}
__syncthreads();
if (threadID == 0)
{
/* //FOR DEBUGGIN PURPOSE PLEASE IGNORE
printf("%s\n", "TEMP");
for(long long int i=0; i < n; i++)
{
printf("%lld ",temp[i]);
}
printf("\n");
*/
if(operation!=4)
{
temp[nextPowerOfTwo - 1 + CONFLICT_FREE_OFFSET(nextPowerOfTwo - 1)] = 0; // clear the last element for exclusive scan
}
else
{
temp[nextPowerOfTwo - 1 + CONFLICT_FREE_OFFSET(nextPowerOfTwo - 1)] = identity; // clear the last element for exclusive scan
}
}
for (long long int d = 1; d < nextPowerOfTwo; d *= 2) // traverse down tree & build scan
{
offset = offset/2;
__syncthreads();
if (threadID < d)
{
long long int index1 = offset * (2 * threadID + 1) - 1;
long long int index2 = offset * (2 * threadID + 2) - 1;
index1 += CONFLICT_FREE_OFFSET(index1);
index2 += CONFLICT_FREE_OFFSET(index2);
long long int t = temp[index1];
temp[index1] = temp[index2];
switch(operation)
{
case 1: { temp[index2] += t; break; }
case 2: { temp[index2] += t; break; }
case 3: { if(temp[index2] < t) temp[index2] = t; break; }
case 4: { if(temp[index2] > t) temp[index2] = t; break; }}
}
}
__syncthreads();
if (threadID < n)
{
output[index1] = temp[index1 + bankOffsetA];
output[index2] = temp[index2 + bankOffsetB];
}}
// Kernel: per-block exclusive scan for the multi-block path; each block scans
// its own n-element slice and writes its total reduction into sums[blockID]
// for the recursive scan of block sums.
__global__ void prescan_MultiBlock(long long int *output, long long int *input, long long int n, long long int *sums,long long int operation,long long int identity)
{
extern __shared__ long long int temp[];
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * n;
long long int index1 = threadID;
long long int index2 = threadID + (n / 2);
long long int bankOffsetA = CONFLICT_FREE_OFFSET(index1);
long long int bankOffsetB = CONFLICT_FREE_OFFSET(index2);
temp[index1 + bankOffsetA] = input[blockOffset + index1];
temp[index2 + bankOffsetB] = input[blockOffset + index2];
long long int offset = 1;
for (long long int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (threadID < d)
{
long long int index1 = offset * (2 * threadID + 1) - 1;
long long int index2 = offset * (2 * threadID + 2) - 1;
index1 += CONFLICT_FREE_OFFSET(index1);
index2 += CONFLICT_FREE_OFFSET(index2);
switch(operation)
{
case 1: { temp[index2] += temp[index1]; break; }
case 2: { temp[index2] += temp[index1]; break; }
case 3: { if(temp[index2] < temp[index1]) temp[index2] = temp[index1]; break; }
// note: the final case falls through harmlessly (nothing follows it)
case 4: { if(temp[index2] > temp[index1]) temp[index2] = temp[index1]; }
}
}
offset *= 2;
}
__syncthreads();
if (threadID == 0)
{
sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
if(operation!=4)
{
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
else
{
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = identity;
}
}
for (long long int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadID < d)
{
long long int index1 = offset * (2 * threadID + 1) - 1;
long long int index2 = offset * (2 * threadID + 2) - 1;
index1 += CONFLICT_FREE_OFFSET(index1);
index2 += CONFLICT_FREE_OFFSET(index2);
long long int t = temp[index1];
temp[index1] = temp[index2];
switch(operation)
{
case 1: { temp[index2] += t; break; }
case 2: { temp[index2] += t; break; }
case 3: { if(temp[index2] < t) temp[index2] = t; break; }
case 4: { if(temp[index2] > t) temp[index2] = t; break; }
}
}
}
__syncthreads();
output[blockOffset + index1] = temp[index1 + bankOffsetA];
output[blockOffset + index2] = temp[index2 + bankOffsetB];
}
//functions to add two or three numbers in given arrays
__global__ void add_two(long long int *output, long long int length, long long int *n)
{
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * length;
output[blockOffset + threadID] += n[blockID];}
__global__ void add_three(long long int *output, long long int length, long long int *n1, long long int *n2)
{
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * length;
output[blockOffset + threadID] += n1[blockID] + n2[blockID];}
//functions to find maximum of two or three numbers in given arrays
__global__ void max_two(long long int *output, long long int length, long long int *n)
{
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * length;
if(output[blockOffset + threadID] < n[blockID])
{
output[blockOffset + threadID] = n[blockID];
}}
__global__ void max_three(long long int *output, long long int length, long long int *n1, long long int *n2)
{
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * length;
if(n1[blockID] > n2[blockID])
{
if(n1[blockID] > output[blockOffset + threadID])
{
output[blockOffset + threadID] = n1[blockID];
}
}
else
{
if(n2[blockID] > output[blockOffset + threadID])
{
output[blockOffset + threadID] = n2[blockID];
}
}}
//functions to find minimum of two or three numbers in given arrays
__global__ void min_two(long long int *output, long long int length, long long int *n)
{
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * length;
if(output[blockOffset + threadID] > n[blockID])
{
output[blockOffset + threadID] = n[blockID];
}}
__global__ void min_three(long long int *output, long long int length, long long int *n1, long long int *n2)
{
long long int blockID = blockIdx.x;
long long int threadID = threadIdx.x;
long long int blockOffset = blockID * length;
if(n1[blockID] < n2[blockID])
{
if(n1[blockID] < output[blockOffset + threadID])
{
output[blockOffset + threadID] = n1[blockID];
}
}
else
{
if(n2[blockID] < output[blockOffset + threadID])
{
output[blockOffset + threadID] = n2[blockID];
}
}}
// Prints the caller, message and CUDA error string, then exits on failure.
void _checkCudaError(const char *message, cudaError_t err, const char *caller)
{
if (err != cudaSuccess)
{
fprintf(stderr, "Error in: %s\n", caller);
fprintf(stderr, "%s\n", message);
fprintf(stderr, ": %s\n", cudaGetErrorString(err));
exit(0);}}
// Prints a labelled reduction result and its elapsed time.
void printResult(const char* Heading, long long int result, float milliseconds)
{
printf("%s\n", Heading);
printf("Final Reduction is %lld and it was done in %lf ms\n", result, milliseconds);}
// Appends Output[start..end) to output.txt under the given header.
void printArrayInFile (const char* Header ,long long int Output[], long long int start, long long int end){
FILE* fp = fopen("output.txt", "a");
fprintf(fp, "%s\n", Header);
for(long long int i=start; i < end; i++)
{
fprintf(fp, "%lld ",Output[i]);
}
fprintf(fp, "\n");
fclose(fp);}
// Element-wise comparison of the CPU and GPU results over [start, end).
// NOTE(review): the loop index is `int` while the bounds are long long —
// fine for small N, truncates for N >= 2^31.
void check(long long int* CPU_Vector,long long int* GPU_Vector, long long int start, long long int end)
{
for(int i=start; i < end; i++)
{
if(CPU_Vector[i]!=GPU_Vector[i])
{
printf("Outputs don't match\n");
return;
}
}
printf("Outputs do match, The implementation is successful\n");}
6,084
#ifndef __U_TENSOR_OPERATION_GPU_HPP__ #define __U_TENSOR_OPERATION_GPU_HPP__ /*** u-op-gpu.hpp base functions for tensor Copyright (C) 2017 Renweu Gao This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ***/ #ifdef USE_CUDA #include <cuda.h> namespace u { } #endif #endif
6,085
/*------------check.cu------------------------------------------------------// * * Purpose: This is a simple cuda file for checking your gpu works * * It prints 0 -> 63 * *-----------------------------------------------------------------------------*/ #include <iostream> #include <math.h> __global__ void findID(double *a, int n){ // First we need to find our global threadID int id = blockIdx.x*blockDim.x + threadIdx.x; // Make sure we are not out of range if (id < n){ a[id] = id; } } int main(){ // size of vectors int n = 64; // Host vectors double *h_a; // Device vectors double *d_a; // allocating space on host and device h_a = (double*)malloc(sizeof(double)*n); // Allocating space on GPU cudaMalloc(&d_a, sizeof(double)*n); // Creating blocks and grid ints int threads, grid; threads = 64; grid = (int)ceil((float)n/threads); findID<<<grid, threads>>>(d_a, n); // Now to copy c back cudaMemcpy(h_a, d_a, sizeof(double)*n, cudaMemcpyDeviceToHost); for (int i = 0; i < n; ++i){ std::cout << h_a[i] << '\n'; } // Release memory cudaFree(d_a); free(h_a); }
6,086
#include "includes.h"

/*
 * Expands per-edge point counts into interpolation weights and endpoint
 * index pairs.  One thread handles one entry of the flat
 * (b, n, orig_p_num) grid.
 *
 *   edge_num[index]       number of points to generate on this edge
 *   edge_start_idx[index] offset of this edge's first output slot within
 *                         the per-(b, n) row of length p_num
 *   weight                receives i / edge_num for i in [0, edge_num)
 *   ind                   receives, per point, the pair
 *                         (c_edge_idx, (c_edge_idx + 1) % orig_p_num)
 *
 * NOTE(review): the output layouts weight:(b, n, p_num) and
 * ind:(b, n, p_num, 2) are inferred from the indexing arithmetic below —
 * confirm against the host-side caller.  The kernel also assumes the
 * edges' [start_idx, start_idx + edge_num) ranges do not overlap and fit
 * inside p_num; nothing here checks that.
 */
__global__ void _calculate_wnp( const long* edge_num, const long* edge_start_idx, float* weight, long* ind, const int b, const int n, const int orig_p_num, const int p_num )
{
    // Flat global thread id over b * n * orig_p_num work items.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= b * n * orig_p_num)
        return;

    // Decompose the flat index into (batch, row, edge) coordinates.
    const int c_b = index / (n * orig_p_num);
    const int c_n = (index - c_b * n * orig_p_num) / orig_p_num;
    const int c_edge_idx = index % orig_p_num;

    const long c_edge_num = edge_num[index];
    const int c_start_idx = int(edge_start_idx[index]);

    // Base pointers into this (b, n) slice, advanced to the edge's slots.
    float* c_weight = &weight[c_b * n * p_num + c_n * p_num + c_start_idx];
    long* c_ind = &ind[c_b * n * p_num * 2 + c_n * p_num * 2 + c_start_idx * 2];

    // Evenly spaced weights plus the edge's two endpoint indices; the
    // modulo wrap makes edge orig_p_num - 1 connect back to vertex 0.
    for (long i = 0; i < c_edge_num; i++) {
        c_weight[i] = float(i) / float(c_edge_num);
        c_ind[i * 2] = long(c_edge_idx);
        c_ind[i * 2 + 1] = long((c_edge_idx + 1) % orig_p_num);
    }
}
6,087
#include "includes.h"

// Element-wise ReLU forward pass over an nRowsZ x nColsZ matrix stored
// flat in row-major order: A[i] = Z[i] when Z[i] >= 0, otherwise 0.
// One thread per matrix entry; extra threads fall through the guard.
__global__ void ForwardReLU(float* Z, int nRowsZ, int nColsZ, float* A)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = nRowsZ * nColsZ;
    if (idx >= total)
        return;
    const float z = Z[idx];
    A[idx] = (z >= 0) ? z : 0;
}
6,088
#include <iostream>
#include <vector>

// In-place element-wise addition: v0[i] += v1[i].
// BUG FIX: the kernel previously ignored its `size` parameter; any launch
// with more threads than elements wrote out of bounds.  The guard makes the
// launch configuration safe.
__global__ void vecadd( int * v0, int * v1, std::size_t size )
{
    auto tid = threadIdx.x;
    if( tid < size )
    {
        v0[ tid ] += v1[ tid ];
    }
}

int main()
{
    cudaError_t err;
    std::size_t const size = 100;
    std::size_t const sizeb = size * sizeof( int );

    // Pinned host buffers — required for cudaMemcpyAsync to actually
    // overlap with computation.
    int * v0_h = nullptr;
    int * v1_h = nullptr;
    int * v0_d = nullptr;
    int * v1_d = nullptr;

    err = cudaMallocHost( &v0_h, sizeb );
    if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
    err = cudaMallocHost( &v1_h, sizeb );
    if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }

    for( std::size_t i = 0 ; i < size ; ++i )
    {
        v0_h[ i ] = v1_h[ i ] = i;
    }

    err = cudaMalloc( &v0_d, sizeb );
    if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
    err = cudaMalloc( &v1_d, sizeb );
    if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }

    // Two streams, one per half of the vectors, so the copies and kernels
    // of the two halves can overlap.
    cudaStream_t streams[ 2 ];
    for( std::size_t i = 0 ; i < 2 ; ++i )
    {
        cudaStreamCreate( &streams[ i ] );
    }

    for( std::size_t i = 0 ; i < 2 ; ++i )
    {
        cudaMemcpyAsync( v0_d + i*size/2, v0_h + i*size/2, sizeb/2, cudaMemcpyHostToDevice, streams[ i ] );
        cudaMemcpyAsync( v1_d + i*size/2, v1_h + i*size/2, sizeb/2, cudaMemcpyHostToDevice, streams[ i ] );
    }

    for( std::size_t i = 0 ; i < 2 ; ++i )
    {
        vecadd<<< 1, size/2, 0, streams[ i ] >>>( v0_d + i*size/2, v1_d + i*size/2, size/2 );
    }

    // BUG FIX: these checks were commented out.  Kernel launches do not
    // report configuration errors by themselves; query them explicitly.
    err = cudaGetLastError();
    if( err != cudaSuccess ) { std::cerr << cudaGetErrorString( err ) << std::endl; }

    for( std::size_t i = 0 ; i < 2 ; ++i )
    {
        cudaMemcpyAsync( v0_h + i*size/2, v0_d + i*size/2, sizeb/2, cudaMemcpyDeviceToHost, streams[ i ] );
    }

    // Wait for all async work before reading the results on the host.
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if( err != cudaSuccess ) { std::cerr << cudaGetErrorString( err ) << std::endl; }

    for( std::size_t i = 0 ; i < 2 ; ++i )
    {
        cudaStreamDestroy( streams[ i ] );
    }

    for( std::size_t i = 0 ; i < size ; ++i )
    {
        std::cout << v0_h[ i ] << std::endl;
    }

    cudaFree( v0_d );
    cudaFree( v1_d );
    cudaFreeHost( v0_h );
    cudaFreeHost( v1_h );
    return 0;
}
6,089
// REQUIRES: nvptx-registered-target // RUN: %clang_cc1 -triple nvptx -fcuda-is-device \ // RUN: -fgpu-allow-device-init \ // RUN: %s 2>&1 | FileCheck %s // CHECK: warning: '-fgpu-allow-device-init' is ignored since it is only supported for HIP
6,090
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

#define error 1e-6
#define BLOCK_SIZE 32

///////////////////////////////////////// UTILITIES ////////////////////////////////////////////////////////////////////////////////////////////

/* **********************************************************************
function name: init_Array
description: create a matrix with random values
parameters:
    &x pointer to an m x n Matrix (row-major)
return: None
NOTE(review): rand() % 1000 can yield 0, so "random values" may include
zeros.
********************************************************************** */
void init_Array(float *x, int m, int n){
    for(int i = 0; i < m; i++){
        for(int j = 0; j < n; j++){
            x[i * n + j] = rand() % 1000;
        }
    }
}

/* **********************************************************************
function name: init_0_Array
description: create a matrix that cointains a high number of zero values,
             but every row have at least one non-zero values
parameters:
    &x pointer to a row x col Matrix
return: None
NOTE(review): the `ultimo` flag forces an assignment in the last column
of each row, but rand() % 1000 can itself produce 0, so "at least one
non-zero per row" is probabilistic, not guaranteed — confirm whether
sparse_matrix tolerates an all-zero row (its row pointer would stay
unset).
********************************************************************** */
void init_0_Array(float *x, int row, int col){
    bool ultimo = false;  // true while the last column still needs a forced value
    for(int i = 0; i < row; i++){
        for(int j = 0; j < col; j++){
            // On the last column, force a value if none was planted yet.
            if(j == col - 1 && !ultimo){
                ultimo = true;
            }
            float r = (float) rand() / RAND_MAX;  // uniform in [0, 1]
            if(r <= 0.25){                        // ~25% chance: non-zero entry
                x[i*col + j] = rand() % 1000;
                ultimo = false;
            }
            else{
                if (!ultimo){
                    x[i*col + j] = 0;
                }
                else {
                    x[i*col + j] = rand() % 1000; // forced last-column value
                    ultimo = false;
                }
            }
        }
    }
}

/* **********************************************************************
function name: count_Num
description: count the non-zero elements of a matrix
parameters:
    &x pointer to a row x col Matrix
return: the total number of non-zero elements
********************************************************************** */
int count_Num(float *x, int row, int col){
    int count = 0;
    for(int i = 0; i < row; i++){
        for(int j = 0; j < col; j++){
            if(x[i*col + j] != 0){
                count++;
            }
        }
    }
    return count;
}

/* ********************************************************************** function name:
sparse_matrix description: convert a matric in a sparse matrix format in CSR parameters: &x pointer to a row x col matrix &rows pointer to a rows + 1 vector: points to the first element in each row and assigns it the value of its index in value &cols pointer to a size vector: the column to which each non-zero value belongs &val pointer to a size vector: contains every non-zero value of the x matrix return: None ********************************************************************** */ void sparse_matrix(float *x, int *rows, int *cols, float *val, int row, int col){ int antes = -1; int count = 0; int count_row = 0; for(int i = 0; i < row; i++){ for(int j = 0; j < col; j++){ if(x[i*col + j] != 0){ if(i != antes){ rows[count_row] = count; antes = i; count_row++; } cols[count] = j; val[count] = x[i*col + j]; count++; } } } } /* ********************************************************************** function name: cpuSecond() description: when it is called you obtain the exact time in this moment return: the time in microseonds ********************************************************************** */ double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } /* ********************************************************************** function name: print_matrix description: print a matrix compose by (float) elements parameters: &a pointer to a row x col Matrix return: None ********************************************************************** */ void print_matrix(float *a, int row, int col){ for(int i = 0; i < row; i++){ for(int j = 0; j < col; j++){ printf("%f ",a[i * col + j]); } printf("\n"); } printf("\n"); } /* ********************************************************************** function name: print_imatrix description: print a matrix compose by (int) elements parameters: &a pointer to a row x col Matrix return: None ********************************************************************** */ void 
print_imatrix(int *a, int row, int col){ for(int i = 0; i < row; i++){ for(int j = 0; j < col; j++){ printf("%d ",a[i * col + j]); } printf("\n"); } printf("\n"); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////// GPU KERNEL METHODS ////////////////////////////////////////////////////////////////// /* ********************************************************************* function name: mmatrix description: dot product of two matrix (not only square) in GPU parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,(m + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void mmatrix(float *a, float *b, float *c, int m, int n, int k){ unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0; if(col < k && row < m){ for(int i = 0; i < n; i++){ sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpuMatrixConv description: implementation of the convultion operation parameters: &a GPU device pointer to a row1 X col1 matrix (A) &b GPU device pointer to a row2 X col2 matrix (B) &c GPU device output purpose pointer to a row3 X col3 matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((col3 + BLOCK_SIZE - 1) / BLOCK_SIZE,(row3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by 
using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpuMatrixConv(float *a, float *b, float *c, int row1, int col1, int row2, int col2, int row3, int col3) { unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0; if (row < row3 && col < col3) { for (int i = 0; i < row2; i++) { for (int j = 0; j < col2; j++) { sum += a[(row + i) * col1 + col + j] * b[i * row2 + j]; } } c[row * col3 + col] = sum; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output purpose pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((col + BLOCK_SIZE - 1) / BLOCK_SIZE, (row + BLOCK_SIZE - 1) / BLOCK_SIZE, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpuMatrixTranpose(float *a, float *b, int rows, int cols){ unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < rows && col < cols){ int pos_a = row * cols + col; int pos_b = col * rows + row; b[pos_b] = a[pos_a]; } } /* ********************************************************************* function name: gpuMatrixConv description: implementation the matrix * vector in sparse format CSR parameters: &values GPU device pointer to a size vector (values): the non-zero values for the original matrix &vector GPU device pointer to a col vector (x): to multiply the non-zero values &rows GPU device pointer to a rows + 1 vector (row): points to the first element in each row and assigns it the value of its index in value &cols GPU device pointer to a col size vector 
(col): the column to which each non-zero value belongs &res GPU device output purpose pointer to a row vector (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((col3 + BLOCK_SIZE - 1) / BLOCK_SIZE,(row3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpuMVSparse(float *values, float *vector, int *rows, int *cols,float *res, int row){ unsigned int Id = threadIdx.x + blockDim.x * blockIdx.x; if(Id < row){ for(int k = rows[Id]; k < rows[Id+1]; k++){ res[Id] += values[k]*vector[cols[k]]; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////// CPU METHODS AND GPU PREPARATION /////////////////////////////////////////////////////////// /* ********************************************************************** function name: sparse_preparation description: compare the CPU and GPU implementation of a matrix * vector multiplication in sparse format 1º obtaint the size of the matrix 2º create the matrix 3º count the non-zero values 4º tranfor into CSR format 5º Do the operation 6º compare the results Optional: print the results return: None ********************************************************************** */ void sparse_preparation(){ int col, row, val; bool bien = true; printf("\n"); printf("\n"); SP1: printf("Introduce the rows of A:\n"); fflush(stdout); val = scanf("%d", &row); if(val == 0) { while ( (val = getchar()) != EOF && val != '\n' ); printf("You don't introduce a valid number, please do it again.\n"); printf("\n"); goto SP1; } SP2: printf("Introduce the columns of A:\n"); fflush(stdout); val = scanf("%d", &col); if(val == 0) { while ( (val = getchar()) != EOF && val 
!= '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto SP2;
    }
    // Timing bookkeeping for the CPU and GPU variants.
    double start_GPU, stop_GPU;
    double start_CPU, stop_CPU;
    double diferencia_CPU, diferencia_GPU;
    float *A = (float *)malloc(row * col * sizeof(float));
    float *vector = (float *)malloc(col * sizeof(float));
    init_Array(vector, 1, col);
    init_0_Array(A, row, col);
    int size = count_Num(A, row, col);
    // CSR representation of A: values, column indices, row pointers.
    float *values = (float *)malloc(size * sizeof(float));
    int *rows = (int *)malloc((row + 1) * sizeof(int));
    int *cols = (int *)malloc(size * sizeof(int));
    float *res = (float *)malloc(row * sizeof(float));
    float *res_F = (float *)malloc(row * sizeof(float));
    sparse_matrix(A, rows, cols, values, row, col);
    rows[row] = size;
    int *rows_GPU;
    int *cols_GPU;
    float *values_GPU;
    float *res_GPU;
    float *vector_GPU;
    cudaMalloc(&rows_GPU, (row + 1) * sizeof(int));
    cudaMalloc(&cols_GPU, size * sizeof(int));
    cudaMalloc(&values_GPU, size * sizeof(float));
    cudaMalloc(&res_GPU, row * sizeof(float));
    cudaMalloc(&vector_GPU, col * sizeof(float));
    cudaMemset(res_GPU, 0, row * sizeof(float));
    cudaMemcpy(rows_GPU, rows, (row + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cols_GPU, cols, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(values_GPU, values, size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(vector_GPU, vector, col * sizeof(float), cudaMemcpyHostToDevice);
    //Start CPU Part//
    start_CPU = cpuSecond();
    for(int k = 0; k < row; k++){
        res[k] = 0;
    }
    for(int i = 0; i < row; i++){
        for(int k = rows[i]; k < rows[i + 1]; k++){
            res[i] += values[k]*vector[cols[k]];
        }
    }
    stop_CPU = cpuSecond();
    diferencia_CPU = stop_CPU - start_CPU;
    //Stop Cpu Part //
    /* BUG FIX: the grid size must be ceil(row / BLOCK_SIZE).  The previous
       expression `col + BLOCK_SIZE - 1 / BLOCK_SIZE` evaluated as
       `col + 32 - 0` because of operator precedence, and it was based on
       `col` although gpuMVSparse assigns one thread per ROW — so tall
       matrices (row >> col) were left partially uncomputed. */
    unsigned int GRID = (row + BLOCK_SIZE - 1) / BLOCK_SIZE;
    //Init GPU part//
    start_GPU = cpuSecond();
    gpuMVSparse<<<GRID, BLOCK_SIZE>>>(values_GPU, vector_GPU, rows_GPU, cols_GPU, res_GPU, row);
    cudaDeviceSynchronize();
    cudaMemcpy(res_F, res_GPU, row * sizeof(float), cudaMemcpyDeviceToHost);
    stop_GPU = cpuSecond();
    diferencia_GPU =
stop_GPU - start_GPU;
    //Stop GPU part//
    //Start Checking //
    for(int j = 0; j < row; j++){
        if(fabs(res_F[j] - res[j]) >= error ){
            bien = false;
            printf("Error en: %f %f\n", res_F[j], res[j]);
        }
    }
    if(bien){
        printf("Comparing the output for each implementation.. Correct!\n");
    }else {
        printf("Comparing the output for each implementation.. Incorrect!\n");
    }
    char d;
    printf("Do you want to print the matrix:\n");
    printf("YES: y or NO: n\n");
    fflush(stdout);
    scanf(" %c", &d);
    if(d == 'y'){
        print_matrix(A,row,col);
        print_matrix(values, 1, size);
        /* BUG FIX: `rows` holds row + 1 CSR row pointers; printing it with
           length col + 1 read out of bounds whenever col != row. */
        print_imatrix(rows, 1, (row + 1));
        print_imatrix(cols, 1 ,size);
        print_matrix(res,row,1);
        print_matrix(res_F,row,1);
        fflush(stdout);
    }
    printf("Duration of the CPU: %f\n", diferencia_CPU);
    printf("Duration of the GPU: %f\n", diferencia_GPU);
    /* BUG FIX: these buffers were allocated with malloc(), so they must be
       released with free(); `delete[]` on malloc'd memory is undefined
       behaviour. */
    free(A);
    free(vector);
    free(cols);
    free(rows);
    free(res);
    free(res_F);
    free(values);
    cudaFree(values_GPU);
    cudaFree(cols_GPU);
    cudaFree(rows_GPU);
    cudaFree(res_GPU);
    cudaFree(vector_GPU);
}

/* **********************************************************************
function name: transpose_preparation
description: compare the CPU and GPU implementation of the transpose operation
    1º obtaint the size of the matrix
    2º create the matrix
    3º Do the operation
    4º compare the results
    Optional: print the results
return: None
********************************************************************** */
void tranpose_preparation(){
    int col, row, val;
    bool bien = true;
    printf("\n");
    printf("\n");
TR1:
    printf("Introduce the rows of A:\n");
    fflush(stdout);
    val = scanf("%d", &row);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto TR1;
    }
TR2:
    printf("Introduce the columns of A:\n");
    fflush(stdout);
    val = scanf("%d", &col);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto TR2;
    }
    double
start_GPU, stop_GPU; double start_CPU, stop_CPU; double diferencia_CPU, diferencia_GPU; float *A = (float *)malloc(row * col * sizeof(float)); float *res = (float *)malloc(row * col * sizeof(float)); float *res_F = (float *)malloc(row * col * sizeof(float)); float *A_GPU; float *res_GPU; cudaMalloc(&A_GPU, row * col * sizeof(float)); cudaMalloc(&res_GPU, row * col * sizeof(float)); init_Array(A, row, col); cudaMemcpy(A_GPU, A, row * col * sizeof(float), cudaMemcpyHostToDevice); //Start CPU Part// start_CPU = cpuSecond(); for(int i = 0; i < row; i++){ for(int j = 0; j < col; j++){ int pos_a = i * col + j; int pos_res = j * row + i; res[pos_res] = A[pos_a]; } } stop_CPU = cpuSecond(); diferencia_CPU = stop_CPU - start_CPU; //Stop Cpu Part // unsigned int grid_rows = (row + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_colm = (col + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_colm, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //Init GPU part// start_GPU = cpuSecond(); gpuMatrixTranpose<<<dimGrid, dimBlock>>>(A_GPU, res_GPU, row, col); cudaDeviceSynchronize(); cudaMemcpy(res_F, res_GPU, row * col * sizeof(float), cudaMemcpyDeviceToHost); stop_GPU = cpuSecond(); diferencia_GPU = stop_GPU - start_GPU; //Stop GPU part// //Start Checking // for(int i = 0; i < row; i++){ for(int j = 0; j < col; j++){ if(fabs(res_F[i * col + j] - res[i*col + j]) >= error ){ bien = false; printf("Error en: %f %f\n", res_F[i * col + j], res[i * col + j]); } } } if(bien){ printf("Comparing the output for each implementation.. Correct!\n"); }else { printf("Comparing the output for each implementation.. 
Incorrect!\n");
    }
    char d;
    printf("Do you want to print the differents matrix:\n");
    printf("YES: y or NO: n\n");
    fflush(stdout);
    scanf(" %c", &d);
    if(d == 'y'){
        print_matrix(A,row,col);
        print_matrix(res,col,row);
        print_matrix(res_F,col,row);
        fflush(stdout);
    }
    printf("Duration of the CPU: %f\n", diferencia_CPU);
    printf("Duration of the GPU: %f\n", diferencia_GPU);
    /* BUG FIX: A, res and res_F come from malloc(), not cudaMallocHost(),
       so they must be released with free(); cudaFreeHost() is only valid
       for pinned allocations. */
    free(A);
    free(res);
    free(res_F);
    cudaFree(A_GPU);
    cudaFree(res_GPU);
}

/* **********************************************************************
function name: conv_preparation
description: compare the CPU and GPU implementation of the convultion operation
    1º obtaint the size of the matrix A and the square matrix B
    2º create the matrix A and B with random values
    3º Do the operation
    4º compare the results
    Optional: print the results
return: None
********************************************************************** */
void conv_preparation(){
    int col1, row1, col2, row2, col3, row3, val;
    bool bien = true;
INTRO:
    printf("\n");
    printf("\n");
CO1:
    printf("Introduce the rows of A:\n");
    fflush(stdout);
    val = scanf("%d", &row1);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto CO1;
    }
CO2:
    printf("Introduce the colums of A:\n");
    fflush(stdout);
    val = scanf("%d", &col1);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto CO2;
    }
CO3:
    printf("Introduce the rows and colums of B:\n");
    fflush(stdout);
    val = scanf("%d", &col2);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto CO3;
    }
    row2 = col2;
    if(row2 >= row1 || col2 >= col1 ) {
        printf("You must introduce again the numbers, the matrix (rows and colums) A have to be higher than B\n");
        goto INTRO;
    }
    col3 = col1 - col2 + 1;
    row3 = row1 - row2
+ 1; double start_GPU, stop_GPU; double start_CPU, stop_CPU; double diferencia_CPU, diferencia_GPU; float *A = (float *)malloc(row1 * col1 * sizeof(float)); float *B = (float *)malloc(row2 * col2 * sizeof(float)); float *res = (float *)malloc(row3 * col3 * sizeof(float)); float *res_F = (float *)malloc(row3 * col3 * sizeof(float)); float *A_GPU; float *B_GPU; float *res_GPU; cudaMalloc(&A_GPU, row1 * col1 * sizeof(float)); cudaMalloc(&B_GPU, row2 * col2 * sizeof(float)); cudaMalloc(&res_GPU, row3 * col3 * sizeof(float)); init_Array(A, row1, col1); init_Array(B, row2, col2); cudaMemcpy(A_GPU, A, row1 * col1 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(B_GPU, B, row2 * col2 * sizeof(float), cudaMemcpyHostToDevice); //Start CPU Part// start_CPU = cpuSecond(); int i, j ,k, z; float sum = 0.0; for(i = 0; i < row3; i++){ for(z = 0; z < col3; z++){ sum = 0.0; for(j = 0; j < row2; j++){ for(k = 0; k < col2; k++){ sum += A[(i + j) * col1 + z + k] * B[j * row2 + k]; } } res[i * col3 + z] = sum; } } stop_CPU = cpuSecond(); diferencia_CPU = stop_CPU - start_CPU; //Stop Cpu Part // unsigned int grid_rows = (row3 + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_colm = (col3 + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_colm, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //Init GPU part// start_GPU = cpuSecond(); gpuMatrixConv<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, res_GPU, row1, col1, row2, col2, row3, col3); cudaDeviceSynchronize(); cudaMemcpy(res_F, res_GPU, row3 * col3 * sizeof(float), cudaMemcpyDeviceToHost); stop_GPU = cpuSecond(); diferencia_GPU = stop_GPU - start_GPU; //Stop GPU part// //Start Checking // for(int i = 0; i < row3; i++){ for(int j = 0; j < col3; j++){ if(fabs(res_F[i * col3 + j] - res[i*col3 + j]) >= error ){ bien = false; printf("Error: %f %f\n", res_F[i * col3 + j], res[i * col3 + j]); } } } if(bien){ printf("Comparing the output for each implementation.. Correct!\n"); }else { printf("Comparing the output for each implementation.. 
Incorrect!\n");
    }
    char d;
    printf("Do you want to print the matrix:\n");
    printf("YES: y or NO: n\n");
    fflush(stdout);
    scanf(" %c", &d);
    if(d == 'y'){
        print_matrix(A,row1,col1);
        print_matrix(B,row2, col2);
        print_matrix(res,row3,col3);
        print_matrix(res_F,row3,col3);
        fflush(stdout);
    }
    printf("Duration of the CPU: %f\n", diferencia_CPU);
    printf("Duration of the GPU: %f\n", diferencia_GPU);
    /* BUG FIX: these host buffers were obtained with malloc(), so release
       them with free(); cudaFreeHost() is only valid for cudaMallocHost()
       allocations. */
    free(A);
    free(B);
    free(res);
    free(res_F);
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(res_GPU);
}

/* **********************************************************************
function name: matrix_preparation
description: compare the CPU and GPU implementation of the matrix multiplication
    1º obtaint the size of the matrix A and the matrix B
    2º create the matrix A and B with random values
    3º Do the operation
    4º compare the results
    Optional: print the results
return: None
********************************************************************** */
void matrix_preparation(){
    int m, n, k, val;
    printf("\n");
    printf("\n");
MA1:
    printf("Introduce the rows of A:\n");
    fflush(stdout);
    val = scanf("%d", &m);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto MA1;
    }
MA2:
    printf("Introduce the columns of A:\n");
    fflush(stdout);
    val = scanf("%d", &n);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto MA2;
    }
MA3:
    printf("Introduce The columns of B:\n");
    fflush(stdout);
    val = scanf("%d", &k);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid number, please do it again.\n");
        printf("\n");
        goto MA3;
    }
    bool bien = true;
    double start_GPU, stop_GPU;
    double start_CPU, stop_CPU;
    double diferencia_CPU, diferencia_GPU;
    float *A = (float *)malloc(m * n * sizeof(float));
    float *B = (float *)malloc(n * k * sizeof(float));
    float *res
= (float *)malloc(m * k * sizeof(float)); float *res_F = (float *)malloc(m * k * sizeof(float)); float *A_GPU; float *B_GPU; float *res_GPU; cudaMalloc(&A_GPU, m * n * sizeof(float)); cudaMalloc(&B_GPU, n * k * sizeof(float)); cudaMalloc(&res_GPU, m * k * sizeof(float)); init_Array(A, m, n); init_Array(B, n, k); cudaMemcpy(A_GPU, A, m * n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(B_GPU, B, n * k * sizeof(float), cudaMemcpyHostToDevice); unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_colm = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_colm, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //init CPU part// start_CPU = cpuSecond(); for(int i = 0; i < m; i++){ for(int j = 0; j < k; j++){ float cont = 0.0; for(int z = 0; z < n; z++){ cont += A[i * n + z] * B[z * k + j]; } res[i * k + j] = cont; } } stop_CPU = cpuSecond(); diferencia_CPU = stop_CPU - start_CPU; //init GPU Part// start_GPU = cpuSecond(); mmatrix<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, res_GPU, m, n, k); cudaDeviceSynchronize(); cudaMemcpy(res_F, res_GPU, m * k * sizeof(float), cudaMemcpyDeviceToHost); stop_GPU = cpuSecond(); diferencia_GPU = stop_GPU - start_GPU; //check if it is correct// for(int i = 0; i < m; i++){ for(int j = 0; j < k; j++){ if(fabs(res_F[i * k + j] - res[i*k + j]) >= error ){ bien = false; break; } } if(!bien){break;} } if(bien){ printf("Comparing the output for each implementation.. Correct!\n"); }else { printf("Comparing the output for each implementation.. 
Incorrect!\n");
    }
    char d;
    printf("Do you want to print the matrix:\n");
    printf("YES: y or NO: n\n");
    fflush(stdout);
    scanf(" %c", &d);
    if(d == 'y'){
        print_matrix(A,m,n);
        print_matrix(B,n, k);
        print_matrix(res,m,k);
        print_matrix(res_F,m,k);
        fflush(stdout);
    }
    printf("Duration of the CPU: %f\n", diferencia_CPU);
    printf("Duration of the GPU: %f\n", diferencia_GPU);
    /* BUG FIX: malloc'd host buffers must be released with free(), not
       cudaFreeHost() (which is only for cudaMallocHost allocations). */
    free(A);
    free(B);
    free(res);
    free(res_F);
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(res_GPU);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////// MAIN //////////////////////////////////////////////////////////////////////////////

// Interactive menu: dispatches to one of the CPU-vs-GPU comparison demos
// until the user selects Exit.
int main( int argc, char *argv[]){
    int op, val;
    bool salir = false;
START:
    printf("\n");
    printf("\n");
    printf("******************************************\n");
    printf("* Select the operation you want to do: *\n");
    printf("* *\n");
    printf("* 1. Matrix Multiplication *\n");
    printf("* 2. Matrix Convection *\n");
    printf("* 3. Matrix Transpose *\n");
    printf("* 4. Matrix-Vector Sparse *\n");
    printf("* 5. Exit *\n");
    printf("* *\n");
    printf("******************************************\n");
    fflush(stdout);
    printf("Introduce the number of the operation:\n");
    fflush(stdout);
    val = scanf("%d", &op);
    if(val == 0) {
        while ( (val = getchar()) != EOF && val != '\n' );
        printf("You don't introduce a valid option, please do it again.\n");
        goto START;
    }else{
        switch(op){
            case 1:
                matrix_preparation();
                fflush(stdout);
                break;
            case 2:
                conv_preparation();
                fflush(stdout);
                break;
            case 3:
                tranpose_preparation();
                fflush(stdout);
                break;
            case 4:
                sparse_preparation();
                fflush(stdout);
                break;
            case 5:
                salir = true;
                printf("Successful Exit\n");
                fflush(stdout);
                break;
            default:
                printf("You dont select any option, please do it again\n");
                fflush(stdout);
                break;
        }
        if(!salir){goto START;}
    }
    return 0;
}
6,091
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

/*
 * AXPY-style update on integers: Y[i] = alpha * X[i] + Y[i].
 * Launched as <<<N, 1>>> — one block per element, so blockIdx.x is the
 * element index.
 */
__global__ void add(int *X, int *Y, int *alpha){
    int idx = blockIdx.x;
    Y[idx] = ((*alpha)*(X[idx])) + Y[idx];
}

int main(){
    int alpha,*X,*Y, N; //program vars
    int *d_x, *d_y, *d_a; //device vars
    int size = sizeof(int);

    printf("Enter number of elements and alpha: ");
    /* BUG FIX: scanf results were unchecked, so malformed input left N and
       alpha uninitialized and the subsequent malloc size was undefined. */
    if(scanf("%d %d",&N, &alpha) != 2 || N <= 0){
        printf("Invalid input\n");
        return 1;
    }

    X = (int*)malloc(sizeof(int)*N);
    Y = (int*)malloc(sizeof(int)*N);

    printf("Enter elements x <space> y:\n");
    for(int i=0; i<N; i++){
        if(scanf("%d %d",&X[i],&Y[i]) != 2){
            printf("Invalid input\n");
            free(X);
            free(Y);
            return 1;
        }
    }

    //Allocate space for device copies of a,b,c
    cudaMalloc((void**)&d_x,size*N);
    cudaMalloc((void**)&d_y,size*N);
    cudaMalloc((void**)&d_a,size);

    //setup input values
    cudaMemcpy(d_a,&alpha,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_x,X,size*N,cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,Y,size*N,cudaMemcpyHostToDevice);

    //launch add kernel on GPU
    add<<<N,1>>>(d_x,d_y,d_a);

    //copy result back to host (blocking cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(Y,d_y,size*N,cudaMemcpyDeviceToHost);

    printf("Result:\n");
    for(int i=0; i<N; i++){
        printf("Y%d = %d \n",i,Y[i]);
    }

    //Cleanup
    cudaFree(d_a);
    cudaFree(d_x);
    cudaFree(d_y);
    /* BUG FIX: the host buffers were leaked. */
    free(X);
    free(Y);
    return 0;
}
6,092
#define t_max 1
#define t 1

/*
 * One update step of the 7-point Laplacian stencil on a padded 3D array:
 *   u_0_1[center] = 0.25 * (sum of the six axis neighbours in u_0_0)
 *                   - u_0_0[center]
 * Auto-generated code: the linearised index expressions below come from a
 * stencil code generator and are reproduced verbatim. The grid's y block
 * dimension is folded to carry both the logical y and z block coordinates;
 * each thread sweeps `c` consecutive cells along x. `u_0_1_out` and the
 * thdblks parameters are part of the generated signature but unused here.
 */
__global__ void laplacian(float * * u_0_1_out, float * u_0_0, float * u_0_1,
                          int x_max, int y_max, int z_max,
                          int tbx, int tby, int tbz, int c)
{
    int _idx0, _idx1, _idx2, _idx3, _idx4, _idx5, _idx6;

    /* Unfold blockIdx.y into the logical (y, z) block coordinates. */
    int size_1_2 = (z_max/blockDim.z);
    int idx_1_2 = (blockIdx.y/size_1_2);
    int tmp = (blockIdx.y-(idx_1_2*size_1_2));

    /* Start of this thread's chunk of c cells along x, plus its y/z cell. */
    int chunk_idx_x = (c*(threadIdx.x+(blockDim.x*blockIdx.x)));
    int chunk_idx_x_max = (chunk_idx_x+c);
    int thd_idx_y = (threadIdx.y+(tmp*blockDim.y));
    int thd_idx_z = (threadIdx.z+(idx_1_2*blockDim.z));

    for (int thd_idx_x = chunk_idx_x; thd_idx_x < chunk_idx_x_max; thd_idx_x += 1)
    {
        /* Generated flat offsets of the centre and its six neighbours in
           the (2*t)-ghost-padded array — kept exactly as generated. */
        _idx0=((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t))+2);
        _idx1=(_idx0-2);
        _idx2=(((_idx1+x_max)+(2*t))+1);
        _idx3=(((_idx1-x_max)-(2*t))+1);
        _idx4=((((_idx1+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)))+1);
        _idx5=((((_idx1+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
        _idx6=(_idx1+1);

        u_0_1[_idx6]=((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))+(u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5])))*0.25)-u_0_0[_idx6]);
    }
}

/*
 * Initialises the stencil buffers: writes 0.1 into the centre and six
 * neighbour cells of timestep t-1, and 1.1 into the centre cell of
 * timestep t. Same generated thread-to-cell mapping as `laplacian`.
 */
__global__ void initialize(float * u_0_0, float * u_0_1,
                           int x_max, int y_max, int z_max,
                           int tbx, int tby, int tbz, int c)
{
    /* Timestep-indexed view of the two buffers; only slots 0 and 1 are used. */
    float * const u__u_0[16] = { u_0_0, u_0_1 };
    int _idx0, _idx1, _idx2, _idx3, _idx4, _idx5, _idx6;

    /* Unfold blockIdx.y into the logical (y, z) block coordinates. */
    int size_1_2 = (z_max/blockDim.z);
    int idx_1_2 = (blockIdx.y/size_1_2);
    int tmp = (blockIdx.y-(idx_1_2*size_1_2));

    int chunk_idx_x = (c*(threadIdx.x+(blockDim.x*blockIdx.x)));
    int chunk_idx_x_max = (chunk_idx_x+c);
    int thd_idx_y = (threadIdx.y+(tmp*blockDim.y));
    int thd_idx_z = (threadIdx.z+(idx_1_2*blockDim.z));

    for (int thd_idx_x = chunk_idx_x; thd_idx_x < chunk_idx_x_max; thd_idx_x += 1)
    {
        /* Generated flat offsets — kept exactly as generated. */
        _idx0=(((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t));
        u__u_0[(t-1)][_idx0]=0.1;
        _idx1=(((_idx0-x_max)-(2*t))+1);
        u__u_0[(t-1)][_idx1]=0.1;
        _idx2=((((_idx0+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
        u__u_0[(t-1)][_idx2]=0.1;
        _idx3=(_idx0+1);
        u__u_0[(t-1)][_idx3]=0.1;
        _idx4=(((_idx3+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)));
        u__u_0[(t-1)][_idx4]=0.1;
        _idx5=((_idx3+x_max)+(2*t));
        u__u_0[(t-1)][_idx5]=0.1;
        _idx6=(_idx0+2);
        u__u_0[(t-1)][_idx6]=0.1;
        u__u_0[t][_idx3]=1.1;
    }
}
6,093
#include <iostream> #include <vector> #include <chrono> #include <thread> class memory_keeper { private: std::vector<void*> _memory; const size_t _block_size = 128 * 1024 * 1024; //128MB. size_t _blocks; void allocate_block() { void *block; cudaMalloc(&block, _block_size); _memory.push_back(block); ++ _blocks; } void free_block() { void *block = _memory.back(); cudaFree(block); _memory.pop_back(); -- _blocks; } public: memory_keeper() : _blocks {0} {} ~memory_keeper() { if (!_memory.empty()) { void *block = _memory.front(); cudaFree(block); } _blocks = 0; } size_t get_blocks() { return _blocks; } bool allocate(size_t blocks) { for (size_t i=0; i<blocks; ++i) { allocate_block(); } return true; } bool free(size_t blocks) { if (blocks > _blocks) { return false; } for (size_t i=0; i<blocks; ++i) { free_block(); } return true; } bool reallocate() { size_t original_blocks = _blocks; while (_blocks > 0) { free_block(); } for (size_t i=0; i<original_blocks; ++i) { allocate_block(); } return true; } }; int main() { memory_keeper mk; //c++17, no support. using namespace std::chrono_literals; std::cout << "Try to allocate 1G mem: " << std::endl; mk.allocate(8); std::cout << "Done, sleep for 5 seconds. " << std::endl; std::this_thread::sleep_for(std::chrono::seconds(5)); std::cout << "Release 512M mem: " << std::endl; mk.free(4); std::cout << "Done, sleep for 5 seconds. " << std::endl; std::this_thread::sleep_for(std::chrono::seconds(5)); std::cout << "Try to allocate another 1G mem: " << std::endl; mk.allocate(8); std::cout << "Done, sleep for 5 seconds. " << std::endl; std::this_thread::sleep_for(std::chrono::seconds(5)); std::cout << "Clean them all. " << std::endl; }
6,094
#include "includes.h"

/*
 * Naive dense matrix multiply: d_C = d_A * d_B for square N x N matrices
 * stored row-major in global memory. Expected launch: a 2D grid where
 * (blockIdx.y, threadIdx.y) selects the output row and
 * (blockIdx.x, threadIdx.x) selects the output column; the guard makes any
 * over-sized launch safe.
 */
__global__ void matrixMultKernel (float *d_A, float *d_B, float *d_C, int N)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    int row = blockIdx.y * blockDim.y + threadIdx.y;  // output row

    if (row >= N || col >= N)
        return;  // surplus threads past the matrix edge do nothing

    float acc = 0;
    for (int k = 0; k < N; k++) {
        acc += d_A[row * N + k] * d_B[k * N + col];
    }
    d_C[row * N + col] = acc;
}
6,095
/***************************************************************************//**
 * \file intermediatePressure.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 * \brief kernels to generate the right hand side of the poisson equation
 */

#include "intermediatePressure.h"

/**
 * \namespace kernels
 * \brief Contains all the custom-written CUDA kernels.
 */
namespace kernels
{
/*
 * Computes one entry of the Poisson right-hand side (the divergence of the
 * intermediate velocity uhat) for pressure node ip, accumulating the four
 * face contributions (E/W/N/S) into `temp` and storing it in rhs2[ip].
 *
 * One thread per pressure node; expected launch is a 1D grid covering
 * nx*ny threads (the first line guards the tail).
 *
 * Index layout (from the arithmetic below): uhat packs u-components first
 * ((nx-1) per row), followed by v-components starting at (nx-1)*ny.
 * Tag conventions — NOTE(review), inferred from usage, confirm against the
 * tagging kernels: hybridTagsP[ip] != -1 marks a fluid node adjacent to the
 * body surface; ghostTagsP[ip] > 0 marks a node just inside the body;
 * ghostTagsP == -1 means "outside the body". ghostTagsUV is unused here.
 */
__global__
void intermediatePressure(double *rhs2, double *uhat, int *ghostTagsP, int *hybridTagsP, int *ghostTagsUV,
                          double *distance_from_u_to_body, double *distance_from_v_to_body,
                          double *ym, double *yp, double *xm, double *xp,
                          double *dx, double *dy, int nx, int ny)
{
	// Guard the grid tail: surplus threads past the last node exit early.
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*ny)
		return;
	int ip 	= threadIdx.x + blockDim.x * blockIdx.x,   // flat pressure-node index
		I	= ip % nx,                                 // column (x index)
		J	= ip / nx,                                 // row (y index)
		iu	= (nx-1)*J + I,                            // index of u-velocity east of node
		iv	= (nx-1)*ny  +  nx*J +I;                   // index of v-velocity north of node

	double temp = 0;

	//Outside immersed body
	if (hybridTagsP[ip] != -1)
	{
		//EAST
		//check if east pressure node is outside of the body
		if (ghostTagsP[ip+1] == -1)
		{
			// use the distance to the body surface in place of dx when the
			// surface cuts the cell (between dx/2 and dx from the node)
			if (distance_from_u_to_body[ip] > dx[I]/2 && distance_from_u_to_body[ip] < dx[I])
			{
				temp -= uhat[iu]/distance_from_u_to_body[ip];
			}
			else
				temp -= uhat[iu]/dx[I];
		}
		//WEST
		//check if west pressure node is outside of the body
		if (ghostTagsP[ip-1] == -1)
		{
			if (distance_from_u_to_body[ip] > dx[I]/2 && distance_from_u_to_body[ip] < dx[I])
			{
				temp += uhat[iu-1]/distance_from_u_to_body[ip];
			}
			else
				temp += uhat[iu-1]/dx[I];
		}
		//NORTH
		//check if north pressure node is outside of the body
		if (ghostTagsP[ip+nx] == -1)
		{
			if (distance_from_v_to_body[ip] > dy[J]/2 && distance_from_v_to_body[ip] < dy[J])
			{
				temp -= uhat[iv]/distance_from_v_to_body[ip];
			}
			else
				temp -= uhat[iv]/dy[J];
		}
		//SOUTH
		//check if south velocity node is outside of the body
		if (ghostTagsP[ip-nx] == -1)
		{
			if (distance_from_v_to_body[ip] > dy[J]/2 && distance_from_v_to_body[ip] < dy[J])
			{
				temp += uhat[iv-nx]/distance_from_v_to_body[ip];
			}
			else
				temp += uhat[iv-nx]/dy[J];
		}
	} //end outside immersed body
	//if just inside body
	else if (ghostTagsP[ip] > 0)
	{
		// only faces shared with an untagged (== 0) neighbour contribute
		//EAST
		if (ghostTagsP[ip+1] == 0)
			temp -= uhat[iu]/dx[I];
		//WEST
		if (ghostTagsP[ip-1] == 0)
			temp += uhat[iu - 1]/dx[I];
		//NORTH
		if (ghostTagsP[ip+nx] == 0)
			temp -= uhat[iv]/dy[J];
		//SOUTH
		if (ghostTagsP[ip-nx] == 0)
			temp += uhat[iv-nx]/dy[J];
	} //end just inside body
	//everywhere else
	else
	{
		// interior nodes use uhat; domain-boundary nodes substitute the
		// prescribed boundary velocities xm/xp (west/east) and ym/yp (south/north)
		//EAST
		//if not on the east wall and east is outside the body, add east term
		if (I != nx-1)//not at east boundry
			temp -= uhat[iu]/dx[I];
		else if (I == nx-1)//at east boundry
			temp -= xp[J]/dx[I];
		//WEST
		//if not on west wall and west is outside the body, add west term
		if (I != 0)//not at west boundary
			temp += uhat[iu - 1]/dx[I];
		else if (I == 0)//at the west boundary
			temp += xm[J]/dx[I];
		//NORTH
		//if not on north wall and north is outside the body, add north term
		if (J != ny-1)//not at north boundry
			temp -= uhat[iv]/dy[J];
		else if (J == ny-1)//at north boundry
			temp -= yp[(nx-1)+I]/dy[J];
		//SOUTH
		//if not on south wall and south is outside the body, add south term
		if (J != 0)//not at south boundry
			temp += uhat[iv-nx]/dy[J];
		else if (J == 0)//at south boundry
			temp += ym[(nx-1)+I]/dy[J];
	}//end everywhere else
	rhs2[ip] = temp;
}
}
6,096
#include <iostream>
#include <cstdlib>
#include <cfloat>
#include <math.h>
#include <sys/time.h>

#define THREADS_PER_BLOCK 32

/*
 * Brute-force Voronoi: for each pixel (flat index x of an imageSize x
 * imageSize image) find the nearest seed point and store its index.
 * points[k] holds the row (y) of seed k; points[k + numPoints] its column.
 */
__global__ void voronoi_d (int *imageArray, int *points, int imageSize, int numPoints)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: the launch rounds the block count up with ceil(), so the tail
    // threads of the last block fall past the image; without this guard they
    // wrote out of bounds into imageArray.
    if (x >= imageSize * imageSize)
        return;

    double minDistance = DBL_MAX;
    int minPoint = -1;
    for (int k = 0; k < numPoints; k++)
    {
        double distance = sqrt(pow((double) (x % imageSize - points[k + numPoints]), 2.0)
                             + pow((double) (x / imageSize - points[k]), 2.0));
        if (distance < minDistance)
        {
            minDistance = distance;
            minPoint = k;
        }
    }
    imageArray[x] = minPoint;
}

/*
 * Host wrapper: copies the seed coordinates to the device, times the kernel,
 * reports any CUDA error, and copies the resulting index map back.
 */
extern void gpuVoronoi(int *imageArray_h, int *points_h, int imageSize, int numPoints)
{
    // allocate space for the image and point coordinates on the device and copy the coordinates over
    int *imageArray;
    int *points;
    cudaMalloc ((void**) &imageArray, sizeof(int) * imageSize * imageSize);
    cudaMalloc ((void**) &points, sizeof(int) * numPoints * 2);
    cudaMemcpy (points, points_h, sizeof(int) * numPoints * 2, cudaMemcpyHostToDevice);

    // start calculation timing
    struct timeval start, end;
    gettimeofday(&start, NULL);

    // calculate and then synchronize to ensure accurate timing
    voronoi_d <<< ceil((float) imageSize*imageSize/THREADS_PER_BLOCK), THREADS_PER_BLOCK >>> (imageArray, points, imageSize, numPoints);
    cudaDeviceSynchronize();

    // end timing and print processing time
    gettimeofday(&end, NULL);
    long seconds = (end.tv_sec - start.tv_sec);
    long micros = ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
    // BUGFIX: seconds/micros are long, so %ld is the matching specifier
    // (the original used %zu, which is for size_t).
    printf("Processing time elpased is %ld seconds or %ld micros\n", seconds, micros);

    // print CUDA errors
    cudaError_t err = cudaGetLastError();
    printf("CUDA error: %s\n", cudaGetErrorString(err));

    // copy results to host and free device memory
    cudaMemcpy (imageArray_h, imageArray, sizeof(int) * imageSize * imageSize, cudaMemcpyDeviceToHost);
    cudaFree (imageArray);
    cudaFree (points);
}
6,097
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>

#define DIMBLOCK_X 65535 //2^16
#define DIMBLOCK_Y 32 //2^5
#define DIMTHREAD_X 1024 //2^10
//Total 2^31

// Set once any thread finds a factor so remaining threads exit early.
__device__ char found(0);

/*
 * Each thread tests one odd candidate divisor of *number and publishes it
 * through *factor when it divides evenly. Thread id i maps to candidate
 * (i + 1) * 2 + 1, i.e. the odd numbers 3, 5, 7, ... (2 is never tested;
 * the hard-coded target is odd, so that is fine here).
 */
__global__ void searchFactor(unsigned long int * number, unsigned int * factor){
    if (found) return;
    unsigned int block = blockIdx.x + blockIdx.y * gridDim.x;
    unsigned int n = block * blockDim.x + threadIdx.x;
    n = (n + 1) * 2 + 1;
    if (*number % n == 0){
        *factor = n;
        found = 1;
    }
}

int main(){
    struct timeval t1, t2;
    unsigned long int h_number;
    unsigned int h_prime1 = 0;
    unsigned long int h_prime2;
    unsigned long int *d_number;
    unsigned int *d_factor;

    h_number = 742312722905005279;

    cudaMalloc((void **)&d_number, sizeof(unsigned long int));
    cudaMalloc((void **)&d_factor, sizeof(unsigned int));
    cudaMemcpy(d_number, &h_number, sizeof(unsigned long int), cudaMemcpyHostToDevice);
    // BUGFIX: zero the result slot so "no factor found" is distinguishable;
    // the original left d_factor uninitialized and printed (and divided by)
    // garbage whenever no thread hit a factor.
    cudaMemset(d_factor, 0, sizeof(unsigned int));

    dim3 blocks(DIMBLOCK_X, DIMBLOCK_Y);

    gettimeofday(&t1, 0);
    searchFactor<<<blocks, DIMTHREAD_X>>>(d_number, d_factor);
    cudaDeviceSynchronize();
    gettimeofday(&t2, 0);

    cudaMemcpy(&h_prime1, d_factor, sizeof(unsigned int), cudaMemcpyDeviceToHost);

    if (h_prime1 != 0){
        // BUGFIX: %u / %lu match the unsigned types (the original used %d / %ld)
        printf("Primo 1 = %u\n", h_prime1);
        h_prime2 = h_number / h_prime1;
        printf("Primo 2 = %lu\n", h_prime2);
    } else {
        printf("Primo 1 = no encontrado\n");
    }

    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Tiempo: %f ms\n", time);

    /*
     * p = 976250239;
     * q = 760371361;
     */
    cudaFree(d_number);
    cudaFree(d_factor);
}
6,098
#include <stdio.h>

/*
 * c[i] = a[i] + b[i] for i in [0, n); the guard makes over-sized launches
 * safe. Indexed by threadIdx.x only, so the launch must use a single block.
 */
__global__ void add(int* a, int* b, int* c, int n)
{
    int id = threadIdx.x;
    if (id < n)
        c[id] = a[id] + b[id];
}

int main(void)
{
    int n = 1000;   // must stay <= 1024: the kernel runs in one block
    int* a;
    int* b;
    int* c;
    size_t nbytes = n * sizeof(int);

    cudaMallocManaged(&a, nbytes);
    cudaMallocManaged(&b, nbytes);
    cudaMallocManaged(&c, nbytes);

    for (int i = 0; i < n; i++)
    {
        a[i] = 1;
        b[i] = 2;
    }

    add<<<1, n>>>(a, b, c, n);
    cudaDeviceSynchronize();   // managed memory: sync before host reads c

    int sum = 0;
    for (int i = 0; i < n; i++)
    {
        sum += c[i];
    }
    printf("%d\n", sum);

    // BUGFIX: cudaFree takes the device pointer itself. The original passed
    // &a/&b/&c — the addresses of the host-side pointer variables — which is
    // an invalid argument and leaked all three managed allocations.
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
6,099
#include "includes.h"

/*
 * Element-wise vector sum: d_out[i] = d1_in[i] + d2_in[i] for i in [0, n).
 * One thread per element; the guard makes any launch size safe.
 */
__global__ void addVector(int *d1_in, int *d2_in, int *d_out, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;  // surplus threads in the last block do nothing
    d_out[idx] = d1_in[idx] + d2_in[idx];
}
6,100
// compile command:
// https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
// nvcc binary_arithmetics.cu --ptx -o binary_arithmetics.ptx --gpu-architecture=compute_70 --gpu-code=sm_70,compute_70

#define ADD +
#define SUB -
#define MUL *
#define DIV /
#define MOD %

// Applies `_operation` element-wise for this thread's BATCH slots, spaced
// STRIDE apart starting at its global thread id.
// BUGFIX: the original also executed `i += STRIDE;` inside the loop body, so
// `i` advanced by 2*STRIDE per iteration and only every other slot (half of
// BATCH) was processed. The for-header increment alone is correct.
#define BINARY_EXPRESSION(_operation)                                          \
    uint local_thread_id = threadIdx.x;                                        \
    uint work_group_id = blockIdx.x;                                           \
    uint work_group_size = blockDim.x;                                         \
    uint global_thread_id = work_group_size * work_group_id + local_thread_id; \
    uint end = BATCH * STRIDE;                                                 \
    for (uint i = 0; i < end; i += STRIDE) {                                   \
        uint idx = global_thread_id + i;                                       \
        out[idx] = vec_a[idx] _operation vec_b[idx];                           \
    }

extern "C" __global__ void add(double* vec_a, double* vec_b, double* out, const uint BATCH, const uint STRIDE)
{
    BINARY_EXPRESSION(ADD)
}

extern "C" __global__ void sub(double* vec_a, double* vec_b, double* out, const uint BATCH, const uint STRIDE)
{
    BINARY_EXPRESSION(SUB)
}

extern "C" __global__ void mul(double* vec_a, double* vec_b, double* out, const uint BATCH, const uint STRIDE)
{
    BINARY_EXPRESSION(MUL)
}

// 'div' is already reserved
extern "C" __global__ void division(double* vec_a, double* vec_b, double* out, const uint BATCH, const uint STRIDE)
{
    BINARY_EXPRESSION(DIV)
}