serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
4,301
// Adds two integers on the GPU (repeated N times) and prints the result.
//
// BUG FIX: the original called cudaFree(&device_c), passing the address of
// the host-side pointer variable instead of the device pointer itself —
// an invalid argument that leaks the device allocation. It also never
// freed the host buffers.
#include <cuda.h>
#include <stdio.h>

#define N 100000

// Device kernel: writes a + b into *c. a and b are passed by value,
// c must be a device pointer.
__global__ void kernel_add(int a, int b, int *c){
    *c = a + b;
}

int main(int argc, char **argv){
    int* host_a = (int*) malloc(sizeof(int));
    int* host_b = (int*) malloc(sizeof(int));
    int* host_c = (int*) malloc(sizeof(int));
    int* device_c;

    cudaMalloc((void**) &device_c, sizeof(int));

    // Launch the same trivial addition N times (kept from the original —
    // presumably a launch-overhead exercise) and copy the result back.
    for(int i = 0; i < N; i++){
        *host_a = 2;
        *host_b = 7;
        kernel_add<<<1,1>>>(*host_a, *host_b, device_c);
        cudaMemcpy(host_c, device_c, sizeof(int), cudaMemcpyDeviceToHost);
    }

    cudaFree(device_c);   // FIX: was cudaFree(&device_c)
    printf("%d\n", *host_c);

    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
4,302
// Vector addition benchmark: CPU reference vs. a coalesced-access GPU kernel
// where each thread processes `work_per_thread` elements spaced a full grid
// apart (grid-stride style).
//
// BUG FIX: the argument-count check was `argc != 2`, but the program needs
// THREE command-line arguments. With exactly one argument the original took
// the else-branch and dereferenced argv[2] / argv[3], which are out of
// bounds. The check is now `argc != 4` (program name + 3 arguments).
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#include<sys/time.h>
#include<time.h>

// Macro for checking cuda errors following a cuda launch or api call
#define CUDA_CHECK_RETURN(value) {                                      \
    cudaError_t _m_cudaStat = value;                                    \
    if (_m_cudaStat != cudaSuccess) {                                   \
        fprintf(stderr, "Error %s at line %d in file %s\n",             \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);   \
        exit(1);                                                        \
    } }

// Fill a and b with random values in [0, 100); zero the two result arrays.
void initialize(int *a, int *b, int *c, int *d, int input_length)
{
    for (int i = 0; i < input_length; i++) {
        a[i] = rand() % 100;
        b[i] = rand() % 100;
        c[i] = 0;
        d[i] = 0;
    }
}

// Compare the CPU result (a) against the GPU result (b); report the first
// mismatch, if any.
void validate(int *a, int *b, int length)
{
    for (int i = 0; i < length; ++i) {
        if (a[i] != b[i]) {
            printf("Different value detected at position: %d,"
                   "expected %d but get %d\n", i, a[i], b[i]);
            break;
        }
    }
}

// CPU reference implementation.
void vector_add(int *a, int *b, int *c, int size)
{
    for (int i = 0; i < size; i++) {
        c[i] = a[i] + b[i];
    }
}

// Each thread handles up to work_per_thread elements, striding by the total
// thread count so that consecutive threads touch consecutive addresses
// (coalesced global-memory access).
__global__ void vector_add_kernel_coalesced_access(int *a_d, int *b_d, int *d_d,
        int work_per_thread, int input_length, int totalThreads)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for(int i = 0; i < work_per_thread && tid < input_length; i++, tid += totalThreads){
        d_d[tid] = a_d[tid] + b_d[tid];
    }
}

int main(int argc, char *argv[])
{
    int input_length, block_size, work_per_thread;
    struct timeval start, end;

    // FIX: the program needs three arguments; prompt interactively otherwise.
    if (argc != 4) {
        printf("Usage is: VectorAddParallel input_length block_size work_per_thread\nNow, type input_length: ");
        scanf("%d", &input_length);
        printf("Type block_size: ");
        scanf("%d", &block_size);
        printf("Type work_per_thread: ");
        scanf("%d", &work_per_thread);
    }
    else{
        input_length = atoi(argv[1]);
        block_size = atoi(argv[2]);
        work_per_thread = atoi(argv[3]);
    }

    // Arrays declaration
    int *a_h, *b_h, *c_h, *d_h;   // host: inputs, CPU result, GPU result
    int *a_d, *b_d, *d_d;         // device mirrors

    // Allocation on Host
    a_h = (int *) malloc(sizeof(int) * input_length);
    b_h = (int *) malloc(sizeof(int) * input_length);
    c_h = (int *) malloc(sizeof(int) * input_length);
    d_h = (int *) malloc(sizeof(int) * input_length);

    // Allocation on Device
    CUDA_CHECK_RETURN(cudaMalloc((void **)&a_d, sizeof(int)*input_length));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&b_d, sizeof(int)*input_length));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_d, sizeof(int)*input_length));

    // Initialization on host side
    initialize(a_h, b_h, c_h, d_h, input_length);

    // Run host code
    gettimeofday(&start, NULL);
    vector_add(a_h, b_h, c_h, input_length);
    gettimeofday(&end, NULL);
    double diff = (end.tv_sec - start.tv_sec) * 1000000.0
                + (end.tv_usec - start.tv_usec);
    printf("Host VectorAdd time calculation duration: %8.5fms\n", diff / 1000);

    // Run device code: ceil-divide so every element is covered.
    int grid_size = (input_length - 1) / (block_size * work_per_thread) + 1;
    dim3 grid_dime(grid_size, 1, 1);
    dim3 block_dime(block_size, 1, 1);
    int totalThreads = grid_size * block_size;

    gettimeofday(&start, NULL);
    // Copy input data to device
    CUDA_CHECK_RETURN(cudaMemcpy(a_d, a_h, sizeof(int)*input_length, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(b_d, b_h, sizeof(int)*input_length, cudaMemcpyHostToDevice));
    vector_add_kernel_coalesced_access<<< grid_dime, block_dime >>>(a_d, b_d, d_d,
            work_per_thread, input_length, totalThreads);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());  // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(cudaGetLastError());
    // Copy back the result
    CUDA_CHECK_RETURN(cudaMemcpy(d_h, d_d, sizeof(int)*input_length, cudaMemcpyDeviceToHost));
    gettimeofday(&end, NULL);
    diff = (end.tv_sec - start.tv_sec) * 1000000.0
         + (end.tv_usec - start.tv_usec);
    printf("Device VectorAdd time calculation duration: %8.5fms\n", diff / 1000);

    // Validation
    validate(c_h, d_h, input_length);

    free(a_h); free(b_h); free(c_h); free(d_h);
    cudaFree(a_d); cudaFree(b_d); cudaFree(d_d);
    return 0;
}
4,303
// Pseudo-random matrix generation, GPU matrix multiply (one thread per
// output element, B stored transposed), and a rolling-hash signature of
// the result.
//
// FIX: N read from stdin was used unchecked; N > MAXN overflowed the
// static MAXN*MAXN arrays. Input is now validated.
#include <stdio.h>
#include <stdint.h>

#define MAXN 1024

// Ceiling division, usable on both host and device.
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }

// Fill A (row-major) and B (written transposed: B[j*N+i]) with a quadratic
// congruential sequence seeded by cA / cB.
void rand_gen(uint32_t cA, uint32_t cB, int N, uint32_t *A, uint32_t *B) {
    uint32_t xA = 2, n = N*N;
    uint32_t xB = 2;
    uint32_t *_A = A;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            *_A = xA = (xA * xA + cA + i + j)%n, _A++;
            B[j*N + i] = xB = (xB * xB + cB + i + j)%n;
        }
    }
}

// Knuth-style multiplicative rolling hash over all N*N elements.
uint32_t signature(int N, uint32_t *A) {
    uint32_t h = 0;
    uint32_t *_A = A;
    for (int i = N*N; i > 0; i--)
        h = (h + *_A) * 2654435761LU, _A++;
    return h;
}

// C = A * B, where cuTransB holds B transposed so each thread walks two
// contiguous rows. One thread per output element; flat 1-D launch.
__global__ void myMatrixMul(int N, uint32_t *cuC, uint32_t *cuA, uint32_t *cuTransB){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N*N)
        return;
    int x = tid / N;
    int y = tid % N;
    uint32_t *_A = cuA + x*N;
    uint32_t *_B = cuTransB + y*N;
    uint32_t sum = 0;
    for (int k=N; k>0; k--)
        sum += *_A * *_B, _A++, _B++;
    cuC[x*N + y] = sum;
}

uint32_t A[MAXN*MAXN], B[MAXN*MAXN], C[MAXN*MAXN];

int main() {
    int N;
    uint32_t S1, S2;
    // FIX: validate input before touching the fixed-size static arrays.
    if (scanf("%d %u %u", &N, &S1, &S2) != 3 || N < 1 || N > MAXN) {
        fprintf(stderr, "invalid input: need 1 <= N <= %d and two seeds\n", MAXN);
        return 1;
    }
    rand_gen(S1, S2, N, A, B);

    uint32_t *cuA, *cuB, *cuC;
    cudaMalloc( &cuA, sizeof(uint32_t)*N*N );
    cudaMalloc( &cuB, sizeof(uint32_t)*N*N );
    cudaMalloc( &cuC, sizeof(uint32_t)*N*N );
    cudaMemcpy( cuA, A, sizeof(uint32_t)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy( cuB, B, sizeof(uint32_t)*N*N, cudaMemcpyHostToDevice);

    dim3 grid(CeilDiv(N*N, 32)), block(32);
    myMatrixMul<<< grid, block >>>( N, cuC, cuA, cuB );
    cudaMemcpy( C, cuC, sizeof(uint32_t)*N*N, cudaMemcpyDeviceToHost);

    printf("%u\n", signature(N, C));
    cudaFree(cuA); cudaFree(cuB); cudaFree(cuC);
    return 0;
}
4,304
// Demo of a 2-D grid of 3-D blocks: subtracts 0.5f from each element and
// times the kernel with CUDA events.
//
// FIX: the original used time()/rand()/srand() without including <ctime>
// and <cstdlib> (it compiled only via transitive includes), and never
// destroyed the CUDA events it created.
#include <stdio.h>
#include <iostream>
#include <cstdlib>   // rand, srand
#include <ctime>     // time

// grid size
#define DGX 4
#define DGY 8
// block size
#define DBX 2
#define DBY 2
#define DBZ 2
// total number of parallel threads: 4*8*2*2*2 = 256
#define N (DBX*DBY*DBZ*DGX*DGY)

// Flatten the (3-D block, 2-D grid) coordinates into one linear index and
// subtract 0.5f. blockIdx.z is unused because the launch grid is 2-D.
__global__ void kern( float *a )
{
    int bs = blockDim.x*blockDim.y*blockDim.z;   // threads per block
    int idx = threadIdx.x
            + threadIdx.y*blockDim.x
            + threadIdx.z*(blockDim.x*blockDim.y)
            + blockIdx.x*bs
            + blockIdx.y*bs*gridDim.x;
    if(idx > N-1) return;   // guard (grid exactly covers N here, kept for safety)
    a[idx] -= 0.5f;
}

int main()
{
    float host_a[N], host_c[N];
    float *dev_a;

    srand((unsigned int)time(NULL));
    for(int i=0; i<N; i++) {
        host_a[i] = (float)rand()/(float)RAND_MAX - 0.5f;
    }

    cudaMalloc((void**)&dev_a, N * sizeof(float));
    cudaMemcpy(dev_a, host_a, N * sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dim3 blocks(DGX,DGY);
    dim3 threads(DBX,DBY,DBZ);

    cudaEventRecord(start);
    kern<<<blocks,threads>>>( dev_a );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaMemcpy(host_c, dev_a, N * sizeof(float), cudaMemcpyDeviceToHost);

    // Report any element the kernel failed to update. The comparison uses
    // the same single subtraction on both sides, so it is exact.
    for (int i=0; i<N; i++) {
        if(host_a[i]-0.5f != host_c[i])
            printf( "[%d]\t %.2f -> %.2f\n",i, host_a[i], host_c[i] );
    }

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "CUDA time simple (ms): " << milliseconds << std::endl;

    cudaEventDestroy(start);   // FIX: events were leaked
    cudaEventDestroy(stop);
    cudaFree( dev_a ) ;
    return 0;
}
4,305
/* Task #7 - Gustavo Ciotto Pinton MO644 - Parallel Programming */
// 2-D matrix addition on the GPU with a 2-D grid of 32x32 blocks, then a
// CPU-side checksum of the result.
//
// FIX: added a post-launch error check and an explicit return from main.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>

#define THREAD_PER_BLOCK 32 /* Tesla k40 supports 1024 threads (32 x 32 = 1024 in 2D grids) */

// C = A + B element-wise. Both matrices are row-major with `columns`
// elements per row; the guard handles the rounded-up grid.
__global__ void addMatrix2d (int *A, int *B, int *C, int rows, int columns) {
    int col = blockDim.x * blockIdx.x + threadIdx.x,
        row = blockDim.y * blockIdx.y + threadIdx.y;
    int index = row * columns + col;
    if (col < columns && row < rows)
        C[index] = A[index] + B[index];
}

int main() {
    int *A, *B, *C;
    /* Memory pointers used by the device */
    int *d_A, *d_B, *d_C;
    int i, j, m_size;

    /* Matrix dimensions */
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);

    m_size = sizeof(int) * linhas * colunas;
    A = (int *) malloc (m_size);
    B = (int *) malloc (m_size);
    C = (int *) malloc (m_size);

    for(i = 0; i < linhas; i++)
        for(j = 0; j < colunas; j++)
            A[i*colunas+j] = B[i*colunas+j] = i+j;

    /* Allocating memory for CUDA pointers in the device */
    cudaMalloc( (void**) &d_A, m_size);
    cudaMalloc( (void**) &d_B, m_size);
    cudaMalloc( (void**) &d_C, m_size);

    /* Copying data into device memory */
    cudaMemcpy(d_A, A, m_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, m_size, cudaMemcpyHostToDevice);

    /* Computes block grid dimensions (X,Y) — ceil so the whole matrix is covered */
    dim3 dimGrid( ceil( (float) colunas / THREAD_PER_BLOCK ),
                  ceil( (float) linhas / THREAD_PER_BLOCK));
    /* Computes block dimensions (X, Y) */
    dim3 dimBlock(THREAD_PER_BLOCK, THREAD_PER_BLOCK);

    /* Launches computing in GPU */
    addMatrix2d <<< dimGrid, dimBlock >>> (d_A, d_B, d_C, linhas, colunas);
    cudaError_t err = cudaGetLastError();   // FIX: surface launch failures
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    /* Copying computed result from device memory (blocking, so it also
       waits for the kernel to finish) */
    cudaMemcpy(C, d_C, m_size, cudaMemcpyDeviceToHost);

    long long int somador = 0;
    // Keep this computation on the CPU
    for(i = 0; i < linhas; i++)
        for(j = 0; j < colunas; j++)
            somador += C[i*colunas+j];

    printf("%lli\n", somador);

    /* Cleaning everything up */
    free(A); free(B); free(C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}
4,306
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <sm_11_atomic_functions.h>

// Maximum number of thread blocks the lock-free barrier arrays can track.
#define MAX_NUM_BLOCKS 40

// Synchronization code is based on paper:
// Shucai Xiao and Wu-chun Feng. "Inter-Block GPU Communication via Fast Barrier Synchronization".
// Proceedings of the 24th IEEE International Parallel and Distributed Processing Symposium (IPDPS), 2010
//

// Global arrival counter for the atomic-counter barrier. Declared volatile
// so spinning threads re-read it from memory on every loop iteration.
__device__ volatile int g_mutex;
// Per-block "arrived" flags (set by each block) and "release" flags
// (set by the observer block) for the lock-free barrier.
__device__ volatile int ArrayIn[MAX_NUM_BLOCKS];
__device__ volatile int ArrayOut[MAX_NUM_BLOCKS];

// Resets all barrier state. Must be launched — and completed — before any
// kernel that calls __gpu_sync or __gpu_sync_lock_free.
__global__ void init_sync(){
    g_mutex = 0;
    for(int i = threadIdx.x; i < MAX_NUM_BLOCKS; i+=blockDim.x){
        ArrayIn[i] = 0;
        ArrayOut[i] = 0;
    }
};

// Atomic-counter grid barrier: thread 0 of each block increments g_mutex
// and spins until all blocks have arrived (g_mutex == goalVal), then the
// whole block is released through __syncthreads().
// Callers must pass a goalVal that grows monotonically across successive
// barriers (e.g. numBlocks, 2*numBlocks, ...) since g_mutex is never reset
// between barriers.
__device__ void __gpu_sync(int goalVal){
    // Thread id in block
    int tid_in_block = threadIdx.x * blockDim.y + threadIdx.y;

    // only thread 0 is used for synchronization
    if(tid_in_block == 0){
        atomicAdd((int*)&g_mutex, 1);

        while(g_mutex != goalVal){
            // Do nothing. Volatile g_mutex guarantees
            // that this loop is not moved away
        }
    }
    // Barrier reached by every thread in the block, so this is a uniform
    // (non-divergent) __syncthreads().
    __syncthreads();
}

// Lock-free grid barrier: each block announces arrival in ArrayIn[bid];
// a single designated observer block waits for all arrivals, then releases
// every block through ArrayOut.
// NOTE(review): the observer here is bid == 1 — a grid with only one block
// (bid 0 alone) would spin forever, and the paper's formulation typically
// designates block 0; verify against the intended launch configuration.
// The existing in-code comment also notes the blockDim >= gridDim assumption.
__device__ void __gpu_sync_lock_free(int goalVal){
    // Thread id in block
    int tid_in_block = threadIdx.x * blockDim.y + threadIdx.y;
    int nBlockNum = gridDim.x * gridDim.y;
    int bid = blockIdx.x * gridDim.y + blockIdx.y;

    // only thread 0 is used for synchronization
    if(tid_in_block == 0){
        ArrayIn[bid] = goalVal;
    }

    if(bid == 1){
        // Assuming that there are more threads than blocks. Modify it.
        // Thread t of the observer block spins on block t's arrival flag.
        if(tid_in_block < nBlockNum){
            while(ArrayIn[tid_in_block] != goalVal){
                // Do nothing here
            }
        }
        __syncthreads();

        // All blocks have arrived — publish the release flags.
        if(tid_in_block < nBlockNum){
            ArrayOut[tid_in_block] = goalVal;
        }
    }

    // Every block's thread 0 waits for its own release flag.
    if(tid_in_block == 0){
        while(ArrayOut[tid_in_block] != goalVal){
            // Do nothing here
        }
    }
    __syncthreads();
}
4,307
// GPU color-space conversions (RGB<->YUV, RGB<->HSL) and the host-side
// contrast-enhancement drivers that combine them with the histogram
// routines declared in hist-equ.cuh.
//
// BUG FIX (whole file): the grid size is rounded up
// ((img_size + nbr_bin - 1) / nbr_bin) but no kernel had a bounds check,
// so for any image whose pixel count is not a multiple of 256 the threads
// of the last block read and wrote past the end of every device buffer.
//  - The YUV kernels carry no size parameter (signatures kept unchanged),
//    so their host wrappers now allocate the device buffers padded up to a
//    whole number of blocks; all launched threads stay in bounds and only
//    img_size bytes are copied in/out.
//  - The HSL kernels already receive the size via the img_size device
//    pointer, so they now simply guard with it.
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.cuh"

#define nbr_bin 256

/* Clamp an int to the representable range of an unsigned char, [0, 255]. */
__device__ unsigned char clip_rgb_gpu(int x)
{
    if(x > 255)
        return 255;
    if(x < 0)
        return 0;
    return (unsigned char)x;
}

/* One thread per pixel: RGB -> YUV (BT.601-style coefficients).
 * No size parameter: the host wrapper pads the buffers so every launched
 * thread indexes valid memory. */
__global__ void rgb2yuv_conversion_gpu(unsigned char * img_y, unsigned char * img_u, unsigned char * img_v,
                                       unsigned char * img_r, unsigned char * img_g, unsigned char * img_b)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned char r, g, b;
    unsigned char y, cb, cr;

    r = img_r[i];
    g = img_g[i];
    b = img_b[i];

    y  = (unsigned char)( 0.299*r + 0.587*g +  0.114*b);
    cb = (unsigned char)(-0.169*r - 0.331*g +  0.499*b + 128);
    cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128);

    img_y[i] = y;
    img_u[i] = cb;
    img_v[i] = cr;
}

/* One thread per pixel: YUV -> RGB, clamped to [0, 255].
 * Same padding contract as rgb2yuv_conversion_gpu. */
__global__ void yuv2rgb_conversion_gpu(unsigned char * img_y, unsigned char * img_u, unsigned char * img_v,
                                       unsigned char * img_r, unsigned char * img_g, unsigned char * img_b)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int rt,gt,bt;
    int y, cb, cr;

    y  = (int)img_y[i];
    cb = (int)img_u[i] - 128;
    cr = (int)img_v[i] - 128;

    rt = (int)( y + 1.402*cr);
    gt = (int)( y - 0.344*cb - 0.714*cr);
    bt = (int)( y + 1.772*cb);

    img_r[i] = clip_rgb_gpu(rt);
    img_g[i] = clip_rgb_gpu(gt);
    img_b[i] = clip_rgb_gpu(bt);
}

/* Convert a PPM RGB image to YUV planes on the GPU.
 * Device buffers are padded to a whole number of blocks (see file header). */
YUV_IMG rgb2yuv_gpu(PPM_IMG img_in)
{
    YUV_IMG img_out;
    int img_size;

    img_out.w = img_in.w;
    img_out.h = img_in.h;
    img_size = img_out.w * img_out.h;

    img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
    img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
    img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);

    /* Pad the device allocations so the rounded-up grid stays in bounds. */
    int blocksPerGrid = (img_size + nbr_bin - 1) / nbr_bin;
    size_t padded = (size_t)blocksPerGrid * nbr_bin * sizeof(unsigned char);

    // Allocate vectors in device memory
    unsigned char* img_y_gpu; cudaMalloc(&img_y_gpu, padded);
    unsigned char* img_u_gpu; cudaMalloc(&img_u_gpu, padded);
    unsigned char* img_v_gpu; cudaMalloc(&img_v_gpu, padded);
    unsigned char* img_r_gpu; cudaMalloc(&img_r_gpu, padded);
    unsigned char* img_g_gpu; cudaMalloc(&img_g_gpu, padded);
    unsigned char* img_b_gpu; cudaMalloc(&img_b_gpu, padded);

    /* Zero the inputs so the padding tail holds deterministic values. */
    cudaMemset(img_r_gpu, 0, padded);
    cudaMemset(img_g_gpu, 0, padded);
    cudaMemset(img_b_gpu, 0, padded);

    // Copy vectors from host memory to device memory
    cudaMemcpy(img_r_gpu, img_in.img_r, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(img_g_gpu, img_in.img_g, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(img_b_gpu, img_in.img_b, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);

    // Invoke kernel
    rgb2yuv_conversion_gpu<<<blocksPerGrid, nbr_bin>>>(img_y_gpu, img_u_gpu, img_v_gpu,
                                                       img_r_gpu, img_g_gpu, img_b_gpu);

    // Copy result from device memory to host memory
    cudaMemcpy(img_out.img_y, img_y_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(img_out.img_u, img_u_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(img_out.img_v, img_v_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(img_r_gpu); cudaFree(img_g_gpu); cudaFree(img_b_gpu);
    cudaFree(img_y_gpu); cudaFree(img_u_gpu); cudaFree(img_v_gpu);

    return img_out;
}

/* Convert YUV planes back to a PPM RGB image on the GPU.
 * Same padded-allocation scheme as rgb2yuv_gpu. */
PPM_IMG yuv2rgb_gpu(YUV_IMG img_in)
{
    PPM_IMG img_out;
    int img_size;

    img_out.w = img_in.w;
    img_out.h = img_in.h;
    img_size = img_out.w * img_out.h;

    img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
    img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
    img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);

    int blocksPerGrid = (img_size + nbr_bin - 1) / nbr_bin;
    size_t padded = (size_t)blocksPerGrid * nbr_bin * sizeof(unsigned char);

    // Allocate vectors in device memory
    unsigned char* img_y_gpu; cudaMalloc(&img_y_gpu, padded);
    unsigned char* img_u_gpu; cudaMalloc(&img_u_gpu, padded);
    unsigned char* img_v_gpu; cudaMalloc(&img_v_gpu, padded);
    unsigned char* img_r_gpu; cudaMalloc(&img_r_gpu, padded);
    unsigned char* img_g_gpu; cudaMalloc(&img_g_gpu, padded);
    unsigned char* img_b_gpu; cudaMalloc(&img_b_gpu, padded);

    cudaMemset(img_y_gpu, 0, padded);
    cudaMemset(img_u_gpu, 0, padded);
    cudaMemset(img_v_gpu, 0, padded);

    // Copy vectors from host memory to device memory
    cudaMemcpy(img_y_gpu, img_in.img_y, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(img_u_gpu, img_in.img_u, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(img_v_gpu, img_in.img_v, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);

    // Invoke kernel
    yuv2rgb_conversion_gpu<<<blocksPerGrid, nbr_bin>>>(img_y_gpu, img_u_gpu, img_v_gpu,
                                                       img_r_gpu, img_g_gpu, img_b_gpu);

    // Copy result from device memory to host memory
    cudaMemcpy(img_out.img_r, img_r_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(img_out.img_g, img_g_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(img_out.img_b, img_b_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(img_r_gpu); cudaFree(img_g_gpu); cudaFree(img_b_gpu);
    cudaFree(img_y_gpu); cudaFree(img_u_gpu); cudaFree(img_v_gpu);

    return img_out;
}

/* Standard HSL helper: maps a hue offset to one RGB channel value in [0,1]. */
__device__ float Hue_2_RGB_gpu(float v1, float v2, float vH)
{
    if ( vH < 0 ) vH += 1;
    if ( vH > 1 ) vH -= 1;
    if ( ( 6 * vH ) < 1 ) return ( v1 + ( v2 - v1 ) * 6 * vH );
    if ( ( 2 * vH ) < 1 ) return ( v2 );
    if ( ( 3 * vH ) < 2 ) return ( v1 + ( v2 - v1 ) * ( ( 2.0f/3.0f ) - vH ) * 6 );
    return ( v1 );
}

/* One thread per pixel: HSL -> RGB. img_size is a device pointer holding
 * the pixel count; used to guard the rounded-up grid (bug fix). */
__global__ void hsl2rgb_conversion_gpu(int * img_size,
                                       unsigned char * img_r, unsigned char * img_g, unsigned char * img_b,
                                       float * img_h, float * img_s, unsigned char * img_l)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= *img_size)   /* FIX: tail-block threads previously wrote OOB */
        return;

    float H = img_h[i];
    float S = img_s[i];
    float L = img_l[i]/255.0f;
    float var_1, var_2;
    unsigned char r,g,b;

    if (S == 0) {
        /* Achromatic: all channels equal the lightness. */
        r = L * 255;
        g = L * 255;
        b = L * 255;
    }
    else {
        if (L < 0.5)
            var_2 = L * (1 + S);
        else
            var_2 = (L + S) - (S * L);
        var_1 = 2 * L - var_2;

        r = 255 * Hue_2_RGB_gpu(var_1, var_2, H + (1.0f/3.0f));
        g = 255 * Hue_2_RGB_gpu(var_1, var_2, H);
        b = 255 * Hue_2_RGB_gpu(var_1, var_2, H - (1.0f/3.0f));
    }
    img_r[i] = r;
    img_g[i] = g;
    img_b[i] = b;
}

/* Convert an HSL image to a PPM RGB image on the GPU. */
PPM_IMG hsl2rgb_gpu(HSL_IMG img_in)
{
    int img_size;
    PPM_IMG result;

    result.w = img_in.width;
    result.h = img_in.height;
    img_size = result.w * result.h;

    result.img_r = (unsigned char *)malloc(img_size * sizeof(unsigned char));
    result.img_g = (unsigned char *)malloc(img_size * sizeof(unsigned char));
    result.img_b = (unsigned char *)malloc(img_size * sizeof(unsigned char));

    // Allocate vectors in device memory
    int* img_size_gpu;        cudaMalloc(&img_size_gpu, sizeof(int));
    unsigned char* img_r_gpu; cudaMalloc(&img_r_gpu, img_size * sizeof(unsigned char));
    unsigned char* img_g_gpu; cudaMalloc(&img_g_gpu, img_size * sizeof(unsigned char));
    unsigned char* img_b_gpu; cudaMalloc(&img_b_gpu, img_size * sizeof(unsigned char));
    float* img_h_gpu;         cudaMalloc(&img_h_gpu, img_size * sizeof(float));
    float* img_s_gpu;         cudaMalloc(&img_s_gpu, img_size * sizeof(float));
    unsigned char* img_l_gpu; cudaMalloc(&img_l_gpu, img_size * sizeof(unsigned char));

    // Copy vectors from host memory to device memory
    cudaMemcpy(img_size_gpu, &img_size, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(img_h_gpu, img_in.h, img_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(img_s_gpu, img_in.s, img_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(img_l_gpu, img_in.l, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);

    // Invoke kernel
    int blocksPerGrid = (img_size + nbr_bin - 1) / nbr_bin;
    hsl2rgb_conversion_gpu<<<blocksPerGrid, nbr_bin>>>(img_size_gpu,
                                                       img_r_gpu, img_g_gpu, img_b_gpu,
                                                       img_h_gpu, img_s_gpu, img_l_gpu);

    // Copy vectors from device memory to host memory
    cudaMemcpy(result.img_r, img_r_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(result.img_g, img_g_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(result.img_b, img_b_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(img_size_gpu);
    cudaFree(img_r_gpu); cudaFree(img_g_gpu); cudaFree(img_b_gpu);
    cudaFree(img_h_gpu); cudaFree(img_s_gpu); cudaFree(img_l_gpu);

    return result;
}

/* One thread per pixel: RGB -> HSL. img_size is a device pointer holding
 * the pixel count; used to guard the rounded-up grid (bug fix). */
__global__ void rgb2hsl_conversion_gpu(int * img_size,
                                       float * img_h, float * img_s, unsigned char * img_l,
                                       unsigned char * img_r, unsigned char * img_g, unsigned char * img_b)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= *img_size)   /* FIX: tail-block threads previously wrote OOB */
        return;

    float H, S, L;
    float var_r = ( (float)img_r[i]/255 );//Convert RGB to [0,1]
    float var_g = ( (float)img_g[i]/255 );
    float var_b = ( (float)img_b[i]/255 );

    float var_min = (var_r < var_g) ? var_r : var_g;
    var_min = (var_min < var_b) ? var_min : var_b;   //min. value of RGB
    float var_max = (var_r > var_g) ? var_r : var_g;
    var_max = (var_max > var_b) ? var_max : var_b;   //max. value of RGB
    float del_max = var_max - var_min;               //Delta RGB value

    L = ( var_max + var_min ) / 2;

    if ( del_max == 0 )//This is a gray, no chroma...
    {
        H = 0;
        S = 0;
    }
    else //Chromatic data...
    {
        if ( L < 0.5 )
            S = del_max/(var_max+var_min);
        else
            S = del_max/(2-var_max-var_min );

        float del_r = (((var_max-var_r)/6)+(del_max/2))/del_max;
        float del_g = (((var_max-var_g)/6)+(del_max/2))/del_max;
        float del_b = (((var_max-var_b)/6)+(del_max/2))/del_max;

        if( var_r == var_max ){
            H = del_b - del_g;
        }
        else{
            if( var_g == var_max ){
                H = (1.0/3.0) + del_r - del_b;
            }
            else{
                H = (2.0/3.0) + del_g - del_r;
            }
        }
    }

    if ( H < 0 ) H += 1;
    if ( H > 1 ) H -= 1;

    img_h[i] = H;
    img_s[i] = S;
    img_l[i] = (unsigned char)(L*255);
}

/* Convert a PPM RGB image to HSL planes on the GPU. */
HSL_IMG rgb2hsl_gpu(PPM_IMG img_in)
{
    int img_size;
    HSL_IMG img_out;

    img_out.width = img_in.w;
    img_out.height = img_in.h;
    img_size = img_out.width * img_out.height;

    img_out.h = (float *)malloc(img_size * sizeof(float));
    img_out.s = (float *)malloc(img_size * sizeof(float));
    img_out.l = (unsigned char *)malloc(img_size * sizeof(unsigned char));

    // Allocate vectors in device memory
    int* img_size_gpu;        cudaMalloc(&img_size_gpu, sizeof(int));
    float* img_h_gpu;         cudaMalloc(&img_h_gpu, img_size * sizeof(float));
    float* img_s_gpu;         cudaMalloc(&img_s_gpu, img_size * sizeof(float));
    unsigned char* img_l_gpu; cudaMalloc(&img_l_gpu, img_size * sizeof(unsigned char));
    unsigned char* img_r_gpu; cudaMalloc(&img_r_gpu, img_size * sizeof(unsigned char));
    unsigned char* img_g_gpu; cudaMalloc(&img_g_gpu, img_size * sizeof(unsigned char));
    unsigned char* img_b_gpu; cudaMalloc(&img_b_gpu, img_size * sizeof(unsigned char));

    // Copy vectors from host memory to device memory
    cudaMemcpy(img_size_gpu, &img_size, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(img_r_gpu, img_in.img_r, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(img_g_gpu, img_in.img_g, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(img_b_gpu, img_in.img_b, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);

    // Invoke kernel
    int blocksPerGrid = (img_size + nbr_bin - 1) / nbr_bin;
    rgb2hsl_conversion_gpu<<<blocksPerGrid, nbr_bin>>>(img_size_gpu,
                                                       img_h_gpu, img_s_gpu, img_l_gpu,
                                                       img_r_gpu, img_g_gpu, img_b_gpu);

    // Copy vectors from device memory to host memory
    cudaMemcpy(img_out.h, img_h_gpu, img_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(img_out.s, img_s_gpu, img_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(img_out.l, img_l_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(img_size_gpu);
    cudaFree(img_h_gpu); cudaFree(img_s_gpu); cudaFree(img_l_gpu);
    cudaFree(img_r_gpu); cudaFree(img_g_gpu); cudaFree(img_b_gpu);

    return img_out;
}

/* Contrast enhancement via histogram equalization of the Y (luma) channel.
 * histogram_gpu / histogram_equalization_gpu are declared in hist-equ.cuh. */
PPM_IMG contrast_enhancement_c_yuv_gpu(PPM_IMG img_in)
{
    YUV_IMG yuv_med;
    PPM_IMG result;
    unsigned char * y_equ;
    int hist[256];

    yuv_med = rgb2yuv_gpu(img_in);
    y_equ = (unsigned char *)malloc(yuv_med.h*yuv_med.w*sizeof(unsigned char));

    histogram_gpu(hist, yuv_med.img_y, yuv_med.h * yuv_med.w, 256);
    histogram_equalization_gpu(y_equ,yuv_med.img_y,hist,yuv_med.h * yuv_med.w, 256);

    free(yuv_med.img_y);
    yuv_med.img_y = y_equ;

    result = yuv2rgb_gpu(yuv_med);
    free(yuv_med.img_y);
    free(yuv_med.img_u);
    free(yuv_med.img_v);

    return result;
}

/* Contrast enhancement via histogram equalization of the L (lightness)
 * channel in HSL space. */
PPM_IMG contrast_enhancement_c_hsl_gpu(PPM_IMG img_in)
{
    HSL_IMG hsl_med;
    PPM_IMG result;
    unsigned char * l_equ;
    int hist[256];

    hsl_med = rgb2hsl_gpu(img_in);
    l_equ = (unsigned char *)malloc(hsl_med.height*hsl_med.width*sizeof(unsigned char));

    histogram_gpu(hist, hsl_med.l, hsl_med.height * hsl_med.width, 256);
    histogram_equalization_gpu(l_equ, hsl_med.l,hist,hsl_med.width*hsl_med.height, 256);

    free(hsl_med.l);
    hsl_med.l = l_equ;

    result = hsl2rgb_gpu(hsl_med);
    free(hsl_med.h);
    free(hsl_med.s);
    free(hsl_med.l);

    return result;
}

/* Contrast enhancement for grayscale (PGM) images. */
PGM_IMG contrast_enhancement_g_gpu(PGM_IMG img_in)
{
    PGM_IMG result;
    int hist[256];

    result.w = img_in.w;
    result.h = img_in.h;
    result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));

    histogram_gpu(hist, img_in.img, img_in.h * img_in.w, 256);
    histogram_equalization_gpu(result.img,img_in.img,hist,result.w*result.h, 256);

    return result;
}
4,308
/* ******************************************************
This program is to reconstruct for 3-D cone beam projection,
apply on 3-D shep-Logan head phaton

There are three steps to the weighted filtered backprojection algorithm:
1) convert projection to projection_prime (weighted)
2) filtering part
3) backprojection part

reference book: "Principles of Computerized Tomographic Imaging"
Avinash C. Kak & Malcolm Slaney Page 100-107
implement in Frequency Domain

Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted. This software is provided
"as is" without express or implied warranty.

Send comments or suggestions for this OpenACC version to
rxu6@uh.edu, schandra@udel.edu

Authors: Rengan Xu, Sunita Chandrasekaran
May 26th, 2016
****************************************************** */
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<string.h>
#include<cuda_runtime.h>

#define PI 3.141592653589793
#define SO 62.0                 // distance from source to rotation center
#define SD 83.0                 // distance from detector to source center. originally 83
#define OD (SO/SD)              // for translate real detector to the dector at origin
#define PROJECTION_Y 200        // number of projections in y axis of detector
#define PROJECTION_Z 200        // number of projections in z axis of detector
#define frame_size (PROJECTION_Z*PROJECTION_Y)
#define Z_CENTER 0              // 115.5
#define Y_CENTER 100            // 515.5
#define sample_interval_y (0.0194*OD)  // sample interval in image detector at origin (not true interval at real detector)
#define sample_interval_z (0.0194*OD)
#define ZP 256                  // zero padding
#define RECONSIZE 200
#define RECONSIZE_Z 200
#define REC_XY_CENTER ((RECONSIZE-1.0)/2.0)
#define CONVOLVESIZE ZP/2       // CONVOLVESIZE = ZP/2
#define num_belta 300
#define belta_step (360.0/300.0) //1.0
#define recon_step (sample_interval_y) // voxel size
#define recon_step_z recon_step
#define water 0.2006
#define ignore1 0
#define open 9013
#define zstart 200              // go up
#define THREADS 128

// Forward declarations of the Numerical-Recipes-style FFT helpers below.
// They use the NR 1-based array convention; callers pass `array - 1`.
void four1(float data[], unsigned long nn, int isign);
void realft(float data[], unsigned long n, int isign);
void cosft1(float y[], int n);

//version 2.0
/************************************ Kernel for the 3rd step back projeciton *******************************/
// One thread per reconstruction voxel (flat 1-D launch over
// RECONSIZE_Z * RECONSIZE * RECONSIZE). For each view angle, project the
// voxel onto the detector plane and accumulate the bilinearly interpolated,
// distance-weighted filtered-projection value; finally convert the
// accumulated attenuation to a CT (Hounsfield-style) number.
// NOTE(review): there is no `idx < size` guard — the commented-out
// grid-stride loop was removed. The launch in main() is exact
// (200*200*200 / 128 = 62500 blocks with no remainder), so no thread
// overshoots; any change to THREADS or the volume size must restore a guard.
__global__ void back_projection(float *fp_d, short int* CT_numbers_d,
        int view_start, int view_end, int X_SIZE, int Y_SIZE, int Z_SIZE)
{
    float x,y,z,t,s,p_prime, ksi_prime,SO_s,factor,belta_rad,cos_belta,sin_belta;
    float m_f,m_z,n_f,n_y,temp;
    int m_less,n_less, i, j ,k, l, idx;
    short int CT_number;
    float rec;
    int size2;

    idx = threadIdx.x + blockIdx.x*blockDim.x;
    size2 = RECONSIZE*RECONSIZE;
    // size1 = RECONSIZE_Z*size2;
    //for(idx=idx; idx<size1; idx+=blockDim.x*gridDim.x)
    {
        // Decompose the flat index into (slice i, row j, column k).
        i = (idx/size2)%RECONSIZE_Z;
        j = (idx/RECONSIZE)%RECONSIZE;
        k = idx%RECONSIZE;

        // Voxel position in physical coordinates.
        z=(Z_CENTER-zstart+i)*recon_step_z;
        y=(j-REC_XY_CENTER)*recon_step;
        x=(k-REC_XY_CENTER)*recon_step;
        rec = 0;

        for(l=view_start;l<view_end;l++)
        {
            // Rotate the voxel into the detector frame for view l.
            belta_rad=(num_belta-l)*belta_step*PI/180;
            cos_belta=cos(belta_rad);
            sin_belta=sin(belta_rad);
            t=x*cos_belta+y*sin_belta;
            s=y*cos_belta-x*sin_belta;

            // Fan-beam magnification and detector coordinates.
            SO_s=SO/(SO-s);
            p_prime=SO_s*t;
            ksi_prime=SO_s*z;
            factor=SO_s*SO_s;

            /* bilinear interpolation */
            m_f=-ksi_prime/sample_interval_z+Z_CENTER;
            m_less=(int)floor(m_f);
            m_z=(m_f-m_less);
            n_f=p_prime/sample_interval_y+Y_CENTER;
            n_less=(int)floor(n_f);
            n_y=(n_f-n_less);
            // Clamp to the valid interpolation window of the filtered
            // projection (PROJECTION_Z rows x ZP zero-padded columns).
            if (m_less>=199) m_less = PROJECTION_Z-2;
            if (n_less>=255) n_less = ZP-2;
            if (m_less<=0) m_less = 0;
            if (n_less<=0) n_less = 0;

            temp=(1-m_z)*(1-n_y)*fp_d[l*Y_SIZE*Z_SIZE+m_less*Z_SIZE+n_less]+m_z*(1-n_y)*fp_d[l*Y_SIZE*Z_SIZE+(m_less+1)*Z_SIZE+n_less]+ (1-m_z)*n_y*fp_d[l*Y_SIZE*Z_SIZE+m_less*Z_SIZE+n_less+1]+m_z*n_y*fp_d[l*Y_SIZE*Z_SIZE+(m_less+1)*Z_SIZE+n_less+1];
            rec+=factor*temp;
        } // end of belta--viewend

        // Normalize by the number of views and convert attenuation to a
        // CT number relative to water.
        temp=rec*4*PI/num_belta;
        if(temp<0) temp=0.0;
        CT_number=(short int)((temp-water)/water*1000);
        CT_numbers_d[i*RECONSIZE*RECONSIZE + j*RECONSIZE + k] = CT_number;
    }
}

/************************************* MAIN *************************************************/
// Reads raw unsigned-short projections from argv[1], performs the weighted
// filtering (steps 1-2) on the CPU in the frequency domain, runs the
// backprojection (step 3) on the GPU, and writes the short-int CT volume
// to argv[2].
int main(int argc, char** argv)
{
    if(argc < 3)
    {
        printf("usage: %s <input> <output>\n", argv[0]);
        exit(0);
    }

    FILE *ptr_proj,*ptr_ct;
    int i,j,k,l,n,skip;
    static unsigned short proj;
    static float projection[ZP];
    static float weight[PROJECTION_Z][PROJECTION_Y];
    // y_prime & z_prime are coordinate in detector; y[] & z[] are coordinate in detector of rotation center
    float y_prime,z_prime,filter[CONVOLVESIZE+1];
    int num_view,view_start,view_end;
    float *fp_h;
    float *fp_d;
    short int *CT_numbers_h;
    short int *CT_numbers_d;
    float ***filteredprojection;
    struct timeval tim;
    double begin, end;

    int X_SIZE = 300;          // number of views held in filteredprojection
    int Y_SIZE = PROJECTION_Z;
    int Z_SIZE = ZP;

    // Triple-indirection buffer for the filtered projections:
    // [view][detector row][zero-padded detector column].
    filteredprojection = (float ***)malloc(sizeof(float **) * X_SIZE);
    for (i = 0 ; i < X_SIZE; i++)
    {
        filteredprojection[i] = (float **)malloc(sizeof(float *) * Y_SIZE);
        for (j = 0; j < Y_SIZE; j++)
            filteredprojection[i][j] = (float *)malloc(sizeof(float) * Z_SIZE);
    }

    // ramp filter design (spatial-domain ramp kernel, then transformed
    // to the frequency domain by the cosine transform below)
    for(n=0;n<CONVOLVESIZE+1;n++)
    {
        if(n==0)
            filter[n]=1/(8*(sample_interval_y)*(sample_interval_y));
        else if((n%2)==0)
            filter[n]=0;
        else
            filter[n]=-1/(2*n*n*PI*PI*(sample_interval_y)*(sample_interval_y));
    }
    cosft1(filter-1,CONVOLVESIZE); // FFT

    for(i=0;i<PROJECTION_Z;i++)
    {   // weitht factor is independent of belta rotation angle
        z_prime=-(i-Z_CENTER)*sample_interval_z; // z center 414
        for(j=0;j<PROJECTION_Y;j++)
        {
            y_prime=(j-Y_CENTER)*sample_interval_y; // y center 504
            weight[i][j]=SO/sqrt(SO*SO+ y_prime *y_prime+z_prime*z_prime);
        }
    }

    num_view=num_belta;//process_size;
    view_start=0; //my_rank * num_view;
    view_end=view_start+num_view;

    if((ptr_proj=fopen(argv[1],"rb"))==NULL)
    {
        // If file open is not succesful, print could not open the file and quit
        fprintf(stderr,"Sorry could not open the file %s.\n", argv[1]);
        exit(1);
    }
    if((ptr_ct=fopen(argv[2],"wb"))==NULL )
    {
        // If file open is not succesful, print could not open the file and quit
        fprintf(stderr,"Sorry could not open the file %s.\n", argv[2]);
        exit(1);
    }

    gettimeofday(&tim, NULL);
    begin = tim.tv_sec + (tim.tv_usec/1000000.0);

    // filtering projection data of each theta angle in frequency domain
    for(l=view_start;l<view_end;l++)
    {   // start of l ----------------
        skip=num_belta-l+ignore1;
        fseek(ptr_proj,(skip*frame_size)*sizeof(unsigned short),0); // skip seq header , start from current frame l

        //step 1: convert projection to projection_prime page 106 function (175)
        for(i=0;i<PROJECTION_Z;i++)
        {
            for(j=0;j<ZP;j++)
            {
                if( j<PROJECTION_Y)
                {
                    fread(&proj,sizeof(unsigned short),1,ptr_proj);
                    // if(j<50 || j>1900) proj=open; //correction for the collimators
                    if( proj==0 ) proj=open;
                    if(proj>=open) projection[j]=0;
                    else projection[j]=-(log(proj*1.0/open))*weight[i][j];
                }
                else projection[j]=0; // zero padding
            } //end of j-PROJECTION_Y
            realft(projection-1,ZP,1); // FFT

            //step 2: filter projection
            for(j=0;j<ZP;j++)
            {
                // filter process in freq domain == filter * projection
                // (even/odd j are the real/imaginary parts in the NR
                // packed real-FFT layout, both scaled by the same filter bin)
                if((j%2)==0)
                    filteredprojection[l][i][j]=projection[j]*filter[j/2]*(sample_interval_y)*2/ZP;
                else
                    filteredprojection[l][i][j]=projection[j]*filter[(j-1)/2]*(sample_interval_y)*2/ZP;
            }
            realft(filteredprojection[l][i]-1,ZP,-1); // IFFT
        } // end of i-PROJECTION_Z
    } // end of belta

    //step 3: back projection
    // Flatten the triple-pointer buffer into one contiguous host array
    // for the device copy.
    fp_h = (float*)malloc(X_SIZE * Y_SIZE * Z_SIZE * sizeof (float));
    for( i=0; i<X_SIZE; i++ )
        for( j=0; j<Y_SIZE; j++)
            for( k=0; k<Z_SIZE; k++)
            {
                fp_h[i*Y_SIZE*Z_SIZE+j*Z_SIZE+k] = filteredprojection[i][j][k];
            }

    cudaMalloc((void**)&fp_d, X_SIZE * Y_SIZE * Z_SIZE * sizeof (float));
    cudaMemcpy(fp_d, fp_h, X_SIZE * Y_SIZE * Z_SIZE * sizeof (float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&CT_numbers_d, RECONSIZE_Z * RECONSIZE * RECONSIZE * sizeof (short int));
    cudaMemset((void*)CT_numbers_d, 0, RECONSIZE_Z * RECONSIZE * RECONSIZE * sizeof (short int));
    CT_numbers_h = (short int*)malloc(RECONSIZE_Z * RECONSIZE * RECONSIZE * sizeof (short int));

    // One thread per voxel; this grid divides the volume exactly (see the
    // note on back_projection about the missing bounds guard).
    dim3 dimBlock(THREADS, 1, 1);
    dim3 dimGrid((RECONSIZE_Z*RECONSIZE*RECONSIZE+THREADS-1)/THREADS, 1, 1);
    back_projection<<<dimGrid, dimBlock>>>(fp_d, CT_numbers_d, view_start, view_end, X_SIZE, Y_SIZE, Z_SIZE);
    cudaMemcpy(CT_numbers_h, CT_numbers_d, RECONSIZE_Z * RECONSIZE * RECONSIZE * sizeof (short int), cudaMemcpyDeviceToHost);

    gettimeofday(&tim, NULL);
    end = tim.tv_sec + (tim.tv_usec/1000000.0);

    fwrite(CT_numbers_h,sizeof(short int),RECONSIZE_Z * RECONSIZE * RECONSIZE,ptr_ct);
    fclose(ptr_proj);
    fclose(ptr_ct);
    free(fp_h);
    free(CT_numbers_h);
    printf("Execution time of FDK: %.2f seconds\n",end-begin);
} // end of main

/********************************* FFT ******************************************/
// The three routines below are the classic Numerical Recipes in C
// implementations (1-based arrays; callers pass `array - 1`).
#define SWAP(a,b) tempr=(a);(a)=(b);(b)=tempr

// Complex FFT of nn complex values packed as data[1..2*nn];
// isign = +1 forward, -1 inverse (unnormalized).
void four1(float data[], unsigned long nn, int isign)
{
    unsigned long n,mmax,m,j,istep,i;
    double wtemp,wr,wpr,wpi,wi,theta;
    float tempr,tempi;

    n=nn << 1;
    j=1;
    // Bit-reversal reordering.
    for (i=1;i<n;i+=2) {
        if (j > i) {
            SWAP(data[j],data[i]);
            SWAP(data[j+1],data[i+1]);
        }
        m=n >> 1;
        while (m >= 2 && j > m) {
            j -= m;
            m >>= 1;
        }
        j += m;
    }
    // Danielson-Lanczos butterflies.
    mmax=2;
    while (n > mmax) {
        istep=mmax << 1;
        theta=isign*(6.28318530717959/mmax);
        wtemp=sin(0.5*theta);
        wpr = -2.0*wtemp*wtemp;
        wpi=sin(theta);
        wr=1.0;
        wi=0.0;
        for (m=1;m<mmax;m+=2) {
            for (i=m;i<=n;i+=istep) {
                j=i+mmax;
                tempr=wr*data[j]-wi*data[j+1];
                tempi=wr*data[j+1]+wi*data[j];
                data[j]=data[i]-tempr;
                data[j+1]=data[i+1]-tempi;
                data[i] += tempr;
                data[i+1] += tempi;
            }
            wr=(wtemp=wr)*wpr-wi*wpi+wr;
            wi=wi*wpr+wtemp*wpi+wi;
        }
        mmax=istep;
    }
}
#undef SWAP

// Real FFT of n real values in data[1..n]; isign = +1 forward (result in
// the NR packed layout), -1 inverse. Inverse must be scaled by 2/n by the
// caller (done via the *2/ZP factor in main's filtering loop).
void realft(float data[], unsigned long n, int isign)
{
    void four1(float data[], unsigned long nn, int isign);
    unsigned long i,i1,i2,i3,i4,np3;
    float c1=0.5,c2,h1r,h1i,h2r,h2i;
    double wr,wi,wpr,wpi,wtemp,theta;

    theta=3.141592653589793/(double) (n>>1);
    if (isign == 1) {
        c2 = -0.5;
        four1(data,n>>1,1);
    } else {
        c2=0.5;
        theta = -theta;
    }
    wtemp=sin(0.5*theta);
    wpr = -2.0*wtemp*wtemp;
    wpi=sin(theta);
    wr=1.0+wpr;
    wi=wpi;
    np3=n+3;
    for (i=2;i<=(n>>2);i++) {
        i4=1+(i3=np3-(i2=1+(i1=i+i-1)));
        h1r=c1*(data[i1]+data[i3]);
        h1i=c1*(data[i2]-data[i4]);
        h2r = -c2*(data[i2]+data[i4]);
        h2i=c2*(data[i1]-data[i3]);
        data[i1]=h1r+wr*h2r-wi*h2i;
        data[i2]=h1i+wr*h2i+wi*h2r;
        data[i3]=h1r-wr*h2r+wi*h2i;
        data[i4] = -h1i+wr*h2i+wi*h2r;
        wr=(wtemp=wr)*wpr-wi*wpi+wr;
        wi=wi*wpr+wtemp*wpi+wi;
    }
    if (isign == 1) {
        data[1] = (h1r=data[1])+data[2];
        data[2] = h1r-data[2];
    } else {
        data[1]=c1*((h1r=data[1])+data[2]);
        data[2]=c1*(h1r-data[2]);
        four1(data,n>>1,-1);
    }
}

// Cosine transform of y[1..n+1] (its own inverse up to scaling); used to
// move the ramp filter into the frequency domain.
void cosft1(float y[], int n)
{
    void realft(float data[], unsigned long n, int isign);
    int j,n2;
    float sum,y1,y2;
    double theta,wi=0.0,wpi,wpr,wr=1.0,wtemp;

    theta=PI/n;
    wtemp=sin(0.5*theta);
    wpr = -2.0*wtemp*wtemp;
    wpi=sin(theta);
    sum=0.5*(y[1]-y[n+1]);
    y[1]=0.5*(y[1]+y[n+1]);
    n2=n+2;
    for (j=2;j<=(n>>1);j++) {
        wr=(wtemp=wr)*wpr-wi*wpi+wr;
        wi=wi*wpr+wtemp*wpi+wi;
        y1=0.5*(y[j]+y[n2-j]);
        y2=(y[j]-y[n2-j]);
        y[j]=y1-wi*y2;
        y[n2-j]=y1+wi*y2;
        sum += wr*y2;
    }
    realft(y,n,1);
    y[n+1]=y[2];
    y[2]=sum;
    for (j=4;j<=n;j+=2) {
        sum += y[j];
        y[j]=sum;
    }
}
#undef PI
4,309
// Type your code here, or load an example.
// Squares each element of `array` in place.  One block per element:
// the element index is the block index; threads within a block are unused.
__global__ void square(int *array, int n)
{
    const int idx = blockIdx.x;   // one element per block
    if (idx >= n)
        return;
    const int v = array[idx];
    array[idx] = v * v;
}
4,310
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"

// Verification test kernel (GPUVerify-style; the //pass annotation above says
// the tool is expected to prove it race-free under blockDim=64, gridDim=1).
// Each thread writes threadIdx.x through a pointer alias of a thread-local
// variable, then stores into glob at that value — i.e. every thread writes
// glob[threadIdx.x] = threadIdx.x, a distinct element per thread.
__global__ void foo(int* glob) {
  int a;        // thread-local scratch
  int* p;       // alias of the local
  a = 0;
  p = &a;
  *p = threadIdx.x;         // a == threadIdx.x via the alias
  glob[*p] = threadIdx.x;   // distinct index per thread -> no data race
}
4,311
// To compile: nvcc hw06.cu -o hw06
#include <sys/time.h>
#include <stdio.h>

#define N 100000
#define FORMAT "%f\n"
#define TYPE float

// Dot product with a per-block shared-memory tree reduction.
// Assumes blockDim.x == 1024 (the size of `cache`) and that blockDim.x is a
// power of two.  Each block reduces its partial products into cache[0]; then
// thread 0 of every block folds that partial sum into *c with one atomicAdd.
__global__ void dotProduct(TYPE *a, TYPE *b, TYPE *c){
	unsigned long id = (blockIdx.x*blockDim.x)+threadIdx.x;
	__shared__ TYPE cache[1024];

	// BUGFIX 1: the original guard was `id > N`, so the thread with id == N
	// read a[N]/b[N] one past the end of the arrays.
	// BUGFIX 2: the original returned early for out-of-range threads, which
	// made them skip the __syncthreads() calls below — undefined behavior,
	// since a barrier must be reached by every thread in the block.  Instead,
	// out-of-range threads contribute 0 and participate in all barriers.
	cache[threadIdx.x] = (id < N) ? a[id]*b[id] : 0;
	__syncthreads();

	// Tree reduction: halve the number of active threads each round.
	for(int i = blockDim.x/2; i != 0; i /= 2){
		if(threadIdx.x < i){
			cache[threadIdx.x] += cache[threadIdx.x+i];
		}
		__syncthreads();
	}

	if(threadIdx.x == 0){
		atomicAdd(c, cache[0]);   // float atomicAdd: requires SM20+
	}
}

// Aborts with a message if the most recent CUDA call failed.
void CUDAErrorCheck(const char *message){
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess){
		printf("\n CUDA ERROR in: %s -> %s\n", message, cudaGetErrorString(error));
		exit(0);
	}
}

int main(){
	TYPE *A_CPU, *B_CPU, *C_CPU;   // host buffers
	long n = N;

	TYPE *A_GPU, *B_GPU, *C_GPU;   // device buffers
	dim3 gridDim, blockDim;

	// Allocate and initialize host memory: A = all 2s, B = all 1s,
	// so the expected dot product is 2*N.
	A_CPU = (TYPE*)malloc(n*sizeof(TYPE));
	B_CPU = (TYPE*)malloc(n*sizeof(TYPE));
	C_CPU = (TYPE*)malloc(sizeof(TYPE));
	*C_CPU = 0;
	for(int i = 0; i < n; i++)
	{
		A_CPU[i] = 2;
		B_CPU[i] = 1;
	}

	// 1024 threads per block; enough blocks to cover all n elements.
	gridDim.x = 1+(n-1)/1024;
	gridDim.y = 1;
	gridDim.z = 1;
	blockDim.x = 1024;
	blockDim.y = 1;
	blockDim.z = 1;

	cudaMalloc(&A_GPU, n*sizeof(TYPE));
	CUDAErrorCheck("a cuda malloc...");
	cudaMalloc(&B_GPU, n*sizeof(TYPE));
	CUDAErrorCheck("b cuda malloc...");
	cudaMalloc(&C_GPU, sizeof(TYPE));
	CUDAErrorCheck("c cuda malloc...");

	// BUGFIX: use synchronous cudaMemcpy here.  The original used
	// cudaMemcpyAsync on pageable host buffers and then free()d A_CPU/B_CPU
	// before the kernel ran — safe only by accident of the pageable-memory
	// staging behavior.  A blocking copy makes the free unconditionally safe.
	cudaMemcpy(A_GPU, A_CPU, n*sizeof(TYPE), cudaMemcpyHostToDevice);
	CUDAErrorCheck("a cuda memcpy from host...");
	cudaMemcpy(B_GPU, B_CPU, n*sizeof(TYPE), cudaMemcpyHostToDevice);
	CUDAErrorCheck("b cuda memcpy from host...");
	cudaMemcpy(C_GPU, C_CPU, sizeof(TYPE), cudaMemcpyHostToDevice);
	CUDAErrorCheck("c cuda memcpy from host...");
	free(A_CPU);
	free(B_CPU);

	dotProduct<<<gridDim, blockDim>>>(A_GPU, B_GPU, C_GPU);
	CUDAErrorCheck("kernel...");

	// Blocking D2H copy: guarantees the kernel has finished before *C_CPU is
	// read below (the original used an async copy and read immediately).
	cudaMemcpy(C_CPU, C_GPU, sizeof(TYPE), cudaMemcpyDeviceToHost);
	CUDAErrorCheck("c cuda memcpy from device..");

	cudaFree(A_GPU);
	CUDAErrorCheck("a cuda free...");
	cudaFree(B_GPU);
	CUDAErrorCheck("b cuda free...");
	cudaFree(C_GPU);
	CUDAErrorCheck("c cuda free...");

	printf(FORMAT, *C_CPU);
	free(C_CPU);
	return(0);
}
4,312
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define m 10
#define n 5

// Variant 1 — one thread per element: blockIdx.x = row, threadIdx.x = column.
// Launch: matrix_sum1<<<m, n>>>(d_A, d_B, d_C, m, n);
__global__ void matrix_sum1(int A[], int B[], int C[], int fil, int col)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (blockIdx.x < fil && threadIdx.x < col)
        C[index] = A[index] + B[index];
}

// Variant 2 — one block per ROW: each block walks its entire row.
// Launch: matrix_sum2<<<m, 1>>>(d_A, d_B, d_C, m, n);
__global__ void matrix_sum2(int A[], int B[], int C[], int fil, int col)
{
    int index = blockIdx.x;   /* row number */
    int i;
    if (index < fil)
    {
        for (i = 0; i < col; i++)
        {
            /* BUGFIX: element (row, i) of a row-major fil x col matrix lives
             * at row*col + i.  The original used A[index + i], which re-reads
             * only the first fil+col entries and sums the wrong pairs for
             * every row > 0. */
            C[index*col + i] = A[index*col + i] + B[index*col + i];
        }
    }
}

// Variant 3 — one block per COLUMN: each block walks its entire column.
// Launch: matrix_sum3<<<n, 1>>>(d_A, d_B, d_C, m, n);
__global__ void matrix_sum3(int A[], int B[], int C[], int fil, int col)
{
    int index = blockIdx.x;   /* column number */
    int i;
    if (index < col)
    {
        for (i = 0; i < fil; i++)
        {
            C[index + col*i] = A[index + col*i] + B[index + col*i];
        }
    }
}

// Fills a fil x col row-major matrix with random values in [0, 98].
void fill_matrix(int A[], int fil, int col)
{
    int i, j;
    for (i = 0; i < fil; i++)
    {
        for (j = 0; j < col; j++)
            A[i*col + j] = rand() % 99;   /* index by the col parameter, not the global n macro */
    }
}

// Prints a fil x col row-major matrix, one row per line.
void print_matrix(int A[], int fil, int col)
{
    int i, j;
    for (i = 0; i < fil; i++)
    {
        for (j = 0; j < col; j++)
            printf("%d ", A[i*col + j]);
        printf("\n");
    }
}

int main(int argc, char* argv[])
{
    int *h_A, *h_B, *h_C;   /* host matrices */
    int *d_A, *d_B, *d_C;   /* device matrices */
    size_t size = m*n*sizeof(int);

    h_A = (int*) malloc(size);
    h_B = (int*) malloc(size);
    h_C = (int*) malloc(size);

    fill_matrix(h_A, m, n);
    fill_matrix(h_B, m, n);
    print_matrix(h_A, m, n);
    printf("\n");
    print_matrix(h_B, m, n);
    printf("\n");

    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);

    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    /* Use the one-thread-per-element variant; the row/column variants above
     * compute the same result. */
    matrix_sum1<<<m, n>>>(d_A, d_B, d_C, m, n);

    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    print_matrix(h_C, m, n);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
4,313
// includes, system #include <stdio.h> #include <assert.h> #define ARRAY_SIZE 2000000 #define STRING_SIZE 16 int char_array[ARRAY_SIZE*STRING_SIZE]; int char_counts[26]; char getRandomChar() { int randNum = 0; char randChar = ' '; randNum = 26 * (rand() / (RAND_MAX + 1.0)); // pick number 0 < # < 25 randNum = randNum + 97; // scale to 'a' randChar = (char) randNum; // printf("%c", randChar); return randChar; } void init_arrays() { int i, j, randNum; char randChar; for ( i = 0; i < ARRAY_SIZE *STRING_SIZE; i++) { char_array[i] = (int)getRandomChar(); } for ( i = 0; i < 26; i++ ) { char_counts[i] = 0; } } void checkCUDAError(const char* msg); __global__ void countChar(int *d_out, int *d_in, int perThread, int numThreads) { int size = ARRAY_SIZE*STRING_SIZE; int i; int inOffset = blockDim.x * blockIdx.x; int in = inOffset + threadIdx.x; int theChar; int charLoc; for(i = 0; i < perThread; i++) { theChar = d_in[in*perThread+i]; charLoc = theChar - 97; d_out[in*26+charLoc]++; } } void print_results(int* results, int totalRuns) { int i, j, total = 0, count; for(i = 0; i < 26; i++) { count = 0; for(j = 0; j < totalRuns; j++) { count += results[j*26+i]; } total += count; printf(" %c %d\n", (char) (i + 97), count); } printf("\nTotal characters: %d\n", total); } ///////////////////////////////////////////////////////////////////// // Program main ///////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { int totalSize = ARRAY_SIZE * STRING_SIZE; int perThread = 10000; int size = totalSize/perThread; int *deviceInput, *deviceAnswer; int numThreadsPerBlock = 100; int numBlocks = size / numThreadsPerBlock; int totalThreads = numThreadsPerBlock * numBlocks; // allocate host and device memory size_t memSize = ARRAY_SIZE*STRING_SIZE * sizeof(int); cudaMalloc((void **) &deviceInput, memSize ); cudaMalloc((void **) &deviceAnswer, totalThreads * 26 * sizeof(int)); int* localReturn = (int*)malloc(sizeof(int)*26*totalThreads); init_arrays(); 
cudaMemcpy( deviceInput, char_array, memSize, cudaMemcpyHostToDevice); cudaMemset( deviceAnswer, 0, totalThreads*26*sizeof(int)); // launch kernel dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock); countChar <<< dimGrid, dimBlock >>>( deviceAnswer, deviceInput, perThread, totalThreads); cudaThreadSynchronize(); checkCUDAError("kernel invocation"); cudaMemcpy( localReturn , deviceAnswer, totalThreads*26*sizeof(int), cudaMemcpyDeviceToHost ); checkCUDAError("memcpy"); cudaFree(deviceAnswer); cudaFree(deviceInput); print_results(localReturn, totalThreads); return 0; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
4,314
#include "includes.h"

// Element-wise in-place division: d_dst[i] /= d_denom[i] (the unsigned short
// denominator is promoted to float before dividing).
// Indexed by blockIdx.x only — threadIdx is ignored — so this assumes a
// launch of one thread per block with gridDim.x == element count; confirm at
// the call site.  There is no bounds check, and a zero denominator yields
// inf/nan.
__global__ void DivideKernel ( float *d_dst, unsigned short *d_denom )
{
	const int idx = blockIdx.x;
	d_dst[idx] /= d_denom[idx];
}
4,315
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <iostream>

// Element-wise vector addition, one thread per element.
// No bounds check: the launch must cover exactly `size` threads.
__global__ void addVec(int* a, int* b, int* c, int size)
{
	int index = blockDim.x * blockIdx.x + threadIdx.x;
	c[index] = a[index] + b[index];
}

//3.1 a — one thread per matrix element
__global__ void MatrixAdditionElement(int* left, int* right, int* result, size_t width)
{
	size_t index = blockDim.x * blockIdx.x + threadIdx.x;
	if (index < width * width)
	{
		result[index] = left[index] + right[index];
	}
}

//3.1 b — one thread per row; each thread walks its whole row
__global__ void MatrixAdditionRow(int* left, int* right, int* result, size_t width)
{
	size_t index = blockDim.x * blockIdx.x + threadIdx.x;
	if (index < width)
	{
		index *= width;   // first element of this thread's row
		for (size_t i = 0; i < width; ++i)
		{
			result[index + i] = left[index + i] + right[index + i];
		}
	}
}

//3.1 c — one thread per column; strided walk down the column
__global__ void MatrixAdditionCol(int* left, int* right, int* result, size_t width)
{
	size_t elements = width * width;
	size_t index = blockDim.x * blockIdx.x + threadIdx.x;
	if (index < width)
	{
		for (size_t i = index; i < elements; i += width)
		{
			result[i] = left[i] + right[i];
		}
	}
}

// Runs the three matrix-addition variants on identical inputs (managed
// memory) and reports the first position at which they disagree.
void Driver3_1()
{
	std::cout << "Starting 3_1\nEnter width: ";
	size_t width;
	std::cin >> width;

	int* A, *B, *C1, *C2, *C3;
	size_t elements = width * width;
	cudaMallocManaged(&A, elements * sizeof(int));
	cudaMallocManaged(&B, elements * sizeof(int));
	cudaMallocManaged(&C1, elements * sizeof(int));
	cudaMallocManaged(&C2, elements * sizeof(int));
	cudaMallocManaged(&C3, elements * sizeof(int));

	for (size_t i = 0; i < elements; ++i)
	{
		A[i] = i;
		B[i] = elements - i;
	}

	MatrixAdditionElement<<<elements / 256 + 1, 256>>>(A, B, C1, width);
	MatrixAdditionRow<<<width / 256 + 1, 256>>>(A, B, C2, width);
	MatrixAdditionCol<<<width / 256 + 1, 256>>>(A, B, C3, width);
	cudaDeviceSynchronize();

	for (size_t i = 0; i < elements; ++i)
	{
		if (C1[i] != C2[i] || C2[i] != C3[i])
		{
			std::cout << "Mismatch at " << i << '\n';
			std::cout << C1[i] << ' ' << C2[i] << ' ' << C3[i] << '\n';
			break;
		}
	}

	cudaFree(A);
	cudaFree(B);
	cudaFree(C1);
	cudaFree(C2);
	cudaFree(C3);
}

//3.2 — matrix-vector product, one thread per matrix element.
// Each thread contributes matrix[row][col] * vectorIn[col] to vectorOut[row]
// via atomicAdd; vectorOut must be zero-initialized by the caller.
__global__ void MatrixVectorMult(float* matrix, float* vectorIn, float* vectorOut, size_t width)
{
	size_t index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index < width * width)
	{
		size_t row = index / width;
		size_t col = index % width;
		/* BUGFIX: the product needs vectorIn[col].  The original multiplied
		 * by vectorIn[row] (the output row), which only passed the bundled
		 * test because Driver3_2 fills vectorIn with all ones. */
		atomicAdd(vectorOut + row, matrix[index] * vectorIn[col]);
	}
}

// Builds a 10x10 test matrix (values 0..99) against an all-ones vector,
// runs MatrixVectorMult, and prints the resulting vector.
void Driver3_2()
{
	float* matrix, * vectorIn, * vectorOut;
	size_t width = 10;
	size_t elements = width * width;
	cudaMallocManaged(&matrix, sizeof(float) * elements);
	cudaMallocManaged(&vectorIn, sizeof(float) * width);
	cudaMallocManaged(&vectorOut, sizeof(float) * width);
	cudaMemset(vectorOut, 0, sizeof(float) * width);   // accumulator must start at 0

	for (int i = 0; i < elements; ++i)
	{
		matrix[i] = i;
	}
	for (int i = 0; i < width; ++i)
	{
		vectorIn[i] = 1;
	}

	MatrixVectorMult<<<width * width / 256 + 1, 256>>>(matrix, vectorIn, vectorOut, width);
	cudaDeviceSynchronize();

	for (size_t i = 0; i < width; ++i)
	{
		std::cout << vectorOut[i] << ' ';
	}

	cudaFree(matrix);
	cudaFree(vectorIn);
	cudaFree(vectorOut);
}

int main()
{
	Driver3_1();
	Driver3_2();
	return 0;
}
4,316
// Component-wise addition for float2.
__device__ inline float2 operator+(float2 a, float2 b)
{
	return make_float2( a.x + b.x, a.y + b.y );
}

// Linearized global thread index over the full 3-D grid of 3-D blocks:
// (linear block id) * (threads per block) + (linear thread id within block).
__device__ __forceinline__ unsigned int get_mesh_id()
{
	return (gridDim.y*gridDim.x*blockIdx.z + gridDim.x*blockIdx.y + blockIdx.x)
		* (blockDim.z*blockDim.y*blockDim.x)
		+ blockDim.y*blockDim.x*threadIdx.z + blockDim.x*threadIdx.y + threadIdx.x;
}

// Total number of threads in the launch.
__device__ __forceinline__ unsigned int get_mesh_size()
{
	return gridDim.z*gridDim.y*gridDim.x * blockDim.z*blockDim.y*blockDim.x;
}

// Linear thread index within the current block.
__device__ __forceinline__ unsigned int get_bundle_id()
{
	return blockDim.y*blockDim.x*threadIdx.z + blockDim.x*threadIdx.y + threadIdx.x;
}

// dst[i] = src1[i] + src2[i] + src3[i] + src4[i], one float2 per thread.
// No bounds check: the launch must cover exactly the array length.
extern "C" __global__ void p2p_4(float2 * dst, float2 * src1, float2 * src2, float2 * src3, float2 * src4)
{
	int id = get_mesh_id();
	dst[id] = src1[id] + src2[id] + src3[id] + src4[id];
}

// Same 4-way sum over a sub-region: index = offset + blockIdx.x*dim +
// threadIdx.x.  Presumably `dim` is the row pitch and `offset` the first
// interior element, giving one block per interior row — confirm at call site.
extern "C" __global__ void p2p_4_center(float2 * dst, float2 * src1, float2 * src2, float2 * src3, float2 * src4, unsigned int dim, unsigned int offset)
{
	int id = offset + blockIdx.x * dim + threadIdx.x;
	dst[id] = src1[id] + src2[id] + src3[id] + src4[id];
}

// dst[i] = src1[i] + src2[i] over a flat 1-D launch, one element per thread.
extern "C" __global__ void p2p_2(float2 * dst, float2 * src1, float2 * src2)
{
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	dst[id] = src1[id] + src2[id];
}
4,317
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Generated fuzz kernel: feeds the scalar arguments through a fixed
// floating-point expression and prints the final value of `comp`.
__global__ void compute(float comp, float var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float var_7) {
   if (comp > cosf(+0.0f / (-1.2242E34f * (-1.0814E-37f * sqrtf(+1.0582E-36f + (+1.5143E35f / (var_1 + var_2))))))) {
      float tmp_1 = +1.7283E-37f;
      comp += tmp_1 / var_4 + (-1.4541E36f - var_5);
      // var_3 controls the loop trip count
      for (int i=0; i < var_3; ++i) {
         float tmp_2 = +1.2085E-35f;
         float tmp_3 = -1.5980E36f;
         comp += tmp_3 * tmp_2 + (+0.0f / var_6 / (-1.2555E-43f - (+1.7779E35f * (var_7 * -1.7309E-35f))));
      }
   }
   printf("%.17g\n", comp);
}

// Helper emitted by the generator; unused by this particular test
// (dead code — the allocation is never freed, by design of the generator).
float* initPointer(float v) {
   float *ret = (float*) malloc(sizeof(float)*10);
   for(int i=0; i < 10; ++i)
      ret[i] = v;
   return ret;
}

// Parses eight scalar arguments from argv and launches the kernel with a
// single thread; cudaDeviceSynchronize flushes the device-side printf.
int main(int argc, char** argv) {
   /* Program variables */
   float tmp_1 = atof(argv[1]);
   float tmp_2 = atof(argv[2]);
   float tmp_3 = atof(argv[3]);
   int tmp_4 = atoi(argv[4]);
   float tmp_5 = atof(argv[5]);
   float tmp_6 = atof(argv[6]);
   float tmp_7 = atof(argv[7]);
   float tmp_8 = atof(argv[8]);

   compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
   cudaDeviceSynchronize();

   return 0;
}
4,318
#include <stdlib.h> #include <stdio.h> #include <math.h> #include "sparse_struc.cuh" void matrix_sum(int *mat1, int *mat2, int*sum, int nrow, int ncol) { int mat_index; for (int row_index = 0; row_index < nrow; row_index++) { for (int col_index = 0; col_index < ncol; col_index++) { mat_index = row_index *ncol + col_index; sum[mat_index] = mat1[mat_index] + mat2[mat_index]; } } } int max_value(int*mat, int nelem) { int max = mat[0]; for (int i = 1;i < nelem;i++) { if (mat[i] > max) max = mat[i]; } return max; } void linearize_matrix(double *mat, double *lin_mat,int nrow, int ncol) { int lin_index=0, mat_index; for (int col_index = 0;col_index <ncol;col_index++) { for (int row_index = 0;row_index < nrow ;row_index++) { mat_index = row_index*ncol + col_index; lin_mat[lin_index] = mat[mat_index]; lin_index++; } } } void prepare_sparse(int *row, int*col, double *val, struct sparse *s, int size) { double *val2; int *row2, *col2, index=0, nonzero_count = 0, count, found; row2 = (int*)malloc(size * sizeof(int)); col2 = (int*)malloc(size * sizeof(int)); val2 = (double*)malloc(size * sizeof(double)); count = 0; row2[count] = row[0]; col2[count] = col[0]; val2[count] = val[0]; count++; for (int i = 1;i < size;i++) { found = 0; for (int j = 0;j < count;j++) { if ((row[i] == row2[j]) && (col[i] == col2[j])) { found = 1; index = j; } } if (found == 1) { val2[index] += val[i]; } else { row2[count] = row[i]; col2[count] = col[i]; val2[count] = val[i]; count++; } } nonzero_count = 0; for (int i = 0; i < count; i++) { if (fabs(val2[i]) >pow(10, -9)) { nonzero_count++; } } s->row = (int*)malloc(nonzero_count * sizeof(int)); s->col = (int*)malloc(nonzero_count * sizeof(int)); s->val = (double*)malloc(nonzero_count * sizeof(double)); nonzero_count = 0; for (int i = 0; i < count; i++) { if (fabs(val2[i]) >pow(10, -9)) { s->row[nonzero_count] = row2[i]; s->col[nonzero_count] = col2[i]; s->val[nonzero_count] = val2[i]; nonzero_count++; } } s->nnz = nonzero_count; free(row2); free(col2); 
free(val2); } void sparse_matrix_product(struct sparse *a, struct sparse *b, struct sparse *p) { struct sparse tmp; int count = 0, found = 0; int prod_index, tmp_index, tmp_dim ; if (a->ncol == b->nrow) { p->nrow = a->nrow; p->ncol = b->ncol; p->nnz = 0; for (int a_index = 0; a_index < a->nnz; a_index++) { for (int b_index = 0; b_index < b->nnz; b_index++) { if ((a->col[a_index] == b->row[b_index])) { count++; } } } tmp_dim = count; tmp.row = (int*)malloc(tmp_dim * sizeof(int)); tmp.col = (int*)malloc(tmp_dim * sizeof(int)); tmp.val = (double*)malloc(tmp_dim * sizeof(double)); tmp_index = 0; for (int a_index = 0; a_index < a->nnz; a_index++) { for (int b_index = 0; b_index < b->nnz; b_index++) { if ((a->col[a_index] == b->row[b_index])) { if (count == 0) { tmp.row[tmp_index] = a->row[a_index]; tmp.col[tmp_index] = b->col[b_index]; tmp.val[tmp_index] = a->val[a_index] * b->val[b_index]; tmp_index++; } else { found = 0; for (int p_index = 0; p_index < tmp_dim; p_index++) { if ((tmp.row[p_index] == a->row[a_index]) && tmp.col[p_index] == b->col[b_index]) { found = 1; tmp.val[p_index] += a->val[a_index] * b->val[b_index]; } } if (found == 0) { tmp.row[tmp_index] = a->row[a_index]; tmp.col[tmp_index] = b->col[b_index]; tmp.val[tmp_index] = a->val[a_index] * b->val[b_index]; tmp_index++; } } } } } for (int i = 0; i < tmp_dim; i++) { if (fabs(tmp.val[i])>pow(10,-9)) { p->nnz++; } } p->row = (int*)malloc(p->nnz * sizeof(int)); p->col = (int*)malloc(p->nnz * sizeof(int)); p->val = (double*)malloc(p->nnz * sizeof(double)); prod_index = 0; for (int i = 0; i < count; i++) { if (fabs(tmp.val[i])>pow(10, -9)) { p->row[prod_index] = tmp.row[i]; p->col[prod_index] = tmp.col[i]; p->val[prod_index] = tmp.val[i]; prod_index++; } } free(tmp.row); free(tmp.col); free(tmp.val); } else { printf("Matrici non compatibili\n"); } } void sparse_to_dense(struct sparse *sparse_m, double **dense_m) { (*dense_m) = (double*)malloc(sparse_m->nrow*sparse_m->ncol * sizeof(double)); int index; for 
(int i = 0;i < sparse_m->nrow*sparse_m->ncol;i++) { (*dense_m)[i] = 0; } for (int i = 0;i < sparse_m->nnz;i++) { index = (sparse_m->row[i] - 1)*sparse_m->ncol + (sparse_m->col[i] - 1); //row_i*ncol + col_i (*dense_m)[index] = sparse_m->val[i]; } } void dense_to_sparse(double *dense_m, int nrow, int ncol, struct sparse *sparse_m) { int s_index = 0,d_index; for (int i = 0;i < nrow;i++) { for (int j = 0;j < ncol;j++) { if (fabs(dense_m[i*ncol + j]) > pow(10, -10)) { s_index++; } } } sparse_m->nrow = nrow; sparse_m->ncol = ncol; sparse_m->nnz = s_index; sparse_m->row = (int*)malloc(sparse_m->nnz * sizeof(int)); sparse_m->col = (int*)malloc(sparse_m->nnz * sizeof(int)); sparse_m->val = (double*)malloc(sparse_m->nnz * sizeof(double)); s_index = 0; for (int i = 0;i < nrow;i++) { for (int j = 0;j < ncol;j++) { d_index = i*ncol + j; if (fabs(dense_m[d_index]) > pow(10, -10)) { sparse_m->row[s_index] = i + 1; sparse_m->col[s_index] = j + 1; sparse_m->val[s_index] = dense_m[d_index]; s_index++; } } } } void matrix_inverse(double *Min, double *Mout, int actualsize) { /* This function calculates the inverse of a square matrix * * matrix_inverse(double *Min, double *Mout, int actualsize) * * Min : Pointer to Input square Double Matrix * Mout : Pointer to Output (empty) memory space with size of Min * actualsize : The number of rows/columns * * Notes: * - the matrix must be invertible * - there's no pivoting of rows or columns, hence, * accuracy might not be adequate for your needs. 
* */ /* Loop variables */ int i, j, k; /* Sum variables */ double sum, x; /* Copy the input matrix to output matrix */ for (i = 0; i<actualsize*actualsize; i++) { Mout[i] = Min[i]; } /* Add small value to diagonal if diagonal is zero */ for (i = 0; i<actualsize; i++) { j = i*actualsize + i; if ((Mout[j]<1e-12) && (Mout[j]>-1e-12)) { Mout[j] = 1e-12; } } /* Matrix size must be larger than one */ if (actualsize <= 1) return; for (i = 1; i < actualsize; i++) { Mout[i] /= Mout[0]; /* normalize row 0 */ } for (i = 1; i < actualsize; i++) { for (j = i; j < actualsize; j++) { /* do a column of L */ sum = 0.0; for (k = 0; k < i; k++) { sum += Mout[j*actualsize + k] * Mout[k*actualsize + i]; } Mout[j*actualsize + i] -= sum; } if (i == actualsize - 1) continue; for (j = i + 1; j < actualsize; j++) { /* do a row of U */ sum = 0.0; for (k = 0; k < i; k++) { sum += Mout[i*actualsize + k] * Mout[k*actualsize + j]; } Mout[i*actualsize + j] = (Mout[i*actualsize + j] - sum) / Mout[i*actualsize + i]; } } for (i = 0; i < actualsize; i++) /* invert L */ { for (j = i; j < actualsize; j++) { x = 1.0; if (i != j) { x = 0.0; for (k = i; k < j; k++) { x -= Mout[j*actualsize + k] * Mout[k*actualsize + i]; } } Mout[j*actualsize + i] = x / Mout[j*actualsize + j]; } } for (i = 0; i < actualsize; i++) /* invert U */ { for (j = i; j < actualsize; j++) { if (i == j) continue; sum = 0.0; for (k = i; k < j; k++) { sum += Mout[k*actualsize + j] * ((i == k) ? 1.0 : Mout[i*actualsize + k]); } Mout[i*actualsize + j] = -sum; } } for (i = 0; i < actualsize; i++) /* final inversion */ { for (j = 0; j < actualsize; j++) { sum = 0.0; for (k = ((i>j) ? i : j); k < actualsize; k++) { sum += ((j == k) ? 
1.0 : Mout[j*actualsize + k])*Mout[k*actualsize + i]; } Mout[j*actualsize + i] = sum; } } } void matrix_diff(double *a, double *b, double **diff, int a_nrow, int a_ncol, int b_nrow, int b_ncol) { int index; if ((a_nrow==b_nrow)&&(a_ncol==b_ncol)) { (*diff) = (double*)malloc(a_nrow*a_ncol * sizeof(double)); for (int i = 0;i < a_nrow;i++) { for (int j = 0;j < a_ncol;j++) { index = i*a_ncol + j; (*diff)[index] = a[index] - b[index]; } } } else printf("ERROR!\n"); } void sparse_diff(struct sparse *s1, struct sparse *s2, struct sparse *res) { int found, count = 0; for (int i = 0; i < s1->nnz; i++) { found = 0; for (int j = 0; j < s2->nnz; j++) { if ((s1->row[i] == s2->row[j]) && (s1->col[i] == s2->col[j])) { found = 1; if ((s1->val[i] - s2->val[j]) != 0) { count++; } break; } } if (found == 0) { count++; } } for (int i = 0; i < s2->nnz; i++) { found = 0; for (int j = 0; j < s1->nnz; j++) { if ((s2->row[i] == s1->row[j]) && (s2->col[i] == s1->col[j])) { found = 1; } } if (found == 0) { count++; } } res->nrow = s1->nrow; res->ncol = s1->ncol; res->nnz = count; res->row = (int*)malloc(res->nnz * sizeof(int)); res->col = (int*)malloc(res->nnz * sizeof(int)); res->val = (double*)malloc(res->nnz * sizeof(double)); count = 0; for (int i = 0; i < s1->nnz; i++) { found = 0; for (int j = 0; j < s2->nnz; j++) { if ((s1->row[i] == s2->row[j]) && (s1->col[i] == s2->col[j])) { found = 1; if (fabs(s1->val[i] - s2->val[j]) > pow(10, -9)) { res->row[count] = s1->row[i]; res->col[count] = s1->col[i]; res->val[count] = (s1->val[i] - s2->val[j]); count++; } break; } } if (found == 0) { res->row[count] = s1->row[i]; res->col[count] = s1->col[i]; res->val[count] = s1->val[i]; count++; } } for (int i = 0; i < s2->nnz; i++) { found = 0; for (int j = 0; j < s1->nnz; j++) { if ((s2->row[i] == s1->row[j]) && (s2->col[i] == s1->col[j])) { found = 1; } } if (found == 0) { res->row[count] = s2->row[i]; res->col[count] = s2->col[i]; res->val[count] = -s2->val[i]; count++; } } }
4,319
#include "includes.h"

// Sets output[0] to 1 if the two input vectors differ at any position.
// output[0] must be zero-initialized by the caller.  Multiple threads may
// store to output[0] concurrently, but all writers store the same value (1),
// so the unsynchronized write is benign.  The index covers a 2-D grid of 1-D
// blocks; there is no bounds check, so the launch must cover exactly the
// vector length.
__global__ void CompareVectorsKernel(float* inputOne, float* inputTwo, float* output)
{
	int id = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
		+ blockDim.x*blockIdx.x //blocks preceeding current block
		+ threadIdx.x;

	if (inputOne[id] != inputTwo[id])
		output[0] = 1;
}
4,320
#include "includes.h"

// Reverses `in` into `out` through a dynamically-sized shared-memory staging
// buffer (launch with N*sizeof(int) bytes of dynamic shared memory).
// NOTE(review): `buf` is indexed by the GLOBAL index idx, and elements are
// swapped across the whole range [0, N).  Shared memory is per-block, so this
// is only correct for a single-block launch with blockDim.x >= N — confirm
// the launch configuration at the call site.
__global__ void shmem ( int *in, int *out, int N )
{
	extern __shared__ int buf[];

	int idx = blockDim.x * blockIdx.x + threadIdx.x;

	// Stage the input into shared memory.
	if( idx < N )
	{
		buf[ idx ] = in[ idx ];
	}

	__syncthreads();

	// The first half of the threads swap element idx with its mirror N-idx-1.
	if ( idx < N/2 )
	{
		int tmp = buf[ N - idx - 1];
		buf[ N - idx - 1 ] = buf [ idx ];
		buf[ idx ] = tmp;
	}

	__syncthreads();

	// Write the reversed buffer back out.
	if( idx < N )
	{
		out[ idx ] = buf[ idx ];
	}
}
4,321
/* Pointer.to(iGA_nPtBlock0.gpuArray), Pointer.to(iGA_nPtBlock1.gpuArray), Pointer.to(iGA_blockLevel.gpuArray), Pointer.to(iGA_nPtBlPos.gpuArray), Pointer.to(iGA_nPtBlNeg.gpuArray), Pointer.to(iGA_nPtBlMid0.gpuArray), Pointer.to(iGA_nPtBlMid1.gpuArray), Pointer.to(iGA_newBlockCvg.gpuArray), Pointer.to(iGA_newBlockLevel.gpuArray), Pointer.to(new int[]{nDots}), // offset for blocks : 0 or 1 Pointer.to(new int[]{nBlocks}), // Output values Pointer.to(iGA_WhatToDoWithTheseBlocks.gpuArray) */ extern "C" __global__ void sortBlocks(// Old tree specs int* nPtBl0, int* nPtBl1, int* blLev, // New tree specs int* nPtBlPos, int* nPtBlNeg, int* nPtBlMid0, int* nPtBlMid1, int* newBlockCvg, int* newBlockLvl, int nDots, int nBlocks, int minInteract, int minPointsToKeep, // Output int* whatToDo, int* nPtKeep, int* nBlocsKeep, int* addrPt, int* addrBloc ) { int idBloc = blockIdx.x*blockDim.x+threadIdx.x; if (idBloc<nBlocks) { // do stuff // first compute the number of interactions before split float nInteractBefore=0; float nPtB0 = (float)(nPtBl0[idBloc]); float nPtB1 = (float)(nPtBl1[idBloc]); int level = blLev[idBloc]; // now compute the number of interaction after spliting blocks //__int2float_rd float nInteractSubBlockNeg; float nInteractSubBlockPos; int nPtBlTotPos = nPtBlPos[idBloc]+nPtBlPos[idBloc+nBlocks]; int nPtBlTotNeg = nPtBlNeg[idBloc]+nPtBlNeg[idBloc+nBlocks]; int nPtBlTotMid0 = nPtBlMid0[idBloc]+nPtBlMid0[idBloc+nBlocks]; int nPtBlTotMid1 = nPtBlMid1[idBloc]+nPtBlMid1[idBloc+nBlocks]; if (level>0) { nInteractBefore = nPtB0*nPtB1; } else { nInteractBefore=0.5*nPtB0*(nPtB0+1); nPtBlTotPos = nPtBlPos[idBloc]; nPtBlTotNeg = nPtBlNeg[idBloc]; } float nInteractSubBlockMid0=(float)(nPtBlMid0[idBloc])*(float)(nPtBlMid0[idBloc+nBlocks]);//*(newBlockCvg[4*idBloc+2]); float nInteractSubBlockMid1=(float)(nPtBlMid1[idBloc])*(float)(nPtBlMid1[idBloc+nBlocks]);//*(newBlockCvg[4*idBloc+3]); //printf("nInteractSubBlockMid0 = %f, car nPtBlMid0Bl0 = %i, et nPtBlMid0Bl1 = %i, gpubloc = 
%i, bloc = %i \n",nInteractSubBlockMid0,nPtBlMid0[idBloc],nPtBlMid0[idBloc+nBlocks],blockIdx.x,idBloc); //printf("nInteractSubBlockMid1 = %f, car nPtBlMid1Bl0 = %i, et nPtBlMid1Bl1 = %i, gpubloc = %i, bloc = %i \n",nInteractSubBlockMid1,nPtBlMid1[idBloc],nPtBlMid1[idBloc+nBlocks],blockIdx.x,idBloc); if (level==0) { nInteractSubBlockNeg=(float)(nPtBlNeg[idBloc])*(float)(nPtBlNeg[idBloc]+1)*0.5*(newBlockCvg[4*idBloc+0]); nInteractSubBlockPos=(float)(nPtBlPos[idBloc])*(float)(nPtBlPos[idBloc]+1)*0.5*(newBlockCvg[4*idBloc+1]); //nPtB0= } else { nInteractSubBlockNeg=(float)(nPtBlNeg[idBloc])*(float)(nPtBlNeg[idBloc+nBlocks])*(newBlockCvg[4*idBloc+0]); nInteractSubBlockPos=(float)(nPtBlPos[idBloc])*(float)(nPtBlPos[idBloc+nBlocks])*(newBlockCvg[4*idBloc+1]); } newBlockLvl[4*idBloc+0]=level; // bloc Neg newBlockLvl[4*idBloc+1]=level; // bloc Pos newBlockLvl[4*idBloc+2]=level+1; // bloc Mid0 newBlockLvl[4*idBloc+3]=level+1; // bloc Mid1 float nInteractAfter = nInteractSubBlockNeg+nInteractSubBlockPos+nInteractSubBlockMid0+nInteractSubBlockMid1; // s'il y a plus d'interaction apres la coupure qu'avant ou s'il y a moins d'une certaine quantité d'interaction : option 0 : copy directement les blocs enfants vers le final // default = trash whatToDo[4*idBloc+0]=3; whatToDo[4*idBloc+1]=3; whatToDo[4*idBloc+2]=3; whatToDo[4*idBloc+3]=3; // KEEP = 1 // SPLIT = 0 // DISCARD = 2 // TRASH = 3 if (nInteractAfter>nInteractBefore) { // KEEP ALL = 1 whatToDo[4*idBloc+0]=1; whatToDo[4*idBloc+1]=1; whatToDo[4*idBloc+2]=1; whatToDo[4*idBloc+3]=1; } else { if (nInteractSubBlockPos>0) { if ((nInteractSubBlockPos>=minInteract)&&((nPtBlPos[idBloc]+nPtBlPos[idBloc+nBlocks])<(nPtB0+nPtB1))) { // SPLIT = 0 whatToDo[4*idBloc+1]=0; } else { // KEEP = 1 whatToDo[4*idBloc+1]=1; } } else { if ((nPtBlPos[idBloc]+nPtBlPos[idBloc+nBlocks])>minPointsToKeep) { // DISCARD = 2 whatToDo[4*idBloc+1]=2; } else { // TRASH = 3 whatToDo[4*idBloc+1]=3; } } if (nInteractSubBlockNeg>0) { if 
((nInteractSubBlockNeg>=minInteract)&&((nPtBlNeg[idBloc]+nPtBlNeg[idBloc+nBlocks])<(nPtB0+nPtB1))) { // SPLIT = 0 whatToDo[4*idBloc+0]=0; } else { // KEEP = 1 whatToDo[4*idBloc+0]=1; } } else { if ((nPtBlNeg[idBloc]+nPtBlNeg[idBloc+nBlocks])>minPointsToKeep) { // DISCARD = 2 whatToDo[4*idBloc+0]=2; } else { // TRASH = 3 whatToDo[4*idBloc+0]=3; } } if (nInteractSubBlockMid0>0) { if ((nInteractSubBlockMid0>=minInteract)&&((nPtBlMid0[idBloc]+nPtBlMid0[idBloc+nBlocks])<(nPtB0+nPtB1))) { // SPLIT = 0 whatToDo[4*idBloc+2]=0; } else { // KEEP = 1 whatToDo[4*idBloc+2]=1; } } else { // trash whatToDo[4*idBloc+2]=3; } if (nInteractSubBlockMid1>0) { if ((nInteractSubBlockMid1>=minInteract)&&((nPtBlMid1[idBloc]+nPtBlMid1[idBloc+nBlocks])<(nPtB0+nPtB1))) { // SPLIT = 0 whatToDo[4*idBloc+3]=0; } else { // KEEP = 1 whatToDo[4*idBloc+3]=1; } } else { // TRASH = 3 //printf("Trash Mid1 \n"); whatToDo[4*idBloc+3]=3; } } //printf("Je suis le Pt %i et j'appart au bloc %i, sachant qu'on est dans le blocGPU %i \n",id_pt,id_bloc, blockIdx.x); int indexNewBloc = 4*idBloc+0; //printf("Je suis le Bloc %i et ce qu'on doit faire c'est %i pour le nouveau bloc %i \n",idBloc,whatToDo[indexNewBloc], indexNewBloc); addrBloc[indexNewBloc]=atomicAdd(& nBlocsKeep[whatToDo[indexNewBloc]], 1 ); addrPt [indexNewBloc]=atomicAdd(& nPtKeep[whatToDo[indexNewBloc]] , nPtBlTotNeg); //printf("addrBloc = %i \n", addrBloc[indexNewBloc]); indexNewBloc = 4*idBloc+1; //printf("Je suis le Bloc %i et ce qu'on doit faire c'est %i pour le nouveau bloc %i \n",idBloc,whatToDo[indexNewBloc], indexNewBloc); addrBloc[indexNewBloc]=atomicAdd(& nBlocsKeep[whatToDo[indexNewBloc]], 1 ); addrPt [indexNewBloc]=atomicAdd(& nPtKeep[whatToDo[indexNewBloc]] , nPtBlTotPos); indexNewBloc = 4*idBloc+2; //printf("Je suis le Bloc %i et ce qu'on doit faire c'est %i pour le nouveau bloc %i \n",idBloc,whatToDo[indexNewBloc], indexNewBloc); addrBloc[indexNewBloc]=atomicAdd(& nBlocsKeep[whatToDo[indexNewBloc]], 1 ); addrPt 
[indexNewBloc]=atomicAdd(& nPtKeep[whatToDo[indexNewBloc]] , nPtBlTotMid0); indexNewBloc = 4*idBloc+3; //printf("Je suis le Bloc %i et ce qu'on doit faire c'est %i pour le nouveau bloc %i \n",idBloc,whatToDo[indexNewBloc], indexNewBloc); addrBloc[indexNewBloc]=atomicAdd(& nBlocsKeep[whatToDo[indexNewBloc]], 1 ); addrPt [indexNewBloc]=atomicAdd(& nPtKeep[whatToDo[indexNewBloc]] , nPtBlTotMid1); // s'il y a zero interaction et plus d'une certaine quantité de points -> va vers les ignored : option 1 // s'il y a zero interaction et moins d'une certaine quantité de pointe -> trash : option 2 } }
4,322
/**
 * Angle Between Two Vectors A and B
 *
 * Author: Gulsum Gudukbay
 * Date: 23 December 2017
 *
 * Usage: program N blockSize [input_file]
 *   - with 2 arguments, A and B are filled with pseudo-random values
 *   - with 3 arguments, the vectors are read from input_file
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

// double precision atomic add function
// taken from https://devtalk.nvidia.com/default/topic/763119/atomic-add-operation/
// (CAS-loop emulation -- needed on devices older than SM60, which lack the
// built-in atomicAdd(double*, double))
__device__ double atomicAdd2(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do{
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

// Each thread sums A[i]*B[i] over its width_thread-element slice, then folds
// the partial sum into result[blockIdx.x] atomically.
// Precondition: result must be zero-initialized before the launch.
__device__ void dot_product(const double *A, const double *B, int numElements, int blockSize, int width_thread, double *result)
{
    int start = width_thread*(blockIdx.x * blockSize + threadIdx.x);
    double sum = 0.0;
    for(int i = start; i < start+width_thread; i++)
    {
        if(i < numElements)
            sum += A[i] * B[i];
    }
    atomicAdd2(&result[blockIdx.x], sum);
}

// Same slicing as dot_product, but accumulates the squared elements of A
// (per-block partials of |A|^2) into result[blockIdx.x].
// Precondition: result must be zero-initialized before the launch.
__device__ void mag_squared(const double *A, int numElements, int blockSize, int width_thread, double *result)
{
    int start = width_thread*(blockIdx.x * blockSize + threadIdx.x);
    double sum = 0.0;
    //sum all elements squared in the block
    for(int i = start; i < start+width_thread; i++)
    {
        if(i < numElements)
            sum += pow(A[i],2);
    }
    atomicAdd2(&result[blockIdx.x], sum);
}

// Computes per-block partial sums of |A|^2, |B|^2 and A.B in one launch.
// NOTE(review): the per-thread slice width is forwarded as width_thread+1;
// together with the host-side "+1" on the block count every element is still
// covered exactly once, but the off-by-one convention looks accidental --
// confirm before reusing this kernel elsewhere.
__global__ void find_angle(const double *A, const double *B, int numElements, int blockSize, int width_thread, double *mag1, double *mag2, double *dot_prod)
{
    mag_squared(A, numElements, blockSize, width_thread+1, mag1);
    mag_squared(B, numElements, blockSize, width_thread+1, mag2);
    dot_product(A, B, numElements, blockSize, width_thread+1, dot_prod);
    __syncthreads();
}

// Reference CPU implementation: returns the angle between A and B in radians.
double findAngleCPU(const double *A, const double *B, int numElements)
{
    double res = 0.0;
    double dot_prod = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;
    for(int i = 0; i < numElements; i++)
    {
        dot_prod += A[i] * B[i];
        mag1 += pow(A[i], 2);
        mag2 += pow(B[i], 2);
    }
    mag1 = sqrt(mag1);
    mag2 = sqrt(mag2);
    res = acos(dot_prod/(mag1*mag2));
    return res;
}

int main(int argc, char *argv[])
{
    srand (58);

    int N, blockSize, threadElts;
    double *A, *B, *d_A, *d_B;
    double *dot_prod, *mag1, *mag2;       // device-side per-block partial sums
    double *h_dot_prod, *h_mag1, *h_mag2; // host-side copies of the partials
    char* filename;
    dot_prod = NULL;
    mag1 = NULL;
    mag2 = NULL;
    threadElts = 256; // nominal number of elements processed per thread

    // FIX: validate the argument count before touching argv[1]/argv[2].
    if(argc < 3)
    {
        printf("Usage: %s N blockSize [input_file]\n", argv[0]);
        return -1;
    }

    N = atoi(argv[1]);
    blockSize = atoi(argv[2]);
    if(argc == 4)
        filename = argv[3];

    cudaEvent_t start4, stop4;
    cudaEventCreate(&start4);
    cudaEventCreate(&stop4);
    cudaEventRecord(start4);

    if(argc == 3)
    {
        A = (double*)malloc(N * sizeof(double));
        B = (double*)malloc(N * sizeof(double));
        //fill in the arrays with random numbers
        for(int i = 0; i < N; i++)
        {
            A[i] = rand() / (RAND_MAX / 100);
        }
        for(int i = 0; i < N; i++)
        {
            B[i] = rand() / (RAND_MAX / 100);
        }
    }
    else
    {
        FILE * file;
        int i;
        float tmp;
        if ((file = fopen(filename, "r+")) == NULL)
        {
            printf("ERROR: file open failed\n");
            return -1;
        }
        // the first value in the file is the element count
        fscanf(file,"%f", &tmp);
        N = (int)tmp;
        printf("%f\n", tmp);
        A = (double*)malloc(N * sizeof(double));
        B = (double*)malloc(N * sizeof(double));
        for(i = 0; i < N; i++)
        {
            fscanf(file,"%f", &tmp);
            A[i] = tmp;
        }
        for(i = 0; i < N; i++)
        {
            fscanf(file,"%f", &tmp);
            B[i] = tmp;
        }
        fclose(file);
    }

    cudaEventRecord(stop4);
    cudaEventSynchronize(stop4);
    float milliseconds4 = 0;
    cudaEventElapsedTime(&milliseconds4, start4, stop4);
    printf("Time for the array generation: %f ms\n", milliseconds4);

    // integer floor division plus one block always covers all N elements
    int no_of_blocks = (int)ceil( N / blockSize / threadElts)+1;

    printf("\nInfo\n______________________________________________________\n");
    printf("Number of elements: %d\n", N);
    printf("Number of threads per block: %d\n", blockSize);
    printf("Number of blocks will be created: %d\n", no_of_blocks);
    printf("\nTime\n______________________________________________________\n");

    h_dot_prod = (double*)malloc(no_of_blocks * sizeof(double));
    h_mag1 = (double*)malloc(no_of_blocks * sizeof(double));
    h_mag2 = (double*)malloc(no_of_blocks * sizeof(double));

    double dot_product, magnitude1, magnitude2;
    dot_product = 0.0;
    magnitude1 = 0.0;
    magnitude2 = 0.0;

    //Compute angle on CPU
    cudaEvent_t start3, stop3;
    cudaEventCreate(&start3);
    cudaEventCreate(&stop3);
    cudaEventRecord(start3);
    float cpu_result = (float)((180.0 / M_PI)*findAngleCPU(A, B, N));
    cudaEventRecord(stop3);
    cudaEventSynchronize(stop3);
    float milliseconds3 = 0;
    cudaEventElapsedTime(&milliseconds3, start3, stop3);
    printf("Time for the CPU function: %f ms\n", milliseconds3);

    cudaMalloc(&d_A, N * sizeof(double));
    cudaMalloc(&d_B, N * sizeof(double));
    cudaMalloc(&dot_prod, no_of_blocks*sizeof(double));
    cudaMalloc(&mag1, no_of_blocks*sizeof(double));
    cudaMalloc(&mag2, no_of_blocks*sizeof(double));

    // FIX: cudaMalloc does not zero device memory, but the kernel accumulates
    // into these buffers with atomicAdd2 -- without zeroing them the GPU
    // result is built on garbage. (All-zero bytes are 0.0 for IEEE doubles.)
    cudaMemset(dot_prod, 0, no_of_blocks*sizeof(double));
    cudaMemset(mag1, 0, no_of_blocks*sizeof(double));
    cudaMemset(mag2, 0, no_of_blocks*sizeof(double));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(d_A, A, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time for Host to Device transfer: %f ms\n", milliseconds);

    //KERNEL
    cudaEvent_t start2, stop2;
    float milliseconds2 = 0;
    cudaEventCreate(&start2);
    cudaEventCreate(&stop2);
    cudaEventRecord(start2);
    find_angle<<<no_of_blocks, blockSize>>>(d_A, d_B, N, blockSize, threadElts, mag1, mag2, dot_prod);
    cudaDeviceSynchronize();
    cudaEventRecord(stop2);
    cudaEventSynchronize(stop2);
    cudaEventElapsedTime(&milliseconds2, start2, stop2);
    printf("Time for the kernel execution: %f ms\n", milliseconds2);

    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
    }

    cudaEvent_t start5, stop5;
    float milliseconds5 = 0;
    cudaEventCreate(&start5);
    cudaEventCreate(&stop5);
    cudaEventRecord(start5);
    cudaMemcpy(h_dot_prod, dot_prod, no_of_blocks*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mag1, mag1, no_of_blocks*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mag2, mag2, no_of_blocks*sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop5);
    cudaEventSynchronize(stop5);
    cudaEventElapsedTime(&milliseconds5, start5, stop5);
    printf("Time for the Device to Host transfer: %f ms\n", milliseconds5);
    printf("Total execution time for GPU: %f ms\n", milliseconds5 + milliseconds2 + milliseconds);

    // reduce the per-block partials on the host
    for(int i = 0; i < no_of_blocks; i++)
    {
        magnitude1 += h_mag1[i];
        magnitude2 += h_mag2[i];
        dot_product += h_dot_prod[i];
    }
    magnitude1 = sqrt(magnitude1);
    magnitude2 = sqrt(magnitude2);

    double result = acos(dot_product/(magnitude1*magnitude2));
    printf("\nResults\n____________________________________________________\n");
    printf("CPU result: %f\n", cpu_result);
    printf("GPU result: %f\n\n", (float)((180.0 / M_PI)*result));

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(dot_prod);
    cudaFree(mag1);
    cudaFree(mag2);
    // release the host buffers as well
    free(A);
    free(B);
    free(h_dot_prod);
    free(h_mag1);
    free(h_mag2);
    return 0;
}
4,323
#include "includes.h"

// Device helper: Euclidean distance between two float vectors of length `size`.
__device__ float Dist_between_two_vec(float * v0, float *v1, int size)
{
    float dist = 0.0f;
    for (int i = 0; i < size; i++)
        dist += (v0[i] - v1[i]) * (v0[i] - v1[i]);
    return sqrtf(dist); // sqrtf: stay in single precision (sqrt promotes to double)
}

// Naive single-thread kernel variant: launch with <<<1,1>>>; writes the
// Euclidean distance between v0 and v1 to dst[0].
__global__ void Dist_between_two_vec_naive(float * v0, float *v1, int size, float * dst)
{
    float dist = 0.0f;
    for (int i = 0; i < size; i++)
    {
        float diff = v0[i] - v1[i];
        // FIX: the squared term had been commented out, so the kernel summed
        // signed differences instead of squared differences -- not a distance
        // and inconsistent with Dist_between_two_vec above.
        dist += diff * diff;
    }
    // FIX: take the square root so the result matches the reference helper.
    dst[0] = sqrtf(dist);
}
4,324
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <ctime>
#include <vector>
#include <cstdlib>
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
using namespace std;

//handlerror declaration : to display file and line numbers of erroneous lines
static void HandleError( cudaError_t err, const char *file, int line )
{
	if (err != cudaSuccess)
	{
		cout<<cudaGetErrorString(err)<<" in "<< file <<" at line "<< line<<endl; //this will print the line and filename too
	}
}

#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))//macro

// FIX: fully parenthesize the expansions; the original `((a) > (b))? (b): (a)`
// breaks under operator precedence when used inside a larger expression.
#define min(a, b) (((a) > (b)) ? (b) : (a)) //macro for min, to use in kernels
#define max(a, b) (((a) > (b)) ? (a) : (b)) //macro for max, to use in kernels

// storing RGB values for rgb colorspace images
struct pixel_RGB
{
	unsigned char r; //Red values
	unsigned char g; //Green values
	unsigned char b; //Blue Values
};

// storing values for xyz and lab colorspace images
struct pixel_XYZ
{
	float x; //X for XYZ colorspace, L for LAB colorspace
	float y; //Y for XYZ colorspace, A for LAB colorspace
	float z; //Z for XYZ colorspace, B for LAB colorspace
};

//store coordinates for each cluster centres
struct point
{
	int x; //x-coordinate
	int y; //y-coordinate
};

// Converts one RGB pixel per thread to CIE-L*a*b*. Expects a 2D launch that
// covers the img_wd x img_ht image.
__global__ void RGB2LAB(pixel_RGB* img, int img_wd, int img_ht, pixel_XYZ* LAB_img)
{
	unsigned int c=blockIdx.x*blockDim.x + threadIdx.x; //column index from the thread's x-index
	unsigned int r=blockIdx.y*blockDim.y + threadIdx.y; //row index from the thread's y-index
	unsigned int idx=r*img_wd+c; //row major index

	// FIX: was `idx > img_wd*img_ht`, which let the thread at exactly
	// img_wd*img_ht write one element past the end of the buffers.
	if(idx>=img_wd*img_ht) //degenerate values
		return;

	//read the RGB channel values
	int R=img[idx].r;
	int G=img[idx].g;
	int B=img[idx].b;

	//normalize these values
	double var_R=double(R)/255;
	double var_G=double(G)/255;
	double var_B=double(B)/255;

	//linearize it to give XYZ colorspace
	double X = var_R * 0.4124 + var_G * 0.3576 + var_B * 0.1805;
	double Y = var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722;
	double Z = var_R * 0.0193 + var_G * 0.1192 + var_B * 0.9505;

	//Normalize XYZ values
	X=X/0.95047;
	Y=Y/1.00000;
	Z=Z/1.088969;

	//Conversion of XYZ to LAB Values
	// FIX: `pow(Y,1/3)` used integer division (1/3 == 0), so Y3 was always 1
	// and the L channel came out wrong for every pixel.
	double Y3=pow(Y,1.0/3.0);
	double T=0.008856;//threshold
	// FIX: `16/116` is integer division (== 0); use 16.0/116.0 in the
	// below-threshold branch of each transfer function.
	double fx=(X>T)?(pow(X,double(1)/3)):(7.787*X+(16.0/116.0));
	double fy=(Y>T)?(pow(Y,double(1)/3)):(7.787*Y+(16.0/116.0));
	double fz=(Z>T)?(pow(Z,double(1)/3)):(7.787*Z+(16.0/116.0));
	double L=(Y>T)?(116*Y3 - 16):(903.3*Y);
	double a = 500 * (fx - fy);
	double b = 200 * (fy - fz);

	//saving the calculations to image
	LAB_img[idx].x=L;
	LAB_img[idx].y=a;
	LAB_img[idx].z=b;
}

//find the index of min value in a given region of the gradient image
// NOTE(review): the loops are exclusive of x2/y2, so this scans a 2x2 window
// rather than the commented 3x3 neighbourhood, and the coordinates are not
// clamped to the image borders -- confirm intent before relying on this.
int min_index(float* array, int size, int x1, int x2, int y1, int y2, int img_wd)
{
	int index=(x1+1)+(y1+1)*img_wd; //initialize to the centre index
	for(int x=x1;x<x2;x++)
	{
		for(int y=y1;y<y2;y++)
		{
			if(array[y*img_wd+x]<array[index])
				index=y*img_wd+x;
		}
	}
	return index;
}

// One thread per cluster centre: scans its 2S x 2S neighbourhood and, for each
// pixel, updates d_gpu/labels_gpu when this centre's combined colour+space
// distance D improves on the pixel's current best.
__global__ void label_assignment(int* labels_gpu, pixel_XYZ* Pixel_LAB_gpu, point* centers_gpu, int S, int img_wd, int img_ht, int m, float* d_gpu, int k1)
{
	unsigned int index = blockIdx.x*blockDim.x+ threadIdx.x; //thread index of cluster center
	if(index>=k1) //for degenerate cases
		return;

	// finding centre coordinates
	int x_center=centers_gpu[index].x;//find x coordinate of the cluster centre
	int y_center=centers_gpu[index].y;//find y coordinate of the cluster centre
	int centre_idx=y_center*img_wd+x_center;//find index in image row major form

	int L_x1=max(0, x_center-S), L_x2=min(x_center+S,img_wd-1),L_y1= max(0,y_center-S),L_y2= min(y_center+S, img_ht-1);

	for(int x_coord=L_x1;x_coord<=L_x2;x_coord++) //look in 2S x 2S neighborhood
	{//taking care it doesn't go out of the image
		for(int y_coord=L_y1;y_coord<=L_y2;y_coord++)
		{
			int j=y_coord*img_wd+x_coord; // find global index of the pixel
			float d_c = sqrt(pow((Pixel_LAB_gpu[centre_idx].x-Pixel_LAB_gpu[j].x),2) + pow((Pixel_LAB_gpu[centre_idx].y-Pixel_LAB_gpu[j].y),2) + pow((Pixel_LAB_gpu[centre_idx].z-Pixel_LAB_gpu[j].z),2)); //color proximity
			float d_s = sqrtf((x_coord-x_center)*(x_coord-x_center)+(y_coord-y_center)*(y_coord-y_center)); //spatial proximity
			float D=powf(powf(d_c,2)+powf(m*d_s/S,2),0.5);//effective distance
			//if it is lesser than current distance, update
			if(D<d_gpu[j])
			{
				d_gpu[j]=D;//store new center
				labels_gpu[j]=index;//label as the number of cluster centre
			}
		}
	}
}

// One thread per cluster centre: recomputes the centre as the mean coordinate
// of the pixels in its 2S x 2S neighbourhood that carry the centre's label.
__global__ void update_centres(int* labels_gpu, point* centers_gpu, int S, int img_wd, int img_ht, int k1)
{
	size_t index = blockIdx.x*blockDim.x+ threadIdx.x; //thread index
	if(index>=k1)
		return;

	// finding centre coordinates
	int centre_x=centers_gpu[index].x;//find x-coordinate of the centre
	int centre_y=centers_gpu[index].y;//find y-coordinate of the centre

	//finding the label of cluster, this will be center's label
	int i=labels_gpu[centre_y*img_wd+centre_x]; //finding the label of centre

	int x_mean=0, y_mean=0, count=0;//mean will store update cluster
	int L_x1=max(0, centre_x-S), L_x2=min(centre_x+S,img_wd-1),L_y1= max(0,centre_y-S),L_y2= min(centre_y+S, img_ht-1);
	//storing 2Sx2S coordinate ranges from(L_x1,L_y1) to (L_x2,L_y2)
	for(int x_coord=L_x1;x_coord<=L_x2;x_coord++) //look in 2S x 2S neighborhood
	{
		for(int y_coord=L_y1;y_coord<=L_y2;y_coord++)
		{
			int pt_idx=y_coord*img_wd+x_coord;//index of the points in the neighborhood
			if(labels_gpu[pt_idx]==i)//if the label is the cluster centres, add x and y coordinates to x_mean and y_mean
			{
				x_mean+=x_coord;
				y_mean+=y_coord;
				count++;//increment the counter
			}
		}
	}
	if(count)//if any counts
	{
		centers_gpu[index].x=x_mean/count;//calculate mean x and y coordinate
		centers_gpu[index].y=y_mean/count;
	}
}

//calculating residual error(MSE) between previous and current centres
float error_calculation(point* centers_curr,point* centers_prev,int N)
{
	float err=0; //initialize MSE to zero
	for(int i=0;i<N;i++)
	{
		err+=pow((centers_curr[i].x-centers_prev[i].x),2) + pow((centers_curr[i].y-centers_prev[i].y),2);
		//squared error between current and previous coordinates
	}
	err=((float)err)/N; //take mean of the squared error
	return err;
}

int main(int argc, char* argv[])
{
	cudaEvent_t start, stop, begin, end;//to store time intervals of execution
	cout<<"Simple Linear Iterative Clustering: GPU IMPLEMENTATION"<<endl<<endl;

	//create event, now these can be used for record
	HANDLE_ERROR(cudaEventCreate(&start));
	HANDLE_ERROR(cudaEventCreate(&stop));
	HANDLE_ERROR(cudaEventCreate(&begin));
	HANDLE_ERROR(cudaEventCreate(&end));

	HANDLE_ERROR(cudaEventRecord(begin));//measure time for begin the read

	if(argc != 4) //there should be three arguments
	{
		cout<<" program_name image_name num_superpixels control_constant"<<endl;
		return 1; //exit and return an error
	}

	//READING FILE
	//reading file line by line
	ifstream infile;
	infile.open(argv[1]); //opening the file
	string line;
	int img_wd, img_ht;
	int max_pixel_val;

	//line one contains P6, line 2 mentions about gimp version, line 3 stores the height and width
	getline(infile, line);
	istringstream iss1(line); //reading first line to check format
	int word;
	string str1;
	iss1>>str1;
	if(str1.compare("P6")!=0) //comparing magic number
	{
		cout<<"wrong file format"<<endl;
		return 1;
	}

	getline(infile,line); //this line has version related comment, hence ignoring
	getline(infile,line); //this stores image dims
	istringstream iss2(line);
	iss2>>word;// this will be image width
	img_wd=word;
	iss2>>word;// this will be image height
	img_ht=word;

	//storing the pixels as 1d images(row major)
	pixel_RGB *Pixel = (pixel_RGB*)malloc((img_ht)*(img_wd)*sizeof(pixel_RGB));
	int pix_cnt=0, cnt=0;

	getline(infile,line); //this stores max value
	istringstream iss3(line);
	iss3>>word;
	max_pixel_val=word;//max pixel value

	unsigned int val;
	// NOTE(review): reading binary P6 pixel data with getline is fragile --
	// 0x0A bytes inside the pixel stream are consumed as delimiters and the
	// i<=line.length() read substitutes '\0' for them. Correct only for
	// images whose channel values avoid the newline byte; verify the input.
	//read line by line
	while (getline(infile, line))
	{
		istringstream iss4(line);
		for (int i=0; i<=line.length();i++)
		{
			if(pix_cnt<img_ht*img_wd) //if it a valid pixel
			{
				val=((int)line[i]); //read the current line
				if(cnt%3==0) //in case of R channel
				{
					Pixel[pix_cnt].r=val; //store R channel value
				}
				else if(cnt%3==1) //in case of G channel
				{
					Pixel[pix_cnt].g=val;//storing G value
				}
				else
				{
					Pixel[pix_cnt].b=val;//in case of B channel, store it
					pix_cnt++; //move to next pixel
				}
				cnt++; //next value read
			}
		}
	}

	HANDLE_ERROR(cudaEventRecord(stop));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	float milliseconds=0;
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, begin, stop));//time taken to read and save the image
	cout<<"Image read in "<<milliseconds<<" ms"<<endl;

	//COLOR CONVERSION
	//RGB->CIE-L*ab
	cudaDeviceProp prop;
	HANDLE_ERROR(cudaGetDeviceProperties(&prop,0));

	float thread_block=sqrt(prop.maxThreadsPerBlock); //2D blocks used
	dim3 DimGrid(ceil(img_wd/thread_block),ceil(img_ht/thread_block),1); //image saved as a 2D grid
	dim3 DimBlock(thread_block,thread_block,1); //blocks are 2D

	pixel_RGB* Pixel_gpu; //to copy img to gpu
	HANDLE_ERROR(cudaMalloc(&Pixel_gpu,img_ht*img_wd*sizeof(pixel_RGB)));//allocating memory on gpu for this
	HANDLE_ERROR(cudaMemcpy(Pixel_gpu,Pixel,img_wd*img_ht*sizeof(pixel_RGB),cudaMemcpyHostToDevice));//copying the rgb image value to gpu

	pixel_XYZ* Pixel_lab_gpu; //to store LAB image on gpu
	pixel_XYZ* Pixel_LAB=(pixel_XYZ*)malloc(img_ht*img_wd*sizeof(pixel_XYZ)); // to store LAB image on cpu
	HANDLE_ERROR(cudaMalloc(&Pixel_lab_gpu,img_ht*img_wd*sizeof(pixel_XYZ)));//allocating LAB image memory on gpu
	HANDLE_ERROR(cudaMemcpy(Pixel_lab_gpu,Pixel_LAB,img_wd*img_ht*sizeof(pixel_XYZ),cudaMemcpyHostToDevice));//copying lab image values to gpu, currently random values

	HANDLE_ERROR(cudaEventRecord(start)); //start recording kernel time
	RGB2LAB<<<DimGrid,DimBlock>>>(Pixel_gpu, img_wd, img_ht, Pixel_lab_gpu); //calling the kernel
	HANDLE_ERROR(cudaEventRecord(stop));//stop recording kernel time
	HANDLE_ERROR(cudaEventSynchronize(stop));//synchronizing
	HANDLE_ERROR(cudaMemcpy(Pixel_LAB,Pixel_lab_gpu,img_wd*img_ht*sizeof(pixel_XYZ),cudaMemcpyDeviceToHost));//copying back the LAB values
	HANDLE_ERROR(cudaFree(Pixel_lab_gpu));//freeing the cuda memory
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
	cout<<"Colorspace conversion done in "<<milliseconds<<" ms"<<endl;

	//IMPLEMENTING SLIC ALGORITHM
	int N = img_ht*img_wd; //number of pixels in the images
	int K = atoi(argv[2]); //number of superpixels desired
	int S= floor(sqrt(N/K));//size of each superpixel
	int m=atoi(argv[3]); //compactness control constant
	int k1=ceil(img_ht*1.0/S)*ceil(img_wd*1.0/S);//actual number of superpixels

	cout<<"Image size: "<<img_wd<<" x "<<img_ht<<endl;
	cout<<"Using SLIC algorithm to get "<<k1<<" superpixels of approximate size "<<S<<" x "<<S<<", area "<<S*S<<" each, also m/S="<<1.0*m/S<<endl;

	point* centers_curr=(point*)malloc(k1*sizeof(point));

	//initialize centers
	int center_ctr=0;
	//centres are initialized in a regular grid, each separated by S distance to the nearest centre
	// centres start from (S/2,S/2)
	HANDLE_ERROR(cudaEventRecord(start));
	for(int j=S/2;j<S*ceil(img_ht*1.0/S);j=j+S)
	{
		for(int i=S/2;i<S*ceil(img_wd*1.0/S);i=i+S)
		{
			// NOTE(review): the clamped fallbacks mix i and j -- looks like a
			// typo for (img_wd+i-S)/2 / (img_ht+j-S)/2; kept as-is, confirm.
			int val1=((i>=img_wd)?(img_wd+j-S)/2:i);//to make sure it doesn't go out of image
			int val2=((j>=img_ht)?(img_ht+i-S)/2:j);//same as above in y coordinate
			//store x and y coordinates into the array
			centers_curr[center_ctr].x=val1;
			centers_curr[center_ctr].y=val2;
			center_ctr++;
		}
	}
	HANDLE_ERROR(cudaEventRecord(stop));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
	cout<<"centres initialized in "<<milliseconds<<" ms"<<endl;

	////perturb centers
	HANDLE_ERROR(cudaEventRecord(start));
	float* G=(float*)malloc(N*sizeof(float)); //to store gradient in 3x3 neighborhood
	//gradient is calculated as : G(x, y) = |I(x + 1, y) − I(x − 1, y)|^2 + |I(x, y + 1) − I(x, y − 1)|^2
	for(int i=0; i<img_wd;i++)//x-coordinate
	{
		for(int j=0; j<img_ht;j++)//y-coordinate
		{
			int index=j*img_wd+i;//calculating the index, row major
			//To store L,a, b channels for points (x+1,y),(x-1,y),(x,y+1),(x,y-1)
			float L1, L2, L3, L4, a1, a2, a3, a4, b1, b2, b3, b4;
			//initializing them to zero, so as to give padding effect when at edges
			L1=L2=L3=L4=a1=a2=a3=a4=b1=b2=b3=b4=0;
			// pt1 is point(x+1, y),pt 2 is point(x-1,y),pt3 is point(x,y+1), pt4 is point(x,y-1)
			//replace by actual intensities in LAB colorspace when the pixel exists
			if(i+1<img_wd)
				L1=Pixel_LAB[j*img_wd+i+1].x, a1=Pixel_LAB[j*img_wd+i+1].y, b1=Pixel_LAB[j*img_wd+i+1].z;
			if(i-1>0)
				L2=Pixel_LAB[j*img_wd+i-1].x, a2=Pixel_LAB[j*img_wd+i-1].y, b2=Pixel_LAB[j*img_wd+i-1].z;
			if(j+1<img_ht)
				L3=Pixel_LAB[(j+1)*img_wd+i].x, a3=Pixel_LAB[(j+1)*img_wd+i].y, b3=Pixel_LAB[(j+1)*img_wd+i].z;
			if(j-1>0)
				L4=Pixel_LAB[(j-1)*img_wd+i].x, a4=Pixel_LAB[(j-1)*img_wd+i].y, b4=Pixel_LAB[(j-1)*img_wd+i].z;
			//Calculating the gradient
			G[index]=pow(L1-L2,2) + pow(a1-a2,2) + pow(b1-b2,2) + pow(L3-L4,2) + pow(a3-a4,2) + pow(b3-b4,2);
		}
	}

	for(int i=0; i<k1;i++) ////for every cluster center
	{
		//the minimum gradient is needed in the region (x-1,y-1) to (x+1,y+1)
		int x1=centers_curr[i].x-1;
		int x2=centers_curr[i].x+1;
		int y1=centers_curr[i].y-1;
		int y2=centers_curr[i].y+1;
		int index = min_index(G, N, x1, x2, y1, y2, img_wd);//finding minimum index in this 3x3 search region
		//calculating new x and y coordinates for the centre
		centers_curr[i].x=(floor)(index%img_wd);
		centers_curr[i].y=(floor)(index/img_wd);
	}
	HANDLE_ERROR(cudaEventRecord(stop));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
	cout<<"Centres perturbed in "<<milliseconds<<" ms"<<endl;

	HANDLE_ERROR(cudaEventRecord(start));
	int* labels=(int*)malloc(N*sizeof(int));//this will be storing labels for every pixel
	float* d=(float*)malloc(N*sizeof(float)); //this will be storing distance measure of every pixel to its cluster center

	//initializing the labels and distance measures
	for(int idx=0;idx<N;idx++)
	{
		labels[idx]=-1; //unlabelled
		d[idx]=60000; //a high value
	}
	HANDLE_ERROR(cudaEventRecord(stop));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
	cout<<"labels and distance measures initialized in "<<milliseconds<<" ms"<<endl;

	float error=100;// initialize error to a high value

	//label assignment
	point* centers_gpu; //for storing the centers in gpu
	float* d_gpu; //for storing distance measures in gpu
	int* labels_gpu; //for storing labels in gpu
	pixel_XYZ* Pixel_LAB_gpu;//for storing LAB image in gpu
	point* centers_prev=(point*)malloc(k1*sizeof(point));// this will be storing the cluster centres for every previous epoch

	HANDLE_ERROR(cudaMalloc(&centers_gpu, k1*sizeof(point)));//allocating memory for centers on gpu
	HANDLE_ERROR(cudaMalloc(&labels_gpu, N*sizeof(int)));//allocating memory for labels on gpu
	HANDLE_ERROR(cudaMalloc(&Pixel_LAB_gpu, N*sizeof(pixel_XYZ)));//allocating memory for LAB image on gpu
	HANDLE_ERROR(cudaMalloc(&d_gpu, N*sizeof(float)));//allocating memory for distance measures on gpu

	HANDLE_ERROR(cudaMemcpy(Pixel_LAB_gpu, Pixel_LAB, N*sizeof(pixel_XYZ), cudaMemcpyHostToDevice));//copying LAB_image from host to device

	unsigned int thread_block1=prop.maxThreadsPerBlock;//1D grid and block
	int epoch=0; //initialize epoch

	while(error>1)
	{
		cout<<endl<<"Epoch = "<<epoch<<endl;
		for(int i=0; i<k1;i++)//for every cluster centre
		{
			centers_prev[i].x=centers_curr[i].x; //find x coordinate of the cluster centre
			centers_prev[i].y=centers_curr[i].y; //find y coordinate of the cluster centre
		}

		HANDLE_ERROR(cudaMemcpy(labels_gpu, labels, N*sizeof(int), cudaMemcpyHostToDevice));//copying labels on gpu
		HANDLE_ERROR(cudaMemcpy(centers_gpu, centers_curr, k1*sizeof(point), cudaMemcpyHostToDevice));//copying centers on gpu
		HANDLE_ERROR(cudaMemcpy(d_gpu, d , N*sizeof(float), cudaMemcpyHostToDevice));//copying distance measures on gpu

		dim3 DimGrid1(1+(k1/thread_block1),1,1); //1D grid
		dim3 DimBlock1(thread_block1,1,1);//1D block

		HANDLE_ERROR(cudaEventRecord(start));
		label_assignment<<<DimGrid1,DimBlock1>>>(labels_gpu,Pixel_LAB_gpu,centers_gpu,S,img_wd, img_ht,m, d_gpu, k1);
		HANDLE_ERROR(cudaEventRecord(stop));
		HANDLE_ERROR(cudaEventSynchronize(stop));
		HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
		cout<<"Label and distance assignment done in "<<milliseconds<<" ms"<<endl;

		HANDLE_ERROR(cudaEventRecord(start));
		update_centres<<<DimGrid1,DimBlock1>>>(labels_gpu, centers_gpu, S, img_wd, img_ht, k1);
		HANDLE_ERROR(cudaEventRecord(stop));
		HANDLE_ERROR(cudaEventSynchronize(stop));
		HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
		cout<<"Centers updated in "<<milliseconds<<" ms"<<endl;

		HANDLE_ERROR(cudaMemcpy(centers_curr, centers_gpu, k1*sizeof(point), cudaMemcpyDeviceToHost)); //copying centers back to cpu
		HANDLE_ERROR(cudaMemcpy(d, d_gpu, N*sizeof(float), cudaMemcpyDeviceToHost));//copying distances back to cpu
		HANDLE_ERROR(cudaMemcpy(labels, labels_gpu, N*sizeof(int), cudaMemcpyDeviceToHost));//copying labels back to cpu

		HANDLE_ERROR(cudaEventRecord(start));
		error= error_calculation(centers_curr, centers_prev,k1);
		HANDLE_ERROR(cudaEventRecord(stop));
		HANDLE_ERROR(cudaEventSynchronize(stop));
		HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
		cout<<"MSE = "<<error<<" and is calculated in "<<milliseconds<<" ms"<<endl;

		epoch++; //next epoch
	}

	//OUTPUT STORAGE
	pixel_RGB *rgb=(pixel_RGB*)malloc((img_ht)*(img_wd)*sizeof(pixel_RGB));

	HANDLE_ERROR(cudaEventRecord(start));
	///enforce connectivity
	//for every point, look into its 4 neighbour labels, if all are same and different from pixel's label, change its label
	for(int x=0; x<img_wd; x++)
	{
		for(int y=0; y<img_ht; y++)
		{
			// FIX: the neighbour indices were clamped with the wrong min/max
			// arguments (e.g. max(img_wd,x+1) is always img_wd and
			// min(0,x-1) is never positive), producing out-of-bounds and
			// wrong-pixel reads. Clamp each neighbour into the image instead.
			int L_t=labels[max(y-1,0)*img_wd+x];
			int L_b=labels[min(y+1,img_ht-1)*img_wd+x];
			int L_r=labels[y*img_wd+min(x+1,img_wd-1)];
			int L_l=labels[y*img_wd+max(x-1,0)];
			if(L_t==L_b && L_b==L_r && L_r==L_l)
			{
				labels[y*img_wd+x]=L_t;
			}
		}
	}
	HANDLE_ERROR(cudaEventRecord(stop));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
	cout<<"connectivity enforced in "<<milliseconds<<" ms"<<endl;

	// FIX: the original called random_shuffle(labels, labels+k1) here "to
	// shuffle the labels", but that permutes the per-pixel labels of the
	// first k1 *pixels* (corrupting the segmentation) rather than remapping
	// label colours, so it has been removed.

	HANDLE_ERROR(cudaEventRecord(start));
	float alpha=0;
	for(int i=0;i<img_ht*img_wd;i++)
	{
		int label_val=labels[i];
		rgb[i].r=alpha*(21*label_val%255) + (1-alpha)*Pixel[i].r;
		rgb[i].g=alpha*(47*label_val%255) + (1-alpha)*Pixel[i].g;
		rgb[i].b=alpha*(173*label_val%255) + (1-alpha)*Pixel[i].b;
	}

	//sobel edge detection on the label image, to draw superpixel boundaries
	int valX, valY = 0;
	int GX [3][3];
	int GY [3][3];

	//Sobel Horizontal Mask
	GX[0][0] = 1; GX[0][1] = 0; GX[0][2] = -1;
	GX[1][0] = 2; GX[1][1] = 0; GX[1][2] = -2;
	GX[2][0] = 1; GX[2][1] = 0; GX[2][2] = -1;

	//Sobel Vertical Mask
	GY[0][0] = 1; GY[0][1] = 2; GY[0][2] = 1;
	GY[1][0] = 0; GY[1][1] = 0; GY[1][2] = 0;
	GY[2][0] = -1; GY[2][1] =-2; GY[2][2] = -1;

	double val1;
	for(int i=0;i<img_wd;i++)
	{
		for(int j=0;j<img_ht;j++)
		{
			if(i==0||i==img_wd-1||j==0||j==img_ht-1)
			{
				valX=0;
				valY=0;
			}
			else
			{
				valX=0, valY=0;
				for (int x = -1; x <= 1; x++)
				{
					for (int y = -1; y <= 1; y++)
					{
						valX = valX + labels[i+x+(j+y)*img_wd] * GX[1+x][1+y];
						valY = valY + labels[i+x+(j+y)*img_wd] * GY[1+x][1+y];
					}
				}
			}
			val1=sqrt(valX*valX + valY*valY);
			if(val1>0) //label discontinuity => superpixel boundary => paint black
			{
				rgb[j*img_wd+i].r=0;
				rgb[j*img_wd+i].g=0;
				rgb[j*img_wd+i].b=0;
			}
		}
	}
	HANDLE_ERROR(cudaEventRecord(stop));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	HANDLE_ERROR(cudaEventElapsedTime(&milliseconds, start, stop));//get the time in milliseconds
	cout<<"Output image prepared in "<<milliseconds <<" ms"<<endl;

	//OUTPUT STORAGE
	HANDLE_ERROR(cudaEventRecord(start));
	ofstream ofs;
	ofs.open("output_gpu.ppm", ofstream::out);
	ofs<<"P6\n"<<img_wd<<" "<<img_ht<<"\n"<<max_pixel_val<<"\n";
	for(int j=0; j <img_ht*img_wd;j++)
	{
		ofs<<rgb[j].r<<rgb[j].g<<rgb[j].b;
	}
	ofs.close();

	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&milliseconds, start, end);//get the time in milliseconds
	cout<<"Image saved in "<<milliseconds<<" ms"<<endl;

	cudaEventElapsedTime(&milliseconds, begin, end);//get time for whole clustering
	cout<<"Clustering done in "<<milliseconds<<" ms"<<endl;

	//FIX: release device and host buffers (the original leaked everything)
	HANDLE_ERROR(cudaFree(Pixel_gpu));
	HANDLE_ERROR(cudaFree(centers_gpu));
	HANDLE_ERROR(cudaFree(labels_gpu));
	HANDLE_ERROR(cudaFree(Pixel_LAB_gpu));
	HANDLE_ERROR(cudaFree(d_gpu));
	free(Pixel);
	free(Pixel_LAB);
	free(G);
	free(labels);
	free(d);
	free(centers_curr);
	free(centers_prev);
	free(rgb);

	return 0;
}
4,325
#include "includes.h"

// Normalizes x in place: x[i] = (x[i] - mean[j]) / sqrt(variance[j] + eps),
// where j is the channel of element i under a [b][c][wxh] row-major layout.
// mean/variance hold one value per channel (length c).
__global__ void _norm_forward_kernel(float *x, float *mean, float *variance, int b, int c, int wxh)
{
    int ind = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    // Guard first: threads past the end do no work (and no index math).
    if (ind >= b * c * wxh) return;
    int j = (ind / wxh) % c; // channel index of element ind
    // FIX: use sqrtf -- sqrt() promotes the whole expression to double in a
    // single-precision kernel for no benefit.
    x[ind] = (x[ind] - mean[j]) / (sqrtf(variance[j] + 0.000001f));
}
4,326
#include <stdio.h>

#define BLOCKS 1
#define THREADS 256

// Each thread prints a greeting tagged with its global thread index.
__global__ void kernal()
{
	int globalId = threadIdx.x + blockIdx.x*blockDim.x;
	printf("Hello world, I'm thread number %d \n", globalId);
}

int main()
{
	// Launch BLOCKS blocks of THREADS threads each.
	kernal <<<BLOCKS, THREADS>>> ();

	// Block the host until all device-side printf output is flushed.
	cudaDeviceSynchronize();
}
4,327
#include "includes.h"

// Backward pass of a max-pooling layer: one thread per element of the layer
// *input* (n = b*in_c*in_h*in_w total). Each thread scatters gradient back by
// scanning the pooled outputs whose window could contain it and summing the
// deltas of those outputs that actually selected it (via `indexes`).
// Layout is row-major [batch][channel][height][width] for both tensors.
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
    // pooled output spatial dimensions
    int h = (in_h + 2 * pad) / stride;
    int w = (in_w + 2 * pad) / stride;
    int c = in_c;
    // how many output cells away (in each direction) a window of `size`
    // advanced by `stride` can still overlap this input element
    int area = (size - 1) / stride;

    // flat global thread id over a (possibly 2D) grid of 1D blocks
    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= n) return;

    int index = id;          // flat index of this input element (matched against `indexes`)
    // decompose the flat id into (b, k, i, j) = (batch, channel, row, col)
    int j = id % in_w;
    id /= in_w;
    int i = id % in_h;
    id /= in_h;
    int k = id % in_c;
    id /= in_c;
    int b = id;

    // padding shifts input coordinates into the padded frame
    int w_offset = -pad;
    int h_offset = -pad;

    float d = 0;             // accumulated gradient for this input element
    int l, m;
    // scan the (2*area+1)^2 neighbourhood of candidate output cells
    for (l = -area; l < area + 1; ++l)
    {
        for (m = -area; m < area + 1; ++m)
        {
            int out_w = (j - w_offset) / stride + m;
            int out_h = (i - h_offset) / stride + l;
            int out_index = out_w + w * (out_h + h * (k + c * b));
            int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
            // only outputs whose argmax (`indexes`) was this element contribute
            d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
        }
    }
    prev_delta[index] += d;
}
4,328
// Element-wise product: dest[i] = a[i] * b[i].
extern "C"
__global__ void kMul(double* a, double* b, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	dest[i] = a[i] * b[i];
}

// Tiles the m-element array a cyclically across the n-element dest.
extern "C"
__global__ void kFillArray(double* a, int m, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	dest[i] = a[i % m];
}

// Fills dest with the scalar v.
extern "C"
__global__ void kFill(double v, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	dest[i] = v;
}

// Logistic sigmoid: dest[i] = 1 / (1 + e^(-a[i])).
extern "C"
__global__ void kSigmoid(double* a, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	dest[i] = 1 / (1 + exp(-a[i]));
}

// Hyperbolic tangent, element-wise.
extern "C"
__global__ void kTanh(double* a, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	dest[i] = tanh(a[i]);
}

// In-place element-wise power: a[i] = a[i]^y.
extern "C"
__global__ void kPow(double* a, double y, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	a[i] = pow(a[i], y);
}

// In-place reciprocal with a zero guard: 0 stays 0 instead of producing inf.
extern "C"
__global__ void kInverseElements(double* a, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	a[i] = (a[i] == 0.0) ? 0.0 : 1.0 / a[i];
}

// In-place element-wise square root.
extern "C"
__global__ void kSqrt(double* a, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	a[i] = sqrt(a[i]);
}

// Divides each run of m consecutive dest elements by a[i/m];
// division by zero yields 0 instead of inf/nan.
extern "C"
__global__ void kDivByColumnVector(double *a, int m, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	const double divisor = a[i / m];
	dest[i] = (divisor == 0.0) ? 0.0 : dest[i] / divisor;
}

// Multiplies each run of m consecutive dest elements by a[i/m].
extern "C"
__global__ void kMulByColumnVector(double *a, int m, double* dest, int n)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	dest[i] = dest[i] * a[i / m];
}
4,329
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;
#define CEIL(a,b) ((a-1)/b+1)
#define N 1024
typedef long long int lli;

// Hillis-Steele inclusive scan of one N-element block in shared memory.
// Writes the scanned values back into d_in and the block total to d_out[bid].
__global__ void Inclusive_Scan(lli *d_in, lli* d_out)
{
	__shared__ lli sh_array[N];
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	int tid = threadIdx.x;
	int bid = blockIdx.x;

	// Copying data from global to shared memory
	sh_array[tid] = d_in[id];
	__syncthreads();

	for(int step = 1; step < N; step *= 2)
	{
		// FIX: the original placed __syncthreads() inside `if(tid >= step)`,
		// i.e. inside divergent control flow, which is undefined behaviour.
		// Split into a read phase, a full-block barrier, then a write phase.
		lli temp = 0;
		if(tid >= step)
			temp = sh_array[tid-step];
		__syncthreads();
		if(tid >= step)
			sh_array[tid] += temp;
		__syncthreads();
	}

	d_in[id] = sh_array[tid];
	// block total = last element of the inclusive scan
	if(tid == (N - 1))
		d_out[bid] = sh_array[tid];
}

// Adds the running total of all preceding blocks (d_out[bid-1], inclusive
// after the second-level scan) to every element of block bid.
__global__ void Add(lli *d_in, lli *d_out)
{
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	int bid = blockIdx.x;
	if(bid > 0)
		d_in[id] += d_out[bid-1];
}

int main()
{
	lli size;
	cout << "Enter size of the array\n";
	cin >> size;

	int bytes = size * sizeof(lli);
	int reduced_size = (int)ceil(1.0*size/N); // number of scan blocks
	int reduced_bytes = reduced_size * sizeof(lli);
	(void)reduced_bytes;

	// FIX: use heap storage instead of non-standard, stack-limited VLAs.
	vector<lli> h_in(size), h_out(size);

	srand(time(0));
	for(lli i=0; i<size; i++)
	{
		h_in[i] = rand()%100;
	}
	for(lli i=0; i<size; i++)
	{
		cout << h_in[i] << " ";
	}
	cout <<"\n";

	lli *d_in, *d_out, *d_sum;
	cudaMalloc((void**)&d_in, reduced_size*N*sizeof(lli));
	// FIX: allocate N entries for d_out -- the second-level scan launches a
	// full block of N threads and reads d_out[0..N-1]; with only
	// reduced_size entries that read was out of bounds.
	// (Assumes reduced_size <= N, i.e. size <= N*N.)
	cudaMalloc((void**)&d_out, N*sizeof(lli));
	cudaMalloc((void**)&d_sum, sizeof(lli));

	// FIX: zero the padded tail of d_in and all of d_out, so the last,
	// partially-filled block scans zeros rather than uninitialized memory.
	cudaMemset(d_in, 0, reduced_size*N*sizeof(lli));
	cudaMemset(d_out, 0, N*sizeof(lli));

	cudaMemcpy(d_in, h_in.data(), bytes, cudaMemcpyHostToDevice);

	Inclusive_Scan <<< reduced_size, N >>> (d_in, d_out);
	if(size > N)
	{
		// scan the per-block totals, then add each block's prefix offset
		Inclusive_Scan <<< 1, N >>> (d_out, d_sum);
		Add <<< reduced_size, N >>> (d_in, d_out);
	}
	cudaMemcpy(h_out.data(), d_in, bytes, cudaMemcpyDeviceToHost);

	cudaFree(d_in);
	cudaFree(d_out);
	cudaFree(d_sum);

	cout << "Inclusive Scan Array : \n";
	for(lli i=0; i<size; i++)
		cout << h_out[i] << " ";
}
4,330
#include "includes.h"

// Scales the first three array entries of each block's segment by t.
// Only lanes 0, 1 and 2 of every block write; all other threads are idle.
__global__ void set_dynamic_positions(float *arr, float t)
{
	const int lane = threadIdx.x;                    // index within the block
	const int base = blockIdx.x * blockDim.x;        // start of this block's segment
	const int idx  = base + lane;                    // global element index

	// lane is non-negative, so "lane < 3" is exactly lanes 0, 1, 2
	if (lane < 3)
	{
		arr[idx] *= t;
	}
}
4,331
/*
 * Word-count MapReduce skeleton.
 *
 * Pipeline (all driven by mapReduce()):
 *   1. map_input_split  - host splits each input file into <key,value>
 *      records: key = byte offset (as a string), value = one text line.
 *   2. map_count_phase  - GPU pass that, per record, measures how many
 *      words it contains and how many key/value bytes they will need.
 *   3. map_phase        - GPU pass that emits one <word,1> intermediate
 *      pair per word into pre-sized flat buffers.
 *   4. sort             - host-side quicksort of the intermediate keys.
 */
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>

/* Signature of the user-supplied device "map count" function (defined
 * near the bottom of this file via the same macro). */
#define MAP_COUNT __device__ void mapCount(char*key,char*value,size_t key_size, size_t value_size,int*key_im_size,int*value_im_size,int*map_im_num,int threadID)
#define EMIT_IM_COUNT(im_key_size,im_value_size) emitMapCount(im_key_size,im_value_size,word_num,key_im_size,value_im_size,map_im_num,threadID)

/* Singly linked list of input file names. */
typedef struct MapFileList { char* filename; struct MapFileList* next; }MapFileList;

typedef enum InputFormat{TextInputFormat,KeyValueInputFormat,SequenceFileInputFormat} input_format;

/* Byte offsets/sizes of one <key,value> record inside the flat arrays. */
typedef struct Index{ int key_offset; int key_size; int value_offset; int value_size; }Index;

typedef struct MapReduceSpec{ MapFileList* map_file_list; char* map_input_keys; char* map_input_values; Index* map_input_index; int* map_im_key_size; int* map_im_value_size; int* map_im_num; char* im_keys; unsigned* im_values; Index* im_index; int map_input_num; int map_block_num; int map_thread_num; input_format map_input_format; }MapReduceSpec;

/* Totals accumulated in map_phase and reused by sort(). */
int im_num_total=0;      /* total number of intermediate <word,1> pairs   */
int im_key_total_size=0; /* total bytes of all intermediate keys          */
/* Device copies of the split input, shared between the two GPU phases. */
char* d_map_input_keys;
char* d_map_input_values;
Index* d_map_input_index;
/*extern */MAP_COUNT;
void sort(MapReduceSpec* spec);

void init_map_file_list(MapFileList* list){
    list->filename=NULL;
    list->next=NULL;
}

/* Frees every node of the list, including the trailing sentinel. */
void free_map_file_list(MapFileList* list){
    MapFileList* del;
    MapFileList* tmp;
    del=list;
    tmp=list->next;
    while(tmp){
        if(del->filename!=NULL) free(del->filename);
        free(del);
        del=tmp;
        tmp=tmp->next;
    }
    if(del->filename!=NULL) free(del->filename);
    free(del);
}

void init_mapreduce_spec(MapReduceSpec* spec){
    spec->map_file_list=NULL;
    spec->map_input_keys=NULL;
    spec->map_input_values=NULL;
    spec->map_input_index=NULL;
    spec->map_im_key_size=NULL;
    spec->map_im_value_size=NULL;
    spec->map_im_num=NULL;
    spec->im_keys=NULL;
    spec->im_values=NULL;
    spec->im_index=NULL;
    spec->map_input_num=0;
    spec->map_block_num=0;
    spec->map_thread_num=512;
    spec->map_input_format=TextInputFormat;
}

void free_spec(MapReduceSpec* spec){
    free_map_file_list(spec->map_file_list);
    free(spec->map_input_keys);
    free(spec->map_input_values);
    free(spec->map_input_index);
    free(spec->map_im_key_size);
    free(spec->map_im_value_size);
    free(spec->map_im_num);
    free(spec->im_keys);
    free(spec->im_values);
    free(spec->im_index);
    free(spec);
}

/* strncpy clone that always zero-fills up to n bytes. */
char *my_strncpy(char *dest, const char *src, size_t n)
{
    size_t i;
    for (i = 0; i < n && src[i] != '\0'; i++)
        dest[i] = src[i];
    for ( ; i < n; i++)
        dest[i] = '\0';
    return dest;
}

/* Reads every file in spec->map_file_list and appends one record per text
 * line into the flat key/value/index arrays (grown with realloc). */
void map_input_split(MapReduceSpec* spec){
    MapFileList* file_list_entry;
    size_t buffer_size=(size_t)256*1024*1024;
    size_t buffer_used=0;
    FILE* pFile;
    file_list_entry=spec->map_file_list;
    size_t file_size;
    size_t key_array_size;
    size_t value_array_size;
    size_t index_array_size;
    if(spec->map_input_format==TextInputFormat){
        file_size=key_array_size=value_array_size=index_array_size=0;
        while(file_list_entry->filename!=NULL){
            pFile=fopen(file_list_entry->filename,"rb");
            if (pFile==NULL) {fputs ("File error\n",stderr); exit (1);}
            fseek (pFile , 0 , SEEK_END);
            file_size = ftell (pFile);
            rewind (pFile);
            if(buffer_used+file_size<=buffer_size){
                ssize_t result=0;
                /* NOTE(review): the loop body runs once more after getline
                 * returns -1 at EOF; at that point temp_value still holds
                 * the previous line, so the last line is recorded twice —
                 * confirm whether this is intentional. */
                while (result!= -1) {
                    size_t value_size = 0;
                    size_t key_size=0;
                    char* temp_key=NULL;
                    char* temp_value=NULL;
                    temp_key=(char*)malloc(10);
                    sprintf(temp_key,"%d",(int)ftell(pFile));
                    key_size=strlen(temp_key)+1; //get the new key's size
                    spec->map_input_keys=(char*)realloc(spec->map_input_keys,key_array_size+key_size); //reallocate key_array, so that it can contain new keys
                    my_strncpy((spec->map_input_keys)+key_array_size,temp_key,key_size);
                    result=getline(&(temp_value), &value_size, pFile);
                    value_size=strlen(temp_value)+1;
                    spec->map_input_values=(char*)realloc(spec->map_input_values,value_array_size+value_size); //reallocate value_size, so that it can contain new values
                    strcpy((char*)(spec->map_input_values+value_array_size),temp_value);
                    spec->map_input_index=(Index*)realloc(spec->map_input_index,(index_array_size+1)*sizeof(Index)); //reallocate index array, so that it can contain new <key,value> information
                    spec->map_input_index[index_array_size].key_offset=key_array_size;
                    spec->map_input_index[index_array_size].key_size=key_size;
                    spec->map_input_index[index_array_size].value_offset=value_array_size;
                    spec->map_input_index[index_array_size].value_size=value_size;
                    key_array_size+=key_size;
                    value_array_size+=value_size;
                    index_array_size++;
                    free(temp_key);
                    free(temp_value);
                }
                buffer_used=buffer_used+file_size;
            }
            else printf("Buffer full!!\n");
            file_list_entry=file_list_entry->next;
            fclose(pFile);
        }
        spec->map_input_num=index_array_size;
        // printf("Map Input entry number: %i, %u, %u, %u\n",spec->map_input_num,key_array_size,value_array_size,index_array_size*sizeof(Index));
        printf("Map Input entry number: %i\n",spec->map_input_num);
    }
}

/* True for ASCII letters only; digits and punctuation break words. */
__device__ bool isChar(char c){
    if(((c<='z')&&(c>='a'))||((c<='Z')&&(c>='A'))) return true;
    else return false;
}

/* Records one record's intermediate-buffer requirements at slot threadID. */
__device__ void emitMapCount(int key_size, int value_size,int word_num,int*key_im_size_array,int*value_im_size_array,int*map_im_num,int threadID){
    *(key_im_size_array+threadID)=key_size;
    *(value_im_size_array+threadID)=value_size;
    *(map_im_num+threadID)=word_num;
}

/* One thread per input record; runs the user mapCount() sizing pass. */
__global__ void map_count_warp(char*keys,char*values,Index*index,int*map_im_key_size,int*map_im_value_size,int*map_im_num,int input_num){
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<input_num){
        mapCount((keys+((index+i)->key_offset)),(values+((index+i)->value_offset)),(index+i)->key_size,(index+i)->value_size,map_im_key_size,map_im_value_size,map_im_num,i);
    }
}

/* Copies the split input to the device (left resident for map_phase) and
 * gathers per-record word counts / byte requirements back to the host. */
void map_count_phase(MapReduceSpec* spec){
    // char* d_map_input_keys;
    // char* d_map_input_values;
    // Index* d_map_input_index;
    int* d_map_im_key_size;
    int* d_map_im_value_size;
    int* d_map_im_num;
    size_t map_im_size=(spec->map_input_num)*sizeof(int);
    spec->map_im_key_size=(int*)malloc(map_im_size);
    spec->map_im_value_size=(int*)malloc(map_im_size);
    spec->map_im_num=(int*)malloc(map_im_size);
    /* malloc_usable_size gives the (possibly rounded-up) usable capacity
     * of the realloc'd host arrays — glibc-specific. */
    size_t keys_size=malloc_usable_size(spec->map_input_keys);
    size_t values_size=malloc_usable_size(spec->map_input_values);
    size_t index_size=malloc_usable_size(spec->map_input_index);
    //printf("%u,%u,%u\n",malloc_usable_size(spec->map_input_keys),malloc_usable_size(spec->map_input_values),malloc_usable_size(spec->map_input_index));
    cudaMalloc(&d_map_input_keys,keys_size);
    cudaMalloc(&d_map_input_values,values_size);
    cudaMalloc(&d_map_input_index,index_size);
    cudaMalloc(&d_map_im_key_size,map_im_size);
    cudaMalloc(&d_map_im_value_size,map_im_size);
    cudaMalloc(&d_map_im_num,map_im_size);
    cudaMemcpy(d_map_input_keys,spec->map_input_keys,keys_size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_map_input_values,spec->map_input_values,values_size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_map_input_index,spec->map_input_index,index_size,cudaMemcpyHostToDevice);
    spec->map_block_num=((spec->map_input_num)+(spec->map_thread_num)-1)/(spec->map_thread_num);
    // printf("%d\n",spec->map_block_num);
    map_count_warp<<<spec->map_block_num,spec->map_thread_num>>>(d_map_input_keys,d_map_input_values,d_map_input_index,d_map_im_key_size,d_map_im_value_size,d_map_im_num,spec->map_input_num);
    cudaMemcpy(spec->map_im_key_size,d_map_im_key_size,map_im_size,cudaMemcpyDeviceToHost);
    cudaMemcpy(spec->map_im_value_size,d_map_im_value_size,map_im_size,cudaMemcpyDeviceToHost);
    cudaMemcpy(spec->map_im_num,d_map_im_num,map_im_size,cudaMemcpyDeviceToHost);
    // printf("%s\n",spec->map_input_values);
    // printf("%d %d %d\n",*(spec->map_im_key_size),*(spec->map_im_value_size),*(spec->map_im_num));
    // cudaFree(d_map_input_keys);
    // cudaFree(d_map_input_values);
    // cudaFree(d_map_input_index);
    cudaFree(d_map_im_key_size);
    cudaFree(d_map_im_value_size);
    cudaFree(d_map_im_num);
}

// __device__ void im_index_gr(Index*im_index,int im_key_offset,int im_key_size,int im_value_offset,int im_value_size){
//     im_index->key_offset=im_key_offset;
//     im_index->key_size=im_key_size;
//     im_index->value_offset=im_value_offset;
//     im_index->value_size=im_value_size;
// }

/* Copies one word (key[start..end)) into the intermediate key buffer and
 * fills the word's Index entry. Note the word bytes are NOT
 * NUL-terminated in im_key. */
__device__ void im_emit(char* key,int start, int end, unsigned value,char* im_key,unsigned* im_value,Index* im_index,int g_im_key_offset,int g_im_value_offset,int key_local_offset,int word_num){
    int i;
    // static __shared__ int j=0;
    for(i=0;i<end-start;i++){
        *(im_key+i)=*(key+start+i);
        //*(im_key+i)='d';
        //(*j)++;
    }
    // *im_key='i';*(im_key+1)='p';
    // *im_key='t';*(im_key+1)='s';
    // im_index_gr(im_index+word_num,g_im_value_offset+start,end-start,g_im_value_offset+word_num,1);
    // __syncthreads();
    // *(im_value+word_num)=value; //this statement will affect each other with following statemens. I don't know why.
    (im_index+word_num)->key_offset=g_im_key_offset+key_local_offset;
    (im_index+word_num)->key_size=end-start;
    (im_index+word_num)->value_offset=g_im_value_offset+word_num;
    (im_index+word_num)->value_size=1;
}

/* Tokenizes one record's value into words and emits each one. */
__device__ void map(char*key,char*value,size_t key_size, size_t value_size,char* im_key,unsigned*im_value,int im_key_size,int im_value_size,Index* im_index,int g_im_key_offset,int g_im_value_offset,int g_im_index_offset){
    int i=0;
    // int start;
    // int local_offset=0;
    int key_local_offset=0;
    int word_num=0;
    // unsigned emit_value=1;
    while(i<value_size){
        while((i<value_size)&&!isChar(*(value+i))) i++;
        int start = i;
        while((i<value_size)&&isChar(*(value+i))) i++;
        if(start<i){
            //(im_index+g_im_index_offset+word_num)->key_offset=g_im_key_offset;
            //(im_index+g_im_index_offset+word_num)->key_size=(i-start);
            //if(g_im_index_offset==0){
            im_emit(value,start,i,1,im_key+key_local_offset,im_value,im_index+g_im_index_offset,g_im_key_offset,g_im_value_offset,key_local_offset,word_num);//}
            key_local_offset+=(i-start);
            // int j;
            // for(j=0;j<i-start;j++){
            //     *(im_key+key_local_offset+j)=*(value+start+j);
            //     //*(im_key+key_local_offset+j)='w';
            // }
            //*im_value=g_im_index_offset;
        }
        //key_local_offset++;
        word_num++;
    }
}

/* One thread per input record; im_loc/im_index_loc give each record its
 * private slice of the pre-sized intermediate buffers. */
__global__ void map_warp(char*input_keys,char*input_values,Index*input_index,char*im_key,unsigned*im_value,Index*im_index,Index*im_loc,int* im_index_loc,int input_num){
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<input_num){
        map((input_keys+((input_index+i)->key_offset)),(input_values+((input_index+i)->value_offset)),(input_index+i)->key_size,(input_index+i)->value_size,(im_key+(im_loc+i)->key_offset),(im_value+(im_loc+i)->value_offset),(im_loc+i)->key_size,(im_loc+i)->value_size,im_index,(im_loc+i)->key_offset,(im_loc+i)->value_offset,im_index_loc[i]);
    }
}

/* Computes each record's offsets (prefix sums of the per-record sizes
 * from map_count_phase), runs the map kernel, copies results back, frees
 * the now-unneeded input buffers, and sorts the intermediate keys. */
void map_phase(MapReduceSpec* spec){
    int im_value_total_size=0;
    Index im_loc[spec->map_input_num];
    int im_index_loc[spec->map_input_num];
    for(int i=0; i<spec->map_input_num;i++){
        im_loc[i].key_offset=im_key_total_size;
        im_loc[i].key_size=*(spec->map_im_key_size+i);
        im_loc[i].value_offset=im_value_total_size>>2;
        im_loc[i].value_size=1;
        im_index_loc[i]=im_num_total;
        im_key_total_size+=*(spec->map_im_key_size+i);
        /* NOTE(review): this adds map_im_key_size, not map_im_value_size —
         * value totals track key bytes; confirm against map_count sizing. */
        im_value_total_size+=*(spec->map_im_key_size+i);
        im_num_total+=*(spec->map_im_num+i);
    }
    printf("Map output entries: %d\n",im_num_total);
    //printf("loc[1].key_offset %d\n",im_loc[1].key_offset);
    spec->im_keys=(char*)malloc(im_key_total_size);
    spec->im_values=(unsigned*)malloc(im_value_total_size);
    spec->im_index=(Index*)malloc(im_num_total*sizeof(Index));
    char *d_im_keys;
    unsigned *d_im_values;
    Index *d_im_index;
    Index *d_im_loc;
    int *d_im_index_loc;
    // char* d_map_input_keys;
    // char* d_map_input_values;
    // Index* d_map_input_index;
    // size_t keys_size=malloc_usable_size(spec->map_input_keys);
    // size_t values_size=malloc_usable_size(spec->map_input_values);
    // size_t index_size=malloc_usable_size(spec->map_input_index);
    cudaMalloc(&d_im_values,im_value_total_size);
    cudaMalloc(&d_im_keys,im_key_total_size);
    cudaMalloc(&d_im_index,im_num_total*sizeof(Index));
    cudaMalloc(&d_im_loc,spec->map_input_num*sizeof(Index));
    // cudamalloc(&d_map_input_keys,keys_size);
    // cudaMalloc(&d_map_input_values,values_size);
    // cudaMalloc(&d_map_input_index,index_size);
    cudaMemcpy(d_im_loc,im_loc,spec->map_input_num*sizeof(Index),cudaMemcpyHostToDevice);
    cudaMalloc(&d_im_index_loc,spec->map_input_num*sizeof(int));
    cudaMemcpy(d_im_index_loc,im_index_loc,spec->map_input_num*sizeof(int),cudaMemcpyHostToDevice);
    // cudaMemcpy(d_map_input_keys,spec->map_input_keys,keys_size,cudaMemcpyHostToDevice);
    // cudaMemcpy(d_map_input_values,spec->map_input_values,values_size,cudaMemcpyHostToDevice);
    // cudaMemcpy(d_map_input_index,spec->map_input_index,index_size,cudaMemcpyHostToDevice);
    map_warp<<<spec->map_block_num,spec->map_thread_num>>>(d_map_input_keys,d_map_input_values,d_map_input_index,d_im_keys,d_im_values,d_im_index,d_im_loc,d_im_index_loc,spec->map_input_num);
    cudaMemcpy(spec->im_keys,d_im_keys,im_key_total_size,cudaMemcpyDeviceToHost);
    cudaMemcpy(spec->im_values,d_im_values,im_value_total_size,cudaMemcpyDeviceToHost);
    cudaMemcpy(spec->im_index,d_im_index,im_num_total*sizeof(Index),cudaMemcpyDeviceToHost);
    // printf("%s \n%s \n",spec->im_keys,spec->map_input_values+(spec->map_input_index+1)->value_offset);
    //printf("%d\n",*(spec->im_values+1));
    free(spec->map_input_keys);
    free(spec->map_input_values);
    free(spec->map_input_index);
    free(spec->map_im_key_size);
    cudaFree(d_map_input_keys);
    cudaFree(d_map_input_values);
    cudaFree(d_map_input_index);
    cudaFree(d_im_keys);
    cudaFree(d_im_values);
    cudaFree(d_im_index);
    cudaFree(d_im_loc);
    cudaFree(d_im_index_loc);
    sort(spec);
    // char* tmp=spec->im_keys;
    // free(spec->im_keys);
}

/* Swaps two Index entries through caller-provided scratch space. */
void my_swap(Index*e1,Index*e2,Index*swap){
    *swap=*e1;
    *e1=*e2;
    *e2=*swap;
}

//int tmp;
/* Lomuto-style quicksort over im_index, comparing the key bytes each
 * entry points at; `len` is the index of the pivot (last) element.
 * NOTE(review): the copied keys are not NUL-terminated by im_emit, so
 * strcmp on `cur`/`piovt_value` can read past the intended key — verify. */
void quick_sort(char*im_keys,Index*im_index,int len){ //in-place quick_sort
    if(len>1){
        char piovt_value[(im_index+len)->key_size];
        strncpy(piovt_value,im_keys+(im_index+len)->key_offset,(im_index+len)->key_size);
        int st_pos=0;
        Index *swap=(Index*)malloc(sizeof(Index));
        for(int i=0;i<len-1;i++){
            char cur[(im_index+i)->key_size];
            strncpy(cur,im_keys+(im_index+i)->key_offset,(im_index+i)->key_size);
            if(strcmp(cur,piovt_value)<=0){
                my_swap(im_index+i,im_index+st_pos,swap);
                st_pos++;
            }
        }
        my_swap(im_index+st_pos,im_index+len,swap);
        //free(piovt_value);
        free(swap);
        quick_sort(im_keys,im_index,st_pos-1);
        quick_sort(im_keys,im_index+st_pos+1,len-st_pos);
    }
}

/* Sorts the intermediate index, then materializes the keys in sorted
 * order.
 * NOTE(review): sorted_im_keys is a stack array, so assigning it to
 * spec->im_keys leaves a dangling pointer after this function returns and
 * leaks the original heap buffer — likely a bug to fix. */
void sort(MapReduceSpec* spec){
    // while(1)
    quick_sort(spec->im_keys,spec->im_index,im_num_total-1);
    char sorted_im_keys[im_key_total_size];
    char *tmp=sorted_im_keys;
    for(int i=0;i<im_num_total-1;i++){
        my_strncpy(tmp,(spec->im_keys+(spec->im_index+i)->key_offset),(spec->im_index+i)->key_size);
        tmp=tmp+(spec->im_index+i)->key_size;
    }
    // return sorted_im_keys;
    // tmp=spec->im_keys;
    // free(spec->im_keys);
    spec->im_keys=sorted_im_keys;
    printf("%s \n",spec->im_keys);
}

/* Entry point: lists every regular file under `path`, then runs the
 * split / count / map phases.
 * NOTE(review): list nodes are malloc'd without init_map_file_list, so the
 * final node's filename/next fields are uninitialized when
 * map_input_split tests `filename != NULL` — confirm and fix. */
void mapReduce(char *path,MapReduceSpec* spec){
    MapFileList* plist;
    plist=(MapFileList*)malloc(sizeof(MapFileList));
    spec->map_file_list=plist;
    struct dirent* entry = NULL;
    DIR *pDir;
    pDir=opendir(path);
    while((entry=readdir(pDir))!=NULL){
        if(entry->d_type==DT_REG){
            plist->filename=(char*)malloc(strlen(path)+strlen(entry->d_name)+1);
            strcpy(plist->filename,path);
            strcat(plist->filename,entry->d_name);
            plist->next=(MapFileList*)malloc(sizeof(MapFileList));
            plist=plist->next;
        }
    }
    map_input_split(spec);
    map_count_phase(spec);
    map_phase(spec);
}

/* User map-count function: counts words and the key/value bytes they
 * will occupy, then records the totals via EMIT_IM_COUNT. */
MAP_COUNT{
    unsigned int i;
    unsigned int im_key_size=0;
    unsigned int im_value_size=0;
    int word_num=0;
    for(i=0;i<value_size;){
        while((i<value_size)&&!isChar(*(value+i))) i++;
        int start = i;
        while((i<value_size)&&isChar(*(value+i))) i++;
        if(start<i){
            im_key_size+=(i-start);
            im_value_size+=sizeof(int);
            word_num++;
        }
    }
    EMIT_IM_COUNT(im_key_size,im_value_size);
    //emitMapCount(im_key_size,im_value_size,word_num,key_im_size,value_im_size,map_im_num,threadID);
}

int main(int argc, char **argv){
    MapReduceSpec* spec=(MapReduceSpec*)malloc(sizeof(MapReduceSpec));
    init_mapreduce_spec(spec);
    mapReduce(argv[1],spec);
    free(spec);
}
4,332
// Element-wise vector addition: res[i] = a[i] + b[i] for i < numFloats.
// One thread per element.
__global__ void addKernel(const float * a, const float * b, float * res, const int numFloats)
{
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the original stored unconditionally, so tail threads of the
    // last block (index >= numFloats) read and wrote out of bounds
    // whenever gs*bs did not exactly divide numFloats.
    if (index < numFloats)
    {
        res[index] = a[index] + b[index];
    }
}

// Host wrapper: launches addKernel with gs blocks of bs threads.
// p0/p1 are device input arrays of p3 floats, p2 is the device output.
void addkernel_runSub(const int gs, const int bs, float * p0, float * p1, float * p2, int p3)
{
    addKernel<<<gs, bs>>>(p0, p1, p2, p3);
}
4,333
/*
 College: University of Massachusetts Lowell
 EECE 7110:High-Performance Comp. on GPUs
 Semester: Spring 2018
 Student : 01639617
 Project : Assignment_3
 Professor : Dr.Hang Liu
 Due date: 4/16/2018
 Authors : Sai Sri Devesh Kadambari
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
using namespace std;
#define zero 0

/*
 * Attempted multi-pass prefix sum over `a` (output array `c` is unused).
 * NOTE(review): this kernel has several apparent defects to confirm/fix:
 *  - smem is declared with ONE element, so smem[1] below is out of bounds
 *    (only smem[0] is valid);
 *  - __powf(depth, 2) computes depth squared; a Hillis-Steele step size
 *    of 2^depth (__powf(2, depth)) looks intended;
 *  - a[tid-pwr] is read without a tid >= pwr guard, underflowing for
 *    small tid;
 *  - __syncthreads() sits inside a loop whose trip count depends on tid
 *    (divergent barrier: undefined behaviour);
 *  - nothing is ever written to `c`, yet main copies d_c back as the
 *    result.
 */
__global__ void gpu_prefix_add(int *a,int *c, int m)
{
    __shared__ int smem[1];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int p=16384,n=1,pwr; //p=m/blockdim*griddim
    while((tid<(p*n))&&(tid<(32000000)))
    {
        __syncthreads(); //wait until all the threads in the block reach this point
        for(int depth =0;depth< __logf ( 16384) ;depth++)
        {
            pwr=__powf(depth, 2);
            a[tid]+=a[tid-pwr];
        }
        smem[1]=a[p*n];
        tid+=p;
        a[tid]+=smem[1];
        n++;
        __syncthreads();
    }
}

/* Host driver: reads m, fills h_a with random values, launches the
 * kernel with a fixed 128x128 configuration, and reports elapsed time. */
int main(int argc, char const *argv[])
{
    int m;
    printf("please type in m(size) \n");
    scanf("%d", &m);
    // allocate memory in host RAM, h_cc is used to store CPU result
    int *h_a, *h_c;// *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(int)*m);
    cudaMallocHost((void **) &h_c, sizeof(int)*m);
    for (int i = 0; i < m; ++i)
    {
        h_a[i] = rand() % 1024; //loading random values
    }
    clock_t t;
    t = clock();
    int *d_a,*d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*m);
    cudaMalloc((void **) &d_c, sizeof(int)*m);
    cudaMemcpy(d_a, h_a, sizeof(int)*m, cudaMemcpyHostToDevice);
    dim3 dimGrid(128);
    dim3 dimBlock(128);
    gpu_prefix_add<<<dimGrid, dimBlock>>>(d_a,d_c, m);
    /* NOTE(review): the blocking cudaMemcpy happens before the explicit
     * synchronize, and cudaThreadSynchronize() is deprecated in favor of
     * cudaDeviceSynchronize(). */
    cudaMemcpy(h_c, d_c, sizeof(int)*m, cudaMemcpyDeviceToHost);
    cudaThreadSynchronize();
    t = clock()-t;
    double time_taken = ((double)t)/CLOCKS_PER_SEC;
    printf("Time elapsed on operation of %d: %lf ms.\n\n", m,(time_taken/1000));
    cudaFree(d_a);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_c);
    //cudaFreeHost(h_cc);
    return 0;
}
4,334
// Modified from https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg/lib/pointops/src/knnquery_heap
#include <cmath>
#include <cstdio>

#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))

// Swap helpers used to maintain the max-heap of candidate neighbors.
__device__ void swap_float(float *x, float *y)
{
    float tmp = *x;
    *x = *y;
    *y = tmp;
}

__device__ void swap_int(int *x, int *y)
{
    int tmp = *x;
    *x = *y;
    *y = tmp;
}

// Sift the root of a k-element max-heap (keyed on dist) down to restore
// the heap property; idx is permuted in lock-step with dist.
__device__ void reheap(float *dist, int *idx, int k)
{
    int root = 0;
    int child = root * 2 + 1;
    while (child < k)
    {
        if(child + 1 < k && dist[child+1] > dist[child])
            child++;
        if(dist[root] > dist[child])
            return;
        swap_float(&dist[root], &dist[child]);
        swap_int(&idx[root], &idx[child]);
        root = child;
        child = root * 2 + 1;
    }
}

// Standard heapsort over the k candidates; leaves dist ascending.
__device__ void heap_sort(float *dist, int *idx, int k)
{
    int i;
    for (i = k - 1; i > 0; i--)
    {
        swap_float(&dist[0], &dist[i]);
        swap_int(&idx[0], &idx[i]);
        reheap(dist, idx, i);
    }
}

// input: xyz (b, n, 3) new_xyz (b, m, 3)
// output: idx (b, m, nsample) dist2 (b, m, nsample)
// One thread per query point (grid: DIVUP(m, blockDim.x) x b). Keeps the
// nsample nearest points in a per-thread max-heap; the root is the
// current worst candidate, so a closer point replaces it and re-heaps.
// Precondition: nsample <= 100 (best_dist/best_idx are fixed-size local
// arrays; larger values corrupt thread-local memory).
__global__ void knn_kernel(int b, int n, int m, int nsample, const float *__restrict__ xyz, const float *__restrict__ new_xyz, int *__restrict__ idx, float *__restrict__ dist2)
{
    int bs_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || pt_idx >= m) return;

    // Advance the pointers to this (batch, query point)'s rows.
    new_xyz += bs_idx * m * 3 + pt_idx * 3;
    xyz += bs_idx * n * 3;
    idx += bs_idx * m * nsample + pt_idx * nsample;
    dist2 += bs_idx * m * nsample + pt_idx * nsample;

    float new_x = new_xyz[0];
    float new_y = new_xyz[1];
    float new_z = new_xyz[2];

    float best_dist[100];
    int best_idx[100];
    for(int i = 0; i < nsample; i++){
        best_dist[i] = 1e10;   // sentinel: "farther than anything real"
        best_idx[i] = 0;
    }
    for(int i = 0; i < n; i++){
        float x = xyz[i * 3 + 0];
        float y = xyz[i * 3 + 1];
        float z = xyz[i * 3 + 2];
        // Squared Euclidean distance (no sqrt needed for ranking).
        float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
        if (d2 < best_dist[0]){
            best_dist[0] = d2;
            best_idx[0] = i;
            reheap(best_dist, best_idx, nsample);
        }
    }
    // Emit neighbors sorted by ascending squared distance.
    heap_sort(best_dist, best_idx, nsample);
    for(int i = 0; i < nsample; i++){
        idx[i] = best_idx[i];
        dist2[i] = best_dist[i];
    }
}

// Host launcher: one thread per query point, batches along grid.y.
void knn_kernel_launcher(int b, int n, int m, int nsample, const float *xyz, const float *new_xyz, int *idx, float *dist2, cudaStream_t stream)
{
    // param new_xyz: (B, m, 3)
    // param xyz: (B, n, 3)
    // param idx: (B, m, nsample)

    cudaError_t err;

    dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    knn_kernel<<<blocks, threads, 0, stream>>>(b, n, m, nsample, xyz, new_xyz, idx, dist2);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
4,335
/*
 * Vector addition example using explicit device buffers and cudaMemcpy.
 * (The original was titled "Zero-Copy", but no mapped/zero-copy memory
 * is actually used here.)
 */

#include <stdio.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

#define SIZE (1048576)

// CUDA kernel: element-wise C[i] = A[i] + B[i] for i < numElements.
__global__ void vectorAdd(float *A, float *B, float *C, int numElements)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard with the parameter, not the SIZE macro, so the kernel is
    // correct for any launch size (the original ignored numElements).
    if (id < numElements)
    {
        C[id] = A[id] + B[id];
    }
}

/**
 * Host main routine
 */
int main(void)
{
    float *h_A, *d_A, *h_B, *d_B, *h_C, *d_C;

    // allocate host memory
    h_A = (float*) malloc(SIZE * sizeof(float));
    h_B = (float*) malloc(SIZE * sizeof(float));
    h_C = (float*) malloc(SIZE * sizeof(float));

    // allocate memory on device
    cudaMalloc(&d_A, SIZE * sizeof(float));
    cudaMalloc(&d_B, SIZE * sizeof(float));
    cudaMalloc(&d_C, SIZE * sizeof(float));

    for (int i=0; i < SIZE; i++) {
        h_A[i] = rand() / (float) RAND_MAX;
        h_B[i] = rand() / (float) RAND_MAX;
    }

    cudaMemcpy(d_A, h_A, SIZE * sizeof(float), cudaMemcpyDefault); // we're using UVA...
    cudaMemcpy(d_B, h_B, SIZE * sizeof(float), cudaMemcpyDefault);

    printf("> run vectorAdd using copied device memory...\n");
    dim3 block(256);
    // Integer ceiling division. The original `ceil(SIZE / block.x)`
    // truncated BEFORE rounding and would under-provision blocks
    // whenever SIZE is not a multiple of block.x.
    dim3 grid((SIZE + block.x - 1) / block.x);

    // kernel call
    vectorAdd<<<grid, block>>>(d_A, d_B, d_C, SIZE);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("err: %s\n", cudaGetErrorString(err));
    }

    // Blocking copy of the result back to the host.
    cudaMemcpy(h_C, d_C, SIZE * sizeof(float), cudaMemcpyDefault);
    cudaDeviceSynchronize();
    printf("> kernel call synchronized\n");

    printf("%f vs %f\n", h_A[123]+h_B[123], h_C[123]);

    printf("> releasing host memory...\n");
    free(h_A);
    free(h_B);
    free(h_C);
    // Bug fix: the original called cudaFree on the HOST pointers
    // (h_A/h_B/h_C), which fails and leaks all three device buffers.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("> done\n");

    return 0;
}
4,336
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"

// Recursive device function: prints id, then recurses while 10 < id < 15.
// Device-side recursion requires compute capability 2.0+.
__device__ unsigned dfun(unsigned id)
{
    printf("%d\n", id);
    if (id > 10 && id < 15)
        return dfun(id + 1);
    else
        return 0;
}

// Every thread of the block starts the same recursion at n.
__global__ void dkernel(unsigned n)
{
    dfun(n);
}

#define BLOCKSIZE 256

int main(int nn, char *str[])
{
    // Bug fix: the original dereferenced str[1] unconditionally and
    // crashed when launched without a command-line argument.
    if (nn < 2) {
        fprintf(stderr, "usage: %s <N>\n", str[0]);
        return 1;
    }
    unsigned N = atoi(str[1]);
    dkernel<<<1, BLOCKSIZE>>>(N);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported replacement and also flushes device-side printf.
    cudaDeviceSynchronize();
    return 0;
}
4,337
// Template for Programming Assignment 2
// Use "module load cuda" to enable compilation with the Nvidia C compiler nvcc
// Use "nvcc -O3" to compile code; this can be done even on OSC login node (does not have a GPU)
// To execute compiled code, you must either use a batch submission to run on a node with GPU
// or obtain an interactive GPU-node by using: qsub -I -l walltime=0:59:00 -l nodes=1:gpus=1
//
// Computes c[i][j] = sum_k a[i][k]*b[j][k] (i.e. A times B-transpose),
// once sequentially (ref) and once on the GPU (test_kernel), then
// compares the two results.

#include <unistd.h>
#include <stdio.h>
#include <sys/time.h>

#define threshold 1e-8
#define n (4096)
// Change n to 4096 for final testing;
//#define n (1024)
// n is set to 256 since execution time of single thread template version is excessive
#define TILE_WIDTH 32

void init(void);
void ref(void);
void compare(int N, double *wref, double *w);
__global__ void test_kernel(int N, double *A, double *B, double *C);
double rtclock(void);

double a[n][n],b[n][n],c[n][n],cref[n][n];

int main(){
    double clkbegin, clkend, t;
    double *Ad,*Bd,*Cd;
    int size;

    printf("Matrix Size = %d\n",n);
    init();
    //
    // Time and run the sequential reference version.
    clkbegin = rtclock();
    ref();
    clkend = rtclock();
    t = clkend-clkbegin;
    printf("Seq: Approx GFLOPS: %.1f ; Time = %.3f sec; cref[n/2][n/2-1] = %f; \n", 2.0*n*n*n/t/1e9,t,cref[n/2][n/2-1]);
    size = sizeof(double)*n*n;
    cudaMalloc((void **) &Ad,size);
    cudaMalloc((void **) &Bd,size);
    cudaMalloc((void **) &Cd,size);
    cudaMemcpy(Ad,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(Bd,b,size,cudaMemcpyHostToDevice);
    clkbegin = rtclock();
    // Each block computes TWO adjacent tiles along x, hence the halved
    // grid x-dimension. Requires n to be a multiple of 2*TILE_WIDTH.
    dim3 dimGrid(n/(2*TILE_WIDTH), n/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    test_kernel<<<dimGrid,dimBlock>>>(n,Ad,Bd,Cd);
    if (cudaDeviceSynchronize() != cudaSuccess)
        printf ("Error return for test_kernel: Was execution done on a node with a GPU?\n");
    else
    {
        clkend = rtclock();
        t = clkend-clkbegin;
        cudaMemcpy(c,Cd,size,cudaMemcpyDeviceToHost);
        cudaFree(Ad); cudaFree(Bd); cudaFree(Cd);
        printf("GPU: Approx GFLOPS: %.1f ; Time = %.3f sec; c[n/2][n/2-1] = %f; \n", 2.0*n*n*n/t/1e9,t,c[n/2][n/2-1]);
        printf("Correctness Check for GPU solution:\n");
        compare(n, (double *) c,(double *) cref);
    }
}

__global__ void test_kernel(int N, double *A, double *B, double *C)
{
    // using shared memory for storing TILES corresponding to this block in A & B. Each block is responsible for computing TILE [i][2j] & TILE [i][2j+1] in C
    __shared__ double As[TILE_WIDTH][TILE_WIDTH];
    // +1 padding on Bs avoids shared-memory bank conflicts on the
    // transposed access pattern Bs[tx][k] below.
    __shared__ double Bs0[TILE_WIDTH][TILE_WIDTH+1];
    __shared__ double Bs1[TILE_WIDTH][TILE_WIDTH+1];

    // Block Index along x & y
    int bx = blockIdx.x;
    int by = blockIdx.y;
    //Thread Index along x & y
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Row & Columns in resultant matrix C computed by the threads in the block
    int Row = by * TILE_WIDTH + ty;
    int Column = bx * 2 * TILE_WIDTH + tx;

    double Pvalue0 = 0;
    double Pvalue1 = 0;

    // breaking kernel into 'N/TILE_WIDTH' phases where 'm' is the current phase
    for(int m=0; m<N/TILE_WIDTH; ++m)
    {
        // Bring elements from A & B to their corresponding shared memory.
        // B is accessed by ROW (the product computed is A * B^T, matching
        // ref(): cref[i][j] += a[i][k]*b[j][k]).
        As[ty][tx] = A[Row*N+(m*TILE_WIDTH+tx)];
        Bs0[ty][tx] = B[((bx*2*TILE_WIDTH+ty)*N)+(m*TILE_WIDTH+tx)];
        Bs1[ty][tx] = B[((bx*2*TILE_WIDTH+TILE_WIDTH+ty)*N)+(m*TILE_WIDTH+tx)];
        //wait for the entire tiles in A & B to be in shared memory
        __syncthreads();
        // Accumulate subset of dot products
        for (int k=0; k<TILE_WIDTH; ++k)
        {
            Pvalue0 += As[ty][k]*Bs0[tx][k];
            Pvalue1 += As[ty][k]*Bs1[tx][k];
        }
        // wait for the entire values corresponding to this phase is computed
        __syncthreads();
    }
    // write final values to global memory (the two tiles this block owns)
    C[Row*N+Column] = Pvalue0;
    C[Row*N+Column+TILE_WIDTH] = Pvalue1;
}

// Sequential reference: cref = A * B^T.
void ref(void)
{
    int i,j,k;
    for (i=0;i<n;i++)
        for (j=0;j<n;j++)
            for(k=0;k<n;k++)
                cref[i][j] += a[i][k]*b[j][k];
}

// Fills a/b with pseudo-random values and zeroes both result matrices.
void init(void)
{
    int i,j;
    for(i=0;i<n;i++)
        for(j=0;j<n;j++)
        {
            c[i][j] = 0.0;
            cref[i][j] = 0.0;
            a[i][j] = drand48();
            b[i][j] = drand48();
        }
}

// Element-wise comparison of two N x N matrices; reports how many entries
// differ by more than `threshold` and the maximum absolute difference.
void compare(int N, double *wref, double *w)
{
    double maxdiff,this_diff;
    int numdiffs;
    int i,j;
    numdiffs = 0;
    maxdiff = 0;
    for (i=0;i<N;i++)
        for (j=0;j<N;j++)
        {
            this_diff = wref[i*N+j]-w[i*N+j];
            if (this_diff < 0) this_diff = -1.0*this_diff;
            if (this_diff>threshold)
            {
                numdiffs++;
                if (this_diff > maxdiff) maxdiff=this_diff;
            }
        }
    if (numdiffs > 0)
        printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff);
    else
        printf("No differences found between reference and test versions\n");
}

// Wall-clock time in seconds (gettimeofday resolution).
double rtclock(void)
{
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0) printf("Error return from gettimeofday: %d",stat);
    return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
4,338
#include <stdio.h>
#include "cuda.h"

#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)

// Abort with `message` if the most recent CUDA API call or kernel launch failed.
void check_error (const char* message) {
  cudaError_t error = cudaGetLastError ();
  if (error != cudaSuccess) {
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
  }
}

/* 9x9 (81-point) 2D stencil: out[j][i] = sum over the 9x9 window of
 * W[a][b] * in[j+a][i+b].  Each thread produces 4 vertically adjacent
 * outputs (the y grid is launched with a 4x coarsening factor; see
 * host_code).  Row pitch is N.
 *
 * BUG FIX: the original cast l_in/l_out to double(*)[8200], hard-coding the
 * row pitch, while host_code allocates N*N doubles — indexing is now done
 * with N so the kernel is correct for any N, not only N == 8200.
 *
 * The original generated code spelled out all 81 products per output; the
 * weight matrix is symmetric under reflection in both axes
 * (W[a][b] == W[8-a][b] == W[a][8-b] == W[b][a]), so only the 5x5 quadrant
 * is stored below and W[a][b] = T[min(a,8-a)][min(b,8-b)].  Results are
 * equivalent up to floating-point summation order. */
__global__ void j2d81pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
  // 5x5 quadrant of the symmetric 9x9 weight matrix (T itself is symmetric).
  const double T[5][5] = {
    {  3.18622,     4.5339,     -0.000357,  0.002856, -0.00508225 },
    {  4.5339,      0.00064516, -0.00508,   0.04064,  -0.0723189  },
    { -0.000357,   -0.00508,     0.04,     -0.32,      0.56944    },
    {  0.002856,    0.04064,    -0.32,      2.56,     -4.55552    },
    { -0.00508225, -0.0723189,   0.56944,  -4.55552,   8.10655    }
  };

  int i = (int)(blockIdx.x) * (int)(blockDim.x) + (int)(threadIdx.x);
  int j = 4 * ((int)(blockIdx.y) * (int)(blockDim.y) + (int)(threadIdx.y));

  if (i >= 0 && j >= 0 && i <= N - 9 && j <= N - 9) {
    // NOTE(review): as in the original code, outputs j+1..j+3 read input
    // rows up to j+11, which exceeds row N-1 when j > N-12 — callers must
    // provide padding or restrict the live domain accordingly.
    for (int r = 0; r < 4; ++r) {        // the 4 vertically adjacent outputs
      double acc = 0.0;
      for (int a = 0; a < 9; ++a) {
        int ra = (a < 5) ? a : 8 - a;    // fold row offset into stored quadrant
        for (int b = 0; b < 9; ++b) {
          int rb = (b < 5) ? b : 8 - b;  // fold column offset likewise
          acc += T[ra][rb] * l_in[(j + r + a) * N + (i + b)];
        }
      }
      l_out[(j + r) * N + i] = acc;
    }
  }
}

/* Host wrapper: copies h_in (N*N doubles, row pitch N) to the device, runs
 * the stencil once with 16x8 thread blocks (each thread covering 4 rows,
 * hence the 4x factor in the y grid dimension), and copies the result back
 * into h_out. */
extern "C" void host_code (double *h_in, double *h_out, int N) {
  double *in;
  cudaMalloc (&in, sizeof(double)*N*N);
  check_error ("Failed to allocate device memory for in\n");
  cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
  double *out;
  cudaMalloc (&out, sizeof(double)*N*N);
  check_error ("Failed to allocate device memory for out\n");
  dim3 blockconfig (16, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
  j2d81pt<<<gridconfig, blockconfig>>> (in, out, N);
  check_error ("j2d81pt kernel launch failed\n"); // added: launch errors were silently dropped
  cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
  cudaFree (in);
  cudaFree (out);
}
4,339
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <ctime> #include <cmath> #define N (1024) __global__ void kernel(float *dev) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (N % idx == 0) { dev[idx] = (float) idx; } } int main (int argc, char *argv[]) { //------------------— GPU PART —----------------— float arr [N]; float *dev = NULL; cudaMalloc(&dev, N * sizeof(float)); kernel<<<2, 512>>> (dev); cudaMemcpy(&arr, dev, N * sizeof(float), cudaMemcpyDeviceToHost); for (int idx = 0; idx < N; idx++) { if (arr[idx] != 0) { printf("%f ", arr[idx]); } } cudaFree(dev); return 0; }
4,340
#include "includes.h" cudaEvent_t start, stop; __global__ void cudaComputeYGradient(int* y_gradient, unsigned char* channel, int image_width, int image_height) { int y_kernel[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index == 0) { return; } y_gradient[index] = y_kernel[0][0] * channel[index - 1] + y_kernel[1][0] * channel[index] + y_kernel[2][0] * channel[index + 1] + y_kernel[0][1] * channel[index + image_width - 1] + y_kernel[1][1] * channel[index + image_width] + y_kernel[2][1] * channel[index + image_width + 1] + y_kernel[0][2] * channel[index + 2 * image_width - 1] + y_kernel[1][2] * channel[index + 2 * image_width] + y_kernel[2][2] * channel[index + 2 * image_width + 1]; return; }
4,341
#define KERNEL_INCLUDE extern __shared__ int local_data[]; __global__ void fillUintArray(uint* bob, uint value, uint length) { uint id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) bob[id] = value; }
4,342
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>

const unsigned int BLOCK_SIZE = 1024;

/* Per-block histogram: each thread reads one input value and atomically
 * bumps the bin in its block's private row d_out[blockIdx.x].
 * NOTE(review): d_out is a device array of row pointers, but the caller
 * (fastHisto) allocates d_out_all as a flat value buffer and never runs
 * fill_pointers_kernel, so d_out[blockIdx.x] dereferences uninitialized
 * pointers — this kernel cannot work as currently called. */
__global__ void fastHisto_kernel(unsigned int ** d_out, unsigned int * d_in, unsigned int SIZE){
unsigned int mid = threadIdx.x + blockIdx.x*blockDim.x;
if(mid>=SIZE) return;
unsigned int myVal = d_in[mid];
atomicAdd(&(d_out[blockIdx.x][myVal]),1);
}

/* Intended to build a pointer table: d_in[mid] = base + rows*mid.
 * NOTE(review): never invoked anywhere in this file (see the bug note on
 * fastHisto_kernel), and the pointer arithmetic via d_in[0] only makes
 * sense if element 0 already holds the base address. */
__global__ void fill_pointers_kernel(unsigned int * d_in, unsigned int SIZE, unsigned int rows){
unsigned int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid>=SIZE) return;
d_in[mid] = d_in[0] + rows*mid;
}

/* Single-threaded matrix transpose of the per-block histograms:
 * out(j,i) = in(i,j).  Launched <<<1,1>>>, so it serializes entirely. */
__global__ void transpose_kernel(unsigned int ** d_out, unsigned int ** d_in, unsigned int GRID_SIZE, unsigned int OUT_SIZE){
for(int j=0; j < OUT_SIZE; j++)
for(int i=0; i < GRID_SIZE; i++)
d_out[j][i] = d_in[i][j]; // out(j,i) = in(i,j)
}

/* -------- KERNEL -------- */
/* Tree reduction (sum) over d_in, performed in global memory.
 * Parameter order is (d_out, d_in, SIZE, bin, last):
 *   last == false -> each block writes its partial sum to d_out[blockIdx.x]
 *   last == true  -> the single final sum is written to d_out[bin]. */
__global__ void reduce_kernel(unsigned int * d_out, unsigned int * d_in, unsigned int SIZE, unsigned int bin, bool last)
{
// position and threadId
unsigned int tid = threadIdx.x;
unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x;
// do reduction in global memory
for (unsigned int s = blockDim.x / 2; s>0; s>>=1)
{
if ((tid < s) && (mid+s < SIZE)) // Handling out of bounds
d_in[mid] = d_in[mid] + d_in[mid+s];
__syncthreads();
}
// only thread 0 writes result, as thread
if ((tid==0) && (mid < SIZE))
if(last==false)
d_out[blockIdx.x] = d_in[mid];
else
d_out[bin] = d_in[mid];
}

/* -------- REDUCE KERNEL WRAPPER -------- */
/* Recursively reduces d_in (length SIZE) and stores the final sum in
 * d_out[bin].
 * NOTE(review): both launches below pass the last two arguments in the
 * wrong order — the kernel signature ends (..., bin, last), but the calls
 * pass (..., false, bin) and (..., true, bin), so `bin` lands in the
 * `last` slot and vice versa.
 * NOTE(review): the final launch uses <<<1, SIZE>>> with the ORIGINAL
 * SIZE as the thread count instead of SIZE_REDUCE; for SIZE > 1024 this
 * exceeds the block-size limit and the launch fails.
 * NOTE(review): cudaMalloc/cudaFree per call — expensive inside the
 * per-bin loop in fastHisto. */
void reduce(unsigned int * d_out, unsigned int * d_in, unsigned int SIZE, unsigned int bin)
{
// Setting up blocks and intermediate result holder
unsigned int SIZE_REDUCE = SIZE;
unsigned int GRID_SIZE_REDUCE = SIZE/BLOCK_SIZE + ((SIZE % BLOCK_SIZE)?1:0);
unsigned int * d_intermediate;
cudaMalloc(&d_intermediate, sizeof(unsigned int)*GRID_SIZE_REDUCE);
// Recursively solving, will run approximately log base BLOCK_SIZE times.
do
{
reduce_kernel<<<GRID_SIZE_REDUCE, BLOCK_SIZE>>>(d_intermediate, d_in, SIZE_REDUCE, false, bin);
// Updating SIZE
SIZE_REDUCE = GRID_SIZE_REDUCE;//SIZE / NUM_THREADS + SIZE_REST;
// Updating input to intermediate
std::swap(d_in, d_intermediate);
// Updating NUM_BLOCKS to reflect how many blocks we now want to compute on
GRID_SIZE_REDUCE = SIZE_REDUCE/BLOCK_SIZE + ((SIZE_REDUCE % BLOCK_SIZE)?1:0);
}
while(SIZE_REDUCE > BLOCK_SIZE); // if it is too small, compute rest.
// Computing rest
reduce_kernel<<<1, SIZE>>>(d_out, d_in, SIZE_REDUCE, true, bin);
cudaFree(d_intermediate);
}

/* Thin alias over reduce(): sum one transposed bin row into d_out[bin]. */
void merge(unsigned int * d_out, unsigned int * d_in, unsigned int SIZE, unsigned int bin){
reduce(d_out, d_in, SIZE, bin);
}

/* Orchestrates the per-block histogram, transpose, and per-bin merge.
 * NOTE(review): d_out_all / d_out_all_trans are declared unsigned int**
 * but allocated as flat GRID_SIZE*OUT_SIZE value buffers; the pointer
 * tables the kernels index through are never initialized
 * (fill_pointers_kernel is never called).
 * NOTE(review): the merge loop reads d_out_all_trans[bin] on the HOST,
 * dereferencing device memory — undefined behavior.
 * NOTE(review): no cudaGetLastError()/cudaDeviceSynchronize() checks
 * despite the printf tracing, so kernel failures pass silently. */
void fastHisto(unsigned int * d_out, unsigned int * d_in, unsigned int IN_SIZE, unsigned int GRID_SIZE, unsigned int OUT_SIZE){
//Setting up major histo
printf("---fastHisto: STARTED---\n");
unsigned int ** d_out_all, ** d_out_all_trans;
unsigned int GRID_SIZE_ALL = GRID_SIZE * OUT_SIZE;
unsigned int GRID_BYTES_ALL = GRID_SIZE_ALL * sizeof(unsigned int);
printf("---fastHisto: DECLARED---\n");
printf("---GRID_SIZE_ALL---: %d\n", GRID_SIZE_ALL);
printf("---GRID_BYTES_ALL---: %d\n", GRID_BYTES_ALL);
cudaMalloc(&d_out_all, GRID_BYTES_ALL);
printf("---fastHisto: D_OUT_ALL MALLOC---\n");
cudaMemset(d_out_all, 0, GRID_BYTES_ALL);
printf("---fastHisto: D_OUT_ALL MEMSET---\n");
fastHisto_kernel<<<GRID_SIZE, BLOCK_SIZE>>>(d_out_all, d_in, IN_SIZE);
printf("---fastHisto: FH KERNEL COMPLETE---\n");
// Debug dump of the first OUT_SIZE raw values.
unsigned int * h_out_test = (unsigned int *)malloc(GRID_BYTES_ALL);
cudaMemcpy(h_out_test, d_out_all, GRID_BYTES_ALL, cudaMemcpyDeviceToHost);
for(unsigned int i = 0; i<OUT_SIZE; i++)
printf("%d:%d-\n", i, h_out_test[i]);
cudaMalloc(&d_out_all_trans, GRID_BYTES_ALL);
transpose_kernel<<<1, 1>>>(d_out_all_trans, d_out_all, GRID_SIZE, OUT_SIZE);
cudaFree(d_out_all);
// Merging histograms reduce
for(unsigned int bin = 0; bin<OUT_SIZE; bin++){
merge(d_out, d_out_all_trans[bin], GRID_SIZE, bin);
}
cudaFree(d_out_all_trans);
}

/* Driver: builds a small input (IN_SIZE = 4, values 0..3), runs fastHisto
 * for OUT_SIZE = 2 bins, times it with CUDA events, and prints the bin
 * counts and their sum. */
int main(int argc, char **argv){
printf("---STARTED---\n");
// Vars
unsigned int IN_SIZE;
unsigned int IN_BYTES;
unsigned int OUT_SIZE;
unsigned int OUT_BYTES;
unsigned int GRID_SIZE;
unsigned int h_filler;
unsigned int sum;
for(unsigned int rounds = 1; rounds<2; rounds++){
IN_SIZE = 1<<2;
IN_BYTES = sizeof(unsigned int) * IN_SIZE;
OUT_SIZE = 1<<rounds;
OUT_BYTES = sizeof(unsigned int) * OUT_SIZE;
GRID_SIZE = IN_SIZE/BLOCK_SIZE + ((IN_SIZE % BLOCK_SIZE)?1:0);
// Generate the input array on host.
// NOTE(review): h_in[i] = i, so values range up to IN_SIZE-1 = 3 while
// only OUT_SIZE = 2 bins exist — fastHisto_kernel indexes past the bins.
unsigned int * h_in = (unsigned int *)malloc(IN_BYTES);
unsigned int * h_out = (unsigned int *)malloc(OUT_BYTES);
for (h_filler = 0; h_filler<IN_SIZE; h_filler++)
{h_in[h_filler] = h_filler;}
// Declare GPU memory pointers
printf("\n@@@ROUND@@@: %d\n", rounds);
printf("---IN_SIZE---: %d\n", IN_SIZE);
printf("---IN_BYTES---: %d\n", IN_BYTES);
printf("---OUT_SIZE---: %d\n", OUT_SIZE);
printf("---OUT_BYTES---: %d\n", OUT_BYTES);
printf("---BLOCK_SIZE---: %d\n", BLOCK_SIZE);
printf("---GRID_SIZE---: %d\n", GRID_SIZE);
unsigned int * d_in;
unsigned int * d_out;
// Allocate GPU memory
cudaMalloc(&d_in, IN_BYTES);
printf("---ALLOCATED D_IN---\n");
cudaMalloc(&d_out, OUT_BYTES);
printf("---ALLOCATED D_OUT---\n");
// Transfer the arrays to the GPU
cudaMemcpy(d_in, h_in, IN_BYTES, cudaMemcpyHostToDevice);
printf("---MEMCPY D_IN---\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
printf("---START/STOP SETUP COMPLETE---\n");
// running the code on the GPU
cudaMemset(d_out, 0, OUT_BYTES);
printf("---CUDAMEMSET D_OUT COMPLETE---\n");
fastHisto(d_out, d_in, IN_SIZE, GRID_SIZE, OUT_SIZE);
// simple_histo<<<GRID_SIZE, BLOCK_SIZE>>>(d_out, d_in, OUT_SIZE, IN_SIZE);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
cudaEventElapsedTime(&elapsedTime, start, stop);
//elapsedTime = elapsedTime / ((float) times);
printf(" time: %.5f\n", elapsedTime);
// Copy back to HOST
cudaMemcpy(h_out, d_out, OUT_BYTES, cudaMemcpyDeviceToHost);
sum = 0;
for(unsigned int i = 0; i<OUT_SIZE; i++)
sum += h_out[i];
for(unsigned int i = 0; (i<OUT_SIZE) && (i<10); i++){
printf("bin %d: count %d\n", i, h_out[i]);
}
printf("%d\n", sum);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
free(h_out);
}
return 0;
}
4,343
//compile with: nvcc -arch=sm_20 -lcudart
#include <stdio.h>
#include <string.h>

#define Blocksize 10

__global__ void compute(char*, char*);
__device__ __host__ void algorithm(char*, char*);
__device__ int cudaMemCmp(const char*, const char*, int);

/* Driver: stores 10 fixed-size (Blocksize = 10 bytes) strings in `targets`,
 * runs compute() with one thread per string, and copies targets/results
 * back (results are only inspected via device-side printf). */
__host__ int main (void)
{
    char targets[100];
    char* targets2;
    char result[100];
    char* result2;
    int size = Blocksize * sizeof(char);
    memset(targets, 0, 100);
    memset(result, 0, 100);
    // device allocations (10 slots of Blocksize bytes each)
    cudaMalloc( &targets2, (size * 10));
    cudaMalloc( &result2, (size * 10));
    strcpy(targets + 0 * 10, "Baum");
    strcpy(targets + 1 * 10, "Haus");
    strcpy(targets + 2 * 10, "Daniel");
    strcpy(targets + 3 * 10, "Blubber");
    strcpy(targets + 4 * 10, "Tackle");
    strcpy(targets + 5 * 10, "Pokeball");
    strcpy(targets + 6 * 10, "Computer");
    strcpy(targets + 7 * 10, "Dingens");
    strcpy(targets + 8 * 10, "Baum");
    strcpy(targets + 9 * 10, "Baum");
    cudaMemcpy(targets2, targets, 100, cudaMemcpyHostToDevice);
    cudaMemcpy(result2, result, 100, cudaMemcpyHostToDevice);
    compute<<<1,Blocksize>>>(targets2,result2);
    cudaMemcpy(result, result2, 10 *size, cudaMemcpyDeviceToHost);
    cudaMemcpy(targets, targets2, 10 * size, cudaMemcpyDeviceToHost);
    cudaFree(targets2);
    cudaFree(result2);
    return 0;
}

/* One thread per target string.  Thread 0 publishes the expected solution
 * in shared memory; every other thread transforms its target with
 * algorithm() and reports whether the transformed string matches.
 *
 * BUG FIXES vs. the original:
 *  - __syncthreads() was called inside both arms of a divergent branch
 *    (undefined behavior); the barrier now sits on the common path.
 *  - worker threads raced ahead and compared against `solution` before
 *    thread 0 had written it; the barrier now orders write before reads.
 *  - only 8 of the Blocksize bytes of `solution` were initialized while
 *    cudaMemCmp compares all Blocksize bytes; the buffer is zeroed first. */
__global__ void compute(char* target, char* result){
    __shared__ char solution[10];
    if (threadIdx.x == 0){
        for (int i = 0; i < Blocksize; ++i)
            solution[i] = '\0';
        memcpy(solution, "p<82/.99", 8);
    }
    // Every thread reaches this barrier; afterwards `solution` is fully
    // written and safe to read.
    __syncthreads();
    if (threadIdx.x == 0){
        result[Blocksize * threadIdx.x] = '-';
    }
    else{
        algorithm((target + (Blocksize * threadIdx.x)), (result + Blocksize * threadIdx.x));
        if (cudaMemCmp((result + Blocksize * threadIdx.x), solution, Blocksize) == 1){
            printf("Found %s for %s\n",(result + Blocksize * threadIdx.x), (target + Blocksize * threadIdx.x));
        }
        else{
            printf("NOT Found %s for %s\n",(result + Blocksize * threadIdx.x), (target + Blocksize * threadIdx.x));// *(result + Blocksize * threadIdx.x) = '!';
        }
    }
}

/* Map each printable-ASCII character (codes 33..126) of `input` to
 * (c % 83) + 32 in `result`; stops at the first non-printable byte
 * (including the terminating NUL). */
__device__ __host__ void algorithm(char* input, char* result){
    for (int i = 0; i < Blocksize; ++i){
        if (((int)input[i] < 127) && ((int)input[i]>32))
            result[i] = input[i] % 83 +32;
        else
            break;
    }
}

/* Byte-wise equality over `length` bytes: returns 1 if all equal, else 0.
 * (memcmp-style comparison usable from device code.) */
__device__ int cudaMemCmp(const char* left, const char* right, int length) {
    int result = 1;
    while(result && (length > 0)){
        --length;
        result &= (left[length] == right[length]);
    }
    return result;
}
4,344
#include "includes.h" __global__ void add(int* in, int offset, int n){ int gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid >= n) return ; extern __shared__ int temp[]; temp[threadIdx.x] = in[gid]; __syncthreads(); //can only control threads in a block. if(threadIdx.x >= offset){ in[threadIdx.x] += temp[threadIdx.x-offset]; } else if(gid >= offset){ in[threadIdx.x] += in[gid-offset]; } in[gid] = temp[threadIdx.x]; }
4,345
#include "includes.h"

/* 4-point (N/S/E/W) 2D stencil over an M-row x N-column grid using a
 * shared-memory tile with a halo ring.
 * Assumptions (from the index arithmetic; HALO and TILE_SIZE are macros
 * defined in includes.h — TODO confirm their values):
 *  - each thread computes a TILE_SIZE x TILE_SIZE sub-tile, so a block
 *    covers (blockDim.x*TILE_SIZE) x (blockDim.y*TILE_SIZE) grid points;
 *  - dynamic shared memory must hold
 *    (2*HALO + blockDim.x*TILE_SIZE) * (2*HALO + blockDim.y*TILE_SIZE)
 *    doubles (passed as the launch's third parameter);
 *  - the grid has a HALO-wide border: outputs start at row/column HALO.
 * NOTE(review): parameter M is never used — there is no bounds check
 * against the grid extent, so out-of-range blocks read/write out of
 * bounds.
 * NOTE(review): the whole halo is loaded by thread (0,0) alone,
 * serializing that work.
 * NOTE(review): a 4-point average would divide by 4 (see the commented
 * line); the live code divides by 5.5 — looks like an experiment knob,
 * confirm which is intended. */
__global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N) {
//Declaring the shared memory array for source
extern __shared__ double shared_mem[];
double * shSrc = shared_mem;
//indexes
int i, j;
//neighbor's values
double north, south, east, west;
//Shared-memory column dimension (tile width incl. halo)
int smColDim = HALO*2+blockDim.y*TILE_SIZE;
//Shared-memory row dimension (tile height incl. halo)
int smRowDim = HALO*2+blockDim.x*TILE_SIZE;
//Copying to shared memory
//Inner part: each thread stages its own TILE_SIZE x TILE_SIZE sub-tile.
for ( i = 0 ; i < TILE_SIZE ; i++ ) {
for ( j = 0 ; j < TILE_SIZE ; j++ ) {
int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
shSrc[shMemIndex]=src[globalIndex];
}
}
//Halos: loaded entirely by thread (0,0) of the block (serialized).
if (threadIdx.x == 0 && threadIdx.y == 0 ) {
int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo;
//For Bottom and top row
for ( i = 0 ; i < HALO ; i++ ) {
for ( j = 0 ; j < smColDim ; j++ ) {
indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j;
indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j;
shSrc[i*smColDim+j] = src[indexTopHalo];
shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo];
}
}
//For right and left Columns
for ( i = 0 ; i < HALO ; i++ ) {
for ( j = 0 ; j < smRowDim-HALO*2; j ++ ) {
indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i;
indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i;
shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo];
shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo];
}
}
}
//All staging (including the single-thread halo load) must finish before
//any thread reads neighbors from shared memory.
__syncthreads();
//Compute phase: same index mapping as the staging loop above.
for ( i = 0 ; i < TILE_SIZE ; i++ ) {
for ( j = 0 ; j < TILE_SIZE ; j++ ) {
int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
//Getting the neighbohrs
north = shSrc[shMemIndex-smColDim];
south = shSrc[shMemIndex+smColDim];
east = shSrc[shMemIndex+1];
west = shSrc[shMemIndex-1];
//Real Stencil operation (see NOTE(review) above re: 5.5 vs 4 divisor)
dst[globalIndex] = ( north + south + east + west )/5.5;
// dst[globalIndex] = ( north + south + east + west )/4;
}
}
//NOTE(review): this trailing barrier is redundant — no shared-memory
//access follows it.
__syncthreads();
}
4,346
/* We use a term *tile* to identify the rectangular submatrices of the image. Not to be confused with the blocks of threads. */ #include <cuda_runtime.h> #include <stdio.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #define DSM_MAX_TILES_PER_BLOCK 500 #define DSM_MAX_TILES_PER_THREAD 500 // threads per block #define TPB_1D 16 #define TPB (TPB_1D * TPB_1D) // satellite pixels per thread #define SAT_PPT_1D 2 #define SAT_PPT (SAT_PPT_1D * SAT_PPT_1D) // satellite pixels per block #define SAT_PPB_1D (SAT_PPT_1D * TPB_1D) #define SAT_PPB (SAT_PPB_1D * SAT_PPB_1D) // DSM pixels per thread #define DSM_PPT_1D 1 #define DSM_PPT (DSM_PPT_1D * DSM_PPT_1D) // DSM pixels per block #define DSM_PPB_1D (DSM_PPT_1D * TPB_1D) // #define DSM_PPB (DSM_PPB_1D * DSM_PPB_1D) // this needs to be large negative number #define DSM_IGNORE_VALUE -1E5 // extern const float DSM_IGNORE_VALUE; #define EPS 1E-3 #define DTYPE float __device__ bool d_rectanglesIntersect(DTYPE* bbox1, DTYPE* bbox2) { if (bbox2[0] > bbox1[2] || bbox2[1] > bbox1[3] || bbox1[0] > bbox2[2] || bbox1[1] > bbox2[3]) { return false; } else { return true; } } __device__ DTYPE d_area(DTYPE x1, DTYPE y1, DTYPE x2, DTYPE y2, DTYPE x3, DTYPE y3) { return abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2; } __device__ DTYPE d_interpolate_three(DTYPE x, DTYPE y, DTYPE x1, DTYPE y1, DTYPE v1, DTYPE x2, DTYPE y2, DTYPE v2, DTYPE x3, DTYPE y3, DTYPE v3) { DTYPE denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3); DTYPE w1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom; DTYPE w2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom; DTYPE w3 = 1. 
- w1 - w2; return (w1 * v1 + w2 * v2 + w3 * v3); } __device__ bool d_inside_triangle(DTYPE x, DTYPE y, DTYPE x1, DTYPE y1, DTYPE x2, DTYPE y2, DTYPE x3, DTYPE y3) { DTYPE A = d_area(x1, y1, x2, y2, x3, y3); DTYPE A1 = d_area(x, y, x1, y1, x2, y2); DTYPE A2 = d_area(x, y, x3, y3, x1, y1); DTYPE A3 = d_area(x, y, x2, y2, x3, y3); return (abs(A1 + A2 + A3 - A) < EPS); } __global__ void kernelComputePointsNum(DTYPE* pX, DTYPE* pY, DTYPE* pZ, int* dsmPixelCounts, int nfaces, int dsm_width, int sat_width, int sat_height) { int iface = blockIdx.x * blockDim.x + threadIdx.x; if (iface < nfaces) { int faces_per_row = 2 * (dsm_width - 1); int irow = iface / faces_per_row; int icol = (iface % faces_per_row) / 2; int idx = irow * dsm_width + icol; int idx1, idx2, idx3; if (iface % 2 == 0) { // ** // * idx1 = idx; idx2 = idx + 1; idx3 = idx + dsm_width; } else { // * // ** idx1 = idx + 1; idx2 = idx + dsm_width; idx3 = idx + dsm_width + 1; } if (pZ[idx1] < DSM_IGNORE_VALUE + 1 || pZ[idx2] < DSM_IGNORE_VALUE + 1 || pZ[idx3] < DSM_IGNORE_VALUE + 1) { return; } float x1, y1, x2, y2, x3, y3; x1 = pX[idx1]; y1 = pY[idx1]; x2 = pX[idx2]; y2 = pY[idx2]; x3 = pX[idx3]; y3 = pY[idx3]; int ymin = static_cast<int>( ceilf(fminf(fminf(y1, y2), y3)) ); int xmin = static_cast<int>( ceilf(fminf(fminf(x1, x2), x3)) ); int ymax = static_cast<int>( floorf(fmaxf(fmaxf(y1, y2), y3)) ); int xmax = static_cast<int>( floorf(fmaxf(fmaxf(x1, x2), x3)) ); ymin = fmaxf(0, ymin); xmin = fmaxf(0, xmin); ymax = fminf(sat_height - 1, ymax); xmax = fminf(sat_width - 1, xmax); //if ((xmax - xmin) * (ymax - ymin) > 100) { // dsmPixelCounts[iface] = -1; //} else { { for (int x = xmin; x <= xmax; ++x) { for (int y = ymin; y <= ymax; ++y) { if (d_inside_triangle((float) x - x1, (float) y - y1, 0, 0, x2-x1, y2-y1, x3-x1, y3-y1)) { dsmPixelCounts[iface] += 1; } } } } } } __global__ void kernelGetPoints(DTYPE* pX, DTYPE* pY, DTYPE* pZ, int* dsmPixelCounts, int* faceIDs, int* pixelIDs, int nfaces, int dsm_width, int 
sat_width, int sat_height) { int iface = blockIdx.x * blockDim.x + threadIdx.x; if (iface < nfaces) { int curIdx = dsmPixelCounts[iface]; int faces_per_row = 2 * (dsm_width - 1); int irow = iface / faces_per_row; int icol = (iface % faces_per_row) / 2; int idx = irow * dsm_width + icol; int idx1, idx2, idx3; if (iface % 2 == 0) { // ** // * idx1 = idx; idx2 = idx + 1; idx3 = idx + dsm_width; } else { // * // ** idx1 = idx + 1; idx2 = idx + dsm_width; idx3 = idx + dsm_width + 1; } if (pZ[idx1] < DSM_IGNORE_VALUE + 1 || pZ[idx2] < DSM_IGNORE_VALUE + 1 || pZ[idx3] < DSM_IGNORE_VALUE + 1) { return; } float x1, y1, x2, y2, x3, y3; x1 = pX[idx1]; y1 = pY[idx1]; x2 = pX[idx2]; y2 = pY[idx2]; x3 = pX[idx3]; y3 = pY[idx3]; int ymin = static_cast<int>( ceilf(fminf(fminf(y1, y2), y3)) ); int xmin = static_cast<int>( ceilf(fminf(fminf(x1, x2), x3)) ); int ymax = static_cast<int>( floorf(fmaxf(fmaxf(y1, y2), y3)) ); int xmax = static_cast<int>( floorf(fmaxf(fmaxf(x1, x2), x3)) ); ymin = fmaxf(0, ymin); xmin = fmaxf(0, xmin); ymax = fminf(sat_height - 1, ymax); xmax = fminf(sat_width - 1, xmax); //if ((xmax - xmin) * (ymax - ymin) > 100) { // dsmPixelCounts[iface] = -1; //} else { { for (int x = xmin; x <= xmax; ++x) { for (int y = ymin; y <= ymax; ++y) { if (d_inside_triangle((float) x - x1, (float) y - y1, 0, 0, x2-x1, y2-y1, x3-x1, y3-y1)) { faceIDs[curIdx] = iface; pixelIDs[curIdx] = y * sat_width + x; curIdx++; } } } } } } __global__ void kernelFindLimits(int* ids, int* limits, int num) { int iel = blockIdx.x * blockDim.x + threadIdx.x; if (iel < num) { int pixelID = ids[iel]; if (iel == 0 || ids[iel - 1] != pixelID) { limits[pixelID * 2 + 0] = iel; } if (iel == num - 1 || ids[iel + 1] != pixelID) { limits[pixelID * 2 + 1] = iel + 1; } } } __global__ void kernelDraw(int* faceIDs, int* pixelIDsLimits, float* pX, float* pY, float* pZ, float* pOut, int sat_npixels, int dsm_width, int sat_width) { int ipixel = blockIdx.x * blockDim.x + threadIdx.x; if (ipixel < sat_npixels) { 
int faces_per_row = 2 * (dsm_width - 1); for (int i = pixelIDsLimits[2 * ipixel + 0]; i < pixelIDsLimits[2 * ipixel + 1]; ++i) { int iface = faceIDs[i]; int irow = iface / faces_per_row; int icol = (iface % faces_per_row) / 2; int idx = irow * dsm_width + icol; int idx1, idx2, idx3; if (iface % 2 == 0) { // ** // * idx1 = idx; idx2 = idx + 1; idx3 = idx + dsm_width; } else { // * // ** idx1 = idx + 1; idx2 = idx + dsm_width; idx3 = idx + dsm_width + 1; } float x1, y1, elev1, x2, y2, elev2, x3, y3, elev3; x1 = pX[idx1]; y1 = pY[idx1]; elev1 = pZ[idx1]; x2 = pX[idx2]; y2 = pY[idx2]; elev2 = pZ[idx2]; x3 = pX[idx3]; y3 = pY[idx3]; elev3 = pZ[idx3]; float x = static_cast<float>(ipixel % sat_width); float y = static_cast<float>(ipixel / sat_width); float elev = d_interpolate_three(x, y, x1, y1, elev1, x2, y2, elev2, x3, y3, elev3); if (elev > pOut[ipixel]) { pOut[ipixel] = elev; } } } } void cudaRenderSatElevation(DTYPE * pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut, int dsm_width, int dsm_height, int sat_width, int sat_height) { int dsm_npixels = dsm_width * dsm_height; int sat_npixels = sat_width * sat_height; DTYPE* d_pX; DTYPE* d_pY; DTYPE* d_pZ; DTYPE* d_pOut; cudaMalloc((void **)&d_pX, sizeof(DTYPE) * dsm_npixels); cudaMalloc((void **)&d_pY, sizeof(DTYPE) * dsm_npixels); cudaMalloc((void **)&d_pZ, sizeof(DTYPE) * dsm_npixels); cudaMalloc((void **)&d_pOut, sizeof(DTYPE) * sat_npixels); cudaMemcpy(d_pX, pX, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice); cudaMemcpy(d_pY, pY, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice); cudaMemcpy(d_pZ, pZ, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice); // output memory on host contains all min values cudaMemcpy(d_pOut, pOut, sizeof(DTYPE) * sat_npixels, cudaMemcpyHostToDevice); int nfaces = 2 * (dsm_height - 1) * (dsm_width - 1); int nblocks = (nfaces + TPB - 1) / TPB; // compute # of pixels that each face cover // TODO: change to int int* dsmPixelCounts; cudaMalloc((void **)&dsmPixelCounts, sizeof(int) * nfaces); 
cudaMemset(dsmPixelCounts, 0, sizeof(int) * nfaces); kernelComputePointsNum<<<nblocks, TPB>>>(d_pX, d_pY, d_pZ, dsmPixelCounts, nfaces, dsm_width, sat_width, sat_height); // cudaThreadSynchronize(); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to compute number of points " "for each thread!\n" ); int numPixelsLast; cudaMemcpy(&numPixelsLast, dsmPixelCounts + nfaces - 1, sizeof(int), cudaMemcpyDeviceToHost); // exclusive scan to get start index for each face thrust::exclusive_scan(thrust::device, dsmPixelCounts, dsmPixelCounts + nfaces, dsmPixelCounts); // int numPixelsTotal; cudaMemcpy(&numPixelsTotal, dsmPixelCounts + nfaces - 1, sizeof(int), cudaMemcpyDeviceToHost); numPixelsTotal += numPixelsLast; printf("================= %d\n", numPixelsTotal); int* faceIDs; int* pixelIDs; cudaMalloc((void **)&faceIDs, sizeof(int) * numPixelsTotal); cudaMalloc((void **)&pixelIDs, sizeof(int) * numPixelsTotal); kernelGetPoints<<<nblocks, TPB>>>(d_pX, d_pY, d_pZ, dsmPixelCounts, faceIDs, pixelIDs, nfaces, dsm_width, sat_width, sat_height); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to " "get points ids for each face!\n" ); // sort by key thrust::sort_by_key(thrust::device, pixelIDs, pixelIDs + numPixelsTotal, faceIDs); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to " "sort!\n" ); // find start and end points for each pixel int* pixelIDsLimits; cudaMalloc((void **)&pixelIDsLimits, 2 * sizeof(int) * sat_npixels); cudaMemset(pixelIDsLimits, 0, 2 * sizeof(int) * sat_npixels); nblocks = (numPixelsTotal + TPB - 1) / TPB; kernelFindLimits<<<nblocks, TPB>>>(pixelIDs, pixelIDsLimits, numPixelsTotal); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to " "find start and end positions for each pixel!\n" ); // nblocks = (sat_npixels + TPB - 1) / 
TPB; kernelDraw<<<nblocks, TPB>>>(faceIDs, pixelIDsLimits, d_pX, d_pY, d_pZ, d_pOut, sat_npixels, dsm_width, sat_width); cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to " "draw satellite elevation!\n" ); // cudaMemcpy(pOut, dsmPixelCounts, sizeof(float) * min(sat_npixels, nfaces), cudaMemcpyDeviceToHost); cudaMemcpy(pOut, d_pOut, sizeof(DTYPE) * sat_npixels, cudaMemcpyDeviceToHost); }
4,347
#include <stdio.h>
#include <assert.h>

#define N 1024  // number of rows
#define M 1024  // number of columns

// Writes the value (j * M + i) into every cell of a pitched device matrix.
// Each thread handles a colsPerThread x colsPerThread patch (currently 1x1).
__global__ void kernel(float * d_matrix, size_t pitch) {
    int colsPerThread = 1;
    int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
    for (int j = rowstart; j < rowstart + colsPerThread; j++) {
        // pitched row addressing: pitch is in BYTES, hence the char* cast
        float* row_d_matrix = (float*)((char*)d_matrix + j * pitch);
        int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
        for (int i = colstart; i < colstart + colsPerThread; i++) {
            row_d_matrix[i] = j * M + i;
        }
    }
}

// Compares host and device results element by element and reports mismatches.
// FIX: the original printed "Results match" unconditionally, even after
// printing mismatching elements.
void verify(float *h, float *d, int size) {
    int mismatches = 0;
    for (int i = 0; i < size; i++) {
        if (h[i] != d[i]) {
            printf("h[%d]= %f,d[%d]= %f ", i, h[i], i, d[i]);
            mismatches++;
        }
    }
    if (mismatches == 0) {
        printf("Results match\n");
    } else {
        printf("%d mismatches\n", mismatches);
    }
}

// Fills the device matrix with boundary conditions:
//   top row / left column / right column = 0, bottom row = 100, interior = 50.
// FIX: the original derived rows from the flat index (index <= M,
// index % (M-1), index >= 67100672) which did not match the host
// initialisation for a 1024x1024 grid (67100672 is out of range for
// M*N = 1048576); compute the row/column conditions directly instead.
__global__ void fill(float * d_matrix, size_t pitch) {
    int colsPerThread = 1;
    int rowstart = blockIdx.y * blockDim.y + (threadIdx.y * colsPerThread);
    for (int j = rowstart; j < rowstart + colsPerThread; j++) {
        float* row_d_matrix = (float*)((char*)d_matrix + j * pitch);
        int colstart = blockIdx.x * blockDim.x + (threadIdx.x * colsPerThread);
        for (int i = colstart; i < colstart + colsPerThread; i++) {
            if (j == 0 || i == 0 || i == M - 1) {
                row_d_matrix[i] = 0.0f;
            } else if (j == N - 1) {
                row_d_matrix[i] = 100.0f;
            } else {
                row_d_matrix[i] = 50.0f;
            }
        }
    }
}

int main() {
    float *h_matrix;   // host-side reference
    float *d_matrix;   // pitched device matrix
    float *dc_matrix;  // host copy of the device result

    h_matrix  = (float *) malloc(M * N * sizeof(float));
    dc_matrix = (float *) malloc(M * N * sizeof(float));

    // Host-side reference with the same boundary conditions as fill().
    // Note the branch order: a bottom-row corner (j == M-1, i == 0) takes
    // the first branch and gets 0, matching the device kernel.
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < M; i++) {
            if (j == 0 || i == 0 || i == M - 1) {
                h_matrix[j * M + i] = 0.0f;
            } else if (j == M - 1) {  // bottom row
                h_matrix[j * M + i] = 100.0f;
            } else {
                // FIX: was "h_matrix[i] = 50.0;" — missing the row offset,
                // which repeatedly clobbered row 0 and left the interior
                // of the reference uninitialized.
                h_matrix[j * M + i] = 50.0f;
            }
        }
    }

    size_t pitch;
    cudaMallocPitch(&d_matrix, &pitch, M * sizeof(float), N);

    dim3 threadsPerBlock(32, 32, 1);                                 // 32x32 threads per block
    dim3 numBlocks(N / threadsPerBlock.x, M / threadsPerBlock.y, 1); // 32x32 blocks
    fill<<<numBlocks, threadsPerBlock>>>(d_matrix, pitch);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(error));
        return 0;
    }

    // Pitched device memory -> dense host buffer.
    cudaMemcpy2D(dc_matrix, M * sizeof(float), d_matrix, pitch,
                 M * sizeof(float), N, cudaMemcpyDeviceToHost);
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(error));
        return 0;
    }

    verify(h_matrix, dc_matrix, M * N);

    free(h_matrix);
    cudaFree(d_matrix);
    free(dc_matrix);
}
4,348
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <time.h> #include <chrono> //#define SIZE 4194303*1024 //1024*1024 size_t SIZE = 131072 * 1024; #define BLOCKSIZE 1024 __global__ void deviceADD(int* a, int* b, int* c) { int off = threadIdx.x + blockIdx.x * blockDim.x; c[off] = a[off] + b[off]; } void fillramdom(size_t size, int* ptr) { for (size_t i = 0; i < size; i++) { i[ptr] = rand(); } } void errhand(cudaError_t err) { if (err) { printf("Error: %s\n", cudaGetErrorString(err)); std::cout << err << std::endl; } } //cpu版計算function std::chrono::duration<double > calByCPU(size_t size, int* a, int* b, int* ans) { auto start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < size; i++) { i[ans] = i[a] + i[b]; } auto end = std::chrono::high_resolution_clock::now(); return std::chrono::duration_cast < std::chrono::duration<double >> (end - start); } //gpu版計算function //計時器只計算扣掉搬data後的計算時間 std::chrono::duration<double > calByGPU(size_t size, int* a, int* b, int* ans) { int* gpuA, * gpuB, * gpuC; cudaError_t err; auto allocStart = std::chrono::high_resolution_clock::now(); err = cudaMalloc((void**)&gpuA, size * sizeof(int)); errhand(err); err = cudaMalloc((void**)&gpuB, size * sizeof(int)); errhand(err); err = cudaMalloc((void**)&gpuC, size * sizeof(int)); errhand(err); err = cudaMemcpy(gpuA, a, size * sizeof(int), cudaMemcpyHostToDevice); errhand(err); err = cudaMemcpy(gpuB, b, size * sizeof(int), cudaMemcpyHostToDevice); errhand(err); auto allocEnd = std::chrono::high_resolution_clock::now(); auto allocTime = std::chrono::duration_cast <std::chrono::duration<double >> (allocEnd - allocStart); std::cout << "Alloc time : " << allocTime.count() << std::endl; dim3 gridDim(SIZE / BLOCKSIZE,1,1); auto start = std::chrono::high_resolution_clock::now(); deviceADD <<<gridDim, BLOCKSIZE >>> (gpuA, gpuB, gpuC); //<<<block, thread>>> cudaDeviceSynchronize(); auto end = 
std::chrono::high_resolution_clock::now(); err = cudaGetLastError(); errhand(err); //<<<block, thread>>> err = cudaMemcpy(ans, gpuC, size * sizeof(int), cudaMemcpyDeviceToHost); errhand(err); cudaDeviceSynchronize(); cudaFree(gpuA); cudaFree(gpuB); cudaFree(gpuC); return std::chrono::duration_cast <std::chrono::duration<double >> (end - start); } bool equal(size_t size, int* a, int* b) { for (size_t i = 0; i < size; i++) { if (i[a] != i[b]) { std::cout << i[a] << "\t" << i[b] << "\t" << i << std::endl; return false; } } return true; } void printArr(size_t size, int* ptr) { for (size_t i = 0; i < size; i++) { std::cout << i << ": " << i[ptr] << std::endl; } } void benchmark(int time, std::chrono::duration<double > (*func)(size_t, int*, int*, int*), size_t size, int *a , int *b , int * c) { std::cout << "start benchmark" << std::endl; std::cout << "Time\texecute Time" << std::endl; std::cout << "---------------" << std::endl; double total = 0; for (int i = 0; i < time; i++) { auto exeT = (*func)(size, a, b, c); std::cout << i + 1 << "\t" << exeT.count() << std::endl; total += exeT.count(); } std::cout << "average : " << total / time << std::endl; std::cout << "------end------ " << std::endl; } int main(void) { srand(time(NULL)); int* a; int* b; int* c_cpu; int* c_gpu; a = new int[SIZE]; b = new int[SIZE]; c_cpu = new int[SIZE]; c_gpu = new int[SIZE]; fillramdom(SIZE, a); fillramdom(SIZE, b); benchmark(10, calByCPU, SIZE, a, b, c_cpu); benchmark(10, calByGPU, SIZE, a, b, c_gpu); //calByCPU(SIZE, a, b, c_cpu); //calByGPU(SIZE, a, b, c_gpu); //printArr(SIZE, c_gpu); std::cout << (equal(SIZE, c_cpu, c_gpu) ? "True" : "False") << std::endl; delete[] a, b, c_cpu, c_gpu; }
4,349
#include <stdio.h>

const int N = 1 << 29;

// out[i] = a[i] + b[i].
// FIX: added a bounds guard so the kernel is safe for any grid that
// overshoots n (the original wrote out of bounds for such launches).
__global__ void vector_add(float *a, float *b, float *out, long n) {
    long i = (long)blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        out[i] = a[i] + b[i];
    }
}

int main(int argc, char **args) {
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    size_t bytes = sizeof(float) * N;

    a   = (float*) malloc(bytes);
    b   = (float*) malloc(bytes);
    out = (float*) malloc(bytes);

    cudaMalloc((void**) &d_a, bytes);
    cudaMalloc((void**) &d_b, bytes);
    cudaMalloc((void**) &d_out, bytes);

    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }

    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);

    // Ceil-div grid; for N = 1<<29 this is exactly N/1024 blocks.
    vector_add<<<(N + 1023) / 1024, 1024>>>(d_a, d_b, d_out, N);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    }

    // Blocking copy — also synchronizes with the kernel.
    cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost);

    //for(int i = 0; i < N; i++) {
    //    printf("%1.2f\n", out[i]);
    //}
    printf("%d", (int)out[N-1]/3);

    // FIX: the original leaked all six buffers.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(a);
    free(b);
    free(out);
    return 0;
}
4,350
/*********************************************************************
 * Copyright © 2011-2014,
 * Marwan Abdellah: <abdellah.marwan@gmail.com>
 *
 * This library (cufftShift) is free software; you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 ********************************************************************/
#ifndef CUFFTSHIFT_2D_SINGLE_ARRAY_CU
#define CUFFTSHIFT_2D_SINGLE_ARRAY_CU

#include <cuda.h>
// #include <cutil_inline.h>

// In-place fftshift of a single N x N array: swaps quadrant 1 with 3 and
// quadrant 2 with 4. Threads covering the upper half of the image each
// exchange their element with its diagonally opposite partner.
template <typename T>
__global__ void cufftShift_2D_kernel(T* data, int N)
{
    // Global 2D coordinates of this thread.
    const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;

    // Offsets from an upper-half element to its swap partner,
    // derived from the slice size (N*N) and line size (N).
    const int sEq1 = (N * N + N) / 2;  // quadrant 1 <-> quadrant 3
    const int sEq2 = (N * N - N) / 2;  // quadrant 2 <-> quadrant 4

    // Only threads in the upper half perform a swap; the lower half is
    // written as the partner of an upper-half thread.
    if (yIndex < N / 2)
    {
        const int index = (yIndex * N) + xIndex;
        const int partner = (xIndex < N / 2) ? (index + sEq1) : (index + sEq2);
        const T held = data[index];
        data[index] = data[partner];
        data[partner] = held;
    }
}

#endif // CUFFTSHIFT_2D_SINGLE_ARRAY_CU
4,351
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <time.h>

// Number of elements along one side of the (square) matrices
// (per the original author: 1024 crashes their machine).
#define MATRIX_SIZE 512
// Threads per block along each axis (16x16 = 256 threads per block).
#define BLOCK_SIZE 16

// Naive dense matrix multiply C = A * B: each thread computes one output
// element as the dot product of row row_idx of A with column col_idx of B.
// Launch layout: 2D grid of 2D blocks covering MATRIX_SIZE x MATRIX_SIZE.
__global__ void matrixMul(int* inMatrixA, int* inMatrixB, int* inMatrixC)
{
    int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row_idx = blockIdx.y * blockDim.y + threadIdx.y;

    /* Accumulate the dot product for this output element. */
    int total = 0;
    for (int i = 0; i < MATRIX_SIZE; i++) {
        total += inMatrixA[row_idx * MATRIX_SIZE + i] * inMatrixB[i * MATRIX_SIZE + col_idx];
        // The original code synchronized here, but it is unnecessary
        // (no shared memory is involved).
        //__syncthreads();
    }
    inMatrixC[row_idx * MATRIX_SIZE + col_idx] = total;
}

int main(int argc, char** argv)
{
    // Buffer size in bytes (sizeof(unsigned int) == sizeof(int), so this
    // matches the int* buffers below).
    unsigned int matrixSize = sizeof(unsigned int) * MATRIX_SIZE * MATRIX_SIZE;

    // Host-side buffers.
    int* hMatrixA = (int*)malloc(matrixSize);
    int* hMatrixB = (int*)malloc(matrixSize);
    int* hMatrixC = (int*)malloc(matrixSize);

    /* Initial values: random fill. (Note col_idx/row_idx play swapped
       roles in the indexing here, which is harmless for a random fill.) */
    unsigned int col_idx, row_idx;
    for (col_idx = 0; col_idx < MATRIX_SIZE; col_idx++){
        for (row_idx = 0; row_idx < MATRIX_SIZE; row_idx++){
            hMatrixA[col_idx * MATRIX_SIZE + row_idx] = rand() % 1024;
            hMatrixB[col_idx * MATRIX_SIZE + row_idx] = rand() % 1024;
        }
    }

    // Device-side buffers.
    int* dMatrixA;
    int* dMatrixB;
    int* dMatrixC;

    // Allocate the device-side buffers.
    cudaMalloc((void**)&dMatrixA, matrixSize);
    cudaMalloc((void**)&dMatrixB, matrixSize);
    cudaMalloc((void**)&dMatrixC, matrixSize);

    // Copy input data from the host buffers to the device buffers.
    cudaMemcpy(dMatrixA, hMatrixA, matrixSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dMatrixB, hMatrixB, matrixSize, cudaMemcpyHostToDevice);

    /* Block and grid dimensions (MATRIX_SIZE is a multiple of BLOCK_SIZE,
       so the grid covers the matrices exactly). */
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE/BLOCK_SIZE, MATRIX_SIZE/BLOCK_SIZE);

    // Launch the kernel.
    matrixMul<<<grid, block>>>(dMatrixA, dMatrixB, dMatrixC);

    // Copy the result from the device back to the host
    // (blocking copy, so it also waits for the kernel).
    cudaMemcpy(hMatrixC, dMatrixC, matrixSize, cudaMemcpyDeviceToHost);

    // Release host memory.
    free(hMatrixA);
    free(hMatrixB);
    free(hMatrixC);

    // Release device memory.
    cudaFree(dMatrixA);
    cudaFree(dMatrixB);
    cudaFree(dMatrixC);

    cudaDeviceReset();
}
4,352
# include <cuda.h>
# include <cuda_runtime.h>

extern "C" unsigned char * DFTimageCuda(unsigned char * data, int width, int height);

// First pass: 1-D DFT of each image COLUMN.
// Thread (k, b) computes vertical-frequency bin k of column b; the scaled
// real/imaginary parts are written to PkbReal_dev / PkbIm_dev.
__global__ void processPixelVertical(unsigned char * data_dev, double * PkbReal_dev, double * PkbIm_dev, int width, int height){
    int posThread = blockIdx.x*blockDim.x + threadIdx.x;
    if(posThread < width*height){
        int k = posThread/width;        // vertical frequency index
        int b = posThread - k*width;    // column
        double sumReal = 0.0;
        double sumIm = 0.0;
        for(int a = 0; a < height; a++){
            // NOTE(review): 3.1416 is a low-precision pi (kept to limit the
            // change); consider M_PI for a real accuracy fix.
            double theta = -2.0*3.1416*k*a/height;
            // FIX: use double-precision cos/sin to match the double
            // accumulators (the original mixed cosf/sinf into double math).
            sumReal += (double)data_dev[b + width*a]*cos(theta);
            sumIm   += (double)data_dev[b + width*a]*sin(theta);
        }
        PkbReal_dev[b + width*k] = sumReal/(double)height;
        PkbIm_dev[b + width*k]   = sumIm/(double)height;
    }
}

// Second pass: 1-D DFT of each ROW of the column-transformed data; the
// magnitude is written back into data_dev (overwriting the input copy).
__global__ void processPixelHorizontal(unsigned char *data_dev, double * PkbReal_dev, double * PkbIm_dev, int width, int height){
    int posThread = blockIdx.x*blockDim.x + threadIdx.x;
    if(posThread < width*height){
        int k = posThread/width;        // row (vertical frequency from pass 1)
        int l = posThread - k*width;    // horizontal frequency index
        double sumReal = 0.0;
        double sumIm = 0.0;
        for(int b = 0; b < width; b++){
            double theta = -2.0*3.1416*l*b/width;
            // Complex multiply-accumulate: (Re + i*Im) * e^{i*theta}.
            sumReal += PkbReal_dev[b + k*width]*cos(theta) - PkbIm_dev[b + k*width]*sin(theta);
            sumIm   += PkbReal_dev[b + k*width]*sin(theta) + PkbIm_dev[b + k*width]*cos(theta);
        }
        sumReal = sumReal/width;
        // FIX: was "sumIm += sumIm/width", which doubled (not normalized)
        // the imaginary part; mirror the sumReal normalization.
        sumIm = sumIm/width;
        double magnitude = sqrt(sumReal*sumReal + sumIm*sumIm);
        data_dev[k*width + l] = (unsigned char) magnitude;
    }
}

// Computes the 2-D DFT magnitude of an 8-bit grayscale image on the GPU.
// Returns a newly allocated width*height buffer owned by the caller.
unsigned char * DFTimageCuda(unsigned char * data, int width, int height){
    unsigned char * dataDev;
    cudaMalloc((void**)&dataDev, width*height*sizeof(unsigned char));

    double * PkbRealDev;
    double * PkbImDev;
    cudaMalloc((void**)&PkbRealDev, width*height*sizeof(double));
    cudaMalloc((void**)&PkbImDev, width*height*sizeof(double));

    cudaMemcpy(dataDev, data, width*height*sizeof(unsigned char), cudaMemcpyHostToDevice);

    // One thread per pixel, ceil-div grid.
    int nthreads = 1024;
    int nblocks = (width*height + nthreads - 1)/nthreads;

    // Both kernels run on the default stream, so the second implicitly
    // waits for the first.
    processPixelVertical<<<nblocks, nthreads>>>(dataDev, PkbRealDev, PkbImDev, width, height);
    processPixelHorizontal<<<nblocks, nthreads>>>(dataDev, PkbRealDev, PkbImDev, width, height);

    unsigned char * Dft = new unsigned char[width*height];
    // Blocking copy — synchronizes with the kernels.
    cudaMemcpy(Dft, dataDev, width*height*sizeof(unsigned char), cudaMemcpyDeviceToHost);

    cudaFree(dataDev);
    cudaFree(PkbRealDev);
    cudaFree(PkbImDev);
    return Dft;
}

int main(){
}
4,353
#include <iostream>
#include <cuda_runtime.h>

using namespace std;

// Even indices: C[i] = A[i] + B[i]; odd indices: C[i] = A[i] * B[i].
// Bounds-guarded for grids that overshoot n.
__global__ void sum_kernel(double* A, double* B, double* C, int n){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n){
        double a = A[idx];
        double b = B[idx];
        if (idx % 2 == 0)
            C[idx] = a + b;
        else
            C[idx] = a * b;
    }
}

int main(int argc, char **argv){
    // FIX: guard against a missing command-line argument
    // (the original dereferenced argv[1] unconditionally).
    if (argc < 2){
        cerr << "Usage: " << argv[0] << " <n>" << endl;
        return 1;
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int n = atoi(argv[1]);
    size_t bytes = n * sizeof(double);

    double *h_a = (double *) malloc(bytes);
    double *h_b = (double *) malloc(bytes);
    double *h_c = (double *) malloc(bytes);

    // sin^2(i) + cos^2(i): even-index outputs should be ~1.0.
    for (int i = 0; i < n; i++){
        h_a[i] = sin(i) * sin(i);
        h_b[i] = cos(i) * cos(i);
    }

    double *d_a, *d_b, *d_c;

    // Timed region intentionally includes allocation and transfers.
    cudaEventRecord(start);
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    int blockSize = 1024;
    int gridSize = (n - 1) / 1024 + 1;  // ceil(n / 1024)
    sum_kernel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    cudaDeviceSynchronize();

    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout << milliseconds << endl;

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // FIX: release events and host buffers (the original leaked them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_a);
    free(h_b);
    free(h_c);

    /*
    for (int i = 0; i < n; i++){
        cout << h_c[i] << endl;
    }
    */
    return 0;
}
4,354
#include "includes.h"

// Backward helper for a max-selection layer: where argMax[i] == idx the
// upstream gradient diffInput[i] flows through, elsewhere the contribution
// is zero; beta blends the new gradient with the existing result
// (result = grad + beta * result). Grid-stride loop over `size` elements.
__global__ void cudaSMaxBackward_kernel(unsigned int size, float* diffInput, const unsigned int idx, unsigned int* argMax, const float beta, float* result)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;

    if (beta == 0.0f) {
        // No accumulation: overwrite the output outright.
        for (unsigned int i = first; i < size; i += step) {
            result[i] = (argMax[i] == idx) ? diffInput[i] : 0.0f;
        }
    }
    else {
        // Blend with the previously stored gradient.
        for (unsigned int i = first; i < size; i += step) {
            const float carried = beta * result[i];
            result[i] = (argMax[i] == idx) ? (diffInput[i] + carried) : carried;
        }
    }
}
4,355
#include "includes.h"

// Adds the per-channel bias b[ch] to every element of the NCHW tensor X.
// Launch layout implied by the indexing: grid = (N, ch_in),
// block = (h_in, w_in) — one thread per spatial element.
__global__ void forward_bias(float *X, float *b, int N, int ch_in, int h_in, int w_in)
{
    const int sample  = blockIdx.x;   // batch index
    const int channel = blockIdx.y;   // channel index
    const int row     = threadIdx.x;  // height position
    const int col     = threadIdx.y;  // width position

    // Flattened NCHW offset: ((n * C + c) * H + h) * W + w.
    const int offset = ((sample * ch_in + channel) * h_in + row) * w_in + col;
    X[offset] += b[channel];
}
4,356
// Maria Isabel Ortiz Naranjo
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define N 16

// Element-wise addition: c = a + b (only the first N threads do work).
__global__ void kernel( int *a, int *b, int *c )
{
    int myID = threadIdx.x + blockDim.x * blockIdx.x;
    if (myID < N)
    {
        c[myID] = a[myID] + b[myID];
    }
}

// Element-wise product: c = a * b.
__global__ void kernel2( int *a, int *b, int *c )
{
    int myID = threadIdx.x + blockDim.x * blockIdx.x;
    if (myID < N)
    {
        c[myID] = a[myID] * b[myID];
    }
}

int main(int argc, char** argv)
{
    cudaStream_t stream1, stream2;
    // FIX: the streams were declared but never created; passing an
    // uninitialized cudaStream_t to cudaMemcpyAsync / kernel launches
    // is undefined behavior.
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);

    int *a1, *b1, *c1;             // stream 1 host buffers (pinned)
    int *a2, *b2, *c2;             // stream 2 host buffers (pinned)
    int *dev_a1, *dev_b1, *dev_c1; // stream 1 device buffers
    int *dev_a2, *dev_b2, *dev_c2; // stream 2 device buffers

    // Stream 1 allocations. Pinned host memory (cudaHostAlloc) is
    // required for cudaMemcpyAsync to actually overlap with compute.
    cudaMalloc( (void**)&dev_a1, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b1, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c1, N * sizeof(int) );
    cudaHostAlloc( (void**)&a1, N * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc( (void**)&b1, N * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc( (void**)&c1, N * sizeof(int), cudaHostAllocDefault);

    // Stream 2 allocations.
    cudaMalloc( (void**)&dev_a2, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b2, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c2, N * sizeof(int) );
    cudaHostAlloc( (void**)&a2, N * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc( (void**)&b2, N * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc( (void**)&c2, N * sizeof(int), cudaHostAllocDefault);

    for (int i = 0; i < N; i++){
        a1[i] = i;
        b1[i] = a1[i] + i;
        a2[i] = i;
        b2[i] = a1[i] * i;
    }

    // Process the data in chunks, interleaving stream 1 and stream 2
    // (with these sizes the loop runs exactly once).
    for (int i = 0; i < N; i += N*2)
    {
        cudaMemcpyAsync(dev_a1, a1, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(dev_a2, a2, N*sizeof(int), cudaMemcpyHostToDevice, stream2);
        cudaMemcpyAsync(dev_b1, b1, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(dev_b2, b2, N*sizeof(int), cudaMemcpyHostToDevice, stream2);

        // Ceil-div launch configuration (one 1024-thread block for N = 16).
        kernel<<<(N + 1023)/1024, 1024, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
        kernel2<<<(N + 1023)/1024, 1024, 0, stream2>>>(dev_a2, dev_b2, dev_c2);

        cudaMemcpyAsync(c1, dev_c1, N*sizeof(int), cudaMemcpyDeviceToHost, stream1);
        cudaMemcpyAsync(c2, dev_c2, N*sizeof(int), cudaMemcpyDeviceToHost, stream2);
    }

    // Wait for all queued asynchronous work before reading the results.
    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);

    printf("Stream 1 \n");
    printf("a1 \n");
    for (int i = 0; i < N; i++){ printf("%d \n", a1[i]); }
    printf("b1 \n");
    for (int i = 0; i < N; i++){ printf("%d \n", b1[i]); }
    printf("c1 \n");
    for (int i = 0; i < N; i++){ printf("%d \n", c1[i]); }

    printf("Stream 2 \n");
    printf("a2 \n");
    for (int i = 0; i < N; i++){ printf("%d \n", a2[i]); }
    printf("b2 \n");
    for (int i = 0; i < N; i++){ printf("%d \n", b2[i]); }
    printf("c2 \n");
    for (int i = 0; i < N; i++){ printf("%d \n", c2[i]); }

    // FIX: release device and pinned host memory (the original leaked all of it).
    cudaFree(dev_a1); cudaFree(dev_b1); cudaFree(dev_c1);
    cudaFree(dev_a2); cudaFree(dev_b2); cudaFree(dev_c2);
    cudaFreeHost(a1); cudaFreeHost(b1); cudaFreeHost(c1);
    cudaFreeHost(a2); cudaFreeHost(b2); cudaFreeHost(c2);

    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    return 0;
}
4,357
// RK45.cu
//
// Fixed-time-step 4th-order Runge-Kutta integration driving the DCA algorithm.

//Included Files
#include <iostream>

//Function Prototypes
// Functions found in Functs.cu
void arycpy(double A[], double B[], int n);
void arycpy2(double A[], double B[], int n);
void arycpy3(double A[], double B[], int n);
void set_up(double A[], double B[], double C[], int n, double h);
void get_final_q(double x[], double h, double a[], double b[], double c[], double d[], double R[], int n);
void get_final_qdot(double x[], double h, double a[], double b[], double c[], double d[], double R[], int n);
void DCAhelp(int n, bool Active_DOF[], double Coords[], double Speeds[], int dof_index[], double initZetas[], double Mass[], double Inertia[], int DOF, double Y[], int cut_off, double Body_Vectors[]);

//RK_45
//  Advances the system one fixed time step with classic RK4, evaluating the
//  DCA-based derivative (DCAhelp) at each of the four stages.
//  step - length of one time step
//  n - number of bodies; DOF - number of degrees of freedom
//  Coords / Speeds - state at the current time step
//  Y / Ydot - outputs: coordinates and speeds at the next time step
void RK_45(double step, int n, bool Active_DOF[], double Coords[], double Speeds[], double initZetas[], double Mass[], double Inertia[], int DOF, double Y[], double Ydot[], int dof_index[], int cut_off, double Body_Vectors[])
{
    // Stage work arrays.
    double *q = new double[DOF];
    double *qdot = new double[DOF];
    double *q1 = new double[DOF];
    double *qdot1 = new double[DOF];
    double *q2 = new double[DOF];
    double *qdot2 = new double[DOF];
    double *q3 = new double[DOF];
    double *qdot3 = new double[DOF];
    double *q4 = new double[DOF];
    double *qdot4 = new double[DOF];
    double *Y1 = new double[2*n];
    double *Y2 = new double[2*n];
    double *Y3 = new double[2*n];
    double *Y4 = new double[2*n];

    double h = step/2.0;  // half step used by the midpoint stages

    // FIX: q and qdot were read by the set_up() calls below while still
    // uninitialized; seed them with the current state first.
    arycpy(q, Coords, DOF);
    arycpy(qdot, Speeds, DOF);

    // Stage 1: slopes at the current state.
    arycpy(q1, Coords, DOF);
    arycpy(qdot1, Speeds, DOF);
    DCAhelp(n, Active_DOF, q1, qdot1, dof_index, initZetas, Mass, Inertia, DOF, Y1, cut_off, Body_Vectors);

    // Stage 2: midpoint state using stage-1 slopes.
    set_up(q, qdot1, q2, DOF, h);
    set_up(qdot, Y1, qdot2, DOF, h);
    DCAhelp(n, Active_DOF, q2, qdot2, dof_index, initZetas, Mass, Inertia, DOF, Y2, cut_off, Body_Vectors);

    // Stage 3: midpoint state using stage-2 slopes.
    set_up(q, qdot2, q3, DOF, h);
    set_up(qdot, Y2, qdot3, DOF, h);
    DCAhelp(n, Active_DOF, q3, qdot3, dof_index, initZetas, Mass, Inertia, DOF, Y3, cut_off, Body_Vectors);

    // Stage 4: full-step state using stage-3 slopes.
    set_up(q, qdot3, q4, DOF, h*2);
    // FIX: the speed update was passed n (the body count) instead of DOF,
    // so part of qdot4 was left uninitialized whenever n != DOF.
    set_up(qdot, Y3, qdot4, DOF, h*2);
    DCAhelp(n, Active_DOF, q4, qdot4, dof_index, initZetas, Mass, Inertia, DOF, Y4, cut_off, Body_Vectors);

    // Weighted RK4 combination of the four stages.
    get_final_q(q, h, qdot1, qdot2, qdot3, qdot4, Y, DOF);
    get_final_qdot(qdot, h, Y1, Y2, Y3, Y4, Ydot, DOF);

    std::cout<<Ydot[0]<<'\t'<<Ydot[1]<<std::endl;

    delete[] q;
    delete[] qdot;
    delete[] q1;
    delete[] qdot1;
    delete[] q2;
    delete[] qdot2;
    delete[] q3;
    delete[] qdot3;
    delete[] q4;
    delete[] qdot4;
    delete[] Y1;
    delete[] Y2;
    delete[] Y3;
    delete[] Y4;
}

//arycpy:
//  Copies array B into array A (n elements).
void arycpy(double A[], double B[], int n)
{
    // FIX: the original also incremented i inside the loop body, so only
    // every other element (0, 2, 4, ...) was actually copied.
    for (int i = 0; i < n; i++)
    {
        A[i] = B[i];
    }
}

//set_up:
//  Euler predictor: C[i] = A[i] + h * B[i] for n elements.
void set_up(double A[], double B[], double C[], int n, double h)
{
    for (int i = 0; i < n; i++)
    {
        C[i] = A[i] + (h*B[i]);
    }
}

//get_final_q:
//  RK4 combination for the positions at the next time step:
//  R = x + (h/3) * (a + 2b + 2c + d), where h is HALF the time step.
//  x is q; a..d are the four stage speed arrays; n is the array length.
void get_final_q(double x[], double h, double a[], double b[], double c[], double d[], double R[], int n)
{
    for (int i = 0; i < n; i++)
    {
        R[i] = x[i] + ((h/3.0)*(a[i] + (2*b[i]) + (2*c[i]) + d[i]));
    }
}

//get_final_qdot:
//  Same RK4 combination applied to the speeds; a..d are the four stage
//  acceleration arrays.
void get_final_qdot(double x[], double h, double a[], double b[], double c[], double d[], double R[], int n)
{
    for (int i = 0; i < n; i++)
    {
        R[i] = x[i] + ((h/3.0)*(a[i] + (2*b[i]) + (2*c[i]) + d[i]));
    }
}
4,358
#include <stdio.h> #include <iostream> #include <cuda.h> #include <math.h> #include <cuda_runtime.h> #include <ctime> //~ #include <thrust/reduce.h> //~ #include <reduction.h> extern "C" void apply_bc_cuda_(double* p_2); extern "C" void catch_divergence_cuda_(double res2,int ierr,int it); extern "C" void collect_res2_cuda_(double* res2, double* tres2, int* ierr, int* it); static void HandleError( cudaError_t err, const char *file,int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) void printGPUprops(void){ int count; cudaDeviceProp prop; HANDLE_ERROR( cudaGetDeviceCount( &count ) ); for (int i=0; i< count; i++) { HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) ); printf( "-- General Information for device %d ---\n", i ); printf( "Name:%s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap:" ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "Kernel execition timeout :" ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "--- Memory Information for device %d ---\n", i ); printf( "Total global mem:%ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment:%ld\n", prop.textureAlignment ); printf( "--- MP Information for device %d ---\n", i ); printf( "Multiprocessor count:%d\n",prop.multiProcessorCount ); printf( "Shared mem per mp:%ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp:%d\n", prop.regsPerBlock ); printf( "Threads in warp:%d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], 
prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } } //////////// CUDA functions /////////////// __device__ __host__ int convert_3Dto1Dsingle(int *coord, int *dim_d) { int index = coord[0] + coord[1]*(dim_d[0]-1) + coord[2]*((dim_d[0]-1)*(dim_d[1]-1)); printf("%d\n", index); return index; } __device__ __host__ void convert_3Dto1D(int *coord, int *dim_d, int* vec, int i) { vec[i] = coord[0] + coord[1]*(dim_d[0]) + coord[2]*((dim_d[0])*(dim_d[1])); } __device__ __host__ void convert_4Dto1D(int *coord, int *dim_d, int* vec, int i) { vec[i] = coord[0] + coord[1]*dim_d[0] + coord[2]*(dim_d[0]*dim_d[1])+ coord[3]*(dim_d[0]*dim_d[1]*dim_d[2]); } __device__ __host__ void convert_1Dto3D(int i, int *dim_d, int *coord) { coord[0] = i%dim_d[0]; coord[1] = (i/dim_d[0])%dim_d[1]; coord[2] = (i/dim_d[0]/dim_d[1]); } __device__ __host__ void indexP(int *iP, int *dim_d, int *coord) { // original int tempC[3]={coord[0],coord[1],coord[2]}; convert_3Dto1D(tempC,dim_d,iP,0); // +X tempC[0]=coord[0]+1; tempC[1]=coord[1]; tempC[2]=coord[2]; convert_3Dto1D(tempC,dim_d,iP,1); // -X tempC[0]=coord[0]-1; tempC[1]=coord[1]; tempC[2]=coord[2]; convert_3Dto1D(tempC,dim_d,iP,2); // +Y tempC[0]=coord[0]; tempC[1]=coord[1]+1; tempC[2]=coord[2]; convert_3Dto1D(tempC,dim_d,iP,3); // -Y tempC[0]=coord[0]; tempC[1]=coord[1]-1; tempC[2]=coord[2]; convert_3Dto1D(tempC,dim_d,iP,4); // +Z tempC[0]=coord[0]; tempC[1]=coord[1]; tempC[2]=coord[2]+1; convert_3Dto1D(tempC,dim_d,iP,5); // -Z tempC[0]=coord[0]; tempC[1]=coord[1]; tempC[2]=coord[2]-1; convert_3Dto1D(tempC,dim_d,iP,6); } __device__ __host__ void indexA(int *iA, int *dim_d, int *coord) { int tempC[4] = {coord[0],coord[1],coord[2],0}; convert_4Dto1D(tempC,dim_d,iA,0); tempC[3] = 1; convert_4Dto1D(tempC,dim_d,iA,1); tempC[3] = 2; convert_4Dto1D(tempC,dim_d,iA,2); tempC[3] = 3; convert_4Dto1D(tempC,dim_d,iA,3); tempC[3] = 4; 
convert_4Dto1D(tempC,dim_d,iA,4); tempC[3] = 5; convert_4Dto1D(tempC,dim_d,iA,5); tempC[3] = 6; convert_4Dto1D(tempC,dim_d,iA,6); tempC[3] = 7; convert_4Dto1D(tempC,dim_d,iA,7); } __global__ void p_iter(int *dim_d, int dimp, int dimStencil, int Ng, double *A_d, double *p_1, double *p_2, double beta) { int iA[8]; int iP[7]; int coordP[3]; int coordA[3]; int dimS[3]; dimS[0] =*(dim_d) -2*Ng; dimS[1] =*(dim_d+1) -2*Ng; dimS[2] =*(dim_d+2) -2*Ng; int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < dimStencil; i += stride) { convert_1Dto3D(i,dimS,coordA); coordP[0]=coordA[0]+Ng; coordP[1]=coordA[1]+Ng; coordP[2]=coordA[2]+Ng; indexP(iP, dim_d, coordP); indexA(iA, dimS, coordA); p_2[iP[0]]=(1.0-beta)*p_1[iP[0]] + (beta/A_d[iA[6]])*( A_d[iA[0]] * p_1[iP[2]] + A_d[iA[1]] * p_1[iP[1]] + A_d[iA[2]] * p_1[iP[4]] + A_d[iA[3]] * p_1[iP[3]] + A_d[iA[4]] * p_1[iP[6]] + A_d[iA[5]] * p_1[iP[5]] + A_d[iA[7]]); } } __global__ void L1L2_norm(int *dim_d, int dimStencil, int Ng, double *A_d, double *p_1, double *res2, int norm) { int iA[8]; int iP[7]; int coordP[3]; int coordA[3]; int dimS[3]; dimS[0] =*(dim_d) -2*Ng; dimS[1] =*(dim_d+1) -2*Ng; dimS[2] =*(dim_d+2) -2*Ng; int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < dimStencil; i += stride) { convert_1Dto3D(i,dimS,coordA); coordP[0]=coordA[0]+Ng; coordP[1]=coordA[1]+Ng; coordP[2]=coordA[2]+Ng; indexP(iP, dim_d, coordP); indexA(iA, dimS, coordA); res2[i]=powf(abs(-p_1[iP[0]]*A_d[iA[6]] + A_d[iA[0]] * p_1[iP[2]] + A_d[iA[1]] * p_1[iP[1]] + A_d[iA[2]] * p_1[iP[4]] + A_d[iA[3]] * p_1[iP[3]] + A_d[iA[4]] * p_1[iP[6]] + A_d[iA[5]] * p_1[iP[5]] + A_d[iA[7]]), __int2float_rd(norm)); } } __global__ void catchDivergenceA(int dimA, double *A_d, int catchDiv) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < dimA; i += stride) { if (isnan(A_d[i])){catchDiv = 1;} 
} } __global__ void catchDivergenceP(int dimp, double *p_d, int catchDiv) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < dimp; i += stride) { if (isnan(p_d[i])){catchDiv = 1;} } } /////////// C-CUDA functions //////////// extern "C" void linearCUDA_(double *A, double *p, int* dimM, int *Ng, double* maxError, double* beta, int *maxit, int* it, int* ierr, int* norm, double* tres2) { static double *A_d=NULL, *p_1=NULL, *p_2=NULL, *res2=NULL; static int *dim_d=NULL; int blockSize = 512; int dimMA[3] = {dimM[0]-2*(*Ng),dimM[1]-2*(*Ng),dimM[2]-2*(*Ng)}; int dimA = (dimMA[0]*dimMA[1]*dimMA[2]*8); int dimp = dimM[0]*dimM[1]*dimM[2]; int dimStencil = (dimMA[0]*dimMA[1]*dimMA[2]); double *res2_reduced; double init_res2; int *CoiA; //~ bool catchDiv = false; int catchDiv=0; //~ printGPUprops(); clock_t t1=0.0, t2=0.0, t3=0.0, t4=0.0, t5=0.0, t0=0.0, t6=0.0; int numBlocks = (dimStencil + blockSize - 1) / blockSize; if (numBlocks>65535){numBlocks=65535;} t0 = clock(); // // ----------------------Allocate Unified Memory – accessible from CPU or GPU if (p_1==NULL){ cudaMallocManaged(&A_d, dimA*sizeof(double)); cudaMallocManaged(&p_1, dimp*sizeof(double)); cudaMallocManaged(&p_2, dimp*sizeof(double)); cudaMallocManaged(&res2, dimStencil*sizeof(double)); cudaMallocManaged(&dim_d, 3*sizeof(int)); cudaMallocManaged(&CoiA, dimStencil*13*sizeof(int)); } cudaMemcpy(A_d, A, dimA*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(p_2, p, dimp*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(p_1, p, dimp*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dim_d, dimM, 3*sizeof(int), cudaMemcpyHostToDevice); t1=t1 + clock() - t0; // // ------------------------------Run kernel on the GPU for ((*it)=0;(*it)<(*maxit);(*it)++){ // Calls one iteration step alternating the matrixes if ((*it)%2==0){ // Calls one iteration step output p_2 t0 = clock(); p_iter<<<numBlocks, blockSize>>>(dim_d, dimp, dimStencil, (*Ng), A_d, p_1, 
p_2, (*beta)); cudaDeviceSynchronize(); t2 = t2 +clock() -t0; t0 = clock(); // impose the BC through Fortran on p_2 apply_bc_cuda_(p_2); cudaDeviceSynchronize(); t3 = t3 +clock() -t0; t0 = clock(); // Compute the res2 on p_2 for each element and create the vector res_2 L1L2_norm<<<numBlocks, blockSize>>>(dim_d, dimStencil, (*Ng), A_d, p_2, res2, (*norm)); cudaDeviceSynchronize(); t4 = t4 +clock() -t0; } else if((*it)%2==1){ t0 = clock(); // Calls one iteration step output p_1 p_iter<<<numBlocks, blockSize>>>(dim_d, dimp, dimStencil, (*Ng), A_d, p_2, p_1, (*beta)); cudaDeviceSynchronize(); t2 = t2 +clock() -t0; t0 = clock(); // impose the BC through Fortran on p_1 apply_bc_cuda_(p_1); cudaDeviceSynchronize(); t3 = t3 +clock() -t0; t0 = clock(); // Compute the res2 on p_2 for each element and create the vector res_2 L1L2_norm<<<numBlocks, blockSize>>>(dim_d, dimStencil, (*Ng), A_d, p_1, res2, (*norm)); cudaDeviceSynchronize(); t4 = t4 +clock() -t0; t0 = clock(); } // Reduces the res_2 vector to res2_reduced and divedes it for the number of elements TO IMPROVE init_res2 = 0.0; for (int i=0 ; i < dimStencil ; i++){ init_res2=init_res2 + res2[i]; } t5 = t5 +clock() -t0; init_res2=init_res2/dimStencil; res2_reduced=&init_res2; catchDivergenceA<<<numBlocks, blockSize>>>(dimA,A_d,catchDiv); cudaDeviceSynchronize(); if ((*it)%2==0){ catchDivergenceP<<<numBlocks, blockSize>>>(dimp,p_2,catchDiv); cudaDeviceSynchronize(); } else if((*it)%2==1){ catchDivergenceP<<<numBlocks, blockSize>>>(dimp,p_1,catchDiv); cudaDeviceSynchronize(); } if (catchDiv==1){catch_divergence_cuda_((*res2_reduced),(*ierr),(*it));} t0 = clock(); cudaDeviceSynchronize(); collect_res2_cuda_(res2_reduced,tres2,ierr,it); t6 = t6 +clock() -t0; if ((*norm)==2) {(*tres2)=sqrt((*tres2));} cudaDeviceSynchronize(); if (*tres2<*maxError){break;} } // // -----------------------Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); t0 = clock(); // // -----------------------Synchronize the memory if 
((*it)%2==0){ cudaMemcpy(p, p_2, dimp*sizeof(double), cudaMemcpyDeviceToHost);} else if((*it)%2==1){ cudaMemcpy(p, p_1, dimp*sizeof(double), cudaMemcpyDeviceToHost);} cudaMemcpy(A, A_d, dimA*sizeof(double), cudaMemcpyDeviceToHost); t1 = t1 +clock() -t0; // printf("%f %f %f %f %f %f \n time elapsed",(double) t1/CLOCKS_PER_SEC,(double)t2/CLOCKS_PER_SEC,(double)t3/ CLOCKS_PER_SEC,(double)t4/ CLOCKS_PER_SEC,(double)t5/ CLOCKS_PER_SEC,(double)t6/ CLOCKS_PER_SEC); //~ // // --------------------------------Free memory //~ cudaFree(A_d); //~ cudaFree(p_1); //~ cudaFree(p_2); //~ cudaFree(res2); //~ cudaFree(dim_d); }
4,359
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <iostream>
#include <chrono>
#include <climits>

using namespace std;

float makeCPU(int* inData, int N);
float cudaParallel(int* inData, int N);

// Fills the array with the descending ramp 101, 100, 99, ...
void init(int* inData, int N)
{
	for (int i = 0; i < N; i++)
		inData[i] = 100 - i + 1;
}

// Block-local minimum reduction: grid-stride load phase followed by a
// shared-memory tree. Each block writes its partial minimum to
// outData[blockIdx.x]. Requires blockDim.x to be a power of two, <= 256.
//
// BUG FIX: the original kernel executed `continue` without advancing its
// `before`/`after` indices whenever inData[before] < inData[after], spinning
// forever; it also relied on __syncthreads() ordering work across DIFFERENT
// blocks, which CUDA does not provide. This version only synchronizes within
// a block; the cross-block combine is a second launch (see cudaParallel).
__global__ void reductionKernelMinimum(const int *inData, int *outData, int N)
{
	__shared__ int cache[256];

	const int tid = threadIdx.x;
	const int stride = blockDim.x * gridDim.x;

	// Phase 1: each thread folds its grid-stride slice into a register.
	int best = INT_MAX;
	for (int i = blockIdx.x * blockDim.x + tid; i < N; i += stride)
		best = min(best, inData[i]);
	cache[tid] = best;
	__syncthreads();

	// Phase 2: tree reduction in shared memory.
	for (int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tid < s)
			cache[tid] = min(cache[tid], cache[tid + s]);
		__syncthreads();
	}

	if (tid == 0)
		outData[blockIdx.x] = cache[0];
}

int main()
{
	int N;
	while (true)
	{
		cout << "Enter number of elements: " << endl;
		cin >> N;
		const int elementsCount = N;
		cout << "Reduction for: " << elementsCount << endl;

		int *a = new int[elementsCount];

		init(a, elementsCount);
		float gpuTime = cudaParallel(a, elementsCount);
		cout << "Time on gpu: " << gpuTime << endl;

		init(a, elementsCount);
		float cpuTime = makeCPU(a, elementsCount);
		cout << "Time on cpu in ns " << cpuTime << endl;

		delete[] a; // BUG FIX: the original leaked the array every iteration
	}
	return 0;
}

// Sequential reference scan. Returns the elapsed time in nanoseconds
// (the minimum itself is discarded, as in the original).
float makeCPU(int* inData, int N)
{
	int min = inData[0];
	std::chrono::time_point<std::chrono::system_clock> start, end;
	start = std::chrono::system_clock::now();
	for (int i = 1; i < N; i++)
	{
		if (inData[i] < min)
			min = inData[i];
	}
	end = std::chrono::system_clock::now();
	int elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
	return elapsed;
}

// Two-pass GPU minimum reduction. Returns the kernel time in milliseconds.
// On return inData[0] holds the minimum; the rest of inData is scratch.
float cudaParallel(int* inData, int N)
{
	const int threads = 256;
	// BUG FIX: the original used N / 256 blocks, which launches ZERO blocks
	// for N < 256 and skips the tail otherwise. Use ceil-division and cap the
	// grid so pass 2 can finish with a single block.
	int blocks = (N + threads - 1) / threads;
	if (blocks > threads) blocks = threads;

	int* deviceData;
	int* devicePartial;
	cudaMalloc((void**)&deviceData, N * sizeof(int));
	cudaMalloc((void**)&devicePartial, blocks * sizeof(int));
	cudaMemcpy(deviceData, inData, N * sizeof(int), cudaMemcpyHostToDevice);

	cudaEvent_t start, stop;
	float gpuTime = 0.0f;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start, 0);
	// Pass 1: one partial minimum per block.
	reductionKernelMinimum<<<blocks, threads>>>(deviceData, devicePartial, N);
	// Pass 2: a single block folds the partials; result lands in deviceData[0].
	reductionKernelMinimum<<<1, threads>>>(devicePartial, deviceData, blocks);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gpuTime, start, stop);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	cudaMemcpy(inData, deviceData, N * sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(deviceData);
	cudaFree(devicePartial);
	return gpuTime;
}
4,360
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <assert.h>

// Abstract binary associative operator usable from host and device code.
template <typename T>
struct BinaryAssociativeOperator
{
	__host__ __device__ virtual T operator() (const T left, const T right) const = 0;
};

// Addition operator.
template <typename T>
struct Add : public BinaryAssociativeOperator<T>
{
	__host__ __device__ T operator() (const T left, const T right) const;
};

// Maximum operator.
template <typename T>
struct Max : public BinaryAssociativeOperator<T>
{
	__host__ __device__ T operator() (const T left, const T right) const;
};

template<typename T, typename Op>
__global__ void reduce(T * v, const int n, Op op);

int main(int argc, char** argv)
{
	const int size = 8;
	int h_v[size] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	// Stage the input on the device.
	int *d_v = 0;
	cudaMalloc((void**)&d_v, size * sizeof(int));
	cudaMemcpy(d_v, h_v, size * sizeof(int), cudaMemcpyHostToDevice);

	// One block; one thread per initial pair of elements.
	dim3 grdDim(1, 1, 1);
	dim3 blkDim(size / 2, 1, 1);

	// Reduce with Max; the result is left in d_v[0].
	Max<int> maxOp;
	reduce<int, Max<int>> <<<grdDim, blkDim >>>(d_v, size, maxOp);
	cudaDeviceSynchronize();

	cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess)
	{
		printf("CUDA error: %s\n", cudaGetErrorString(status));
		// system("pause"); // when using VisStudio
		exit(-1);
	}

	int reduced = 0;
	cudaMemcpy(&reduced, d_v, 1 * sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(d_v);

	// Echo the input and the reduced value.
	std::cout << "Max ( ";
	for (int k = 0; k < size; k++)
	{
		std::cout << h_v[k] << (k < size - 1 ? " ," : " ) = ");
	}
	std::cout << reduced << std::endl;

	// system("pause"); // when using VisStudio
	return 0;
}

template<typename T>
__host__ __device__ T Add<T>::operator() (const T lhs, const T rhs) const
{
	return lhs + rhs;
};

template<typename T>
__host__ __device__ T Max<T>::operator() (const T lhs, const T rhs) const
{
	return lhs > rhs ? lhs : rhs;
};

// In-place pairwise tree reduction inside a single block: after the loop,
// v[0] holds op folded over v[0..n). Launched with n/2 threads; assumes n is
// a power of two. The operator is called on an object of known concrete
// type, so dispatch is static (no device-side vtable access).
template<typename T, typename Op>
__global__ void reduce(T *v, const int n, Op op)
{
	const unsigned int lane = threadIdx.x;
	unsigned int active = blockDim.x;   // threads still combining pairs
	int gap = 1;                        // distance between the two operands

	while (active > 0)
	{
		if (lane < active)
		{
			const int lo = lane * gap * 2;
			const int hi = lo + gap;
			v[lo] = op(v[lo], v[hi]);
		}
		// Barrier outside the divergent branch: all threads reach it.
		__syncthreads();
		active /= 2;
		gap *= 2;
	}
};
4,361
//xfail:REPAIR_ERROR
//--blockDim=16 --gridDim=1 --no-inline
//
// Verifier test kernel: every thread in the 16-thread block stores its own
// threadIdx.x into the SAME shared-memory element, an intentional write-write
// data race that the checker is expected to report (hence the xfail tag).

#include <cuda.h>

__global__ void foo() {

  __shared__ int A[16];

  // All threads race on A[0]; the surviving value is nondeterministic.
  A[0] = threadIdx.x;
}
4,362
#include <stdio.h>
#include <cuda_runtime.h>
// #include <helper_cuda.h>

#define N 1024
#define THREADS_PER_BLOCK 32

// Each thread prints its flat global index (output order is unspecified).
__global__ void SingleBlockLoop(){
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= N)
		return;
	printf("%d\n", idx);
}

int main(void){
	// Ceil-divide so all N indices are covered even if N were not a
	// multiple of the block size.
	const int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
	SingleBlockLoop<<<blocks, THREADS_PER_BLOCK>>>();

	// Kernel launches do not return errors directly; query the last error.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess){
		fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Wait for the device so all printf output is flushed before exit.
	cudaDeviceSynchronize();
	return 0;
}
4,363
#include <curand_kernel.h>

// Rescales n uniform [0,1) samples already stored in `result` into the
// interval [lower, upper), in place: r = u*upper + (1-u)*lower.
// Grid-stride loop, so any launch configuration covers all n elements.
extern "C"
__global__ void uniform_double(int n, double lower, double upper, double *result)
{
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
    {
        const double u = result[idx];
        result[idx] = u * upper + (1 - u) * lower;
    }
}
4,364
#include <cstdio>

extern "C" {

// For every cell (x, y), computes the maximum sum over all sub-rectangles
// whose top-left corner is (x, y), writing it to max_table[y*N + x].
// NOTE(review): the subtraction pattern below is the standard inclusion-
// exclusion query on a 2D INCLUSIVE prefix-sum matrix, so `table` is
// presumably NxN inclusive prefix sums of the underlying data -- confirm
// against the caller before relying on this.
// Launch: a 2D grid covering at least NxN threads.
__global__ void find_maxes(int N, int* table, int* max_table) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    // Guard the grid tail: the launch may overshoot NxN.
    if (x >= N || y >= N) return;
    // Seed with the prefix value at (0,0) (the smallest rectangle rooted at
    // the origin); see NOTE above about table's assumed contents.
    int max_sum = table[0];
    // Enumerate all bottom-right corners (i, j) >= (y, x).
    for (int i=y; i<N; ++i) {
    for (int j=x; j<N; ++j) {
        // Rectangle sum over rows y..i, cols x..j by inclusion-exclusion.
        int tmp = table[i*N + j];
        if (y>0) tmp -= table[(y-1)*N + j];
        if (x>0) tmp -= table[i*N + x-1];
        if (x>0 && y>0) tmp += table[(y-1)*N + x-1];
        max_sum = max(max_sum, tmp);
    }}
    max_table[y*N + x] = max_sum;
}

}
4,365
#include <thrust/complex.h>
#include <tuple>

// Newton-fractal renderer over the complex square [-2,2] x [-2,2]:
// iterates Newton's method on f(x) = x*atan(x) - log(x^2+1)/2 for every
// pixel and emits the argument (phase) of the limit point as CSV.
using F = double;
using T = thrust::complex<F>;

// Rendered region of the complex plane.
constexpr F range_x_max = +2;
constexpr F range_x_min = -2;
constexpr F range_y_max = +2;
constexpr F range_y_min = -2;

// Grid geometry: (block_x*thread_x) x (block_y*thread_y) = 8192 x 8192 pixels.
constexpr int block_x = 256;
constexpr int block_y = 256;
constexpr int thread_x = 32;
constexpr int thread_y = 32;

// Flat global thread coordinates.
__device__ inline int get_ix() { return threadIdx.x + blockIdx.x * blockDim.x; }
__device__ inline int get_iy() { return threadIdx.y + blockIdx.y * blockDim.y; }

// Row-major pixel index: row ix (length block_x*thread_x), column iy.
__device__ inline int get_id(const int ix, const int iy) { return block_x * thread_x * ix + iy; }

// Maps integer pixel coordinates to a point of the rendered region.
__device__ inline T get_place(const int ix, const int iy) { return {range_x_min + (range_x_max - range_x_min) * (F)ix / ((F)block_x * (F)thread_x), range_y_min + (range_y_max - range_y_min) * (F)iy / ((F)block_y * (F)thread_y)}; }

// One Newton step x - f(x)/f'(x) with f(x) = x*atan(x) - log(x^2+1)/2,
// whose analytic derivative is f'(x) = atan(x). When the derivative is
// exactly zero, returns it (i.e. 0) instead of dividing by zero.
__device__ inline T newton_method(T x) { const T f = x * thrust::atan(x) - thrust::log(thrust::pow(x, 2) + 1) / 2; const T df = thrust::atan(x); return df.real() == .0 && df.imag() == .0 ?
df : x - f / df; }

// Runs 100 Newton iterations per pixel and stores arg(limit) in d_x.
// Launch geometry must match block_x/block_y/thread_x/thread_y above.
__global__ void calc(F *d_x) {
    const auto ix = get_ix(), iy = get_iy();
    const auto id = get_id(ix, iy);
    auto c = get_place(ix, iy);
    /*
    printf("%lg %lg\n", c.real(), c.imag());
    c = newton_method(c);
    printf("%lg %lg\n", c.real(), c.imag());
    printf("%lg\n",thrust::arg(c));
    */
    // Fixed iteration count (no convergence test).
    for(int i = 0; i < 100; i++) {
        c = newton_method(c);
    }
    d_x[id] = thrust::arg(c);
    // printf("%lg\n",d_x[id]);
    // printf("%lg\n", thrust::arg(c));
    // printf("id : %d\n", get_id(get_ix(), get_iy()));
    // printf("place is %lg %lg\n", place.real(), place.imag());
}

// Dumps the result grid to stdout as CSV, one row per line.
void print(F *h_x) {
    for(int x = 0; x < block_x * thread_x; x++) {
        printf("%lg", h_x[block_x * thread_x * x]);
        for(int y = 1; y < block_y * thread_y; y++) {
            printf(",%lg", h_x[block_x * thread_x * x + y]);
        }
        printf("\n");
    }
}

int main() {
    // 8192*8192 doubles = 512 MiB on both host and device.
    size_t size = block_x * block_y * thread_x * thread_y * sizeof(F);
    F *h_x, *d_x;
    cudaMalloc((void **)&d_x, size);
    dim3 block(block_x, block_y);
    dim3 thread(thread_x, thread_y);
    calc<<<block, thread>>>(d_x);
    h_x = (F *)malloc(size);
    // cudaMemcpy is blocking, so it also waits for the kernel to finish.
    cudaMemcpy(h_x, d_x, size, cudaMemcpyDeviceToHost);
    print(h_x);
    cudaFree(d_x);
    free(h_x);
    cudaDeviceReset();
}
4,366
#include <stdio.h>
# include "cuda_runtime.h"
# include "cuda_profiler_api.h"

// Zero-copy vector add: z[i] = x[i] + y[i] + 1, computed by the GPU directly
// on mapped (pinned) host memory.
__global__ void add(int n, float *x, float *y, float *z)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) z[i] = x[i] + y[i];
    // Second guarded pass bumps each sum by one, so every z[i] ends up 4.0f.
    if (i< n) z[i]++;
}

int main()
{
    int N = 1<<10;
    float *x, *y, *z, *d_x, *d_y, *d_z;

    cudaDeviceReset();

    //Allocating memory onto host
    // NOTE(review): cudaHostAllocMapped traditionally requires
    // cudaSetDeviceFlags(cudaDeviceMapHost) before any allocation on
    // non-UVA platforms -- confirm this runs on a UVA-capable setup.
    cudaHostAlloc((void **)&x, N*sizeof(float), cudaHostAllocMapped );
    cudaHostAlloc((void **)&y, N*sizeof(float), cudaHostAllocMapped );
    cudaHostAlloc((void **)&z, N*sizeof(float), cudaHostAllocMapped );

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    //Getting device pointer
    // Device-side aliases of the mapped host buffers (no explicit memcpy).
    cudaHostGetDevicePointer((void **)&d_x, x, 0);
    cudaHostGetDevicePointer((void **)&d_y, y, 0);
    cudaHostGetDevicePointer((void **)&d_z, z, 0);

    cudaDeviceSynchronize();
    cudaProfilerStart();
    add<<<(N+255)/256, 256>>>(N, d_x, d_y, d_z);
    cudaProfilerStop();
    // Synchronize before the host reads z: the kernel writes host memory
    // asynchronously through the mapped pointers.
    cudaDeviceSynchronize();

    // Expected: N * (1 + 2 + 1) = 4096 for N = 1024.
    float sum = 0.0;
    for (int i=0; i<N; i++) {
        sum = z[i] + sum;
    }
    printf("Sum = %f\n", sum);

    cudaFreeHost(x);
    cudaFreeHost(y);
    cudaFreeHost(z);
    cudaDeviceReset();
}
4,367
#include <stdio.h>
#include <cuda_runtime.h>

// Prints one greeting line per thread from the device.
__global__ void hello(void)
{
    printf("Hello World!\n");
}

// C-linkage entry point so non-C++ callers (FFI, other translation units)
// can trigger the launch. Returns 0 on completion.
extern "C" int func()
{
    hello <<<1,10>>>();
    // cudaDeviceReset implicitly synchronizes, flushing the device printf
    // buffer before the context is destroyed.
    cudaDeviceReset();
    // BUG FIX: the function is declared to return int but had no return
    // statement -- undefined behavior in C++ for a value-returning function.
    return 0;
}
4,368
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <vector>

#include <cuda.h>
#include <cuda_runtime.h>

#define SIZE 1

using namespace std;

// Benchmarks host<->device copy latency over a ladder of buffer sizes and
// prints CSV: size,in,out  (in = H2D nanoseconds, out = D2H nanoseconds).
int main() {
  // freopen("swapinout.txt", "w", stdout);
  // BUG FIX: the element type is now long long -- the largest entry
  // (3.2 GB) overflows `long int` on platforms where long is 32-bit.
  static const long long s[] = {
      // (smaller sizes were commented out of the original run:)
      //1, 4, 8, 16, 32, 40,
      //64, 68, 80, 128, 256, 400,
      //512, 768, 1024, 1535, 1536, 2048,
      //2264, 2348, 2600, 2888, 3020, 3164,
      //4096, 4716, 5120, 8192, 10240, 16384,
      //17920, 20000, 25600, 32768, 37632, 42452,
      49152,      75276,      92928,      98304,      100000,     102400,
      131072,     147456,     204800,     262144,     294912,     307200,
      409860,     524288,     589824,     1000000,    1048576,    1179648,
      1228800,    1280000,    1581056,    1638400,    1638916,    2359296,
      2654208,    3538944,    4718592,    6422528,    8306688,    8388608,
      9437184,    10000000,   11075584,   11943936,   12845056,   16613376,
      16777216,   20447232,   25000000,   25690112,   35831808,   38535168,
      39321600,   49561600,   51380224,   67108864,   83886080,   121228800,
      142655492,  178438148,  186482692,  191102980,  196608000,  205520896,
      237568004,  1258291200, 3200000000LL};
  // BUG FIX: the loop was hard-coded to 59 iterations, but the array holds
  // only 57 entries, reading past the end. Derive the count from the array.
  const int count = (int)(sizeof(s) / sizeof(s[0]));

  cout << "size,in,out" << endl;

  for (int i = 0; i < count; i++) {
    const long long size = s[i];

    // Pinned host buffer: required for representative transfer bandwidth.
    void *hostArray = nullptr;
    void *deviceArray = nullptr;
    if (cudaMallocHost(&hostArray, size) != cudaSuccess ||
        cudaMalloc(&deviceArray, size) != cudaSuccess) {
      // Skip sizes that do not fit; release whichever half succeeded.
      cudaFreeHost(hostArray);
      cudaFree(deviceArray);
      continue;
    }

    long long a0 = std::chrono::duration_cast<std::chrono::nanoseconds>(
                       std::chrono::system_clock::now().time_since_epoch())
                       .count();
    cudaMemcpy(deviceArray, hostArray, size, cudaMemcpyHostToDevice);
    long long b0 = std::chrono::duration_cast<std::chrono::nanoseconds>(
                       std::chrono::system_clock::now().time_since_epoch())
                       .count();
    cudaMemcpy(hostArray, deviceArray, size, cudaMemcpyDeviceToHost);
    // BUG FIX: the final timestamp used system_clock's raw tick count while
    // the other two used nanoseconds, so the "out" column was in different
    // units on platforms whose clock tick is not 1 ns.
    long long c0 = std::chrono::duration_cast<std::chrono::nanoseconds>(
                       std::chrono::system_clock::now().time_since_epoch())
                       .count();

    cout << s[i] << "," << b0 - a0 << "," << c0 - b0 << endl;

    // BUG FIX: the original never released either buffer, accumulating many
    // gigabytes of pinned host and device memory across the loop.
    cudaFreeHost(hostArray);
    cudaFree(deviceArray);
  }
  return 0;
}
4,369
#include "includes.h"

// Scales the six gradient components of each active slice in place.
// `gradient` is indexed as plane*slices + slice for planes 0..6; plane 6
// holds a per-slice value that is treated as a squared norm (it is passed
// through sqrtf when positive). Launch with >= activeSlices threads in x.
__global__ void normalizeGradient(float* gradient, int* activeMask, int activeSlices, int slices)
{
	const int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if (tid >= activeSlices)
		return;

	// activeMask maps the dense thread index to the actual slice number.
	const int slice = activeMask[tid];

	// Positive plane-6 value -> multiply by its reciprocal square root;
	// otherwise the stored value itself is used as the scale (matching the
	// original behavior for zero or negative entries).
	float scale = gradient[6 * slices + slice];
	if (scale > 0)
		scale = 1.0f / sqrtf(scale);

	for (int component = 0; component < 6; ++component)
		gradient[component * slices + slice] *= scale;
}
4,370
#include <stdio.h>
#include <cuda_runtime.h>

// Macros that textually repeat their argument 2^k times; used to unroll the
// dependent-load chain in read_cache without loop overhead.
#define REP1(x) x
#define REP2(x) REP1(x) REP1(x)
#define REP4(x) REP2(x) REP2(x)
#define REP8(x) REP4(x) REP4(x)
#define REP16(x) REP8(x) REP8(x)
#define REP32(x) REP16(x) REP16(x)
#define REP64(x) REP32(x) REP32(x)
#define REP128(x) REP64(x) REP64(x)
#define REP256(x) REP128(x) REP128(x)
#define REP512(x) REP256(x) REP256(x)
#define REP1024(x) REP512(x) REP512(x)
#define REP2048(x) REP1024(x) REP1024(x)
#define REP4096(x) REP2048(x) REP2048(x)
#define REP8192(x) REP4096(x) REP4096(x)
#define REP16384(x) REP8192(x) REP8192(x)
#define REP32768(x) REP16384(x) REP16384(x)

//#define ARRAY_SIZE 17000
//#define STRIDE 11
#define DATA_TYPE long long

// Pointer-chasing microbenchmark kernel (single thread): performs 4096
// serially dependent loads through the address chain that main() built in
// device_array. The final pointer is stored back so the compiler cannot
// eliminate the chain as dead code.
__global__ void read_cache(DATA_TYPE* device_array)
{
	DATA_TYPE* j = &device_array[0];

	REP4096(j=*(DATA_TYPE**)j;)

	device_array[0] = (DATA_TYPE)j;
}

// Usage: prog ARRAY_SIZE STRIDE
// Builds an ARRAY_SIZE-element chain of device addresses with the given
// stride (wrapping via t %= STRIDE at the end), uploads it, and runs the
// single-thread chase. Intended for timing cache/memory latency externally
// (e.g. with a profiler); no result is copied back.
int main(int argc, char* argv[])
{
	cudaError_t err = cudaSuccess;
	DATA_TYPE* host_array = NULL;
	DATA_TYPE* device_array = NULL;
	size_t size;
	int i;

	if (argc < 3)
	{
		printf("Not enough parameters! Exitting...\n");
		return -1;
	}

	int ARRAY_SIZE = atoi(argv[1]);
	int STRIDE = atoi(argv[2]);

	size = sizeof(DATA_TYPE) * ARRAY_SIZE;

	host_array = (DATA_TYPE*)malloc(size);
	if (host_array == NULL)
	{
		printf("Failed to malloc!\n");
		return -1;
	}

	// Device buffer must be allocated first: its base address is baked into
	// the chain entries below.
	err = cudaMalloc((void**)&device_array, size);
	if (err != cudaSuccess)
	{
		printf("Failed to cudaMalloc!\n");
		free(host_array);
		return -1;
	}

	// Element i holds the DEVICE address of element (i + STRIDE), wrapped
	// near the end so the chase stays inside the array.
	for (i = 0; i < ARRAY_SIZE; i++)
	{
		DATA_TYPE t = i + STRIDE;
		if (t >= ARRAY_SIZE)
			t %= STRIDE;
		host_array[i] = (DATA_TYPE)device_array + (DATA_TYPE)sizeof(DATA_TYPE) * t;
	}

	err = cudaMemcpy(device_array, host_array, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
	{
		printf("Failed to cudaMemcpy!\n");
		free(host_array);
		cudaFree(device_array);
		return -1;
	}

	// Single thread on purpose: the measurement is pure latency, not bandwidth.
	read_cache<<<1, 1>>>(device_array);
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		printf("Failed to invoke kernel!\n");
		free(host_array);
		cudaFree(device_array);
		return -1;
	}

	free(host_array);
	cudaFree(device_array);
	return 0;
}
4,371
#include "LBM_GPU.cuh" #include <cmath> ofstream fout_GPU("out_GPU.dat"); ofstream fout_GPU_Ux("out_GPU_Ux.dat"); ofstream fout_GPU_Uy("out_GPU_Uy.dat"); ifstream fin_GPU("in_GPU.txt"); LBM_GPU::LBM_GPU() { // ============================================================================ // // LOAD THE PARAMETERS // ============================================================================ // fin_GPU >> nx; fin_GPU >> comment; fin_GPU >> ny; fin_GPU >> comment; fin_GPU >> Lx; fin_GPU >> comment; fin_GPU >> Ly; fin_GPU >> comment; fin_GPU >> a; fin_GPU >> comment; fin_GPU >> Re; fin_GPU >> comment; fin_GPU >> Ux0; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_X; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_Y; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_Z; fin_GPU >> comment; // ============================================================================ // // ============================================================================ // // NEW & CUDAMALLOC // ============================================================================ // is_boundary_node = new int[nx*ny]; cudaMalloc((void**)&d_is_boundary_node, nx*ny * sizeof(int)); U = new float[nx*ny]; cudaMalloc((void**)&d_U, nx*ny * sizeof(float)); Ux = new float[nx*ny]; cudaMalloc((void**)&d_Ux, nx*ny * sizeof(float)); Uy = new float[nx*ny]; cudaMalloc((void**)&d_Uy, nx*ny * sizeof(float)); rho = new float[nx*ny]; cudaMalloc((void**)&d_rho, nx*ny * sizeof(float)); W = new float[nx*ny]; UN = new float[nx*ny]; cudaMalloc((void**)&d_UN, nx*ny * sizeof(float)); UxN = new float[nx*ny]; cudaMalloc((void**)&d_UxN, nx*ny * sizeof(float)); UyN = new float[nx*ny]; cudaMalloc((void**)&d_UyN, nx*ny * sizeof(float)); rhoN = new float[nx*ny]; cudaMalloc((void**)&d_rhoN, nx*ny * sizeof(float)); f = new float[nx*ny*a]; cudaMalloc((void**)&d_f, nx*ny*a * sizeof(float)); ftemp = new float[nx*ny*a]; cudaMalloc((void**)&d_ftemp, nx*ny*a * sizeof(float)); fN = new float[nx*ny*a]; cudaMalloc((void**)&d_fN, nx*ny*a * sizeof(float)); feq = 
new float[nx*ny*a]; cudaMalloc((void**)&d_feq, nx*ny*a * sizeof(float)); ex = new float[a]; cudaMalloc((void**)&d_ex, a * sizeof(float)); ey = new float[a]; cudaMalloc((void**)&d_ey, a * sizeof(float)); U_p = new float[nx*ny]; Ux_p = new float[nx*ny]; Uy_p = new float[nx*ny]; // ============================================================================ // // ============================================================================ // // Microscopic velocity // ============================================================================ // ex[0] = 0.0, ey[0] = 0.0; ex[1] = 1.0, ey[1] = 0.0; ex[2] = 0.0, ey[2] = 1.0; ex[3] = -1.0, ey[3] = 0.0; ex[4] = 0.0, ey[4] = -1.0; ex[5] = 1.0, ey[5] = 1.0; ex[6] = -1.0, ey[6] = 1.0; ex[7] = -1.0, ey[7] = -1.0; ex[8] = 1.0, ey[8] = -1.0; cudaMemcpy(d_ex, ex, a * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_ey, ey, a * sizeof(float), cudaMemcpyHostToDevice); // ============================================================================ // // ============================================================================ // // SET BOUNDARY NODE // ============================================================================ // for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1) is_boundary_node[i + nx*j] = 1; else is_boundary_node[i + nx*j] = 0; } } cudaMemcpy(d_is_boundary_node, is_boundary_node, nx*ny * sizeof(int), cudaMemcpyHostToDevice); // ============================================================================ // // ============================================================================ // // INITIAL CONDITION // ============================================================================ // del_x = Lx / (float)nx; del_y = Ly / (float)ny; del_t = pow(del_x, 2); Ux0_p = Ux0 * (del_x / del_t); tau = 3.0*(del_t / pow(del_x, 2))*(Ux0_p * Lx / Re) + 0.5; nu = (1.0 / 3.0)*(tau - 0.5); for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { rho[i + nx*j] = 1.0; 
f[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j]; } } cudaMemcpy(d_rho, rho, nx*ny * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_f, f, nx*ny*a * sizeof(float), cudaMemcpyHostToDevice); // ============================================================================ // } __global__ void Kernel_Streaming(float* f, float* ftemp, int* is_boundary_node, int nx, int ny, int a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; int in, ip, jn, jp; if (!is_boundary_node[i + nx*j]) { in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i == 0) && (j > 0 && j < ny - 1)) { //LEFT in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + 
nx*ny * 5];
        ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8];
    }
    else if ((i > 0 && i < nx - 1) && (j == ny - 1)) { //TOP
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1];
        ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3];
        ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4];
        ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7];
        ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8];
    }
    else if ((i > 0 && i < nx - 1) && (j == 0)) { //BOTTOM
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1];
        ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2];
        ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3];
        ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5];
        ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6];
    }
    else if ((i == nx - 1) && (j > 0 && j < ny - 1)) { //RIGHT
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2];
        ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3];
        ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4];
        ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6];
        ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7];
    }
    else if ((i == 0) && (j == 0)) { //BOTTOM-LEFT
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1];
        ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2];
        ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5];
    }
    else if ((i == 0) && (j == ny - 1)) { //TOP-LEFT
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1];
        ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4];
        ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8];
    }
    else if ((i == nx - 1) && (j == ny - 1)) { //TOP-RIGHT
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3];
        ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4];
        ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7];
    }
    else if ((i == nx - 1) && (j == 0)) { //BOTTOM-RIGHT
        in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1;
        ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0];
        ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2];
        ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3];
        ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6];
    }
}

// Host launcher for the streaming step: one thread per (i, j, k) lattice entry.
void LBM_GPU::Streaming() {
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
    Kernel_Streaming << < dimGrid, dimBlock >> > (d_f, d_ftemp, d_is_boundary_node, nx, ny, a);
}

// Bounce-back boundary condition: on wall nodes each incoming distribution
// is reflected into the opposite direction (1<->3, 2<->4, 5<->7, 6<->8).
// Note: the top row is not handled here — it is treated by Kernel_BC_vel
// (moving-lid velocity boundary) instead.
__global__ void Kernel_BC_bounceback(float* f, float* ftemp, int nx, int ny, int a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= nx || j >= ny || k >= a) return;
    else if ((i == 0) && (j > 0 && j < ny - 1)){ //LEFT
        ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3];
        ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6];
        ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7];
    }
    else if ((i == nx - 1) && (j > 0 && j < ny - 1)) { //RIGHT
        ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1];
        ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8];
        ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5];
    }
    else if ((i > 0 && i < nx - 1) && (j == 0)) { //BOTTOM
        ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4];
        ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7];
        ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8];
    }
    else if ((i == 0) && (j == 0)) { //BOTTOM-LEFT
        ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3];
        ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4];
        ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7];
        // NOTE(review): direction 6 is written from f[8] and then direction 8
        // from f[6] — verify this ordering is intended at the corner.
        ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8];
        ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6];
    }
    else if ((i == nx - 1) && (j == 0)) { //BOTTOM-RIGHT
        ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4];
        ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8];
        ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1];
        ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7];
        ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5];
    }
}

// Host launcher for the bounce-back boundary update.
void LBM_GPU::BC_bounceback() {
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
    Kernel_BC_bounceback << < dimGrid, dimBlock >> > (d_f, d_ftemp, nx, ny, a);
}

// Velocity boundary condition on the top row (lid moving with speed Ux0),
// Zou/He-style: unknown distributions (4, 7, 8) are reconstructed from the
// known ones; the two top corners are treated separately using rho from the
// neighboring interior node.
__global__ void Kernel_BC_vel(float* ftemp, float* rho, float Ux0, int nx, int ny, int a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= nx || j >= ny || k >= a) return;
    float rho0, ru;
    if ((i > 0 && i < nx - 1) && (j == ny - 1)) { //TOP
        rho0 = ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 3] + 2.0*(ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6]);
        ru = rho0 * Ux0;
        ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2];
        ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 2.0)*ru + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 1] - ftemp[i + nx*j + nx*ny * 3]);
        ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 2.0)*ru - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 1] - ftemp[i + nx*j + nx*ny * 3]);
    }
    else if ((i == 0) && (j == ny - 1)) { //TOP-LEFT
        ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3];
        ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2];
        ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6];
        // Density is taken from the diagonal interior neighbor (i+1, j-1).
        ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho[(i + 1) + nx*(j - 1)] - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8]));
        ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5];
    }
    else if ((i == nx - 1) && (j == ny - 1)) { //TOP-RIGHT
        ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1];
        ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2];
        ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5];
        ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho[(i - 1) + nx*(j - 1)] - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7]));
        ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6];
    }
}

// Host launcher for the lid-velocity boundary update.
void LBM_GPU::BC_vel() {
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
    Kernel_BC_vel << < dimGrid, dimBlock >> > (d_ftemp, d_rho, Ux0, nx, ny, a);
}

// Computes macroscopic density/velocity from the post-streaming populations
// and then the D2Q9 equilibrium distribution feq for all 9 directions.
// NOTE(review): every k-thread writes the same rho/Ux/Uy/feq cells for a
// given (i, j); with a > 1 this is redundant (same values) — confirm a == 9
// is not assumed here.
__global__ void Kernel_Eq(float* ftemp, float* feq, float* Ux, float* Uy, float* rho, float* ex, float* ey, int nx, int ny, int a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= nx || j >= ny || k >= a) return;
    //Calculation of Macroscopic var
    rho[i + nx*j] = ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8];
    Ux[i + nx*j] = ftemp[i + nx*j + nx*ny * 1] * ex[1] + ftemp[i + nx*j + nx*ny * 3] * ex[3] + ftemp[i + nx*j + nx*ny * 5] * ex[5] + ftemp[i + nx*j + nx*ny * 6] * ex[6] + ftemp[i + nx*j + nx*ny * 7] * ex[7] + ftemp[i + nx*j + nx*ny * 8] * ex[8];
    Uy[i + nx*j] = ftemp[i + nx*j + nx*ny * 2] * ey[2] + ftemp[i + nx*j + nx*ny * 4] * ey[4] + ftemp[i + nx*j + nx*ny * 5] * ey[5] + ftemp[i + nx*j + nx*ny * 6] * ey[6] + ftemp[i + nx*j + nx*ny * 7] * ey[7] + ftemp[i + nx*j + nx*ny * 8] * ey[8];
    Ux[i + nx*j] /= rho[i + nx*j];
    Uy[i + nx*j] /= rho[i + nx*j];
    // D2Q9 equilibria: weights 4/9 (rest), 1/9 (axis), 1/36 (diagonal).
    feq[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + 3.0 * Ux[i + nx*j] + 4.5*pow(Ux[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + 3.0 * Uy[i + nx*j] + 4.5*pow(Uy[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - 3.0 * Ux[i + nx*j] + 4.5*pow(Ux[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - 3.0 * Uy[i + nx*j] + 4.5*pow(Uy[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + 3.0 * (Ux[i + nx*j] + Uy[i + nx*j]) + 4.5*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + 3.0 * (-Ux[i + nx*j] + Uy[i + nx*j]) + 4.5*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + 3.0 * (-Ux[i + nx*j] - Uy[i + nx*j]) + 4.5*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
    feq[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + 3.0 * (Ux[i + nx*j] - Uy[i + nx*j]) + 4.5*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - 1.5*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
}

// BGK collision: relax each population towards equilibrium with time tau.
__global__ void Kernel_Collision(float* fN, float* ftemp, float* feq, int nx, int ny, int a, float tau)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= nx || j >= ny || k >= a) return;
    fN[i + nx*j + nx*ny*k] = ftemp[i + nx*j + nx*ny*k] - (ftemp[i + nx*j + nx*ny*k] - feq[i + nx*j + nx*ny*k]) / tau;
}

// Host launcher: equilibrium computation followed by BGK collision.
void LBM_GPU::Collision() {
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
    Kernel_Eq << < dimGrid, dimBlock >> > (d_ftemp, d_feq, d_Ux, d_Uy, d_rho, d_ex, d_ey, nx, ny, a);
    Kernel_Collision << < dimGrid, dimBlock >> > (d_fN, d_ftemp, d_feq, nx, ny, a, tau);
}

// Computes the old speed field U (from f) and the new speed field UN (from
// fN) so the host can evaluate convergence.
// NOTE(review): the first parameter ftemp is unused in this kernel body.
__global__ void Kernel_Error(float* ftemp, float* f, float* Ux, float* Uy, float* U, float* rho, float* fN, float* UxN, float* UyN, float* UN, float* rhoN, float* ex, float* ey, int nx, int ny, int a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= nx || j >= ny || k >= a) return;
    // Macroscopic fields from the previous populations f.
    rho[i + nx*j] = f[i + nx*j + nx*ny * 0] + f[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 8];
    Ux[i + nx*j] = f[i + nx*j + nx*ny * 1] * ex[1] + f[i + nx*j + nx*ny * 3] * ex[3] + f[i + nx*j + nx*ny * 5] * ex[5] + f[i + nx*j + nx*ny * 6] * ex[6] + f[i + nx*j + nx*ny * 7] * ex[7] + f[i + nx*j + nx*ny * 8] * ex[8];
    Uy[i + nx*j] = f[i + nx*j + nx*ny * 2] * ey[2] + f[i + nx*j + nx*ny * 4] * ey[4] + f[i + nx*j + nx*ny * 5] * ey[5] + f[i + nx*j + nx*ny * 6] * ey[6] + f[i + nx*j + nx*ny * 7] * ey[7] + f[i + nx*j + nx*ny * 8] * ey[8];
    Ux[i + nx*j] /= rho[i + nx*j];
    Uy[i + nx*j] /= rho[i + nx*j];
    U[i + nx*j] = sqrt(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2));
    // Macroscopic fields from the post-collision populations fN.
    rhoN[i + nx*j] = fN[i + nx*j + nx*ny * 0] + fN[i + nx*j + nx*ny * 1] + fN[i + nx*j + nx*ny * 2] + fN[i + nx*j + nx*ny * 3] + fN[i + nx*j + nx*ny * 4] + fN[i + nx*j + nx*ny * 5] + fN[i + nx*j + nx*ny * 6] + fN[i + nx*j + nx*ny * 7] + fN[i + nx*j + nx*ny * 8];
    UxN[i + nx*j] = fN[i + nx*j + nx*ny * 1] * ex[1] + fN[i + nx*j + nx*ny * 3] * ex[3] + fN[i + nx*j + nx*ny * 5] * ex[5] + fN[i + nx*j + nx*ny * 6] * ex[6] + fN[i + nx*j + nx*ny * 7] * ex[7] + fN[i + nx*j + nx*ny * 8] * ex[8];
    UyN[i + nx*j] = fN[i + nx*j + nx*ny * 2] * ey[2] + fN[i + nx*j + nx*ny * 4] * ey[4] + fN[i + nx*j + nx*ny * 5] * ey[5] + fN[i + nx*j + nx*ny * 6] * ey[6] + fN[i + nx*j + nx*ny * 7] * ey[7] + fN[i + nx*j + nx*ny * 8] * ey[8];
    UxN[i + nx*j] /= rhoN[i + nx*j];
    UyN[i + nx*j] /= rhoN[i + nx*j];
    UN[i + nx*j] = sqrt(pow(UxN[i + nx*j], 2) + pow(UyN[i + nx*j], 2));
}

// Host launcher: compute U and UN on the device and copy both to the host.
// The blocking cudaMemcpy also synchronizes with the kernel launch.
void LBM_GPU::Error() {
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
    Kernel_Error << < dimGrid, dimBlock >> > (d_ftemp, d_f, d_Ux, d_Uy, d_U, d_rho, d_fN, d_UxN, d_UyN, d_UN, d_rhoN, d_ex, d_ey, nx, ny, a);
    cudaMemcpy(U, d_U, nx*ny * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(UN, d_UN, nx*ny * sizeof(float), cudaMemcpyDeviceToHost);
}

// Copies fN into f and refreshes the macroscopic fields from the updated f.
// NOTE(review): rho/Ux/Uy/U at (i, j) are rewritten by every k-thread while
// other k-threads may still be copying f — the per-(i,j) result relies on
// all contributions of f being identical across k iterations; verify.
__global__ void Kernel_Update(float* fN, float* f, float* Ux, float* Uy, float* U, float* rho, float* ex, float* ey, int nx, int ny, int a)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i >= nx || j >= ny || k >= a) return;
    f[i + nx*j + nx*ny*k] = fN[i + nx*j + nx*ny*k];
    rho[i + nx*j] = f[i + nx*j + nx*ny * 0] + f[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 8];
    Ux[i + nx*j] = f[i + nx*j + nx*ny * 1] * ex[1] + f[i + nx*j + nx*ny * 3] * ex[3] + f[i + nx*j + nx*ny * 5] * ex[5] + f[i + nx*j + nx*ny * 6] * ex[6] + f[i + nx*j + nx*ny * 7] * ex[7] + f[i + nx*j + nx*ny * 8] * ex[8];
    Uy[i + nx*j] = f[i + nx*j + nx*ny * 2] * ey[2] + f[i + nx*j + nx*ny * 4] * ey[4] + f[i + nx*j + nx*ny * 5] * ey[5] + f[i + nx*j + nx*ny * 6] * ey[6] + f[i + nx*j + nx*ny * 7] * ey[7] + f[i + nx*j + nx*ny * 8] * ey[8];
    Ux[i + nx*j] /= rho[i + nx*j];
    Uy[i + nx*j] /= rho[i + nx*j];
    U[i + nx*j] = sqrt(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2));
}

// Host launcher for the update (f <- fN plus macroscopic refresh).
void LBM_GPU::Update() {
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
    Kernel_Update << < dimGrid, dimBlock >> > (d_fN, d_f, d_Ux, d_Uy, d_U, d_rho, d_ex, d_ey, nx, ny, a);
}

// Copies the fields to the host, computes vorticity W by finite differences
// (one-sided at boundaries, assuming zero wall-tangential velocity),
// normalizes by the lid speed Ux0, and writes Tecplot-style output plus
// centerline profiles.
void LBM_GPU::Print() {
    cudaMemcpy(Ux, d_Ux, nx*ny * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(Uy, d_Uy, nx*ny * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(U, d_U, nx*ny * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(rho, d_rho, nx*ny * sizeof(float), cudaMemcpyDeviceToHost);
    // ================= VORTICITY ================= //
    // NOTE(review): all derivatives divide by del_y, including the x-ones —
    // this is only correct for a uniform grid with del_x == del_y; confirm.
    //INNER
    for (i = 1; i < nx - 1; i++) {
        for (j = 1; j < ny - 1; j++) {
            W[i + nx*j] = (Uy[(i + 1) + nx*j] - Uy[(i - 1) + nx*j]) / (2.0*del_y) - (Ux[i + nx*(j + 1)] - Ux[i + nx*(j - 1)]) / (2.0*del_y);
        }
    }
    //LEFT BOUNDARY
    i = 0;
    for (j = 1; j < ny - 1; j++) {
        W[i + nx*j] = (Uy[(i + 1) + nx*j] - 0.0) / (del_y)-(Ux[i + nx*(j + 1)] - Ux[i + nx*(j - 1)]) / (2.0*del_y);
    }
    //RIGHT BOUNDARY
    i = nx - 1;
    for (j = 1; j < ny - 1; j++) {
        W[i + nx*j] = (0.0 - Uy[(i - 1) + nx*j]) / (del_y)-(Ux[i + nx*(j + 1)] - Ux[i + nx*(j - 1)]) / (2.0*del_y);
    }
    //TOP BOUNDARY
    j = ny - 1;
    for (i = 1; i < nx - 1; i++) {
        W[i + nx*j] = (Uy[(i + 1) + nx*j] - Uy[(i - 1) + nx*j]) / (2.0*del_y) - (0.0 - Ux[i + nx*(j - 1)]) / (del_y);
    }
    //BOTTOM BOUNDARY
    j = 0;
    for (i = 1; i < nx - 1; i++) {
        W[i + nx*j] = (Uy[(i + 1) + nx*j] - Uy[(i - 1) + nx*j]) / (2.0*del_y) - (Ux[i + nx*(j + 1)] - 0.0) / (del_y);
    }
    //TOP-LEFT CONNER
    i = 0; j = ny - 1;
    W[i + nx*j] = (Uy[(i + 1) + nx*j] - 0.0) / (del_y)-(0.0 - Ux[i + nx*(j - 1)]) / (del_y);
    //BOTTOM-LEFT CONNER
    i = 0; j = 0;
    W[i + nx*j] = (Uy[(i + 1) + nx*j] - 0.0) / (del_y)-(Ux[i + nx*(j + 1)] - 0.0) / (del_y);
    //TOP-RIGHT CONNER
    i = nx - 1; j = ny - 1;
    W[i + nx*j] = (0.0 - Uy[(i - 1) + nx*j]) / (del_y)-(0.0 - Ux[i + nx*(j - 1)]) / (del_y);
    //BOTTOM-RIGHT CONNER
    i = nx - 1; j = 0;
    W[i + nx*j] = (0.0 - Uy[(i - 1) + nx*j]) / (del_y)-(Ux[i + nx*(j + 1)] - 0.0) / (del_y);
    // ================= NORMALIZATION ================= //
    for (i = 0; i < nx; i++) {
        for (j = 0; j < ny; j++) {
            Ux_p[i + nx*j] = Ux[i + nx*j] / Ux0;
            Uy_p[i + nx*j] = Uy[i + nx*j] / Ux0;
            U_p[i + nx*j] = U[i + nx*j] / Ux0;
            W[i + nx*j] = W[i + nx*j] / Ux0;
        }
    }
    // Full-field output (Tecplot zone format).
    fout_GPU << endl;
    fout_GPU << "variables = X Y Ux Uy U rho W" << endl;
    fout_GPU << "zone i=" << nx << " j=" << ny << endl;
    for (j = 0; j < ny; j++) {
        for (i = 0; i < nx; i++) {
            fout_GPU << i << "\t" << j << "\t" << Ux_p[i + nx*j] << "\t" << Uy_p[i + nx*j] << "\t" << U_p[i + nx*j] << "\t" << rho[i + nx*j] << "\t" << W[i + nx*j] << endl;
        }
    }
    // Vertical centerline Ux profile.
    fout_GPU_Ux << "variables = X Y Ux " << endl;
    i = nx / 2;
    for (j = 0; j < ny; j++) {
        fout_GPU_Ux << i << "\t" << j << "\t" << Ux_p[i + nx*j] << endl;
    }
    // Horizontal centerline Uy profile.
    fout_GPU_Uy << "variables = X Y Uy " << endl;
    j = ny / 2;
    for (i = 0; i < nx; i++) {
        fout_GPU_Uy << i << "\t" << j << "\t" << Uy_p[i + nx*j] << endl;
    }
}

// Releases all device and host buffers.
LBM_GPU::~LBM_GPU() {
    cudaFree(d_is_boundary_node);
    cudaFree(d_f);
    cudaFree(d_fN);
    cudaFree(d_ftemp);
    cudaFree(d_feq);
    cudaFree(d_Ux);
    cudaFree(d_Uy);
    cudaFree(d_rho);
    cudaFree(d_ex);
    cudaFree(d_ey);
    cudaFree(d_U);
    cudaFree(d_UN);
    cudaFree(d_UxN);
    cudaFree(d_UyN);
    // NOTE(review): this frees host array rhoN with cudaFree and then also
    // delete[]s it below — it likely should be cudaFree(d_rhoN); confirm.
    cudaFree(rhoN);
    delete[] Uy_p;
    delete[] Ux_p;
    delete[] U_p;
    delete[] ey;
    delete[] ex;
    delete[] fN;
    delete[] feq;
    delete[] ftemp;
    delete[] f;
    delete[] rhoN;
    delete[] UyN;
    delete[] UxN;
    delete[] UN;
    delete[] W;
    delete[] rho;
    delete[] Uy;
    delete[] Ux;
    delete[] U;
    delete[] is_boundary_node;
    cout << endl << "Done!" << endl;
}
4,372
// gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>

// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter.
// Fully parenthesized so it is also safe when passed expressions,
// e.g. iceil(n + 1, TILE_WIDTH).
#define iceil(num,den) (((num) + (den) - 1) / (den))

#define TILE_WIDTH 16 // block x and y dimensions

// Abort the program with a diagnostic if a CUDA runtime call failed.
void check_error(cudaError_t error_id){
    if (error_id != cudaSuccess) {
        printf("Error is %d: %s\n", error_id, cudaGetErrorString(error_id));
        exit(EXIT_FAILURE);
    }
}

// GPU device MatrixMulKernel kernel code.
// Computes P = M * N with row-major storage: P is Mh x Nw, M is Mh x Mw,
// N is Mw x Nw. Launch with TILE_WIDTH x TILE_WIDTH blocks on a 2D grid
// covering P; threads outside the matrix do nothing.
__global__ void MatrixMulKernel(float *Pd, float *Md, float *Nd,
                                int Mh, int Mw, int Nw)
{
    // Global row/column of the Pd element this thread owns.
    int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;

    if (Row < Mh && Col < Nw) {
        // Dot product of row 'Row' of Md with column 'Col' of Nd.
        float Pvalue = 0;
        for (int k = 0; k < Mw; ++k) {
            Pvalue += Md[Row * Mw + k] * Nd[k * Nw + Col];
        }
        Pd[Row * Nw + Col] = Pvalue;
    }
}

// Single-block variant: one TILE_WIDTH x TILE_WIDTH block computes the
// whole product (valid for matrices no larger than the block).
__global__ void MatrixMulKernelSingleBlock(float *Pd, float *Md, float *Nd,
                                           int Mh, int Mw, int Nw)
{
    int Row = threadIdx.y;
    int Col = threadIdx.x;

    // Bug fix: the original guard used '>' which let threads with
    // Row == Mh or Col == Nw fall through and write out of bounds.
    if (Row >= Mh || Col >= Nw) return;

    float Pvalue = 0;
    for (int k = 0; k < Mw; ++k) {
        Pvalue += Md[Row * Mw + k] * Nd[k * Nw + Col];
    }
    Pd[Row * Nw + Col] = Pvalue;
}

// Host wrapper: copies M (Mh x Mw) and N (Mw x Nw) to the device, launches
// the multiply, copies the Mh x Nw result back into P, and releases all
// device buffers. Aborts via check_error on any CUDA failure.
void compute_GPU(float *P, float *M, float *N, int Mh, int Mw, int Nw)
{
    float *Md, *Nd, *Pd;
    cudaError_t error_id;

    // Device allocation and host-to-device transfer of the inputs.
    size_t size_M = (size_t)Mh * Mw * sizeof(float);
    size_t size_N = (size_t)Mw * Nw * sizeof(float);
    size_t size_P = (size_t)Mh * Nw * sizeof(float);

    error_id = cudaMalloc((void**)&Md, size_M);
    check_error(error_id);
    error_id = cudaMemcpy(Md, M, size_M, cudaMemcpyHostToDevice);
    check_error(error_id);
    error_id = cudaMalloc((void**)&Nd, size_N);
    check_error(error_id);
    error_id = cudaMemcpy(Nd, N, size_N, cudaMemcpyHostToDevice);
    check_error(error_id);

    // Device allocation of the result buffer.
    error_id = cudaMalloc((void**)&Pd, size_P);
    check_error(error_id);

    if (Mh == 16 && Mw == 16 && Nw == 16) {
        // Single-block 16x16 testing path.
        dim3 grid(1, 1);
        dim3 block(TILE_WIDTH, TILE_WIDTH);
        MatrixMulKernelSingleBlock<<<grid, block>>>(Pd, Md, Nd, Mh, Mw, Nw);
    } else {
        // Grid rounded up so dimensions need not be multiples of TILE_WIDTH.
        dim3 grid(iceil(Nw, TILE_WIDTH), iceil(Mh, TILE_WIDTH));
        dim3 block(TILE_WIDTH, TILE_WIDTH);
        MatrixMulKernel<<<grid, block>>>(Pd, Md, Nd, Mh, Mw, Nw);
    }
    // Kernel launches return no status directly; fetch any launch error.
    check_error(cudaGetLastError());

    // Transfer P from device to host (blocking, so it also synchronizes).
    error_id = cudaMemcpy(P, Pd, size_P, cudaMemcpyDeviceToHost);
    check_error(error_id);

    // Free device memory.
    error_id = cudaFree(Md);
    check_error(error_id);
    error_id = cudaFree(Nd);
    check_error(error_id);
    error_id = cudaFree(Pd);
    check_error(error_id);
}
4,373
// Sparse matrix in coordinate (COO) format. The arrays values,
// col_indices, and row_indices all have length `count`.
struct SparseMatrixCOO {
    float* values;
    int* col_indices;
    int* row_indices;
    int M;
    int N;
    int count;
};

// Sequential SpMV for COO: y += A * x.
// Compared to sequential SpMV/CSR, the COO form never wastes time on
// fully-zero rows — only stored nonzeros are visited.
void SpMV_COO(const SparseMatrixCOO A, const float* x, float* y){
    for (int e = 0; e < A.count; ++e) {
        const int r = A.row_indices[e];
        const int c = A.col_indices[e];
        y[r] += A.values[e] * x[c];
    }
}

// Parallel COO SpMV, version 1: a grid-stride loop over the nonzeros.
// NOTE: two threads handling nonzeros of the same row race on y[row]
// (an output interference), so this version is not correct as written.
__global__ void SpMV_COO_kernel_v1(const SparseMatrixCOO A, const float* x, float* y)
{
    const int stride = blockDim.x * gridDim.x;
    int e = threadIdx.x + blockIdx.x * blockDim.x;
    while (e < A.count) {
        const int r = A.row_indices[e];
        const int c = A.col_indices[e];
        y[r] += A.values[e] * x[c];   // racy read-modify-write on y[r]
        e += stride;
    }
}

// Version 2: switching to an atomic addition makes the output correct,
// but it also serializes a potentially large number of writes to the same
// row. Privatization (as in the histogram pattern) could reduce that
// contention; in practice COO is better suited to sequential hardware,
// so a different representation is usually the better answer.
__global__ void SpMV_COO_kernel_v2(const SparseMatrixCOO A, const float* x, float* y)
{
    const int stride = blockDim.x * gridDim.x;
    for (int e = threadIdx.x + blockIdx.x * blockDim.x; e < A.count; e += stride) {
        atomicAdd(&y[A.row_indices[e]], A.values[e] * x[A.col_indices[e]]);
    }
}
4,374
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
using namespace std;

#define eps 1e-4

// 2D convolution: each thread is responsible for one output pixel.
// img is n x m (row-major), kernel is kw x kh, result is out_n x out_m.
// padding == true  -> "same":  (x, y) is the center, top-left tap at
//                              (x - kw/2, y - kh/2), out-of-range taps read 0
// padding == false -> "valid": (x, y) is the top-left tap
__global__ void convolution2d(float *img, float *kernel, float* result, int n, int m, int kw, int kh, int out_n, int out_m, bool padding)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int x = bx * blockDim.x + tx;   // output column, 0..out_m-1
    int y = by * blockDim.y + ty;   // output row,    0..out_n-1
    int idx = y * out_m + x;
    // Bug fix: the original guard was 'idx < out_n * out_m', which let a
    // thread with x >= out_m alias a cell of the next row and overwrite it
    // with a wrong value whenever out_m is not a multiple of blockDim.x.
    if (x < out_m && y < out_n) {
        float ret = 0;
        for (int i = 0; i < kw; i++) {
            for (int j = 0; j < kh; j++) {
                int cur_x = 0, cur_y = 0;
                if (padding == true) {
                    cur_x = x - kw / 2 + i;
                    cur_y = y - kh / 2 + j;
                }
                else {
                    cur_x = x + i;
                    cur_y = y + j;
                }
                // Out-of-range taps contribute zero.
                if (cur_x >= 0 and cur_x < n and cur_y >= 0 and cur_y < m) {
                    ret += img[cur_y * m + cur_x] * kernel[i * kh + j];
                }
            }
        }
        result[idx] = ret;
    }
}

// Host reference: recomputes every output pixel and compares against the
// GPU result within eps. Prints a message and exits on the first mismatch;
// returns true when everything matches.
bool check(float *img, float *kernel, float *result, int n, int m, int kw, int kh, int out_n, int out_m, bool padding){
    for (int i = 0; i < out_n; i++) {
        for (int j = 0; j < out_m; j++) {
            float cur = 0.0;
            for (int p = 0; p < kw; p++) {
                for (int q = 0; q < kh; q++) {
                    int cur_x = 0, cur_y = 0;
                    if (padding == true) {
                        cur_x = i - kw / 2 + p;
                        cur_y = j - kh / 2 + q;
                    }
                    else {
                        cur_x = i + p;
                        cur_y = j + q;
                    }
                    if (cur_x >= 0 and cur_x < n and cur_y >= 0 and cur_y < m) {
                        cur += img[cur_x * m + cur_y] * kernel[p * kh + q];
                    }
                }
            }
            if (abs(cur - result[i * out_m + j]) > eps) {
                cout << cur << ' ' << result[i * out_m + j] << endl;
                cout << "Not Equal !!!" << endl;
                exit(0);
            }
        }
    }
    cout << "Nice !!! Equal!!" << endl;
    return true;
}

int main(){
    bool padding = false;
    int n = 512;
    int m = 512;
    int kh = 3;
    int kw = 3;
    int out_n = 0, out_m = 0;
    if (padding == false) {
        out_n = (n - kw + 1);
        out_m = (m - kh + 1);
    }
    else {
        out_n = n;
        out_m = m;
    }
    size_t sizer = sizeof(float);

    // All-ones kernel.
    float *kernel = NULL;
    kernel = (float*)malloc(kw * kh * sizer);
    for (int i = 0; i < kw; i++) {
        for (int j = 0; j < kh; j++) {
            kernel[i * kh + j] = 1;
        }
    }
    // Deterministic test image.
    float *img = NULL;
    img = (float*)malloc(n * m * sizer);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            img[i * m + j] = (i + j) % 256;
        }
    }
    float *result = (float*)malloc(out_m * out_n * sizer);

    float *img_d = NULL;
    float *kernel_d = NULL;
    float *result_d = NULL;
    cudaMalloc((void**)&kernel_d, kh * kw * sizer);
    cudaMalloc((void**)&img_d, n * m * sizer);
    cudaMalloc((void**)&result_d, out_m * out_n * sizer);
    cudaMemcpy(img_d, img, n * m * sizer, cudaMemcpyHostToDevice);
    cudaMemcpy(kernel_d, kernel, kh * kw * sizer, cudaMemcpyHostToDevice);

    dim3 threadPerBlock(2, 2);
    // Bug fix: grid.x must cover the output columns (out_m) and grid.y the
    // output rows (out_n); the original had them swapped (harmless only
    // because n == m here).
    dim3 BlockPerGrid((out_m + threadPerBlock.x - 1) / threadPerBlock.x,
                      (out_n + threadPerBlock.y - 1) / threadPerBlock.y);
    convolution2d<<<BlockPerGrid, threadPerBlock>>>(img_d, kernel_d, result_d, n, m, kw, kh, out_n, out_m, padding);
    cudaDeviceSynchronize();
    cudaMemcpy(result, result_d, out_n * out_m * sizer, cudaMemcpyDeviceToHost);

    // Bug fix: check takes (out_n, out_m) in that order; the original passed
    // them swapped (again only harmless because the output is square).
    check(img, kernel, result, n, m, kw, kh, out_n, out_m, padding);

    free(img);
    free(kernel);
    free(result);
    cudaFree(img_d);
    cudaFree(kernel_d);
    cudaFree(result_d);
    return 0;
}
4,375
//----------------------------------------------------------------------
/*!\file gpu_algorithms/basicComplexMath.cu
 *
 * \author Felix Laufer
 *
 *
 * CUDA: Collection of basic complex math operations and kernels
 *
 */
//----------------------------------------------------------------------
#include <math.h>
#include <cufft.h>

namespace gpu_algorithms
{
namespace cuda
{

typedef cufftComplex Complex;
typedef cufftReal Real;

//----------------------------------------------------------------------
// Helper functions
//----------------------------------------------------------------------

// Cyclically shift the matrix 'shift_x' steps in x-direction and 'shift_y'
// steps in y-direction; returns the sequential (row-major) index of the
// shifted position of element (x, y) in an nx x ny matrix.
static __host__ __device__ inline unsigned int SequentialIndex2DCyclicShift(const unsigned int x, const unsigned int y, const unsigned int nx, const unsigned int ny, const int shift_x, const int shift_y)
{
    int xx = x;
    int yy = y;
    xx += shift_x;
    yy += shift_y;
    // Wrap around each dimension once (shifts are assumed within one period).
    xx = (xx < 0) ? xx + (int) nx : (xx >= (int) nx) ? xx - (int) nx : xx;
    yy = (yy < 0) ? yy + (int) ny : (yy >= (int) ny) ? yy - (int) ny : yy;
    return yy * nx + xx;
}

// Cyclically shift a square matrix 'shift' steps both in x- and y-direction.
static __host__ __device__ inline unsigned int SequentialIndex2DCyclicShift(const unsigned int x, const unsigned int y, const unsigned int matrix_size, const int shift)
{
    return SequentialIndex2DCyclicShift(x, y, matrix_size, matrix_size, shift, shift);
}

// Cyclically shift the matrix s.t. the kernel's center point corresponds to
// the first sequential array index.
static __host__ __device__ inline unsigned int SequentialIndex2DFFTShift(const unsigned int x, const unsigned int y, const unsigned int matrix_size)
{
    int shift = ((int) matrix_size - 1) / 2;
    return SequentialIndex2DCyclicShift(x, y, matrix_size, -shift);
}

// Cyclically shift the matrix s.t. the first sequential array index maps
// back to the kernel's center point (inverse of the shift above).
static __host__ __device__ inline unsigned int SequentialIndex2DInverseFFTShift(const unsigned int x, const unsigned int y, const unsigned int matrix_size)
{
    int shift = ((int) matrix_size - 1) / 2;
    return SequentialIndex2DCyclicShift(x, y, matrix_size, shift);
}

// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
    Complex c;
    c.x = a.x + b.x;
    c.y = a.y + b.y;
    return c;
}

// Complex scalar multiplication
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
    Complex c;
    c.x = s * a.x;
    c.y = s * a.y;
    return c;
}

// Complex scalar division
static __device__ __host__ inline Complex ComplexScaleDiv(Complex a, float s)
{
    Complex c;
    c.x = a.x / s;
    c.y = a.y / s;
    return c;
}

// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    Complex c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
}

// Complex division
static __device__ __host__ inline Complex ComplexDiv(Complex a, Complex b)
{
    float divisor = b.x * b.x + b.y * b.y;
    Complex c;
    c.x = (a.x * b.x + a.y * b.y) / divisor;
    c.y = (-a.x * b.y + a.y * b.x) / divisor;
    return c;
}

// Bilinear interpolation of the real parts of a square complex matrix at
// fractional position (x, y); coordinates are clamped to the matrix bounds.
static __device__ __host__ inline float BilinearInterpolation(float x, float y, const Complex *data, const unsigned int matrix_size)
{
    int x0 = floor(x);
    int x1 = ceil(x);
    int y0 = floor(y);
    int y1 = ceil(y);
    x0 = (x0 < 0) ? 0 : (x0 >= matrix_size) ? matrix_size - 1 : x0;
    x1 = (x1 < 0) ? 0 : (x1 >= matrix_size) ? matrix_size - 1 : x1;
    y0 = (y0 < 0) ? 0 : (y0 >= matrix_size) ? matrix_size - 1 : y0;
    y1 = (y1 < 0) ? 0 : (y1 >= matrix_size) ? matrix_size - 1 : y1;
    const unsigned int index00 = y0 * matrix_size + x0;
    const unsigned int index01 = y1 * matrix_size + x0;
    const unsigned int index10 = y0 * matrix_size + x1;
    const unsigned int index11 = y1 * matrix_size + x1;
    const float v00 = data[index00].x;
    const float v01 = data[index01].x;
    const float v10 = data[index10].x;
    const float v11 = data[index11].x;
    // Interpolate along y on both columns, then along x between them;
    // degenerate (equal-coordinate) cases fall back to slope 0.
    const float m1 = (abs(y0 - y1) > 0.0f) ? (v00 - v01) / (y0 - y1) : 0.0f;
    const float m2 = (abs(y0 - y1) > 0.0f) ? (v10 - v11) / (y0 - y1) : 0.0f;
    const float b1 = v00 - m1 * y0;
    const float b2 = v10 - m2 * y0;
    const float vm1 = y * m1 + b1;
    const float vm2 = y * m2 + b2;
    const float mi = (abs(x0 - x1) > 0.0f) ? (vm1 - vm2) / (x0 - x1) : 0.0f;
    const float bi = vm1 - mi * x0;
    const float vi = x * mi + bi;
    return vi;
}

//----------------------------------------------------------------------
// Kernel functions
//----------------------------------------------------------------------

// FFT-shift a complex stream of stream_size elements interpreted as a
// square matrix of side matrix_size (grid-stride loop).
static __global__ void ComplexStreamSequentialIndex2DFFTShift(const Complex *idata, Complex *odata, const unsigned int stream_size, const unsigned int matrix_size)
{
    const unsigned int numThreads = blockDim.x * gridDim.x;
    const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = threadID; i < stream_size; i += numThreads)
    {
        unsigned int index = i;
        int y = i / matrix_size;
        int x = i - y * matrix_size;
        index = SequentialIndex2DFFTShift(x, y, matrix_size);
        odata[index] = idata[i];
    }
}

// Complex point-wise multiplication, scaled by 1 / normalization_factor.
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, const unsigned int stream_size, const float normalization_factor)
{
    const unsigned int numThreads = blockDim.x * gridDim.x;
    const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = threadID; i < stream_size; i += numThreads)
    {
        Complex product = ComplexMul(a[i], b[i]);
        a[i] = (Complex) {product.x / normalization_factor, product.y / normalization_factor};
    }
}

// Complex point-wise normalized correlation: a <- (a * conj(b)) / |a * conj(b)|
// with both inputs and the result scaled by normalization_factor.
static __global__ void ComplexPointwiseNormalizedCorrelation(Complex* a, const Complex* b, const unsigned int stream_size, const float normalization_factor)
{
    const unsigned int numThreads = blockDim.x * gridDim.x;
    const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = threadID; i < stream_size; i += numThreads)
    {
        Complex product = ComplexMul((Complex) {a[i].x / normalization_factor, a[i].y / normalization_factor}, (Complex) {b[i].x / normalization_factor, -b[i].y / normalization_factor});
        Real norm = sqrtf(product.x * product.x + product.y * product.y);
        Complex result = (norm > 0.0f) ? (Complex) {product.x / norm, product.y / norm} : (Complex) {0.0f, 0.0f};
        a[i] = (Complex) {result.x / normalization_factor, result.y / normalization_factor};
    }
}

// Complex square matrix transposition using a padded shared-memory tile
// (+1 column avoids bank conflicts) and diagonal block reordering.
// Bug fix: the original declared 'const Complex *odata' (and non-const
// idata) — odata is the written-to output, so the const qualifiers were on
// the wrong parameter and writing through odata could not compile.
template <unsigned int param_tile_dim, unsigned int param_block_rows>
static __global__ void SquareMatrixTranspose(const Complex *idata, Complex *odata, unsigned int matrix_size)
{
    __shared__ Complex tile[param_tile_dim][param_tile_dim + 1];
    const unsigned int blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    const unsigned int blockIdx_y = blockIdx.x;
    unsigned int xIndex = blockIdx_x * param_tile_dim + threadIdx.x;
    unsigned int yIndex = blockIdx_y * param_tile_dim + threadIdx.y;
    const unsigned int index_in = xIndex + yIndex * matrix_size;
    xIndex = blockIdx_y * param_tile_dim + threadIdx.x;
    yIndex = blockIdx_x * param_tile_dim + threadIdx.y;
    const unsigned int index_out = xIndex + yIndex * matrix_size;
    for (unsigned int i = 0; i < param_tile_dim; i += param_block_rows)
    {
        tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * matrix_size];
    }
    __syncthreads();
    for (unsigned int i = 0; i < param_tile_dim; i += param_block_rows)
    {
        odata[index_out + i * matrix_size] = tile[threadIdx.x][threadIdx.y + i];
    }
}

// Rotate a complex matrix by the given angle (radians) about its center,
// sampling the source via bilinear interpolation of the real parts; the
// imaginary part of the output is zeroed.
static __global__ void Rotate(const Complex *idata, Complex *odata, const unsigned int stream_size, const unsigned int matrix_size, const float angle)
{
    const unsigned int numThreads = blockDim.x * gridDim.x;
    const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int offset = (matrix_size - 1) / 2;
    // Hoisted out of the loop: angle is constant for the whole kernel.
    const float cos_a = cos(angle);
    const float sin_a = sin(angle);
    for (unsigned int i = threadID; i < stream_size; i += numThreads)
    {
        const unsigned int y = i / matrix_size;
        const unsigned int x = i - y * matrix_size;
        const int x_o = x - offset;
        const int y_o = y - offset;
        const float x_r = x_o * cos_a - y_o * sin_a;
        const float y_r = x_o * sin_a + y_o * cos_a;
        const float x_src = x_r + offset;
        const float y_src = y_r + offset;
        odata[i].x = BilinearInterpolation(x_src, y_src, idata, matrix_size);
        odata[i].y = 0.0f;
    }
}

// Translate a complex matrix by the given translation vector; positions
// shifted in from outside the matrix are filled with zeros.
static __global__ void Translate(const Complex *idata, Complex *odata, const unsigned int stream_size, const unsigned int matrix_size, const int x_translation, const int y_translation)
{
    const unsigned int numThreads = blockDim.x * gridDim.x;
    const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = threadID; i < stream_size; i += numThreads)
    {
        int y = i / matrix_size;
        int x = i - y * matrix_size;
        x -= x_translation;
        y -= y_translation;
        Complex data = (0 <= x && x < matrix_size && 0 <= y && y < matrix_size) ? idata[y * matrix_size + x] : (Complex) {0.0f, 0.0f};
        odata[i] = data;
    }
}

}
}
4,376
#include "includes.h"

// Index of lattice-Boltzmann population d (1..8) at site (x, y).
// NX and NY are lattice dimensions, presumably defined in includes.h.
__device__ __forceinline__ size_t gpu_fieldn_index(unsigned int x, unsigned int y, unsigned int d)
{
    return (NX*(NY*(d-1)+y)+x);
}

// Index of the rest (direction-0) population at site (x, y).
__device__ __forceinline__ size_t gpu_field0_index(unsigned int x, unsigned int y)
{
    return NX*y+x;
}

// Constant-charge-density boundary condition on the lower plate (y == 0) of a
// D2Q9-style lattice: each incoming population h2 is reflected into the
// opposite direction in h1 (1<->3, 2<->4, 5<->7, 6<->8) with a wall-charge
// source term added.
// h0: rest population (updated in place); h1: output populations;
// h2: input (post-streaming) populations.
// Launch layout: x = blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y = row.
// NOTE(review): x is not bounds-checked against NX — assumes the grid exactly
// matches the lattice width; confirm at the call site.
__global__ void gpu_bc_charge(double *h0, double *h1, double *h2)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y;

    // NOTE(review): 'perturb' is not declared in this kernel — presumably a
    // __device__ global defined in includes.h; confirm.
    perturb = 0;

    if (y == 0)
    {
        // Twice the wall charge density times the lattice weights: rest (w0),
        // straight (ws) and diagonal (wd) directions.  charge0/w0/ws/wd are
        // presumably constants from includes.h.
        double multi0c = 2.0*charge0*w0;
        double multisc = 2.0*charge0*ws;
        double multidc = 2.0*charge0*wd;

        // lower plate for charge density: read all incoming populations first
        // so the writes below cannot clobber values still to be read.
        double ht1 = h2[gpu_fieldn_index(x, 0, 1)];
        double ht2 = h2[gpu_fieldn_index(x, 0, 2)];
        double ht3 = h2[gpu_fieldn_index(x, 0, 3)];
        double ht4 = h2[gpu_fieldn_index(x, 0, 4)];
        double ht5 = h2[gpu_fieldn_index(x, 0, 5)];
        double ht6 = h2[gpu_fieldn_index(x, 0, 6)];
        double ht7 = h2[gpu_fieldn_index(x, 0, 7)];
        double ht8 = h2[gpu_fieldn_index(x, 0, 8)];

        // lower plate for constant charge density: reflect each population
        // into its opposite direction and add the wall source term.
        h0[gpu_field0_index(x, 0)] = -h0[gpu_field0_index(x, 0)] + multi0c;
        h1[gpu_fieldn_index(x, 0, 3)] = -ht1 + multisc;
        h1[gpu_fieldn_index(x, 0, 4)] = -ht2 + multisc;
        h1[gpu_fieldn_index(x, 0, 1)] = -ht3 + multisc;
        h1[gpu_fieldn_index(x, 0, 2)] = -ht4 + multisc;
        h1[gpu_fieldn_index(x, 0, 7)] = -ht5 + multidc;
        h1[gpu_fieldn_index(x, 0, 8)] = -ht6 + multidc;
        h1[gpu_fieldn_index(x, 0, 5)] = -ht7 + multidc;
        h1[gpu_fieldn_index(x, 0, 6)] = -ht8 + multidc;
    }
}
4,377
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string>
#include <iostream>
#include <time.h>
#include <sys/time.h>

int MAX_ITER = 1000;
int TEST_TIME = 1;

__constant__ double THRESHOLD = 1e-9;
double h_THRESHOLD = 1e-9;

using namespace std;

// Convergence test (host): squared Euclidean distance between iterates
// compared against h_THRESHOLD.
int isConvergeHost(double *cur_x, double *pre_x, int row){
    double diff = 0.0;
    for (int i = 0; i < row; i++){
        double d = cur_x[i] - pre_x[i];
        diff += d * d;          // cheaper and at least as accurate as pow(fabs(x), 2)
    }
    return (diff < h_THRESHOLD) ? 1 : 0;
}

// Convergence test (device): same metric against the __constant__ threshold.
__device__ int isConverge(double *cur_x, double *pre_x, int row){
    double diff = 0.0;
    for (int i = 0; i < row; i++){
        double d = cur_x[i] - pre_x[i];
        diff += d * d;
    }
    return (diff < THRESHOLD) ? 1 : 0;
}

// One Jacobi sweep on the host: x_cur[i] = (b[i] - sum_{j!=i} A[i][j]*x_pre[j]) / A[i][i].
void doSequentailJacobi(double *h_cur_x, double *h_pre_x, double *h_A, double *h_b, int row, int col){
    for (int i = 0; i < row; i++){
        double sigma = 0.0;
        for (int j = 0; j < col; j++){
            if (i != j){
                sigma += h_A[i*col+j] * h_pre_x[j];
            }
        }
        h_cur_x[i] = (h_b[i] - sigma) / h_A[i*col+i];
    }
}

// One Jacobi sweep on the device: one thread per matrix row.
// FIX: guard against idx >= row so launches that overshoot the matrix size
// do not read or write out of bounds.
__global__ void parallelJacob(double *cur_x, double *pre_x, double *A, double *b, int row, int col, int *isCon){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= row) return;
    double sigma = 0.0;
    for (int j = 0; j < col; j++){
        if (idx != j) sigma += A[idx*col + j] * pre_x[j];
    }
    cur_x[idx] = (b[idx] - sigma) / A[idx*col + idx];
    // Convergence is intentionally not tested on the device: it would need a
    // grid-wide barrier, which a plain kernel launch does not provide.
}

// Print a diagnostic when a CUDA API call fails (does not abort).
void checkCudaSucess(cudaError_t cudaStatus, string str){
    if (cudaStatus != cudaSuccess) {
        cout << cudaStatus << endl;
        fprintf(stderr, "%s failed!\n", str.c_str());
    }
}

// Run MAX_ITER Jacobi sweeps on the host, ping-ponging the two buffers.
// Early-exit on convergence is intentionally disabled so timings compare a
// fixed amount of work.
void normalJacobi(double *h_cur_x, double *h_pre_x, double *h_A, double *h_b, int row, int col){
    for (int k = 0; k < MAX_ITER; k++) {
        if (k % 2)
            doSequentailJacobi (h_cur_x, h_pre_x, h_A, h_b, row, col);
        else
            doSequentailJacobi (h_pre_x, h_cur_x, h_A, h_b, row, col);
    }
}

// Run MAX_ITER Jacobi sweeps on the device, ping-ponging the two buffers.
void cudaJacobi(int nBlocks, int blockSize, double *d_cur_x, double *d_pre_x, double *d_A, double *d_b, int row, int col, int *d_isConverge, int h_isConverge){
    for (int k = 0; k < MAX_ITER; k++){
        if (k % 2)
            parallelJacob <<< nBlocks, blockSize >>> (d_cur_x, d_pre_x, d_A, d_b, row, col, d_isConverge);
        else
            parallelJacob <<< nBlocks, blockSize >>> (d_pre_x, d_cur_x, d_A, d_b, row, col, d_isConverge);
    }
    // FIX: kernel launches are asynchronous — without this sync the caller's
    // wall-clock timing only measured launch overhead, not the solve.
    checkCudaSucess(cudaDeviceSynchronize(), "cudaDeviceSynchronize-cudaJacobi");
}

//Check whether there is any GPU available and ready to use
bool InitCUDA(){
    int count;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(i);
    return true;
}

//Calculate Time difference in us
double time_diff(struct timeval x, struct timeval y){
    double x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec;
    double y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec;
    return y_ms - x_ms;
}

int main(int argc, char *argv[]){
    if (!InitCUDA()) {
        return -1;
    }

    //Time counter initialization
    struct timeval h_start, h_stop, d_start, d_stop;
    double diff;

    //Device variables
    double *d_cur_x, *d_pre_x;
    double *d_A, *d_b;
    int *d_isConverge = NULL;

    //Host variables
    double *h_cur_x, *h_pre_x;
    double *h_A, *h_b;
    int h_isConverge = 0;

    // Read matrix from file: first line is N, then N*N entries of A, then N of b.
    FILE *file = fopen("matrix.txt", "r");
    if (file == NULL){
        fprintf(stderr, "File does not exist!\n");
        return -1;
    }

    // FIX: getline() requires lineptr to be NULL or a malloc'd buffer; it was
    // previously used uninitialized (undefined behaviour).
    char *line = NULL;
    size_t len = 0;
    int N;
    if (getline(&line, &len, file) != -1){
        N = atoi(line);
    } else {
        fclose(file);
        return -1;
    }
    int row = N;
    int col = N;

    h_A = (double*) malloc(sizeof(double)*row*col);
    h_b = (double*) malloc(sizeof(double)*col);
    int i = 0;
    while ((getline(&line, &len, file)) != -1) {
        if (i < N*N) h_A[i] = atof(line);
        else h_b[i-N*N] = atof(line);
        i++;
    }
    free(line);     // FIX: getline's buffer was leaked
    fclose(file);   // FIX: file handle was leaked

    double *h_x = (double*) malloc(sizeof(double)*row);

    // Malloc Memory in Device
    checkCudaSucess(cudaMalloc((void**) &d_cur_x, sizeof(double)*row), "cudaMalloc-d_cur_x");
    checkCudaSucess(cudaMalloc((void**) &d_pre_x, sizeof(double)*row), "cudaMalloc-d_pre_x");
    checkCudaSucess(cudaMalloc((void**) &d_A, sizeof(double)*row*col), "cudaMalloc-d_A");
    checkCudaSucess(cudaMalloc((void**) &d_b, sizeof(double)*row), "cudaMalloc-d_b");
    checkCudaSucess(cudaMalloc((void**) &d_isConverge, sizeof(int)), "cudaMalloc-d_isConverge");

    // Malloc Memory in Host
    h_cur_x = (double*) malloc(sizeof(double)*row);
    h_pre_x = (double*) malloc(sizeof(double)*row);

    // Initialize our Guess X = [0] on both sides
    checkCudaSucess(cudaMemset(d_cur_x, 0, sizeof(double)*row), "cudaMemset-d_cur_x");
    checkCudaSucess(cudaMemset(d_pre_x, 0, sizeof(double)*row), "cudaMemset-d_pre_x");
    memset(h_cur_x, 0, sizeof(double)*row);
    memset(h_pre_x, 0, sizeof(double)*row);

    // Copy memory data from host to device
    checkCudaSucess(cudaMemcpy(d_A, h_A, sizeof(double)*row*col, cudaMemcpyHostToDevice), "cudaMemcpy-d_A");
    checkCudaSucess(cudaMemcpy(d_b, h_b, sizeof(double)*row, cudaMemcpyHostToDevice), "cudaMemcpy-d_b");
    checkCudaSucess(cudaMemcpy(d_isConverge, &h_isConverge, sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy-d_isConverge");

    //Count Execution Time For Host
    diff = 0.0;
    for (int test = 0; test < TEST_TIME; test++){
        gettimeofday(&h_start, NULL);
        normalJacobi(h_cur_x, h_pre_x, h_A, h_b, row, col);
        gettimeofday(&h_stop, NULL);
        diff += time_diff(h_start, h_stop);
    }
    printf("Host computation tooks: %.0lf us\n", diff/TEST_TIME);

    // One block of `row` threads, matching the one-thread-per-row kernel.
    int blockSize = row;
    int nBlocks = 1;

    //Count Execution Time For Device
    diff = 0.0;
    for (int test = 0; test < TEST_TIME; test++){
        gettimeofday(&d_start, NULL);
        // FIX: arguments were passed as (blockSize, nBlocks) while the
        // function expects (nBlocks, blockSize) — grid and block were swapped.
        cudaJacobi(nBlocks, blockSize, d_cur_x, d_pre_x, d_A, d_b, row, col, d_isConverge, h_isConverge);
        gettimeofday(&d_stop, NULL);
        diff += time_diff(d_start, d_stop);
    }
    printf("Device computation tooks: %.0lf us\n", diff/TEST_TIME);

    // Data <- device
    cudaMemcpy(h_x, d_cur_x, sizeof(double)*col, cudaMemcpyDeviceToHost);

    //Cuda Free (FIX: d_isConverge was never freed)
    cudaFree(d_cur_x);
    cudaFree(d_pre_x);
    cudaFree(d_A);
    cudaFree(d_b);
    cudaFree(d_isConverge);

    //Normal Free (FIX: h_A and h_b were never freed)
    free(h_x);
    free(h_cur_x);
    free(h_pre_x);
    free(h_A);
    free(h_b);
    return 0;
}

/* REF: https://github.com/MMichel/CudaJacobi/blob/master/jacobi.cu */
4,378
#include "sigmoid-cross-entropy-grad.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"

namespace ops
{
    // Gradient op for the sigmoid cross-entropy loss.
    // preds()[0] = y (presumably the labels), preds()[1] = logits;
    // the output takes y's shape.
    SigmoidCrossEntropyGrad::SigmoidCrossEntropyGrad(Op* y, Op* logits)
        : Op("sigmoid_cross_entropy_grad", y->shape_get(), {y, logits})
    {}

    // Build the runtime node for this op: look up the compiled predecessors,
    // allocate the output tensor, create the gradient node depending on both
    // predecessors, and register the result with the graph.
    void SigmoidCrossEntropyGrad::compile()
    {
        auto& g = Graph::instance();
        auto& cy = g.compiled(preds()[0]);       // compiled y input
        auto& clogits = g.compiled(preds()[1]);  // compiled logits input

        std::size_t len = cy.out_shape.total();  // total element count of the output
        Shape out_shape = cy.out_shape;          // output shape mirrors y's shape
        dbl_t* out_data = tensor_alloc(len);
        auto out_node = rt::Node::op_sigmoid_cross_entropy_grad(cy.out_data, clogits.out_data,
                                                                out_data, len,
                                                                {cy.out_node, clogits.out_node});
        g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
    }
}
4,379
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "time.h"
#include "string.h"

int nchans = 1024, nsamp = 32768;
#define ANTS 32

// ======================== CUDA HELPER FUNCTIONS ==========================
// Error checking function
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) _cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError()    _cudaCheckError( __FILE__, __LINE__ )

// Abort with a diagnostic if a CUDA API call returned an error.
inline void _cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}

// Abort with a diagnostic if a previous kernel launch failed.
inline void _cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}

// ======================= Rearrange Kernel =============================
// Rearrange medicina antenna data to match beamformer required input
// Heap size is 1024 channels, 128 samples, 32 antennas (8 bytes each)
// Threadblock size is 128 samples
#define HEAP 128
__global__ void rearrange_medicina(unsigned char *input, unsigned char *output,
                                   unsigned nsamp, unsigned nchans)
{
    // Each grid row (blockIdx.y) processes a separate channel;
    // each grid column (blockIdx.x, strided) processes a separate heap.
    for(unsigned h = blockIdx.x; h < nsamp / HEAP; h += gridDim.x)
    {
        unsigned int index = blockIdx.y * nsamp * ANTS + h * HEAP * ANTS;

        // Thread ID acts as pointer to required sample; antennas come in
        // interleaved pairs, hence ANTS / 2 iterations handling two at a time.
        // (FIX: loop bound was the float expression ANTS * 0.5.)
        for(unsigned a = 0; a < ANTS / 2; a++)
        {
            output[index + threadIdx.x * ANTS + a * 2 + 1] = input[index + a * HEAP * 2 + threadIdx.x * 2 + 1];
            output[index + threadIdx.x * ANTS + a * 2    ] = input[index + a * HEAP * 2 + threadIdx.x * 2];
        }
    }
}

// ======================= Main Program =======================

// Process command-line parameters (-nchans N, -nsamp N).
void process_arguments(int argc, char *argv[])
{
    int i = 1;
    while(i < argc)
    {
        if (!strcmp(argv[i], "-nchans"))
            nchans = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-nsamp"))
            nsamp = atoi(argv[++i]);
        i++;
    }
}

int main(int argc, char *argv[])
{
    unsigned char *input, *d_input, *output, *d_output;

    process_arguments(argc, argv);
    printf("nsamp: %d, nchans: %d\n", nsamp, nchans);
    printf("Memory requirements: Input: %.2f MB, Output: %.2f \n",
           nchans * ANTS * nsamp * sizeof(unsigned char) / (1024.0 * 1024),
           nchans * ANTS * nsamp * sizeof(unsigned char) / (1024.0 * 1024));

    // Allocate and initialise host arrays
    input  = (unsigned char *) malloc( nsamp * nchans * ANTS * sizeof(unsigned char));
    output = (unsigned char *) malloc( nsamp * nchans * ANTS * sizeof(unsigned char));
    memset(input,  0, nsamp * nchans * ANTS * sizeof(unsigned char));
    memset(output, 0, nsamp * nchans * ANTS * sizeof(unsigned char));

    // Fill the input with each antenna's own index so the rearranged output
    // can be validated below.
    for (unsigned c = 0; c < nchans; c++)
        for(unsigned s = 0; s < nsamp / HEAP; s++)
            for(unsigned a = 0; a < ANTS / 2; a++)
                for(unsigned t = 0; t < HEAP; t++)
                {
                    input[c * ANTS * nsamp + s * ANTS * HEAP + a * HEAP * 2 + t * 2 + 1] = a * 2 + 1;
                    input[c * ANTS * nsamp + s * ANTS * HEAP + a * HEAP * 2 + t * 2]     = a * 2;
                }

    // Initialise CUDA stuff
    CudaSafeCall(cudaSetDevice(0));
    CudaSafeCall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
    cudaEvent_t event_start, event_stop;
    float timestamp;
    cudaEventCreate(&event_start);
    cudaEventCreate(&event_stop);

    // Allocate GPU memory
    CudaSafeCall(cudaMalloc((void **) &d_input,  nsamp * nchans * ANTS * sizeof(unsigned char)));
    CudaSafeCall(cudaMalloc((void **) &d_output, nsamp * nchans * ANTS * sizeof(unsigned char)));

    // Copy input to GPU
    cudaEventRecord(event_start, 0);
    CudaSafeCall(cudaMemcpy(d_input, input, nsamp * ANTS * nchans * sizeof(unsigned char),
                            cudaMemcpyHostToDevice));
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied to GPU in: %lf\n", timestamp);

    // Launch the rearrangement kernel: one block row per channel, one block
    // column per heap, HEAP threads per block.
    cudaEventRecord(event_start, 0);
    dim3 gridDim(nsamp / HEAP, nchans);
    rearrange_medicina <<< gridDim, HEAP >>> (d_input, d_output, nsamp, nchans);
    CudaCheckError();   // FIX: launch errors were previously never checked
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Perform rearrangement in: %lf [%.2f GFLOPs] \n", timestamp,
           nsamp * nchans * ANTS / (timestamp * 1e-3) * 1e-9);

    // Copy output from GPU
    cudaEventRecord(event_start, 0);
    CudaSafeCall(cudaMemcpy(output, d_output, nsamp * ANTS * nchans * sizeof(unsigned char),
                            cudaMemcpyDeviceToHost));
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied from GPU in: %lf\n", timestamp);

    // Validate: after rearrangement every sample should hold its antenna index.
    for(unsigned c = 0; c < nchans; c++)
        for(unsigned s = 0; s < nsamp; s++)
            for(unsigned a = 0; a < ANTS; a++)
                if ( ((int) output[c*nsamp*ANTS + s*ANTS+a]) != a)
                {
                    printf("[%d,%d,%d] %d != %d\n", c, s, a,
                           (int) output[c*nsamp*ANTS + s*ANTS+a], a);
                    // exit(0);
                }

    // FIX: all device/host memory and the events were previously leaked.
    CudaSafeCall(cudaFree(d_input));
    CudaSafeCall(cudaFree(d_output));
    cudaEventDestroy(event_start);
    cudaEventDestroy(event_stop);
    free(input);
    free(output);
    return 0;
}
4,380
#include <stdio.h>
#include <assert.h>
#include <iostream>

// Compute grid dimensions so that a grid of (blockDim.x*2 x blockDim.y*2)
// tiles covers an n x n problem, rounding up when n is not an exact multiple
// of the tile size, and request the eight-byte shared-memory bank size.
void setGrid(int n, dim3 &blockDim, dim3 &gridDim)
{
   // Each block covers twice its thread extent in both directions;
   // use ceiling division so partial tiles at the edges are included.
   const unsigned int tileW = blockDim.x * 2;
   const unsigned int tileH = blockDim.y * 2;
   gridDim.x = (n + tileW - 1) / tileW;
   gridDim.y = (n + tileH - 1) / tileH;

   cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
}
4,381
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <iostream>

// https://stackoverflow.com/a/14038590/4647107
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Per-block Hillis-Steele exclusive scan of the lengths (basestop - basestart),
// double-buffered in shared memory (2 * 1024 elements of T).
// Precondition: length == gridDim.x * 1024 (the caller pads), so every
// launched thread enters the guarded region and the barriers are uniform.
// Each block's last thread also writes the block total into sums[blockIdx.x].
template <typename T, typename C>
__global__ void prefix_sum1(T* base, const C* basestart, const C* basestop,
                            int64_t basestartoffset, int64_t basestopoffset,
                            int length, T* sums)
{
    int thid = threadIdx.x + (blockIdx.x * blockDim.x);
    extern __shared__ T temp[];
    int pout = 0, pin = 1;

    if (thid < length)
    {
        // Shift right by one for an exclusive scan: slot 0 holds identity.
        if (thid == 0) {
            temp[threadIdx.x] = 0;
        } else {
            temp[threadIdx.x] = basestop[basestopoffset + thid - 1] - basestart[basestartoffset + thid - 1];
        }
        __syncthreads();

        for (int offset = 1; offset < 1024; offset *= 2)
        {
            // Swap the ping-pong buffers each round.
            pout = 1 - pout;
            pin = 1 - pout;
            if (threadIdx.x >= offset) {
                temp[pout*1024 + threadIdx.x] = temp[pin*1024 + threadIdx.x - offset] + temp[pin*1024 + threadIdx.x];
            } else {
                temp[pout*1024 + threadIdx.x] = temp[pin*1024 + threadIdx.x];
            }
            __syncthreads();
        }
        base[thid] = temp[pout*1024 + threadIdx.x];
        __syncthreads();

        // Last thread of each block (or of the data) records the block total.
        if ((thid == 1023) || ((blockIdx.x != 0) && (thid == ((1024 * (blockIdx.x + 1)) - 1))) || (thid == length-1)) {
            sums[blockIdx.x] = base[thid];
        }
    }
}

// Inclusive scan of the per-block sums, run as a single block.
// Need another kernel because of conditional __syncthreads().
// Precondition: length <= 1024 (launched as <<<1, length>>>), which bounds
// the supported input to 1024 * 1024 elements overall.
template <typename T>
__global__ void prefix_sum2(T* base, int length)
{
    int thid = threadIdx.x + (blockIdx.x * blockDim.x);
    extern __shared__ T temp[];
    int pout = 0, pin = 1;

    if (thid < length)
    {
        temp[thid] = base[thid];
        __syncthreads();

        for (int offset = 1; offset < length; offset *= 2)
        {
            pout = 1 - pout;
            pin = 1 - pout;
            if (thid >= offset)
                temp[pout*length + thid] = temp[pin*length + thid - offset] + temp[pin*length + thid];
            else
                temp[pout*length + thid] = temp[pin*length + thid];
            __syncthreads();
        }
        base[thid] = temp[pout*length + thid];
    }
}

// Add each block's preceding-blocks total to its scanned elements.
template<typename T>
__global__ void adder(T* base, T* sums, int64_t length)
{
    int thid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (blockIdx.x != 0 && thid < length)
        base[thid] += sums[blockIdx.x - 1];
}

// Compute base[0..length] = exclusive prefix sum of (basestop1 - basestart1)
// on the GPU, padding the input up to a multiple of 1024.
template <typename T, typename C>
void offload(T* base, C* basestart1, C* basestop1,
             int64_t basestartoffset, int64_t basestopoffset, int64_t length)
{
    int block, thread = 1024;
    if (length > 1024) {
        if (length % 1024 != 0)
            block = (length / 1024) + 1;
        else
            block = length / 1024;
    } else {
        block = 1;
    }
    int modlength = block * thread;

    // FIX: the padded copies were stack VLAs (C basestart[modlength]) — for
    // large inputs (hundreds of thousands of elements) that overflows the
    // stack.  Allocate them on the heap instead.
    C *basestart = new C[modlength];
    C *basestop  = new C[modlength];
    for (int i = 0; i < modlength; i++)
    {
        if (i < length) {
            basestart[i] = basestart1[i];
            basestop[i]  = basestop1[i];
        } else {
            basestart[i] = 0;
            basestop[i]  = 0;
        }
    }

    T* d_tooffsets, * d_sums;
    C* d_fromstarts, * d_fromstops;
    gpuErrchk(cudaMalloc((void**)&d_tooffsets, (modlength+1) * sizeof(T)));
    gpuErrchk(cudaMalloc((void**)&d_fromstarts, modlength * sizeof(C)));
    gpuErrchk(cudaMemcpy(d_fromstarts, basestart, modlength * sizeof(C), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc((void**)&d_fromstops, modlength * sizeof(C)));
    gpuErrchk(cudaMemcpy(d_fromstops, basestop, modlength * sizeof(C), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc((void**)&d_sums, block*sizeof(T)));

    // Pass 1: scan within blocks, collecting one total per block.
    prefix_sum1<T, C><<<block, thread, thread*2*sizeof(T)>>>(d_tooffsets, d_fromstarts,
            d_fromstops, basestartoffset, basestopoffset, modlength, d_sums);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    // Pass 2: scan the block totals (single block; requires block <= 1024).
    prefix_sum2<T><<<1, block, block*2*sizeof(T)>>>(d_sums, block);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    // Pass 3: add the scanned block totals back into each block's elements.
    adder<T><<<block, thread>>>(d_tooffsets, d_sums, modlength);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(base, d_tooffsets, (length + 1) * sizeof(T), cudaMemcpyDeviceToHost));
    // The final (inclusive) slot is the last exclusive value plus the last length.
    base[length] = base[length - 1] + basestop[length - 1 + basestopoffset] - basestart[length - 1 + basestartoffset];

    gpuErrchk(cudaFree(d_tooffsets));
    gpuErrchk(cudaFree(d_fromstarts));
    gpuErrchk(cudaFree(d_fromstops));
    gpuErrchk(cudaFree(d_sums));
    delete[] basestart;
    delete[] basestop;
}

int main()
{
    const int size = 400000;

    // FIX: these were ~4.8 MB of stack-allocated arrays, which risks a stack
    // overflow with the default 8 MB limit; allocate them on the heap.
    int *base = new int[size + 1];
    int *basestart = new int[size];
    int *basestop = new int[size];
    for (int i = 0; i < size; i++)
    {
        basestart[i] = i;
        basestop[i] = i + 10;
    }

    offload<int, int>(base, basestart, basestop, 0, 0, size);

    for (int i = 0; i < size + 1; i++) {
        std::cout << base[i] << "\n";
    }

    delete[] base;
    delete[] basestart;
    delete[] basestop;
    return 0;
}
4,382
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>

extern "C"
{
// Draw n samples from truncated normals N(mu[i], sigma[i]) restricted to
// (lo[i], hi[i]) by rejection sampling; one thread per sample.
// (rng_a, rng_b, rng_c) parameterize the per-thread curand seed/sequence.
// NOTE: the case where both bounds are infinite (plain normal) is not handled.
__global__ void rtruncnorm_kernel(float *vals, int n,
                                  float *mu, float *sigma,
                                  float *lo, float *hi,
                                  int rng_a, int rng_b, int rng_c)
{
    // Usual block/thread indexing...
    int myblock = blockIdx.x + blockIdx.y * gridDim.x;
    int blocksize = blockDim.x * blockDim.y * blockDim.z;
    int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int idx = myblock * blocksize + subthread;

    // Setup the RNG:
    curandState rng;
    curand_init(rng_a+idx*rng_b, rng_c, 0, &rng);

    // Sample:
    if (idx < n)
    {
        if (isfinite(lo[idx]) && isfinite(hi[idx]))
        {
            // Both bounds finite: uniform proposal on the standardized
            // interval [mu_neg, mu_pos], accepted with probability psi_z.
            float mu_neg = (lo[idx] - mu[idx])/sigma[idx];
            float mu_pos = (hi[idx] - mu[idx])/sigma[idx];
            float z = mu_neg + curand_uniform(&rng)*(mu_pos-mu_neg);
            // FIX: the conditional adjustments below used to *declare* a new
            // variable ("if (...) float psi_z = ...") which shadowed — and
            // therefore never changed — the acceptance probability actually
            // tested against u.
            float psi_z = expf(-z*z/2);
            if (mu_neg > 0) psi_z = expf(-(mu_neg*mu_neg - z*z)/2);
            if (mu_pos < 0) psi_z = expf(-(mu_pos*mu_pos - z*z)/2);
            float u = curand_uniform(&rng);
            while (u >= psi_z)
            {
                z = mu_neg + curand_uniform(&rng)*(mu_pos-mu_neg);
                psi_z = expf(-z*z/2);
                if (mu_neg > 0) psi_z = expf(-(mu_neg*mu_neg - z*z)/2);
                if (mu_pos < 0) psi_z = expf(-(mu_pos*mu_pos - z*z)/2);
                u = curand_uniform(&rng);
            }
            vals[idx] = sigma[idx]*z+mu[idx];
        }
        else if (!isfinite(lo[idx]))
        {
            // (-inf, hi]: sample the mirrored one-sided tail [mu_neg, inf)
            // with an exponential proposal (Robert-style), then flip the sign.
            float mu_neg = -(hi[idx] - mu[idx])/sigma[idx];
            float alpha = (mu_neg + sqrtf(mu_neg*mu_neg + 4))/2;
            float expo_rand = logf(1 - curand_uniform(&rng))/(-alpha);
            float z = mu_neg + expo_rand;
            // FIX: same shadowing bug as in the two-sided branch.
            float psi_z = expf(-(mu_neg - alpha)*(mu_neg - alpha)/2 - (alpha - z)*(alpha - z)/2);
            if (mu_neg < alpha) psi_z = expf(-(alpha - z)*(alpha - z)/2);
            float u = curand_uniform(&rng);
            while (u >= psi_z)
            {
                expo_rand = logf(1 - curand_uniform(&rng))/(-alpha);
                z = mu_neg + expo_rand;
                psi_z = expf(-(mu_neg - alpha)*(mu_neg - alpha)/2 - (alpha - z)*(alpha - z)/2);
                if (mu_neg < alpha) psi_z = expf(-(alpha - z)*(alpha - z)/2);
                u = curand_uniform(&rng);
            }
            vals[idx] = (sigma[idx]*(-z)+mu[idx]);
        }
        else if (!isfinite(hi[idx]))
        {
            // [lo, inf): one-sided tail with an exponential proposal.
            float mu_neg = (lo[idx] - mu[idx])/sigma[idx];
            float alpha = (mu_neg + sqrtf(mu_neg*mu_neg + 4))/2;
            float expo_rand = logf(1 - curand_uniform(&rng))/(-alpha);
            float z = mu_neg + expo_rand;
            // FIX: same shadowing bug as above.
            float psi_z = expf(-(mu_neg - alpha)*(mu_neg - alpha)/2 - (alpha - z)*(alpha - z)/2);
            if (mu_neg < alpha) psi_z = expf(-(alpha - z)*(alpha - z)/2);
            float u = curand_uniform(&rng);
            while (u >= psi_z)
            {
                expo_rand = logf(1 - curand_uniform(&rng))/(-alpha);
                z = mu_neg + expo_rand;
                psi_z = expf(-(mu_neg - alpha)*(mu_neg - alpha)/2 - (alpha - z)*(alpha - z)/2);
                if (mu_neg < alpha) psi_z = expf(-(alpha - z)*(alpha - z)/2);
                u = curand_uniform(&rng);
            }
            vals[idx] = sigma[idx]*z+mu[idx];
        }
    }
    return;
}
} // END extern "C"
4,383
#include <cuda_runtime.h>
//#include <helper_cuda.h>

#define cutilSafeCall(err) __cudaSafeCall (err, __FILE__, __LINE__)

// Deliberate no-op error "check": failures are silently swallowed.  The
// original reporting/abort code is kept below, commented out, for reference.
inline void __cudaSafeCall(cudaError err, const char *file, const int line)
{
    if (cudaSuccess != err)
    {
        //fprintf(stderr, "%s(%i) : cudaSafeCall() Runtime API error %d: %s.\n",
        //        file, line, (int)err, cudaGetErrorString( err ) );
        //exit(-1);
    }
}

int main()
{
    // Tear down the CUDA context; any error is intentionally ignored.
    cutilSafeCall(cudaDeviceReset());
    return 0;
}
4,384
#include "includes.h"

// For each assigned column, accumulate the sum and the sum of squares of one
// size-element stripe of `data` and write them into device_soln: the sum at
// [idx] and the sum of squares at [idx + num_calcs].
// idx is derived from threadIdx/blockIdx plus `offset` (used to shift work
// across multiple launches).
// NOTE(review): idx is not bounds-checked against num_calcs — presumably the
// launch configuration guarantees idx < num_calcs; confirm at the call site.
__global__ void stats_kernal(const float *data, float * device_soln, const int size, const int num_calcs, const int num_threads, const int offset)
{
    float sum = 0.0f;
    float sum_sq = 0.0f;
    int idx = threadIdx.x + blockIdx.x*num_threads + offset;
    for(int i = 0; i < size; i++){
        // idx % size picks the column within a size x size tile and
        // (idx/size)*size*size selects the tile, so consecutive threads read
        // consecutive addresses each iteration (coalesced access).
        int index = i*size + idx % size + ((idx/size)*size*size); //for coalescing
        // Read the element once into a register so both accumulations share a
        // single global-memory access.
        float datum = data[index];
        sum += datum;
        sum_sq += datum*datum;
    }
    // First num_calcs entries hold the sums, the next num_calcs the sums of squares.
    device_soln[idx] = sum;
    device_soln[idx + num_calcs] = sum_sq;
}
4,385
//#include "Reduction.h" //#include "RenderScene.h" //#include "ParallelScan.h" // //__global__ void BuildAdjecencyMatrixKernel(cv::cuda::PtrStepSz<float> AM, // PtrSz<ORBKey> TrainKeys, PtrSz<ORBKey> QueryKeys, // PtrSz<float> MatchDist) { // // int x = blockDim.x * blockIdx.x + threadIdx.x; // int y = blockDim.y * blockIdx.y + threadIdx.y; // if (x < AM.cols && y < AM.rows) { // float score = 0; // if(x == y) { // score = expf(-MatchDist[x]); // } else { // ORBKey* match_0_train = &TrainKeys[x]; // ORBKey* match_0_query = &QueryKeys[x]; // ORBKey* match_1_train = &TrainKeys[y]; // ORBKey* match_1_query = &QueryKeys[y]; // float d_0 = norm(match_0_train->pos - match_1_train->pos); // float d_1 = norm(match_0_query->pos - match_1_query->pos); // if(d_0 > 1e-6 && d_1 > 1e-6) { // float alpha_0 = acosf(match_0_train->normal * match_1_train->normal); // float alpha_1 = acosf(match_0_query->normal * match_1_query->normal); // float beta_0 = acosf(match_0_train->normal * (match_1_train->pos - match_0_train->pos) / d_0); // float beta_1 = acosf(match_1_train->normal * (match_1_train->pos - match_0_train->pos) / d_0); // float gamma_0 = acosf(match_0_query->normal * (match_1_query->pos - match_0_query->pos) / d_1); // float gamma_1 = acosf(match_1_query->normal * (match_1_query->pos - match_0_query->pos) / d_1); // score = expf(-(fabs(d_0 - d_1) + fabs(alpha_0 - alpha_1) + fabs(beta_0 - beta_1) + fabs(gamma_0 - gamma_1))); // } // } // if(isnan(score)) // score = 0; // AM.ptr(y)[x] = score; // } //} // //__global__ void SelectMatches(PtrSz<ORBKey> TrainKeys, PtrSz<ORBKey> QueryKeys, // PtrSz<ORBKey> TrainKeys_selected, PtrSz<ORBKey> QueryKeys_selected, // PtrSz<int> SelectedMatches, PtrSz<int> QueryIdx, PtrSz<int> SelectedIdx) { // int x = blockDim.x * blockIdx.x + threadIdx.x; // if (x < SelectedMatches.size) { // int idx = SelectedMatches[x]; // ORBKey* trainKey = &TrainKeys[idx]; // ORBKey* queryKey = &QueryKeys[idx]; // memcpy((void*) &TrainKeys_selected[x], (void*) 
trainKey, sizeof(ORBKey)); // memcpy((void*) &QueryKeys_selected[x], (void*) queryKey, sizeof(ORBKey)); // SelectedIdx[x] = QueryIdx[idx]; // } //} // //void BuildAdjecencyMatrix(cv::cuda::GpuMat& AM, DeviceArray<ORBKey>& TrainKeys, // DeviceArray<ORBKey>& QueryKeys, DeviceArray<float>& MatchDist, // DeviceArray<ORBKey>& train_select, DeviceArray<ORBKey>& query_select, // DeviceArray<int>& QueryIdx, DeviceArray<int>& SelectedIdx) { // // dim3 block(32, 8); // dim3 grid(DivUp(AM.cols, block.x), DivUp(AM.rows, block.y)); // // BuildAdjecencyMatrixKernel<<<grid, block>>>(AM, TrainKeys, QueryKeys, MatchDist); // // SafeCall(cudaDeviceSynchronize()); // SafeCall(cudaGetLastError()); // // cv::cuda::GpuMat result; // cv::cuda::reduce(AM, result, 0, CV_REDUCE_SUM); // cv::Mat cpuResult, indexMat; // result.download(cpuResult); // // cv::sortIdx(cpuResult, indexMat, CV_SORT_EVERY_ROW + CV_SORT_DESCENDING); // int selection = indexMat.cols >= 100 ? 100 : indexMat.cols; // DeviceArray<int> SelectedMatches(selection); // SelectedMatches.upload((void*)indexMat.data, selection); // train_select.create(selection); // query_select.create(selection); // SelectedIdx.create(selection); // // dim3 block2(MaxThread); // dim3 grid2(DivUp(selection, block2.x)); // // SelectMatches<<<grid2, block2>>>(TrainKeys, QueryKeys, train_select, // query_select, SelectedMatches, QueryIdx, SelectedIdx); // // SafeCall(cudaDeviceSynchronize()); // SafeCall(cudaGetLastError()); //}
4,386
#include "includes.h"

// In-place multiply each kernel-position slice of expanded_images by the
// matching expanded_derivs values for one (channel, module) slab.
// Grid layout: blockIdx.y = channel (color), blockIdx.x = module within the
// batch; threads stride over the image dimension.  num_channels is accepted
// for interface symmetry but unused in the body.
__global__ void kOutpTraceMultiplyImages(float *expanded_images, float *expanded_derivs, int num_images, int num_channels, int num_modules_batch, int kernel_size)
{
    int color = blockIdx.y;
    int module_id = blockIdx.x;

    // Advance both base pointers to this block's (channel, module) slab.
    // expanded_images holds kernel_size slices per channel, each
    // num_images * num_modules_batch wide; expanded_derivs holds one.
    expanded_images += num_images * num_modules_batch * kernel_size * color;
    expanded_images += num_images * module_id;
    expanded_derivs += num_images * num_modules_batch * color;
    expanded_derivs += num_images * module_id;

    for (int kpos = 0; kpos < kernel_size; kpos++) {
        for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
            // Kernel-position slices are strided by num_images * num_modules_batch.
            int image_idx = im + num_images * num_modules_batch * kpos;
            int deriv_idx = im;
            expanded_images[image_idx] *= expanded_derivs[deriv_idx];
        }
        // Barrier after each kernel position; the loop bounds are uniform
        // across the block, so all threads reach it.
        __syncthreads();
    }
}
4,387
#include "includes.h"

// Point-in-polygon (crossing-number) test: for each query point i, set cs[i]
// to 1 if (px[i], py[i]) lies inside the polygon given by (vx, vy), else 0.
// The polygon vertices are staged in dynamic shared memory, which the caller
// must size to 2 * nvert * sizeof(float).
__global__ void pnpoly_cnGPU(char *cs, const float *px, const float *py, const float *vx, const float *vy, int npoint, int nvert)
{
    extern __shared__ int s[];
    float *tvx = (float*) s;
    float *tvy = (float*) &s[nvert];

    int i = blockIdx.x*blockDim.x + threadIdx.x;

    // Cooperative, block-strided copy of the vertices into shared memory.
    // FIX: previously every thread redundantly copied the whole polygon, and
    // both the copy and the __syncthreads() sat inside `if (i < npoint)` — a
    // divergent barrier (undefined behaviour) whenever npoint is not a
    // multiple of the block size.  All threads now reach the barrier.
    for (int j = threadIdx.x; j < nvert; j += blockDim.x)
    {
        tvx[j] = vx[j];
        tvy[j] = vy[j];
    }
    __syncthreads();

    if (i < npoint)
    {
        int c = 0;
        // Count edge crossings of a horizontal ray from the query point.
        for (int j = 0, k = nvert-1; j < nvert; k = j++)
        {
            if ( ((tvy[j]>py[i]) != (tvy[k]>py[i])) &&
                 (px[i] < (tvx[k]-tvx[j]) * (py[i]-tvy[j]) / (tvy[k]-tvy[j]) + tvx[j]) )
                c = !c;
        }
        cs[i] = c & 1;   // odd crossing count => inside
    }
}
4,388
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

/* Single-threaded CPU reference for a 2D Ising model.
   Spins live on an L x L grid as {0,1}; only the inner (L-2)x(L-2) spins are
   updated (Metropolis rule over a black/white checkerboard sweep); boundary
   spins stay fixed at their initial value.  CUDA events are used only to
   time the run. */

// linear lattice size
#define L 114
static int AREA = L*L;
// number of active spins (boundary ring excluded)
static int NTOT = L*L - (4*L -4);
// #define T 6.
// #define T 0.1
// #define T 2.26918531421
#define T_CYCLE_START 1.5
#define T_CYCLE_END 3
#define T_CYCLE_STEP 0.04
#define SINGLETEMP 3.0
// NOTE(review): integer conversion truncates ((3 - 1.5)/0.04 = 37.5 -> 37)
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
// print history true/false
#define HISTORY 1

// simulation schedule: number of repeats, total sweeps per repeat,
// thermalization sweeps, and sweeps between successive measurements
struct measure_plan {
    int steps_repeat;
    int t_max_sim;
    int t_measure_wait;
    int t_measure_interval;
} static PLAN = {
    .steps_repeat = 100,
    .t_max_sim = 250,
    .t_measure_wait = 50,
    .t_measure_interval = 10
};

// average tracker struct: running sum / sum of squares over n expected samples
struct avg_tr {
    double sum;
    double sum_squares;
    int n;
};

struct avg_tr new_avg_tr(int locn) {
    struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
    return a;
}

void update_avg(struct avg_tr * tr_p, double newval) {
    tr_p->sum += newval;
    tr_p->sum_squares += (newval*newval);
}

double average( struct avg_tr tr) {
    return (tr.sum)/((double) tr.n) ;
}

// population standard deviation: sqrt(<x^2> - <x>^2)
double stdev( struct avg_tr tr) {
    return sqrt( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) );
}

double variance( struct avg_tr tr) {
    return ( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) );
}

// uniform random double in [0, 1]
double unitrand(){
    return (double)rand() / (double)RAND_MAX;
}

void init_random(char grid[L][L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            grid[x][y] = rand() & 1;
        }
    }
}

// all-spins-down start configuration
void init_t0(char grid[L][L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            grid[x][y] = 0;
        }
    }
}

// can segfault
char grid_step(char grid[L][L], int x, int y, int xstep, int ystep) {
    return grid[x+xstep][y+ystep];
}

// segfault if applied to an edge spin, must be called only on the inner L-1 grid
// *2 -4 remaps {0,1} into {-1,1}
char deltaH(char grid[L][L], int x, int y) {
    char s0 = grid[x][y];
    // XOR of {0,1} spins: 1 when the bond is antiparallel
    char j1 = s0 ^ grid_step(grid, x, y, 1, 0);
    char j2 = s0 ^ grid_step(grid, x, y, -1, 0);
    char j3 = s0 ^ grid_step(grid, x, y, 0, 1);
    char j4 = s0 ^ grid_step(grid, x, y, 0, -1);
    return -((j1 + j2 + j3 + j4) *2 -4)*2*J;
}

void flip(char grid[L][L], int x, int y) {
    grid[x][y] = !grid[x][y];
}

// Metropolis acceptance: dh <= 0 gives p >= 1 so the flip always happens,
// otherwise accept with probability exp(-dh/T)
void update_spin(char grid[L][L], int x, int y, double temperature) {
    double dh = (double) deltaH(grid, x, y);
    // printf("dh: %f \n", dh);
    double p = exp( -dh / temperature);
    double ur = unitrand(); //CHANGE
    // printf("p: %f, unitrand: %f \n", p, ur);
    if(ur < p ) {
        flip(grid, x, y);
    }
}

// half-sweep over one checkerboard colour (inner spins only)
void update_grid_white(char grid[L][L], double temperature) {
    for(int x = 1; x<L-1; x+=1) {
        for(int y = (1 + x%2) ; y<L-1; y+=2) {
            update_spin(grid, x, y, temperature);
        }
    }
}

// half-sweep over the opposite checkerboard colour
void update_grid_black(char grid[L][L], double temperature) {
    for(int x = 1; x<L-1; x+=1) {
        for(int y = (1 + (x+1)%2) ; y<L-1; y+=2) {
            update_spin(grid, x, y, temperature);
        }
    }
}

// ASCII dump of the configuration to stdout
void dump(char grid[L][L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            // if(grid[x][y] == 0) printf("•");
            // else printf("◘");
            if(grid[x][y] == 0) printf(" ");
            else printf("█");
            // printf("%i", grid[x][y]);
        }
        printf("\n");
    }
    printf("\n");
}

// magnetization per active spin, in [-1, 1]
double measure_m(char grid[L][L]) {
    int m = 0;
    for(int x = 1; x<L-1; x++) {
        for(int y = 1; y<L-1; y++) {
            // each term is exactly +-1.0, so the implicit double->int
            // conversion on accumulation is lossless
            m += (grid[x][y]*2. -1.);
            // printf("x %i m %f \n", x, grid[x][y] );
        }
    }
    return (((double) m ) / (double) NTOT) ;
}

// Run pl.steps_repeat independent simulations at `temperature`, measure the
// magnetization every pl.t_measure_interval sweeps after thermalization, and
// append "temperature  mean  stdev" to resf.
void measure_cycle(char startgrid[L][L], struct measure_plan pl, FILE *resf, double temperature) {
    char grid[L][L];
    //OUTER REP LOOP
    double n_measures_per_sim = (double) ((pl.t_max_sim - pl.t_measure_wait)/pl.t_measure_interval);
    struct avg_tr avg_of_all_sims_tr = new_avg_tr(pl.steps_repeat);
    // NOTE(review): unused variable
    float avg_of_sims = 0;
    for( int krep=0; krep< pl.steps_repeat; krep++) {
        // per-repeat seed for reproducibility
        srand(SEED + krep);
        memcpy(grid, startgrid, L*L*sizeof(char) );
        // INNER SIM LOOPS
        if(HISTORY) printf("# simulation %i\n", krep+1);
        if(HISTORY) printf("# waiting thermalization for the first %i sim steps\n", pl.t_measure_wait);
        int ksim=0;
        for( ; ksim<pl.t_measure_wait; ksim++) {
            update_grid_black(grid, temperature);
            update_grid_white(grid, temperature);
            if( ksim % pl.t_measure_interval == 0) {
                // print all history
                if(HISTORY) printf("%i %f\n", ksim, measure_m(grid));
            }
        }
        if(HISTORY) printf("# end thermalization\n");
        // n_measures_per_sim is implicitly narrowed back to int here
        struct avg_tr sim_avg_tr = new_avg_tr(n_measures_per_sim);
        for( ; ksim<pl.t_max_sim; ksim++) {
            update_grid_black(grid, temperature);
            update_grid_white(grid, temperature);
            if( ksim % pl.t_measure_interval == 0) {
                double locres = measure_m(grid);
                // print all history
                if(HISTORY) printf("%i %f\n", ksim, locres);
                update_avg(&sim_avg_tr, locres);
            }
        }
        // END INNER SIM LOOPS
        if(HISTORY) printf("# end simulation %i\n", krep+1);
        if(HISTORY) printf("# average for simulation %i: %f +- %f \n", krep+1, average(sim_avg_tr), stdev(sim_avg_tr));
        update_avg(&avg_of_all_sims_tr, average(sim_avg_tr));
    }
    // END OUTER REP LOOP
    fprintf(resf, "%f ", temperature);
    fprintf(resf, "%f ", average(avg_of_all_sims_tr));
    fprintf(resf, "%f\n", stdev(avg_of_all_sims_tr));
    // fprintf(resf, "\n\n");
    if(HISTORY) dump(grid);
}

int main() {
    // CUDA events are used purely as a wall-clock timer for the CPU loop
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    FILE *resf = fopen("results.txt", "w");
    fprintf(resf, "# cpu1\n");
    fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
    fprintf(resf, "#temp_start: %f\n# coupling: %f\n# repetitions: %i\n", 0., J, PLAN.steps_repeat);
    fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", PLAN.t_max_sim, PLAN.t_measure_wait, PLAN.t_measure_interval, SEED);
    fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
    fprintf(resf, "\n");
    fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");

    srand(SEED);
    char startgrid[L][L];
    init_t0(startgrid);
    // dump(startgrid);

    // cycle
    for( double kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
        measure_cycle(startgrid, PLAN, resf, kt);
    }

    // just one
    // measure_cycle(startgrid, PLAN, resf, SINGLETEMP);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float total_time = 0;
    cudaEventElapsedTime(&total_time, start, stop);

    FILE *timef = fopen("time.txt", "w");
    long int total_flips = ((long int)(n_temps))* ((long int)((PLAN.steps_repeat))) * ((long int)(PLAN.t_max_sim)) * ((long int)(NTOT));
    fprintf(timef, "# cpu1\n");
    fprintf(timef, "# total execution time (milliseconds):\n");
    fprintf(timef, "%f\n", total_time);
    fprintf(timef, "# total spin flips performed:\n");
    fprintf(timef, "%li\n", total_flips);
    fprintf(timef, "# average spin flips per millisecond:\n");
    fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
    fclose(timef);
    fclose(resf);
}
4,389
#include<cuda.h>
#include<stdio.h>
#include<math.h>

// tile width = thread-block width; the kernel assumes n is a multiple of this
#define TILEWIDTH 32

/* Tiled "convolution" kernel: each thread accumulates
   sum over tiles i, k in [0,TILEWIDTH): A[i*T+k] * B[i*T+tx+k],
   with B wrapping back to B[0] on the last tile.
   NOTE(review): when n is NOT a multiple of TILEWIDTH (e.g. main() uses
   n = 10), the loads A[...], B[...] and the final store C[index] run past
   the n-element device buffers — out-of-bounds accesses.  Either pad the
   arrays to a TILEWIDTH multiple or guard every global access; confirm the
   intended wrap semantics before changing the arithmetic. */
__global__ void vecConvKernel(float* A, float* B, float* C, int n){
    //identify the index of the data to be read
    int tx=threadIdx.x;
    int bx=blockIdx.x;
    int index=bx*blockDim.x+tx;
    __shared__ float Ads[TILEWIDTH];
    __shared__ float Bds[2*TILEWIDTH];  // double-width so Bds[tx+k] never wraps
    //assuming n is multiple of TILEWIDTH
    // if(index<n){
    int i;
    float val=0.0;
    // all tiles except the last: stage one tile of A and two of B
    for(i=0;i<gridDim.x-1;i++){
        Ads[tx] = A[i*TILEWIDTH+tx];
        Bds[tx] = B[i*TILEWIDTH+tx];
        Bds[TILEWIDTH + tx] = B[(i+1)*TILEWIDTH + tx];
        __syncthreads();  // tiles fully loaded before use
        for(int k=0;k<TILEWIDTH;k++){
            val+= Ads[k]*Bds[tx+k];
        }
        __syncthreads();  // all reads done before the next overwrite
    }
    // last tile: B's second half wraps around to the start of B
    Ads[tx] = A[i*TILEWIDTH + tx];
    Bds[tx] = B[i*TILEWIDTH+tx];
    Bds[TILEWIDTH + tx] = B[tx];
    __syncthreads();
    for(int k=0;k<TILEWIDTH;k++){
        val+= Ads[k]*Bds[tx+k];
    }
    __syncthreads();
    C[index] = val;
    // }
}

// Host wrapper: allocates device buffers, copies inputs, launches the kernel
// and copies the result back.
__host__ void vecConv(float* A,float* B,float* C, int n){
    // NOTE(review): unused variable
    int c=ceil(n/256.0);
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    //Allocate device memory for A,B,C
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);
    //copy A,B to device memory
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(TILEWIDTH,1,1);
    dim3 dimGrid(ceil(n/(float)TILEWIDTH),1,1);
    //call kernel function that calculates the sum and stores it in C
    vecConvKernel<<< dimGrid,dimBlock >>>(d_A,d_B,d_C,n);
    //the y and z dimensions are set to 1 by default
    //copy C from device memory
    cudaMemcpy( C,d_C, size, cudaMemcpyDeviceToHost);
    //free device memories
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

// Driver: builds two small test vectors and prints the result.
int main(){
    float *A,*B,*C;
    int n=10;
    A=(float*)malloc(n*sizeof(float));
    B=(float*)malloc(n*sizeof(float));
    C=(float*)malloc(n*sizeof(float));
    int i;
    for(i=0;i<n;i++){
        A[i]=(float)i;
        B[i]=(float)2*i;
    }
    vecConv(A,B,C,n);
    for(i=0;i<n;i++){
        printf("%f ",C[i]);
    }
    free(A);
    free(B);
    free(C);
    return 0;
}
4,390
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Grid-stride vector add: y[i] += x[i] for every i in [0, n).
__global__ void add(int n, float *x, float *y)
{
    int first  = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += stride)
        y[i] += x[i];
}

int main(void)
{
    int n = 1000000;
    float *x, *y;

    // Unified memory: buffers visible to both host and device.
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));

    // Initialize arrays: x = 1, y = 2 everywhere.
    for (int i = 0; i < n; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    int blockSize = 256;
    int numBlocks = (n + blockSize - 1) / blockSize;  // ceil-div launch
    add<<<numBlocks, blockSize>>>(n, x, y);
    cudaDeviceSynchronize();  // wait before touching y on the host

    // Check for errors (all values should be 3.0f).
    int error = 0;
    for (int i = 0; i < n; i++)
        if (y[i] != 3.0f)
            error += 1;
    printf("Errors: %d\n", error);

    // Free memory.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
4,391
/* Voxel sampling GPU implementation
 * Author Zhaoyu SU
 * All Rights Reserved. Sep., 2019.
 *
 * Voxel-grid downsampling: for every occupied voxel of a
 * (grid_w x grid_l x grid_h) grid, keep the index of the first input point
 * that lands in it (first-come wins via atomics). */
#include <stdio.h>

// Map a flat point id to its batch index via the per-batch offset table.
// Presumably accu_list[b] is the first point id belonging to batch b
// (cumulative offsets) — TODO confirm against the caller.
__device__ inline int get_batch_id(int* accu_list, int batch_size, int id) {
    for (int b=0; b<batch_size-1; b++) {
        if (id >= accu_list[b]) {
            if(id < accu_list[b+1])
                return b;
        }
    }
    // fell through every interval: the id belongs to the last batch
    return batch_size - 1;
}

// One thread per input point.  Each point is quantized to a voxel; the first
// thread to claim a voxel appends its point id to that batch's output region.
// NOTE(review): the ret==0 "first visitor" test assumes grid_buffer is
// zero-initialized by the caller — confirm.
__global__ void grid_sampling_gpu_kernel(int batch_size, int input_point_num,
                                         float resolution,
                                         int grid_w, int grid_l, int grid_h,
                                         const float* input_coors,
                                         const int* input_num_list,
                                         int* input_accu_list,
                                         int* output_idx_temp,
                                         int* output_num_list,
                                         int* grid_buffer) {
    if (batch_size*input_point_num <=0) {
        printf("GridSamplingOp exits due to void inputs.\n");
        return;
    }
    int grid_size = grid_w * grid_l * grid_h;
    int point_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (point_id < input_point_num) {
        int batch_id = get_batch_id(input_accu_list, batch_size, point_id);
        // voxel coordinates: truncate-toward-zero quantization of (x, y, z)
        int grid_coor_w = __float2int_rz(input_coors[point_id*3 + 0] / resolution);
        int grid_coor_l = __float2int_rz(input_coors[point_id*3 + 1] / resolution);
        int grid_coor_h = __float2int_rz(input_coors[point_id*3 + 2] / resolution);
        // flat index of this voxel within this batch's grid
        int grid_buffer_idx = batch_id * grid_size
                            + grid_coor_w * grid_l * grid_h
                            + grid_coor_l * grid_h
                            + grid_coor_h;
        // ret == 0 exactly for the first thread to touch the voxel
        int ret = atomicAdd(&grid_buffer[grid_buffer_idx], 1);
        if (ret == 0) {
            // reserve the next output slot in this batch's region
            int count = atomicAdd(&output_num_list[batch_id], 1);
            output_idx_temp[input_accu_list[batch_id] + count] = point_id;
        }
    }
}

// Host launcher: picks a block size via the occupancy API, rounds the grid
// up to cover input_point_num threads, and launches the kernel.
void grid_sampling_gpu_launcher(int batch_size, int input_point_num,
                                float resolution,
                                int grid_w, int grid_l, int grid_h,
                                const float* input_coors,
                                const int* input_num_list,
                                int* input_accu_list,
                                int* output_idx_temp,
                                int* output_num_list,
                                int* grid_buffer) {
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize;    // The actual grid size needed, based on input size
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, grid_sampling_gpu_kernel, 0, input_point_num);
    // Round up according to array size
    gridSize = (input_point_num + blockSize - 1) / blockSize;
    grid_sampling_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num,
                                                      resolution,
                                                      grid_w, grid_l, grid_h,
                                                      input_coors,
                                                      input_num_list,
                                                      input_accu_list,
                                                      output_idx_temp,
                                                      output_num_list,
                                                      grid_buffer);
}
4,392
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>

// number of bit-length buckets (an int has at most 32 significant bits)
#define arm 32

// Kept for source compatibility; no longer used.  The histogram is now
// accumulated directly in the caller-supplied buffer, which removes the
// original data race of reading globalArray[count] right after atomicAdd.
__device__ int globalArray[32];

// One thread per input element: compute the bit length of a[tid] (number of
// significant bits; 0 for the value 0) and atomically bump that bucket in c.
// c must hold `arm` ints and be zeroed before launch.
// (The original launched <<<1,n>>> — illegal for n > 1024 — and read the
// bucket back non-atomically; both fixed here.)
__global__ void add(int *a, int *c, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    int temp = a[tid];
    int count = 0;
    while (temp != 0) {
        count++;
        temp = temp / 2;
    }
    atomicAdd(&c[count], 1);
}

int bitcount(int num);
void Sort(int arr[], int n);

// Swap two ints in place.
void swap(int* a, int* b)
{
    int t = *a;
    *a = *b;
    *b = t;
}

// Lomuto partition around arr[high]; returns the pivot's final index.
int partition (int arr[], int low, int high)
{
    int pivot = arr[high];   // pivot
    int i = (low - 1);       // Index of smaller element
    for (int j = low; j <= high - 1; j++) {
        // If current element is smaller than the pivot
        if (arr[j] < pivot) {
            i++;   // increment index of smaller element
            swap(&arr[i], &arr[j]);
        }
    }
    swap(&arr[i + 1], &arr[high]);
    return (i + 1);
}

// Classic recursive quicksort over arr[low..high] (inclusive).
void quicksort(int arr[], int low, int high)
{
    if (low < high) {
        /* pi is partitioning index, arr[pi] is now at right place */
        int pi = partition(arr, low, high);
        // Separately sort elements before partition and after partition
        quicksort(arr, low, pi - 1);
        quicksort(arr, pi + 1, high);
    }
}

// Reads ints from the file "integers", histograms them by bit length on the
// GPU, places each value into its bit-length bucket (counting-sort style),
// then quicksorts every bucket.
int main(void)
{
    int arr[50000] = {0};
    int bitband[arm] = {0};   // histogram, then prefix sums of bucket sizes
    int pos;
    int resultant[50000] = {0};
    int bitmap[arm] = {0};    // next free slot within each bucket
    int n = 0;
    int count = 0;

    FILE *fptr;
    int num;
    fptr = fopen("integers", "r");
    if (fptr == NULL) {   // robustness: the original dereferenced a NULL FILE*
        fprintf(stderr, "cannot open input file 'integers'\n");
        return 1;
    }
    printf("\ncounting the File:\n");
    while ((num = getw(fptr)) != EOF) {
        arr[count] = num;
        count++;
    }
    fclose(fptr);
    n = count;

    int *d_a, *d_c;
    int size = sizeof(int);
    cudaMalloc((void **)&d_a, size * n);
    cudaMalloc((void **)&d_c, size * arm);
    cudaMemcpy(d_a, arr, size * n, cudaMemcpyHostToDevice);
    // buckets must start at zero: cudaMalloc does not zero memory
    cudaMemset(d_c, 0, size * arm);

    clock_t start, stop;
    start = clock();

    // ceil-div launch: the original <<<1,n>>> exceeds the 1024 threads/block
    // limit for large inputs
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    add<<<blocks, threads>>>(d_a, d_c, n);
    cudaMemcpy(bitband, d_c, size * arm, cudaMemcpyDeviceToHost);

    for (int i = 0; i < 32; ++i) {
        printf("%d\n", bitband[i]);
    }

    // prefix-sum: bitband[k] = number of values with bit length <= k.
    // (the original looped i <= arm, writing bitband[32] out of bounds)
    for (int i = 1; i < arm; i++) {
        bitband[i] += bitband[i - 1];
    }

    // place each value into its bucket; bucket k starts at bitband[k-1]
    // (assumes inputs are non-negative so bit lengths fit in 0..31 — the
    // original made the same assumption)
    for (int i = 0; i < count; i++) {
        pos = bitcount(arr[i]);
        resultant[((pos == 0) ? 0 : bitband[pos - 1]) + bitmap[pos]] = arr[i];
        bitmap[pos] += 1;
    }

    // sort each bucket; bucket 0 holds only zeros, so it is already sorted.
    // (the original looped i <= arm, reading bitband[32] out of bounds)
    for (int i = 1; i < arm; i++) {
        quicksort(resultant, bitband[i - 1], bitband[i] - 1);
    }
    stop = clock();

    printf("\nSorting Completed in time");
    printf("CLOCKS PER SECOND = %ld\n", CLOCKS_PER_SEC);
    printf("START CLOCK = %ld \nSTOP CLOCK = %ld \n", start, stop);
    printf("TIME TAKEN = %f\n", (float)(stop - start) / CLOCKS_PER_SEC);

    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}

// Bit length of num: index (1-based) of its highest set bit; 0 for num == 0.
// Scans byte-by-byte for the top non-empty byte, then bit-by-bit inside it.
int bitcount(int num)
{
    unsigned int count1 = 0, count2 = 0, tmp, value = 4278190080U, result;
    // checking the byte no. of register
    if (num == 0)
        return 0;
    while (!(num & value)) {
        value = value >> 8;
        count1++;
    }
    value = 2147483648U;
    tmp = count1;
    // moving the first bit to proper byte
    while (tmp--) {
        value = value >> 8;
    }
    // moving the bit in a particular byte
    while (!(num & value)) {
        value = value >> 1;
        count2++;
    }
    result = 32 - (count1 * 8 + count2);
    return result;
}

// Simple bubble sort (unused fallback, kept for compatibility).
void Sort(int arr[], int n)
{
    int i, j;
    for (i = 0; i < n - 1; i++)
        // Last i elements are already in place
        for (j = 0; j < n - i - 1; j++)
            if (arr[j] > arr[j + 1])
                swap(&arr[j], &arr[j + 1]);
}
4,393
/* C stuff */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <time.h>          // clock(), clock_t
// C++ stuff
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <sstream>
// Open-CV for the vision stuff
//#include <opencv2/opencv.hpp>
/* Cuda stuff */
#include <cuda_runtime_api.h>
#include <cuda.h>

typedef unsigned char byte;
typedef byte * pbyte;

clock_t LastProfilingClock=clock();

#define ARCH_NEWLINE "\n"

/***************************************************************************
 Writes profiling output (milli-seconds since last call)
 ***************************************************************************/
extern clock_t LastProfilingClock;

inline float profiling (const char *s, clock_t *whichClock=NULL) {
    if (whichClock==NULL)
        whichClock=&LastProfilingClock;
    clock_t newClock=clock();
    float res = (float) (newClock-*whichClock) / (float) CLOCKS_PER_SEC;
    if (s!=NULL)
        std::cerr << "Time: " << s << ": " << res << std::endl;
    *whichClock = newClock;
    return res;
}

inline float profilingTime (const char *s, time_t *whichClock) {
    time_t newTime=time(NULL);
    float res = (float) (newTime-*whichClock);
    if (s!=NULL)
        std::cerr << "Time(real): " << s << ": " << res << std::endl;
    return res;
}

/***************************************************************************
 CREATES AN EMPTY IMAGE
 Column-major layout: im[x] points to column x, a contiguous run of ysize
 bytes, so pixel (x,y) is im[x][y] and the flat index is x*ysize + y.
 ***************************************************************************/
unsigned char **CREATE_IMAGE (int ysize, int xsize) {
    unsigned char ** im;
    unsigned char *big;
    im  = new pbyte [xsize];
    big = new byte [xsize*ysize];
    for (int i = 0 ; i < xsize ; i++)
        im[i] = big + i*ysize;
    return (im);
}

/***************************************************************************
 Frees an image
 ***************************************************************************/
void FREE_IMAGE (byte **im) {
    delete [] im[0];
    delete [] im;
}

/***************************************************************************
 Reads a grayscale image (binary PGM, "P5"; "P6" color is rejected)
 ***************************************************************************/
void readImage (const char *filename, unsigned char***_p2darr, int *_ysize, int *_xsize) {
    char *buf;
    char shortbuf[256];
    short int x, y;
    int color, foo;
    char c;
    FILE * inpic;
    int entete, z;
    int ysize, xsize;
    unsigned char **R;

    if ( (inpic = fopen(filename,"r+b")) == NULL) {
        std::cerr << "can't open file '" << filename << "': " << strerror(errno) << std::endl;
        exit(1);
    }
    if (fscanf(inpic,"%c%c\n",&c,&c) != 2) {
        std::cerr << "Image::readGray():\n Wrong Image Format: no .ppm!!\n"
                  << "filename: " << filename << std::endl;
        exit(2);
    }
    if (c == '6') {
        z = 3 ;
        std::cerr << "Image::readGray():: disabled due to bug.\n"
                     "Use Image::readColor() + Image::convertRGB2GrayScale() instead\n";
        exit(3);
    }
    else {
        if (c != '5') {
            std::cerr << "Image::readGray():: wrong image format: "
                         "for .ppm only versions P5 and P6 are supported!\n";
            exit(4);
        }
        z = 1 ;
    }

    // Count the header bytes (magic + any '#' comment lines) so we can skip
    // them after the reopen below.
    fscanf(inpic,"%c",&c) ;
    entete = 3 ;
    while (c == '#') {
        entete++ ;
        while (c != '\n') {
            entete++ ;
            fscanf(inpic,"%c",&c) ;
        }
        fscanf(inpic,"%c",&c) ;
    }
    if ( (inpic = freopen(filename,"r+b",inpic)) == NULL) {
        std::cerr << "can't open file " << filename << ":" << strerror(errno) << "\n";
        exit(5);
    }
    fread(shortbuf,1,entete,inpic);
    if (fscanf(inpic,"%d%d\n%d",&xsize,&ysize,&color) != 3) {
        std::cerr << "Internal error (2):" << filename << std::endl;
        exit(6);
    }
    fread(shortbuf,1,1,inpic) ;   // single whitespace before the pixel data

    buf = new char [z*xsize+10];
    R = CREATE_IMAGE(ysize,xsize) ;
    for ( y = 0 ; y < ysize ; y++) {
        if ((foo=fread(buf,1,z*xsize,inpic)) != z*xsize) {
            std::ostringstream s;
            s << "file " << filename << ":\nrow " << y << " input failure: "
              << "got " << foo << " instead of " << z*xsize << " bytes!\n";
            if (!feof(inpic)) s << "No ";
            s << "EOF occured.\n";
            if (!ferror(inpic)) s << "No ";
            s << "error in the sense of ferror() occured.\n";
            // Fix: the original built this message but never printed it.
            std::cerr << s.str();
            exit(7);
        }
        else {
            if (z == 1) {
                for ( x = 0 ; x < xsize ; x++)
                    R[x][y] = buf[x] ;
            }
            else {
                // RGB -> luma (never reached: P6 is rejected above)
                for ( x = 0 ; x < z*xsize ; x += z )
                    R[x/z][y] = (int)(.299*(float)buf[x] + 0.587*(float)buf[x+1] + 0.114*(float)buf[x+2]);
            }
        }
    }
    fclose (inpic);
    delete [] buf;
    *_ysize = ysize;
    *_xsize = xsize;
    *_p2darr = R;
}

// *************************************************************
// Writes a grayscale image (binary PGM)
// *************************************************************
void writeImage(const char *filename, unsigned char **R, int ysize, int xsize) {
    FILE *fp;
    char *buf;
    short int y, x;
    if ((fp=fopen(filename,"w+b"))==NULL) {
        std::cerr << "Cannot create output file '" << filename << "': " << strerror(errno) << "!\n";
        exit(1);
    }
    buf = new char [xsize+10];
    sprintf(buf,"P5%s%d %d%s255%s",ARCH_NEWLINE,xsize,ysize,ARCH_NEWLINE,ARCH_NEWLINE) ;
    x = strlen(buf);
    clearerr(fp);
    fwrite(buf,1,x,fp);
    if (ferror(fp)) {
        std::cerr << "Could not write image to file (Image::writeGray())!\n";
        exit(1);
    }
    for ( y = 0 ; y < ysize ; y++) {
        for ( x = 0 ; x < xsize ; x++ ) {
            buf[x] = R[x][y];
        }
        clearerr(fp);
        fwrite(buf,1,xsize,fp);
        if (ferror(fp)) {
            std::cerr << "Could not write image to file (Image::writeGray())!\n";
            exit(1);
        }
    }
    delete [] buf;
    fclose(fp);
}

/***************************************************************************
 USAGE
 ***************************************************************************/
void usage (char *com) {
    std::cerr<< "usage: " << com << " <inputimagename> <outputimagename>\n";
    exit(1);
}

/***************************************************************************
 The CPU version
 3x3 binomial smoothing (weights 1-2-1 / 2-4-2 / 1-2-1, sum 16) on the
 column-major image; border pixels are copied unchanged.
 ***************************************************************************/
void cpuFilter(unsigned char *in, unsigned char *out, int rows, int cols) {
    // General case.  Fix: the right-neighbour term reads (x+1); the original
    // (x+2) was the wrong pixel AND ran one column past the buffer at
    // x == cols-2.
    for (int y=1; y<rows-1; ++y)
        for (int x=1; x<cols-1; ++x) {
            float f = ( 4.0f*in[x*rows+y]
                      + 2.0f*in[(x-1)*rows+y]
                      + 2.0f*in[(x+1)*rows+y]
                      + 2.0f*in[x*rows+y+1]
                      + 2.0f*in[x*rows+y-1]
                      + in[(x-1)*rows+y-1]
                      + in[(x-1)*rows+y+1]
                      + in[(x+1)*rows+y-1]
                      + in[(x+1)*rows+y+1] ) / 16.0f;
            if (f<0) f=0;
            if (f>255) f=255;
            out[x*rows+y] = (unsigned char) f;
        }
    // Borders: pass through unfiltered
    for (int y=0; y<rows; ++y) {
        out[0*rows+y]        = in[0*rows+y];
        out[(cols-1)*rows+y] = in[(cols-1)*rows+y];
    }
    for (int x=0; x<cols; ++x) {
        out[x*rows+0]      = in[x*rows+0];
        out[x*rows+rows-1] = in[x*rows+rows-1];
    }
}

/***************************************************************************
 The GPU version - the kernel
 One thread per pixel.  Fixes over the original: takes rows AND cols (the
 original used one "mxWidth" for both, only correct for square images), uses
 a proper per-dimension bounds guard (the flat "<= mxWidth*mxWidth" test was
 off by one and let whole threads index out of range), and reads the (x+1)
 neighbour instead of (x+2).
 ***************************************************************************/
__global__ void gpuHostRun(int rows, int cols, unsigned char* input, unsigned char* output) {
    int x = blockIdx.x*blockDim.x + threadIdx.x; // cols
    int y = blockIdx.y*blockDim.y + threadIdx.y; // rows
    if (x >= cols || y >= rows)
        return;                                   // outside the image
    if (x == 0 || y == 0 || x == cols-1 || y == rows-1) {
        output[x*rows+y] = input[x*rows+y];       // border: pass through
        return;
    }
    float f = ( 4.0f*input[x*rows+y]
              + 2.0f*input[(x-1)*rows+y]
              + 2.0f*input[(x+1)*rows+y]
              + 2.0f*input[x*rows+y+1]
              + 2.0f*input[x*rows+y-1]
              + input[(x-1)*rows+y-1]
              + input[(x-1)*rows+y+1]
              + input[(x+1)*rows+y-1]
              + input[(x+1)*rows+y+1] ) / 16.0f;
    if (f<0) f=0;
    if (f>255) f=255;
    output[x*rows+y] = (unsigned char) f;
}

/***************************************************************************
 The GPU version - the host code
 ***************************************************************************/
void gpuFilter(unsigned char *imarr, unsigned char *resarr, int rows, int cols ) // dimY == nbRows, dimX == nbCol
{
    unsigned char *gpuMatrix1; //input
    unsigned char *gpuMatrix2; //output
    int matrixInByte = rows*cols*sizeof(char);
    cudaMalloc((void**) &gpuMatrix1, matrixInByte);
    cudaMalloc((void**) &gpuMatrix2, matrixInByte);
    cudaError_t ok = cudaMemcpy(gpuMatrix1, imarr, matrixInByte, cudaMemcpyHostToDevice );
    if(ok != cudaSuccess) {
        std::cerr <<"*** Could not transfer\n";
        exit(1);
    }
    dim3 dimBlock(32,32);
    // ceil-div: the original truncated and skipped the last partial tile
    dim3 dimGrid((cols+dimBlock.x-1)/dimBlock.x, (rows+dimBlock.y-1)/dimBlock.y);
    gpuHostRun<<<dimGrid, dimBlock>>>(rows, cols, gpuMatrix1, gpuMatrix2);
    // Fix: the original checked the stale `ok` of the upload here instead of
    // the download's status.
    ok = cudaMemcpy(resarr, gpuMatrix2, matrixInByte, cudaMemcpyDeviceToHost );
    if(ok != cudaSuccess) {
        std::cerr <<"*** Could not transfer\n";
        exit(1);
    }
    // Fix: the original leaked both device buffers on every call.
    cudaFree(gpuMatrix1);
    cudaFree(gpuMatrix2);
}

/***************************************************************************
 Main program
 ***************************************************************************/
int main (int argc, char **argv) {
    int c;

    // Argument processing
    while ((c = getopt (argc, argv, "h")) != EOF) {
        switch (c) {
            case 'h':
                usage(*argv);
                break;
            case '?':
                usage (*argv);
                std::cerr << "\n" << "*** Problem parsing the options!\n\n";
                exit (1);
        }
    }
    int requiredArgs=2;
    if (argc-optind!=requiredArgs) {
        usage (*argv);
        exit (1);
    }
    char *inputfname=argv[optind];
    char *outputfname=argv[optind+1];

    std::cout << "Reading image " << inputfname << "\n";
    unsigned char **image;
    int rows;
    int cols;
    readImage (inputfname, &image, &rows, &cols);
    std::cout << "=====================================================\n"
              << "Loaded image of size " << cols << "x" << rows << ".\n";

    unsigned char *imarr = *image;   // flat column-major pixel buffer
    unsigned char *resarr = new unsigned char [cols*rows];

    profiling (NULL);
    for (int i=0; i<100; ++i)
        cpuFilter(imarr, resarr, rows, cols);
    profiling ("CPU version");
    for (int i=0; i<100; ++i)
        gpuFilter(imarr, resarr, rows, cols);
    profiling ("GPU version");

    // Copy flat array back to image structure
    for (int i=0; i<rows*cols; ++i)
        imarr[i] = resarr[i];
    writeImage (outputfname, image, rows, cols);
    std::cout << "Program terminated correctly.\n";
    return 0;
}
4,394
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>   // EXIT_FAILURE (the original used it without this header)

// Abort with location info if any previously issued CUDA call or kernel
// launch has failed (checks the sticky last-error state).
#define cudaCheckError() { \
    cudaError_t e = cudaGetLastError(); \
    if (e != cudaSuccess) { \
        printf("CUDA Failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
        exit(EXIT_FAILURE); \
    } \
}

// Check the status of a single CUDA runtime call in-line; asserts on failure.
inline cudaError_t cudaCheckErrorInline(cudaError_t result) {
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA runtime error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
    return result;
}

// Fill host array a[0..N) with num.
void initWith(float num, float *a, int N)
{
    for(int i = 0; i < N; ++i) {
        a[i] = num;
    }
}

// Grid-stride element-wise add: result[i] = a[i] + b[i] for i in [0, N).
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int gridStride = gridDim.x * blockDim.x;
    for(int i = col; i < N; i += gridStride) {
        result[i] = a[i] + b[i];
    }
}

// Verify every element equals target; exits with an error on first mismatch.
void checkElementsAre(float target, float *array, int N)
{
    for(int i = 0; i < N; i++) {
        if(array[i] != target) {
            printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
            exit(1);
        }
    }
    printf("SUCCESS! All values added correctly.\n");
}

int main()
{
    const int N = 2<<20;   // 2^21 elements
    size_t size = N * sizeof(float);

    float *h_a;
    float *h_b;
    float *h_c;
    // cudaCheckErrorInline(cudaMallocManaged(&h_a, size));
    // cudaCheckErrorInline(cudaMallocManaged(&h_b, size));
    // cudaCheckErrorInline(cudaMallocManaged(&h_c, size));
    h_a = (float *)malloc(size);
    h_b = (float *)malloc(size);
    h_c = (float *)malloc(size);

    initWith(3, h_a, N);
    initWith(4, h_b, N);
    initWith(0, h_c, N);

    // Consistency fix: every runtime call below is now routed through the
    // checking helper this file already defines — the original left the
    // mallocs and memcpys unchecked.
    float *dev_a, *dev_b, *dev_c;
    cudaCheckErrorInline(cudaMalloc((void **) &dev_a, sizeof(float)*N));
    cudaCheckErrorInline(cudaMalloc((void **) &dev_b, sizeof(float)*N));
    cudaCheckErrorInline(cudaMalloc((void **) &dev_c, sizeof(float)*N));

    cudaCheckErrorInline(cudaMemcpy(dev_a, h_a, sizeof(float)*N, cudaMemcpyHostToDevice));
    cudaCheckErrorInline(cudaMemcpy(dev_b, h_b, sizeof(float)*N, cudaMemcpyHostToDevice));
    // dev_c is fully overwritten by the kernel; the upload is kept only to
    // preserve the original behaviour exactly.
    cudaCheckErrorInline(cudaMemcpy(dev_c, h_c, sizeof(float)*N, cudaMemcpyHostToDevice));

    size_t thread_per_block = 1024;
    size_t number_of_blocks = (N+thread_per_block - 1) / thread_per_block;

    addVectorsInto<<<number_of_blocks, thread_per_block>>>(dev_c, dev_a, dev_b, N);
    cudaCheckError();                  // launch-configuration errors
    cudaDeviceSynchronize();
    cudaCheckError();                  // asynchronous execution errors

    cudaCheckErrorInline(cudaMemcpy(h_c, dev_c, sizeof(float)*N, cudaMemcpyDeviceToHost));

    checkElementsAre(7, h_c, N);

    free(h_a);
    free(h_b);
    free(h_c);
    cudaCheckErrorInline(cudaFree(dev_a));
    cudaCheckErrorInline(cudaFree(dev_b));
    cudaCheckErrorInline(cudaFree(dev_c));
    return 0;
}
4,395
#include <stdio.h>
#include <stdlib.h>

#define SIZE 8

/* Element-wise add of two SIZE-element int vectors on the device.
 * Expects a 1-D launch with at least SIZE threads (used as <<<1, SIZE>>>). */
__global__ void addVector(int vectorAns[SIZE], int vectorA[SIZE], int vectorB[SIZE]);

/* Abort with a descriptive message when a CUDA runtime call fails, so a
 * failed allocation or copy is not silently followed by printing garbage. */
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Builds vectorA = {0..7} and vectorB = {8..1}, adds them on the GPU,
 * and prints the (all-8) result. */
int main()
{
    int vectorA[SIZE];
    int vectorB[SIZE];
    int vectorAns[SIZE];
    int i;

    for (i = 0; i < SIZE; i++) {
        vectorA[i] = i;
        vectorB[i] = SIZE - i;
    }

    int *d_A;
    int *d_B;
    int *d_C;

    /* allocate device memory */
    checkCuda(cudaMalloc((void **) &d_A, sizeof(int) * SIZE), "cudaMalloc d_A");
    checkCuda(cudaMalloc((void **) &d_B, sizeof(int) * SIZE), "cudaMalloc d_B");
    checkCuda(cudaMalloc((void **) &d_C, sizeof(int) * SIZE), "cudaMalloc d_C");

    /* copy inputs from RAM to GPU */
    checkCuda(cudaMemcpy(d_A, vectorA, sizeof(int) * SIZE, cudaMemcpyHostToDevice),
              "cudaMemcpy d_A");
    checkCuda(cudaMemcpy(d_B, vectorB, sizeof(int) * SIZE, cudaMemcpyHostToDevice),
              "cudaMemcpy d_B");

    /* calculation kernel: one thread per element */
    addVector<<< 1, SIZE >>> (d_C, d_A, d_B);
    checkCuda(cudaGetLastError(), "addVector launch");

    /* copy back to RAM (cudaMemcpy synchronizes with the kernel) */
    checkCuda(cudaMemcpy(vectorAns, d_C, sizeof(int) * SIZE, cudaMemcpyDeviceToHost),
              "cudaMemcpy vectorAns");

    /* cuda free */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    printf("Answer is : \n");
    for (i = 0; i < SIZE; i++) {
        printf("%d ", vectorAns[i]);
    }
    printf("\n");
    return 0;
}

__global__ void addVector(int vectorAns[SIZE], int vectorA[SIZE], int vectorB[SIZE])
{
    int i = threadIdx.x;
    /* bounds guard keeps the kernel safe under any launch configuration */
    if (i < SIZE) {
        vectorAns[i] = vectorA[i] + vectorB[i];
    }
}
4,396
/* CUDA Library for Skeleton 2D Electrostatic GPU PIC Code */ /* written by Viktor K. Decyk, UCLA */ #include <stdlib.h> #include <stdio.h> #include "cuda.h" extern int nblock_size; extern int maxgsx; static cudaError_t crc; /*--------------------------------------------------------------------*/ __device__ void liscan2(int *isdata, int nths) { /* performs local prefix reduction of integer data shared by threads */ /* using binary tree method. */ /* local data */ int l, mb, kxs, lb, kb; l = threadIdx.x; mb = l; kxs = 1; while (kxs < nths) { lb = kxs*mb; kb = 2*lb + kxs - 1; lb += l + kxs; if (lb < nths) { isdata[lb] += isdata[kb]; } __syncthreads(); mb >>= 1; kxs <<= 1; } return; } /*--------------------------------------------------------------------*/ __device__ void lsum2(float *sdata, int n) { /* finds local sum of nths data items shared by threads */ /* using binary tree method. input is modified. */ /* local data */ int l, k; float s; l = threadIdx.x; k = blockDim.x >> 1; s = 0.0f; if (l < n) s = sdata[l]; while (k > 0) { if (l < k) { if ((l+k) < n) { s += sdata[l+k]; sdata[l] = s; } } __syncthreads(); k >>= 1; } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppush2l(float ppart[], float fxy[], int kpic[], float qbm, float dt, float *ek, int idimp, int nppmx, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) { /* for 2d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with various boundary conditions. 
threaded version using guard cells data read in tiles particles stored segmented array 44 flops/particle, 12 loads, 4 stores input: all, output: ppart, ek equations used are: vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt, vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt, where q/m is charge/mass, and x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from the nearest grid points: fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1)) fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1) + dx*fy(n+1,m+1)) where n,m = leftmost grid points and dx = x-n, dy = y-m ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = velocity vx of particle n in tile m ppart[m][3][n] = velocity vy of particle n in tile m fxy[k][j][0] = x component of force/charge at grid (j,k) fxy[k][j][1] = y component of force/charge at grid (j,k) that is, convolution of electric field over particle shape kpic = number of particles per tile qbm = particle charge/mass dt = time interval between successive calculations kinetic energy/mass at time t is also calculated, using ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2) idimp = size of phase space = 4 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of field arrays, must be >= nx+1 nyv = second dimension of field arrays, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,2d periodic,2d reflecting,mixed reflecting/periodic) local data */ int noff, moff, npoff, npp, mxv; int i, j, k, ii, nn, mm; float qtm, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy; /* The sizes of the 
shared memory arrays are as follows: */ /* float sfxy[2*(mx+1)*(my+1)], sek[blockDim.x]; */ /* to conserve memory, sek overlaps with sfxy */ /* and the name sfxy is used instead of sek */ extern __shared__ float sfxy[]; double sum1; qtm = qbm*dt; sum1 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgerx = (float) nx; edgery = (float) ny; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } else if (ipbc==3) { edgelx = 1.0f; edgerx = (float) (nx-1); } mxv = mx + 1; /* k = tile number */ k = blockIdx.x + gridDim.x*blockIdx.y; /* loop over tiles */ if (k < mxy1) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; npoff = idimp*nppmx*k; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ii = threadIdx.x; while (ii < mxv*(my+1)) { j = ii/mxv; i = ii - mxv*j; if ((i < nn) && (j < mm)) { sfxy[2*ii] = fxy[2*(i+noff+nxv*(j+moff))]; sfxy[1+2*ii] = fxy[1+2*(i+noff+nxv*(j+moff))]; } ii += blockDim.x; } /* synchronize threads */ __syncthreads(); /* loop over particles in tile */ j = threadIdx.x; while (j < npp) { /* find interpolation weights */ x = ppart[j+npoff]; nn = x; y = ppart[j+npoff+nppmx]; mm = y; dxp = x - (float) nn; dyp = y - (float) mm; nn = 2*(nn - noff) + 2*mxv*(mm - moff); amx = 1.0f - dxp; amy = 1.0f - dyp; /* find acceleration */ dx = amx*sfxy[nn]; dy = amx*sfxy[1+nn]; dx = amy*(dxp*sfxy[2+nn] + dx); dy = amy*(dxp*sfxy[3+nn] + dy); nn += 2*mxv; vx = amx*sfxy[nn]; vy = amx*sfxy[1+nn]; dx += dyp*(dxp*sfxy[2+nn] + vx); dy += dyp*(dxp*sfxy[3+nn] + vy); /* new velocity */ vx = ppart[j+npoff+nppmx*2]; vy = ppart[j+npoff+nppmx*3]; dx = vx + qtm*dx; dy = vy + qtm*dy; /* average kinetic energy */ vx += dx; vy += dy; sum1 += (double) (vx*vx + vy*vy); ppart[j+npoff+nppmx*2] = dx; ppart[j+npoff+nppmx*3] = dy; /* new position */ dx = x + dx*dt; dy = y + dy*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < 
edgelx) || (dx >= edgerx)) { dx = ppart[j+npoff]; ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2]; } if ((dy < edgely) || (dy >= edgery)) { dy = ppart[j+npoff+nppmx]; ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3]; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = ppart[j+npoff]; ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2]; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+npoff+nppmx] = dy; j += blockDim.x; } /* synchronize threads */ __syncthreads(); /* add kinetic energies in tile */ sfxy[threadIdx.x] = (float) sum1; /* synchronize threads */ __syncthreads(); lsum2(sfxy,blockDim.x); /* normalize kinetic energy of tile */ if (threadIdx.x==0) { ek[k] = 0.125f*sfxy[0]; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppushf2l(float ppart[], float fxy[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float *ek, int idimp, int nppmx, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ntmax, int *irc) { /* for 2d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with periodic boundary conditions. 
also determines list of particles which are leaving this tile threaded version using guard cells data read in tiles particles stored segmented array 44 flops/particle, 12 loads, 4 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc equations used are: vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt, vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt, where q/m is charge/mass, and x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from the nearest grid points: fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1)) fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1) + dx*fy(n+1,m+1)) where n,m = leftmost grid points and dx = x-n, dy = y-m ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = velocity vx of particle n in tile m ppart[m][3][n] = velocity vy of particle n in tile m fxy[k][j][0] = x component of force/charge at grid (j,k) fxy[k][j][1] = y component of force/charge at grid (j,k) that is, convolution of electric field over particle shape kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = destination of particle leaving hole ihole[k][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass dt = time interval between successive calculations kinetic energy/mass at time t is also calculated, using ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2) idimp = size of phase space = 4 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of field arrays, must be >= nx+1 nyv = second dimension of field arrays, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 
= mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version local data */ int noff, moff, npoff, nhoff, mhoff, npp, mxv; int i, j, k, ii, ih, nn, mm; float qtm, dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy; float anx, any, edgelx, edgely, edgerx, edgery; /* The sizes of the shared memory arrays are as follows: */ /* float sfxy[2*(mx+1)*(my+1)], sek[blockDim.x]; */ /* int sih[blockDim.x], sncl[8], nh[1]; */ /* to conserve memory, sek overlaps with sfxy and sih */ /* and the name sfxy is used instead of sek */ float *sfxy; int *sncl, *sih, *nh; extern __shared__ int shm[]; sfxy = (float *)&shm[0]; sih = (int *)&sfxy[2*(mx+1)*(my+1)]; sncl = (int *)&sih[blockDim.x]; nh = (int *)&sfxy[blockDim.x]; sncl = sncl > nh ? sncl : nh; nh = (int *)&sncl[8]; double sum1; qtm = qbm*dt; anx = (float) nx; any = (float) ny; sum1 = 0.0; mxv = mx + 1; /* k = tile number */ k = blockIdx.x + gridDim.x*blockIdx.y; /* loop over tiles */ if (k < mxy1) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; npoff = idimp*nppmx*k; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? 
my : mm; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; /* load local fields from global array */ ii = threadIdx.x; while (ii < mxv*(my+1)) { j = ii/mxv; i = ii - mxv*j; if ((i < nn+1) && (j < mm+1)) { sfxy[2*ii] = fxy[2*(i+noff+nxv*(j+moff))]; sfxy[1+2*ii] = fxy[1+2*(i+noff+nxv*(j+moff))]; } ii += blockDim.x; } /* clear counters */ j = threadIdx.x; while (j < 8) { sncl[j] = 0; j += blockDim.x; } if (threadIdx.x==0) { nh[0] = 0; } /* synchronize threads */ __syncthreads(); /* loop over particles in tile */ ii = (npp - 1)/(int) blockDim.x + 1; nhoff = 0; for (i = 0; i < ii; i++) { j = threadIdx.x + blockDim.x*i; sih[threadIdx.x] = 0; if (j < npp) { /* find interpolation weights */ x = ppart[j+npoff]; nn = x; y = ppart[j+npoff+nppmx]; mm = y; dxp = x - (float) nn; dyp = y - (float) mm; nn = 2*(nn - noff) + 2*mxv*(mm - moff); amx = 1.0f - dxp; amy = 1.0f - dyp; /* find acceleration */ dx = amx*sfxy[nn]; dy = amx*sfxy[1+nn]; dx = amy*(dxp*sfxy[2+nn] + dx); dy = amy*(dxp*sfxy[3+nn] + dy); nn += 2*mxv; vx = amx*sfxy[nn]; vy = amx*sfxy[1+nn]; dx += dyp*(dxp*sfxy[2+nn] + vx); dy += dyp*(dxp*sfxy[3+nn] + vy); /* new velocity */ vx = ppart[j+npoff+nppmx*2]; vy = ppart[j+npoff+nppmx*3]; dx = vx + qtm*dx; dy = vy + qtm*dy; /* average kinetic energy */ vx += dx; vy += dy; sum1 += (double) (vx*vx + vy*vy); ppart[j+npoff+nppmx*2] = dx; ppart[j+npoff+nppmx*3] = dy; /* new position */ dx = x + dx*dt; dy = y + dy*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx -= anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy -= any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if 
(dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } /* using prefix scan for ih to keep holes ordered */ if (mm > 0) { atomicAdd(&sncl[mm-1],1); sih[threadIdx.x] = 1; } } /* synchronize threads */ __syncthreads(); nn = npp - blockDim.x*i; if (nn > blockDim.x) nn = blockDim.x; /* perform local prefix reduction */ liscan2(sih,nn); if (j < npp) { /* set new position */ ppart[j+npoff] = dx; ppart[j+npoff+nppmx] = dy; /* write out location and direction of departing particles */ ih = sih[threadIdx.x]; mhoff = 0; if (threadIdx.x > 0) mhoff = sih[threadIdx.x-1]; /* this thread has a hole present */ if (ih > mhoff) { ih += nhoff; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = mm; } else { nh[0] = 1; } } } /* update number of holes in this iteration */ if (nn > 0) nhoff += sih[nn-1]; /* synchronize threads */ __syncthreads(); } /* add kinetic energies in tile */ sfxy[threadIdx.x] = (float) sum1; /* synchronize threads */ __syncthreads(); lsum2(sfxy,blockDim.x); /* write out counters */ j = threadIdx.x; while (j < 8) { ncl[j+8*k] = sncl[j]; j += blockDim.x; } /* set error and end of file flag */ if (threadIdx.x==0) { /* ihole overflow */ ih = nhoff; if (nh[0] > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; /* normalize kinetic energy of tile */ ek[k] = 0.125f*sfxy[0]; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpu2ppost2l(float ppart[], float q[], int kpic[], float qm, int nppmx, int idimp, int mx, int my, int nxv, int nyv, int mx1, int mxy1) { /* for 2d code, this subroutine calculates particle charge density using first-order linear interpolation, periodic boundaries threaded version using guard cells data deposited in tiles particles stored segmented array 17 flops/particle, 6 loads, 4 stores input: all, output: q charge density is approximated by values at the nearest grid points q(n,m)=qm*(1.-dx)*(1.-dy) q(n+1,m)=qm*dx*(1.-dy) q(n,m+1)=qm*(1.-dx)*dy 
q(n+1,m+1)=qm*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m q[k][j] = charge density at grid point j,k kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 4 mx/my = number of grids in sorting cell in x/y nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 local data */ int noff, moff, npoff, npp, mxv; int i, j, k, ii, nn, mm, np, mp; float dxp, dyp, amx, amy; /* The size of the shared memory array is as follows: */ /* float sq[(mx+1)*(my+1)] */ extern __shared__ float sq[]; mxv = mx + 1; /* k = tile number */ k = blockIdx.x + gridDim.x*blockIdx.y; /* loop over tiles */ if (k < mxy1) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; npoff = idimp*nppmx*k; /* zero out local accumulator */ i = threadIdx.x; while (i < mxv*(my+1)) { sq[i] = 0.0f; i += blockDim.x; } /* synchronize threads */ __syncthreads(); /* loop over particles in tile */ j = threadIdx.x; while (j < npp) { /* find interpolation weights */ dxp = ppart[j+npoff]; nn = dxp; dyp = ppart[j+npoff+nppmx]; mm = dyp; dxp = qm*(dxp - (float) nn); dyp = dyp - (float) mm; nn = nn - noff; mm = mxv*(mm - moff); amx = qm - dxp; mp = mm + mxv; amy = 1.0f - dyp; np = nn + 1; /* deposit charge within tile to local accumulator */ /* original deposit charge, has data hazard on GPU */ /* sq[np+mp] += dxp*dyp; */ /* sq[nn+mp] += amx*dyp; */ /* sq[np+mm] += dxp*amy; */ /* sq[nn+mm] += amx*amy; */ /* for devices with compute capability 2.x */ atomicAdd(&sq[np+mp],dxp*dyp); atomicAdd(&sq[nn+mp],amx*dyp); atomicAdd(&sq[np+mm],dxp*amy); atomicAdd(&sq[nn+mm],amx*amy); j += blockDim.x; } /* synchronize threads */ 
__syncthreads(); /* deposit charge to global array */ nn = mxv < nxv-noff ? mxv : nxv-noff; mm = my+1 < nyv-moff ? my+1 : nyv-moff; ii = threadIdx.x; while (ii < mxv*(my+1)) { j = ii/mxv; i = ii - mxv*j; if ((i < nn) && (j < mm)) { /* original deposit charge, has data hazard on GPU */ /* q[i+noff+nxv*(j+moff)] += sq[ii]; */ /* for devices with compute capability 2.x */ atomicAdd(&q[i+noff+nxv*(j+moff)],sq[ii]); } ii += blockDim.x; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpucaguard2l(float2 qc[], float q[], int nx, int ny, int nxe, int nye, int nxvh, int nyv) { /* copy and accumulate extended periodic scalar field q into complex output field qc linear interpolation nx/ny = system length in x/y direction nxe = first dimension of input field array q, must be >= nx+1 nye = second dimension of input field array q, must be >= ny+1 nxvh = first dimension of output field array qc, must be >= nx/2+1 nyv = second dimension of output field array qc, must be >= ny */ /* local data */ int j, k, nxh; float at1, at2; float2 a; nxh = nx/2; k = blockIdx.x; if (k < ny) { j = threadIdx.x; at2 = 0.0f; while (j < nxh) { if (k==0) { at1 = q[2*j+nxe*ny]; at2 = q[2*j+1+nxe*ny]; if (j==0) { at1 += q[nx] + q[nx+nxe*ny]; } } if (k > 0) { at1 = 0.0f; if (j==0) { at1 = q[nx+nxe*k]; } } a.x = q[2*j+nxe*k] + at1; a.y = q[2*j+1+nxe*k] + at2; qc[j+nxvh*k] = a; j += blockDim.x; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpuccguard2l(float2 fxyc[], float fxy[], int nx, int ny, int nxe, int nye, int nxvh, int nyv) { /* copy and replicate complex input 2d vector field fxyc into extended periodic field fxy linear interpolation nx/ny = system length in x/y direction nxe = second dimension of input field array fxy, must be >= nx+1 nye = third dimension of input field array fxy, must be >= ny+1 nxvh = first dimension of input field array fxyc, must be >= nx/2+1 nyv = third dimension 
of input field array fxyc, must be >= ny */ /* local data */ int j, k, nxh; float2 a, b; nxh = nx/2; k = blockIdx.x; /* copy interior points */ if (k < ny) { j = threadIdx.x; while (j < nxh) { a = fxyc[j+nxvh*2*k]; b = fxyc[j+nxvh*(1+2*k)]; fxy[2*(2*j+nxe*k)] = a.x; fxy[1+2*(2*j+nxe*k)] = b.x; fxy[2*(2*j+1+nxe*k)] = a.y; fxy[1+2*(2*j+1+nxe*k)] = b.y; j += blockDim.x; } } /* accumulate edges of extended field */ if (blockIdx.x==0) { k = threadIdx.x; while (k < ny) { a = fxyc[nxvh*2*k]; b = fxyc[nxvh*(1+2*k)]; fxy[2*(nx+nxe*k)] = a.x; fxy[1+2*(nx+nxe*k)] = b.x; k += blockDim.x; } j = threadIdx.x; while (j < nxh) { a = fxyc[j]; b = fxyc[j+nxvh]; fxy[2*(2*j+nxe*ny)] = a.x; fxy[1+2*(2*j+nxe*ny)] = b.x; fxy[2*(2*j+1+nxe*ny)] = a.y; fxy[1+2*(2*j+1+nxe*ny)] = b.y; j += blockDim.x; } if (threadIdx.x==0) { a = fxyc[0]; b = fxyc[nxvh*1]; fxy[2*(nx+nxe*ny)] = a.x; fxy[1+2*(nx+nxe*ny)] = b.x; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppfnd2l(float ppart[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int nx, int ny, int mx, int my, int mx1, int my1, int ntmax, int *irc) { /* this subroutine performs first step of a particle sort by x,y grid in tiles of mx, my, where one finds the particles leaving tile and stores their number, location, and destination in ncl and ihole. 
linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory input: all except ncl, ihole, irc output: ppart, ncl, ihole, irc ppart[k][0][n] = position x of particle n in tile k ppart[k][1][n] = position y of particle n in tile k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = destination of particle leaving hole ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, noff, moff, npp, j, k, ih, ist, nn, mm, nths; float anx, any, edgelx, edgely, edgerx, edgery, dx, dy; /* The sizes of the shared memory arrays are as follows: */ /* int sncl[8], sih[blockDim.x], nh[1]; */ int *sncl, *sih, *nh; extern __shared__ int shm[]; sncl = (int *)&shm[0]; sih = (int *)&shm[8]; nh = (int *)&shm[8+blockDim.x]; mxy1 = mx1*my1; anx = (float) nx; any = (float) ny; /* k = tile number */ k = blockIdx.x + gridDim.x*blockIdx.y; /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ /* loop over tiles */ if (k < mxy1) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? 
my : mm; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; /* clear counters */ j = threadIdx.x; while (j < 8) { sncl[j] = 0; j += blockDim.x; } if (threadIdx.x==0) { nh[0] = 0; } /* synchronize threads */ __syncthreads(); /* loop over particles in tile */ mm = (npp - 1)/(int) blockDim.x + 1; noff = 0; for (nn = 0; nn < mm; nn++) { j = threadIdx.x + blockDim.x*nn; sih[threadIdx.x] = 0; if (j < npp) { dx = ppart[j+nppmx*(idimp*k)]; dy = ppart[j+nppmx*(1+idimp*k)]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[j+nppmx*(idimp*k)] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0f; ppart[j+nppmx*(idimp*k)] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[j+nppmx*(1+idimp*k)] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) ist += 3; else dy = 0.0f; ppart[j+nppmx*(1+idimp*k)] = dy; } else { ist += 3; } } /* using prefix scan for ih to keep holes ordered */ if (ist > 0) { atomicAdd(&sncl[ist-1],1); sih[threadIdx.x] = 1; } } /* synchronize threads */ __syncthreads(); nths = npp - blockDim.x*nn; if (nths > blockDim.x) nths = blockDim.x; /* perform local prefix reduction */ liscan2(sih,nths); if (j < npp) { ih = sih[threadIdx.x]; moff = 0; if (threadIdx.x > 0) moff = sih[threadIdx.x-1]; /* this thread has a hole present */ if (ih > moff) { ih += noff; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = ist; } else { nh[0] = 1; } } } /* update number of holes in this iteration */ if (nths > 0) noff += sih[nths-1]; /* synchronize threads */ __syncthreads(); } /* write out counters */ j = threadIdx.x; while (j < 8) { ncl[j+8*k] = sncl[j]; j 
+= blockDim.x; } /* set error and end of file flag */ if (threadIdx.x==0) { /* ihole overflow */ ih = noff; if (nh[0] > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppmov2l(float ppart[], float ppbuff[], int ncl[], int ihole[], int idimp, int nppmx, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* this subroutine performs second step of a particle sort by x,y grid in tiles of mx, my, where prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory input: all except ppbuff, irc output: ppbuff, ncl, irc ppart[k][i][n] = i co-ordinate of particle n in tile k ppbuff[k][i][n] = i co-ordinate of particle n in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, i, j, k, ii, nh, ist, j1, ierr; /* The sizes of the shared memory arrays are as follows: */ /* int sncl[8], ip[1]; */ /* blockDim.x should be >= 8 */ int *sncl, *ip; extern __shared__ int shm[]; sncl = (int *)&shm[0]; ip = (int *)&shm[8]; mxy1 = mx1*my1; ierr = 0; /* k = tile number */ k = blockIdx.x + gridDim.x*blockIdx.y; j = threadIdx.x; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ if (k < mxy1) { /* find address offset for ordered 
ppbuff array */ if (j < 8) { ist = ncl[j+8*k]; sncl[j] = ist; } if (threadIdx.x==0) ip[0] = 0; /* synchronize threads */ __syncthreads(); /* perform local prefix reduction */ liscan2(sncl,8); if (j < 8) sncl[j] -= ist; /* synchronize threads */ __syncthreads(); nh = ihole[2*(ntmax+1)*k]; /* loop over particles leaving tile */ while (j < nh) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1; ist = ihole[1+2*(j+1+(ntmax+1)*k)]; ii = atomicAdd(&sncl[ist-1],1); if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*(i+idimp*k)] = ppart[j1+nppmx*(i+idimp*k)]; } } else { ip[0] = 1; } j += blockDim.x; } /* synchronize threads */ __syncthreads(); /* write out counters */ j = threadIdx.x; if (j < 8) { ncl[j+8*k] = sncl[j]; } /* set error */ if (threadIdx.x==0) { if (ip[0] > 0) ierr = ierr > sncl[7] ? ierr : sncl[7]; } } /* ppbuff overflow */ if (ierr > 0) *irc = ierr; return; } /*--------------------------------------------------------------------*/ __global__ void gpuppord2l(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* this subroutine performs third step of a particle sort by x,y grid in tiles of mx, my, where incoming particles from other tiles are copied into ppart. 
linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory input: all except irc output: ppart, kpic, irc ppart[k][i][n] = i co-ordinate of particle n in tile k ppbuff[k][i][n] = i co-ordinate of particle n in tile k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, npp, ncoff, i, j, k, ii, jj, kx, ky, ni, nh; int nn, mm, ll, ip, j1, j2, kxl, kxr, kk, kl, kr; int nths; /* The sizes of the shared memory arrays are as follows: */ /* int ks[8], sip[8], sj[blockDim.x], sj1[1], ist[1]; */ int *ks, *sip, *sj, *sj1, *ist; extern __shared__ int shm[]; ks = (int *)&shm[0]; sip = (int *)&shm[8]; sj = (int *)&shm[16]; sj1 = (int *)&shm[16+blockDim.x]; ist = (int *)&shm[17+blockDim.x]; mxy1 = mx1*my1; /* k = tile number */ k = blockIdx.x + gridDim.x*blockIdx.y; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ if (k < mxy1) { npp = kpic[k]; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ 
if (threadIdx.x==0) { ks[0] = kxr + kk; ks[1] = kxl + kk; ks[2] = kx + kr; ks[3] = kxr + kr; ks[4] = kxl + kr; ks[5] = kx + kl; ks[6] = kxr + kl; ks[7] = kxl + kl; sj1[0] = 0; ist[0] = 0; } /* synchronize threads */ __syncthreads(); /* find number of incoming particles */ kk = 0; ncoff = 0; ip = 0; ii = threadIdx.x; if (ii < 8) { kk = ks[ii]; if (ii > 0) ncoff = ncl[ii-1+8*kk]; ip = ncl[ii+8*kk] - ncoff; kk = ncoff + idimp*npbmx*kk; sip[ii] = ip; } /* synchronize threads */ __syncthreads(); /* perform local prefix reduction */ liscan2(sip,8); ni = sip[7]; /* loop over directions */ nh = ihole[2*(ntmax+1)*k]; j1 = 0; mm = (ni - 1)/(int) blockDim.x + 1; for (nn = 0; nn < mm; nn++) { j = threadIdx.x + blockDim.x*nn; sj[threadIdx.x] = 0; if (threadIdx.x==0) sj[0] = sj1[0]; /* synchronize threads */ __syncthreads(); /* calculate offset for reading from particle buffer */ if (ii < 8) { /* mark next location where direction ii changes */ jj = sip[ii] - blockDim.x*nn; if ((jj >= 0) && (jj < blockDim.x)) { if (ip > 0) sj[jj] -= kk + ip; } } /* synchronize threads */ __syncthreads(); /* calculate offset for reading from particle buffer */ if (ii < 8) { /* mark location where direction ii starts */ jj -= ip; if ((jj >= 0) && (jj < blockDim.x)) { if (ip > 0) sj[jj] += kk; } } nths = ni - blockDim.x*nn; if (nths > blockDim.x) nths = blockDim.x; /* synchronize threads */ __syncthreads(); /* perform local prefix reduction */ liscan2(sj,nths); /* save last value for next time */ if (threadIdx.x==0) { jj = 0; if (nths > 0) jj = sj[nths-1]; sj1[0] = jj; } if (j < ni) { /* insert incoming particles into holes */ if (j < nh) { j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1; } /* place overflow at end of array */ else { j1 = npp + (j - nh); } if (j1 < nppmx) { jj = sj[threadIdx.x]; for (i = 0; i < idimp; i++) { ppart[j1+nppmx*(i+idimp*k)] = ppbuff[j+jj+npbmx*i]; } } else { ist[0] = 1; } } /* synchronize threads */ __syncthreads(); } /* update particle number if all holes have been filled */ jj = 
ni - nh; if (jj > 0) npp += jj; /* fill up remaining holes in particle array with particles from end */ ip = nh - ni; if (ip > 0) { mm = (ip - 1)/(int) blockDim.x + 1; kk = 0; ll = 0; /* loop over holes */ for (nn = 0; nn < mm; nn++) { j = threadIdx.x + blockDim.x*nn; /* j1 = locations of particles to fill holes, in decreasing order */ j1 = 0; if (j < ip) { j1 = npp - j - 1; } /* j2 = locations of holes at the end, in decreasing order */ j2 = 0; jj = nh - ll - threadIdx.x; if (jj > 0) { j2 = ihole[2*(jj+(ntmax+1)*k)] - 1; } /* holes with locations greater than npp-ip do not need to be filled */ /* identify such holes */ sj[threadIdx.x] = 1; /* synchronize threads */ __syncthreads(); /* omit particles at end that are holes */ ii = npp - (j2 + blockDim.x*nn) - 1; if ((ii >= 0) && (ii < blockDim.x)) sj[ii] = 0; nths = ip - blockDim.x*nn; if (nths > blockDim.x) nths = blockDim.x; /* synchronize threads */ __syncthreads(); /* perform local prefix reduction */ liscan2(sj,nths); /* ii = number particles at end to be moved */ ii = 0; if (nths > 0) ii = sj[nths-1]; /* identify which particles at end to be moved */ if (ii < nths) { ncoff = 0; if (j < ip) { if (threadIdx.x > 0) ncoff = sj[threadIdx.x-1]; jj = sj[threadIdx.x]; } /* synchronize threads */ __syncthreads(); if (j < ip) { if (jj > ncoff) { sj[jj-1] = j1; } } /* synchronize threads */ __syncthreads(); } /* j2 = locations of holes to be filled in increasing order */ j2 = 0; if (j < ip) { j1 = npp - j - 1; jj = threadIdx.x + ni + kk + 1; if (jj <= nh) j2 = ihole[2*(jj+(ntmax+1)*k)] - 1; } /* move particles from end into remaining holes */ if (j < (ii+blockDim.x*nn)) { if (ii < nths) j1 = sj[threadIdx.x]; for (i = 0; i < idimp; i++) { ppart[j2+nppmx*(i+idimp*k)] = ppart[j1+nppmx*(i+idimp*k)]; } } /* accumulate number of holes filled */ kk += ii; /* accumulate number of holes skipped over */ ii = nths - ii; ll += ii; } /* update number of particles */ npp -= ip; } /* set error and update particle */ if (threadIdx.x==0) 
{ /* ppart overflow */ if (ist[0] > 0) *irc = npp; kpic[k] = npp; } } return; } /*--------------------------------------------------------------------*/ __global__ void gpupois22t(float2 qt[], float2 fxyt[], float2 ffct[], float *we, int nx, int ny, int nxvh, int nyv, int nxhd, int nyhd) { /* this subroutine solves 2d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions, without packed data. vector length is second dimension input: qt,ffct,nx,ny,nxvh,nyv,nxhd,nyhd, output: fxyt,we approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc) where nxc = nx/2 - 1, nyc = ny/2 - 1 equation used is: fx[kx][ky] = -sqrt(-1)*kx*g[kx][ky]*s[kx][ky]*q[kx][ky], fy[kx][ky] = -sqrt(-1)*ky*g[kx][ky]*s[kx][ky]*q[kx][ky], where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, g[kx][ky] = (affp/(kx**2+ky**2))*s[kx,ky], s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0. 
qt[j][k] = complex charge density for fourier mode (k,j) fxyt[j][0][k] = x component of complex force/charge, fxyt[j][1][k] = y component of complex force/charge, all for fourier mode (k,j) caimag(ffct[j][k]) = finite-size particle shape factor s creal(ffct([j][k])) = potential green's function g for fourier mode (k,j) electric field energy is also calculated, using we = nx*ny*sum((affp/(kx**2+ky**2))*|q[kx][ky]*s[kx][ky]|**2) nx/ny = system length in x/y direction nxvh = second dimension of field arrays, must be >= nxh+1 nyv = first dimension of field arrays, must be >= ny nxhd = second dimension of form factor array, must be >= nxh nyhd = first dimension of form factor array, must be >= nyh local data */ int nxh, nyh, nxh1, j, k, k1, jj, jk, jk2; float dnx, dny, dkx, at1, at2, at3, at4; float2 zero, zt1, zt2, zt3; /* The size of the shared memory array is as follows: */ /* float ss[blockDim.x]; */ extern __shared__ float ss[]; double wp; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; dnx = 6.28318530717959f/(float) nx; dny = 6.28318530717959f/(float) ny; zero.x = 0.0f; zero.y = 0.0f; /* calculate force/charge and sum field energy */ wp = 0.0; /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ /* for (j = 1; j < nxh; j++) { */ j = blockIdx.x; if ((j > 0) && (j < nxh)) { dkx = dnx*(float) j; jj = nyhd*j; jk = nyv*j; jk2 = 2*jk; /* for (k = 1; k < nyh; k++) { */ k = threadIdx.x; while (k < nyh) { if (k > 0) { k1 = ny - k; zt1 = ffct[k+jj]; at1 = zt1.x*zt1.y; at2 = at1*dkx; at3 = at1*dny*(float) k; zt1 = qt[k+jk]; at4 = zt1.x; zt1.x = zt1.y; zt1.y = -at4; zt2 = qt[k1+jk]; at4 = zt2.x; zt2.x = zt2.y; zt2.y = -at4; zt3.x = at2*zt1.x; zt3.y = at2*zt1.y; fxyt[k+jk2] = zt3; zt3.x = at3*zt1.x; zt3.y = at3*zt1.y; fxyt[k+nyv+jk2] = zt3; zt3.x = at2*zt2.x; zt3.y = at2*zt2.y; fxyt[k1+jk2] = zt3; zt3.x = -at3*zt2.x; zt3.y = -at3*zt2.y; fxyt[k1+nyv+jk2] = zt3; wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y + zt2.x*zt2.x + zt2.y*zt2.y)); } k += blockDim.x; } } /* mode numbers ky = 0, 
ny/2 */ if (blockIdx.x==0) { k1 = nyh; /* for (j = 1; j < nxh; j++) { */ j = threadIdx.x; while (j < nxh) { if (j > 0) { jj = nyhd*j; jk = nyv*j; jk2 = 2*jk; zt1 = ffct[jj]; at1 = zt1.x*zt1.y; at2 = at1*dnx*(float) j; zt1 = qt[jk]; at4 = zt1.x; zt3.x = at2*zt1.y; zt3.y = -at2*at4; fxyt[jk2] = zt3; fxyt[nyv+jk2] = zero; fxyt[k1+jk2] = zero; fxyt[k1+nyv+jk2] = zero; wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y)); } j += blockDim.x; } /* mode numbers kx = 0, nx/2 */ nxh1 = 2*nyv*nxh; /* for (k = 1; k < nyh; k++) { */ k = threadIdx.x; while (k < nyh) { if (k > 0) { k1 = ny - k; zt1 = ffct[k]; at1 = zt1.x*zt1.y; at3 = at1*dny*(float) k; zt1 = qt[k]; at4 = zt1.x; zt3.x = at3*zt1.y; zt3.y = -at3*at4; fxyt[k] = zero; fxyt[k+nyv] = zt3; fxyt[k1] = zero; zt3.y = -zt3.y; fxyt[k1+nyv] = zt3; fxyt[k+nxh1] = zero; fxyt[k+nyv+nxh1] = zero; fxyt[k1+nxh1] = zero; fxyt[k1+nyv+nxh1] = zero; wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y)); } k += blockDim.x; } if (threadIdx.x==0) { k1 = nyh; fxyt[0] = zero; fxyt[nyv] = zero; fxyt[k1] = zero; fxyt[k1+nyv] = zero; fxyt[nxh1] = zero; fxyt[nxh1+nyv] = zero; fxyt[k1+nxh1] = zero; fxyt[k1+nyv+nxh1] = zero; } } j = blockIdx.x; if (j <= nxh) { /* sum potential energies for each x co-ordinate */ ss[threadIdx.x] = (float) wp; /* synchronize threads */ __syncthreads(); lsum2(ss,blockDim.x); /* normalize potential energy for each x co-ordinate */ if (threadIdx.x==0) we[j] = ss[0]*((float) (nx*ny)); } return; } /*--------------------------------------------------------------------*/ __global__ void gpuctpose4(float2 f[], float2 g[], int nx, int ny, int nxv, int nyv) { /* complex transpose using blocking algorithm with gaps */ /* local data */ int j, k, js, ks, joff, koff, mx, mxv; /* The size of the shared memory array is as follows: */ /* float2 shm2[(mx + 1)*mx]; */ extern __shared__ float2 shm2[]; mx = blockDim.x; mxv = mx + 1; joff = mx*blockIdx.x; koff = mx*blockIdx.y; js = threadIdx.x; ks = threadIdx.y; /* copy into block */ j = js + 
joff;
   k = ks + koff;
/* load an mx by mx tile of f into shared memory; the padded row
   stride mxv = mx + 1 is the classic +1 padding that staggers columns
   across shared-memory banks to avoid bank conflicts */
   if ((j < nx) && (k < ny)) {
      shm2[js+mxv*ks] = f[j+nxv*k];
   }
/* tile must be fully written before any transposed read */
   __syncthreads();
/* copy out from block */
/* threadIdx.x/y roles are swapped so the global writes to g remain
   coalesced (adjacent threads write adjacent k) */
   j = ks + joff;
   k = js + koff;
   if ((j < nx) && (k < ny)) {
      g[k+nyv*j] = shm2[ks+mxv*js];
   }
   return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuctpose4n(float2 fn[], float2 gn[], int nx, int ny,
                            int ndim, int nxv, int nyv) {
/* complex vector transpose using blocking algorithm with gaps */
/* ndim = vector dimension */
/* fn[k][i][j] is transposed into gn[j][i][k] for each of the ndim
   vector components; the host wrappers launch this with a square
   mx by mx thread block (dimBlockt(mx,mx)), which this kernel relies
   on when it swaps threadIdx.x/y between the load and store phases */
/* local data */
   int i, j, k, js, ks, joff, koff, mx, mxv, nmxv, nnxv, nnyv, jj, kk;
/* The size of the shared memory array is as follows: */
/* float2 shmn2[ndim*(mx + 1)*mx]; */
   extern __shared__ float2 shmn2[];
   mx = blockDim.x;
/* mxv = padded tile row stride (+1 avoids shared-memory bank conflicts) */
   mxv = mx + 1;
   joff = mx*blockIdx.x;
   koff = mx*blockIdx.y;
   js = threadIdx.x;
   ks = threadIdx.y;
   nmxv = ndim*mxv;
   nnxv = ndim*nxv;
   nnyv = ndim*nyv;
/* copy into block: one padded tile per vector component */
   j = js + joff;
   k = ks + koff;
   if ((j < nx) && (k < ny)) {
      jj = j + nnxv*k;
      kk = js + nmxv*ks;
      for (i = 0; i < ndim; i++) {
         shmn2[kk+mxv*i] = fn[jj+nxv*i];
      }
   }
/* all components of the tile must be loaded before transposed reads */
   __syncthreads();
/* copy out from block, with thread roles swapped for coalesced writes */
   j = ks + joff;
   k = js + koff;
   if ((j < nx) && (k < ny)) {
      kk = k + nnyv*j;
      jj = ks + nmxv*js;
      for (i = 0; i < ndim; i++) {
         gn[kk+nyv*i] = shmn2[jj+mxv*i];
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
__global__ void gpufft2rcxs(float2 f[], int isign, int mixup[],
                            float2 sct[], int indx, int indy, int nyi,
                            int nyp, int nxhd, int nyd, int nxhyd,
                            int nxyhd, int nsize) {
/* this subroutine performs the x part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with data not packed
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier 
transform in x is performed f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx)) if isign = 1, a forward fourier transform in x is performed f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = first dimension of f >= nx/2+1 nyd = second dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 nsize = amount of scratch complex memory used fourier coefficients are stored as follows: f[k][j].x, f[k][j].y = real, imaginary part of mode j,k, where 0 <= j < nx/2+1 and 0 <= k < ny written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt; int nrx, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, jj, kk; int n, nn, in, nt, nh; float ani, at1, at2; float2 t1, t2, t3; /* The size of the shared memory array is as follows: */ /* float2 s[nsize]; */ extern __shared__ float2 s[]; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? 
nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; /* calculate extent of shared memory usage: */ /* nn = size of shared memory in x */ nn = nxh; in = 0; while (nn > nsize) { nn = nn/2; in += 1; } /* nt = number of iterations in x */ nt = 1L<<in; in = indx1 - in; nh = nn/2; /* inverse fourier transform */ if (isign < 0) { /* bit-reverse array elements in x */ nrx = nxhy/nxh; /* for (k = nyi-1; k < nyt; k++) { */ k = blockIdx.x + nyi - 1; if (k < nyt) { jj = nxhd*k; /* for (j = 0; j < nxh; j++) { */ j = threadIdx.x; while (j < nxh) { j1 = (mixup[j] - 1)/nrx; if (j < j1) { t1 = f[j1+jj]; f[j1+jj] = f[j+jj]; f[j+jj] = t1; } j += blockDim.x; } /* synchronize threads */ __syncthreads(); } /* copy data to local memory */ nrx = nxy/nxh; /* for (i = nyi-1; i < nyt; i++) { */ i = blockIdx.x + nyi - 1; if (i < nyt) { jj = nxhd*i; for (n = 0; n < nt; n++) { /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { s[kk] = f[kk+nn*n+jj]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); /* transform using local data in x */ ns = 1; for (l = 0; l < in; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; /* for (kk = 0; kk < nh; kk++) { */ kk = threadIdx.x; while (kk < nh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = s[j2]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t2 = s[j1]; t3.x = t2.x - at1; t3.y = t2.y - at2; s[j2] = t3; t3.x = t2.x + at1; t3.y = t2.y + at2; s[j1] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } /* copy data to global memory */ /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { f[kk+nn*n+jj] = s[kk]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); } /* transform using global data in x */ ns = 1L<<in; for (l = in; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; /* for (kk = 0; kk < nxhh; kk++) { */ kk = threadIdx.x; while (kk < nxhh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + 
ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = f[j2+jj]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t2 = f[j1+jj]; t3.x = t2.x - at1; t3.y = t2.y - at2; f[j2+jj] = t3; t3.x = t2.x + at1; t3.y = t2.y + at2; f[j1+jj] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5f/(((float) nx)*((float) ny)); /* for (k = nyi-1; k < nyt; k++) */ k = blockIdx.x + nyi - 1; if (k < nyt) { jj = nxhd*k; /* for (j = 1; j < nxhh; j++) { */ j = threadIdx.x; while (j < nxhh) { if (j > 0) { t3 = sct[kmr*j]; at1 = t3.y; at2 = -t3.x; t2 = f[nxh-j+jj]; t2.y = -t2.y; t3 = f[j+jj]; t1.x = t3.x + t2.x; t1.y = t3.y + t2.y; t3.x -= t2.x; t3.y -= t2.y; t2.x = t3.x*at1 - t3.y*at2; t2.y = t3.x*at2 + t3.y*at1; t3.x = ani*(t1.x + t2.x); t3.y = ani*(t1.y + t2.y); f[j+jj] = t3; t3.x = ani*(t1.x - t2.x); t3.y = ani*(t2.y - t1.y); f[nxh-j+jj] = t3; } j += blockDim.x; } if (threadIdx.x==0) { ani = 2.0f*ani; t3 = f[nxhh+jj]; t3.x = ani*t3.x; t3.y = -ani*t3.y; f[nxhh+jj] = t3; t3 = f[jj]; at1 = t3.x; at2 = t3.y; t3.x = ani*(at1 - at2); t3.y = 0.0f; f[nxh+jj] = t3; t3.x = ani*(at1 + at2); f[jj] = t3; } /* synchronize threads */ __syncthreads(); } } /* forward fourier transform */ if (isign > 0) { /* scramble coefficients */ kmr = nxy/nx; /* for (k = nyi-1; k < nyt; k++) { */ k = blockIdx.x + nyi - 1; if (k < nyt) { jj = nxhd*k; /* for (j = 1; j < nxhh; j++) { */ j = threadIdx.x; while (j < nxhh) { if (j > 0) { t3 = sct[kmr*j]; at1 = t3.y; at2 = t3.x; t2 = f[nxh-j+jj]; t2.y = -t2.y; t3 = f[j+jj]; t1.x = t3.x + t2.x; t1.y = t3.y + t2.y; t3.x -= t2.x; t3.y -= t2.y; t2.x = t3.x*at1 - t3.y*at2; t2.y = t3.x*at2 + t3.y*at1; t3.x = t1.x + t2.x; t3.y = t1.y + t2.y; f[j+jj] = t3; t3.x = t1.x - t2.x; t3.y = t2.y - t1.y; f[nxh-j+jj] = t3; } j += blockDim.x; } if (threadIdx.x==0) { t3 = f[nxhh+jj]; t3.x = 2.0f*t3.x; t3.y = -2.0f*t3.y; f[nxhh+jj] = t3; t3 = f[jj]; at1 = t3.x; t3 = f[nxh+jj]; at2 = t3.x; 
t3.x = at1 + at2; t3.y = at1 - at2; f[jj] = t3; } /* synchronize threads */ __syncthreads(); } /* bit-reverse array elements in x */ nrx = nxhy/nxh; /* for (k = nyi-1; k < nyt; k++) { */ k = blockIdx.x + nyi - 1; if (k < nyt) { jj = nxhd*k; /* for (j = 0; j < nxh; j++) { */ j = threadIdx.x; while (j < nxh) { j1 = (mixup[j] - 1)/nrx; if (j < j1) { t1 = f[j1+jj]; f[j1+jj] = f[j+jj]; f[j+jj] = t1; } j += blockDim.x; } /* synchronize threads */ __syncthreads(); } /* copy data to local memory */ nrx = nxy/nxh; /* for (i = nyi-1; i < nyt; i++) { */ i = blockIdx.x + nyi - 1; if (i < nyt) { jj = nxhd*i; for (n = 0; n < nt; n++) { /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { s[kk] = f[kk+nn*n+jj]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); /* transform using local data in x */ ns = 1; for (l = 0; l < in; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; /* for (kk = 0; kk < nh; kk++) { */ kk = threadIdx.x; while (kk < nh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t1.y = -t1.y; t2 = s[j2]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t2 = s[j1]; t3.x = t2.x - at1; t3.y = t2.y - at2; s[j2] = t3; t3.x = t2.x + at1; t3.y = t2.y + at2; s[j1] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } /* copy data to global memory */ /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { f[kk+nn*n+jj] = s[kk]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); } /* transform using global data in x */ ns = 1L<<in; for (l = in; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; /* for (kk = 0; kk < nxhh; kk++) { */ kk = threadIdx.x; while (kk < nxhh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t1.y = -t1.y; t2 = f[j2+jj]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t2 = f[j1+jj]; t3.x = t2.x - at1; t3.y = t2.y - at2; f[j2+jj] = t3; t3.x = t2.x 
+ at1; t3.y = t2.y + at2; f[j1+jj] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } } } return; } /*--------------------------------------------------------------------*/ __global__ void gpufft2rcys(float2 g[], int isign, int mixup[], float2 sct[], int indx, int indy, int nxi, int nxp, int nxhd, int nyd, int nxhyd, int nxyhd, int nsize) { /* this subroutine performs the y part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of x, using complex arithmetic, with data not packed for isign = (-1,1), input: all, output: g for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform in y is performed g[n][m] = sum(g[j][k]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in y is performed g[j][k] = sum(g[n][m]*exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nxi = initial x index used nxp = number of x indices used nxhd = second dimension of g >= nx/2+1 nyd = first dimension of g >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 nsize = amount of scratch complex memory used fourier coefficients are stored as follows: g[j][k] = real, imaginary part of mode j,k, where 0 <= j < nx/2+1 and 0 <= k < ny written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt; int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, koff, kk; int n, nn, in, nt, nh; float at1, at2; float2 t1, t2, t3; /* The size of the shared memory array is as follows: */ /* float2 s[nsize]; */ extern __shared__ float2 s[]; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? 
nx : ny; nxhy = 1L<<indx1y; nxt = nxi + nxp - 1; /* calculate extent of shared memory usage: */ /* nn = size of shared memory in y */ nn = ny; in = 0; while (nn > nsize) { nn = nn/2; in += 1; } /* nt = number of iterations in y */ nt = 1L<<in; in = indy - in; nh = nn/2; /* bit-reverse array elements in y */ nry = nxhy/ny; /* for (j = nxi-1; j < nxt; j++) { */ j = blockIdx.x + nxi - 1; if (j < nxt) { kk = nyd*j; /* for (k = 0; k < ny; k++) { */ k = threadIdx.x; while (k < ny) { k1 = (mixup[k] - 1)/nry; if (k < k1) { t1 = g[k1+kk]; g[k1+kk] = g[k+kk]; g[k+kk] = t1; } k += blockDim.x; } /* synchronize threads */ __syncthreads(); } nry = nxy/ny; /* inverse fourier transform in y */ if (isign < 0) { /* copy data to local memory */ /* for (i = nxi-1; i < nxt; i++) { */ i = blockIdx.x + nxi - 1; if (i < nxt) { koff = nyd*i; for (n = 0; n < nt; n++) { /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { s[kk] = g[kk+nn*n+koff]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); /* transform using local data in y */ ns = 1; for (l = 0; l < in; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; /* for (kk = 0; kk < nh; kk++) { */ kk = threadIdx.x; while (kk < nh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = s[j2]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t3.x = t2.x - at1; t3.y = t2.y - at2; t2 = s[j1]; t3.x = t2.x - at1; t3.y = t2.y - at2; s[j2] = t3; t3.x = t2.x + at1; t3.y = t2.y + at2; s[j1] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } /* copy data to global memory */ /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { g[kk+nn*n+koff] = s[kk]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); } /* transform using global data in y */ ns = 1L<<in; for (l = in; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; /* for (kk = 0; kk < nyh; kk++) { */ kk = threadIdx.x; while (kk < nyh) { k = kk/ns; j 
= kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = g[j2+koff]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t3.x = t2.x - at1; t3.y = t2.y - at2; t2 = g[j1+koff]; t3.x = t2.x - at1; t3.y = t2.y - at2; g[j2+koff] = t3; t3.x = t2.x + at1; t3.y = t2.y + at2; g[j1+koff] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } } } /* forward fourier transform in y */ if (isign > 0) { /* copy data to local memory */ /* for (i = nxi-1; i < nxt; i++) { */ i = blockIdx.x + nxi - 1; if (i < nxt) { koff = nyd*i; for (n = 0; n < nt; n++) { /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { s[kk] = g[kk+nn*n+koff]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); /* transform using local data in y */ ns = 1; for (l = 0; l < in; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; /* for (kk = 0; kk < nh; kk++) { */ kk = threadIdx.x; while (kk < nh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t1.y = -t1.y; t2 = s[j2]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t3.x = t2.x - at1; t3.y = t2.y - at2; t2 = s[j1]; t3.x = t2.x - at1; t3.y = t2.y - at2; s[j2] = t3; t3.x = t2.x + at1; t3.y = t2.y + at2; s[j1] = t3; kk += blockDim.x; } ns = ns2; /* synchronize threads */ __syncthreads(); } /* copy data to global memory */ /* for (kk = 0; kk < nn; kk++) { */ kk = threadIdx.x; while (kk < nn) { g[kk+nn*n+koff] = s[kk]; kk += blockDim.x; } /* synchronize threads */ __syncthreads(); } /* transform using global data in y */ ns = 1L<<in; for (l = in; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; /* for (kk = 0; kk < nyh; kk++) { */ kk = threadIdx.x; while (kk < nyh) { k = kk/ns; j = kk - ns*k; k1 = ns2*k; k2 = k1 + ns; j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t1.y = -t1.y; t2 = g[j2+koff]; at1 = t1.x*t2.x - t1.y*t2.y; at2 = t1.x*t2.y + t1.y*t2.x; t3.x = t2.x - at1; t3.y = t2.y - at2; t2 = g[j1+koff]; t3.x = 
t2.x - at1;
                  t3.y = t2.y - at2;
                  g[j2+koff] = t3;
                  t3.x = t2.x + at1;
                  t3.y = t2.y + at2;
                  g[j1+koff] = t3;
                  kk += blockDim.x;
               }
               ns = ns2;
/* synchronize threads */
               __syncthreads();
            }
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusum1(float a[], float *sa, int nx) {
/* 1d serial sum reduction */
/* nx = length of data */
/* sa = sum(a) */
/* each block stages its slice of a[] in shared memory; thread 0 of the
   block then sums the slice serially and folds the block total into
   *sa with a single atomicAdd per block (so *sa is accumulated into,
   not overwritten) */
/* local data */
   int j, js, jb, mx, joff, mxm;
   float t;
/* The size of the shared memory array is as follows: */
/* ss[blockDim.x]; */
   extern __shared__ float ss[];
   mx = blockDim.x;
   js = threadIdx.x;
   jb = blockIdx.x;
   joff = mx*jb;
   j = js + joff;
/* copy global data to shared memory */
/* ss[js] is left uninitialized when j >= nx, but the serial loop below
   only reads ss[0..mxm-1], so those entries are never used */
   if (j < nx) ss[js] = a[j];
/* synchronize to make sure each thread in block has the data */
   __syncthreads();
   if (js==0) {
/* mxm = number of valid elements in this block's slice */
      mxm = nx - joff;
      if (mxm > mx) mxm = mx;
/* perform serial local sum reduction: result in t */
      t = 0.0f;
      for (j = 0; j < mxm; j++) {
         t += ss[j];
      }
/* accumulate results to global memory for each block */
/* for devices with compute capability 2.x */
      atomicAdd(&sa[0],t);
   }
   return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusum2(float a[], float d[], int nx) {
/* segmented 1d sum reductions, each of length mx = blockDim.x */
/* nx = length of data */
/* forall (j = 1:nbx); d(j) = sum(a(1+mx*(j-1):min(nx,mx*j))) */
/* unlike gpusum1, the in-block reduction here is parallel (lsum2) and
   each block writes its own output element d[blockIdx.x] -- no atomics */
/* local data */
   int j, js, jb, mx, joff, mxm;
/* The size of the shared memory array is as follows: */
/* ss[blockDim.x]; */
   extern __shared__ float ss[];
   mx = blockDim.x;
   js = threadIdx.x;
   jb = blockIdx.x;
   joff = mx*jb;
   j = js + joff;
/* copy global data to shared memory */
   if (j < nx) ss[js] = a[j];
/* synchronize to make sure each thread in block has the data */
   __syncthreads();
/* mxm = number of valid elements in this block's segment */
   mxm = nx - joff;
   if (mxm > mx) mxm = mx;
/* perform parallel local sum reduction: result in ss[0] */
   lsum2(ss,mxm);
/* write out result to global memory for each block */
   if (js==0) d[jb] = ss[0];
   return;
}
#ifndef _USE_OPENACC_
/*--------------------------------------------------------------------*/
extern "C" void cgpuppush2l(float *ppart, float *fxy, int *kpic,
                            float qbm, float dt, float *ek, int idimp,
                            int nppmx, int nx, int ny, int mx, int my,
                            int nxv, int nyv, int mx1, int mxy1,
                            int ipbc) {
/* Push Interface for C */
/* host wrapper for the gpuppush2l kernel: launches one thread block
   per particle tile (mxy1 tiles total), folded into a 2d grid so the
   grid x dimension never exceeds maxgsx */
   int n, m, ns;
   dim3 dimBlock(nblock_size);
   n = mxy1;
   m = (n - 1)/maxgsx + 1;
   n = n < maxgsx ? n : maxgsx;
   dim3 dimGrid(n,m);
/* ns = dynamic shared memory: the larger of 2*(mx+1)*(my+1) floats
   and one float per thread */
   ns = 2*(mx + 1)*(my + 1)*sizeof(float);
   n = nblock_size*sizeof(float);
   ns = ns > n ? ns : n;
/* read-and-clear any sticky error before the launch */
   crc = cudaGetLastError();
   gpuppush2l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,kpic,qbm,dt,ek,idimp,
                                       nppmx,nx,ny,mx,my,nxv,nyv,mx1,
                                       mxy1,ipbc);
/* NOTE(review): cudaThreadSynchronize is deprecated;
   cudaDeviceSynchronize is the modern equivalent */
   cudaThreadSynchronize();
   crc = cudaGetLastError();
   if (crc) {
      printf("gpuppush2l error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
#endif
/*--------------------------------------------------------------------*/
extern "C" void cgpuppushf2l(float *ppart, float *fxy, int *kpic,
                             int *ncl, int *ihole, float qbm, float dt,
                             float *ek, int idimp, int nppmx, int nx,
                             int ny, int mx, int my, int nxv, int nyv,
                             int mx1, int mxy1, int ntmax, int *irc) {
/* Push Interface for C */
/* same launch strategy as cgpuppush2l, with nblock_size ints of extra
   shared space for the kernel's ncl/ihole bookkeeping */
   int n, m, ns;
   dim3 dimBlock(nblock_size);
   n = mxy1;
   m = (n - 1)/maxgsx + 1;
   n = n < maxgsx ? n : maxgsx;
   dim3 dimGrid(n,m);
   ns = 2*(mx + 1)*(my + 1)*sizeof(float) + nblock_size*sizeof(int);
   n = nblock_size*sizeof(float);
   ns = ns > n ? 
ns : n; ns += 9*sizeof(int); crc = cudaGetLastError(); gpuppushf2l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,kpic,ncl,ihole,qbm,dt, ek,idimp,nppmx,nx,ny,mx,my,nxv, nyv,mx1,mxy1,ntmax,irc); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppushf2l error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } #ifndef _USE_OPENACC_ /*--------------------------------------------------------------------*/ extern "C" void cgpu2ppost2l(float *ppart, float *q, int *kpic, float qm, int nppmx, int idimp, int mx, int my, int nxv, int nyv, int mx1, int mxy1) { /* Deposit Interface for C */ int n, m, ns; dim3 dimBlock(nblock_size); n = mxy1; m = (n - 1)/maxgsx + 1; n = n < maxgsx ? n : maxgsx; dim3 dimGrid(n,m); ns = (mx + 1)*(my + 1)*sizeof(float); crc = cudaGetLastError(); gpu2ppost2l<<<dimGrid,dimBlock,ns>>>(ppart,q,kpic,qm,nppmx,idimp,mx, my,nxv,nyv,mx1,mxy1); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpu2ppost2l error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } #endif /*--------------------------------------------------------------------*/ extern "C" void cgpucaguard2l(float2 *qc, float *q, int nx, int ny, int nxe, int nye, int nxvh, int nyv) { /* Guard Cell Interface for C */ dim3 dimBlock(nblock_size); dim3 dimGrid(ny); crc = cudaGetLastError(); gpucaguard2l<<<dimGrid,dimBlock>>>(qc,q,nx,ny,nxe,nye,nxvh,nyv); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpucaguard2l error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuccguard2l(float2 *fxyc, float *fxy, int nx, int ny, int nxe, int nye, int nxvh, int nyv) { /* Guard Cell Interface for C */ dim3 dimBlock(nblock_size); dim3 dimGrid(ny); crc = cudaGetLastError(); gpuccguard2l<<<dimGrid,dimBlock>>>(fxyc,fxy,nx,ny,nxe,nye,nxvh,nyv); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuccguard2l 
error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuppord2l(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int idimp, int nppmx, int nx, int ny, int mx, int my, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* Sort Interface for C */ int mxy1, n, m, ns; dim3 dimBlock(nblock_size); mxy1 = mx1*my1; m = (mxy1 - 1)/maxgsx + 1; n = mxy1 < maxgsx ? mxy1 : maxgsx; dim3 dimGrid(n,m); /* find which particles are leaving tile */ ns = (nblock_size+9)*sizeof(int); crc = cudaGetLastError(); gpuppfnd2l<<<dimGrid,dimBlock,ns>>>(ppart,kpic,ncl,ihole,idimp,nppmx, nx,ny,mx,my,mx1,my1,ntmax,irc); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuppfnd2l error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } /* buffer particles that are leaving tile and sum ncl */ ns = 9*sizeof(int); crc = cudaGetLastError(); gpuppmov2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,ncl,ihole,idimp, nppmx,mx1,my1,npbmx,ntmax,irc); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuppmov2l error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } /* copy incoming particles from ppbuff into ppart, update kpic */ ns = (nblock_size+18)*sizeof(int); crc = cudaGetLastError(); gpuppord2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,kpic,ncl,ihole, idimp,nppmx,mx1,my1,npbmx,ntmax, irc); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppord2l error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuppordf2l(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int idimp, int nppmx, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* Sort Interface for C */ int mxy1, n, m, ns; dim3 dimBlock(nblock_size); mxy1 = mx1*my1; m = (mxy1 - 1)/maxgsx + 1; n = mxy1 < maxgsx ? 
mxy1 : maxgsx;
   dim3 dimGrid(n,m);
/* buffer particles that are leaving tile and sum ncl */
   ns = 9*sizeof(int);
   crc = cudaGetLastError();
   gpuppmov2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,ncl,ihole,idimp,
                                       nppmx,mx1,my1,npbmx,ntmax,irc);
/* cudaThreadSynchronize(); */
   crc = cudaGetLastError();
   if (crc) {
      printf("gpuppmov2l error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
/* copy incoming particles from ppbuff into ppart, update kpic */
   ns = (nblock_size+18)*sizeof(int);
   crc = cudaGetLastError();
   gpuppord2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,kpic,ncl,ihole,
                                       idimp,nppmx,mx1,my1,npbmx,ntmax,
                                       irc);
/* only the last kernel of the sort pipeline is synchronized; the
   earlier launches are ordered by the (default) stream */
   cudaThreadSynchronize();
   crc = cudaGetLastError();
   if (crc) {
      printf("gpuppord2l error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
#ifndef _USE_OPENACC_
/*--------------------------------------------------------------------*/
extern "C" void cgpupois22t(float2 *qt, float2 *fxyt, float2 *ffct,
                            float *we, int nx, int ny, int nxvh,
                            int nyv, int nxhd, int nyhd) {
/* Poisson Solver Interface for C */
/* host wrapper for gpupois22t: one block per kx index (nx/2 + 1
   blocks), with one float of shared memory per thread for the field
   energy reduction inside the kernel */
   int nxh1, ns;
   dim3 dimBlock(nblock_size);
   nxh1 = nx/2 + 1;
   dim3 dimGrid(nxh1);
   ns = nblock_size*sizeof(float);
   crc = cudaGetLastError();
   gpupois22t<<<dimGrid,dimBlock,ns>>>(qt,fxyt,ffct,we,nx,ny,nxvh,nyv,
                                       nxhd,nyhd);
   cudaThreadSynchronize();
   crc = cudaGetLastError();
   if (crc) {
      printf("gpupois22t error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
#endif
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcs(float2 *f, float2 *g, int isign,
                             int *mixup, float2 *sct, int indx,
                             int indy, int nxhd, int nyd, int nxhyd,
                             int nxyhd) {
/* wrapper function for real to complex fft, without packed data */
/* if isign = -1, f = input, g = output */
/* if isign = 1, g = input, f = output */
/* nxhd must be >= nx/2 + 1 */
/* local data */
   int nxh, nxh1, ny, nsize, ns;
   int nxi = 1, nyi = 1, mx = 16;
   dim3 dimBlock(nblock_size);
/* mx by mx (16x16) thread blocks are used for the transpose kernels */
   dim3 dimBlockt(mx,mx);
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   nxh1 = nxh + 
1; ny = 1L<<indy; dim3 dimGridx(nxh1); dim3 dimGridy(ny); dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1); dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1); /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ nsize = nxh < 1024 ? nxh : 1024; ns = nsize*sizeof(float2); crc = cudaGetLastError(); gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(f,isign,mixup,sct,indx,indy, nyi,ny,nxhd,nyd,nxhyd,nxyhd, nsize); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpufft2rcxs error=%d:%s\n", crc,cudaGetErrorString(crc)); exit(1); } /* transpose f to g */ ns = (mx+1)*mx*sizeof(float2); crc = cudaGetLastError(); gpuctpose4<<<dimGridtx,dimBlockt,ns>>>(f,g,nxh1,ny,nxhd,nyd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } /* perform y fft */ nsize = ny < 1024 ? ny : 1024; ns = nsize*sizeof(float2); crc = cudaGetLastError(); gpufft2rcys<<<dimGridx,dimBlock,ns>>>(g,isign,mixup,sct,indx,indy, nxi,nxh1,nxhd,nyd,nxhyd, nxyhd,nsize); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpufft2rcys error=%d:%s\n", crc,cudaGetErrorString(crc)); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ nsize = ny < 1024 ? ny : 1024; ns = nsize*sizeof(float2); crc = cudaGetLastError(); gpufft2rcys<<<dimGridx,dimBlock,ns>>>(g,isign,mixup,sct,indx,indy, nxi,nxh1,nxhd,nyd,nxhyd, nxyhd,nsize); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpufft2rcys error=%d:%s\n", crc,cudaGetErrorString(crc)); exit(1); } /* transpose g to f */ ns = (mx+1)*mx*sizeof(float2); crc = cudaGetLastError(); gpuctpose4<<<dimGridty,dimBlockt,ns>>>(g,f,ny,nxh1,nyd,nxhd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } /* perform x fft */ nsize = nxh < 1024 ? 
nxh : 1024; ns = nsize*sizeof(float2); crc = cudaGetLastError(); gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(f,isign,mixup,sct,indx,indy, nyi,ny,nxhd,nyd,nxhyd,nxyhd, nsize); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpufft2rcxs error=%d:%s\n", crc,cudaGetErrorString(crc)); exit(1); } } return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuwfft2rcsn(float2 *fn, float2 *gn, int isign, int *mixup, float2 *sct, int indx, int indy, int ndim, int nxhd, int nyd, int nxhyd, int nxyhd) { /* wrapper function for multiple real to complex ffts, */ /* without packed data */ /* if isign = -1, fn = input, gn = output */ /* if isign = 1, gn = input, fn = output */ /* ndim = vector dimension */ /* nxhd must be >= nx/2 + 1 */ /* local data */ int nxh, nxh1, ny, nxp, nyp, nnxd, nnyd, nsize, ns; int nxi = 1, nyi = 1, mx = 16; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nxh = 1L<<(indx - 1); nxh1 = nxh + 1; ny = 1L<<indy; nxp = ndim*nxh1; nyp = ndim*ny; nnxd = ndim*nxhd; nnyd = ndim*nyd; dim3 dimGridx(nxp); dim3 dimGridy(nyp); dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1); dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1); /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ nsize = nxh < 1024 ? nxh : 1024; ns = nsize*sizeof(float2); crc = cudaGetLastError(); gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(fn,isign,mixup,sct,indx, indy,nyi,nyp,nxhd,nnyd, nxhyd,nxyhd,nsize); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpufft2rcxs error=%d:%s\n", crc,cudaGetErrorString(crc)); exit(1); } /* transpose f to g */ ns = ndim*(mx+1)*mx*sizeof(float2); crc = cudaGetLastError(); gpuctpose4n<<<dimGridtx,dimBlockt,ns>>>(fn,gn,nxh1,ny,ndim,nxhd, nyd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuctpose4n error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform y fft */ nsize = ny < 1024 ? 
ny : 1024;
      ns = nsize*sizeof(float2);
      crc = cudaGetLastError();
      gpufft2rcys<<<dimGridx,dimBlock,ns>>>(gn,isign,mixup,sct,indx,
                                            indy,nxi,nxp,nnxd,nyd,nxhyd,
                                            nxyhd,nsize);
/* blocking synchronize at the end of the inverse sequence so any */
/* execution error is reported before returning                   */
      cudaThreadSynchronize();
      crc = cudaGetLastError();
      if (crc) {
         printf("gpufft2rcys error=%d:%s\n",
                crc,cudaGetErrorString(crc));
         exit(1);
      }
   }
/* forward fourier transform */
   else if (isign > 0) {
/* perform y fft */
      nsize = ny < 1024 ? ny : 1024;
      ns = nsize*sizeof(float2);
      crc = cudaGetLastError();
      gpufft2rcys<<<dimGridx,dimBlock,ns>>>(gn,isign,mixup,sct,indx,
                                            indy,nxi,nxp,nnxd,nyd,nxhyd,
                                            nxyhd,nsize);
/* cudaThreadSynchronize(); */
      crc = cudaGetLastError();
      if (crc) {
         printf("gpufft2rcys error=%d:%s\n",
                crc,cudaGetErrorString(crc));
         exit(1);
      }
/* transpose g to f */
      ns = ndim*(mx+1)*mx*sizeof(float2);
      crc = cudaGetLastError();
      gpuctpose4n<<<dimGridty,dimBlockt,ns>>>(gn,fn,ny,nxh1,ndim,nyd,
                                              nxhd);
/* cudaThreadSynchronize(); */
      crc = cudaGetLastError();
      if (crc) {
         printf("gpuctpose4n error=%d:%s\n",crc,
                cudaGetErrorString(crc));
         exit(1);
      }
/* perform x fft */
      nsize = nxh < 1024 ? nxh : 1024;
      ns = nsize*sizeof(float2);
      crc = cudaGetLastError();
      gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(fn,isign,mixup,sct,indx,
                                            indy,nyi,nyp,nxhd,nnyd,
                                            nxhyd,nxyhd,nsize);
      cudaThreadSynchronize();
      crc = cudaGetLastError();
      if (crc) {
         printf("gpufft2rcxs error=%d:%s\n",
                crc,cudaGetErrorString(crc));
         exit(1);
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpusum2(float *a, float *sa, int nx) {
/* segmented 1d parallel sum reduction of input array a, of length nx */
/* first reduce individual blocks in parallel, writing result to scr */
/* then reduce scr serially, result is written to sa */
/* local data */
   int nbx, nbs, ns;
   void *gptr;
/* scratch buffer is cached across calls and only reallocated when a */
/* larger request arrives (NOTE: not thread-safe on the host side)   */
   static int len = 0;
   static float *scr = NULL;
   nbx = (nx - 1)/nblock_size + 1;
   dim3 dimBlock(nblock_size);
   dim3 dimGrid(nbx);
   nbs = (nbx - 1)/nblock_size + 1;
   dim3 dimGrid1(nbs);
/* create scratch array */
   if (len < nbx) {
      if (len > 0)
         crc = cudaFree((void *)scr);
      crc = cudaMalloc(&gptr,sizeof(float)*nbx);
      if (crc) {
         printf("cudaMalloc cgpusum2 float Error=%d:%s,l=%d\n",crc,
                cudaGetErrorString(crc),nbx);
         exit(1);
      }
      scr = (float *)gptr;
      len = nbx;
   }
/* reduce individual blocks in parallel */
   ns = nblock_size*sizeof(float);
   crc = cudaGetLastError();
   gpusum2<<<dimGrid,dimBlock,ns>>>(a,scr,nx);
/* cudaThreadSynchronize(); */
   crc = cudaGetLastError();
   if (crc) {
      printf("gpusum2 error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
/* 1d serial reduction */
   crc = cudaGetLastError();
   gpusum1<<<dimGrid1,dimBlock,ns>>>(scr,sa,nbx);
   cudaThreadSynchronize();
   crc = cudaGetLastError();
   if (crc) {
      printf("gpusum1 error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/* Interfaces to Fortran */
#ifndef _USE_OPENACC_
/*--------------------------------------------------------------------*/
extern "C" void cgpuppush2l_(unsigned long *gp_ppart,
                             unsigned long *gp_fxy,
                             unsigned long *gp_kpic, float *qbm,
                             float *dt, unsigned long *gp_ek,
                             int *idimp, int *nppmx, int *nx, int *ny,
                             int *mx,
int *my, int *nxv, int *nyv, int *mx1,
                             int *mxy1, int *ipbc) {
/* Fortran-callable wrapper: device addresses arrive as unsigned long */
/* handles, are cast back to pointers, and forwarded to cgpuppush2l   */
   float *ppart, *fxy, *ek;
   int *kpic;
   ppart = (float *)*gp_ppart;
   fxy = (float *)*gp_fxy;
   kpic = (int *)*gp_kpic;
   ek = (float *)*gp_ek;
   cgpuppush2l(ppart,fxy,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*mx,*my,
               *nxv,*nyv,*mx1,*mxy1,*ipbc);
   return;
}
#endif
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpuppushf2l                                                  */
extern "C" void cgpuppushf2l_(unsigned long *gp_ppart,
                              unsigned long *gp_fxy,
                              unsigned long *gp_kpic,
                              unsigned long *gp_ncl,
                              unsigned long *gp_ihole, float *qbm,
                              float *dt, unsigned long *gp_ek,
                              int *idimp, int *nppmx, int *nx, int *ny,
                              int *mx, int *my, int *nxv, int *nyv,
                              int *mx1, int *mxy1, int *ntmax,
                              unsigned long *gp_irc) {
   float *ppart, *fxy, *ek;
   int *kpic, *ncl, *ihole, *irc;
   ppart = (float *)*gp_ppart;
   fxy = (float *)*gp_fxy;
   kpic = (int *)*gp_kpic;
   ncl = (int *)*gp_ncl;
   ihole = (int *)*gp_ihole;
   ek = (float *)*gp_ek;
   irc = (int *)*gp_irc;
   cgpuppushf2l(ppart,fxy,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,*nx,
                *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
   return;
}
#ifndef _USE_OPENACC_
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpu2ppost2l                                                  */
extern "C" void cgpu2ppost2l_(unsigned long *gp_ppart,
                              unsigned long *gp_q,
                              unsigned long *gp_kpic, float *qm,
                              int *nppmx, int *idimp, int *mx, int *my,
                              int *nxv, int *nyv, int *mx1,
                              int *mxy1) {
   float *ppart, *q;
   int *kpic;
   ppart = (float *)*gp_ppart;
   q = (float *)*gp_q;
   kpic = (int *)*gp_kpic;
   cgpu2ppost2l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
                *mxy1);
   return;
}
#endif
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpucaguard2l                                                 */
extern "C" void cgpucaguard2l_(unsigned long *gp_qc,
                               unsigned long *gp_q, int *nx, int *ny,
                               int *nxe, int *nye, int *nxvh,
                               int *nyv) {
   float2 *qc;
   float *q;
   qc = (float2 *)*gp_qc;
   q = (float *)*gp_q;
   cgpucaguard2l(qc,q,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpuccguard2l                                                 */
extern "C" void cgpuccguard2l_(unsigned long *gp_fxyc,
                               unsigned long *gp_fxy, int *nx, int *ny,
                               int *nxe, int *nye, int *nxvh,
                               int *nyv) {
   float2 *fxyc;
   float *fxy;
   fxyc = (float2 *)*gp_fxyc;
   fxy = (float *)*gp_fxy;
   cgpuccguard2l(fxyc,fxy,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpuppord2l                                                   */
extern "C" void cgpuppord2l_(unsigned long *gp_ppart,
                             unsigned long *gp_ppbuff,
                             unsigned long *gp_kpic,
                             unsigned long *gp_ncl,
                             unsigned long *gp_ihole, int *idimp,
                             int *nppmx, int *nx, int *ny, int *mx,
                             int *my, int *mx1, int *my1, int *npbmx,
                             int *ntmax, unsigned long *gp_irc) {
   float *ppart, *ppbuff;
   int *kpic, *ncl, *ihole, *irc;
   ppart = (float *)*gp_ppart;
   ppbuff = (float *)*gp_ppbuff;
   kpic = (int *)*gp_kpic;
   ncl = (int *)*gp_ncl;
   ihole = (int *)*gp_ihole;
   irc = (int *)*gp_irc;
   cgpuppord2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
               *my,*mx1,*my1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpuppordf2l                                                  */
extern "C" void cgpuppordf2l_(unsigned long *gp_ppart,
                              unsigned long *gp_ppbuff,
                              unsigned long *gp_kpic,
                              unsigned long *gp_ncl,
                              unsigned long *gp_ihole, int *idimp,
                              int *nppmx, int *mx1, int *my1,
                              int *npbmx, int *ntmax,
                              unsigned long *gp_irc) {
   float *ppart, *ppbuff;
   int *kpic, *ncl, *ihole, *irc;
   ppart = (float *)*gp_ppart;
   ppbuff = (float *)*gp_ppbuff;
   kpic = (int *)*gp_kpic;
   ncl = (int *)*gp_ncl;
   ihole = (int *)*gp_ihole;
   irc = (int *)*gp_irc;
   cgpuppordf2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
                *npbmx,*ntmax,irc);
   return;
}
#ifndef _USE_OPENACC_
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpupois22t                                                   */
extern "C" void cgpupois22t_(unsigned long *gp_qt,
                             unsigned long *gp_fxyt,
                             unsigned long *gp_ffct,
                             unsigned long *gp_we, int *nx, int *ny,
                             int *nxvh, int *nyv, int *nxhd,
                             int *nyhd) {
   float2 *qt, *fxyt, *ffct;
   float *we;
   qt = (float2 *)*gp_qt;
   fxyt = (float2 *)*gp_fxyt;
   ffct = (float2 *)*gp_ffct;
   we = (float *)*gp_we;
   cgpupois22t(qt,fxyt,ffct,we,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
   return;
}
#endif
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpuwfft2rcs                                                  */
extern "C" void cgpuwfft2rcs_(unsigned long *gp_f, unsigned long *gp_g,
                              int *isign, unsigned long *gp_mixup,
                              unsigned long *gp_sct, int *indx,
                              int *indy, int *nxhd, int *nyd,
                              int *nxhyd, int *nxyhd) {
   float2 *f, *g, *sct;
   int *mixup;
   f = (float2 *)*gp_f;
   g = (float2 *)*gp_g;
   mixup = (int *)*gp_mixup;
   sct = (float2 *)*gp_sct;
   cgpuwfft2rcs(f,g,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,
                *nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpuwfft2rcsn                                                 */
extern "C" void cgpuwfft2rcsn_(unsigned long *gp_fn,
                               unsigned long *gp_gn, int *isign,
                               unsigned long *gp_mixup,
                               unsigned long *gp_sct, int *indx,
                               int *indy, int *ndim, int *nxhd,
                               int *nyd, int *nxhyd, int *nxyhd) {
   float2 *fn, *gn, *sct;
   int *mixup;
   fn = (float2 *)*gp_fn;
   gn = (float2 *)*gp_gn;
   mixup = (int *)*gp_mixup;
   sct = (float2 *)*gp_sct;
   cgpuwfft2rcsn(fn,gn,*isign,mixup,sct,*indx,*indy,*ndim,*nxhd,*nyd,
                 *nxhyd,*nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable wrapper: converts handle arguments and calls */
/* cgpusum2                                                      */
extern "C" void cgpusum2_(unsigned long *gp_a, unsigned long *gp_sa,
                          int *nx) {
   float *a, *sa;
   a = (float *)*gp_a;
   sa = (float *)*gp_sa;
   cgpusum2(a,sa,*nx);
   return;
}
4,397
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <locale.h>
#include <stdlib.h>

#define N 2000
/* BUG FIX: the original "#define inf 1000000;" carried a trailing
   semicolon and only compiled because the macro was used as the last
   expression of a statement. */
#define inf 1000000
#define div 200

/* One Floyd-Warshall relaxation pass for intermediate vertex i.
   Each thread owns one column k of the N x N distance matrix b and
   relaxes every b[j][k] through vertex i.  Launched as
   <<<div, N/div>>>, so div * (N/div) == N threads cover all columns
   (N must be divisible by div). */
__global__ void floydCycle(int* b, int i)
{
    int k = blockIdx.x * (N / div) + threadIdx.x;
    for (int j = 0; j < N; ++j) {
        int direct = b[j * N + k];
        int via_i = b[j * N + i] + b[i * N + k];
        if (direct > via_i) {
            b[j * N + k] = via_i;
        }
    }
}

/* Abort with a message if a CUDA API call failed. */
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        printf("%s: %s\n", what, cudaGetErrorString(err));
        exit(1);
    }
}

int main()
{
    /* a keeps the raw random adjacency matrix (-1 marks "no edge");
       b is the working distance matrix fed to Floyd-Warshall. */
    int* a = new int[N * N];
    int* b = new int[N * N];
    for (int i = 0; i < N; ++i) {
        for (int j = i; j < N; ++j) {
            if (i == j) {
                a[i * N + j] = 0;
                /* BUG FIX: the original never initialized the diagonal
                   of b, so the kernel relaxed paths through garbage
                   self-distances.  Floyd-Warshall requires b[i][i]==0. */
                b[i * N + j] = 0;
            } else {
                if (rand() % 100 > 65) {
                    a[i * N + j] = a[j * N + i] = b[i * N + j] = b[j * N + i] = rand() % 100;
                } else {
                    a[i * N + j] = a[j * N + i] = -1;
                    b[i * N + j] = b[j * N + i] = inf;
                }
            }
        }
    }

    int* dev_b;
    checkCuda(cudaMalloc((void**)&dev_b, N * N * sizeof(int)), "cudaMalloc");
    checkCuda(cudaMemcpy(dev_b, b, N * N * sizeof(int), cudaMemcpyHostToDevice),
              "cudaMemcpy H2D");

    dim3 grid(div, 1);
    dim3 blocks(N / div, 1);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    /* N sequential passes, one per intermediate vertex.  Launches on
       the same (default) stream serialize, so no explicit sync is
       needed between iterations. */
    for (int i = 0; i < N; ++i) {
        floydCycle<<<grid, blocks>>>(dev_b, i);
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    checkCuda(cudaGetLastError(), "kernel launch");
    checkCuda(cudaDeviceSynchronize(), "kernel execution");
    checkCuda(cudaMemcpy(b, dev_b, N * N * sizeof(int), cudaMemcpyDeviceToHost),
              "cudaMemcpy D2H");

    printf("%f milliseconds\n", milliseconds);

    /* BUG FIX: arrays allocated with new[] must be released with
       delete[]; the original used plain delete (undefined behavior). */
    delete[] a;
    delete[] b;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_b);
    return 0;
}
4,398
#include "includes.h" extern "C" { } __global__ void u8_to_one_hot_f32(const unsigned char* x, unsigned int nclasses, float* y, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid*nclasses+x[tid]] = 1.0f; } }
4,399
#include <stdlib.h> #include <stdio.h> #include <math.h> #define BLOCK_SIZE 3 #define WA 3 #define HA 3 #define WB 3 #define HB WA #define WC WB #define HC HA void randomInit(float * data ,int size) { for(int i = 0; i < size; ++i) data[i] = i; } __global__ void matrixMul(float* C,float* A,float* B,int wA,int wB) { int tx = threadIdx.x; int ty = threadIdx.y; float value = 0; for(int i = 0; i < wA; ++i) { float elementA = A[ty * wA + i]; float elementB = B[i * wB + tx]; value += elementA * elementB; } C[ty * wA + tx] = value; } int main(int argc ,char** argv) { srand(2006); unsigned int size_A = WA * HA; unsigned int mem_size_A =sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B =sizeof(float) * size_B; float * h_B = (float*) malloc(mem_size_B); randomInit(h_A, size_A); randomInit(h_B, size_B); printf("\n\nMatrix A\n"); for(int i = 0; i < size_A; i++) { printf("%f ", h_A[i]); if(((i + 1) % WA) == 0) printf("\n"); } printf("\n\nMatrix B\n"); for(int i = 0; i < size_B; i++) { printf ("%f ", h_B[i]); if(((i + 1) % WB) == 0) printf("\n"); } unsigned int size_C = WC * HC; unsigned int mem_size_C =sizeof(float) * size_C; float * h_C = (float *) malloc(mem_size_C); float* d_A; float* d_B; cudaMalloc((void**) &d_A, mem_size_A); cudaMalloc((void**) &d_B, mem_size_B); cudaMemcpy(d_A, h_A,mem_size_A ,cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B,mem_size_B ,cudaMemcpyHostToDevice); float* d_C; cudaMalloc((void**) &d_C, mem_size_C); dim3 threads(BLOCK_SIZE , BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); matrixMul<<< grid , threads >>>(d_C, d_A,d_B, WA, WB); cudaMemcpy(h_C, d_C, mem_size_C ,cudaMemcpyDeviceToHost); printf("\n\n Matrix C ( Results ) \n "); for(int i = 0;i<size_C; i ++){ printf("%f",h_C[i]); if(((i+ 1) % WC) == 0) printf("\n"); } printf("\n"); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); }
4,400
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdlib.h>
#include <math.h>
#include <stdio.h>

/* Column-major walker storage: coordinate t of walker w. */
#define IDX(w, t, n_walkers) ((w) + ((t)*(n_walkers)))

/***************************************************************/
__global__ void init_curand_states(int seed, unsigned int size,
                                   curandState_t *states);
__global__ void init_walkers(float *walkers, unsigned int n_walkers,
                             unsigned int n_theta, unsigned int r,
                             curandState_t *states);
__global__ void step_walkers(float *walkers, unsigned int n_walkers,
                             unsigned int s1_sz, unsigned int offset,
                             unsigned int n_theta, float a,
                             curandState_t *states);
void walkers_to_file(float* walkers, unsigned int n_walkers,
                     unsigned int n_theta, const char *f_name);
void get_means(float *walkers, double *means, unsigned int n_walkers,
               unsigned int n_theta, int step);
void means_to_file(double* means, unsigned int steps,
                   unsigned int n_theta, const char *f_name);

/***************************************************************/
/* Choose a block size: 32 threads per 4800-walker chunk, capped at
   256 threads per block. */
unsigned int get_block_size(unsigned int n_walkers)
{
    unsigned int factor = ceil((double)n_walkers / 4800);
    unsigned int blocksize = factor*32;
    if (blocksize > 256) {
        blocksize = 256;
    }
    return blocksize;
}

int main(int argc, char*argv[])
{
    curandState_t *states;
    float *walkers_d;
    double *means;
    int seed = 10;
    int a = 2;   /* stretch-move scale parameter (passed as float) */
    int r = 2;   /* walkers start uniform in [-r, r]^n_theta */

    if(argc !=4) {
        fprintf(stderr, "usage emcee_emcee_gpu "
                "n_walkers, n_theta, steps\n");
        fprintf(stderr, "n_walkers: number of walkers "
                "to use in ensemble\n");
        fprintf(stderr, "n_theta: the dimension of the "
                "probability space to sample "
                "from \n");
        fprintf(stderr, "steps: number of steps each "
                "walker will take in the "
                "simulation\n");
        return 1;
    }

    unsigned int n_walkers = atoi(argv[1]);
    unsigned int n_theta = atoi(argv[2]);
    unsigned int steps = atoi(argv[3]);

    /* Split the ensemble into two halves; each half proposes moves
       against the other (Goodman & Weare parallel stretch move). */
    unsigned int s1_sz = ceil((float) n_walkers / 2);
    unsigned int s2_sz = n_walkers - s1_sz;
    unsigned int block_sz = get_block_size(n_walkers);
    unsigned int n_blocks = ceil((float) n_walkers / block_sz);
    long states_byte_sz = n_walkers*sizeof(curandState_t);
    long walker_byte_sz = n_walkers*n_theta*sizeof(float);
    /* BUG FIX: the original requested 2*n_theta floats of dynamic
       shared memory for the WHOLE block while every thread used the
       same two arrays, causing a data race in step_walkers.  Each
       thread now needs its own 2*n_theta slice.
       NOTE(review): for large n_theta * block_sz this can exceed the
       per-block shared-memory limit -- confirm sizes for your GPU. */
    unsigned int s_mem_sz = 2*n_theta*block_sz*sizeof(float);
    long means_sz = n_theta*steps*sizeof(double);

    means = (double*) malloc(means_sz);
    fprintf(stdout,"LAUNCHING %d BLOCKS OF SIZE %d\n", n_blocks, block_sz);

    /* allocate and init individual random number seeds */
    cudaMalloc((void**) &states, states_byte_sz);
    init_curand_states<<<2*n_blocks,block_sz>>>(seed, n_walkers, states);

    /* allocate and init each walker */
    cudaMalloc((void**) &walkers_d, walker_byte_sz);
    init_walkers<<<2*n_blocks,block_sz>>>(walkers_d, n_walkers,
                                          n_theta, r, states);

    for(int s = 0; s < steps; s++) {
        /* step with first half of walkers */
        step_walkers<<<n_blocks, block_sz, s_mem_sz>>>(
            walkers_d, n_walkers, s1_sz, 0, n_theta, a, states);
        /* step with second half of walkers */
        step_walkers<<<n_blocks, block_sz, s_mem_sz>>>(
            walkers_d, n_walkers, s2_sz, s1_sz, n_theta, a, states);
        get_means(walkers_d, means, n_walkers, n_theta, s);
    }

    const char* f_means = "means_gpu.out";
    means_to_file(means, steps, n_theta, f_means);

    /* release device and host resources (missing in the original) */
    cudaFree(states);
    cudaFree(walkers_d);
    free(means);
    return 0;
}

/***************************************************************/
/* One curand state per walker, all seeded from the same seed with
   distinct subsequences for reproducible, independent streams. */
__global__ void init_curand_states(int seed, unsigned int size,
                                   curandState_t *states)
{
    int id = threadIdx.x + blockIdx.x*blockDim.x;
    if(id < size) {
        curand_init(seed, id, 0, &states[id]);
    }
}

/***************************************************************/
/* Scatter each walker uniformly over the hypercube [-r, r]^n_theta. */
__global__ void init_walkers(float *walkers, unsigned int n_walkers,
                             unsigned int n_theta, unsigned int r,
                             curandState_t *states)
{
    int id = threadIdx.x + blockIdx.x*blockDim.x;
    if(id < n_walkers) {
        for(int t = 0; t < n_theta; t++) {
            walkers[IDX(id,t,n_walkers)] =
                2*r*(curand_uniform(&states[id]) -.5);
        }
    }
}

/***************************************************************/
/* This is the inverse CDF of the proposal distribution suggested in
   Weare and Goodman 2010.  Parameter a is the scaling value nominally
   set to 2.0.  Parameter u is a random uniform drawn from [0, 1].
   The return value is a random draw from the proposal distribution. */
__device__ float G(float a, float u)
{
    return powf((u*(a-1)+1) / sqrtf(a), 2);
}

/***************************************************************/
/* The Rosenbrock distribution is the test distribution we wish to
   approximate expected values from.  See
   https://en.wikipedia.org/wiki/Rosenbrock_function for details. */
__device__ double Rosenbrock(float *walker)
{
    return ((double) exp(-((100*pow(walker[1] - pow(walker[0],2),2)) +
                           pow(1 - walker[0],2)) / 20));
}

/***************************************************************/
/* Stretch-move update for one half of the ensemble.  Threads
   [0, s1_sz) each update walker (id + offset) against a partner drawn
   from the complementary half.  Requires
   2 * n_theta * blockDim.x * sizeof(float) of dynamic shared memory. */
__global__ void step_walkers(float *walkers, unsigned int n_walkers,
                             unsigned int s1_sz, unsigned int offset,
                             unsigned int n_theta, float a,
                             curandState_t *states)
{
    int id = threadIdx.x + (blockIdx.x * blockDim.x);
    if(id < s1_sz) {
        /* BUG FIX: the original gave every thread in the block the
           SAME shared arrays w1/w1_prime, so concurrent threads raced
           on them.  Each thread now owns a private 2*n_theta slice. */
        extern __shared__ float smem[];
        float *w1 = &smem[2 * n_theta * threadIdx.x];
        float *w1_prime = &w1[n_theta];

        curandState_t localState = states[id];
        int w1_idx = id + offset;
        /* partner index drawn uniformly from the complementary half */
        int w2_idx = s1_sz + ceil((n_walkers - s1_sz) *
                                  curand_uniform(&localState)) - 1 - offset;
        float z = G(a,curand_uniform(&localState));

        /* propose w1' = w2 + z*(w1 - w2) */
        for(int t=0; t<n_theta; t++) {
            w1[t] = walkers[IDX(w1_idx, t, n_walkers)];
            w1_prime[t] = walkers[IDX(w2_idx, t, n_walkers)] +
                z*(w1[t] - walkers[IDX(w2_idx, t, n_walkers)]);
        }
        /* Metropolis-style acceptance with the z^(d-1) factor */
        if (curand_uniform(&localState) <
            (powf(z,n_theta-1)*(Rosenbrock(w1_prime) / Rosenbrock(w1)))) {
            for(int t=0; t<n_theta; t++) {
                walkers[IDX(w1_idx, t, n_walkers)] = w1_prime[t];
            }
        }
        states[id] = localState;
    }
}

/***************************************************************/
/* Dump the walkers array (one row per walker) to a text file.
   NOTE(review): the pointer is read on the host here; callers must
   pass a host copy, not the device buffer. */
void walkers_to_file(float* walkers, unsigned int n_walkers,
                     unsigned int n_theta, const char *f_name)
{
    FILE *fp = fopen(f_name,"w");
    for(int w = 0; w < n_walkers; w++) {
        for(int t = 0; t < n_theta; t++){
            fprintf(fp, "%f\t", walkers[IDX(w,t,n_walkers)]);
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
}
/***************************************************************/ void get_means(float *walkers, double *means, unsigned int n_walkers, unsigned int n_theta, int step) { float *start_ind, *stop_ind; for(int t =0; t < n_theta; t++) { start_ind = walkers + t*n_walkers; stop_ind = walkers + (t+1)*n_walkers; thrust::device_vector<float> vec( start_ind, stop_ind); means[t + n_theta*step] = thrust::reduce( vec.begin(), vec.end()) / n_walkers; } } /***************************************************************/ void means_to_file(double* means, unsigned int steps, unsigned int n_theta, const char *f_name) { FILE *fp = fopen(f_name,"w"); for(int s = 0; s < steps; s++) { for(int t = 0; t < n_theta; t++){ fprintf(fp, "%f\t", means[t + n_theta*s]); } fprintf(fp, "\n"); } fclose(fp); }