serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
1,101
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>
#include <unistd.h>
#include <cuda.h>

#define NUM_THREADs 5
#define BLOCK_SIZE 16
#define PI 3.141592654
#define MEGEXTRA 1000000

/* Row-major matrix: elements[row * width + col]. */
typedef struct Matrix {
    int width;
    int height;
    double* elements;
} Matrix;

__global__ void MatMulKernel(Matrix, Matrix, Matrix);

/*
 * Host wrapper computing C = AA * B on one GPU.
 *
 * dev_no selects a logical device; it is wrapped into the range of devices
 * actually present and mapped from the back of the device list (original
 * scheme preserved). C.elements must be pre-allocated by the caller with
 * C.width * C.height doubles.
 *
 * Fixes vs. original: the device count is no longer forcibly overwritten
 * to 6 (which could make cudaSetDevice target a nonexistent device), the
 * debug profanity is gone, and the deprecated cudaThreadSynchronize() is
 * replaced by cudaDeviceSynchronize().
 */
extern "C" void MatMul(Matrix AA, Matrix B, Matrix C, int dev_no)
{
    int nDevices = 0;
    cudaGetDeviceCount(&nDevices);
    if (nDevices < 1) {
        fprintf(stderr, "MatMul: no CUDA devices available\n");
        return;
    }
    /* Wrap the requested index and count from the last device backwards. */
    dev_no = dev_no % nDevices;
    dev_no = nDevices - dev_no - 1;
    cudaSetDevice(dev_no);

    /* Copy A to the device. */
    Matrix d_A;
    d_A.width = AA.width;
    d_A.height = AA.height;
    size_t size = AA.width * AA.height * sizeof(double);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, AA.elements, size, cudaMemcpyHostToDevice);

    /* Copy B to the device. */
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(double);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    /* Allocate C in device memory (contents produced by the kernel). */
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(double);
    cudaMalloc(&d_C.elements, size);

    /* One thread per output element; ceil-divide so ragged edges are covered
     * (the kernel bounds-checks the overhang threads). */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (AA.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();

    /* Read C back and release device buffers. */
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

/*
 * Naive matrix-multiply kernel: each thread computes one element of C.
 * Fix vs. original: the guard used `>` instead of `>=`, so threads with
 * row == AA.height or col == B.width wrote one element out of bounds.
 */
__global__ void MatMulKernel(Matrix AA, Matrix B, Matrix C)
{
    double Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= AA.height || col >= B.width)
        return;
    for (int e = 0; e < AA.width; ++e)
        Cvalue += (AA.elements[row * AA.width + e]) * (B.elements[e * B.width + col]);
    C.elements[row * C.width + col] = Cvalue;
}
1,102
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>

#define BLOCK_SIZE 128
#define N 4194304
// #define N 2048

// Kernel 0: interleaved addressing with modulo test (slowest variant of the
// classic CUDA reduction progression). Each block sums BLOCK_SIZE consecutive
// ints into g_odata[blockIdx.x].
// NOTE(review): no `i < size` guard — relies on gridDim.x * blockDim.x == size.
__global__ void reduce0(int *g_idata, int *g_odata , int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // printf("%d\n" , i);
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem; `tid % (2*s)` causes heavy warp divergence
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

// Kernel 1: interleaved addressing with strided index (removes the modulo
// divergence but introduces shared-memory bank conflicts).
// NOTE(review): same missing `i < size` guard as reduce0.
__global__ void reduce1(int *g_idata, int *g_odata , int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        int index = 2 * s * tid;
        if (index < blockDim.x) {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

// Kernel 2: sequential addressing (conflict-free, converging halves).
// NOTE(review): same missing `i < size` guard as reduce0.
__global__ void reduce2(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

// Kernel 3: first add during global load — each block consumes
// 2 * blockDim.x input elements (host halves grid_size for this variant).
// NOTE(review): the `if (i < size)` wrapper means threads failing the test
// skip the __syncthreads() inside — only safe when the whole block passes
// or fails the test together.
__global__ void reduce3(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    if (i < size) {
        sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
        __syncthreads();
        // do reduction in shared mem
        for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (tid < s) {
                sdata[tid] += sdata[tid + s];
            }
            __syncthreads();
        }
        // write result for this block to global mem
        if (tid == 0) g_odata[blockIdx.x] = sdata[0];
    }
}

// Kernel 4: as reduce3 but with the last warp unrolled.
// NOTE(review): the __syncthreads() calls inside `if (tid < 32)` are executed
// by only one warp of the block — a barrier in divergent control flow is
// undefined behavior; the canonical version uses a volatile pointer or
// __syncwarp() here instead.
__global__ void reduce4(int *g_idata, int *g_odata, int size)
{
    __shared__ int sdata[BLOCK_SIZE];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    if (i < size) {
        sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
        __syncthreads();
        // do reduction in shared mem down to one warp
        for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
            if (tid < s) {
                sdata[tid] += sdata[tid + s];
            }
            __syncthreads();
        }
        if (tid < 32) {
            sdata[tid] += sdata[tid + 32];
            __syncthreads();
            sdata[tid] += sdata[tid + 16];
            __syncthreads();
            sdata[tid] += sdata[tid + 8];
            __syncthreads();
            sdata[tid] += sdata[tid + 4];
            __syncthreads();
            sdata[tid] += sdata[tid + 2];
            __syncthreads();
            sdata[tid] += sdata[tid + 1];
        }
        // write result for this block to global mem
        if (tid == 0) g_odata[blockIdx.x] = sdata[0];
    }
}

#define KERNEL_NUM 5
// Host-side table of kernel entry points, indexed by implementation number.
void (*KERNELS[KERNEL_NUM])(int *, int * ,int) = { reduce0, reduce1, reduce2, reduce3, reduce4};

// Fill `size` ints with `val`.
void constantInit(int *data, int size, int val)
{
    for (int i = 0; i < size; ++i) {
        data[i] = val;
    }
}

// Abort with a message if a CUDA runtime call failed; `line` is __LINE__ at
// the call site.
void checkError(cudaError_t error, int line)
{
    if (error != cudaSuccess) {
        printf("### error occurred in line %d \n error : %s", line, cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
}

// CPU baseline: sum N ones, timing both the loop alone and the loop plus
// allocation/initialization. Returns the loop-only time in ms.
// NOTE(review): the buffer A is never freed.
float serial_reduction()
{
    clock_t beginn = clock();
    int *A = (int *)malloc(sizeof(int) * N);
    constantInit(A, N, 1);
    long int sum = 0;
    clock_t begin = clock();
    for (int i = 0; i < N; i++) sum += A[i];
    clock_t end = clock();
    float time_spent = ((float)(end - begin) / CLOCKS_PER_SEC) * 1000;
    printf("serial execution : %f ms\n", time_spent);
    clock_t endd = clock();
    float time_spentt = ((float)(endd - beginn) / CLOCKS_PER_SEC) * 1000;
    printf("total serial execution : %f ms\n", time_spentt);
    return time_spent;
}

/**
 * Multi-pass GPU reduction of n ones using KERNELS[func_index]: each pass
 * reduces the previous pass's per-block partial sums until one value
 * remains, then the host adds up whatever partials are left.
 * NOTE(review): cudaEventElapsedTime is called twice per pass (harmless but
 * redundant), the start/stop events are never destroyed (leak per pass),
 * and after the first pass d_in is freed and re-pointed at d_out while a
 * fresh d_out of the previous pass's size is allocated.
 */
int reduction(int argc, char **argv, int n, int func_index)
{
    // Allocate host memory for matrices A and B
    unsigned int msize = n;
    unsigned int mem_size = sizeof(int) * msize;
    int *h_in = (int *)malloc(mem_size);
    constantInit(h_in, msize, 1);
    // Allocate device memory
    int *d_in;
    int *d_out;
    int grid_size = (n - 1) / BLOCK_SIZE + 1;
    // Kernels 3 and 4 consume two elements per thread, so half the blocks.
    if (func_index >= 3) grid_size /= 2;
    cudaError_t error;
    clock_t begin = clock();
    error = cudaMalloc((void **)&d_in, mem_size);
    checkError(error, __LINE__);
    int output_size = grid_size * sizeof(int);
    error = cudaMalloc((void **)&d_out,output_size );
    checkError(error, __LINE__);
    // copy host memory to device
    error = cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
    checkError(error, __LINE__);
    float total_time = 0.0f;
    printf("grid size : %d block size : %d number of threads : %d \n", grid_size, BLOCK_SIZE, grid_size * BLOCK_SIZE);
    int stride = 1;
    int size = N;
    // One iteration per reduction pass; grid_size shrinks by BLOCK_SIZE each
    // time until it reaches 0.
    while (grid_size >= 1) {
        output_size = grid_size * sizeof(int);
        cudaEvent_t start;
        error = cudaEventCreate(&start);
        checkError(error, __LINE__);
        cudaEvent_t stop;
        error = cudaEventCreate(&stop);
        checkError(error, __LINE__);
        // Record the start event
        error = cudaEventRecord(start, NULL);
        checkError(error, __LINE__);
        dim3 threads(BLOCK_SIZE, 1, 1);
        dim3 grid(grid_size, 1, 1);
        KERNELS[func_index]<<<grid, threads>>>(d_in, d_out, size);
        error = cudaGetLastError();
        checkError(error, __LINE__);
        // Record the stop event
        error = cudaEventRecord(stop, NULL);
        checkError(error, __LINE__);
        // Wait for the stop event to complete
        error = cudaEventSynchronize(stop);
        checkError(error, __LINE__);
        float msecTotal = 0.0f;
        error = cudaEventElapsedTime(&msecTotal, start, stop);
        total_time += msecTotal;
        error = cudaEventElapsedTime(&msecTotal, start, stop);
        checkError(error, __LINE__);
        // Next pass reduces this pass's partial sums.
        grid_size /= BLOCK_SIZE;
        stride *= BLOCK_SIZE;
        size /= BLOCK_SIZE;
        cudaFree(d_in);
        d_in = d_out;
        error = cudaMalloc((void **)&d_out, output_size);
        checkError(error, __LINE__);
    }
    // Copy the remaining partial sums (now in d_in) and finish on the host.
    int *h_out = (int *)malloc(output_size);
    error = cudaMemcpy(h_out, d_in, output_size, cudaMemcpyDeviceToHost);
    checkError(error, __LINE__);
    int total_sum = 0;
    for(int i = 0 ; i < output_size / sizeof(int) ; i++)
        total_sum += h_out[i];
    printf("Elapsed time in msec = %f and bandwidth %f GB/s result = %d \n", total_time, mem_size / (total_time * 1e6), total_sum);
    // Clean up memory
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    clock_t end = clock();
    float time_spent = ((float)(end - begin) / CLOCKS_PER_SEC) * 1000;
    printf("execution + memory allocations : %f ms\n", time_spent);
    return EXIT_SUCCESS;
}

/**
 * Program main: prints the active device, then runs every reduction
 * implementation once over an array of N ones.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Reduction Using CUDA] - Starting...\n");
    // By default, we use device 0
    int devID = 0;
    cudaSetDevice(devID);
    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    checkError(error, __LINE__);
    error = cudaGetDeviceProperties(&deviceProp, devID);
    checkError(error, __LINE__);
    // checkError already exits on failure, so the else branch is the one taken.
    if (error != cudaSuccess) {
        printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    } else {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    int n = N;
    printf("Array with size (%d)\n", n);
    // serial_reduction();
    for (size_t i = 0; i < KERNEL_NUM; i++) {
        printf("\n num implementation : %d \n", (int)i + 1);
        reduction(argc, argv, n, i);
    }
    return 0;
}
1,103
#include <stdio.h> #include <math.h> __global__ void multi(float *a, float *b, float *c, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; float result = 0; if (col < width && row < width) { for (int k = 0; k < width; k++) { result += a[row * width + k] * b[k * width + col]; } c[row * width + col] = result; } } int main(int arg0, char **arg1) { cudaThreadSynchronize(); int width = atoi(arg1[1]); int THREADS_PER_BLOCK = 256; if(arg0 == 3) THREADS_PER_BLOCK = atoi(arg1[2]); int sqrtThreads = sqrt(THREADS_PER_BLOCK); int nBlocks = width/sqrtThreads; if (width % sqrtThreads != 0) { nBlocks++; } dim3 grid(nBlocks, nBlocks, 1); dim3 block(sqrtThreads, sqrtThreads, 1); float *a_h; float *b_h; float *c_h; float *d_h; float *a_d; float *b_d; float *c_d; int size; cudaEvent_t start; cudaEvent_t stop; float elapsed1; size = width * width * sizeof(float); a_h = (float*) malloc(size); b_h = (float*) malloc(size); c_h = (float*) malloc(size); d_h = (float*) malloc(size); for (int i = 0; i < width; i++) { for (int j = 0; j < width; j++) { a_h[i * width + j] = i; b_h[i * width + j] = i; } } cudaMalloc((void**)&a_d, size); cudaMalloc((void**)&b_d, size); cudaMalloc((void**)&c_d, size); cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice); cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); multi<<<grid, block>>>(a_d, b_d, c_d, width); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed1, start, stop); printf("%f\n", elapsed1/1000); cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost); free(a_h); free(b_h); free(c_h); free(d_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
1,104
#include <stdio.h> __device__ float* hello; __global__ void TryHello(float* hello) { // float hi = *hello; // printf("%f\n", hi); printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } __global__ void setHello(float* hello, float num) { *hello = num; } // int main() { // TryHello<<<1, 5>>>(&hello); // cudaDeviceSynchronize(); // setHello<<<1, 5>>>(&hello, 99.0f); // cudaDeviceSynchronize(); // TryHello<<<1, 5>>>(&hello); // cudaDeviceSynchronize(); // } __global__ void helloCUDA(float f) { printf("Hello thread %d, f=%f\n", threadIdx.x, f); } int main() { helloCUDA<<<1, 5>>>(1.2345f); cudaDeviceSynchronize(); // setHello<<<1, 5>>>(hello, 99.0f); TryHello<<<1, 5>>>(hello); cudaDeviceSynchronize(); return 0; }
1,105
#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>

const double pi = 3.14159265358979323846;

// Fill a (numTicks+1) x (numTicks+1) grid with x-coordinates: column j maps
// to j * delta, constant along rows.
void X_init(double *a, int numTicks, double delta)
{
    for (int i = 0; i <= numTicks; ++i)
        for (int j = 0; j <= numTicks; ++j)
            a[i*(numTicks+1) + j] = (double)j * delta;
}

// Companion of X_init: row i maps to i * delta, constant along columns.
void Y_init(double *a, int numTicks, double delta)
{
    for (int i = 0; i <= numTicks; ++i)
        for (int j = 0; j <= numTicks; ++j)
            a[i*(numTicks+1) + j] = (double)i * delta;
}

// Print the grid as truncated integers, one row per line.
void array_show(double *a, int numTicks)
{
    for (int i = 0; i <= numTicks; ++i) {
        for (int j = 0; j <= numTicks; ++j)
            printf("%3d ", (int)a[i*(numTicks+1) + j]);
        printf("\n");
    }
}

// For each start point (x[tid], y[tid]) iterate a 2D rotation (tunes mux,
// muy) followed by an octupole-like kick of strength b4, and store in z the
// number of iterations survived before the particle leaves the unit circle
// (capped at 100001). Points already outside the circle get z = 0.
__global__ void kernel(double *x, double *y, double *z, double mux, double muy, double b4, int size)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= size)
        return;
    // initial values
    double x0 = (double)x[tid];
    double y0 = (double)y[tid];
    double px0 = 0.0, py0 = 0.0;
    // check that the point lies inside the unit circle before the first iteration
    if (x0*x0 + y0*y0 >= 1.0) {
        z[tid] = 0.0;
        return;
    }
    int n;
    double x1, px1, px2, y1, py1, py2;
    for (n = 0; n <= 100001; n++) {
        if (x0*x0 + y0*y0 < 1.0) {
            // linear rotation by 2*pi*mu in each plane...
            x1  =  x0 * cos(2.0*pi*mux) + px0 * sin(2.0*pi*mux);
            px1 = -x0 * sin(2.0*pi*mux) + px0 * cos(2.0*pi*mux);
            y1  =  y0 * cos(2.0*pi*muy) + py0 * sin(2.0*pi*muy);
            py1 = -y0 * sin(2.0*pi*muy) + py0 * cos(2.0*pi*muy);
            // ...then the nonlinear momentum kick
            px2 = px1 + b4 * (x1*x1*x1 - 3.0*x1*y1*y1);
            py2 = py1 - b4 * (y1*y1*y1 - 3.0*x1*x1*y1);
            x0 = x1;
            y0 = y1;
            px0 = px2;
            py0 = py2;
        } else {
            break;
        }
    }
    n--; // loop exits one past the last surviving iteration
    z[tid] = (double)(n);
}

int main(void)
{
    const double mux = 0.32;
    const double muy = 0.32;
    const double b4 = 0.50;
    int numTicks = 10;
    double delta = 1.0 / numTicks;
    int arraySize =(numTicks + 1) * (numTicks + 1);
    int numBytes = arraySize * sizeof(double);
    double *x, *y, *z, *x_dev, *y_dev, *z_dev;
    // allocate host memory
    x = (double *) malloc(numBytes);
    y = (double *) malloc(numBytes);
    z = (double *) malloc(numBytes);
    // initialize the X, Y coordinate grids
    X_init(x, numTicks, delta);
    Y_init(y, numTicks, delta);
    // allocate device memory
    cudaMalloc( (void**) &x_dev, numBytes );
    cudaMalloc( (void**) &y_dev, numBytes );
    cudaMalloc( (void**) &z_dev, numBytes );
    // copy X, Y from host to device
    cudaMemcpy( x_dev, x, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy( y_dev, y, numBytes, cudaMemcpyHostToDevice);
    // NOTE(review): z is uninitialized host memory here; this copy is
    // unnecessary since the kernel writes every element of z_dev.
    cudaMemcpy( z_dev, z, numBytes, cudaMemcpyHostToDevice);
    // GPU kernel; one extra block covers the remainder (kernel bounds-checks)
    int threadNum = 512;
    kernel <<< arraySize/threadNum + 1, threadNum >>> (x_dev, y_dev, z_dev, mux, muy, b4, arraySize);
    // copy Z from device to host (blocking memcpy also synchronizes)
    cudaMemcpy( z, z_dev, numBytes, cudaMemcpyDeviceToHost);
    // show result
    array_show(z, numTicks);
    // memory free
    cudaFree(x_dev); free(x);
    cudaFree(y_dev); free(y);
    cudaFree(z_dev); free(z);
    return 0;
}
1,106
#include "includes.h" __global__ void reset_states_u_after_spikes_kernel(float *d_states_u, float * d_param_d, float* d_last_spike_time_of_each_neuron, float current_time_in_seconds, size_t total_number_of_neurons) { int idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < total_number_of_neurons) { if (d_last_spike_time_of_each_neuron[idx] == current_time_in_seconds) { d_states_u[idx] += d_param_d[idx]; } idx += blockDim.x * gridDim.x; } __syncthreads(); }
1,107
#include "includes.h" __global__ void add_vector(int* a,int* b,int*c) { int i = blockIdx.x*blockDim.x+ threadIdx.x; c[i] = a[i] + b[i]; }
1,108
#include "includes.h" __global__ void OPT_1_HIST(int* lcm, int* hist, int n) { // int vertex = blockIdx.x; int vcomp = threadIdx.x; bool equal; // __shared__ int cval; // if(vcomp == 0) cval = 0; __syncthreads(); // if(vertex < n && vcomp < n) for(int i = vcomp; i < n; i += blockDim.x) { if(vertex == i) { atomicAdd(&cval, 1); continue; } equal = false; for(int j = 0; j < n; j++) { if(lcm[vertex*n + j] == lcm[i*n + j]) equal = true; else { equal = false; break; } } if(equal) atomicAdd(&cval, 1); } __syncthreads(); if(vertex < n && vcomp == 0 && cval > 0) { atomicAdd(&hist[cval], 1); //printf("\nv%d: %d\n", vertex, cval); } }
1,109
#include "includes.h" __global__ void boxFilter(unsigned char *srcImage, unsigned char *dstImage, unsigned int width, unsigned int height, int channel) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // only threads inside image will write results if((x>=FILTER_WIDTH/2) && (x<(width-FILTER_WIDTH/2)) && (y>=FILTER_HEIGHT/2) && (y<(height-FILTER_HEIGHT/2))) { for(int c=0 ; c<channel ; c++) { // Sum of pixel values float sum = 0; // Number of filter pixels float kS = 0; // Loop inside the filter to average pixel values for(int ky=-FILTER_HEIGHT/2; ky<=FILTER_HEIGHT/2; ky++) { for(int kx=-FILTER_WIDTH/2; kx<=FILTER_WIDTH/2; kx++) { float fl = srcImage[((y+ky)*width + (x+kx))*channel+c]; sum += fl; kS += 1; } } dstImage[(y*width+x)*channel+c] = sum / kS; } } }
1,110
//60070501054
//60070501064
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>

#define blocksize 1024
#define gridsize 1
#define threadsize 1024

/*
 * Monte-Carlo pi: each thread throws `iterations[0]` uniform points into the
 * unit square and accumulates into countStore[rank] how many landed inside
 * the quarter circle. curand is seeded with the thread rank so every thread
 * gets an independent, reproducible stream.
 */
__global__ void piEstimate(long long int *countStore, int *iterations)
{
    int rank = (blockIdx.x * blockDim.x) + threadIdx.x;
    int i = 0;
    long long int count = 0;
    int itr = iterations[0];
    double x, y;
    curandState state;
    curand_init(rank, 0, 0, &state);
    while (i < itr) {
        x = curand_uniform_double(&state);
        y = curand_uniform_double(&state);
        if (((x * x) + (y * y)) <= 1.0)
            count++;
        i++;
    }
    countStore[rank] += count;
}

/*
 * Reads an iteration count, launches one block of `blocksize` threads, and
 * combines the per-thread hit counts into a pi estimate.
 *
 * Fixes vs. original: the total sample count is computed in 64-bit
 * arithmetic (blocksize * iterations overflowed int for large inputs),
 * scanf's result is checked, cudaEventSynchronize(stop) precedes
 * cudaEventElapsedTime, the meaningless dynamic-shared-memory launch
 * argument is dropped, and the events are destroyed.
 */
int main(int argc, char **argv)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int iterationsHost, *iterationsDev;
    long long int *countStoreHost, *countStoreDev;

    printf("Enter iterations: ");
    if (scanf("%d", &iterationsHost) != 1 || iterationsHost < 0) {
        fprintf(stderr, "invalid iteration count\n");
        return 1;
    }

    countStoreHost = (long long int *)calloc(blocksize, sizeof(long long int));
    cudaMalloc((void **)&countStoreDev, sizeof(long long int) * blocksize);
    cudaMalloc((void **)&iterationsDev, sizeof(int));

    int i;
    cudaMemcpy(countStoreDev, countStoreHost, sizeof(long long int) * blocksize, cudaMemcpyHostToDevice);
    cudaMemcpy(iterationsDev, &iterationsHost, sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    piEstimate<<<gridsize, blocksize>>>(countStoreDev, iterationsDev);
    cudaEventRecord(stop);

    cudaMemcpy(countStoreHost, countStoreDev, sizeof(long long int) * blocksize, cudaMemcpyDeviceToHost);

    cudaEventSynchronize(stop);
    float runningTime = 0;
    cudaEventElapsedTime(&runningTime, start, stop);
    printf("CUDA done, took %f ms\n", runningTime);

    /* hits / total * 4 -> pi; total computed in 64-bit to avoid overflow */
    double pi = 0.0;
    for (i = 0; i < blocksize; i++)
        pi += countStoreHost[i];
    long long int totalPlot = (long long int)blocksize * iterationsHost;
    pi = (pi / totalPlot) * 4;
    printf("Pi estimate for %d iterations = %.10lf\n", iterationsHost, pi);

    free(countStoreHost);
    cudaFree(countStoreDev);
    cudaFree(iterationsDev);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
1,111
#include "includes.h" __global__ void init_i32 (int* vector, int value, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < len) { vector[idx] = value; } }
1,112
//xfail:TIMEOUT //--gridDim=64 --blockDim=128 --warp-sync=32 #include "common.h" template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n); template __global__ void reduceMultiPass<128, true>(const float *g_idata, float *g_odata, unsigned int n); template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n) { reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); }
1,113
/**
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 *
 * See cuda.h for error code descriptions.
 * NOTE: this checks driver-API CUresult codes; the runtime-API calls below
 * use checkRuntime() instead.
 */
#define CHECK_CUDA_RESULT(N) {                                      \
    CUresult result = N;                                            \
    if (result != 0) {                                              \
        printf("CUDA call on line %d returned error %d\n", __LINE__,\
            result);                                                \
        exit(1);                                                    \
    } }

/* Runtime-API counterpart of CHECK_CUDA_RESULT: abort on any cudaError_t. */
static void checkRuntime(cudaError_t err, int line)
{
    if (err != cudaSuccess) {
        printf("CUDA call on line %d returned error %s\n", line,
               cudaGetErrorString(err));
        exit(1);
    }
}

/*Thread - take in a number and square it*/
__global__ void square(float * d_in, float * d_out)
{
    /*threadIdx is actually a C struct with three members
     * x, y, z - we only need x right now*/
    int threadId = threadIdx.x;
    float data = d_in[threadId];
    d_out[threadId] = data * data;
}

/*
 * Squares ARRAY_SIZE floats on the GPU and prints input/output pairs.
 * Fixes vs. original: ARRAY_BYTES is derived from ARRAY_SIZE instead of a
 * duplicated literal 64, and every runtime call plus the kernel launch is
 * error-checked.
 */
int main(int argc, char **argv)
{
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    /*Allocate CPU memory*/
    float * h_in = (float *) malloc(ARRAY_BYTES);
    float * h_out = (float *) malloc(ARRAY_BYTES);

    /*Declare GPU pointers*/
    float * d_in;
    float * d_out;

    for (int index = 0; index < ARRAY_SIZE; index++) {
        /*Fill in host array*/
        h_in[index] = float(index);
    }

    /*Allocate memory on the GPU*/
    checkRuntime(cudaMalloc((void **) &d_in, ARRAY_BYTES), __LINE__);
    checkRuntime(cudaMalloc((void **) &d_out, ARRAY_BYTES), __LINE__);

    /*Copy the input array from the CPU (host) to the GPU (device)*/
    checkRuntime(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice), __LINE__);

    /*Launch the GPU kernel: 1 block of ARRAY_SIZE threads*/
    square<<<1, ARRAY_SIZE>>>(d_in, d_out);
    checkRuntime(cudaGetLastError(), __LINE__);

    /*Blocking copy of the results back; this also synchronizes with the kernel*/
    checkRuntime(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost), __LINE__);

    /*On CPU, print results to ensure correctness*/
    for (int index = 0; index < ARRAY_SIZE; index++) {
        printf("Num: %f \t Num Squared: %f\n", h_in[index], h_out[index]);
    }

    /*Never forget to free the memory when you are done*/
    free(h_out);
    free(h_in);
    cudaFree(d_out);
    cudaFree(d_in);
    return 0;
}
1,114
#define THREAD_BLOCK_SIZE 512 #define NUM_BLOCKS 320 // Define the size of a tile /* This function uses a compare and swap technique to acquire a mutex/lock. */ __device__ void lock(int *mutex) { while(atomicCAS(mutex, 0, 1) != 0); } /* This function uses an atomic exchange operation to release the mutex/lock. */ __device__ void unlock(int *mutex) { atomicExch(mutex, 0); } __global__ void vector_dot_product_kernel_atomics(float *A, float *B, float *C, unsigned int num_elements,int *mutex) { __shared__ float sum_per_thread[THREAD_BLOCK_SIZE]; unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the index of the thread unsigned int stride = blockDim.x * gridDim.x; float sum = 0.0f; unsigned int i = thread_id; while(i < num_elements){ sum += A[i] * B[i]; i += stride; } sum_per_thread[threadIdx.x] = sum; // Copy sum to shared memory __syncthreads(); i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i) sum_per_thread[threadIdx.x] += sum_per_thread[threadIdx.x + i]; __syncthreads(); i /= 2; } if(threadIdx.x == 0){ lock(mutex); *C += sum_per_thread[0]; unlock(mutex); } }
1,115
#include <stdio.h>

// __device__ float lerp1d(int a, int b, float w)
// {
//     if(b>a){
//         return a + w*(b-a);
//     }
//     else{
//         return b + w*(a-b);
//     }
// }

// Linear interpolation between a and b with weight w, expressed as fused
// multiply-adds: a + w*(b - a).
__device__ double lerp1d(int a, int b, float w)
{
    return fma(w, (float)b, fma(-w,(float)a,(float)a));
}

// Bilinear interpolation of the four corner samples f00/f01/f10/f11 at the
// fractional position (centroid_h, centroid_w). The weights are first folded
// into [0, 1] relative to the nearest-pixel rounding used by the caller.
__device__ double lerp2d(int f00, int f01, int f10, int f11, float centroid_h, float centroid_w )
{
    centroid_w = (1 + lroundf(centroid_w) - centroid_w)/2;
    centroid_h = (1 + lroundf(centroid_h) - centroid_h)/2;
    double r0, r1, r;
    r0 = lerp1d(f00,f01,centroid_w);
    r1 = lerp1d(f10,f11,centroid_w);
    r = lerp1d(r0, r1, centroid_h); //+ 0.00001
    // printf("%f, %f | %f, %f | %f | %d, %d, %d, %d \n", centroid_h , centroid_w, r0, r1, r, f00, f01, f10, f11);
    return r;
}

// Sanity kernel: prints once so the host can confirm the GPU is reachable.
__global__ void GPU_validation(void)
{
    printf("GPU has been activated \n");
}

// Bilinear image resize.
// Input:  src_img - NHWC, channel C default 3
// Output: dst_img - NHWC
// Launch layout: grid.x covers pixels of one image, grid.y = batch,
// grid.z = channels; blockDim.x threads per grid.x slice.
__global__ void cuRESIZE(unsigned char* src_img, unsigned char* dst_img,
                         const int src_h, const int src_w,
                         const int dst_h, const int dst_w,
                         const float scale_h, const float scale_w)
{
    // int const N = gridDim.y; // batch size
    int const n = blockIdx.y; // batch number
    int const C = gridDim.z; // channel
    int const c = blockIdx.z; // channel number
    long idx = n * blockDim.x * gridDim.x * C +
               threadIdx.x * gridDim.x * C +
               blockIdx.x * C +
               c;
    // The grid is rounded up, so each image slice has overhead threads;
    // drop any thread whose within-image offset is past the image end.
    if (idx%(blockDim.x * gridDim.x * C) >= dst_h* dst_w * C){return;}
    /*
       Now implementation :
       ( (1024 * int(DST_SIZE/3/1024)+1) - (src_h * src_w) )* N = overhead * N times
       to do: put the batch into gridDim.x
       dim3 dimGrid(int(DST_SIZE*batch/3/1024)+1,1,3);
    */
    int H = dst_h;
    int W = dst_w;
    int img_coor = idx % (dst_h*dst_w*C); //coordinate within one image, not within the batch
    int h = img_coor / (W*C);    // destination row
    int w = img_coor % (W*C)/C;  // destination column

    // Source-space centre of the destination pixel.
    float centroid_h, centroid_w;
    centroid_h = scale_h * (h + 0.5); // h w c -> x, y, z : 1080 , 1920 , 3
    centroid_w = scale_w * (w + 0.5);

    // unsigned long = 4,294,967,295 , up to (1080p,RGB)*600 imgs
    long f00,f01,f10,f11; // flat NHWC indices of the 2x2 source neighbourhood
    int src_h_idx = lroundf(centroid_h)-1;
    int src_w_idx = lroundf(centroid_w)-1;
    if (src_h_idx<0){src_h_idx=0;}
    if (src_w_idx<0){src_w_idx=0;}
    // printf("h:%d w:%d\n",src_h_idx,src_w_idx);
    // printf("src_h_idx:%d , h: %d | src_w_idx:%d , w: %d\n",src_h_idx,h,src_w_idx,w);

    // idx = NHWC = n*(HWC) + h*(WC) + w*C + c;
    f00 = n * src_h * src_w * C + src_h_idx * src_w * C + src_w_idx * C + c;
    f01 = n * src_h * src_w * C + src_h_idx * src_w * C + (src_w_idx+1) * C + c;
    f10 = n * src_h * src_w * C + (src_h_idx+1) * src_w * C + src_w_idx * C + c;
    f11 = n * src_h * src_w * C + (src_h_idx+1) * src_w * C + (src_w_idx+1) * C + c;

    int rs;
    // Bottom edge: the f10/f11 row would spill into the next image.
    if (int(f10/ (src_h * src_w * C)) > n ){
        centroid_w = (1 + lroundf(centroid_w) - centroid_w)/2;
        // NOTE(review): this path interpolates the INDICES f00/f01 rather
        // than the pixel values src_img[f00]/src_img[f01] — almost certainly
        // a bug; the main path below uses the pixel values.
        rs = lroundf(lerp1d(f00,f01,centroid_w));
    }else{
        rs = lroundf(lerp2d(src_img[f00], src_img[f01], src_img[f10], src_img[f11],
                            centroid_h, centroid_w));
    }

    long dst_idx = n * (H * W * C) + h * (W * C) + w * C + c;
    dst_img[dst_idx] = (unsigned char)rs;
}

// Host driver: builds a small synthetic 20x20x3 image, upscales it to
// 40x40x3 on the GPU, and prints the first 30 values of each channel plane.
int main(){
    int SRC_HEIGHT = 20;
    int SRC_WIDTH = 20;
    int SRC_SIZE = SRC_HEIGHT * SRC_WIDTH * 3;
    int DST_HEIGHT = 40;
    int DST_WIDTH = 40;
    int DST_SIZE = DST_HEIGHT * DST_WIDTH * 3;
    int batch = 1;

    // cudaStream_t stream1, stream2, stream3, stream4 ;
    cudaStream_t stream1;
    cudaStreamCreate ( &stream1) ;

    dim3 dimBlock(1024, 1,1); // maximum threads: 1024
    dim3 dimGrid(int(DST_SIZE/3/1024)+1,batch,3);

    unsigned char host_src[SRC_SIZE];
    // unsigned char host_dst[1108992];
    unsigned char host_dst[DST_SIZE];

    // init src image
    for(int i = 0; i < SRC_SIZE; i++){
        host_src[i] = i+1;
        // host_src[i] = (i%3);
    }

    // scale factors map destination coordinates back into source space
    float scale_h = (float)SRC_HEIGHT / DST_HEIGHT;
    float scale_w = (float)SRC_WIDTH / DST_WIDTH;

    unsigned char *device_src, *device_dst;
    cudaMalloc((unsigned char **)&device_src, SRC_SIZE* sizeof(unsigned char));
    cudaMalloc((unsigned char **)&device_dst, DST_SIZE* sizeof(unsigned char));
    cudaMemcpy(device_src , host_src , SRC_SIZE * sizeof(unsigned char), cudaMemcpyHostToDevice);

    GPU_validation<<<1,1>>>();
    cudaDeviceSynchronize();

    cuRESIZE<<<dimGrid, dimBlock, 0, stream1>>>(device_src, device_dst,
                                                SRC_HEIGHT, SRC_WIDTH,
                                                DST_HEIGHT, DST_WIDTH,
                                                scale_h, scale_w);
    cudaDeviceSynchronize();

    // for(int i = 0; i<10; i++){
    //     tester<<<dimGrid, dimBlock>>>(device_src, device_dst,
    //                                   SRC_HEIGHT, SRC_WIDTH,
    //                                   scale_h, scale_w);
    //     cudaDeviceSynchronize();
    // }

    cudaMemcpy(host_dst, device_dst, DST_SIZE * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // DEBUG : print first image in batch , first 30 pixel in 3 channels.
    // for(int i = 0; i < 30*3; i+=3){ // NHWC
    //     printf("%d\n",host_src[i]);
    // }
    printf("============================\n");
    for(int c = 0; c<3*DST_HEIGHT*DST_WIDTH ; c+=DST_HEIGHT*DST_WIDTH){ // if NCHW
        for(int i = 0 ; i < 30; i++){
            printf("%d %d %d\n", c+i, i, host_dst[c+i]);
        }
        printf("------------------------------\n");
    }
    // print first 30 elements from each chanel
    // for(int c = 0; c<3; c++){ // NHWC
    //     for(int i = 0 ; i < 30; i++){
    //         int idx = i*3 +c;
    //         printf("%d %d %d\n", c+i*3, i, host_dst[idx]);
    //     }
    //     printf("------------------------------\n");
    // }
    // int count_0=0;
    // int count_1=0;
    // int count_2=0;
    // for(int idx = 0; idx<sizeof(host_dst)/sizeof(unsigned char); idx++){ // NHWC
    //     printf("%d %d\n", idx, host_dst[idx]);
    //     if (host_dst[idx]==0){count_0++;}
    //     if (host_dst[idx]==1){count_1++;}
    //     if (host_dst[idx]==2){count_2++;}
    // }
    // printf("%d, %d, %d\n",count_0,count_1,count_2);
    // printf("%ld \n",sizeof(host_dst)/sizeof(unsigned char));

    cudaFree(device_src);
    cudaFree(device_dst);
    return 0;
}
// clear && nvcc resize_free.cu -o resize_free.o && ./resize_free.o
1,116
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Fuzzer-generated single-thread kernel: mixes the scalar arguments through
 * arbitrary float arithmetic (including deliberate 0/0 and overflow-range
 * constants) and prints the final value of `comp` with full precision.
 * var_10 must point to at least var_2 floats (main allocates 10). */
__global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float* var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25)
{
    comp += (var_3 + var_4 + (-1.6873E36f + var_5));
    for (int i=0; i < var_1; ++i) {
        comp = (var_6 - (var_7 * (var_8 * var_9)));
    }
    for (int i=0; i < var_2; ++i) {
        comp += var_11 - +0.0f / var_12;
        var_10[i] = (+1.6916E34f * (var_13 * -1.6072E-44f + fabsf(+1.4933E-41f)));
        comp = var_10[i] / -1.3024E21f / (-1.7461E35f - asinf(var_14 * -1.1414E26f));
    }
    if (comp <= atan2f(asinf((+1.2823E5f * var_15 * atanf((var_16 - var_17 + log10f((-1.4652E7f - -0.0f / +0.0f)))))), (var_18 - cosf((-1.5060E-26f + logf((var_19 - +1.3599E34f * (-0.0f * var_20)))))))) {
        float tmp_1 = var_21 * var_22 - (+0.0f + var_23);
        comp += tmp_1 * (-1.4849E-15f / var_24 - (-0.0f - var_25 / -1.3768E36f));
    }
    printf("%.17g\n", comp);
}

/* Allocate 10 floats all set to v. The buffer is passed to the kernel by
 * value (a host pointer — works only under unified/managed addressing
 * assumptions of the generator's harness) and is never freed. */
float* initPointer(float v)
{
    float *ret = (float*) malloc(sizeof(float)*10);
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}

/* Harness: expects exactly 26 numeric command-line arguments (argv[1..26]),
 * converts them, and launches the kernel with a single thread. */
int main(int argc, char** argv)
{
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float* tmp_11 = initPointer( atof(argv[11]) );
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26);
    cudaDeviceSynchronize();
    return 0;
}
1,117
#include <stdio.h>
#include <stdlib.h>

/*
 * Element-wise vector addition: pd[i] = md[i] + nd[i].
 * `size` is the ELEMENT count (fix: the original passed the byte size and
 * never used it; the kernel had no bounds guard for the grid tail).
 */
__global__ void arradd(int *md, int *nd, int *pd, int size)
{
    int myid = blockIdx.x * blockDim.x + threadIdx.x;
    if (myid < size)
        pd[myid] = md[myid] + nd[myid];
}

int main()
{
    int size = 2000 * sizeof(int);      /* byte size of each array */
    int m[2000], n[2000], p[2000], *md, *nd, *pd;
    int i = 0;

    for (i = 0; i < 2000; i++) {
        m[i] = i;
        n[i] = i;
        p[i] = 0;
    }

    cudaMalloc(&md, size);
    cudaMemcpy(md, m, size, cudaMemcpyHostToDevice);
    cudaMalloc(&nd, size);
    cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice);
    cudaMalloc(&pd, size);

    /* 10 blocks x 200 threads = exactly 2000 threads. */
    dim3 DimGrid(10, 1);
    dim3 DimBlock(200, 1);
    arradd<<< DimGrid, DimBlock >>>(md, nd, pd, 2000);   /* fix: element count */

    /* Blocking copy also synchronizes with the kernel on the default stream. */
    cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost);

    cudaFree(md);
    cudaFree(nd);
    cudaFree(pd);

    for (i = 0; i < 2000; i++) {
        printf("\t%d", p[i]);
    }
    return 0;
}
1,118
//
// Created by root on 2020/12/3.
//
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/reduce.h"
#include "thrust/generate.h"
#include "thrust/transform.h"
#include "math.h"
#include "stdio.h"

#define N (1 << 20)

using namespace thrust::placeholders;

// Monte-Carlo estimate of pi: sample N points in the unit square, count the
// fraction falling inside the quarter circle, multiply by 4.
int main()
{
    // Fill two host vectors with raw rand() samples.
    thrust::host_vector<float> h_x(N);
    thrust::host_vector<float> h_y(N);
    thrust::generate(h_x.begin(), h_x.end(), rand);
    thrust::generate(h_y.begin(), h_y.end(), rand);

    // Move the samples to the device.
    thrust::device_vector<float> d_x = h_x;
    thrust::device_vector<float> d_y = h_y;

    // Normalize each coordinate into [0, 1].
    thrust::transform(d_x.begin(), d_x.end(), d_x.begin(), _1 / RAND_MAX);
    thrust::transform(d_y.begin(), d_y.end(), d_y.begin(), _1 / RAND_MAX);

    // Per-point indicator: 1 when x^2 + y^2 < 1 (inside the quarter circle).
    thrust::device_vector<float> d_inCircle(N);
    thrust::transform(d_x.begin(), d_x.end(), d_y.begin(), d_inCircle.begin(),
                      (_1 * _1 + _2 * _2) < 1);

    // Sum the indicators and scale to get the estimate.
    float pi = thrust::reduce(d_inCircle.begin(), d_inCircle.end()) * 4.f / N;
    printf("pi = %f\n", pi);
    return 0;
}
1,119
#include "includes.h" __constant__ float *c_Kernel; __global__ void subtract(float *d_dst, float*d_src_1, float* d_src_2, int len) { int baseX = blockIdx.x * blockDim.x + threadIdx.x; if (baseX < len) { d_dst[baseX] = d_src_1[baseX] - d_src_2[baseX]; } }
1,120
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <float.h>

// includes, kernels
#include "vector_dot_product_kernel.cu"

void run_test(unsigned int);
float compute_on_device(float *, float *, int);
void check_for_error(const char *);
extern "C" float compute_gold(float *, float *, unsigned int);

/* Elapsed microseconds between two gettimeofday() samples.
 * Fix: the original formula ignored tv_sec entirely and double-counted
 * tv_usec, so both reported timings were meaningless. */
static float elapsed_us(struct timeval start, struct timeval stop)
{
    return (float)(stop.tv_sec - start.tv_sec) * 1000000.0f +
           (float)(stop.tv_usec - start.tv_usec);
}

int main(int argc, char **argv)
{
    if (argc != 2) {
        printf("Usage: vector_dot_product <num elements> \n");
        exit(0);
    }
    unsigned int num_elements = atoi(argv[1]);
    run_test(num_elements);
    return 0;
}

/* Perform vector dot product on the CPU and the GPU and compare results for correctness. */
void run_test(unsigned int num_elements)
{
    struct timeval start, stop;
    unsigned int size = sizeof(float) * num_elements;

    /* Allocate memory on the CPU for the input vectors A and B. */
    float *A = (float *)malloc(size);
    float *B = (float *)malloc(size);

    /* Initialize the inputs to random values in [-.5, .5]. */
    printf("Generating random vectors with values between [-.5, .5]. \n");
    srand(time(NULL));
    for (unsigned int i = 0; i < num_elements; i++) {
        A[i] = (float)rand() / (float)RAND_MAX - 0.5;
        B[i] = (float)rand() / (float)RAND_MAX - 0.5;
    }

    printf("Generating dot product on the CPU. \n");
    gettimeofday(&start, NULL);
    float reference = compute_gold(A, B, num_elements);
    gettimeofday(&stop, NULL);
    printf("CPU Execution time = %fus. \n", elapsed_us(start, stop));

    float gpu_result = compute_on_device(A, B, num_elements);

    /* Compare the CPU and GPU results with a relative tolerance. */
    float threshold = 0.001;
    printf("Result on CPU: %f, result on GPU: %f. \n", reference, gpu_result);
    if (fabsf((reference - gpu_result) / reference) < threshold) {
        printf("TEST passed. \n");
    } else {
        printf("TEST failed. \n");
    }

    free(A);
    free(B);
    return;
}

/* Compute the dot product on the device; the scalar result is read back from C_dev[0]. */
float compute_on_device(float *A_on_host, float *B_on_host, int num_elements)
{
    struct timeval start, stop;
    float *A_dev = NULL;
    float *B_dev = NULL;
    float *C_dev = NULL;

    cudaMalloc((void **)&A_dev, num_elements * sizeof(float));
    cudaMalloc((void **)&B_dev, num_elements * sizeof(float));
    cudaMalloc((void **)&C_dev, GRID_SIZE * sizeof(float));

    cudaMemcpy(A_dev, A_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_dev, B_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(C_dev, 0, GRID_SIZE * sizeof(float));  /* byte-wise zero fill */

    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid(GRID_SIZE, 1);

    /* Device-side lock used by the kernel for the final cross-block reduction. */
    int *mutex = NULL;
    cudaMalloc((void **)&mutex, sizeof(int));
    cudaMemset(mutex, 0, sizeof(int));

    printf("Starting Parallel Code \n");
    gettimeofday(&start, NULL);
    vector_dot_product_kernel<<<dimGrid, dimBlock>>>(num_elements, A_dev, B_dev, C_dev, mutex);
    cudaDeviceSynchronize();   /* cudaThreadSynchronize() is deprecated */
    gettimeofday(&stop, NULL);
    printf("GPU Execution time = %fus. \n", elapsed_us(start, stop));

    check_for_error("Error in Kernel execution");

    float result;
    cudaMemcpy(&result, C_dev, sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(A_dev);
    cudaFree(B_dev);
    cudaFree(C_dev);
    cudaFree(mutex);   /* fix: the mutex buffer was leaked */
    return result;
}

/* This function checks for errors returned by the CUDA run time. */
void check_for_error(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        printf("CUDA ERROR: %s (%s). \n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
1,121
#include <stdio.h>
#include <vector>

__global__ void add_kernel(int *a, int *b, int *c)
{
    *c = *a + *b;
}

#define BS 2

// Row-major matrix with an explicit row pitch (`stride`).
typedef struct {
    int width;
    int height;
    int stride;
    float *data;
} Matrix;

// --- device helpers -------------------------------------------------------

// Read mat(row, col). Fix: the original indexed with `width`; `stride` is the
// row pitch (they happen to coincide in main(), hiding the bug for sub-views).
__device__ float GetElement(Matrix mat, int row, int col)
{
    return mat.data[row * mat.stride + col];
}

__device__ void SetElement(Matrix mat, int row, int col, float val)
{
    mat.data[row * mat.stride + col] = val;
}

// BS x BS sub-matrix view at block coordinates (row, col); shares storage.
// Fix: the offset must use the parent's stride, not its width.
__device__ Matrix GetSubMatrix(Matrix mat, int row, int col)
{
    Matrix subx;
    subx.width = subx.height = BS;
    subx.stride = mat.stride;
    subx.data = &mat.data[row * BS * mat.stride + col * BS];
    return subx;
}

// Tiled matrix multiply C = A * B. One thread block computes one BS x BS tile
// of C; each iteration stages one tile of A and B in shared memory.
// Precondition: all dimensions are multiples of BS.
__global__ void KDot(Matrix A, Matrix B, Matrix C)
{
    int block_row = blockIdx.y;
    int block_col = blockIdx.x;
    float res = 0;                         // per-thread accumulator in a register
    Matrix csub = GetSubMatrix(C, block_row, block_col);
    int row = threadIdx.y;
    int col = threadIdx.x;

    for (int k = 0; k < A.width / BS; k++) {
        Matrix asub = GetSubMatrix(A, block_row, k);
        Matrix bsub = GetSubMatrix(B, k, block_col);

        __shared__ float a_shm[BS][BS];
        __shared__ float b_shm[BS][BS];
        // Each thread loads one element of each tile from global memory once.
        a_shm[row][col] = GetElement(asub, row, col);
        b_shm[row][col] = GetElement(bsub, row, col);
        __syncthreads();                   // tiles fully staged before use

        for (int e = 0; e < BS; e++) {
            res += a_shm[row][e] * b_shm[e][col];
        }
        __syncthreads();                   // done with tiles before next load
    }
    SetElement(csub, row, col, res);
}

// Host wrapper: copies A and B to the device, launches KDot, copies C back.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    dim3 dim_block(BS, BS);
    // Fix: grid.x must cover the COLUMNS of C (B.width) and grid.y the rows
    // (A.height); the original had them swapped (harmless only for square grids).
    dim3 dim_grid(B.width / BS, A.height / BS);

    Matrix d_A, d_B, d_C;

    d_A.width = A.width; d_A.height = A.height; d_A.stride = A.stride;
    size_t size = d_A.width * d_A.height * sizeof(float);
    cudaError_t err = cudaMalloc(&d_A.data, size);
    if (err) { printf("Error while allocate matrix A ==> %s\n", cudaGetErrorString(err)); exit(-1); }
    cudaMemcpy(d_A.data, A.data, size, cudaMemcpyHostToDevice);

    d_B.width = B.width; d_B.height = B.height; d_B.stride = B.stride;
    size = d_B.width * d_B.height * sizeof(float);
    err = cudaMalloc(&d_B.data, size);
    if (err) { printf("Error while allocate matrix B ==> %s\n", cudaGetErrorString(err)); exit(-1); }
    cudaMemcpy(d_B.data, B.data, size, cudaMemcpyHostToDevice);

    d_C.width = C.width; d_C.height = C.height; d_C.stride = C.stride;
    size = d_C.width * d_C.height * sizeof(float);
    err = cudaMalloc(&d_C.data, size);
    if (err) { printf("Error while allocate matrix C ==> %s\n", cudaGetErrorString(err)); exit(-1); }

    KDot<<<dim_grid, dim_block>>>(d_A, d_B, d_C);
    err = cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
    if (err) { printf("Error while launch kernel ==> %s\n", cudaGetErrorString(err)); }

    err = cudaMemcpy(C.data, d_C.data, size, cudaMemcpyDeviceToHost);
    if (err) { printf("Error while copy result to host memory ==> %s\n", cudaGetErrorString(err)); }

    cudaFree(d_A.data);
    cudaFree(d_B.data);
    cudaFree(d_C.data);
}

int main()
{
    Matrix A, B, C;
    // A is 6x4, B is 4x6, C is 6x6 (fix: the old comment claimed 6x5 / 5x6).
    A.width = A.stride = B.height = 4;
    A.height = B.stride = B.width = 6;
    C.width = C.height = C.stride = 6;

    A.data = (float*)malloc(A.width * A.height * sizeof(float));
    B.data = (float*)malloc(B.width * B.height * sizeof(float));
    C.data = (float*)malloc(C.width * C.height * sizeof(float));

    // Initialize A and B to all ones, so every entry of C should equal A.width.
    for (int i = 0; i < A.height; i++)
        for (int j = 0; j < A.width; j++)
            A.data[i * A.width + j] = 1;
    for (int i = 0; i < B.height; i++)
        for (int j = 0; j < B.width; j++)
            B.data[i * B.width + j] = 1;

    MatMul(A, B, C);

    printf("res: \n");
    for (int i = 0; i < C.height; i++) {
        for (int j = 0; j < C.width; j++) {
            printf("%.1f ", C.data[i * C.width + j]);
        }
        printf("\n");
    }

    // Fix: the host buffers were never released.
    free(A.data);
    free(B.data);
    free(C.data);
    return 0;
}
1,122
#include <stdio.h>

#define N 64
#define TPB 32

// Map thread index i in [0, n) to an evenly spaced coordinate in [0, 1].
__device__ float scale(int i, int n)
{
    return ((float)i) / (n - 1);
}

// Absolute distance between two scalars.
// Fix: sqrt() is the double overload — use sqrtf() for float arguments.
__device__ float distance(float x1, float x2)
{
    return sqrtf((x2 - x1) * (x2 - x1));
}

// Computes |scale(i) - ref| for each of `len` points and logs every thread.
__global__ void distanceKernel(float *d_out, float ref, int len)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float x = scale(i, len);
    d_out[i] = distance(x, ref);
    // Fix: the original format string had 6 specifiers for 7 arguments, so the
    // computed distance (d_out[i]) was silently dropped from the output.
    printf("blockIdx:%2d,blockDim:%2d,threadIdx:%2d,i = %2d: dist from %f to %f = %f.\n",
           blockIdx.x, blockDim.x, threadIdx.x, i, ref, x, d_out[i]);
}

int main()
{
    const float ref = 0.5f;
    float *d_out = 0;
    cudaMalloc(&d_out, N * sizeof(float));
    distanceKernel<<<N / TPB, TPB>>>(d_out, ref, N);
    // Fix: without a synchronize, the process may exit before the device
    // printf buffer is flushed.
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}
1,123
#include <stdio.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>

#define MAX_ARGS 10
#define REC_LENGTH 49    // size of a record in db
#ifndef REC_WINDOW
#define REC_WINDOW 15000 // number of records to take in at a time
#endif
#define LATITUDE_POS 28  // character position of the latitude value in each record
#define OPEN 10000       // initial value of nearest neighbors

struct neighbor {
    char entry[REC_LENGTH];
    float dist;
};

/**
 * Kernel, executed on the GPU.
 * Parses the fixed-position latitude/longitude text fields of each record and
 * computes the Euclidean distance to the target position (x2, y2).
 * One thread per record; threads past N do nothing.
 */
__global__ void euclid(char *data, float x2, float y2, float *z, int N, int W, int L_POS)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float tmp_lat = 0.0, tmp_long = 0.0;
    int position = (idx * W) + L_POS - 1;
    if (idx < N) {
        /* Latitude: "DDD.D" starting at `position`; longitude 6 chars later. */
        char temp1[5];
        for (int i = 0; i < 5; i++) { temp1[i] = data[position + i]; }
        char temp2[5];
        for (int i = 0; i < 5; i++) { temp2[i] = data[position + 6 + i]; }
        int dig1, dig2, dig3, dig_1;
        /* Blank-padded digits parse as 0. */
        if (temp1[0] == ' ') { dig1 = 0; } else { dig1 = temp1[0] - 48; tmp_lat += dig1 * 100; }
        if (temp1[1] == ' ') { dig2 = 0; } else { dig2 = temp1[1] - 48; tmp_lat += dig2 * 10; }
        if (temp1[2] == ' ') { dig3 = 0; } else { dig3 = temp1[2] - 48; tmp_lat += dig3 * 1; }
        dig_1 = temp1[4] - 48;
        tmp_lat += (float)dig_1 / 10;
        if (temp2[0] == ' ') { dig1 = 0; } else { dig1 = temp2[0] - 48; tmp_long += dig1 * 100; }
        if (temp2[1] == ' ') { dig2 = 0; } else { dig2 = temp2[1] - 48; tmp_long += dig2 * 10; }
        if (temp2[2] == ' ') { dig3 = 0; } else { dig3 = temp2[2] - 48; tmp_long += dig3 * 1; }
        dig_1 = temp2[4] - 48;
        tmp_long += (float)dig_1 / 10;
        /* Fix: sqrt() is the double overload; use sqrtf() for float math. */
        z[idx] = sqrtf(((tmp_lat - x2) * (tmp_lat - x2)) + ((tmp_long - y2) * (tmp_long - y2)));
    }
}

/**
 * This program finds the k-nearest neighbors.
 * Usage: ./nn <filelist> <num> <target latitude> <target longitude> <goldfile>
 *   filelist:    file with the filenames of the record databases
 *   num:         number of nearest neighbors to find
 *   target lat/long: coordinates for the distance calculation
 *   goldfile:    reference output to compare against
 * The filelist and data are generated by hurricane_gen.c.
 */
int main(int argc, char *argv[])
{
    FILE *flist, *fp;
    int i = 0, j = 0, k = 0, rec_count = 0, done = 0;
    char sandbox[REC_WINDOW * REC_LENGTH], dbname[64];
    struct neighbor *neighbors = NULL;
    float target_lat, target_long;
    char *goldfile;

    if (argc < 6) {
        fprintf(stderr, "Invalid set of arguments\n");
        exit(-1);
    }
    flist = fopen(argv[1], "r");
    if (!flist) { printf("error opening flist\n"); exit(1); }
    k = atoi(argv[2]);
    target_lat = atof(argv[3]);
    target_long = atof(argv[4]);
    goldfile = argv[5];

    neighbors = (struct neighbor *)malloc(k * sizeof(struct neighbor));
    if (neighbors == NULL) { fprintf(stderr, "no room for neighbors\n"); exit(0); }
    for (j = 0; j < k; j++) { neighbors[j].dist = OPEN; }

    /** Main processing **/
    if (fscanf(flist, "%s\n", dbname) != 1) { fprintf(stderr, "error reading filelist\n"); exit(0); }
    fp = fopen(dbname, "r");
    if (!fp) { printf("error opening flist\n"); exit(1); }

    float *z;      /* host distance buffer */
    float *z_d;    /* device distance buffer */
    char *data;    /* device record buffer */

    z = (float *)malloc(REC_WINDOW * sizeof(float));
    cudaMalloc((void **)&data, sizeof(char) * REC_WINDOW * REC_LENGTH);
    cudaMalloc((void **)&z_d, sizeof(float) * REC_WINDOW);
    /* Blank-fill so a short final read still parses as empty fields. */
    for (unsigned i = 0; i < REC_WINDOW * REC_LENGTH; i++) { sandbox[i] = ' '; }

    while (!done) {
        /* Read REC_WINDOW records; on a short read, advance to the next db
         * file from the filelist, or finish when the filelist is exhausted. */
        rec_count = fread(sandbox, REC_LENGTH, REC_WINDOW, fp);
        if (rec_count != REC_WINDOW) {
            if (!ferror(flist)) { /* an eof occurred */
                fclose(fp);
                if (feof(flist))
                    done = 1;
                else {
                    if (fscanf(flist, "%s\n", dbname) != 1) {
                        fprintf(stderr, "error reading filelist\n");
                        exit(0);
                    }
                    fp = fopen(dbname, "r");
                    if (!fp) { printf("error opening a db\n"); exit(1); }
                }
            } else {
                perror("Error");
                exit(0);
            }
        }

        float x2 = target_lat;
        float y2 = target_long;

        /* Host -> device transfer of the raw record window. */
        cudaMemcpy(data, sandbox, (sizeof(char) * REC_WINDOW * REC_LENGTH), cudaMemcpyHostToDevice);

        /* Launch: one thread per record, padded up to a whole block. */
        int block_size = 16;
        dim3 dimBlock(block_size);
        dim3 dimGrid((REC_WINDOW / dimBlock.x) + (!(REC_WINDOW % dimBlock.x) ? 0 : 1));
        euclid<<<dimGrid, dimBlock>>>(data, x2, y2, z_d, REC_WINDOW, REC_LENGTH, LATITUDE_POS);
        cudaDeviceSynchronize();  /* cudaThreadSynchronize() is deprecated */

        cudaMemcpy(z, z_d, sizeof(float) * REC_WINDOW, cudaMemcpyDeviceToHost);

        /* Update the running k-nearest list: replace the current farthest
         * neighbor whenever a closer record is found. */
        for (i = 0; i < rec_count; i++) {
            float max_dist = -1;
            int max_idx = 0;
            for (j = 0; j < k; j++) {
                if (neighbors[j].dist > max_dist) {
                    max_dist = neighbors[j].dist;
                    max_idx = j;
                }
            }
            if (z[i] < neighbors[max_idx].dist) {
                sandbox[(i + 1) * REC_LENGTH - 1] = '\0';
                strcpy(neighbors[max_idx].entry, sandbox + i * REC_LENGTH);
                neighbors[max_idx].dist = z[i];
            }
        }
    } /* End while */

    free(z);
    cudaFree(data);
    cudaFree(z_d);

    fprintf(stderr, "The %d nearest neighbors are:\n", k);
    FILE *fpo = fopen("result.txt", "w");
    if (!fpo) { /* fix: the original wrote through a possibly NULL handle */
        printf("error opening result.txt\n");
        exit(1);
    }
    for (j = 0; j < k; j++) {
        if (!(neighbors[j].dist == OPEN)) {
            fprintf(stderr, "%s --> %f\n", neighbors[j].entry, neighbors[j].dist);
            fprintf(fpo, "%s --> %f\n", neighbors[j].entry, neighbors[j].dist);
        }
    }
    fclose(fpo);

    if (goldfile) {
        FILE *gold = fopen(goldfile, "r");
        FILE *result = fopen("result.txt", "r");
        int result_error = 0;
        while (!feof(gold) && !feof(result)) {
            if (fgetc(gold) != fgetc(result)) {
                result_error = 1;
                break;
            }
        }
        if ((feof(gold) ^ feof(result)) | result_error) {
            printf("\nFAILED\n");
        } else {
            printf("\nPASSED\n");
        }
        fclose(gold);
        fclose(result);
    }
    free(neighbors);
    fclose(flist);
}
1,124
/**
 * Author: Zachariah Bryant
 * Description: Pre-generates a thermalized lattice configuration for later use.
 */

// ********************
// *    Headers       *
// ********************
#include <sys/stat.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#include "./Headers/Complex.cuh"
#include "./Headers/LattiCuda.cuh"

using namespace std;

// ************************************
// *    Definition of Variables       *
// ************************************
#define LATTSIZE 16      // lattice side length
#define BETA 5.7         // inverse coupling
#define THERMAL 1000     // equilibration sweeps before saving

// **************************
// *    Main Function       *
// **************************
int main()
{
    LattiCuda model(LATTSIZE, BETA);

    // Run the requested number of equilibration sweeps, then persist the lattice.
    int sweep = 0;
    while (sweep < THERMAL) {
        model.equilibrate();
        ++sweep;
    }
    model.save();
    return 0;
}
1,125
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h> // CURAND lib header file

#define TRIALS_PER_THREAD 1024 // samples drawn by each thread
#define BLOCKS 256
#define THREADS 512
#define PI 3.1415926535 // known value of pi, used to report the error

// Monte-Carlo pi: every thread draws TRIALS_PER_THREAD uniform points in the
// unit square and writes its own 4 * (hits / trials) estimate.
__global__ void pi_mc(float *estimate, curandState *states)
{
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;

    // Per-thread RNG stream, seeded by the thread id.
    curand_init(tid, 0, 0, &states[tid]);

    int points_in_circle = 0;
    for (int i = 0; i < TRIALS_PER_THREAD; i++) {
        float x = curand_uniform(&states[tid]);
        float y = curand_uniform(&states[tid]);
        // Branchless hit count: 1 when the point lies inside the quarter circle.
        points_in_circle += (x * x + y * y <= 1.0f);
    }
    estimate[tid] = 4.0f * points_in_circle / (float)TRIALS_PER_THREAD;
}

int main(int argc, char *argv[])
{
    float host[BLOCKS * THREADS];
    float *dev;
    curandState *devStates;

    // Device buffers: one estimate and one RNG state per thread.
    cudaMalloc((void **)&dev, BLOCKS * THREADS * sizeof(float));
    cudaMalloc((void **)&devStates, BLOCKS * THREADS * sizeof(curandState));

    pi_mc<<<BLOCKS, THREADS>>>(dev, devStates);

    // Blocking copy; also synchronizes with the kernel on the default stream.
    cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(dev);
    cudaFree(devStates);

    // Average the per-thread estimates and report the deviation from PI.
    float pi_gpu = 0.0;
    for (int i = 0; i < BLOCKS * THREADS; i++) {
        pi_gpu += host[i];
    }
    pi_gpu /= (BLOCKS * THREADS);

    printf("Trials per thread is: %d, number of blocks is: %d, number of threads is: %d\n",
           TRIALS_PER_THREAD, BLOCKS, THREADS);
    printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI);
    return 0;
}
1,126
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
 * C = A*B (mod == 0) or C += A*B (mod == 1) for N x N matrices.
 * Fix: the original had a __syncthreads() inside the divergent
 * `if (col < N && fil < N)` branch, which is undefined behavior when some
 * threads of a block fall outside the matrix; it guarded nothing (no shared
 * memory is used) and was removed.
 */
__global__ void matrixMultGPU(int *A, int *B, int *C, int N, int mod)
{
    int k, sum = 0;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int fil = threadIdx.y + blockDim.y * blockIdx.y;
    if (col < N && fil < N) {
        for (k = 0; k < N; k++) {
            sum += A[fil * N + k] * B[k * N + col];
        }
        if (mod == 0) {
            C[fil * N + col] = sum;
        } else {
            C[fil * N + col] += sum;
        }
    }
}

/* Checked device allocation of n ints; exits on failure. */
static int *deviceAlloc(int n)
{
    int *p;
    if (cudaMalloc(&p, n * sizeof(int)) != cudaSuccess) {
        printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
        exit(1);
    }
    return p;
}

/* Checked host-to-device copy of n ints; exits on failure. */
static void copyToDevice(int *dst, const int *src, int n)
{
    if (cudaMemcpy(dst, src, n * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
        exit(1);
    }
}

int main(void)
{
    /* Multiplies two NN x NN matrices by 2x2 block decomposition:
     * each quadrant of C is the sum of two N x N sub-products. */
    int *a, *b, *c, N, NN;
    int *a_ul, *a_ur, *a_ll, *a_lr, *b_ul, *b_ur, *b_ll, *b_lr, *c_ul, *c_ur, *c_ll, *c_lr;
    int *DB, *DA, *DC1, *DC2;
    int i, j;
    int T, div = 1, iteraciones = 10, ind = 0;
    float elapsedTime;

    printf("Ingrese el tamano deseado para las matrices:\n");
    scanf("%d", &NN);
    if (NN % 2 != 0 || NN < 2) {
        printf("El tamaño debe ser mayor a dos y par\n");
        exit(1);
    }
    N = (int)NN / 2;

    /* Timing events. */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    printf("Creando espacio e inicializando matrices...\n");
    a = (int *)malloc(NN * NN * sizeof(int));
    b = (int *)malloc(NN * NN * sizeof(int));
    c = (int *)malloc(NN * NN * sizeof(int));
    a_ll = (int *)malloc(N * N * sizeof(int));
    a_lr = (int *)malloc(N * N * sizeof(int));
    a_ul = (int *)malloc(N * N * sizeof(int));
    a_ur = (int *)malloc(N * N * sizeof(int));
    b_ll = (int *)malloc(N * N * sizeof(int));
    b_lr = (int *)malloc(N * N * sizeof(int));
    b_ul = (int *)malloc(N * N * sizeof(int));
    b_ur = (int *)malloc(N * N * sizeof(int));
    c_ll = (int *)malloc(N * N * sizeof(int));
    c_lr = (int *)malloc(N * N * sizeof(int));
    c_ul = (int *)malloc(N * N * sizeof(int));
    c_ur = (int *)malloc(N * N * sizeof(int));

    /* a[i][j] = b[i][j] = i*j */
    for (i = 0; i < NN; i++) {
        for (j = 0; j < NN; j++) {
            a[i * NN + j] = i * j;
            b[i * NN + j] = i * j;
        }
    }
    /* Split into upper-left/upper-right/lower-left/lower-right quadrants. */
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            a_ul[i * N + j] = a[i * NN + j];
            a_ur[i * N + j] = a[i * NN + j + N];
            a_ll[i * N + j] = a[(i + N) * NN + j];
            a_lr[i * N + j] = a[(i + N) * NN + j + N];
            b_ul[i * N + j] = b[i * NN + j];
            b_ur[i * N + j] = b[i * NN + j + N];
            b_ll[i * N + j] = b[(i + N) * NN + j];
            b_lr[i * N + j] = b[(i + N) * NN + j + N];
        }
    }

    DA = deviceAlloc(N * N);
    DB = deviceAlloc(N * N);
    DC1 = deviceAlloc(N * N);
    DC2 = deviceAlloc(N * N);
    printf("Asignacion de memoria correcta\n");

    /* Choose the smallest grid whose blocks stay within 32x32 threads. */
    while ((float)N / (float)div > 32) {
        div++;
    }
    float f_N = (float)N, f_div = (float)div;
    T = (int)ceil(f_N / f_div);
    dim3 ThreadsBloque(T, T);
    dim3 Bloques(div, div);
    printf("Se va a realizar la suma con %d bloques y %d hilos\n", div, T);
    printf("Se va a realizar %d iteraciones de matrices %dx%d\n", iteraciones, NN, NN);

    cudaEventRecord(start, 0);
    while (ind < iteraciones) {
        /* Top half: C_ul = A_ul*B_ul + A_ur*B_ll ; C_ur = A_ul*B_ur + A_ur*B_lr */
        copyToDevice(DA, a_ul, N * N);
        copyToDevice(DB, b_ul, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC1, N, 0);
        copyToDevice(DB, b_ur, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC2, N, 0);
        copyToDevice(DA, a_ur, N * N);
        copyToDevice(DB, b_ll, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC1, N, 1);
        copyToDevice(DB, b_lr, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC2, N, 1);
        cudaMemcpy(c_ul, DC1, N * N * sizeof(int), cudaMemcpyDeviceToHost);
        /* Fix: the original copied DC1 into c_ur as well, discarding the
         * upper-right product accumulated in DC2. */
        cudaMemcpy(c_ur, DC2, N * N * sizeof(int), cudaMemcpyDeviceToHost);

        /* Bottom half: C_ll = A_ll*B_ul + A_lr*B_ll ; C_lr = A_ll*B_ur + A_lr*B_lr */
        copyToDevice(DA, a_ll, N * N);
        copyToDevice(DB, b_ul, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC1, N, 0);
        copyToDevice(DB, b_ur, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC2, N, 0);
        copyToDevice(DA, a_lr, N * N);
        copyToDevice(DB, b_ll, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC1, N, 1);
        copyToDevice(DB, b_lr, N * N);
        matrixMultGPU<<<Bloques, ThreadsBloque>>>(DA, DB, DC2, N, 1);
        cudaMemcpy(c_ll, DC1, N * N * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(c_lr, DC2, N * N * sizeof(int), cudaMemcpyDeviceToHost);
        ind++;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("El tiempo tomado para %d iteraciones fue de %3.5f ms\n", iteraciones, elapsedTime);

    /* Reassemble C from its quadrants. */
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            c[i * NN + j] = c_ul[i * N + j];
            c[i * NN + j + N] = c_ur[i * N + j];
            c[(i + N) * NN + j] = c_ll[i * N + j];
            c[(i + N) * NN + j + N] = c_lr[i * N + j];
        }
    }
    printf("Por ejemplo %d deberia ser 0\n", c[3 * NN]);
    printf("Por ejemplo %d deberia ser 0\n", c[(int)NN / 2]);
    printf("Por ejemplo %d deberia ser %d\n", c[NN + 1],
           (int)((2 * pow(NN - 1, 3) + 3 * pow(NN - 1, 2) + NN - 1) / 6));

    free(a); free(a_ll); free(a_lr); free(a_ul); free(a_ur);
    free(b_ur); free(b_ll); free(b_lr); free(b_ul);
    free(c_ll); free(c_lr); free(c_ul); free(c_ur);
    free(b); free(c);
    cudaFree(DA); cudaFree(DB); cudaFree(DC1); cudaFree(DC2);
    return 0;
}
1,127
#pragma once #include "Vector3.cuh.cu" #include "Ray.cuh.cu" namespace RayTracing { class Plane { protected: Point3 m_A, m_B, m_C; Vector3 m_normal; float m_D; public: Plane( const Vector3 &A, const Vector3 &B, const Vector3 &C, const Point3 &origin ) : m_A(A + origin), m_B(B + origin), m_C(C + origin) { m_normal = (m_B - m_A).Cross((m_C - m_A)).UnitVector(); m_D = m_A.Dot(m_normal); } __host__ __device__ float PlanePoint(const Ray &ray) const { return (m_D - m_normal.Dot(ray.origin)) / m_normal.Dot(ray.direction); } }; } // namespace RayTracing
1,128
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
//#include <time.h>

#define N 2

// One block per element: block b computes c[b] = a[b] + b[b].
// NOTE(review): no bounds guard — launch with exactly N single-thread blocks.
__global__ void MoreSums(int *a, int *b, int *c)
{
    const int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
1,129
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Element-wise vector addition: c[i] = a[i] + b[i] for i < n. */
__global__ void vector_add(float *a, float *b, float *c, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        c[tid] = a[tid] + b[tid];
}

int main(int argc, char *argv[])
{
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    if (argc < 2) {
        printf("need exactly 1 argument\n");
        return 0;
    }
    int vector_size = atoi(argv[1]);

    float *host_a = (float *)malloc(sizeof(float) * vector_size);
    float *host_b = (float *)malloc(sizeof(float) * vector_size);
    float *host_c = (float *)malloc(sizeof(float) * vector_size);
    float *device_a;
    float *device_b;
    float *device_c;
    cudaMalloc(&device_a, sizeof(float) * vector_size);
    cudaMalloc(&device_b, sizeof(float) * vector_size);
    cudaMalloc(&device_c, sizeof(float) * vector_size);

    int i;
    for (i = 0; i < vector_size; i++) {
        host_a[i] = 1;
        host_b[i] = 1;
        //host_a[i] = rand() % vector_size;
        //host_b[i] = rand() % vector_size;
    }

    cudaEventRecord(start, 0);
    cudaMemcpy(device_a, host_a, sizeof(float) * vector_size, cudaMemcpyHostToDevice);
    /* Fix: the original copied host_a into device_b, so b was never uploaded
     * (masked here only because host_a and host_b hold identical data). */
    cudaMemcpy(device_b, host_b, sizeof(float) * vector_size, cudaMemcpyHostToDevice);

    /* Ceil-divide the element count into 1024-thread blocks. */
    int block_size = 1024;
    int grid_size = vector_size / block_size;
    if (vector_size % block_size) {
        grid_size = grid_size + 1;
    }
    vector_add<<<grid_size, block_size>>>(device_a, device_b, device_c, vector_size);

    cudaMemcpy(host_c, device_c, sizeof(float) * vector_size, cudaMemcpyDeviceToHost);

    float sum = 0;
    for (i = 0; i < vector_size; i++) {
        sum += host_c[i];
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("%f\n", elapsedTime);

    /* Fix: events were created but never destroyed. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
1,130
#include "includes.h" __global__ void update_positions( const int size, const double position_step, const double* force_per_atom, const double* position_per_atom, double* position_per_atom_temp) { const int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < size) { const double position_change = force_per_atom[n] * position_step; position_per_atom_temp[n] = position_per_atom[n] + position_change; } }
1,131
#include "includes.h" __global__ void calc_lut(int *lut, int * hist_in, int img_size, int nbr_bin){ __shared__ int shared_hist[256]; shared_hist[threadIdx.x] = hist_in[threadIdx.x]; __syncthreads(); int i, cdf, min, d; cdf = 0; min = 0; i = 0; while(min == 0){ min = shared_hist[i++]; } d = img_size - min; for(i = 0; i <= threadIdx.x; i ++){ //tha mporouse na ginei me prefix sum san veltistoipohsh FIXME cdf += shared_hist[i]; //lut[i] = (cdf - min)*(nbr_bin - 1)/d; } lut[threadIdx.x] = (int)(((float)cdf - min)*255/d + 0.5); if(lut[threadIdx.x] < 0){ lut[threadIdx.x] = 0; } }
1,132
#include "includes.h" __global__ void zupdate2_dummy(float *z1, float *z2, float *f, float tau, int nx, int ny) { int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; float a, b, t; if (px<nx && py<ny) { // compute the gradient a = 0; b = 0; float fc = f[idx]; // float fr=f[idx+1]; // float fu=f[idx+nx]; // if (!(px==(nx-1))) a = fr - fc; // if (!(py==(ny-1))) b = fu - fc; a = fc; b = fc; // update z t = 1 / (1 + tau*sqrtf(a*a + b*b)); z1[idx] = (z1[idx] + tau*a)*t; z2[idx] = (z2[idx] + tau*b)*t; } }
1,133
#include <stdio.h>
#include <stdlib.h>   /* fix: rand/srand/malloc/free were only available transitively */
#include <time.h>     /* fix: time() was only available transitively */
#include <thrust/extrema.h>
#include <thrust/device_vector.h>

/* Key/value pair; max_element compares by key. */
struct type {
    int key;
    int value;
};

/* Strict-weak ordering on keys, usable on both host and device. */
struct comparator {
    __host__ __device__ bool operator()(type a, type b) {
        return a.key < b.key;
    }
};

int main()
{
    srand(time(NULL));
    comparator comp;
    int i, i_max = -1, n = 100000;

    /* Build the input and track the CPU-side argmax for cross-checking. */
    type *arr = (type *)malloc(sizeof(type) * n);
    for (i = 0; i < n; i++) {
        arr[i].key = 5;
        arr[i].value = 5;
        if (i_max == -1 || comp(arr[i_max], arr[i]))
            i_max = i;
    }

    type *dev_arr;
    cudaMalloc(&dev_arr, sizeof(type) * n);
    cudaMemcpy(dev_arr, arr, sizeof(type) * n, cudaMemcpyHostToDevice);

    /* GPU argmax via thrust over the raw device pointer. */
    thrust::device_ptr<type> p_arr = thrust::device_pointer_cast(dev_arr);
    thrust::device_ptr<type> res = thrust::max_element(p_arr, p_arr + n, comp);
    int pos = (int)(res - p_arr);

    printf("cpu: %d\ngpu: %d\n", i_max, pos);
    printf("%d\n", arr[pos].key);

    cudaFree(dev_arr);
    free(arr);
    return 0;
}
1,134
/******************************************************************************* * Copyright (C) 2019 Marvin Löbel <loebel.marvin@gmail.com> * Copyright (C) 2019 Oliver Magiera <oliver.magiera@tu-dortmund.de> * * All rights reserved. Published under the BSD-3 license in the LICENSE file. ******************************************************************************/ #include<iostream> #include<cstdint> #include<cstddef> #include "cuda_util.cuh" #define map_single_error_code(e) case e: out << #e; break; static void map_error_code(cudaError e, std::ostream& out) { switch (e) { map_single_error_code(cudaErrorMissingConfiguration) map_single_error_code(cudaErrorMemoryAllocation) map_single_error_code(cudaErrorInitializationError) map_single_error_code(cudaErrorLaunchFailure) map_single_error_code(cudaErrorPriorLaunchFailure) map_single_error_code(cudaErrorLaunchTimeout) map_single_error_code(cudaErrorLaunchOutOfResources) map_single_error_code(cudaErrorInvalidDeviceFunction) map_single_error_code(cudaErrorInvalidConfiguration) map_single_error_code(cudaErrorInvalidDevice) map_single_error_code(cudaErrorInvalidValue) map_single_error_code(cudaErrorInvalidPitchValue) map_single_error_code(cudaErrorInvalidSymbol) map_single_error_code(cudaErrorMapBufferObjectFailed) map_single_error_code(cudaErrorUnmapBufferObjectFailed) map_single_error_code(cudaErrorInvalidHostPointer) map_single_error_code(cudaErrorInvalidDevicePointer) map_single_error_code(cudaErrorInvalidTexture) map_single_error_code(cudaErrorInvalidTextureBinding) map_single_error_code(cudaErrorInvalidChannelDescriptor) map_single_error_code(cudaErrorInvalidMemcpyDirection) map_single_error_code(cudaErrorAddressOfConstant) map_single_error_code(cudaErrorTextureFetchFailed) map_single_error_code(cudaErrorTextureNotBound) map_single_error_code(cudaErrorSynchronizationError) map_single_error_code(cudaErrorInvalidFilterSetting) map_single_error_code(cudaErrorInvalidNormSetting) 
map_single_error_code(cudaErrorMixedDeviceExecution) map_single_error_code(cudaErrorCudartUnloading) map_single_error_code(cudaErrorUnknown) map_single_error_code(cudaErrorNotYetImplemented) map_single_error_code(cudaErrorMemoryValueTooLarge) map_single_error_code(cudaErrorInvalidResourceHandle) map_single_error_code(cudaErrorNotReady) map_single_error_code(cudaErrorInsufficientDriver) map_single_error_code(cudaErrorSetOnActiveProcess) map_single_error_code(cudaErrorInvalidSurface) map_single_error_code(cudaErrorNoDevice) map_single_error_code(cudaErrorECCUncorrectable) map_single_error_code(cudaErrorSharedObjectSymbolNotFound) map_single_error_code(cudaErrorSharedObjectInitFailed) map_single_error_code(cudaErrorUnsupportedLimit) map_single_error_code(cudaErrorDuplicateVariableName) map_single_error_code(cudaErrorDuplicateTextureName) map_single_error_code(cudaErrorDuplicateSurfaceName) map_single_error_code(cudaErrorDevicesUnavailable) map_single_error_code(cudaErrorInvalidKernelImage) map_single_error_code(cudaErrorNoKernelImageForDevice) map_single_error_code(cudaErrorIncompatibleDriverContext) map_single_error_code(cudaErrorPeerAccessAlreadyEnabled) map_single_error_code(cudaErrorPeerAccessNotEnabled) map_single_error_code(cudaErrorDeviceAlreadyInUse) map_single_error_code(cudaErrorProfilerDisabled) map_single_error_code(cudaErrorProfilerNotInitialized) map_single_error_code(cudaErrorProfilerAlreadyStarted) map_single_error_code(cudaErrorProfilerAlreadyStopped) map_single_error_code(cudaErrorStartupFailure) map_single_error_code(cudaErrorApiFailureBase) map_single_error_code(cudaErrorIllegalAddress) default: out << "unknown code " << int(e); } } void cuda_check_internal(char const* file, int line, cudaError v, char const* reason) { if (v != cudaSuccess) { std::cerr << "CUDA ERROR at " << file << ":" << line << ": "; map_error_code(v, std::cerr); if (std::string(reason) != "") { std::cerr << " (" << reason << ")" << std::endl; } std::abort(); } }
1,135
#include <cuda.h>
//#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>

#define N 16

// Linear index into a row-major `ord` x `ord` matrix.
__device__ int index(int col, int row, int ord){
    return (row * ord) + col;
}

// Transposes a 4x4 row-major matrix: c = transpose(a).
// NOTE(review): the order is hard-coded to 4 here while N above is 16 and
// unused — confirm which value the launch configuration actually targets.
__global__ void Transpose(int *c, const int *a){
    const int ord = 4;  // matrix order this kernel operates on
    int col = (blockDim.x * blockIdx.x) + threadIdx.x;
    int row = (blockDim.y * blockIdx.y) + threadIdx.y;
    // Fix: guard the grid tail. The original indexed out of bounds whenever
    // the launch supplied more threads than matrix elements.
    if (col < ord && row < ord) {
        c[index(row, col, ord)] = a[index(col, row, ord)];
    }
}
1,136
#include <cuda_runtime_api.h>

// Adds a per-feature bias: out[b*dim + k] = in[b*dim + k] + bias[k].
// One thread per output element; excess threads fall outside the guard.
__global__ void linear_bias_fwd_kernel(
    const float *in_buf,
    int dim,
    int batch_size,
    const float *bias,
    float *out_buf)
{
  int gid = threadIdx.x + blockIdx.x * blockDim.x;
  int feature = gid % dim;
  int sample = gid / dim;
  if (feature < dim && sample < batch_size) {
    out_buf[gid] = in_buf[gid] + bias[feature];
  }
}

// Host wrapper: out_buf = in_buf + bias (broadcast over the batch).
extern "C" void neuralops_cuda_linear_bias_fwd(
    const float *in_buf,
    size_t dim,
    size_t batch_size,
    const float *bias,
    float *out_buf,
    cudaStream_t stream)
{
  int total = dim * batch_size;
  int grid = (total + 1024 - 1) / 1024;
  linear_bias_fwd_kernel<<<grid, 1024, 0, stream>>>(
      in_buf, dim, batch_size, bias, out_buf);
}

// Host wrapper: in-place variant — aliases out_buf as both input and output,
// which is safe because the kernel is purely elementwise.
extern "C" void neuralops_cuda_linear_bias_fwd_inplace(
    float *out_buf,
    size_t dim,
    size_t batch_size,
    const float *bias,
    cudaStream_t stream)
{
  int total = dim * batch_size;
  int grid = (total + 1024 - 1) / 1024;
  linear_bias_fwd_kernel<<<grid, 1024, 0, stream>>>(
      out_buf, dim, batch_size, bias, out_buf);
}

// Accumulates the bias gradient: in_grad[k] += sum over batch of
// out_grad[b*dim + k]. atomicAdd is required because every sample's thread
// for feature k targets the same accumulator.
__global__ void linear_bias_bwd_kernel(
    const float *out_grad,
    int dim,
    int batch_size,
    float *in_grad)
{
  int gid = threadIdx.x + blockIdx.x * blockDim.x;
  int feature = gid % dim;
  int sample = gid / dim;
  if (feature < dim && sample < batch_size) {
    atomicAdd(&in_grad[feature], out_grad[gid]);
  }
}

// Host wrapper for the bias-gradient reduction.
extern "C" void neuralops_cuda_linear_bias_bwd(
    const float *out_grad,
    size_t dim,
    size_t batch_size,
    float *in_grad,
    cudaStream_t stream)
{
  int total = dim * batch_size;
  int grid = (total + 1024 - 1) / 1024;
  linear_bias_bwd_kernel<<<grid, 1024, 0, stream>>>(
      out_grad, dim, batch_size, in_grad);
}
1,137
// Anti-diagonal dynamic-time-warping sweep. Each block (ci, cj) computes one
// cell CSM[ci*N + cj] by running a DTW-style recurrence over an M x N cost
// surface built from |SSMA[ci*M + i] - SSMB[cj*N + j]|.
// Requires 3 * diagLen * sizeof(float) bytes of dynamic shared memory.
// NOTE(review): the batching below hard-codes a 512-thread block
// (diagLenPow2 >> 9, idx = k*512 + threadIdx.x) — confirm blockDim.x == 512
// at every launch site.
__global__ void DTWSSM(float* SSMA, float* SSMB, float* CSM, int M, int N, int diagLen, int diagLenPow2) {
    //Have circularly rotating system of 3 buffers
    extern __shared__ float x[]; //Circular buffer
    int off = 0;      // index of the diagonal currently being written
    int upoff = 0;    // alignment offset of the previous diagonal
    //Other local variables
    int i, k;
    int i1, i2, j1, j2;
    int thisi, thisj;
    int idx;
    float val, score;
    int ci = blockIdx.x;
    int cj = blockIdx.y;
    //Figure out K (number of batches)
    int K = diagLenPow2 >> 9;
    if (K == 0) {
        K = 1;
    }
    //Initialize all buffer elements to -1 (-1 marks "no valid score")
    for (k = 0; k < K; k++) {
        for (off = 0; off < 3; off++) {
            if (512*k + threadIdx.x < diagLen) {
                x[512*k + threadIdx.x + off*diagLen] = -1;
            }
        }
    }
    off = 0;
    //Process each diagonal
    for (i = 0; i < N + M - 1; i++) {
        //Figure out the bounds of this diagonal
        i1 = i;
        j1 = 0;
        upoff = -1;
        if (i1 >= M) {
            i1 = M-1;
            j1 = i - (M-1);
            upoff = 0;
        }
        j2 = i;
        i2 = 0;
        if (j2 >= N) {
            j2 = N-1;
            i2 = i - (N-1);
        }
        //Update each batch (each thread may own several diagonal cells)
        for (k = 0; k < K; k++) {
            idx = k*512 + threadIdx.x;
            if (idx >= diagLen) {
                break;
            }
            thisi = i1 - idx;
            thisj = j1 + idx;
            // Cell falls outside this diagonal's valid range
            if (thisi < i2 || thisj > j2) {
                x[off*diagLen + idx] = -1;
                continue;
            }
            // Restrict to paths passing through (ci, cj)
            if (!((thisi <= ci && thisj <= cj) || (thisi >= ci && thisj >= cj))) {
                x[off*diagLen + idx] = -1;
                continue;
            }
            // Local cost: absolute difference of the two profiles
            val = SSMA[ci*M + thisi] - SSMB[cj*N + thisj];
            if (val < 0) {
                val *= -1;
            }
            score = -1;
            //Above (previous diagonal, shifted by upoff+1)
            if (idx + upoff + 1 < N + M - 1 && thisi > 0) {
                if (x[((off+1)%3)*diagLen + idx + upoff + 1] > -1) {
                    score = val + x[((off+1)%3)*diagLen + idx + upoff + 1];
                }
            }
            if (idx + upoff >= 0 && thisj > 0) { //Left
                if (x[((off+1)%3)*diagLen + idx + upoff] > -1) {
                    if (score == -1 || x[((off+1)%3)*diagLen + idx + upoff] + val < score) {
                        score = x[((off+1)%3)*diagLen + idx + upoff] + val;
                    }
                }
            }
            // Alignment of the diagonal-predecessor changes once the sweep
            // passes the bottom-left corner
            if (i1 == M-1 && j1 > 1) {
                upoff = 1;
            }
            // Diagonal step is forbidden across the (ci, cj) crossing cells
            if (!((thisi == ci && thisj == cj + 1) || (thisi == ci + 1 && thisj == cj))) {
                if (idx + upoff >= 0 && thisi > 0) { //Diagonal
                    if (x[((off+2)%3)*diagLen + idx + upoff] > -1) {
                        if (score == -1 || x[((off+2)%3)*diagLen + idx + upoff] + val < score) {
                            score = x[((off+2)%3)*diagLen + idx + upoff] + val;
                        }
                    }
                }
            }
            // No predecessor: path starts here
            if (score == -1) {
                score = val;
            }
            x[off*diagLen + idx] = score;
            // Last diagonal holds the final alignment cost
            if (i == N + M - 2) {
                CSM[ci*N + cj] = score;
            }
        }
        off = (off + 2) % 3; //Cycle buffers
        __syncthreads();
    }
}
1,138
#include "includes.h"

// Accumulates weight gradients (`delta`) and bias gradients (`biasDelta`) for
// a 2-D convolution layer. One thread handles one (kernel k, output row j,
// output col i) triple; all threads of a kernel accumulate into the same
// gradient cells, so the accumulation must be atomic.
// Requires SM60+ for atomicAdd on double.
// NOTE(review): `momentum` is accepted but never used in this kernel —
// confirm whether callers expect it to be applied here.
__global__ void cuda_conv2D_updateDeltas(double* delta, double* biasDelta, const double* upStreamActivation, const double* err, double momentum, size_t kernelCount, size_t kernelRows, size_t kernelCols, size_t outputRows, size_t outputCols, size_t inputChannels, size_t inputRows, size_t inputCols, size_t padding, size_t stride)
{
    // Decompose the flat thread id into (i, j, k); excess threads return.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    size_t i = id % outputCols;
    id /= outputCols;
    size_t j = id % outputRows;
    id /= outputRows;
    if(id >= kernelCount)
        return;
    size_t k = id;

    // Offsets into the output tensor and the (padded) input window.
    size_t outChannelOffset = k * outputRows * outputCols;
    size_t outRowOffset = j * outputCols;
    int inRowOffset = j * stride - padding;

    size_t kk = k * inputChannels * kernelRows * kernelCols;
    size_t index = outChannelOffset + outRowOffset + i;
    int inColOffset = i * stride - padding;

    // Fix: every (i, j) thread of kernel k adds into biasDelta[k]; the
    // original plain += was a data race.
    atomicAdd(&biasDelta[k], err[index]);

    for(size_t z = 0; z < inputChannels; z++)
    {
        size_t kernelChannelOffset = z * kernelRows * kernelCols;
        size_t inChannelOffset = z * inputRows * inputCols;
        for(size_t y = 0; y < kernelRows; y++)
        {
            size_t kernelRowOffset = y * kernelCols;
            int inRow = inRowOffset + y;
            for(size_t x = 0; x < kernelCols; x++)
            {
                int inCol = inColOffset + x;
                // Fix: the column bound compared against inputRows instead
                // of inputCols (only correct for square inputs).
                if(inRow >= 0 && inRow < (int)inputRows && inCol >= 0 && inCol < (int)inputCols)
                {
                    size_t idx = inChannelOffset + inputCols * inRow + inCol;
                    // Fix: the same weight-gradient cell is updated by every
                    // (i, j) thread of this kernel — accumulate atomically.
                    atomicAdd(&delta[kk + kernelChannelOffset + kernelRowOffset + x],
                              err[index] * upStreamActivation[idx]);
                }
            }
        }
    }
}
1,139
#include <stdio.h>
#include <stdlib.h>

// Reverses an n-element array in place using a shared-memory staging buffer.
// The static buffer is sized for the fixed n == 64 launch in main below.
__global__ void kernel(int *d, int n){
    __shared__ int s[64];
    int t = threadIdx.x;
    int tr = n - t - 1;
    s[t] = d[t];
    // Barrier: every element must be staged before any thread reads its
    // mirrored slot.
    __syncthreads();
    d[t] = s[tr];
}

int main (void)
{
    const int n = 64;
    int a[n], r[n], d[n];
    for(int i = 0; i < n; i++) {
        a[i] = i;          // input 0..n-1
        r[i] = n - i - 1;  // expected reversed result
        d[i] = 0;
    }

    int *d_d;
    cudaMalloc(&d_d, n * sizeof(int));
    cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
    kernel<<<1,n>>>(d_d, n);
    cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);

    // Fix: count mismatches and report once with a newline; the original
    // printed the message once per mismatching element with no newline.
    int errores = 0;
    for (int i = 0; i < n; i++)
        if (d[i] != r[i])
            errores++;
    if (errores > 0)
        printf("Verificar- Hay un error\n");

    printf("En teoria 3 deberia ser igual a %d\n", d[n-3-1]);

    cudaFree(d_d);  // Fix: the device allocation was leaked
    return 0;
}
1,140
#include "includes.h"

// Linear interpolation between two equally sized buffers:
//   output = (1 - weight) * input1 + weight * input2
// with weight <= 0 copying input1 verbatim and weight >= 1 copying input2.
// The flat index supports a 2-D grid of 1-D blocks.
__global__ void Interpolate(float* input1, float* input2, float* output, float weight, int inputSize)
{
    int threadId = blockDim.x * blockIdx.y * gridDim.x   // rows preceding current row in grid
                 + blockDim.x * blockIdx.x               // blocks preceding current block
                 + threadIdx.x;

    // Guard the grid tail.
    if (threadId >= inputSize)
        return;

    if (weight <= 0) {
        output[threadId] = input1[threadId];
        return;
    }
    if (weight >= 1) {
        output[threadId] = input2[threadId];
        return;
    }
    output[threadId] = (1 - weight) * input1[threadId] + weight * input2[threadId];
}
1,141
#include <cuda.h>
#include <cuComplex.h>

// Launch configuration should be as follows:
// 1. blocks of dim3(threads_per_dim, threads_per_dim, 1) size
// where threads_per_dim = min(N, 16)
// 2. grid of dim3((N+threads_per_dim-1)/threads_per_dim, (N-1)/(threads_per_dim * 2)+1, 1) blocks

// Swaps element (x, y) with its shifted partner (x+shift, y+shift) mod N.
// Only the top half of rows does the swap so each pair is exchanged once.
// `pitch` is the row stride in elements.
template <class T>
static __device__ __inline__ void fftshift_kernel_common(T* data, int shift, int N, int pitch){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Restrict y to the first ceil(N/2) rows to avoid double swaps.
    if (x > N-1 || y > (N+1)/2-1) return;
    int x1 = x + shift;
    if (x1 > N-1) x1 -= N;
    int y1 = y + shift;
    if (y1 > N-1) y1 -= N;
    int i = x + y * pitch, i1 = x1 + y1 * pitch;
    T tmp;
    tmp = data[i];
    data[i] = data[i1];
    data[i1] = tmp;
}

extern "C" {
// fftshift for a half-Hermitian (r2c) layout: swap plus complex conjugation.
// NOTE(review): assumes the x extent of the stored half-spectrum is N/2+1-ish
// with pairs confined to x < N/4 — confirm against the host-side cuFFT plan.
__global__ void fftshift_half_hermitian(cuDoubleComplex* data, int N, int pitch) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= N/4 || y >= N) return;
    // shift+mirror
    int x1 = N/2 - x - 1; // N - (N + N/2) - 1
    int y1 = N/2 - y - 1; // N - (N + N/2) - 1
    if (y1 < 0) y1 += N;
    // offsets
    int i = x+y*pitch, i1 = x1+y1*pitch;
    // Swap + conjugate (note that x == N/2-x1-1!)
    cuDoubleComplex tmp;
    tmp = data[i];
    data[i].x = data[i1].x;
    data[i].y = -data[i1].y;
    data[i1].x = tmp.x;
    data[i1].y = -tmp.y;
}

// fftshift / ifftshift wrappers: shift by floor(N/2) vs ceil(N/2), which
// differ only for odd N.
__global__ void fftshift_kernel_cx(cuDoubleComplex* data, int N, int pitch) {
    fftshift_kernel_common<cuDoubleComplex>(data, N/2, N, pitch);
}
__global__ void ifftshift_kernel_cx(cuDoubleComplex* data, int N, int pitch) {
    fftshift_kernel_common<cuDoubleComplex>(data, N/2 + N%2, N, pitch);
}
__global__ void fftshift_kernel_r(double* data, int N, int pitch) {
    fftshift_kernel_common<double>(data, N/2, N, pitch);
}
__global__ void ifftshift_kernel_r(double* data, int N, int pitch) {
    fftshift_kernel_common<double>(data, N/2 + N%2, N, pitch);
}
}
1,142
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <sys/timeb.h>

// Hypercube
// Version: optimisé pour une partagée
// On sépare le gros tableau en multiples sous-tableaux qu'on réduit de la même manière
// Pas limité en taille

#define pow2(x) (1<<(x))

// Nombre de threads par bloc
#define NBTHREADS_MAX 1024

#define check(error) { checkCudaError((error), __FILE__, __LINE__); }

// Aborts with a diagnostic when a CUDA call fails.
void checkCudaError(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "Erreur CUDA: %s:%d %s\n", file, line, cudaGetErrorString(code));
        exit(EXIT_FAILURE);
    }
}

// Kernel de la somme hypercube
// stride: puissances de NBTHREADS pour savoir à quelle itération on réduit
// Each block reduces blockDim.x strided elements in shared memory with a
// hypercube pairing and writes the partial sum to t[off].
__global__ void kernel_hypercube(int *t, int tailleTotal, int stride)
{
    __shared__ int s[NBTHREADS_MAX];
    const int off = stride * blockIdx.x * blockDim.x; // Position absolue de la 1ière case pour ce block
    const int x = threadIdx.x;
    const int nbits = (int)ceil(log2((double)blockDim.x));
    int d, mask, in, out;

    // Chaque thread copie dans la mémoire partagée
    if(threadIdx.x * stride + off < tailleTotal) {
        s[threadIdx.x] = t[threadIdx.x * stride + off];
    } else {
        // on met 0, comme ça quand on réduit, cela ne change pas le résultat
        // sans avoir besoin de vérifier les bords
        // (possiblement en dehors pour le dernier block uniquement)
        s[threadIdx.x] = 0;
    }
    __syncthreads();

    // Réduction optimisée sur 1024 éléments
    for(d = 1; d <= nbits; ++d) {
        if (x < pow2(nbits - d)) {
            mask = x << d;
            in = mask | pow2(d - 1);
            out = mask;
            s[out] += s[in];
        }
        // On doit synchroniser mêmes les threads en dehors sinon deadlock
        __syncthreads();
    }

    // Copie de la somme total de s dans la 1ière case du tableau
    if(threadIdx.x == 0) {
        t[off] = s[0];
    }
}

// Wrap l'appel du kernel
// Retourne le nombre de millisecondes écoulées
float hypercube(int *h_t, int taille)
{
    float millis;
    int nbBlocks;
    int nBytes = sizeof(int) * taille;
    int *d_t = NULL;
    int stride = 1;
    cudaEvent_t start, stop;

    check(cudaMalloc(&d_t, nBytes));
    check(cudaEventCreate(&start));
    check(cudaEventCreate(&stop));
    check(cudaMemcpy(d_t, h_t, nBytes, cudaMemcpyHostToDevice));

    // Fix: the start event record was unchecked while every other call used
    // check().
    check(cudaEventRecord(start));
    do {
        printf("Réduction: stride=%d\n", stride);
        nbBlocks = (int)ceil((double)taille / stride / NBTHREADS_MAX);
        printf("nbBlocks=%d\n", nbBlocks);
        kernel_hypercube<<<nbBlocks, NBTHREADS_MAX>>>(d_t, taille, stride);
        check(cudaDeviceSynchronize());
        stride *= NBTHREADS_MAX;
    } while(stride < taille);
    check(cudaEventRecord(stop));
    check(cudaEventSynchronize(stop));
    check(cudaEventElapsedTime(&millis, start, stop));

    check(cudaMemcpy(h_t, d_t, nBytes, cudaMemcpyDeviceToHost));
    check(cudaFree(d_t));
    check(cudaEventDestroy(start));
    check(cudaEventDestroy(stop));
    return millis;
}

// Somme séquentielle
// Fix: the accumulator is long but the function returned int, silently
// truncating large sums; it now returns long (the caller already stores the
// result in a long).
long somme(int *arr, int taille)
{
    long i, tot = 0;
    for(i = 0; i < taille; ++i) {
        tot += arr[i];
    }
    return tot;
}

// Remplit le tableau de valeurs 0..2
void fillRandomly(int *t, int taille)
{
    int i;
    for(i = 0; i < taille; ++i) {
        t[i] = rand() % 3;
    }
}

void printArr(int *t, int taille)
{
    int i;
    for(i = 0; i < taille; ++i) {
        printf("%d ", t[i]);
    }
    printf("\n");
}

// Toujours la même graine pour qu'on puisse avoir des résultats reproductibles
int main(int argc, char **argv)
{
    float millis;
    size_t nBytes;
    int *h_arr = NULL;
    struct timeb tav, tap;
    double te;
    long somCpu;
    size_t taille = argc < 2 ? 1000000 : strtol(argv[1], NULL, 10);

    nBytes = sizeof(int) * taille;
    h_arr = (int*)malloc(nBytes);
    if(!h_arr) {
        fprintf(stderr, "Erreur d'allocation mémoire host\n");
        exit(1);
    }
    srand(1234);
    fillRandomly(h_arr, taille);
    if(taille < 100) printArr(h_arr, taille);

    ftime(&tav);
    somCpu = somme(h_arr, taille);
    ftime(&tap);
    te = (double)((tap.time*1000+tap.millitm)-(tav.time*1000+tav.millitm))/1000;

    // Fix: report size with %zu (taille is size_t) and compute MiB as
    // bytes / 1024 / 1024; the original divided by 512, overstating by 2x.
    printf("%zu éléments, %.3fMo\n", taille, taille * sizeof(int) / 1024.0 / 1024.0);
    printf("SequentielCPU: %ld, %.3lfs\n", somCpu, te);

    // somme_hypercube() change le tableau, on remet comme avant
    srand(1234);
    fillRandomly(h_arr, taille);
    millis = hypercube(h_arr, taille);
    if(taille < 100) printArr(h_arr, taille);
    printf("HypercubeCUDA: %d, %.3fs\n", h_arr[0], millis / 1000.0f);

    free(h_arr);
    return 0;
}
1,143
/**
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

/**
 * This sample is a simple code that illustrates basic usage of
 * cooperative groups within the thread block. The code launches a single
 * thread block, creates a cooperative group of all threads in the block,
 * and a set of tiled partition cooperative groups. For each, it uses a
 * generic reduction function to calculate the sum of all the ranks in
 * that group. In each case the result is printed, together with the
 * expected answer (which is calculated using the analytical formula
 * (n-1)*n)/2, noting that the ranks start at zero).
 */

#include <stdio.h>
#include <stdlib.h>  // Fix: EXIT_FAILURE / exit were used without this include
#include <cooperative_groups.h>

using namespace cooperative_groups;

/**
 * CUDA device function
 *
 * calculates the sum of val across the group g. The workspace array, x,
 * must be large enough to contain g.size() integers. The halving loop
 * assumes g.size() is a power of two (true for the launches below).
 */
__device__ int sumReduction(thread_group g, int *x, int val) {
    // rank of this thread in the group
    int lane = g.thread_rank();

    // for each iteration of this loop, the number of threads active in the
    // reduction, i, is halved, and each active thread (with index [lane])
    // performs a single summation of it's own value with that
    // of a "partner" (with index [lane+i]).
    for (int i = g.size()/2; i > 0; i /= 2) {
        // store value for this thread in temporary array
        x[lane] = val;
        // synchronize all threads in group
        g.sync();
        if (lane < i)
            // active threads perform summation of their value with
            // their partner's value
            val += x[lane + i];
        // synchronize all threads in group
        g.sync();
    }

    // master thread in group returns result, and others return -1.
    if (g.thread_rank()==0)
        return val;
    else
        return -1;
}

/**
 * CUDA kernel device code
 *
 * Creates cooperative groups and performs reductions.
 * Requires blockDim.x * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void cgkernel(){
    // threadBlockGroup includes all threads in the block
    thread_block threadBlockGroup = this_thread_block();
    int threadBlockGroupSize=threadBlockGroup.size();

    // workspace array in shared memory required for reduction
    extern __shared__ int workspace[];

    int input, output, expectedOutput;

    // input to reduction, for each thread, is its' rank in the group
    input=threadBlockGroup.thread_rank();

    // expected output from analytical formula (n-1)(n)/2
    // (noting that indexing starts at 0 rather than 1)
    expectedOutput=(threadBlockGroupSize-1)*threadBlockGroupSize/2;

    // perform reduction
    output=sumReduction(threadBlockGroup, workspace, input);

    // master thread in group prints out result
    if(threadBlockGroup.thread_rank()==0){
        printf(" Sum of all ranks 0..%d in threadBlockGroup is %d (expected %d)\n\n", (int)threadBlockGroup.size()-1,output, expectedOutput);
        printf(" Now creating %d groups, each of size 16 threads:\n\n", (int)threadBlockGroup.size()/16);
    }

    threadBlockGroup.sync();

    // each tiledPartition16 group includes 16 threads
    thread_block_tile<16> tiledPartition16 = tiled_partition<16>(threadBlockGroup);

    // This offset allows each group to have its own unique area in the workspace array
    int workspaceOffset=threadBlockGroup.thread_rank()-tiledPartition16.thread_rank();

    // input to reduction, for each thread, is its' rank in the group
    input=tiledPartition16.thread_rank();

    // expected output from analytical formula (n-1)(n)/2
    // (noting that indexing starts at 0 rather than 1)
    expectedOutput=15*16/2;

    // Perform reduction
    output=sumReduction(tiledPartition16, workspace+workspaceOffset, input);

    // each master thread prints out result
    if(tiledPartition16.thread_rank()==0)
        printf(" Sum of all ranks 0..15 in this tiledPartition16 group is %d (expected %d)\n",output,expectedOutput);

    return;
}

/**
 * Host main routine
 */
int main(){
    // Error code to check return values for CUDA calls
    cudaError_t err;

    // Launch the kernel
    int blocksPerGrid=1;
    int threadsPerBlock=64;

    printf("\nLaunching a single block with %d threads...\n\n",threadsPerBlock);

    // we use the optional third argument to specify the size
    // of shared memory required in the kernel
    cgkernel <<<blocksPerGrid,threadsPerBlock,threadsPerBlock*sizeof(int)>>> ();

    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("\n...Done.\n\n");
    return 0;
}
1,144
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###

#include <cuda_runtime.h>
#include <iostream>
using namespace std;

// cuda error checking: reports and exits on the most recent CUDA error.
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
    cudaError_t e = cudaGetLastError();
    if (e != cudaSuccess)
    {
        cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
        exit(1);
    }
}

// Device helper: c[i] = a[i] + b[i] for the calling thread's element.
__device__ void add ( float *a, float *b, float *c,int n )
{
    int ind = threadIdx.x + blockDim.x * blockIdx.x;
    if (ind < n)
        c[ind] = a[ind] + b[ind];
}

// Kernel wrapper around the device-side add helper.
__global__ void vecAdd ( float *a, float *b, float *c,int n )
{
    add( a, b, c, n);
}

int main(int argc, char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 20;
    float *a = new float[n];
    float *b = new float[n];
    float *c = new float[n];
    for(int i=0; i<n; i++)
    {
        a[i] = i;
        b[i] = (i%5)+1;
        c[i] = 0;
    }

    // CPU computation
    for(int i=0; i<n; i++) c[i] = a[i] + b[i];

    // print result
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // init c
    for(int i=0; i<n; i++) c[i] = 0;

    // GPU computation
    // Fix: per the exercise note above, every CUDA call is now followed by
    // CUDA_CHECK; the original implementation performed no error checking.
    float *d_a, *d_b, *d_c;
    cudaMalloc( &d_a, n * sizeof(float) ); CUDA_CHECK;
    cudaMalloc( &d_b, n * sizeof(float) ); CUDA_CHECK;
    cudaMalloc( &d_c, n * sizeof(float) ); CUDA_CHECK;

    cudaMemcpy( d_a, a, n * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
    cudaMemcpy( d_b, b, n * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
    cudaMemcpy( d_c, c, n * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;

    // Device block allocation
    dim3 block = dim3(64, 1, 1); // 64 threads
    // allocate blocks in grid (ceil-div so the tail is covered)
    dim3 grid = dim3( (n + block.x - 1 ) / block.x, 1, 1);

    vecAdd <<< grid, block >>> (d_a, d_b, d_c, n);
    CUDA_CHECK;  // catches invalid launch configurations

    cudaMemcpy ( c, d_c, n * sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;

    cudaFree(d_a); CUDA_CHECK;
    cudaFree(d_b); CUDA_CHECK;
    cudaFree(d_c); CUDA_CHECK;

    // print result
    cout << "GPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // free CPU arrays
    delete[] a;
    delete[] b;
    delete[] c;
}
1,145
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>
#include "Sudoku.cuh"

/**
 * Takes array and resets all values to false.
 */
__device__ void clearArray(bool *arr, int size) {
    for (int i = 0; i < size; i++) {
        arr[i] = false;
    }
}

/**
 * Checks if the state of board is valid.
 * board is one-dimensional array which stores the sudoku board.
 */
__device__ bool isBoardValid(const int *board) {
    bool visited[N]; // indicates already visited values in row or column or sub-board
    clearArray(visited, N);

    // rows
    for (int row = 0; row < N; row++) {
        clearArray(visited, N);
        // Fix: the loop condition tested `row < N` while incrementing `col`,
        // which never terminated and read past the row.
        for (int col = 0; col < N; col++) {
            int value = board[row * N + col];
            if (value != 0) {
                if (visited[value - 1]) {
                    return false;
                } else {
                    visited[value - 1] = true;
                }
            }
        }
    }

    // columns
    for (int row = 0; row < N; row++) {
        clearArray(visited, N);
        for (int col = 0; col < N; col++) {
            int val = board[col * N + row];
            if (val != 0) {
                if (visited[val - 1]) {
                    return false;
                } else {
                    visited[val - 1] = true;
                }
            }
        }
    }

    // sub-boards
    for (int subr = 0; subr < n; subr++) {
        for (int subc = 0; subc < n; subc++) {
            clearArray(visited, N);
            for (int i = 0; i < n; i++) {
                for (int j = 0; j < n; j++) {
                    int value = board[(subr * n + i) * N + (subc * n + j)];
                    if (value != 0) {
                        if (visited[value - 1]) {
                            return false;
                        } else {
                            visited[value - 1] = true;
                        }
                    }
                }
            }
        }
    }

    // the board is valid
    return true;
}

/**
 * Takes a board and an index between 0 and N * N - 1. This function assumes the board
 * without the value at index is valid and checks for validity given the new change.
 *
 * index: index of the changed value
 */
__device__ bool isBoardValid(const int *board, int index) {
    // Consistency: derive the row/column and value bounds from N instead of
    // the hard-coded 9 (identical behavior for the standard 9x9 board).
    int r = index / N;
    int c = index % N;

    if (index < 0) {
        return isBoardValid(board);
    }
    if ((board[index] < 1) || (board[index] > N)) { // not the values from sudoku
        return false;
    }

    bool visited[N]; // from 0 to N-1
    clearArray(visited, N);

    // row (with the value at index)
    for (int i = 0; i < N; i++) {
        int value = board[r * N + i];
        if (value != 0) {
            if (visited[value - 1]) {
                return false;
            } else {
                visited[value - 1] = true;
            }
        }
    }

    clearArray(visited, N);
    // column (with the value at index)
    for (int j = 0; j < N; j++) {
        int value = board[j * N + c];
        if (value != 0) {
            if (visited[value - 1]) {
                return false;
            } else {
                visited[value - 1] = true;
            }
        }
    }

    // sub-board
    int subr = r / n;
    int subc = c / n;
    clearArray(visited, N);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            int value = board[(subr * n + i) * N + (subc * n + j)];
            if (value != 0) {
                if (visited[value - 1]) {
                    return false;
                } else {
                    visited[value - 1] = true;
                }
            }
        }
    }

    // valid
    return true;
}

/**
 * Each thread solves a different board using the backtracking algorithm.
 *
 * boards: The array of boards N*N, where the number of boards is numBoards,
 * boards[x*N*N + r*N + c] - specific value in board x.
 *
 * numBoards: The total number of boards in the boards array.
 *
 * emptySpaces: The array which stores indices of empty spaces, the size of array is numBoards * N * N
 *
 * numEmptySpaces: The array which stores number of empty spaces in each board of boards.
 *
 * finished: The flag indicating to stop the kernel when solution is found.
 *
 * solved: Output array with solution N*N.
 */
__global__ void sudokuBacktrack(int *boards,
        const int numBoards,
        int *emptySpaces,
        int *numEmptySpaces,
        int *finished,
        int *solved) {
    int index = blockDim.x * blockIdx.x + threadIdx.x; // the number of board

    int *currentBoard;
    int *currentEmptySpaces;
    int currentNumEmptySpaces;

    while ((*finished == 0) && (index < numBoards)) { // not finished, not all boards done
        int emptyIndex = 0; // empty spaces index
        currentBoard = boards + index * N * N;                // select board
        currentEmptySpaces = emptySpaces + index * N * N;     // the empty spaces indices
        currentNumEmptySpaces = numEmptySpaces[index];        // the number of empty spaces

        while ((emptyIndex >= 0) && (emptyIndex < currentNumEmptySpaces)) { // walk through empty spaces
            currentBoard[currentEmptySpaces[emptyIndex]]++;
            if (!isBoardValid(currentBoard, currentEmptySpaces[emptyIndex])) {
                // all numbers were tried, backtrack
                // (N instead of the hard-coded 9, identical for 9x9)
                if (currentBoard[currentEmptySpaces[emptyIndex]] >= N) {
                    currentBoard[currentEmptySpaces[emptyIndex]] = 0;
                    emptyIndex--;
                }
            }
            // move forward
            else {
                emptyIndex++;
            }
        }

        if (emptyIndex == currentNumEmptySpaces) { // all spaces filled
            // we found the solution
            *finished = 1;
            // copy board to output
            for (int i = 0; i < N * N; i++) {
                solved[i] = currentBoard[i];
            }
        }

        index += gridDim.x * blockDim.x; // move to next board
    }
}

void cudaSudokuBacktrack(const unsigned int blocks,
        const unsigned int threadsPerBlock,
        int *boards,
        const int numBoards,
        int *emptySpaces,
        int *numEmptySpaces,
        int *finished,
        int *solved) {
    sudokuBacktrack << <blocks, threadsPerBlock >> > (boards, numBoards, emptySpaces, numEmptySpaces, finished, solved);
}

/**
 * This is generating kernel, which generates next boards from old one.
 * Uses breadth first search to find new boards.
 *
 * old_boards: Each N * N section is another board. This array stores the previous set of boards.
 *
 * new_boards: This array stores the next set of boards.
 *
 * total_boards: Number of old boards.
 *
 * board_index: Index specifying the index of the next frontier in new_boards.
 *
 * empty_spaces: Each N * N section is another board, storing the
 * indices of empty spaces in new_boards.
 *
 * empty_space_count: empty spaces number in corresponding board.
 */
__global__ void cudaBFSKernel(int *old_boards,
        int *new_boards,
        int total_boards,
        int *board_index,
        int *empty_spaces,
        int *empty_space_count) {
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; // index of board

    while (index < total_boards) {
        // empty space index
        int found = 0;
        for (int i = (index * N * N); (i < (index * N * N) + N * N) && (found == 0); i++) { // search in each board
            // found an empty space
            if (old_boards[i] == 0) {
                found = 1;
                int temp = i - N * N * index;
                int row = temp / N;
                int col = temp % N;

                // try numbers
                for (int attempt = 1; attempt <= N; attempt++) {
                    int correct = 1;
                    // row constraint, test columns
                    for (int c = 0; c < N; c++) {
                        if (old_boards[row * N + c + N * N * index] == attempt) { // found equal in column
                            correct = 0;
                        }
                    }
                    // column constraint, test rows
                    for (int r = 0; r < N; r++) {
                        if (old_boards[r * N + col + N * N * index] == attempt) { // found equal in row
                            correct = 0;
                        }
                    }
                    // sub-board
                    // Fix: the loops ran while r < n (and c < n), so every
                    // sub-board outside the first band/stack was never
                    // scanned; the upper bound is the start of the sub-board
                    // plus n.
                    for (int r = n * (row / n); r < n * (row / n) + n; r++) {
                        for (int c = n * (col / n); c < n * (col / n) + n; c++) {
                            if (old_boards[r * N + c + N * N * index] == attempt) { // equal in sub-board
                                correct = 0;
                            }
                        }
                    }

                    if (correct == 1) {
                        // copy the whole board to new boards
                        int next_board_index = atomicAdd(board_index, 1); // stores result back at same address
                        int empty_index = 0;
                        for (int r = 0; r < N; r++) {
                            for (int c = 0; c < N; c++) {
                                new_boards[next_board_index * N * N + r * N + c] = old_boards[index * N * N + r * N + c];
                                if (old_boards[index * N * N + r * N + c] == 0 && (r != row || c != col)) {
                                    empty_spaces[empty_index + N * N * next_board_index] = r * N + c; // the index of empty space
                                    empty_index++; // count empty spaces
                                }
                            }
                        }
                        empty_space_count[next_board_index] = empty_index;
                        new_boards[next_board_index * N * N + row * N + col] = attempt; // put the correct number
                    }
                }
            }
        }

        index += blockDim.x * gridDim.x; // move forward
    }
}

void callBFSKernel(const unsigned int blocks,
        const unsigned int threadsPerBlock,
        int *old_boards,
        int *new_boards,
        int total_boards,
        int *board_index,
        int *empty_spaces,
        int *empty_space_count) {
    cudaBFSKernel << <blocks, threadsPerBlock >> > (old_boards, new_boards, total_boards, board_index, empty_spaces, empty_space_count);
}
1,146
#include "stdio.h"

#define N 10

// Elementwise vector add: c[i] = a[i] + b[i], one block per element.
__global__ void add(int *a,int *b,int *c)
{
    int tID = blockIdx.x;
    if (tID < N)
    {
        c[tID] = a[tID] + b[tID];
    }
}

int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    cudaMalloc((void**)&dev_a, N*sizeof(int));
    cudaMalloc((void**)&dev_b, N*sizeof(int));
    cudaMalloc((void**)&dev_c, N*sizeof(int));

    for(int i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = i;
    }

    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);

    add<<<N,1>>>(dev_a, dev_b, dev_c);

    // Blocking copy also synchronizes with the kernel before c is read.
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);

    for(int i = 0; i < N; i++)
    {
        printf("%d + %d is %d\n", a[i], b[i], c[i]);
    }

    // Fix: device allocations were leaked.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
1,147
// CUDA by Example
// Ch10: page-locked (pinned) host memory

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Prints the failing call's location and terminates the program.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

#define SIZE (10*1024*1024)

// Times 100 copies of `size` ints between pageable host memory and the
// device; `up` selects host->device. Returns elapsed milliseconds.
float cuda_malloc_test(int size, bool up) {
    cudaEvent_t start, stop;
    int *a, *d_a;
    float d_t;

    // Fix: all CUDA calls now go through HANDLE_ERROR — the macro was
    // defined but never used, so failures were silently ignored.
    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));

    a = (int*)malloc(size*sizeof(*a));
    if (!a) {
        printf("host malloc failed\n");
        exit(EXIT_FAILURE);
    }
    HANDLE_ERROR(cudaMalloc(&d_a, size * sizeof(*a)));

    HANDLE_ERROR(cudaEventRecord(start, 0));
    for (int i = 0; i < 100; i++) {
        if (up) {
            HANDLE_ERROR(cudaMemcpy(d_a, a, size * sizeof(*a), cudaMemcpyHostToDevice));
        } else {
            HANDLE_ERROR(cudaMemcpy(a, d_a, size * sizeof(*a), cudaMemcpyDeviceToHost));
        }
    }
    HANDLE_ERROR(cudaEventRecord(stop, 0));
    HANDLE_ERROR(cudaEventSynchronize(stop));
    HANDLE_ERROR(cudaEventElapsedTime(&d_t, start, stop));

    HANDLE_ERROR(cudaEventDestroy(start));
    HANDLE_ERROR(cudaEventDestroy(stop));
    free(a);
    HANDLE_ERROR(cudaFree(d_a));
    return d_t;
}

// Same benchmark as above, but with page-locked (pinned) host memory.
float cuda_host_alloc_test(int size, bool up) {
    cudaEvent_t start, stop;
    int *a, *d_a;
    float d_t;

    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));

    HANDLE_ERROR(cudaHostAlloc(&a, size*sizeof(*a), cudaHostAllocDefault));
    HANDLE_ERROR(cudaMalloc(&d_a, size*sizeof(*a)));

    HANDLE_ERROR(cudaEventRecord(start, 0));
    for (int i = 0; i < 100; i++) {
        if (up) {
            HANDLE_ERROR(cudaMemcpy(d_a, a, size * sizeof(*a), cudaMemcpyHostToDevice));
        } else {
            HANDLE_ERROR(cudaMemcpy(a, d_a, size * sizeof(*a), cudaMemcpyDeviceToHost));
        }
    }
    HANDLE_ERROR(cudaEventRecord(stop, 0));
    HANDLE_ERROR(cudaEventSynchronize(stop));
    HANDLE_ERROR(cudaEventElapsedTime(&d_t, start, stop));

    HANDLE_ERROR(cudaEventDestroy(start));
    HANDLE_ERROR(cudaEventDestroy(stop));
    HANDLE_ERROR(cudaFreeHost(a));
    HANDLE_ERROR(cudaFree(d_a));
    return d_t;
}

int main() {
    float elapsedTime;
    float MB = (float)100 * SIZE * sizeof(int) / 1024 / 1024;

    elapsedTime = cuda_malloc_test(SIZE, true);
    printf("Time using cudaMalloc: %3.1f ms\n", elapsedTime);
    printf("\t MB/s during copy up: %3.1f\n", MB/(elapsedTime/1000));

    elapsedTime = cuda_malloc_test(SIZE, false);
    printf("Time using cudaMalloc: %3.1f ms\n", elapsedTime);
    printf("\t MB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000));

    elapsedTime = cuda_host_alloc_test(SIZE, true);
    printf("Time using cudaHostAlloc: %3.1f ms\n", elapsedTime);
    printf("\t MB/s during copy up: %3.1f\n", MB / (elapsedTime / 1000));

    elapsedTime = cuda_host_alloc_test(SIZE, false);
    printf("Time using cudaHostAlloc: %3.1f ms\n", elapsedTime);
    printf("\t MB/s during copy down: %3.1f\n", MB / (elapsedTime / 1000));

    return 0;
}
1,148
// Computes per-point Mandelbrot escape iteration counts.
// For each c = cR + i*cI, iterates z -> z^2 + c from z = 0 and stores the
// first iteration (capped at 255) at which |z|^2 >= 4.
extern "C"
__global__ void add(int n, float *cRarr, float *cIarr, int *result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float cR = cRarr[i];
        float cI = cIarr[i];
        int iter;  // renamed: the original loop variable shadowed parameter n
        float x = 0;
        float y = 0;
        // Fix: the escape test checked only y*y < 4; the Mandelbrot bailout
        // condition is on the full magnitude, x*x + y*y < 4.
        for (iter = 0; (x * x + y * y) < 4 && iter < 255; iter++) {
            float xNew = (x * x) - (y * y) + cR;
            y = (2 * x * y) + cI;
            x = xNew;
        }
        result[i] = iter;
    }
}
1,149
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <iostream>
#include <fstream>
#include <time.h>
#include <thrust/device_vector.h>

#define BLOCK_SIZE 16

// out = vec * mat, where vec is 1 x N and mat is stored row-major as
// N rows x M columns (element (i, j) at mat[i * M + j]).
// One thread per output column, guarded against the grid tail.
__global__ void MV(int *vec, int *mat, int *out, const int N, const int M) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < M) {
        int sum = 0;
        for (int i = 0; i < N; i++)
            sum += vec[i] * mat[(i * M) + tid];
        out[tid] = sum;
    }
}

int main(int argc, char const *argv[]) {
    // open the output file; customize output filename here
    std::ofstream ofile;
    ofile.open("matrix_vector_gpu_5000_points_Tesla.csv");

    // number of instances of data generated
    int NUM = 5000;

    // benchmark [1*n] * [n*m] for random sizes/densities
    for (int iterator = 0; iterator <= NUM; iterator++) {
        if (iterator % 10 == 0) std::cout << "iter: " << iterator << std::endl;

        int m, n;
        double d;
        n = rand() % 1024 + 1;
        m = rand() % 1024 + 1;
        // density d = 2^-power, power uniform in [0, log2(m*n)]
        int power = rand() % int((log2(double(m * n)) + 1));
        d = 1 / pow(2, power);

        int *h_a, *h_b, *h_c;
        cudaMallocHost((void **) &h_a, sizeof(int) * n);
        cudaMallocHost((void **) &h_b, sizeof(int) * n * m);
        cudaMallocHost((void **) &h_c, sizeof(int) * m);

        // initialize vector A
        for (int i = 0; i < n; i++)
            h_a[i] = rand() % 1024 + 1;

        // Initialize matrix B in the layout the kernel reads: n rows x m cols.
        // BUG FIX: the original filled h_b as m rows x n cols (h_b[i*n+j]) —
        // the transpose of what MV indexes — and never zeroed the buffer in
        // the sparse branch, leaving most entries uninitialized.
        memset(h_b, 0, sizeof(int) * n * m);
        if (d <= 0.5) {
            // sparse: scatter ~m*n*d nonzeros (approximation; collisions allowed)
            int count_b = m * n * d;
            for (int it = 0; it < count_b; it++) {
                int i = rand() % n;   // row in [0, n)
                int j = rand() % m;   // col in [0, m)
                h_b[i * m + j] = rand() % 1024 + 1;
            }
        } else {
            // dense: fill every entry
            for (int i = 0; i < n; i++)
                for (int j = 0; j < m; j++)
                    h_b[i * m + j] = rand() % 1024 + 1;
        }

        // Allocate memory space on the device
        int *d_a, *d_b, *d_c;
        cudaMalloc((void **) &d_a, sizeof(int) * n);
        cudaMalloc((void **) &d_b, sizeof(int) * n * m);
        cudaMalloc((void **) &d_c, sizeof(int) * m);

        // copy vector A and matrix B from host to device memory
        cudaMemcpy(d_a, h_a, sizeof(int) * n, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b, h_b, sizeof(int) * n * m, cudaMemcpyHostToDevice);

        float gpu_elapsed_time_ms;
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);

        // launch kernel: one thread per output element, 256 threads per block
        MV <<< m / 256 + 1, 256 >>> (d_a, d_b, d_c, n, m);
        cudaMemcpy(h_c, d_c, sizeof(int) * m, cudaMemcpyDeviceToHost);

        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);

        // record: seconds, m, n, density, element count
        int c = m * n;
        ofile << gpu_elapsed_time_ms / 1000;
        ofile << "," << m << "," << n << "," << d << "," << c << ",\n";

        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
        cudaFreeHost(h_a);
        cudaFreeHost(h_b);
        cudaFreeHost(h_c);
    }
    ofile.close();
    return 0;
}
1,150
#include <cuda.h> #include <cuda_runtime.h> #include <time.h> #define N 32*1024*1024 #define BLOCK_SIZE 256 __global__ void reduce_v1(float *g_idata,float *g_odata){ __shared__ float sdata[BLOCK_SIZE]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { // if (tid % (2*s) == 0) { // sdata[tid] += sdata[tid + s]; // } int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } int main() { float *input_host = (float*)malloc(N*sizeof(float)); float *input_device; cudaMalloc((void **)&input_device, N*sizeof(float)); for (int i = 0; i < N; i++) input_host[i] = 2.0; cudaMemcpy(input_device, input_host, N*sizeof(float), cudaMemcpyHostToDevice); int32_t block_num = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; float *output_host = (float*)malloc((N / BLOCK_SIZE) * sizeof(float)); float *output_device; cudaMalloc((void **)&output_device, (N / BLOCK_SIZE) * sizeof(float)); dim3 grid(N / BLOCK_SIZE, 1); dim3 block(BLOCK_SIZE, 1); reduce_v1<<<grid, block>>>(input_device, output_device); cudaMemcpy(output_host, output_device, block_num * sizeof(float), cudaMemcpyDeviceToHost); return 0; }
1,151
#include "includes.h" __global__ void cuArraysSetConstant_kernel(float *image, int size, float value) { int idx = threadIdx.x + blockDim.x*blockIdx.x; if(idx < size) { image[idx] = value; } }
1,152
//
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch

// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>

// problem size (vector length) N
static const int N = 12345678;

// Number of terms to use when approximating sine
static const int TERMS = 6;

// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
  int i;
  for (i=0; i<N; i++) {
    float value = input[i];
    float numer = input[i] * input[i] * input[i];
    int denom = 6; // 3!
    int sign = -1;
    for (int j=1; j<=TERMS;j++) {
      value += sign * numer / denom;
      numer *= input[i] * input[i];
      denom *= (2*j+2) * (2*j+3);
      sign *= -1;
    }
    output[i] = value;
  }
}

// kernel function (CUDA device)
// Taylor-series sine, one thread per element.
// BUG FIX: added the idx < N guard so a grid rounded UP past N is safe.
__global__ void sine_parallel(float *d_input, float *d_output)
{
  int idx = blockIdx.x*blockDim.x + threadIdx.x;
  if (idx >= N) return;
  float d_value = d_input[idx];
  float d_numer = d_input[idx] * d_input[idx] * d_input[idx];
  int d_denom = 6;
  int d_sign = -1;
  for (int d_j=1; d_j<=TERMS; d_j++) {
    d_value += d_sign * d_numer / d_denom;
    d_numer *= d_input[idx] * d_input[idx];
    d_denom *= (2*d_j+2) * (2*d_j+3);
    d_sign *= -1;
  }
  d_output[idx] = d_value;
}

// BEGIN: timing and error checking routines (do not modify)

// Returns the current time in microseconds
long long start_timer() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
  std::cout << std::setprecision(5);
  std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
  return end_time - start_time;
}

void checkErrors(const char label[])
{
  // we need to synchronise first to catch errors due to
  // asynchroneous operations that would otherwise
  // potentially go unnoticed
  cudaError_t err;
  err = cudaThreadSynchronize();
  if (err != cudaSuccess)
  {
    char *e = (char*) cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
  }
  err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    char *e = (char*) cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
  }
}

// END: timing and error checking routines (do not modify)

int main (int argc, char **argv)
{
  //BEGIN: CPU implementation (do not modify)
  float *h_cpu_result = (float*)malloc(N*sizeof(float));
  float *h_input = (float*)malloc(N*sizeof(float));
  //Initialize data on CPU
  int i;
  for (i=0; i<N; i++)
  {
    h_input[i] = 0.1f * i;
  }

  //Execute and time the CPU version
  long long CPU_start_time = start_timer();
  sine_serial(h_input, h_cpu_result);
  long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
  //END: CPU implementation (do not modify)

  float *h_gpu_result = (float*)malloc(N*sizeof(float));

  //declare GPU memory pointers
  float *d_input;
  float *d_output;

  long long Memory_Allocation_start_time = start_timer();
  long long GPU_start_time = start_timer();

  //allocate GPU memory
  cudaMalloc((void **) &d_input, N*sizeof(float));
  cudaMalloc((void **) &d_output, N*sizeof(float));
  long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");

  //transfer the array to the GPU
  long long Memory_copy_to_device_start_time = start_timer();
  cudaMemcpy(d_input, h_input, N*sizeof(float), cudaMemcpyHostToDevice);
  long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");

  // BUG FIX: the original used N/1024, which truncates (N is not a multiple
  // of 1024), so the last few hundred elements were never computed and the
  // result comparison failed. Round the block count up; the kernel guard
  // makes the overshoot safe.
  int blocks = (N + 1023) / 1024;
  long long Kernal_run_start_time = start_timer();
  sine_parallel<<<blocks, 1024>>>(d_input, d_output);
  long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");

  //copy back the result array to the CPU
  long long Memory_copy_to_Host_start_time = start_timer();
  cudaMemcpy(h_gpu_result, d_output, N*sizeof(float), cudaMemcpyDeviceToHost);
  long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
  long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");

  // Checking to make sure the CPU and GPU results match - Do not modify
  int errorCount = 0;
  for (i=0; i<N; i++)
  {
    if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
      errorCount = errorCount + 1;
  }
  if (errorCount > 0)
    printf("Result comparison failed.\n");
  else
    printf("Result comparison passed.\n");

  // Cleaning up memory
  free(h_input);
  free(h_cpu_result);
  free(h_gpu_result);

  //Cleaning up memory for gpu pointers
  cudaFree(d_input);
  cudaFree(d_output);
  return 0;
}
1,153
// simple stupid dot product example from chapter 5 of CUDA by Example // as worked by myself from scratch #include <stdio.h> #define N 1000000 #define BPG 16 #define TPB 16 __global__ void dot(float *a, float *b, float *c){ //accumulate thread result on shared mem (per block) __shared__ float cache[TPB]; int tid = (blockDim.x * blockIdx.x) + threadIdx.x; float acum = 0; while( tid < N ){ acum += a[tid] * b[tid]; tid += gridDim.x * blockDim.x; } cache[ threadIdx.x ] = acum; //need to agregate the result for this block into //a single scalar, from the individual thread results // for that, we must be sure all threads have finished //barrier here. __syncthreads(); for(int i=blockDim.x/2; i > 0; i /= 2){ tid = threadIdx.x; // because all threads are gonna go thru here anyway, // but only the i leftmost ones should do sth. if( tid < i ){ cache[tid] += cache[tid + i]; } __syncthreads(); // regroup before next iteration } //block's sum at cache[0] //somebody's gotta write it back to the host if( threadIdx.x == 0 ){ c[blockIdx.x] = cache[0]; } } #define sum_squares(x) ((x*(x+1)*(2*x+1))/6) int main(){ float a[N], b[N], c[BPG]; float *dev_a, *dev_b, *dev_c; cudaMalloc( (void**)&dev_a, N * sizeof(float) ); cudaMalloc( (void**)&dev_b, N * sizeof(float) ); cudaMalloc( (void**)&dev_c, BPG * sizeof(float) ); for(int i=0; i < N; i++){ a[i] = i; b[i] = 2*i; } cudaMemcpy( dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice ); dot<<<BPG, TPB>>>(dev_a, dev_b, dev_c); cudaMemcpy( c, dev_c, BPG*sizeof(float), cudaMemcpyDeviceToHost ); //sum the results from the individual blocks float res=0; for(int i=0; i < BPG; i++){ printf("c[%d] = %f\n", i, c[i]); res += c[i]; } printf("Result: %f (vs. %f)\n", res, 2*sum_squares((float)(N-1))); cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); return 0; }
1,154
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15) { for (int i=0; i < var_1; ++i) { if (comp == acosf(+1.7250E35f)) { comp += (var_3 / var_4 - var_5 / var_6); if (comp > (+1.2638E36f - ldexpf(-1.6108E34f - (var_7 - +1.7065E-19f), 2))) { comp = sqrtf(+1.4345E35f + +0.0f); comp += (var_8 / -1.7229E34f / (-1.5260E35f - var_9 + -1.7803E3f)); } for (int i=0; i < var_2; ++i) { comp = -1.3042E-44f - (var_10 * var_11); comp = (var_12 * (var_13 - var_14 / +1.3264E8f)); } if (comp > -1.8163E-29f - +1.0793E-16f) { float tmp_1 = -0.0f - var_15; comp = tmp_1 - -1.4916E3f / +0.0f / +1.6909E10f; } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16); cudaDeviceSynchronize(); return 0; }
1,155
__global__ void broadcast_kernel(int n, const float* x, float *z) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) z[i] = x[0]; } void broadcast(int n, const float* x, float *z) { broadcast_kernel<<<(n+255)/256, 256>>>(n, x, z); }
1,156
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "unistd.h"
#include "time.h"
#include "string.h"

// Per-channel dispersion delays (seconds at unit DM); max 4096 channels.
#define MAX_CHANS 4096
__constant__ float dm_shifts[MAX_CHANS];

// ---------------------- Optimised Dedispersion Loop ------------------------------
// Grid: x covers sample chunks (grid-stride loop, so any x size is correct),
// y selects one DM trial per row. Dynamic shared memory must hold one float
// per thread: blockDim.x * sizeof(float) bytes.
__global__ void dedisperse_loop(float *outbuff, float *buff, int nsamp, int nchans,
                                float tsamp, float startdm, float dmstep, int maxshift)
{
    extern __shared__ float shared[];
    int c, s;
    // DM of this grid row, pre-divided by the sampling time
    float shift_temp = (startdm + blockIdx.y * dmstep) / tsamp;

    for (s = threadIdx.x + blockIdx.x * blockDim.x; s < nsamp;
         s += blockDim.x * gridDim.x) {
        shared[threadIdx.x] = 0;
        for (c = 0; c < nchans; c++) {
            int shift = c * (nsamp + maxshift) + floor(dm_shifts[c] * shift_temp);
            shared[threadIdx.x] += buff[s + shift];
        }
        outbuff[blockIdx.y * nsamp + s] = shared[threadIdx.x];
    }
}

// -------------------------- Main Program -----------------------------------
float fch1 = 156, foff = -0.005859375, tsamp = 0.000165, dmstep = 0.02, startdm = 0;
int nchans = 1024, nsamp = 1024, tdms = 1024;
int gridsize = 128, blocksize = 128;

// Process command-line parameters (leading args that name readable files are skipped)
void process_arguments(int argc, char *argv[])
{
    int i = 1;
    while ((fopen(argv[i], "r")) != NULL)
        i++;
    while (i < argc) {
        if (!strcmp(argv[i], "-nchans"))        nchans = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-nsamp"))    nsamp = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-dmstep"))   dmstep = atof(argv[++i]);
        else if (!strcmp(argv[i], "-startdm"))  startdm = atof(argv[++i]);
        else if (!strcmp(argv[i], "-tdms"))     tdms = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-gridsize")) gridsize = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-blocksize"))blocksize = atoi(argv[++i]);
        else if (!strcmp(argv[i], "-tsamp"))    tsamp = atof(argv[++i]);
        else if (!strcmp(argv[i], "-foff"))     foff = -atof(argv[++i]);
        i++;
    }
}

// Fill buffer with data (blocking call)
void generate_data(float* buffer, int nsamp, int nchans)
{
    for (int i = 0; i < nsamp * nchans; i++)
        buffer[i] = 0.1;
}

// DM delay (seconds at unit DM) between frequencies f1 and f2 (MHz)
float dmdelay(float f1, float f2)
{
    return (4148.741601 * ((1.0 / f1 / f1) - (1.0 / f2 / f2)));
}

int main(int argc, char *argv[])
{
    float *input, *output, *d_input, *d_output;
    int maxshift, i, j;

    process_arguments(argc, argv);

    // guard the fixed-size __constant__ delay table
    if (nchans > MAX_CHANS) {
        fprintf(stderr, "nchans (%d) exceeds constant-memory capacity (%d)\n",
                nchans, MAX_CHANS);
        return 1;
    }

    // Calculate temporary DM-shifts
    float *dmshifts = (float *) malloc(nchans * sizeof(float));
    for (i = 0; i < nchans; i++)
        dmshifts[i] = dmdelay(fch1 + (foff * i), fch1);

    // Calculate maxshift
    maxshift = ceil(dmshifts[nchans - 1] * (startdm + dmstep * tdms) / tsamp);

    // Allocate and initialise arrays
    input  = (float *) malloc((nsamp + maxshift) * nchans * sizeof(float));
    output = (float *) malloc(nsamp * tdms * sizeof(float));
    for (i = 0; i < nchans; i++)
        for (j = 0; j < nsamp + maxshift; j++)
            input[i * (nsamp + maxshift) + j] = i;

    // Initialise CUDA stuff
    cudaSetDevice(1);
    cudaEvent_t event_start, event_stop;
    float timestamp, kernelTime;
    cudaEventCreate(&event_start);
    cudaEventCreate(&event_stop);

    printf("nsamp: %d, nchans: %d, tsamp: %f, startdm: %f, dmstep: %f, tdms: %d, fch1: %f, foff: %f, maxshift: %d\n",
           nsamp, nchans, tsamp, startdm, dmstep, tdms, fch1, foff, maxshift);

    // Allocate CUDA memory and copy dmshifts
    cudaMalloc((void **) &d_input, (nsamp + maxshift) * nchans * sizeof(float));
    cudaMalloc((void **) &d_output, nsamp * tdms * sizeof(float));
    cudaMemset(d_output, 0, nsamp * tdms * sizeof(float));
    // BUG FIX: the original copied nchans * sizeof(int) — the table is float;
    // it only worked because sizeof(int) == sizeof(float) on this platform.
    cudaMemcpyToSymbol(dm_shifts, dmshifts, nchans * sizeof(float));

    time_t start = time(NULL);

    // Copy input to GPU
    cudaEventRecord(event_start, 0);
    cudaMemcpy(d_input, input, (nsamp + maxshift) * nchans * sizeof(float),
               cudaMemcpyHostToDevice);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied to GPU in: %lf\n", timestamp);

    dim3 gridDim(nsamp / blocksize, tdms);
    cudaEventRecord(event_start, 0);
    // BUG FIX: shared-memory size was hard-coded to 512 bytes, which overflows
    // as soon as -blocksize exceeds 128 threads; size it from blocksize.
    dedisperse_loop<<<gridDim, blocksize, blocksize * sizeof(float)>>>(
        d_output, d_input, nsamp, nchans, tsamp, startdm, dmstep, maxshift);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Processed in: %lf\n", timestamp);
    kernelTime = timestamp;

    // Copy output from GPU
    cudaEventRecord(event_start, 0);
    cudaMemcpy(output, d_output, nsamp * tdms * sizeof(float),
               cudaMemcpyDeviceToHost);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied from GPU in: %lf\n", timestamp);

    // Check values
    int val = 0;
    for (i = 0; i < nchans; i++)
        val += i;
    /* for(i = 0; i < tdms; i++)
        for(j = 0; j < nsamp; j++)
            if (output[i * nsamp + j] != val)
                printf("Error: dm: %d nsamp: %d value:%f \n", i, j, output[i*nsamp+j]); */

    printf("Total time: %d\n", (int) (time(NULL) - start));
    printf("Performance: %lf Gflops\n",
           (nchans * tdms) * (nsamp * 1.0 / kernelTime / 1.0e6));

    // release resources (the original leaked everything)
    cudaFree(d_input);
    cudaFree(d_output);
    free(dmshifts);
    free(input);
    free(output);
    return 0;
}
1,157
#define t_max 1
#define t 1

/* (u[0][0][0][0][0]=((alpha*(ux[1][0][0][0][1]-ux[-1][0][0][0][1]))+((beta*(uy[0][1][0][0][2]-uy[0][-1][0][0][2]))+(gamma*(uz[0][0][1][0][3]-uz[0][0][-1][0][3]))))) */
// Auto-generated stencil kernel (stencil-compiler output): computes a central
// -difference divergence of the vector field (ux, uy, uz) into u_0_0.
// NOTE(review): appears generated for a 3D thread block tiling, with blockIdx.y
// folded over the y and z tile dimensions; assumes the grid exactly tiles
// (x_max, y_max, z_max) — no bounds guards. Confirm launch configuration
// against the generator before editing the index arithmetic.
__global__ void divergence(float * * u_0_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max)
{
    /*
    float * const u__u_0[16] = { u_0_0 } ;
    float * const u__ux_1[16] = { ux_1_0 } ;
    float * const u__uy_2[16] = { uy_2_0 } ;
    float * const u__uz_3[16] = { uz_3_0 } ;
    */
    int _idx0;
    int _idx1;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int idx_1_2;
    int p_idx_x;
    int p_idx_x_max;
    int p_idx_y;
    int p_idx_y_max;
    int p_idx_z;
    int p_idx_z_max;
    int size_1_1;
    int size_1_2;
    //int t;
    int tmp;

    /* Initializations */
    // decompose blockIdx.y into a (y tile, z tile) pair
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
    p_idx_x_max=(p_idx_x+1);
    p_idx_y=(threadIdx.y+(tmp*blockDim.y));
    p_idx_y_max=(p_idx_y+1);
    p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    p_idx_z_max=(p_idx_z+1);

    /* Implementation */
    /*
    //for t = 1..t_max by 1 parallel 1 <level 0> schedule { ...
    } */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
        /* u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0]) */
        // flattened index of the +x neighbour (the later indices are derived
        // from it by incremental offsets rather than recomputed from scratch)
        /* _idx0 = (((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(p_idx_y*x_max))+((2*p_idx_y)*t))+p_idx_x)+2) */
        _idx0=(((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(p_idx_y*x_max))+((2*p_idx_y)*t))+p_idx_x)+2);
        /* _idx1 = ((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(p_idx_y*x_max))+((2*p_idx_y)*t))+p_idx_x) */
        _idx1=(_idx0-2);
        /* _idx2 = ((((p_idx_z*x_max)*y_max)+(((((2*p_idx_z)*t)+p_idx_y)+2)*x_max))+p_idx_x) */
        _idx2=(((_idx1-(((2*p_idx_z)*t)*y_max))+((((2*p_idx_z)*t)+2)*x_max))-((2*p_idx_y)*t));
        /* _idx3 = ((((p_idx_z*x_max)*y_max)+((((2*p_idx_z)*t)+p_idx_y)*x_max))+p_idx_x) */
        _idx3=(_idx2-(2*x_max));
        /* _idx4 = (((((p_idx_z+2)*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */
        _idx4=((_idx3+((2*x_max)*y_max))-(((2*p_idx_z)*t)*x_max));
        /* _idx5 = ((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */
        _idx5=(_idx4-((2*x_max)*y_max));
        u_0_0[_idx5]=((alpha*(ux_1_0[_idx0]-ux_1_0[_idx1]))+((beta*(uy_2_0[_idx2]-uy_2_0[_idx3]))+(gamma*(uz_3_0[_idx4]-uz_3_0[_idx5]))));
    }
}

// Auto-generated companion kernel: seeds ux/uy/uz with constant values at the
// stencil's neighbour positions and u_0_0 at the centre, using the same
// incremental index arithmetic (and the same no-bounds-guard assumption)
// as `divergence` above.
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max)
{
    float * const u__u_0[16] = { u_0_0 } ;
    float * const u__ux_1[16] = { ux_1_0 } ;
    float * const u__uy_2[16] = { uy_2_0 } ;
    float * const u__uz_3[16] = { uz_3_0 } ;
    int _idx0;
    int _idx1;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int idx_1_2;
    int p_idx_x;
    int p_idx_x_max;
    int p_idx_y;
    int p_idx_y_max;
    int p_idx_z;
    int p_idx_z_max;
    int size_1_1;
    int size_1_2;
    //int t;
    int tmp;

    /* Initializations */
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
    p_idx_x_max=(p_idx_x+1);
    p_idx_y=(threadIdx.y+(tmp*blockDim.y));
    p_idx_y_max=(p_idx_y+1);
    p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    p_idx_z_max=(p_idx_z+1);

    /* Implementation */
    /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
        /* u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0]) */
        /* _idx0 = ((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(p_idx_y*x_max))+((2*p_idx_y)*t))+p_idx_x) */
        _idx0=((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(p_idx_y*x_max))+((2*p_idx_y)*t))+p_idx_x);
        ux_1_0[_idx0]=0.2;
        /* _idx1 = ((((p_idx_z*x_max)*y_max)+((((2*p_idx_z)*t)+p_idx_y)*x_max))+p_idx_x) */
        _idx1=(((_idx0-(((2*p_idx_z)*t)*y_max))+(((2*p_idx_z)*t)*x_max))-((2*p_idx_y)*t));
        uy_2_0[_idx1]=0.30000000000000004;
        /* _idx2 = ((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */
        _idx2=(_idx1-(((2*p_idx_z)*t)*x_max));
        uz_3_0[_idx2]=0.4;
        /* _idx3 = (((((p_idx_z+2)*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */
        _idx3=(_idx2+((2*x_max)*y_max));
        uz_3_0[_idx3]=0.4;
        /* _idx4 = ((((p_idx_z*x_max)*y_max)+(((((2*p_idx_z)*t)+p_idx_y)+2)*x_max))+p_idx_x) */
        _idx4=(_idx1+(2*x_max));
        uy_2_0[_idx4]=0.30000000000000004;
        /* _idx5 = (((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(p_idx_y*x_max))+((2*p_idx_y)*t))+p_idx_x)+2) */
        _idx5=(_idx0+2);
        ux_1_0[_idx5]=0.2;
        u_0_0[_idx2]=0.1;
    }
}
1,158
#include <stdio.h> #include <stdlib.h> #define N 32 //Código device __global__ void soma_vetor(int *a, int *b, int *c){ int indice = blockIdx.x; if(indice < N) c[indice] = a[indice] + b[indice]; } //Código host int main(){ int a[N],b[N],c[N]; int* dev_a; int* dev_b; int* dev_c; int tam = N*sizeof(int); //Inicializando as variáveis do host: for(int i = 0; i < N; i++){ a[i] = i; b[i] = i*2; } //Alocando espaço para as variáveis da GPU: cudaMalloc((void**)&dev_a, tam); cudaMalloc((void**)&dev_b, tam); cudaMalloc((void**)&dev_c, tam); //Copiando as variáveis da CPU para a GPU: cudaMemcpy(dev_a, &a, tam, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, &b, tam, cudaMemcpyHostToDevice); //Chamada à função da GPU (kernel): soma_vetor<<<N,1>>>(dev_a, dev_b, dev_c); //Copiando o resultado da GPU para a CPU: cudaMemcpy(&c, dev_c, tam, cudaMemcpyDeviceToHost); //Visualizando o resultado: for(int i = 0; i < N; i++) printf("%d ",c[i]); printf("\n\n"); //Liberando a memória na GPU: cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
1,159
#include <stdio.h>
#include <stdlib.h>

// Naive inclusive (Hillis-Steele) scan over a single block, working directly
// in global memory. n is the element count (single-block launch, n == blockDim.x).
// BUG FIX: the loop bound was `sizeof(d_in)` — the size of the POINTER
// (8 bytes), not the element count — which only worked because the demo array
// happened to hold 8 elements. The length is now an explicit parameter.
// BUG FIX: added the __syncthreads() at the end of each round; without it a
// thread could start reading round k+1 values while a neighbour was still
// writing round k (data race). Barriers are outside the divergent branches.
__global__ void global_scan(float* d_out, float* d_in, int n)
{
    int idx = threadIdx.x;
    float out = 0.00f;
    d_out[idx] = d_in[idx];
    __syncthreads();
    for (int step = 1; step < n; step *= 2) {
        if (idx - step >= 0) {
            out = d_out[idx] + d_out[idx - step];
        }
        __syncthreads();
        if (idx - step >= 0) {
            d_out[idx] = out;
            out = 0.00f;
        }
        __syncthreads();
    }
}

// Same scan but staged through dynamic shared memory (3rd launch argument
// must be n * sizeof(float)). Same two fixes as global_scan above.
__global__ void shmem_scan_kernel(float * d_out, const float * d_in, int n)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ float sdata[];
    int idx = threadIdx.x;
    float out = 0.00f;

    // load shared mem from global mem
    sdata[idx] = d_in[idx];
    __syncthreads();

    for (int step = 1; step < n; step *= 2) {
        if (idx - step >= 0) {
            out = sdata[idx] + sdata[idx - step];
        }
        __syncthreads();
        if (idx - step >= 0) {
            sdata[idx] = out;
            out = 0.00f;
        }
        __syncthreads();
    }

    // write this thread's result back to global mem
    d_out[idx] = sdata[idx];
}

int main(int argc, char** argv)
{
    const int ARRAY_SIZE = 8;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float* d_in;
    float* d_out;

    // allocate GPU memory
    cudaMalloc((void**)&d_in, ARRAY_BYTES);
    cudaMalloc((void**)&d_out, ARRAY_BYTES);

    // transfer the array to GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // launch the kernel
    // global_scan<<<1, ARRAY_SIZE>>>(d_out, d_in, ARRAY_SIZE);
    shmem_scan_kernel<<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);

    // copy back the result array to the host
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // print out the resulting array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_out);
    system("pause");
    return 0;
}
1,160
////////////////////////////////////////////////////////////////////////////////
// CUDA LATTICE RELAXATION
// WRITTEN BY: CLAYTON RAYMENT
//
// Jacobi relaxation of a potential grid (outer box held at 9, inner box held
// at 0), run both serially on the CPU and with one CUDA block per cell, then
// timed against each other. The grid spacing is user-defined via U.
////////////////////////////////////////////////////////////////////////////////
#include <time.h>
#include <iostream>
#include <utility>

//===========
//GLOBALS:
//===========
const int U = 200;   //Number of blocks per unit (grid spacing)
const int H = 11*U;  //Height of the outer box
const int W = 12*U;  //Width of the outer box
const int h = 3*U;   //Height of the inner box
const int w = 4*U;   //Width of the inner box
const int N = H*W;   //Number of threads required

//CUDA FUNCTIONS:

//Form the base simulation area: boundary cells = 9, inner plate = 0,
//interior initial guess = 1.
//NOTE(review): the serial fillSerial() uses 4.5 as the interior guess while
//this kernel uses 1 — both converge to the same solution but intermediate
//values differ; confirm whether that asymmetry is intentional.
__global__ void fill(double *a){
    //Form the outer rectangle:
    if(blockIdx.x < W || blockIdx.x > (N-W) || blockIdx.x % W == 0 ||
       (blockIdx.x-(W-1))%W == 0){
        a[blockIdx.x] = 9;
    }
    else{
        a[blockIdx.x] = 1;
    }
    //Form the inner rectangle:
    if(blockIdx.x > W*((H-h)/2) && blockIdx.x < W*((H-h)/2+h) &&
       blockIdx.x % W+1 > (W-w)/2 && blockIdx.x % W < (W-w)/2+w){
        a[blockIdx.x] = 0;
    }
}

//Perform one Jacobi sweep: b <- 5-point average of a, with fixed cells
//(0 and 9) copied through unchanged.
__global__ void average(double *a, double *b){
    //if we are at one of the edges, do nothing:
    if(a[blockIdx.x] == 0 || a[blockIdx.x] == 9){
        b[blockIdx.x] = a[blockIdx.x];
    }
    else{
        b[blockIdx.x] = (a[blockIdx.x]+a[blockIdx.x + 1]+a[blockIdx.x - 1]+
                         a[blockIdx.x - W]+a[blockIdx.x + W])/5.0;
    }
}

//SERIAL FUNCTIONS:

//CPU counterpart of fill() (interior guess 4.5 — see note on fill()).
void fillSerial(double *a){
    for(int i = 0; i < N; ++i){
        if(i < W || i > (N-W) || i%W == 0 || (i - (W-1))%W == 0){
            a[i] = 9;
        }
        else{
            a[i] = 4.5;
        }
        if( i > W*((H-h)/2) && i < W*((H-h)/2+h) &&
            i % W+1 > (W-w)/2 && i % W < (W-w)/2+w){
            a[i] = 0;
        }
    }
}

//CPU counterpart of average(): one Jacobi sweep from a into b.
void averageSerial(double* a, double* b){
    for(int i = 0; i < N; ++i){
        if(a[i] == 0 || a[i] == 9){
            b[i] = a[i];
        }
        else{
            b[i] = (a[i]+a[i+1]+a[i-1]+a[i+W]+a[i-W])/5.0;
        }
    }
}

//-----------------
//HELPER FUNCTIONS
//-----------------

//Quick function to print out the grid:
void print(double* a){
    for(int i = 0; i < N; ++i){
        if(i%W == 0){
            std::cout << '\n';
        }
        std::cout << a[i] << ' ';
    }
}

int main(void){
    //Serial Code:
    clock_t tS = clock();
    double* notPlate_s = new double[N]; //Serial copy of the non-plate locations
    double* tempVal_s  = new double[N]; //Scratch buffer for the next sweep
    fillSerial(notPlate_s);
    for(int i = 0; i < 1000; ++i){
        averageSerial(notPlate_s, tempVal_s);
        // BUG FIX: was `notPlate_s = tempVal_s;`, which made both names alias
        // ONE buffer — every later sweep averaged the grid into itself
        // (not a Jacobi iteration) and the first buffer leaked. Swap the
        // pointers for proper double buffering instead.
        std::swap(notPlate_s, tempVal_s);
    }
    tS = clock() - tS;

    //PARALLEL CODE:
    clock_t tP = clock();
    double *notPlate_h;                   //Host copy of the result
    double *notPlate_d, *tempVal_d;       //Device ping-pong buffers
    int size = N * sizeof(double);

    notPlate_h = (double *)malloc(size);
    cudaMalloc((void **)&notPlate_d, size);
    cudaMalloc((void **)&tempVal_d, size);

    //Build the boundary/plate pattern directly on the device.
    fill<<<N,1>>>(notPlate_d);

    // BUG FIX: the original copied host<->device on EVERY iteration and then
    // aliased the host pointers (same self-averaging bug as above, plus a
    // leak). Keep the data resident on the GPU and ping-pong the two device
    // buffers; copy back once at the end.
    for(int i = 0; i < 1000; ++i){
        average<<<N,1>>>(notPlate_d, tempVal_d);
        std::swap(notPlate_d, tempVal_d);
    }
    cudaMemcpy(notPlate_h, notPlate_d, size, cudaMemcpyDeviceToHost);

    //The output here will be redirected to a plaintext file which will then
    //be opened and plot in OCTAVE
    //print(notPlate_h);

    //Free the memory just to be safe:
    free(notPlate_h);
    cudaFree(notPlate_d);
    cudaFree(tempVal_d);
    delete[] notPlate_s;
    delete[] tempVal_s;

    tP = clock() - tP;
    std::cout << "N: " << N << " | SERIAL: " << (float)tS/CLOCKS_PER_SEC
              << "s | PARALLEL: " << (float)tP/CLOCKS_PER_SEC << "s" << std::endl;
    return 0;
}
1,161
#include <stdio.h> __global__ void mykernel(void) { printf("Hello World from GPU!\n"); } int main(void) { mykernel<<<1,1>>>(); cudaDeviceSynchronize; printf("Hello World from CPU!\n"); return 0; }
1,162
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAX_CHAR 100
#define DATAFILE "data.txt"
#define RESULTSFILE "resultsCudal.txt"
#define G 6.674e-11
#define NUM_ITER 1000
#define NUM_ITER_SHOW 50

// One integration step of an all-pairs gravitational N-body simulation.
// Thread i owns body i. Launched as <<<1, noOfObjects>>>, so noOfObjects must
// not exceed the per-block thread limit (1024 on current hardware) and the
// __syncthreads() below covers every body.
// The update uses an implicit timestep of 1 (v += a; x += v).
__global__ void asteroid(double *gpu_x, double *gpu_y,
                         double *gpu_vx, double *gpu_vy, double *gpu_m,
                         double *gpu_x_new, double *gpu_y_new,
                         double *gpu_vx_new, double *gpu_vy_new,
                         int noOfObjects)
{
    int i = threadIdx.x;

    if (i < noOfObjects) {
        // Stage the current state into the "_new" buffers.
        gpu_x_new[i]  = gpu_x[i];
        gpu_y_new[i]  = gpu_y[i];
        gpu_vx_new[i] = gpu_vx[i];
        gpu_vy_new[i] = gpu_vy[i];

        // Accumulate the acceleration on body i from every other body.
        double ax_total = 0;
        double ay_total = 0;
        for (int j = 0; j < noOfObjects; j++) {
            if (i == j)
                continue;
            double d = sqrt(pow(gpu_x[i] - gpu_x[j], 2.0) +
                            pow(gpu_y[i] - gpu_y[j], 2.0));
            double f  = G * gpu_m[i] * gpu_m[j] / pow(d, 2.0);
            double fx = f * (gpu_x[j] - gpu_x[i]) / d;
            double fy = f * (gpu_y[j] - gpu_y[i]) / d;
            ax_total += fx / gpu_m[i];
            ay_total += fy / gpu_m[i];
        }
        gpu_vx_new[i] += ax_total;
        gpu_vy_new[i] += ay_total;
        gpu_x_new[i]  += gpu_vx_new[i];
        gpu_y_new[i]  += gpu_vy_new[i];

        // BUG FIX: the original used %d for double arguments (undefined
        // behavior, printed garbage); positions need %f.
        printf("Object %d: %f, %f\n", i, gpu_x[i], gpu_y[i]);
    }

    // BUG FIX: barrier before the write-back. Without it, a fast thread could
    // overwrite gpu_x[i]/gpu_y[i] while a slower thread is still reading the
    // OLD positions inside its force loop above (intra-block data race).
    __syncthreads();

    if (i < noOfObjects) {
        gpu_x[i]  = gpu_x_new[i];
        gpu_y[i]  = gpu_y_new[i];
        gpu_vx[i] = gpu_vx_new[i];
        gpu_vy[i] = gpu_vy_new[i];
    }
}

int main()
{
    clock_t start, end;
    double time_used;
    char str[MAX_CHAR];
    FILE *file;
    int noOfObjects;
    int i;

    // First token of the data file is the object count.
    file = fopen(DATAFILE, "r");
    fscanf(file, "%s", str);
    noOfObjects = atoi(str);
    printf("Number of objects: %d\n", noOfObjects);

    const int ARRAY_BYTES = noOfObjects * sizeof(double);

    // Current state, initial state (kept for the displacement report), and
    // scratch buffers mirrored on the device.
    double *x      = (double *)malloc(ARRAY_BYTES);
    double *y      = (double *)malloc(ARRAY_BYTES);
    double *vx     = (double *)malloc(ARRAY_BYTES);
    double *vy     = (double *)malloc(ARRAY_BYTES);
    double *m      = (double *)malloc(ARRAY_BYTES);
    double *x0     = (double *)malloc(ARRAY_BYTES);
    double *y0     = (double *)malloc(ARRAY_BYTES);
    double *vx0    = (double *)malloc(ARRAY_BYTES);
    double *vy0    = (double *)malloc(ARRAY_BYTES);
    double *x_new  = (double *)malloc(ARRAY_BYTES);
    double *y_new  = (double *)malloc(ARRAY_BYTES);
    double *vx_new = (double *)malloc(ARRAY_BYTES);
    double *vy_new = (double *)malloc(ARRAY_BYTES);
    printf("\n");

    // Device-side buffers.
    double *gpu_x, *gpu_y, *gpu_vx, *gpu_vy, *gpu_m;
    double *gpu_x_new, *gpu_y_new, *gpu_vx_new, *gpu_vy_new;

    // Read (x, y, vx, vy, m) per object; keep a copy of the initial state.
    for (i = 0; i < noOfObjects; i++) {
        fscanf(file, "%s", str); x[i]  = atof(str); x0[i]  = x[i];
        fscanf(file, "%s", str); y[i]  = atof(str); y0[i]  = y[i];
        fscanf(file, "%s", str); vx[i] = atof(str); vx0[i] = vx[i];
        fscanf(file, "%s", str); vy[i] = atof(str); vy0[i] = vy[i];
        fscanf(file, "%s", str); m[i]  = atof(str);
    }
    fclose(file);

    // Allocate GPU memory.
    cudaMalloc((void **)&gpu_x,      ARRAY_BYTES);
    cudaMalloc((void **)&gpu_y,      ARRAY_BYTES);
    cudaMalloc((void **)&gpu_vx,     ARRAY_BYTES);
    cudaMalloc((void **)&gpu_vy,     ARRAY_BYTES);
    cudaMalloc((void **)&gpu_m,      ARRAY_BYTES);
    cudaMalloc((void **)&gpu_x_new,  ARRAY_BYTES);
    cudaMalloc((void **)&gpu_y_new,  ARRAY_BYTES);
    cudaMalloc((void **)&gpu_vx_new, ARRAY_BYTES);
    cudaMalloc((void **)&gpu_vy_new, ARRAY_BYTES);

    // Transfer the state to the GPU. NOTE: the original also copied the
    // uninitialized *_new host buffers up; the kernel writes those buffers
    // before reading them, so the copies were dead and have been dropped.
    cudaMemcpy(gpu_x,  x,  ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_y,  y,  ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_vx, vx, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_vy, vy, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_m,  m,  ARRAY_BYTES, cudaMemcpyHostToDevice);

    start = clock();
    for (int niter = 0; niter < NUM_ITER; niter++) {
        asteroid<<<1, noOfObjects>>>(gpu_x, gpu_y, gpu_vx, gpu_vy, gpu_m,
                                     gpu_x_new, gpu_y_new,
                                     gpu_vx_new, gpu_vy_new, noOfObjects);
        if (niter % NUM_ITER_SHOW == 0)
            printf("Iteration %d/%d\n", niter, NUM_ITER);
    }
    // BUG FIX: kernel launches are asynchronous; without this barrier the
    // measured interval only covered the launch overhead, not the work.
    cudaDeviceSynchronize();
    end = clock();

    // Copy the final state back to the CPU.
    cudaMemcpy(x,      gpu_x,      ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(y,      gpu_y,      ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(vx,     gpu_vx,     ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(vy,     gpu_vy,     ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(m,      gpu_m,      ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(x_new,  gpu_x_new,  ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(y_new,  gpu_y_new,  ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(vx_new, gpu_vx_new, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(vy_new, gpu_vy_new, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Report the displacement of each object from its initial position.
    file = fopen(RESULTSFILE, "w");
    fprintf(file, "Movement of objects\n");
    fprintf(file, "-------------------\n");
    for (i = 0; i < noOfObjects; i++) {
        double mov = sqrt(pow(x0[i] - x[i], 2.0) + pow(y0[i] - y[i], 2.0));
        fprintf(file, " Object %i - %f meters\n", i, mov);
    }
    // Simulated time: one second per iteration (implicit dt = 1 in the kernel).
    int hours = NUM_ITER / 3600;
    int mins  = (NUM_ITER - hours * 3600) / 60;
    int secs  = (NUM_ITER - hours * 3600 - mins * 60);
    fprintf(file, "Time elapsed: %i seconds (%i hours, %i minutes, %i seconds)\n",
            NUM_ITER, hours, mins, secs);
    time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    fprintf(file, "Processing time: %f sec.\n", time_used);
    fclose(file);

    // Release device memory.
    cudaFree(gpu_x);  cudaFree(gpu_y);  cudaFree(gpu_vx);  cudaFree(gpu_vy);
    cudaFree(gpu_m);
    cudaFree(gpu_x_new);  cudaFree(gpu_y_new);
    cudaFree(gpu_vx_new); cudaFree(gpu_vy_new);

    // BUG FIX: the original leaked every host allocation.
    free(x);  free(y);  free(vx);  free(vy);  free(m);
    free(x0); free(y0); free(vx0); free(vy0);
    free(x_new); free(y_new); free(vx_new); free(vy_new);
} // main
1,163
#include <stdio.h>

unsigned int N = 1 << 12;   // number of elements (4096)
unsigned int N_p = N / 4;   // kept for compatibility with the original file

// Element-wise product in place on y: y[i] = x[i] * y[i] for i < n.
// Guarded because the grid is rounded up past n.
__global__ void mul(int n, int *x, int *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = x[i] * y[i];
}

int main(void)
{
    int *d_x, *d_y;
    int *x, *y;

    x = (int *)malloc(N * sizeof(int));
    y = (int *)malloc(N * sizeof(int));
    cudaMalloc(&d_x, N * sizeof(int));
    cudaMalloc(&d_y, N * sizeof(int));

    // Both inputs cycle through 0..15, so the expected product is (i%16)^2.
    for (int i = 0; i < N; i++) {
        x[i] = i % 16;
        y[i] = i % 16;
    }

    cudaMemcpy(d_x, x, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N * sizeof(int), cudaMemcpyHostToDevice);

    mul<<<(N + 255) / 256, 256>>>(N, d_x, d_y);

    // A blocking cudaMemcpy also synchronizes with the kernel, so the extra
    // cudaDeviceSynchronize calls from the original are unnecessary.
    cudaMemcpy(y, d_y, N * sizeof(int), cudaMemcpyDeviceToHost);

    // BUG FIX: the original declared `int maxError = 0.0;` and never updated
    // it, so "Max error: 0" was printed regardless of the results. Count the
    // mismatches instead. The (int8_t) casts in the diagnostic printf were
    // also dropped: they truncated values >= 128 (e.g. 15*15 = 225) and made
    // the debug output lie about what was in memory.
    int errors = 0;
    for (int i = 0; i < N; i++) {
        int expected = (i % 16) * (i % 16);
        if (y[i] != expected) {
            errors++;
            printf("Elements at pos %d not matching: y[i]=%x, expected = %x, i*i=%x\n",
                   i, y[i], x[i] * x[i], expected);
        }
    }
    printf("Mismatched elements: %d\n", errors);

    // BUG FIX: the original leaked both device and host buffers.
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
    return 0;
}
1,164
#include <iostream>
#include <cstdlib>

// Kernel: every thread stores the constant 42 into its own slot of the array.
__global__ void setVec(int *array)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    array[idx] = 42;
}

int main()
{
    const int N = 8;                          // element count
    const int buffer_size = sizeof(int) * N;  // buffer size in bytes

    int *host_data = new int[N];  // host-side copy of the vector
    int *device_data;             // device-side copy of the vector
    cudaMalloc((void **)&device_data, buffer_size);

    // Seed the host buffer with descending values N, N-1, ..., 1.
    for (int i = 0; i < N; ++i)
        host_data[i] = N - i;

    // Show the vector before the kernel touches it.
    std::cout << "Array before kernel" << std::endl;
    for (int i = 0; i < N; ++i)
        std::cout << "array[" << i << "]: " << host_data[i] << std::endl;

    // Ship the data to the GPU, overwrite every element, and bring it back.
    cudaMemcpy(device_data, host_data, buffer_size, cudaMemcpyHostToDevice);

    const int numBlocks = 1;
    const int threadsPerBlock = N;  // one thread per element
    setVec<<<numBlocks, threadsPerBlock>>>(device_data);

    cudaMemcpy(host_data, device_data, buffer_size, cudaMemcpyDeviceToHost);

    // Show the vector after the kernel: every slot should now hold 42.
    std::cout << std::endl << "Array after kernel" << std::endl;
    for (int i = 0; i < N; ++i)
        std::cout << "array[" << i << "]: " << host_data[i] << std::endl;

    cudaFree(device_data);
    delete[] host_data;
    return 0;
}
1,165
#include <stdio.h>
#include <stdlib.h>

#define BLOCKSIZE 128
#define gpuErrchk(error) __checkCuda(error, __FILE__, __LINE__)
#define iDivUp(x,y) (((x)+(y)-1)/(y))   // ceil-div: blocks needed to cover x

/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
    if (error != cudaSuccess) {
        printf("checkCuda error at %s:%i: %s\n", file, line,
               cudaGetErrorString(cudaGetLastError()));
        exit(-1);
    }
    return;
}

/*******************/
/* KERNEL FUNCTION */
/*******************/
// Repeatedly squares each element (busy work used to exercise each GPU).
__global__ void kernelFunction(double * __restrict__ d_data, const unsigned int NperGPU)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < NperGPU) {
        for (int k = 0; k < 1000; k++)
            d_data[tid] = d_data[tid] * d_data[tid];
    }
}

/******************/
/* PLAN STRUCTURE */
/******************/
// Per-GPU descriptor: one device buffer of rows*cols doubles.
typedef struct {
    int width;
    int height;
    double *elements;
} Matrix;

/********/
/* MAIN */
/********/
int main()
{
    const int numGPUs = 2;
    const int rows    = 500;
    const int cols    = 1000;
    const int NperGPU = rows * cols;
    const int N       = NperGPU * numGPUs;

    Matrix plan[numGPUs];

    // Allocate one device buffer per GPU.
    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        gpuErrchk(cudaMalloc(&(plan[k].elements), NperGPU * sizeof(double)));
    }

    // Host-side input matrix, split evenly across the GPUs.
    double *inputMatrix = (double *)malloc(N * sizeof(double));
    if (inputMatrix == NULL) {
        printf("host allocation failed\n");
        exit(-1);
    }
    // BUG FIX: the original never initialized inputMatrix, so the kernels
    // squared indeterminate values. Deterministic fill instead.
    for (int i = 0; i < N; i++)
        inputMatrix[i] = 1.0;

    // --- "Breadth-first" approach - no async:
    // issue all H2D copies, then all kernels, then all D2H copies.
    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        gpuErrchk(cudaMemcpy(plan[k].elements, inputMatrix + k * NperGPU,
                             NperGPU * sizeof(double), cudaMemcpyHostToDevice));
    }
    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        kernelFunction<<<iDivUp(NperGPU, BLOCKSIZE), BLOCKSIZE>>>(plan[k].elements, NperGPU);
        gpuErrchk(cudaGetLastError());  // catch launch-configuration errors
    }
    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        gpuErrchk(cudaMemcpy(inputMatrix + k * NperGPU, plan[k].elements,
                             NperGPU * sizeof(double), cudaMemcpyDeviceToHost));
    }

    // BUG FIX: cudaDeviceReset only resets the CURRENT device, so buffers on
    // the other GPUs were never released; free each one explicitly.
    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        gpuErrchk(cudaFree(plan[k].elements));
    }
    free(inputMatrix);  // BUG FIX: host buffer was leaked

    gpuErrchk(cudaDeviceReset());
}
1,166
/*
 * Program to show the COMPUTE CAPABILITY of the current device
 *
 * Version: Jul 2021
 */
#include <stdio.h>

//#define CURRENT_DEVICE 1
#define EXIT_SUCCESSFULLY 0
#define EXIT_ERROR -1

int main(int argc, char** argv)
{
    cudaError_t result;
    int device, ccMajor, ccMinor;

    //cudaSetDevice(CURRENT_DEVICE);
    cudaGetDevice(&device);

    // BUG FIX: the original queried cudaDevAttrWarpSize (always 32 on current
    // hardware) into a variable misleadingly named "coresPerSM" -- neither
    // matched the stated goal of reporting the compute capability. Query the
    // major/minor compute-capability attributes instead.
    result = cudaDeviceGetAttribute(&ccMajor, cudaDevAttrComputeCapabilityMajor, device);
    if (result != cudaSuccess)
        return EXIT_ERROR;
    result = cudaDeviceGetAttribute(&ccMinor, cudaDevAttrComputeCapabilityMinor, device);
    if (result != cudaSuccess)
        return EXIT_ERROR;

    printf("%d.%d\n", ccMajor, ccMinor);
    return EXIT_SUCCESSFULLY;
}
1,167
/******************************************************************************************
 Source Code  : SOAvsAOS.cu
 Objective    : Demonstrate the global-memory-bandwidth advantage of a
                Structure of Arrays (SoA) layout over an Array of Structures
                (AoS) layout.
 Description  : A "Triangle" struct (AoS: three float[3] vertices per element)
                and a "Triangles" struct (SoA: one array per coordinate of
                each vertex) are both initialized on the GPU with identical
                values, the two initializing kernels are timed, and the
                achieved bandwidths are reported.
 Input        : none
 Output       : bandwidths achieved by the two data representations
 Modified     : Aug 2011
 Author       : RarchK
 ****************************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <float.h>
#include <stdlib.h>

#define NO_OF_TRIANGLES 1000000
#define BLOCK_SIZE 128
#define NO_OF_PATTERNS 2
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))

void printResults(void);

// AoS element: one triangle, vertices stored contiguously per element.
struct Triangle {
    float A[3];
    float B[3];
    float C[3];
};

// SoA layout: one array per vertex coordinate, spanning all triangles.
struct Triangles {
    float *Ax, *Ay, *Az;
    float *Bx, *By, *Bz;
    float *Cx, *Cy, *Cz;
};

/////////////////////////////////////////////////////////////////////////////
// AoS initializer: each triangle set to (0,0,0),(1,1,1),(2,2,2).
// Adjacent threads touch elements sizeof(Triangle) bytes apart (uncoalesced).
/////////////////////////////////////////////////////////////////////////////
__global__ void setTriangles(Triangle *myArrayOfTriangles)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < NO_OF_TRIANGLES) {
        myArrayOfTriangles[idx].A[0] = 0;
        myArrayOfTriangles[idx].A[1] = 0;
        myArrayOfTriangles[idx].A[2] = 0;
        myArrayOfTriangles[idx].B[0] = 1;
        myArrayOfTriangles[idx].B[1] = 1;
        myArrayOfTriangles[idx].B[2] = 1;
        myArrayOfTriangles[idx].C[0] = 2;
        myArrayOfTriangles[idx].C[1] = 2;
        myArrayOfTriangles[idx].C[2] = 2;
    }
}

/////////////////////////////////////////////////////////////////////////////
// SoA initializer: same values as setTriangles, but adjacent threads write
// adjacent floats of each coordinate array (fully coalesced).
/////////////////////////////////////////////////////////////////////////////
__global__ void setTriangles2(Triangles myTrianglesStructure)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < NO_OF_TRIANGLES) {
        myTrianglesStructure.Ax[idx] = 0;
        myTrianglesStructure.Ay[idx] = 0;
        myTrianglesStructure.Az[idx] = 0;
        myTrianglesStructure.Bx[idx] = 1;
        myTrianglesStructure.By[idx] = 1;
        myTrianglesStructure.Bz[idx] = 1;
        myTrianglesStructure.Cx[idx] = 2;
        myTrianglesStructure.Cy[idx] = 2;
        myTrianglesStructure.Cz[idx] = 2;
    }
}

/////////////////////////////////////////////////////////////////////////////
// Equivalence check of the two initializing kernels: clears *dCorrectBool
// if any field differs between the two representations.
/////////////////////////////////////////////////////////////////////////////
__global__ void Test(Triangle *myArrayOfTriangles, Triangles myTrianglesStructure,
                     int *dCorrectBool)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < NO_OF_TRIANGLES)
        if (myTrianglesStructure.Ax[idx] != myArrayOfTriangles[idx].A[0] ||
            myTrianglesStructure.Ay[idx] != myArrayOfTriangles[idx].A[1] ||
            myTrianglesStructure.Az[idx] != myArrayOfTriangles[idx].A[2] ||
            myTrianglesStructure.Bx[idx] != myArrayOfTriangles[idx].B[0] ||
            myTrianglesStructure.By[idx] != myArrayOfTriangles[idx].B[1] ||
            myTrianglesStructure.Bz[idx] != myArrayOfTriangles[idx].B[2] ||
            myTrianglesStructure.Cx[idx] != myArrayOfTriangles[idx].C[0] ||
            myTrianglesStructure.Cy[idx] != myArrayOfTriangles[idx].C[1] ||
            myTrianglesStructure.Cz[idx] != myArrayOfTriangles[idx].C[2])
            *dCorrectBool = 0;
}

// Timing accumulators per access pattern (times in ms, from cudaEvents).
static double avgtime[NO_OF_PATTERNS] = {0},
              maxtime[NO_OF_PATTERNS] = {0},
              mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
// Bytes written per kernel invocation: 9 floats per triangle.
static double bytes = sizeof(float) * NO_OF_TRIANGLES * 9;

// Allocate one float array of `bytes` on the device, aborting with a message
// naming the field on failure.
// BUG FIX: the original compared against cudaErrorMemoryAllocation only,
// silently ignoring every other failure mode; test != cudaSuccess instead.
static float *checkedDeviceAlloc(size_t bytes, const char *name)
{
    float *ptr = NULL;
    cudaError_t err = cudaMalloc((void **)&ptr, bytes);
    if (err != cudaSuccess) {
        printf("error in device memory allocation for - %s\n exiting out of the program.....\n", name);
        exit(-1);
    }
    return ptr;
}

/////////////////////////////////////////////////////////////////////////////
// main: time the two initializing kernels NTIMES, verify they produce the
// same data, and report the achieved bandwidths.
/////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
    Triangle *myArrayOfTriangles;   // Array of Structures
    Triangles myTrianglesStructure; // Structure of Arrays
    int *dCorrectBool;              // device-side equivalence flag
    int hCorrectBool = 1;
    float elapsedTimes[NO_OF_PATTERNS][NTIMES];
    cudaEvent_t start, stop;
    cudaError_t err = cudaSuccess;

    // events used for timing the kernels
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // device memory for the AoS representation
    err = cudaMalloc((void **)&myArrayOfTriangles, sizeof(Triangle) * NO_OF_TRIANGLES);
    if (err != cudaSuccess) {
        printf("error in device memory allocation for - myArrayOfTriangles\n exiting out of the program.....\n");
        exit(-1);
    }

    // device memory for the SoA representation (nine coordinate arrays)
    const size_t fieldBytes = sizeof(float) * NO_OF_TRIANGLES;
    myTrianglesStructure.Ax = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Ax");
    myTrianglesStructure.Ay = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Ay");
    myTrianglesStructure.Az = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Az");
    myTrianglesStructure.Bx = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Bx");
    myTrianglesStructure.By = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.By");
    myTrianglesStructure.Bz = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Bz");
    myTrianglesStructure.Cx = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Cx");
    myTrianglesStructure.Cy = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Cy");
    myTrianglesStructure.Cz = checkedDeviceAlloc(fieldBytes, "myTrianglesStructure.Cz");

    err = cudaMalloc((void **)&dCorrectBool, sizeof(int));
    if (err != cudaSuccess) {
        printf("error in device memory allocation for dCorrectBool\n exiting out of the program.....\n");
        exit(-1);
    }

    // seed the device-side flag with "equivalent"
    cudaMemcpy(dCorrectBool, &hCorrectBool, sizeof(int), cudaMemcpyHostToDevice);

    // 1D grid size (rounded up)
    int gridSize = NO_OF_TRIANGLES / BLOCK_SIZE;
    if (NO_OF_TRIANGLES % BLOCK_SIZE != 0)
        gridSize += 1;

    // time each access pattern NTIMES
    for (int k = 0; k < NTIMES; k++) {
        // PATTERN 1: Array of Structures
        cudaEventRecord(start, 0);
        setTriangles<<<gridSize, BLOCK_SIZE>>>(myArrayOfTriangles);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[0][k], start, stop);

        // PATTERN 2: Structure of Arrays
        cudaEventRecord(start, 0);
        setTriangles2<<<gridSize, BLOCK_SIZE>>>(myTrianglesStructure);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[1][k], start, stop);
    }

    // verify both kernels produced identical data
    Test<<<gridSize, BLOCK_SIZE>>>(myArrayOfTriangles, myTrianglesStructure, dCorrectBool);
    cudaMemcpy(&hCorrectBool, dCorrectBool, sizeof(int), cudaMemcpyDeviceToHost);

    if (hCorrectBool != 1) {
        printf("the kernel executions are not equivalent\n");
        printf("exiting out of the program\n");
    } else {
        for (int i = 0; i < NO_OF_PATTERNS; i++)
            mintime[i] = FLT_MAX;
        // skip the first iteration (warm-up)
        for (int k = 1; k < NTIMES; k++) {
            for (int i = 0; i < NO_OF_PATTERNS; i++) {
                avgtime[i] = avgtime[i] + elapsedTimes[i][k];
                mintime[i] = MIN(mintime[i], elapsedTimes[i][k]);
                maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
            }
        }
        // bandWidths holds bytes/ms; printResults divides by 1e6 -> GB/s
        for (int i = 0; i < NO_OF_PATTERNS; i++) {
            avgtime[i] = avgtime[i] / (double)(NTIMES - 1);
            bandWidths[i] = bytes / mintime[i];
        }
        printResults();
        printf("\n\n**** successful termination of the program ****\n\n");
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cudaFree(myArrayOfTriangles);
    cudaFree(myTrianglesStructure.Ax);
    cudaFree(myTrianglesStructure.Ay);
    cudaFree(myTrianglesStructure.Az);
    cudaFree(myTrianglesStructure.Bx);
    cudaFree(myTrianglesStructure.By);
    cudaFree(myTrianglesStructure.Bz);
    cudaFree(myTrianglesStructure.Cx);
    cudaFree(myTrianglesStructure.Cy);
    cudaFree(myTrianglesStructure.Cz);
    cudaFree(dCorrectBool);  // BUG FIX: the original leaked this allocation

    return 0;
}

/////////////////////////////////////////////////////////////////////////////
// Prints min/avg/max kernel times and the bandwidth achieved by each of the
// two data layouts. bandWidths[] is in bytes/ms, hence /1e6 yields GB/s.
/////////////////////////////////////////////////////////////////////////////
void printResults()
{
    printf("Demonstrating the advantages of Structure of arrays over Array of structures in data representation\n");
    // BUG FIX: the size expression is size_t; %zu instead of %ld
    printf("The transfered data size (Bytes): %zu\n", NO_OF_TRIANGLES * sizeof(float) * 9);
    printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
    printf("Pattern \t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
    printf("-------------------------------------------------------------------------------------------------------------------------------\n");
    for (int i = 0; i < NO_OF_PATTERNS; i++) {
        switch (i) {
        case 0:
            printf("Array of Structures");
            break;
        case 1:
            printf("Structure of Arrays");
            break;
        }
        printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",
               bandWidths[i] / 1000000, avgtime[i], mintime[i], maxtime[i]);
    }
    printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
1,168
#include<stdio.h>

// Each thread in the single block announces itself with its thread index.
__global__ void helloWorld()
{
    printf("Hello World! My threadId is %d\n", threadIdx.x);
}

int main()
{
    const int threadsPerBlock = 256;
    helloWorld<<<1, threadsPerBlock>>>();
    // Block until the kernel finishes so its printf output is flushed
    // before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
1,169
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <time.h>
#include <thread>
#include <vector>

using namespace std;

#define DSIZE 20000000
#define BSIZE 512

// Fill the host buffer with pseudo-random digits 0..9. rand() is left
// unseeded, so every run produces the same sequence (reproducible).
void initData(int data[]) {
    for (int i = 0; i < DSIZE; i++) {
        data[i] = rand() % 10;
    }
}

/*
 Sum-of-squares reduction. Each block accumulates a grid-stride partial sum
 into shared memory and reduces it with a binary tree; result[b] receives
 block b's partial sum. Requires blockDim.x == BSIZE (a power of two).
*/
__global__ void sumGPU(int dataGPU[], int* result) {
    // tid: global index of this thread
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int res[BSIZE]; // per-block partial sums
    res[threadIdx.x] = 0;
    // grid-stride accumulation of squared elements
    for (int i = tid; i < DSIZE; i += gridDim.x * blockDim.x) {
        res[threadIdx.x] += dataGPU[i] * dataGPU[i];
    }
    __syncthreads();

    // Tree reduction in shared memory.
    // BUG FIX: the original placed __syncthreads() inside
    // `while (threadIdx.x < thread_num)`, a divergent branch -- threads that
    // left the loop never reached the barrier, which is undefined behavior
    // (possible hang / corrupt sums). Every thread must execute the barrier,
    // so the guard belongs on the addition only.
    for (int stride = BSIZE / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) {
            res[threadIdx.x] += res[threadIdx.x + stride];
        }
        __syncthreads();
    }

    // BUG FIX: in the original every thread of the block stored res[0],
    // racing on the same location; only thread 0 holds the final value.
    if (threadIdx.x == 0) {
        result[blockIdx.x] = res[0];
    }
}

// Single-threaded CPU reference: sum of squares of all DSIZE elements.
// (Max possible sum 81 * 2e7 ~ 1.6e9 still fits in a 32-bit int.)
int sumCPU(int data[]) {
    clock_t start, end;
    start = clock();
    int sum = 0;
    for (int i = 0; i < DSIZE; i++) {
        sum += data[i] * data[i];
    }
    end = clock();
    cout << "CPU running time(ms):" << (double)(end - start) * 1000.0f / CLOCKS_PER_SEC << endl;
    cout << "CPU one-thread result:" << sum << endl;
    return sum;
}

/////////////////////// main program ////////////////
int main() {
    int* data = new int[DSIZE];
    initData(data);

    cout << "\n******** CPU ***********" << endl;
    sumCPU(data);

    /////// GPU job //////////
    cout << "\n******** GPU ***********" << endl;
    dim3 blocksize(512); // must equal BSIZE (kernel's shared-array length)
    dim3 gridsize(16);
    cout << "block size:" << blocksize.x << endl;
    cout << "grid size:" << gridsize.x << endl;

    clock_t start, end;
    start = clock();

    // device buffers (a stack array of DSIZE ints would overflow the stack)
    int* dataGPU;
    int result_size = gridsize.x; // one partial sum per block
    int* result;
    cudaMalloc((void**)&dataGPU, sizeof(int) * DSIZE);
    cudaMalloc((void**)&result, sizeof(int) * result_size);
    cudaMemcpy(dataGPU, data, sizeof(int) * DSIZE, cudaMemcpyHostToDevice);

    sumGPU<<<gridsize, blocksize>>>(dataGPU, result);

    // BUG FIX: `int psum[result_size]` was a C variable-length array, which
    // is not legal C++; use a vector instead. The blocking cudaMemcpy also
    // synchronizes with the kernel.
    vector<int> psum(result_size, 0);
    cudaMemcpy(psum.data(), result, sizeof(int) * result_size, cudaMemcpyDeviceToHost);

    // final reduction of the per-block partial sums on the host
    int sum = 0;
    for (int i = 0; i < result_size; i++) {
        sum += psum[i];
    }
    cout << "GPU result:" << sum << endl;
    end = clock();
    cout << "GPU running time(ms):" << (double)(end - start) * 1000.0f / CLOCKS_PER_SEC << endl;

    // BUG FIX: the original leaked both device buffers and the host array.
    cudaFree(dataGPU);
    cudaFree(result);
    delete[] data;
}
1,170
#include<stdio.h>
#include<stdio.h> // NOTE(review): duplicate include of <stdio.h> - harmless, could be removed
#include<string.h>
#include <stdlib.h>
#include <stdarg.h>
#include<time.h>
#include <math.h>

// Abort with file/line context whenever a CUDA runtime call fails.
#define CHECK(call) \
{ \
 const cudaError_t error = call; \
 if (error != cudaSuccess) \
 { \
 printf("Error: %s:%d, ", __FILE__, __LINE__); \
 printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
 exit(1); \
 } \
}

// Training hyper-parameters and kernel launch geometry (32x32 thread blocks).
#define ITERATION_COUNT 300
#define BATCH_SIZE 32
#define BLOCK_X 32
#define BLOCK_Y 32
float learning_rate = 1.0e-4;

// Network topology: INPUT -> HIDDEN (sigmoid) -> OUTPUT (softmax, see kernels below).
#define INPUT_NODE_COUNT 32754
#define HIDDEN_LAYER_NODE_COUNT 128
#define OUTPUT_NODE_COUNT 32

// All vectors/matrices are stored as this structure in the memory...
// Whole vector/matrix data is stored in one dimensional array...
// All numbers are floating point numbers....
struct Vector2D
{
 // This pointer points where the vector/matrix data lies (row-major, height*width floats).
 float * data;
 // Row number of the vector/matrix...
 int height;
 // Column number of the vector/matrix...
 int width;
 int size;
};
// We are defining a type from this structure definition...
typedef struct Vector2D Vector2D;

// Remembers the device data buffer of the most recently created matrix when
// CreateVector2D is called with store == true (used as an out-of-band handle).
float * device_matrix_location;

// Allocates a Vector2D (struct + data buffer) in device memory.
// data:  host buffer copied to the device when fill == true (may be NULL otherwise)
// fill:  copy `data` into the device buffer
// store: record the device buffer in device_matrix_location
// Returns a device pointer to the populated Vector2D struct.
Vector2D * CreateVector2D(float * data, int height, int width, bool fill = true, bool store = false)
{
 // A new structure is allocated in GPU memory for matrix/vector...
Vector2D * temp ; CHECK(cudaMalloc(&temp, sizeof(Vector2D))); float * temp2; CHECK(cudaMalloc(&temp2, sizeof(float)*height*width)); if(fill == true) CHECK(cudaMemcpy(temp2, data, sizeof(float)*height*width, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&temp->data, &temp2, sizeof(float *), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&temp->height, (void *)(&height), sizeof(int), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&temp->width, (void *)(&width), sizeof(int), cudaMemcpyHostToDevice)); //temp->height = height; //temp->width = width; if(store == true) device_matrix_location = temp2; cudaDeviceSynchronize(); return temp; } __global__ void MatrixSubtract(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix Subtract diff dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixSubtractvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixSubtract\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] - vec2->data[tid]; result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] - vec2->data[tid+blockDim.x]; result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] - vec2->data[tid+2*blockDim.x]; result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] - vec2->data[tid+3*blockDim.x]; } } #define TILE_WIDTH BLOCK_X #define TILE_HEIGHT BLOCK_Y __global__ void MatrixProductShared( Vector2D * __restrict__ result, Vector2D * __restrict__ m1, Vector2D * __restrict__ 
m2 )//float *A, float *B, float *C ) { { __shared__ float A_tile[TILE_HEIGHT][TILE_WIDTH]; __shared__ float B_tile[TILE_HEIGHT][TILE_WIDTH+1]; int numARows = m1->height, numAColumns= m1->width, numBRows = m2->height, numBColumns = m2->width, numCRows = result->height, numCColumns = m2->width; float * A = m1->data, * B = m2->data, * C = result->data; float sum = 0.0; // where am I? // tx for thread_x or tile_x int tx = threadIdx.x; int ty = threadIdx.y; // cx for top left corner of tile in C int cx = blockIdx.x * blockDim.x; int cy = blockIdx.y * blockDim.y; // Cx for cell coordinates in C int Cx = cx + tx; int Cy = cy + ty; int total_tiles = (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH; for (int tile_idx = 0; tile_idx < total_tiles; tile_idx++) { // the corresponding tiles' top left corners are: // for A: row = blockIdx.y * blockDim.y, col = tile_idx * TILE_WIDTH // for B: row = tile_idx * TILE_WIDTH, col = blockIdx.x * blockDim.x // loading tiles int Ax = tile_idx * TILE_WIDTH + tx; int Ay = cy + ty; int Bx = cx + tx; int By = tile_idx * TILE_WIDTH + ty; if (Ax < numAColumns && Ay < numARows) { A_tile[ty][tx] = A[Ay * numAColumns + Ax]; } else { A_tile[ty][tx] = 0.0; } if (Bx < numBColumns && By < numBRows) { B_tile[ty][tx] = B[By * numBColumns + Bx]; } else { B_tile[ty][tx] = 0.0; } __syncthreads(); // multiplying tiles #pragma unroll 4 for (int i = 0; i < TILE_WIDTH; i++) { sum += A_tile[ty][i] * B_tile[i][tx]; } __syncthreads(); } // saving result (discarded if we're in the wrong thread) if (Cx < numCColumns && Cy < numCRows) { C[Cy * numCColumns + Cx] = sum; } } __global__ void TransposeVector2DShared(Vector2D * __restrict__ res, Vector2D * __restrict__ m1) { int thx = blockIdx.x*blockDim.x+ threadIdx.x; int thy = blockIdx.y*blockDim.y+threadIdx.y; int tid = thx + thy*m1->width; __shared__ float ordered_data[BLOCK_Y][BLOCK_X+1]; __shared__ float transposed_data[BLOCK_Y][BLOCK_X+1]; int j = threadIdx.x+blockDim.x*blockIdx.y; int k = threadIdx.y + 
blockDim.y*blockIdx.x; int target = j + res->width*k; if(tid < m1->width*m1->height) { //padded ordered_data[threadIdx.y][threadIdx.x] = m1->data[tid] ; } __syncthreads(); //transposed_data[thy+thx*m1->height] = ordered_data[tid] ; if(thx < m1->width && thy< m1->height) { transposed_data[threadIdx.x][threadIdx.y] = ordered_data[threadIdx.y][threadIdx.x]; } __syncthreads(); if(thx < m1->width && thy< m1->height) { res->data [target] = transposed_data[threadIdx.y][threadIdx.x] ; //printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y); } if(tid ==0) { //printf("\nTransposeVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < m1->height) || (blockDim.x*gridDim.x<m1->width)) { printf("\nTransposeVector2D\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } } __global__ void DisplayVector2D(Vector2D * vector) { printf("["); for(int h = 0; h < vector->height; h++) { printf("["); for( int w = 0; w < vector->width-1; w++) { printf("%f, ", vector->data[h*vector->width+w]); } printf("%f], \n", vector->data[h*vector->width+vector->width-1]); } printf("]\n"); printf("Row : %d - Width : %d \n\n", vector->height, vector->width); } __device__ float error_sum[BATCH_SIZE]; __global__ void Sum2D(Vector2D * __restrict__ vec) { int tid = threadIdx.y; int val = 0; int width = vec->width; #pragma unroll 4 for(int a = 0; a < width; a++) { val += vec->data[a+tid*width]; } error_sum[tid] = val; } __device__ int arg_max_result[BATCH_SIZE]; __global__ void ArgMax2D(Vector2D * __restrict__ vec1) { int tid = blockIdx.y*blockDim.y + threadIdx.y; if(tid ==0) { //printf("\nArgMax2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, 
blockDim.y*gridDim.y); if(blockDim.y*gridDim.y < vec1->height) { printf("\nArgMax2D\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->height) { float max = -100000; int max_index = 0; #pragma unroll 4 for(int a = 0; a < vec1->width;a++) { if(vec1->data[tid*vec1->width+a]>max) { max = vec1->data[tid*vec1->width+a]; max_index = a; } } arg_max_result[tid] = max_index; } } __global__ void Softmax(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1) { int tid = blockIdx.y*blockDim.y + threadIdx.y; if(tid ==0) { //printf("\nSoftmaxvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if(blockDim.y*gridDim.y < vec1->height) { printf("\nSoftmax\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->height) { float toplam = 0; #pragma unroll 4 for(int a = 0; a < vec1->width;a++) { toplam += vec1->data[a+tid*vec1->width]; } for(int a = 0; a < vec1->width;a++) { result->data[a+tid*vec1->width] = vec1->data[a+tid*vec1->width]/toplam; } } } __global__ void PointerSet(Vector2D * f1, Vector2D * f2, int shift, int batch_size) { f1->width = f2->width; f1->height = batch_size; f1->data = f2->data + f2->width*shift; } float generate_uniform(float a, float b) { return rand() / (RAND_MAX + 1.0) * (b - a) + a; } Vector2D * CreateWeightMatrix(int input_count, int output_count) { float init_range = 0; Vector2D * temp = (Vector2D *)malloc(sizeof(Vector2D)); Vector2D * device_temp; CHECK(cudaMalloc(&device_temp, sizeof(Vector2D))); temp->height = input_count; //For bias... 
temp->width = output_count; temp->data = (float * )malloc(sizeof(float)*(input_count)*output_count); init_range = sqrt(2.0 / input_count); for(int a=0; a<(input_count)*output_count; a++) { temp->data[a] = generate_uniform(-init_range, init_range); } float * temp2; CHECK(cudaMalloc(&temp2, sizeof(float)*temp->height*temp->width)); CHECK(cudaMemcpy(temp2, temp->data, sizeof(float)*temp->height*temp->width, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&device_temp->data, &temp2, sizeof(float *), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&device_temp->height, &(temp->height), sizeof(int), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&device_temp->width, &(temp->width), sizeof(int), cudaMemcpyHostToDevice)); return device_temp; } Vector2D * CreateVector2DCPU(float * data, int height, int width) { // A new structure is allocated in memory for matrix/vector... Vector2D * temp = (Vector2D *)malloc(sizeof(struct Vector2D)); temp->data = data; temp->height = height; temp->width = width; return temp; }; Vector2D * CreateOneHot(Vector2D * indexes, int vector_length) { Vector2D * one_hot_vector = (Vector2D*)malloc(sizeof(Vector2D)); one_hot_vector->height = indexes->height; one_hot_vector->width = vector_length; one_hot_vector->size = one_hot_vector->height; one_hot_vector->data = (float *)malloc(sizeof(float)*indexes->height*vector_length); memset(one_hot_vector->data, 0, sizeof(float)*indexes->height*vector_length); for(int i=0; i<one_hot_vector->height;i++) { one_hot_vector->data[i*vector_length+(int)indexes->data[i*indexes->width]] = 1.0; } return one_hot_vector; } void DisplayVector2DCPU(Vector2D * vector) { printf("["); for(int h = 0; h < vector->height; h++) { printf("["); for( int w = 0; w < vector->width-1; w++) { printf("%f, ", vector->data[h*vector->width+w]); } printf("%f], \n", vector->data[h*vector->width+vector->width-1]); } printf("\b\b\b]"); } __global__ void AddandSigmoid(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) 
{
 // Shape guard: both operands must match; report (don't trap) on mismatch.
 if((vec1->width != vec2->width) || (vec1->height != vec2->height))
 {
 printf("\n\n**********Matrix add diff dimension....");
 return;
 }
 // Each thread processes 4 elements spaced blockDim.x apart, so the x grid
 // only needs to cover width/4 columns (callers divide grid.x by 4).
 int tx = blockIdx.x*blockDim.x*4+ threadIdx.x;
 int ty = blockIdx.y*blockDim.y + threadIdx.y;
 int tid = ty*vec1->width+tx;
 if(tid ==0)
 {
 // Launch-geometry sanity check, printed once by the first thread.
 if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4))
 {
 printf("\nAddandSigmoid\n"); // FIX: was "\AddandSigmoid" - invalid escape sequence "\A"
 printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
 }
 }
 // Fused bias-add + sigmoid, 4 elements per thread.
 if(tid + blockDim.x*3 < vec1->width*vec1->height)
 {
 result->data[tid] = 1.0/(1+exp(-(vec1->data[tid] + vec2->data[tid])));
 result->data[tid+blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+blockDim.x] + vec2->data[tid+blockDim.x])));
 result->data[tid+2*blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+2*blockDim.x] + vec2->data[tid+2*blockDim.x])));
 result->data[tid+3*blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+3*blockDim.x] + vec2->data[tid+3*blockDim.x])));
 }
}

// Fused bias-add + exponential (softmax numerator), one element per thread.
__global__ void AddandExponential(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2)
{
 if((vec1->width != vec2->width) || (vec1->height != vec2->height))
 {
 printf("\n\n**********Matrix add diff dimension....");
 return;
 }
 int tx = blockIdx.x*blockDim.x+ threadIdx.x;
 int ty = blockIdx.y*blockDim.y + threadIdx.y;
 int tid = ty*vec1->width+tx;
 if(tid ==0)
 {
 // Launch-geometry sanity check, printed once by the first thread.
 if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
 {
 printf("\nAddandExponential\n"); // FIX: was "\AddandExponential" - invalid escape sequence "\A"
 printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
 }
 }
 if(tid < vec1->width*vec1->height)
 {
 result->data[tid] = exp(vec1->data[tid] +
vec2->data[tid]); } } //Combination of matrixpairwise-Scalarminus-matrixpairwise in backpropagte.... __global__ void LayerErrorCalculate(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********MatrixPairwiseProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixPairwiseProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid + 3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] * vec2->data[tid]*(1-vec2->data[tid]) ; result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] * vec2->data[tid+blockDim.x]*(1-vec2->data[tid+blockDim.x]) ; result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] * vec2->data[tid+2*blockDim.x]*(1-vec2->data[tid+2*blockDim.x]) ; result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] * vec2->data[tid+3*blockDim.x]*(1-vec2->data[tid+3*blockDim.x]) ; } } __global__ void ApplyWeightChange(Vector2D * __restrict__ result, float learning_rate, Vector2D * __restrict__ source) { if((result->width != source->width) || (result->height != source->height)) { printf("\n\n**********ScalarMatrixProduct dimensionç...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*source->width+tx; if(tid ==0) { //printf("\nScalarMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, 
blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < source->height) || (blockDim.x*gridDim.x<source->width/4)) { printf("\nScalarMatrixProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", source->width, source->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< source->width*source->height) { result->data[tid] += learning_rate*source->data[tid]; result->data[tid+blockDim.x] += learning_rate*source->data[tid+blockDim.x]; result->data[tid+2*blockDim.x] += learning_rate*source->data[tid+2*blockDim.x]; result->data[tid+3*blockDim.x] += learning_rate*source->data[tid+3*blockDim.x]; } } __global__ void Vector2DInfo(Vector2D * vec) { printf("\n\nWidth : %d - height : %d\n\n", vec->width, vec->height); } __global__ void calculateCrossEntropyLoss(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********MatrixPairwiseProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixPairwiseProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] * log(vec2->data[tid]); result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] * log(vec2->data[tid+blockDim.x]); result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] * log(vec2->data[tid+2*blockDim.x]); result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] * log(vec2->data[tid+3*blockDim.x]); } } 
#define EMPTY printf("\n\n"); Vector2D * w1, * w2, * b1, * b2; Vector2D * output_1, * output_2; Vector2D * bias_result_1, * bias_result_2; Vector2D * ones, * ones_transpose; void FeedForward(Vector2D * device_input, int batch_size) { //input * w1 dim3 block(BLOCK_X, BLOCK_Y); dim3 grid((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); MatrixProductShared<<<grid, block>>>(output_1, device_input, w1); cudaDeviceSynchronize(); //transpose ones * b1 MatrixProductShared<<<grid, block>>>(bias_result_1, ones_transpose, b1); cudaDeviceSynchronize(); int temp = grid.x ; grid.x /=4; if(grid.x == 0)grid.x = 1; AddandSigmoid<<<grid, block>>>(output_1, output_1, bias_result_1); cudaDeviceSynchronize(); grid.x =temp; /* //bias1 + input*w1 MatrixAdd<<<grid, block>>>(output_1, output_1, bias_result_1); cudaDeviceSynchronize(); // output of hidden layer... Sigmoid<<<grid, block>>>(output_1, output_1); cudaDeviceSynchronize(); */ //output of hidden layer * w2 grid.x = (OUTPUT_NODE_COUNT+block.x-1)/block.x; grid.y = (batch_size+block.y-1)/block.y; MatrixProductShared<<<grid, block>>>(output_2, output_1, w2); cudaDeviceSynchronize(); //transpose ones * b2 MatrixProductShared<<<grid, block>>>(bias_result_2, ones_transpose, b2); cudaDeviceSynchronize(); AddandExponential<<<grid, block>>>(output_2, output_2, bias_result_2); cudaDeviceSynchronize(); /* //bias2 + output of hidden layer * w2 - final output.... 
MatrixAdd<<<grid, block>>>(output_2, output_2, bias_result_2); cudaDeviceSynchronize(); Exponential<<<grid, block>>>(output_2, output_2); cudaDeviceSynchronize(); */ grid.x = 1; block.x = 1; Softmax<<<grid, block>>>(output_2, output_2); cudaDeviceSynchronize(); } Vector2D * layer_2_error, * layer_1_error; Vector2D * w1_update, * w2_update, * b1_update, * b2_update; Vector2D * output_1_transpose, * input_transpose; Vector2D * label_data; Vector2D * device_whole_label; Vector2D * device_whole_data; Vector2D * device_input; Vector2D * w2_transpose; Vector2D * scalar_minus; Vector2D * batch_data; Vector2D * batch_label; void BackPropagate(Vector2D * data, Vector2D * label, int batch_size) { FeedForward(data, batch_size); int temp; //Output error calculation dim3 block(BLOCK_X, BLOCK_Y); dim3 grid((OUTPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); temp = grid.x ; grid.x /= 4; if(grid.x ==0 )grid.x = 1; MatrixSubtract<<<grid, block>>>(layer_2_error, label, output_2); grid.x = temp; //output1 transpose dim3 grid2((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); TransposeVector2DShared<<<grid2, block>>>(output_1_transpose, output_1); cudaDeviceSynchronize(); //W2 update... 
dim3 grid3((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); MatrixProductShared<<<grid3, block>>>(w2_update, output_1_transpose, layer_2_error); //b2 update dim3 grid4((OUTPUT_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); MatrixProductShared<<<grid4, block>>>(b2_update, ones, layer_2_error); //W2 transpose dim3 grid5((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); TransposeVector2DShared<<<grid5, block>>>(w2_transpose, w2); cudaDeviceSynchronize(); //Layer 1 error dim3 grid6((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); MatrixProductShared<<<grid6, block>>>(layer_1_error, layer_2_error, w2_transpose); cudaDeviceSynchronize(); temp = grid.x; grid.x /=4; if(grid.x == 0)grid.x = 1; LayerErrorCalculate<<<grid6, block>>>(layer_1_error, layer_1_error, output_1); grid.x =temp; /* MatrixPairwiseProduct<<<grid6, block>>>(layer_1_error, layer_1_error, output_1); cudaDeviceSynchronize(); ScalarMinusVector2D<<<grid6, block>>>(scalar_minus, 1.0, output_1); cudaDeviceSynchronize(); MatrixPairwiseProduct<<<grid6, block>>>(layer_1_error, layer_1_error, scalar_minus); cudaDeviceSynchronize(); */ //Input transpose dim3 grid7((INPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); TransposeVector2DShared<<<grid7, block>>>(input_transpose, data); cudaDeviceSynchronize(); //w1 update.... 
dim3 grid8((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (INPUT_NODE_COUNT+block.y-1)/block.y); MatrixProductShared<<<grid8, block>>>(w1_update, input_transpose, layer_1_error); //b1 update dim3 grid9((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); MatrixProductShared<<<grid9, block>>>(b1_update, ones, layer_1_error); cudaDeviceSynchronize(); //Burası //w2_update * learning rate dim3 grid10((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); temp = grid10.x ; grid10.x /= 4; if(grid10.x ==0)grid10.x = 1; ApplyWeightChange<<<grid10, block>>>(w2, learning_rate, w2_update); grid10.x = temp; /*ScalarMatrixProduct<<<grid10, block>>>(w2_update, learning_rate, w2_update); cudaDeviceSynchronize(); //Apply w2 update MatrixAdd<<<grid10, block>>>(w2, w2, w2_update); cudaDeviceSynchronize(); */ //b2_update * learning_rate dim3 grid11((OUTPUT_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); temp = grid11.x ; grid11.x /= 4; if(grid11.x == 0) grid11.x = 1; ApplyWeightChange<<<grid11, block>>>(b2, learning_rate, b2_update); grid11.x = temp; /*ScalarMatrixProduct<<<grid11, block>>>(b2_update, learning_rate, b2_update); cudaDeviceSynchronize(); //Apply b2 update MatrixAdd<<<grid11, block>>>(b2, b2, b2_update); cudaDeviceSynchronize(); */ //w1_update * leraning_rate dim3 grid12((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (INPUT_NODE_COUNT+block.y-1)/block.y); temp = grid12.x; grid12.x /= 4; if(grid12.x == 0)grid12.x = 1; ApplyWeightChange<<<grid12, block>>>(w1, learning_rate, w1_update); /* ScalarMatrixProduct<<<grid12, block>>>(w1_update, learning_rate, w1_update); cudaDeviceSynchronize(); //Apply w1 update MatrixAdd<<<grid12, block>>>(w1, w1, w1_update); cudaDeviceSynchronize(); */ dim3 grid13((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); temp = grid13.x; grid13.x /= 4; if(grid13.x == 0)grid13.x = 1; ApplyWeightChange<<<grid13, block>>>(b1, learning_rate, b1_update); /* ScalarMatrixProduct<<<grid13, 
block>>>(b1_update, learning_rate, b1_update); cudaDeviceSynchronize(); //Apply b1 update MatrixAdd<<<grid13, block>>>(b1, b1, b1_update); cudaDeviceSynchronize(); */ cudaDeviceSynchronize(); } Vector2D * load_text_data() { FILE * dosya = fopen("text_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); for(int a =0; a< width*height; a++) fread(&loaded_data[a], sizeof(float), 1, dosya); fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_label_data() { FILE * dosya = fopen("label_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); int value; for(int a =0; a< width*height; a++) { fread(&value, sizeof(int), 1, dosya); loaded_data[a] = value; } fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_test_text_data() { FILE * dosya = fopen("test_text_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float*)malloc(width*height*sizeof(float)); for(int a =0; a< width*height; a++) fread(&loaded_data[a], sizeof(float), 1, dosya); fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_test_label_data() { FILE * dosya = fopen("test_label_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); int value; for(int a =0; a< width*height; a++) { fread(&value, sizeof(int), 1, dosya); loaded_data[a] = 
value; } fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * device_whole_test_data, * device_whole_test_label_data; int new_iteration_count ; int main() { /* float * del = (float *)malloc(sizeof(float)*64*32); for(int a=0; a < 64*32 ; a++)del[a] = a+1; Vector2D * s = CreateVector2D(del, 32, 64, true); Vector2D * transposed = CreateVector2D(NULL, 64, 32, false); dim3 bl(32, 32); dim3 gri((64+bl.x-1)/bl.x, (32+bl.y-1)/bl.y); TransposeVector2DShared<<<gri, bl>>>(transposed, s); cudaDeviceSynchronize(); printf("\nOriginal matrix : \n"); DisplayVector2D<<<1, 1>>>(s); cudaDeviceSynchronize(); printf("\nTransposed matrix \n"); DisplayVector2D<<<1, 1>>>(transposed); cudaDeviceSynchronize(); exit(0); */ int count = 0; cudaGetDeviceCount(&count); clock_t train_start, train_end; clock_t execution_start, execution_end; clock_t program_start, program_end ; program_start = clock(); execution_start = clock(); srand(time(0)); int blockx = 32, blocky = 32; dim3 block(blockx, blocky); int batch_size = BATCH_SIZE; float * ones_ = (float *)malloc(sizeof(float)*batch_size); for(int a = 0; a< batch_size;a++)ones_[a] = 1.0; ones = CreateVector2D(ones_, 1, batch_size, true); ones_transpose = CreateVector2D(ones_, batch_size, 1, true); //first hidden layer 160 input 784 w1 = CreateWeightMatrix(INPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT); b1 = CreateWeightMatrix(1, HIDDEN_LAYER_NODE_COUNT); bias_result_1 = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); output_1 = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); output_1_transpose = CreateVector2D(NULL, HIDDEN_LAYER_NODE_COUNT, batch_size, false); w1_update = CreateVector2D(NULL, INPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT, false); b1_update = CreateVector2D(NULL, 1, HIDDEN_LAYER_NODE_COUNT, false); //output 10 nodes.... 
w2 = CreateWeightMatrix(HIDDEN_LAYER_NODE_COUNT, OUTPUT_NODE_COUNT); w2_transpose = CreateVector2D(NULL, OUTPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT, false); b2 = CreateWeightMatrix(1, OUTPUT_NODE_COUNT); bias_result_2 = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); output_2 = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); w2_update = CreateVector2D(NULL, HIDDEN_LAYER_NODE_COUNT, OUTPUT_NODE_COUNT, false); b2_update = CreateVector2D(NULL, 1, OUTPUT_NODE_COUNT, false); layer_2_error = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); layer_1_error = CreateVector2D(NULL , batch_size, HIDDEN_LAYER_NODE_COUNT, false); scalar_minus = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); input_transpose = CreateVector2D(NULL, INPUT_NODE_COUNT, batch_size, false); Vector2D * data_set = load_text_data(); Vector2D * labels_ = load_label_data(); printf("\nData loaded...\n"); Vector2D * one_hot_labels = CreateOneHot(labels_, OUTPUT_NODE_COUNT); Vector2D * test_data = load_test_text_data(); Vector2D * lab = load_test_label_data(); Vector2D *one_hot_test = CreateOneHot(lab, OUTPUT_NODE_COUNT); train_start = clock(); device_whole_data = CreateVector2D(data_set->data, data_set->height, 32754); device_whole_label = CreateVector2D(one_hot_labels->data, data_set->height, OUTPUT_NODE_COUNT); batch_data = CreateVector2D(NULL, batch_size, INPUT_NODE_COUNT, false); batch_label = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); device_whole_test_data = CreateVector2D(test_data->data, test_data->height, 32754); device_whole_test_label_data = CreateVector2D(one_hot_test->data, one_hot_test->height, OUTPUT_NODE_COUNT); float error_val[BATCH_SIZE]; float batch_error; double toplam = 0; double previous_error = 100000000000; new_iteration_count = ITERATION_COUNT; repeat_iteration: for(int iteration = 0 ; iteration < new_iteration_count;iteration++) { toplam = 0; int temp; for(int batch_index = 1; batch_index < 
51;batch_index++)//data_set->height/batch_size; batch_index++)//10;batch_index++ );//data_set->height/batch_size; batch_index++) { PointerSet<<<1 ,1>>>(batch_data, device_whole_data, (batch_index -1)*batch_size, batch_size); cudaDeviceSynchronize(); PointerSet<<<1 ,1>>>(batch_label, device_whole_label, (batch_index-1)*batch_size, batch_size); cudaDeviceSynchronize(); BackPropagate(batch_data, batch_label, batch_size); dim3 gridd((OUTPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); temp = gridd.x ; gridd.x /= 4; if(gridd.x == 0)gridd.x = 1; calculateCrossEntropyLoss<<<gridd, block>>>(layer_2_error, batch_label, output_2); gridd.x = temp; cudaDeviceSynchronize(); dim3 k(1, BATCH_SIZE); Sum2D<<<1, k>>>(layer_2_error); cudaDeviceSynchronize(); cudaMemcpyFromSymbol( &error_val, error_sum, sizeof(float)*BATCH_SIZE); //printf("\n\nIteration %d - Error : %f\n", iteration, error_val); cudaDeviceSynchronize(); for(int a=0;a<BATCH_SIZE;a++) toplam += -error_val[a]; } printf("\nITeration %d error %f \n", iteration, toplam); /* if(toplam < previous_error) learning_rate += 2.0e-10; else learning_rate -= 2.0e-10; previous_error = toplam; */ if(previous_error > 1000000) previous_error = toplam; learning_rate = learning_rate + 0.00000001*(previous_error - toplam); previous_error = toplam; train_end = clock(); printf("\nIteration %d - Whole data train time : %f\n\n", iteration, (double)(train_end - train_start) / CLOCKS_PER_SEC); } printf("\nNew iteration count : \n"); scanf("%d", &new_iteration_count); if(new_iteration_count == 0); else goto repeat_iteration; if(abs(previous_error - toplam) < 0.00001) { learning_rate -= learning_rate/10.0*2; } printf("\nTraining has finished...\n"); execution_end = clock(); printf("\nWhole data train time : %f\n\n", (double)(execution_end - execution_start) / CLOCKS_PER_SEC); Vector2D * batch_test_data, * batch_label_data; batch_test_data = CreateVector2D(NULL, batch_size, test_data->width, false); batch_label_data = 
CreateVector2D(NULL, batch_size, one_hot_test->width, false);

 // ----- Test-set evaluation: accuracy over whole batches of the test data -----
 int predicted_labels[BATCH_SIZE];
 int correct_number = 0, false_number = 0;
 for(int batch_index = 0; batch_index < one_hot_test->height/batch_size; batch_index++)
 {
 // Point the batch views into the whole test set (pointer arithmetic, no copies).
 PointerSet<<<1 ,1>>>(batch_test_data, device_whole_test_data, (batch_index)*batch_size, batch_size);
 cudaDeviceSynchronize();
 PointerSet<<<1 ,1>>>(batch_label_data, device_whole_test_label_data, (batch_index)*batch_size, batch_size);
 cudaDeviceSynchronize();

 FeedForward(batch_test_data, batch_size);

 // One thread per row picks the arg-max class into arg_max_result[].
 dim3 block(1 , BATCH_SIZE);
 ArgMax2D<<<1, block>>>(output_2);
 cudaDeviceSynchronize();
 cudaMemcpyFromSymbol( &predicted_labels, arg_max_result, sizeof(int)*BATCH_SIZE);
 cudaDeviceSynchronize();

 for(int i = 0; i < BATCH_SIZE;i++)
 {
 // NOTE(review): this index assumes lab->width == 1 - verify against the
 // label file layout; labels are whole numbers, so the < 0.1 tolerance is
 // an equality test.
 if( abs(predicted_labels[i] - lab->data[i + BATCH_SIZE*batch_index*lab->width]) < 0.1)
 {
 correct_number ++;
 }
 else false_number ++;
 }
 }
 printf("\n\nAccuracy : %f", (float(correct_number)/(correct_number+false_number)*100.0));
 printf("\nTamam\n");
 cudaDeviceReset();
 program_end = clock();
 printf("\nProgram execution time : %f\n\n", (double)(program_end- program_start) / CLOCKS_PER_SEC); // FIX: was "\Program..." - invalid escape sequence "\P"
}
1,171
//pass
//--blockDim=2048 --gridDim=64

// Plain 3-component float record.
struct s {
  float x, y, z;
};

// Zero-fills one element of q per thread (one slot per global thread index).
// Assumes q holds at least gridDim.x * blockDim.x elements.
__global__ void foo(s *q) {
  const unsigned int slot = blockIdx.x * blockDim.x + threadIdx.x;
  const s zeroed = { 0.0f, 0.0f, 0.0f };
  q[slot] = zeroed;
}
1,172
// To compute histogram with atomic operations
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>

// Variables
float* data_h;          // host input vector
unsigned int* hist_h;   // GPU solution copied back to the CPU
float* data_d;          // device input vector
unsigned int* hist_d;   // device histogram
unsigned int* hist_c;   // CPU reference solution

// Functions
void RandomUniform(float*, long);
void RandomNormal(float*, long);
void RandomExpDecay(float*, long);

// Histogram with a per-block shared-memory sub-histogram plus atomics.
// Launch contract: blockDim.x == bins (each thread owns one bin), and the
// dynamic shared memory size is bins * sizeof(unsigned int).
__global__ void hist_shmem(float *data, const long N, unsigned int *hist,
                           const int bins, const float Rmin, const float binsize)
{
    extern __shared__ unsigned int temp[];   // one counter per bin for this block
    temp[threadIdx.x] = 0;
    __syncthreads();                         // all counters zeroed before accumulation

    // grid-stride loop over the data
    long i = threadIdx.x + blockIdx.x * blockDim.x;
    long stride = blockDim.x * gridDim.x;
    while (i < N) {
        int index = (int)((data[i] - Rmin) / binsize);
        // FIX: guard the bin index. Data outside [Rmin, Rmax) previously caused an
        // out-of-bounds shared-memory write (the CPU path checks and aborts; the GPU
        // path silently corrupted memory). Out-of-range samples are now dropped.
        if (index >= 0 && index < bins)
            atomicAdd(&temp[index], 1);
        i += stride;
    }
    __syncthreads();                         // block sub-histogram complete

    // fold this block's counters into the global histogram (one bin per thread)
    atomicAdd( &(hist[threadIdx.x]), temp[threadIdx.x] );
}

// Interactive driver: reads the problem size / range / launch config from stdin,
// builds the histogram on the GPU, optionally verifies against a CPU reference,
// and writes the results to hist_shmem.dat (and hist_cpu.dat).
int main(void)
{
    int gid;
    cudaError_t err = cudaSuccess;

    scanf("%d",&gid);
    err = cudaSetDevice(gid);
    if (err != cudaSuccess) {
        printf("!!! Cannot select GPU with device ID = %d\n", gid);
        exit(1);
    }
    printf("Set GPU with device ID = %d\n", gid);
    cudaSetDevice(gid);

    printf("To find the histogram of a data set (with real numbers): \n");
    long N;
    int bins, index;
    float Rmin, Rmax, binsize;

    printf("Enter the size of the data vector: ");
    scanf("%ld",&N);
    printf("%ld\n",N);
    long size = N * sizeof(float);

    printf("Enter the data range [Rmin, Rmax] for the histogram: ");
    scanf("%f %f",&Rmin, &Rmax);
    printf("%f %f\n",Rmin, Rmax);

    printf("Enter the number of bins of the histogram: ");
    scanf("%d",&bins);
    printf("%d\n",bins);
    // one thread per bin, so bins is capped by the max block size
    if(bins > 1024) {
        printf("The number of bins is set to # of threads per block < 1024 ! \n");
        exit(0);
    }
    int bsize = bins*sizeof(int);
    binsize = (Rmax - Rmin)/(float)bins;

    data_h = (float*)malloc(size);
    hist_h = (unsigned int*)malloc(bsize);
    // Check memory allocations
    if(data_h == NULL || hist_h == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    for(int i=0; i<bins; i++) { hist_h[i]=0; }

    // fixed seed for reproducibility
    srand(12345);
    RandomExpDecay(data_h, N);

    int threadsPerBlock;
    printf("Enter the number of threads per block: ");
    scanf("%d",&threadsPerBlock);
    printf("%d\n",threadsPerBlock);
    if( threadsPerBlock != bins ) {
        printf("The number of threads per block must be equal to the number of bins ! \n");
        exit(0);
    }
    fflush(stdout);

    int blocksPerGrid;
    printf("Enter the number of blocks per grid: ");
    scanf("%d",&blocksPerGrid);
    printf("%d\n",blocksPerGrid);
    if( blocksPerGrid > 2147483647 ) {
        printf("The number of blocks must be less than 2147483647 ! \n");
        exit(0);
    }
    printf("The number of blocks is %d\n", blocksPerGrid);
    fflush(stdout);

    int CPU;
    printf("To compute the histogram with CPU (1/0) ? ");
    scanf("%d",&CPU);
    printf("%d\n",CPU);
    fflush(stdout);

    // create the timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // time the host -> device transfers
    cudaEventRecord(start,0);
    cudaMalloc((void**)&hist_d, bsize);
    cudaMalloc((void**)&data_d, size);
    cudaMemcpy(data_d, data_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(hist_d, hist_h, bsize, cudaMemcpyHostToDevice);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float Intime;
    cudaEventElapsedTime( &Intime, start, stop);
    printf("Input time for GPU: %f (ms) \n",Intime);

    // time the kernel
    cudaEventRecord(start,0);
    int sm = threadsPerBlock * sizeof(int);   // dynamic shared memory: one counter per bin
    hist_shmem <<< blocksPerGrid, threadsPerBlock, sm >>> (data_d, N, hist_d, bins, Rmin, binsize);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float gputime;
    cudaEventElapsedTime( &gputime, start, stop);
    printf("Processing time for GPU: %f (ms) \n",gputime);
    printf("GPU Gflops: %f\n",2*N/(1000000.0*gputime));

    // time the device -> host transfer (hist_h receives the result)
    cudaEventRecord(start,0);
    cudaMemcpy(hist_h, hist_d, bsize, cudaMemcpyDeviceToHost);
    cudaFree(data_d);
    cudaFree(hist_d);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float Outime;
    cudaEventElapsedTime( &Outime, start, stop);
    printf("Output time for GPU: %f (ms) \n", Outime);

    float gputime_tot;
    gputime_tot = Intime + gputime + Outime;
    printf("Total time for GPU: %f (ms) \n", gputime_tot);

    // Save histogram in file
    FILE *out;
    out = fopen("hist_shmem.dat","w");
    fprintf(out, "Histogram (GPU):\n");
    for(int i=0; i<bins; i++) {
        float x = Rmin + (i + 0.5) * binsize;   // the center of each bin
        fprintf(out,"%f %d \n",x,hist_h[i]);
    }
    fclose(out);

    // Print the histogram on screen
    printf("Histogram (GPU):\n");
    for(int i = 0; i < bins; i = i+1) {
        float x = Rmin + (i + 0.5) * binsize;   // the center of each bin
        printf("%f %d\n", x, hist_h[i]);
    }

    if(CPU==0) {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaDeviceReset();
        free(data_h);
        free(hist_h);
        return 0;
    }

    // To compute the CPU reference solution
    hist_c = (unsigned int*)malloc(bsize);
    for(int i = 0; i < bins; i = i+1){ hist_c[i] = 0; }

    cudaEventRecord(start,0);
    for(int i = 0; i < N; i = i+1) {
        index = (int)((data_h[i] - Rmin) / binsize);
        if( (index > bins - 1) || (index < 0)) {
            printf("data[%d]=%f, index=%d\n",i,data_h[i],index);
            exit(0);
        }
        hist_c[index]++;
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float cputime;
    cudaEventElapsedTime( &cputime, start, stop);
    printf("Processing time for CPU: %f (ms) \n",cputime);
    printf("CPU Gflops: %f\n",2*N/(1000000.0*cputime));
    printf("Speed up of GPU = %f\n", cputime/(gputime_tot));

    // destroy the timer
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // check histogram sum equal to the total number of data
    int sum = 0;
    for(int i = 0; i < bins; i = i+1) { sum += hist_c[i]; }
    if(sum != N) {
        printf("Error, sum = %d\n",sum);
        exit(0);
    }

    // compare histograms from CPU and GPU
    for (int i = 0; i < bins; i++) {
        if(hist_h[i] != hist_c[i]) {
            printf("i=%d, hist_h=%d, hist_c=%d \n", i, hist_h[i], hist_c[i]);
        }
    }

    // save histogram in file
    FILE *out1;
    out1 = fopen("hist_cpu.dat","w");
    fprintf(out1, "Histogram (CPU):\n");
    for(int i=0; i<bins; i++) {
        float x=Rmin+(i+0.5)*binsize;   // the center of each bin
        fprintf(out1,"%f %d \n",x,hist_c[i]);
    }
    fclose(out1);

    printf("Histogram (CPU):\n");
    for(int i=0; i<bins; i++) {
        float x=Rmin+(i+0.5)*binsize;   // the center of each bin
        printf("%f %d \n",x,hist_c[i]);
    }

    cudaDeviceReset();
    free(data_h);
    free(hist_h);
    free(hist_c);
    return 0;
}

// RNG with uniform distribution in (0,1)
void RandomUniform(float* data, long n)
{
    for(long i = 0; i < n; i++){
        data[i] = rand()/(float)RAND_MAX;
    }
}

// RNG with normal distribution, mu=0, sigma=1 (Box-Muller style transform)
void RandomNormal(float* data, long n)
{
    const float Pi = acos(-1.0);
    for(long i = 0; i < n; i++) {
        double y = (double) rand() / (float)RAND_MAX;
        double x = -log(1.0-y);
        double z = (double) rand() / (float)RAND_MAX;
        double theta = 2*Pi*z;
        data[i] = (float) (sqrt(2.0*x)*cos(theta));
    }
}

// RNG with Exponential Decay (inverse-CDF sampling)
void RandomExpDecay(float* data, long n)
{
    for(long i = 0; i < n; i = i+1){
        double y = (double) rand() / (float) RAND_MAX;
        data[i] = (float) -log(1.0 - y);
    }
}
1,173
#include "includes.h"

// Marks which spectral channels are "noticed": ntcdChnnls[c] becomes 1 when the
// channel lies strictly inside the (lwrNtcdEnrg, hghrNtcdEnrg) window AND its
// quality flag gdQltChnnls[c] is 0; otherwise 0. One channel per thread.
__global__ void AssembleArrayOfNoticedChannels ( const int nmbrOfChnnls, const float lwrNtcdEnrg, const float hghrNtcdEnrg, const float *lwrChnnlBndrs, const float *hghrChnnlBndrs, const float *gdQltChnnls, float *ntcdChnnls )
{
  const int c = threadIdx.x + blockDim.x * blockIdx.x;
  if ( c >= nmbrOfChnnls ) return;   // tail guard for partial blocks

  // each comparison yields 0 or 1; the product ANDs the three conditions
  const float insideWindow = ( lwrChnnlBndrs[c] > lwrNtcdEnrg ) * ( hghrChnnlBndrs[c] < hghrNtcdEnrg );
  ntcdChnnls[c] = insideWindow * ( 1 - gdQltChnnls[c] );
}
1,174
#include "includes.h"

// P = M * N for square Width x Width row-major matrices.
// Each thread produces exactly one element of P; threads outside the matrix exit early.
__global__ void MatrixMulKernel(float *M, float *N, float *P, int Width)
{
  const int Row = blockIdx.y * blockDim.y + threadIdx.y;
  const int Col = blockIdx.x * blockDim.x + threadIdx.x;

  if (Row >= Width || Col >= Width)
    return;   // grid may overshoot the matrix edge

  float acc = 0;
  for (int k = 0; k < Width; ++k)
    acc += M[Row * Width + k] * N[k * Width + Col];

  P[Row * Width + Col] = acc;
}
1,175
#include "includes.h"

// Strided tensor copy: copies n_elem elements from src to dst, where each tensor
// is described by per-dimension sizes (*_sz) and strides (*_st), Torch-style.
// Work split: each (block, threadIdx.y) pair owns one run of `innerdim` contiguous
// logical elements; threadIdx.x lanes stripe across that innermost run.
// NOTE(review): assumes *_sz here are "elements remaining per index step" style
// divisors (as used by the div/mod decomposition below) — confirm against the caller.
__global__ void THCudaTensor_kernel_copy(float *dst, long *dst_sz, long *dst_st, int dst_dim, float *src, long *src_sz, long *src_st, int src_dim, long n_elem, long innerdim)
{
  // linear job id over (gridDim.z, gridDim.y, gridDim.x, blockDim.y)
  long k = (blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x)*blockDim.y + threadIdx.y;

  // per-lane start/step/end offsets along the innermost dimension, in memory units
  // (logical index scaled by the innermost stride)
  long i_start = threadIdx.x * src_st[src_dim-1];
  long i_step = blockDim.x * src_st[src_dim-1];

  long o_start = threadIdx.x * dst_st[dst_dim-1];
  long o_step = blockDim.x * dst_st[dst_dim-1];
  long o_end = innerdim * dst_st[dst_dim-1];

  // only run jobs whose whole innerdim run fits inside n_elem
  if ( ((k+1) * innerdim) <= n_elem) // too safe
  {
    // decompose the flat logical offset k*innerdim into a dst memory offset
    // via successive div/mod against the dst size table
    long dst_idx = 0;
    long dst_rest = k * innerdim;
    for(int dim = 0; dim < dst_dim; dim++)
    {
      dst_idx += (dst_rest/dst_sz[dim])*dst_st[dim];
      dst_rest = dst_rest % dst_sz[dim];
    }
    // same decomposition for the src side (dimension counts may differ)
    long src_idx = 0;
    long src_rest = k * innerdim;
    for(int dim = 0; dim < src_dim; dim++)
    {
      src_idx += (src_rest/src_sz[dim])*src_st[dim];
      src_rest = src_rest % src_sz[dim];
    }

    // copy the innermost run; i and o advance by lane-strided steps in
    // src/dst memory space respectively
    for (int i=i_start, o=o_start; o<o_end; i+=i_step, o+=o_step)
    {
      dst[dst_idx + o] = src[src_idx + i];
    }
  }
}
1,176
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

// Note N must be an even multiple of BLOCK_DIM
#define N (BLOCK_DIM*1024)
#define BLOCK_DIM 16
#define RAND_SEED 97
#define NB_OF_THREADS 4
#define PRINT_MATRIX_OUT 0

#if PRINT_MATRIX_OUT
#define PRINT_MATRIX(...) print_matrix(__VA_ARGS__)
#else
#define PRINT_MATRIX(...) do { } while(0)
#endif

// CUDA Functions
__global__ void matrixMul(uint32_t n, float *dev_A, float *dev_B, float *dev_C);

// HOST functions
void matrixMulCPU(uint32_t n, float *A, float *B, float *C);
void print_matrix(uint64_t m, uint64_t n, float *data, bool matlab);
void fill_crap(uint64_t m, uint64_t n, float *data);

// Fills A and B with random values, multiplies them on the GPU, and
// (optionally, via PRINT_MATRIX_OUT) prints the operands and result.
int main()
{
    float *dev_A, *dev_B, *dev_C;
    float (*A)[N] = (float (*)[N]) malloc(sizeof(float[N][N])); // NxN
    float (*B)[N] = (float (*)[N]) malloc(sizeof(float[N][N])); // NxN
    float (*C)[N] = (float (*)[N]) malloc(sizeof(float[N][N])); // NxN

    //seed srand
    srand(RAND_SEED);
    fill_crap(N, N, (float*)A);
    fill_crap(N, N, (float*)B);
    printf("A: \n");
    PRINT_MATRIX(N, N, (float*)A, false);
    printf("\n\nB: \n");
    PRINT_MATRIX(N, N, (float*)B, false);

    // copy A
    cudaMalloc(&dev_A, sizeof(float[N][N]));
    cudaMemcpy(dev_A, A, sizeof(float[N][N]), cudaMemcpyHostToDevice);
    // Copy B
    cudaMalloc(&dev_B, sizeof(float[N][N]));
    cudaMemcpy(dev_B, (float*)B, sizeof(float[N][N]), cudaMemcpyHostToDevice);
    // Allocate space for C
    cudaMalloc(&dev_C, sizeof(float[N][N]));

    // N is a multiple of BLOCK_DIM, so the grid covers the matrix exactly
    dim3 Block(BLOCK_DIM, BLOCK_DIM);
    dim3 Grid(N/Block.x, N/Block.y);
    matrixMul<<< Grid, Block>>>(N, dev_A, dev_B, dev_C);
    //matrixMulCPU(N, (float*)A, (float*)B, (float*)C);

    cudaMemcpy(C, dev_C, sizeof(float[N][N]), cudaMemcpyDeviceToHost);
    printf("\nResult:\n");
    PRINT_MATRIX(N, N, (float*)C, false);

    // Clean up memory
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    free(A);
    free(B);
    free(C);
    return 0;
}

// C = A * B for n x n row-major matrices; one output element per thread.
// Assumes the grid exactly tiles the matrix (n a multiple of the block dims).
__global__ void matrixMul(uint32_t n, float *dev_A, float *dev_B, float *dev_C)
{
    float partial = 0.0;
    uint32_t i = blockIdx.y * blockDim.y + threadIdx.y; // Row i of C
    uint32_t j = blockIdx.x * blockDim.x + threadIdx.x; // Column j of C
    for (uint32_t k = 0; k < n; k++)
        partial += dev_A[n*i + k]*dev_B[n*k + j];
    dev_C[n*i + j] = partial;
}

// compare to cpu implimentation: C = A * B, row-major, OpenMP-parallel over rows
void matrixMulCPU(uint32_t n, float *A, float *B, float *C)
{
    // FIX: macro was spelled NB_OF_CPU_THREADS, which is never defined
    // (the file defines NB_OF_THREADS) — a compile error under -fopenmp.
    #pragma omp parallel for schedule(static) num_threads(NB_OF_THREADS)
    for (uint32_t i=0; i < n; i++) {
        for (uint32_t j=0; j < n; j++) {
            C[n*i + j]=0.;
            for (uint32_t k=0; k<n; k++) {
                // FIX: reference formula previously read B and the partially
                // written C ("B[n*i+k]*C[n*k+j]") instead of the operands A and B,
                // so the CPU check could never match the GPU result.
                C[n*i + j] += A[n*i + k]*B[n*k + j];
            }
        }
    }
}

// Prints an m x n row-major matrix; `matlab` wraps it in [ ... ] with ';' row breaks.
void print_matrix(uint64_t m, uint64_t n, float *data, bool matlab)
{
    if (matlab) printf("[ ");
    for (uint64_t i=0; i< m; i++) {
        for (uint64_t j=0; j < n-1; j++) {
            printf("%.4f, ", data[n*i+j]);
        }
        printf(matlab ? "%.4f; " : "%.4f\n", data[n*i + n-1]);
    }
    if (matlab) printf("]\n");
}

// Fills an m x n row-major matrix with uniform random values in [0, 1].
void fill_crap(uint64_t m, uint64_t n, float *data)
{
    for (uint64_t i=0; i<m; i++) {
        for (uint64_t j=0; j < n; j++) {
            data[n*i+j] = ((float)rand())/((float)RAND_MAX);
        }
    }
}
1,177
#include "includes.h"

// Matrix multiplication for NxN row-major matrices: C = A * B.
// Each thread computes a single element of C.
__global__ void gpuMM(float *A, float *B, float *C, int N)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;

    // FIX: bounds guard added. The original kernel wrote C[row*N+col]
    // unconditionally, so any launch whose grid overshoots N (the usual
    // ceil-divided grid) performed out-of-bounds reads and writes.
    if (row >= N || col >= N)
        return;

    float sum = 0.0;
    for (int n = 0; n < N; ++n)
        sum += A[row*N+n]*B[n*N+col];

    C[row*N+col] = sum;
    // if(row%50 ==5)
    //     printf("%f \t %f \t %f\n",A[row*N+col], B[row*N+col], C[row*N+col]);
}
1,178
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "string.h"

// Prints the key properties of one CUDA device.
// FIX: totalConstMem and sharedMemPerBlock are size_t; printing them with %d is
// undefined behavior on LP64 systems — now printed with %zu. Also fixed the
// "totalConstMen" label typo.
void printDevice(cudaDeviceProp prop){
    printf("\t Name: \t%s\n",prop.name);
    printf("\t Capability Major/Minor version number: %d.%d\n", prop.major, prop.minor);
    printf("\t Total amount of global memory: \t%.0f MBytes (%llu bytes)\n",
           (float)prop.totalGlobalMem/1048576.0f, (unsigned long long) prop.totalGlobalMem);
    printf("\t maxThreadsPerBlock: \t%d\n",prop.maxThreadsPerBlock);
    printf("\t totalConstMem: \t%zu\n",prop.totalConstMem);
    printf("\t sharedMemPerBlock: \t%zu\n",prop.sharedMemPerBlock);
    printf("\t regsPerBlock: \t%d\n",prop.regsPerBlock);
    printf("\t maxThreadsPerMultiProcessor: \t%d\n",prop.maxThreadsPerMultiProcessor);
    printf("\t multiProcessorCount: \t%d\n",prop.multiProcessorCount);
}

// Enumerates all CUDA devices and prints each one's properties.
int main(){
    int count;
    cudaGetDeviceCount(&count);
    printf("There are %d devices.\n",count);
    int i;
    for(i = 0; i < count; i++){
        cudaDeviceProp prop;
        if(cudaGetDeviceProperties(&prop,i) == cudaSuccess){
            printf("The %dth device's informations\n",i + 1);
            printDevice(prop);
        }
    }
    return 0;
}
1,179
#include "includes.h"

// Column-wise argmin of a row-major width x height matrix: target[col] receives
// the (float-cast) row index of the smallest value in column `col`.
// Launch contract: one block per column, blockDim.x == 32 (the shared arrays and
// both loops are hard-wired to 32 lanes).
__global__ void kArgMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height)
{
    __shared__ float laneMin[32];
    __shared__ unsigned int laneArg[32];

    // Phase 1: each lane scans rows threadIdx.x, threadIdx.x+32, ... of its column.
    float bestVal = 2e38;
    unsigned int bestRow = 0;
    for (unsigned int row = threadIdx.x; row < height; row += 32) {
        const float candidate = mat[row * width + blockIdx.x];
        if (candidate < bestVal) {
            bestVal = candidate;
            bestRow = row;
        }
    }
    laneMin[threadIdx.x] = bestVal;
    laneArg[threadIdx.x] = bestRow;
    __syncthreads();   // all 32 partial minima published before the final scan

    // Phase 2: lane 0 reduces the 32 partial results and writes the answer.
    if (threadIdx.x == 0) {
        bestVal = 2e38;
        bestRow = 0;
        for (unsigned int lane = 0; lane < 32; lane++) {
            if (laneMin[lane] < bestVal) {
                bestVal = laneMin[lane];
                bestRow = laneArg[lane];
            }
        }
        target[blockIdx.x] = bestRow;
    }
}
1,180
#include "includes.h"

// Final warp-level stage of the block reduction: unrolled additions within the
// last 32 lanes. `volatile` forces memory traffic between the steps — this
// relies on pre-Volta implicit warp synchrony.
// NOTE(review): on Volta+ (independent thread scheduling) this pattern needs
// __syncwarp() between steps or a *_sync shuffle reduction — verify target arch.
// NOTE(review): the `bid + K < size` guards bound the *global* id, not the
// shared-memory index; they assume the tail block's shared slots past `size`
// were never written — confirm this matches the launch configuration.
__device__ void warpReduce(volatile float *sdata, int tid, int bid, int size)
{
  if (bid + 32 < size) sdata[tid] += sdata[tid + 32];
  if (bid + 16 < size) sdata[tid] += sdata[tid + 16];
  if (bid + 8 < size) sdata[tid] += sdata[tid + 8];
  if (bid + 4 < size) sdata[tid] += sdata[tid + 4];
  if (bid + 2 < size) sdata[tid] += sdata[tid + 2];
  if (bid + 1 < size) sdata[tid] += sdata[tid + 1];
}

// Block-wise sum reduction: each block loads 2*blockDim.x inputs (first-add-
// during-load), reduces them in dynamic shared memory, and writes one partial
// sum per block to out[blockIdx.x]. Requires shared memory of
// blockDim.x * sizeof(float) passed at launch.
__global__ void naive_sum(float *input, int size, float *out)
{
  const unsigned int tid = threadIdx.x;
  // global id with a 2x stride per block: each thread covers bid and bid+blockDim.x
  const unsigned int bid = blockIdx.x * blockDim.x * 2 + tid;
  extern __shared__ float sdata[];

  // NOTE(review): this early return makes tail-block threads skip the
  // __syncthreads() below — a divergent-barrier hazard whenever size is not a
  // multiple of 2*blockDim.x; those threads also leave their sdata slot
  // uninitialized. Confirm launches always size the grid exactly.
  if (!(bid < size))
    return;

  // first addition performed during the load to halve the tree depth
  sdata[tid] = input[bid];
  if (bid + blockDim.x < size)
    sdata[tid] += input[bid + blockDim.x];
  __syncthreads();

  // tree reduction in shared memory down to 64 elements
  for (unsigned int offset = blockDim.x/2; offset > 32; offset /= 2)
  {
    if (tid < offset && bid + offset < size)
      sdata[tid] += sdata[tid + offset];
    __syncthreads();
  }

  // last 64 -> 1 handled warp-synchronously
  if (tid < 32)
    warpReduce(sdata, tid, bid, size);

  if (tid == 0)
    out[blockIdx.x] = sdata[0];
}
1,181
// In-place transform of an n x n row-major matrix A: swaps adjacent even/odd
// column pairs (and, via the index exchange, the mirrored row pairs), then
// exchanges A[i][j] with A[j][i] for the lower triangle.
// Threads map as (idi = row from y, idj = col from x); only the idj <= idi
// triangle does work.
// NOTE(review): the __syncthreads() below sits inside a divergent branch —
// threads with idi >= n or idj > idi never reach it, which is undefined
// behavior unless the launch guarantees all threads of every block satisfy
// the condition. Verify the launch configuration before reuse.
// NOTE(review): distinct threads read/write overlapping elements of A between
// the paired swaps with no intervening barrier visible here — ordering between
// the element swaps and the transpose swap appears race-prone; confirm intent.
__global__ void swap(int *A,int n){
  int idi = blockIdx.y*blockDim.y+threadIdx.y;
  int idj = blockIdx.x*blockDim.x+threadIdx.x;
  if(idi<n && idj<=idi){
    // swap A[idi][idj] with its right neighbor when idj is even
    if(idj%2==0 && idj<n-1){
      int temp=A[idi*n+idj];
      A[idi*n+idj]=A[idi*n+idj+1];
      A[idi*n+idj+1]=temp;
    }
    // exchange the roles of idi and idj, then repeat the neighbor swap
    // from the mirrored position (skipping the diagonal)
    int temp=idi;
    idi=idj;
    idj=temp;
    if(idi!=idj && idj%2==0 && idj<n-1){
      int temp=A[idi*n+idj];
      A[idi*n+idj]=A[idi*n+idj+1];
      A[idi*n+idj+1]=temp;
    }
    __syncthreads();   // see NOTE(review): divergent barrier
    // transpose step: exchange the (now-swapped) element with its mirror
    temp=A[idi*n+idj];
    A[idi*n+idj]=A[idj*n+idi];
    A[idj*n+idi]=temp;
  }
}
1,182
#include <cuda.h>
#include <ctime>
#include <stdio.h>
#include <iostream>

int K = 256;                       // threads per block
int N = 1024 * 32;                 // elements per chunk
int sizeVector = (N * 32 * 20);    // total elements (even multiple of 2*N)

// Reports (but does not abort on) a CUDA API error.
#define CUDA_CHECK_RETURN(value) ((cudaError_t)value != cudaSuccess) ? printf("Error %s at line %d in the file %s\n", cudaGetErrorString((cudaError_t)value), __LINE__, __FILE__) : printf("")

// c[i] = a[i] + b[i]; one element per thread, grid assumed to cover N exactly.
__global__ void addKernel(int *a, int *b, int *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] + b[i];
}

// c[i] = a[i] * b[i]; one element per thread, grid assumed to cover N exactly.
__global__ void mulKernel(int *a, int *b, int *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] * b[i];
}

// Host-side handle to either kernel above.
typedef void (*BinaryKernel)(int *, int *, int *);

// Refills both host input vectors with random values in [0, 20).
static void refillInputs(int *a, int *b)
{
    for (int i = 0; i < sizeVector; i++) {
        a[i] = rand() % 20;
        b[i] = rand() % 20;
    }
}

// Prints the last 10 (a, b, c) triples for a quick visual sanity check.
static void printTail(int *a, int *b, int *c)
{
    for (int i = sizeVector - 10; i < sizeVector; i++)
        std::cout << a[i] << " " << b[i] << " " << c[i] << std::endl;
}

// Strategy 1: every N-element chunk goes through one stream
// (copy-in, kernel, copy-out, repeat). Returns elapsed milliseconds.
static float runSingleStream(BinaryKernel kernel, cudaStream_t s,
                             int *dev_a, int *dev_b, int *dev_c,
                             int *a, int *b, int *c,
                             cudaEvent_t start, cudaEvent_t stop)
{
    float ms = 0.0f;
    CUDA_CHECK_RETURN(cudaEventSynchronize(start));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    for (int i = 0; i < sizeVector; i += N) {
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_a, a + i, N * sizeof(int), cudaMemcpyHostToDevice, s));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_b, b + i, N * sizeof(int), cudaMemcpyHostToDevice, s));
        kernel <<< N / K, K, 0, s >>> (dev_a, dev_b, dev_c);
        CUDA_CHECK_RETURN(cudaMemcpyAsync(c + i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, s));
    }
    CUDA_CHECK_RETURN(cudaStreamSynchronize(s));
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&ms, start, stop));
    return ms;
}

// Strategy 2: two streams, work issued depth-first — the whole
// (copy-in, kernel, copy-out) sequence for chunk i on stream0, then for
// chunk i+N on stream1. Returns elapsed milliseconds.
static float runTwoStreamsDepthFirst(BinaryKernel kernel, cudaStream_t s0, cudaStream_t s1,
                                     int *dev_a0, int *dev_b0, int *dev_c0,
                                     int *dev_a1, int *dev_b1, int *dev_c1,
                                     int *a, int *b, int *c,
                                     cudaEvent_t start, cudaEvent_t stop)
{
    float ms = 0.0f;
    CUDA_CHECK_RETURN(cudaEventSynchronize(start));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    for (int i = 0; i < sizeVector; i += N * 2) {
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_a0, a + i, N * sizeof(int), cudaMemcpyHostToDevice, s0));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_b0, b + i, N * sizeof(int), cudaMemcpyHostToDevice, s0));
        kernel <<< N / K, K, 0, s0 >>> (dev_a0, dev_b0, dev_c0);
        CUDA_CHECK_RETURN(cudaMemcpyAsync(c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, s0));

        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_a1, a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, s1));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_b1, b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, s1));
        kernel <<< N / K, K, 0, s1 >>> (dev_a1, dev_b1, dev_c1);
        CUDA_CHECK_RETURN(cudaMemcpyAsync(c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, s1));
    }
    CUDA_CHECK_RETURN(cudaStreamSynchronize(s0));
    CUDA_CHECK_RETURN(cudaStreamSynchronize(s1));
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&ms, start, stop));
    return ms;
}

// Strategy 3: two streams, work issued breadth-first — all copy-ins across both
// streams, then both kernels, then both copy-outs, which gives the copy engines
// more overlap opportunity. Returns elapsed milliseconds.
static float runTwoStreamsBreadthFirst(BinaryKernel kernel, cudaStream_t s0, cudaStream_t s1,
                                       int *dev_a0, int *dev_b0, int *dev_c0,
                                       int *dev_a1, int *dev_b1, int *dev_c1,
                                       int *a, int *b, int *c,
                                       cudaEvent_t start, cudaEvent_t stop)
{
    float ms = 0.0f;
    CUDA_CHECK_RETURN(cudaEventSynchronize(start));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    for (int i = 0; i < sizeVector; i += N * 2) {
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_a0, a + i, N * sizeof(int), cudaMemcpyHostToDevice, s0));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_a1, a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, s1));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_b0, b + i, N * sizeof(int), cudaMemcpyHostToDevice, s0));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(dev_b1, b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, s1));
        kernel <<< N / K, K, 0, s0 >>> (dev_a0, dev_b0, dev_c0);
        kernel <<< N / K, K, 0, s1 >>> (dev_a1, dev_b1, dev_c1);
        CUDA_CHECK_RETURN(cudaMemcpyAsync(c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, s0));
        CUDA_CHECK_RETURN(cudaMemcpyAsync(c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, s1));
    }
    CUDA_CHECK_RETURN(cudaStreamSynchronize(s0));
    CUDA_CHECK_RETURN(cudaStreamSynchronize(s1));
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&ms, start, stop));
    return ms;
}

// Benchmarks add/mul over pinned host memory under three stream-issue
// strategies, printing the elapsed time and the tail of each result.
// FIX: the original leaked both streams and both events (never destroyed);
// they are now released at the end. The six duplicated benchmark loops are
// factored into the three strategy helpers above.
void VectorOps()
{
    cudaStream_t stream0, stream1;
    int *a;
    int *b;
    int *c;
    // pinned host memory so cudaMemcpyAsync can actually run asynchronously
    CUDA_CHECK_RETURN(cudaHostAlloc((void**)&a, sizeVector * sizeof(int), cudaHostAllocDefault));
    CUDA_CHECK_RETURN(cudaHostAlloc((void**)&b, sizeVector * sizeof(int), cudaHostAllocDefault));
    CUDA_CHECK_RETURN(cudaHostAlloc((void**)&c, sizeVector * sizeof(int), cudaHostAllocDefault));
    refillInputs(a, b);

    int *dev_a0 = 0; int *dev_b0 = 0; int *dev_c0 = 0;
    int *dev_a1 = 0; int *dev_b1 = 0; int *dev_c1 = 0;
    cudaEvent_t start, stop;
    CUDA_CHECK_RETURN(cudaStreamCreate(&stream0));
    CUDA_CHECK_RETURN(cudaStreamCreate(&stream1));
    float time;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_c0, sizeVector * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a0, sizeVector * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_b0, sizeVector * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_c1, sizeVector * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a1, sizeVector * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_b1, sizeVector * sizeof(int)));

    time = runSingleStream(addKernel, stream0, dev_a0, dev_b0, dev_c0, a, b, c, start, stop);
    printf("\nV.1-Add Time: %f ms\n", time);
    printTail(a, b, c);

    refillInputs(a, b);
    time = runSingleStream(mulKernel, stream1, dev_a1, dev_b1, dev_c1, a, b, c, start, stop);
    printf("\nV.1-Mul Time: %f ms\n", time);
    printTail(a, b, c);

    refillInputs(a, b);
    time = runTwoStreamsDepthFirst(addKernel, stream0, stream1,
                                   dev_a0, dev_b0, dev_c0, dev_a1, dev_b1, dev_c1,
                                   a, b, c, start, stop);
    printf("\nV.2-Add Time: %f ms\n", time);
    printTail(a, b, c);

    refillInputs(a, b);
    time = runTwoStreamsDepthFirst(mulKernel, stream0, stream1,
                                   dev_a0, dev_b0, dev_c0, dev_a1, dev_b1, dev_c1,
                                   a, b, c, start, stop);
    printf("\nV.2-Mul Time: %f ms\n", time);
    printTail(a, b, c);

    refillInputs(a, b);
    time = runTwoStreamsBreadthFirst(addKernel, stream0, stream1,
                                     dev_a0, dev_b0, dev_c0, dev_a1, dev_b1, dev_c1,
                                     a, b, c, start, stop);
    printf("\nV.3-Add Time: %f ms\n", time);
    printTail(a, b, c);

    refillInputs(a, b);
    time = runTwoStreamsBreadthFirst(mulKernel, stream0, stream1,
                                     dev_a0, dev_b0, dev_c0, dev_a1, dev_b1, dev_c1,
                                     a, b, c, start, stop);
    printf("\nV.3-Mul Time: %f ms\n", time);
    printTail(a, b, c);

    CUDA_CHECK_RETURN(cudaFreeHost(a));
    CUDA_CHECK_RETURN(cudaFreeHost(b));
    CUDA_CHECK_RETURN(cudaFreeHost(c));
    CUDA_CHECK_RETURN(cudaFree(dev_a0));
    CUDA_CHECK_RETURN(cudaFree(dev_a1));
    CUDA_CHECK_RETURN(cudaFree(dev_b0));
    CUDA_CHECK_RETURN(cudaFree(dev_b1));
    CUDA_CHECK_RETURN(cudaFree(dev_c0));
    CUDA_CHECK_RETURN(cudaFree(dev_c1));
    // FIX: release the streams and events (previously leaked)
    CUDA_CHECK_RETURN(cudaStreamDestroy(stream0));
    CUDA_CHECK_RETURN(cudaStreamDestroy(stream1));
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
}

// Times 100 H2D (up=true) or D2H (up=false) copies of `size` ints using
// ordinary pageable host memory (malloc). Returns elapsed milliseconds.
float cuda_memory_malloc_test(int size, bool up)
{
    cudaEvent_t start, stop;
    int *a, *dev_a;
    float elapsedTime = 0.0f;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    a = (int*)malloc(size * sizeof(*a));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a)));
    CUDA_CHECK_RETURN(cudaEventSynchronize(start));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    for (int i = 0; i < 100; i++) {
        if(up) {
            CUDA_CHECK_RETURN(cudaMemcpy(dev_a, a, size * sizeof(*dev_a), cudaMemcpyHostToDevice));
        } else {
            CUDA_CHECK_RETURN(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost));
        }
    }
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    free(a);
    CUDA_CHECK_RETURN(cudaFree(dev_a));
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
    return elapsedTime;
}

// Same benchmark as cuda_memory_malloc_test, but using pinned (page-locked)
// host memory from cudaHostAlloc. Returns elapsed milliseconds.
float cuda_alloc_memory_malloc_test(int size, bool up)
{
    cudaEvent_t start, stop;
    int *a, *dev_a;
    float elapsedTime = 0.0f;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a)));
    CUDA_CHECK_RETURN(cudaHostAlloc((void**)&a, size * sizeof(*a), cudaHostAllocDefault));
    CUDA_CHECK_RETURN(cudaEventSynchronize(start));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    for (int i = 0; i < 100; i++) {
        if (up) {
            CUDA_CHECK_RETURN(cudaMemcpy(dev_a, a, size * sizeof(*a), cudaMemcpyHostToDevice));
        } else {
            CUDA_CHECK_RETURN(cudaMemcpy(a, dev_a, size * sizeof(*a), cudaMemcpyDeviceToHost));
        }
    }
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    CUDA_CHECK_RETURN(cudaFree(dev_a));
    CUDA_CHECK_RETURN(cudaFreeHost(a));
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
    return elapsedTime;
}

// Compares pageable vs. pinned transfer bandwidth, then runs the stream benchmarks.
int main()
{
    srand(time(NULL));
    float elapsedTime;
    // total MB moved by each 100-copy benchmark
    float MB = (float)100 * sizeVector * sizeof(int)/1024/1024;
    elapsedTime = cuda_memory_malloc_test(sizeVector, true);
    printf("Without block pages GPU: %3.5f ms\n",elapsedTime);
    printf("\tMB GPU %3.1f\n", MB/(elapsedTime/1000));
    elapsedTime = cuda_memory_malloc_test(sizeVector, false);
    printf("Without block pages CPU: %3.5f ms\n", elapsedTime);
    printf("\tMB CPU %3.1f\n", MB / (elapsedTime / 1000));
    elapsedTime = cuda_alloc_memory_malloc_test(sizeVector, true);
    printf("Block pages GPU: %3.5f ms\n", elapsedTime);
    printf("\tMB GPU %3.1f\n", MB / (elapsedTime / 1000));
    elapsedTime = cuda_alloc_memory_malloc_test(sizeVector, false);
    printf("Block pages CPU: %3.5f ms\n", elapsedTime);
    printf("\tMB CPU %3.1f\n", MB / (elapsedTime / 1000));
    VectorOps();
    return 0;
}
1,183
#include <stdio.h>
#include "imageutils.cuh"

// dimensions of the thread blocks
#define NUM_BLOCKS_X 16
#define NUM_BLOCKS_Y 16

// Produces the photographic negative of an RGBA image: each color channel c
// becomes 255 - c. One pixel per thread; numRows maps to the x grid dimension
// and numCols to the y dimension (matching the launch in main).
__global__ void rgba_to_negative( uchar4 *rgbaImage, uchar4 *negativeImage, int numRows, int numCols )
{
    // finding pixel assigned to this thread
    int thread_x = blockDim.x * blockIdx.x + threadIdx.x;
    int thread_y = blockDim.y * blockIdx.y + threadIdx.y;
    int idx = thread_x * numCols + thread_y;

    // thread is out of range (happens when block dimensions don't allign)
    if(thread_x >= numRows || thread_y >= numCols) {
        return;
    }

    negativeImage[idx].x = 255 - rgbaImage[idx].x;
    negativeImage[idx].y = 255 - rgbaImage[idx].y;
    negativeImage[idx].z = 255 - rgbaImage[idx].z;
    // FIX: the alpha channel was never written, so the output carried
    // uninitialized device memory in .w; carry the source alpha through.
    negativeImage[idx].w = rgbaImage[idx].w;
}

// Loads a PPM image, negates it on the GPU, and writes the result back out.
int main()
{
    // load input picture
    PPMImage *input_image = readPPM("../PPMImages/Poivron.ppm");
    const int dim_x = input_image->x;
    const int dim_y = input_image->y;

    // dimension and size of both arrays for testing
    const int RGB_SIZE = dim_x * dim_y;
    const int RGB_BYTES = RGB_SIZE * sizeof(uchar4);
    const int NEGATIVE_SIZE = dim_x * dim_y;
    const int NEGATIVE_BYTES = NEGATIVE_SIZE * sizeof(uchar4);

    // calculating grid and block dimensions of threads (ceil-divide to cover edges)
    int grid_size_x = (dim_x + NUM_BLOCKS_X - 1) / NUM_BLOCKS_X;
    int grid_size_y = (dim_y + NUM_BLOCKS_Y - 1) / NUM_BLOCKS_Y;
    dim3 grid_dims = dim3(grid_size_x, grid_size_y, 1);
    dim3 block_dims = dim3(NUM_BLOCKS_X, NUM_BLOCKS_Y, 1);

    // memory pointers
    uchar4 *h_image_rgb;
    uchar4 *h_image_negative;
    uchar4 *d_image_rgb;
    uchar4 *d_image_negative;

    // memory allocation on host
    h_image_rgb = PPM_to_uchar4(input_image, 255);
    h_image_negative = (uchar4 *) malloc(NEGATIVE_BYTES);

    // memory allocation on device
    cudaMalloc((void **) &d_image_rgb, RGB_BYTES);
    cudaMalloc((void **) &d_image_negative, NEGATIVE_BYTES);

    // transferring input array to device memory
    cudaMemcpy(d_image_rgb, h_image_rgb, RGB_BYTES, cudaMemcpyHostToDevice);

    // launching kernels
    rgba_to_negative<<<grid_dims, block_dims>>>( d_image_rgb, d_image_negative, dim_x, dim_y );

    // getting back the negative image
    cudaMemcpy(h_image_negative, d_image_negative, NEGATIVE_BYTES, cudaMemcpyDeviceToHost);

    // save resulting file
    PPMImage *result = uchar4_to_PPM(h_image_negative, dim_x, dim_y);
    writePPM("../PPMResults/Poivron_neg.ppm", result);

    // free host memory
    free(h_image_rgb);
    free(h_image_negative);

    // free device memory
    cudaFree(d_image_rgb);
    cudaFree(d_image_negative);

    return 0;
}
1,184
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>

#define num 25

// Device kernel: launched as <<<N, 1>>>, so each block's single thread adds
// one element pair and prints it (device printf is for demonstration only).
__global__ void gpuAdd(int *d_a, int *d_b, int* d_c, int N=num)
{
    int tid = blockIdx.x;
    if(tid < N)
    {
        d_c[tid] = d_a[tid] + d_b[tid];
        printf("%d + %d = %d\n", d_a[tid], d_b[tid], d_c[tid]);
    }
}

// CPU reference implementation over raw pointers.
void cpuAdd(int *h_a, int *h_b, int *h_c, int N=num)
{
    for(int i = 0; i < N; i++)
        h_c[i] = h_a[i] + h_b[i];
}

// CPU reference implementation over std::vector.
void cpuAdd_vec(std::vector<int> &h_a, std::vector<int> &h_b, std::vector<int> &h_c, int N=num)
{
    for(int i = 0; i < N; i++)
        h_c[i] = h_a[i] + h_b[i];
}

int main(void)
{
    int N=num;
    std::cout << "N is " << num << "\n";

    int *d_a, *d_b, *d_c; // device buffers for the operands and the answer
    std::cout <<"Device allocate.. ";
    cudaMalloc((void**)&d_a, N*sizeof(int));
    cudaMalloc((void**)&d_b, N*sizeof(int));
    cudaMalloc((void**)&d_c, N*sizeof(int));

    std::vector<int> h_a(N), h_b(N), h_c(N);
    std::cout << "Allocated\n";

    // h_a[i] = i, h_b[i] = i^2; h_c holds placeholder values until verified.
    for(int i=0; i<N; i++)
    {
        h_a[i] = i;
        h_b[i] = i * i;
        h_c[i] = i;
    }
    std::cout << "Finished!!!\n";

    // copy host to device
    cudaMemcpy(d_a, h_a.data(), N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), N*sizeof(int), cudaMemcpyHostToDevice);
    std::cout << "Ported to device\n";

    clock_t start, end;
    start = clock();
    gpuAdd <<<N, 1>>> (d_a, d_b, d_c, N);
    cudaDeviceSynchronize();
    end = clock();
    std:: cout << "GPU time: " << (double)(end-start)/ CLOCKS_PER_SEC <<'\n';

    // BUG FIX: the device result was never copied back to the host nor
    // checked; fetch it and verify against the CPU reference.
    cudaMemcpy(h_c.data(), d_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    std::vector<int> ref(N);
    cpuAdd_vec(h_a, h_b, ref, N);
    bool ok = (h_c == ref);
    std::cout << (ok ? "Result OK\n" : "Result MISMATCH\n");

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return ok ? 0 : 1;
}
1,185
#include <stdio.h>
#include<stdlib.h>
#include<string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define BLOCK_SIZE 1024
#define SECTION_SIZE 2*BLOCK_SIZE

// Work-efficient (tree-based) inclusive scan of one SECTION_SIZE chunk in
// shared memory.  Each of the BLOCK_SIZE threads loads two elements.
// NOTE(review): the host launches this with start = blockIdx.x * blockDim.x,
// so each block writes only its first blockDim.x outputs even though it
// scans a 2*blockDim.x window — confirm this overlap is intended before
// changing the launch configuration.
__global__ void listScanKernel(float * input, float * output, int len)
{
    __shared__ float list[SECTION_SIZE];
    unsigned int t = threadIdx.x;
    unsigned int start = blockIdx.x*blockDim.x;

    list[t] = ((t+start) < len ) ? input[t+start]:0.0f;
    list[t+blockDim.x] = ((start+t+blockDim.x) < len) ? input[start+t+blockDim.x]:0.0f;
    // BUG FIX: the first reduction step reads values loaded by other
    // threads, so all shared-memory loads must be visible before the tree
    // phase starts.  This barrier was missing (data race).
    __syncthreads();

    // Up-sweep (reduction) phase.
    for(unsigned int stride =1;stride<= BLOCK_SIZE; stride*=2)
    {
        int index = (t+1)*stride*2-1;
        if(index<SECTION_SIZE)
            list[index]+=list[index-stride];
        __syncthreads();
    }

    // Down-sweep (distribution) phase.
    for(unsigned int stride = BLOCK_SIZE/2;stride>0;stride/=2)
    {
        __syncthreads();
        int index = (t+1)*stride*2-1;
        if(index+stride < SECTION_SIZE )
            list[index+stride]+=list[index];
    }
    __syncthreads();

    if(t+start < len)
        output[t+start] = list[t];
}

// Collects the last scanned element of each block into sumArray so the block
// sums can themselves be scanned.
// NOTE(review): when len is an exact multiple of BLOCK_SIZE,
// len % BLOCK_SIZE - 1 evaluates to -1 and the last block reads
// input[start - 1] — confirm inputs are never exact multiples, or guard it.
__global__ void loadSumArrayKernel(float *input, float *sumArray, int len)
{
    unsigned int t = threadIdx.x;
    unsigned int start = blockIdx.x*blockDim.x;
    unsigned int lastBlockId = (len-1)/BLOCK_SIZE;
    unsigned int lastThreadIdx = (len%BLOCK_SIZE-1);
    if(t+start<len)
    {
        if(blockIdx.x == lastBlockId)
            sumArray[blockIdx.x] = input[lastThreadIdx+start];
        else
            sumArray[blockIdx.x] = input[start+blockDim.x-1];
    }
}

// Adds the scanned block-sum of every preceding block to each element.
__global__ void listScanSumKernel(float *input, float *output,int len)
{
    unsigned int t = threadIdx.x;
    unsigned int start = blockIdx.x*blockDim.x;
    if(t+start<len && blockIdx.x>0)
    {
        output[t+start]+=input[blockIdx.x-1];
        // BUG FIX: the original __syncthreads() here sat inside divergent
        // control flow (the t+start<len guard differs within the tail
        // block), which is undefined behaviour.  No shared memory is used
        // in this kernel, so the barrier is simply removed.
    }
}

// Sequential CPU prefix sum, used as the reference result.
void totalCPU(float * input, float * output, int len)
{
    int i=0;
    output[0]=input[0];
    for(i=1;i<len;i++)
        output[i] = output[i-1]+input[i];
    printf("\n*****CPU calculation******\n");
}

// Reads one float per line from FileInput into a; zeroes b.
void loadValue(char *FileInput,int len,float *a,float *b)
{
    FILE *file;
    int i=0;
    char buff[100];
    // BUG FIX: zero all len floats, not len bytes.
    memset(b,0,len*sizeof(float));
    file = fopen(FileInput,"r");
    if(!file)
    {
        printf("\nNo file found!");
        system("pause");
        exit(0);
    }
    // BUG FIX: bound fgets by the buffer size, not by the element count,
    // to avoid overflowing buff when len > 100.
    while(fgets(buff,sizeof(buff),file))
    {
        a[i] = atof(buff);
        i++;
    }
    fclose(file);
}

// Writes the element count followed by one value per line to fileOutput.
void storeResult(char *fileOutput,float *arr,unsigned int len)
{
    FILE *file;
    int count=0;
    file = fopen(fileOutput,"w");
    if(!file)
    {
        printf("\nCannot create file!");
        system("pause");
        exit(0);
    }
    fprintf(file,"%d\n",len);
    for(count =0 ;count<len;count++)
    {
        fprintf(file,"%.0f\n",arr[count]);
    }
    fclose(file);
}

// Prints the array to stdout and pauses.
void dispRes(float *arr,int len)
{
    int i=0;
    printf("result = ");
    for(i=0;i<len;i++)
        printf("%4.0f ",arr[i]);
    system("pause");
}

int main(int argc,char*argv[])
{
    float * hostInput;  // The input 1D list
    float * hostOutput; // The output list
    float * deviceInput;
    float *deviceSumArray;
    float *deviceSumArrayOutput;
    float * deviceOutput;

    // BUG FIX: validate the argument count before dereferencing argv[3].
    if(argc < 4)
    {
        printf("usage: %s <input file> <output file> <num elements>\n",argv[0]);
        return 1;
    }

    int numElements = (int) (atoi)(argv[3]); // number of elements in the input list

    hostInput = (float*)malloc(numElements*sizeof(float));
    hostOutput = (float*)malloc(numElements*sizeof(float));

    //cuda memory allocation on the device
    cudaMalloc((void**)&deviceInput,numElements*sizeof(float));
    cudaMalloc((void**)&deviceOutput,numElements*sizeof(float));
    cudaMalloc((void**)&deviceSumArray,numElements*sizeof(float));
    cudaMalloc((void**)&deviceSumArrayOutput,numElements*sizeof(float));

    printf("Loading values to the array...\n");
    loadValue(argv[1],numElements,hostInput,hostOutput);

    //cuda memory copy from host to device
    cudaMemcpy(deviceInput,hostInput,numElements*sizeof(float),cudaMemcpyHostToDevice);

    //CPU equivalent
    totalCPU(hostInput,hostOutput,numElements);
    dispRes(hostOutput,numElements);

    printf("Calling CUDA kernel...\n");
    dim3 DimGrid((numElements-1)/BLOCK_SIZE+1,1,1);
    dim3 DimBlock(BLOCK_SIZE,1,1);
    // Three-phase scan: local scans, gather block sums, scan the sums,
    // then add them back.
    // NOTE(review): the block-sum scan reuses the same grid/len, so inputs
    // larger than BLOCK_SIZE^2 elements would need a recursive sum scan —
    // confirm the supported input range.
    listScanKernel<<<DimGrid,DimBlock>>>(deviceInput,deviceOutput,numElements);
    loadSumArrayKernel<<<DimGrid,DimBlock>>>(deviceOutput,deviceSumArray,numElements);
    listScanKernel<<<DimGrid,DimBlock>>>(deviceSumArray,deviceSumArrayOutput,numElements);
    listScanSumKernel<<<DimGrid,DimBlock>>>(deviceSumArrayOutput,deviceOutput,numElements);

    //cuda memory copy from device to host
    cudaMemcpy(hostOutput,deviceOutput,numElements*sizeof(float),cudaMemcpyDeviceToHost);
    dispRes(hostOutput,numElements);
    storeResult(argv[2],hostOutput,numElements);

    free(hostInput);
    free(hostOutput);
    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    // BUG FIX: these two device buffers were previously leaked.
    cudaFree(deviceSumArray);
    cudaFree(deviceSumArrayOutput);
    return 0;
}
1,186
#include <stdio.h>

// Element-wise vector addition: c[i] = a[i] + b[i] for every i < numElements.
// One thread per element; threads past the tail simply exit.
__global__ void vectorAdd(const float *a, const float *b, float *c, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= numElements)
        return;
    c[i] = a[i] + b[i];
}

int main(int argc, char *argv[])
{
    const int numElements = 5e+4;
    const size_t numBytes = sizeof(float) * numElements;

    // Host-side buffers for the two operands and the result.
    float *h_a = (float *)malloc(numBytes);
    float *h_b = (float *)malloc(numBytes);
    float *h_c = (float *)malloc(numBytes);

    // Fill both operands with uniform values in [0, 1].
    for (int i = 0; i < numElements; ++i) {
        h_a[i] = rand() / (float)RAND_MAX;
        h_b[i] = rand() / (float)RAND_MAX;
    }

    // Device-side buffers.
    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc((void **)&d_a, numBytes);
    cudaMalloc((void **)&d_b, numBytes);
    cudaMalloc((void **)&d_c, numBytes);

    // Stage the operands on the device (synchronous copies).
    cudaMemcpy(d_a, h_a, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, numBytes, cudaMemcpyHostToDevice);

    // Launch configuration: 256-thread blocks, ceil-div grid.
    const int numThreadsPerBlock = 256;
    const int numBlocksPerGrid =
        (numElements + numThreadsPerBlock - 1) / numThreadsPerBlock;

    // Kernel launch is asynchronous with respect to the host.
    vectorAdd<<<numBlocksPerGrid, numThreadsPerBlock>>>(d_a, d_b, d_c, numElements);

    // Blocking copy of the result back to the host (also waits for the kernel).
    cudaMemcpy(h_c, d_c, numBytes, cudaMemcpyDeviceToHost);

    // Validate against a host-side sum; report only the first mismatch.
    for (int i = 0; i < numElements; ++i) {
        const float expected = h_a[i] + h_b[i];
        if (fabs(h_c[i] - expected) > 1e-7) {
            printf("h_c[%d] = %f, expected = %f\n", i, h_c[i], expected);
            break;
        }
    }

    // Cleanup.
    cudaFree(d_c);
    cudaFree(d_b);
    cudaFree(d_a);
    cudaDeviceReset();
    free(h_c);
    free(h_b);
    free(h_a);
}
1,187
#include <stdio.h>
#include <time.h>
#include <malloc.h>

// macro for CUDA error handling: print the error string and abort
#define CUDA_CHECK_RETURN(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }\
}

// Transposes a square matrix whose side N is derived from the launch
// configuration (N = blockDim.x * gridDim.x).
// NOTE(review): unused by main in this file — kept for reference.
__global__ void gTranspose0(float* storage_d, float* storage_d_t){
    int i=threadIdx.x+blockIdx.x*blockDim.x;
    int j=threadIdx.y+blockIdx.y*blockDim.y;
    int N=blockDim.x*gridDim.x;
    storage_d_t[j+i*N]=storage_d[i+j*N];
}

// Fills element (i, j) of an N x N matrix with its row-major linear index
// (N = blockDim.x * gridDim.x; requires a 2D launch to cover all rows).
__global__ void gInitializeMatrixByRows(long long n, double* matrix_d){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    int N = blockDim.x * gridDim.x;
    matrix_d[i+j*N] = (double)(i+j*N);
}

// Same as above but writes in column-major order.
__global__ void gInitializeMatrixByColumns(long long n, double* matrix_d){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    int N = blockDim.x * gridDim.x;
    matrix_d[j+i*N] = (double)(j+i*N);
}

int main(int argc, char *argv[])
{
    // set the preferred cache configuration for the current device:
    //cudaFuncSetCacheConfig(gInitVectors, cudaFuncCachePreferL1);
    if (argc < 3) {
        printf("Error: run program with 2 args: n, threads per block\n");
        return 1;
    }
    long long n, threads;
    // BUG FIX: use atoll for long long arguments (atoi truncates to int).
    n = atoll(argv[1]);
    threads = atoll(argv[2]);
    double *matrix1_d, *matrix2_d;

    // BUG FIX: the kernels index in 2D (i from x, j from y), but the old
    // 1D launch (<<< n / threads, threads >>>) pinned j to 0, so only the
    // first n of the n*n elements were ever written.  Launch a 2D grid with
    // one block row per matrix row instead.  Assumes n % threads == 0, as
    // the original grid computation did.
    dim3 block(threads, 1);
    dim3 grid(n / threads, n);

    for (int i = 0; i < 10; i++) {
        CUDA_CHECK_RETURN(cudaMalloc((void**)&matrix1_d, n * n * sizeof(double)));
        gInitializeMatrixByRows <<< grid, block >>> (n, matrix1_d);
        cudaDeviceSynchronize();
        CUDA_CHECK_RETURN(cudaGetLastError());
        cudaFree(matrix1_d);

        CUDA_CHECK_RETURN(cudaMalloc((void**)&matrix2_d, n * n * sizeof(double)));
        gInitializeMatrixByColumns <<< grid, block >>> (n, matrix2_d);
        cudaDeviceSynchronize();
        CUDA_CHECK_RETURN(cudaGetLastError());
        cudaFree(matrix2_d);
    }
    return 0;
}
1,188
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Host wrapper: copies A and B to the device, launches the add kernel,
// and copies the element-wise sum back into C.
void vectorAdd(double* A, double* B,double* C,int n);
// Device kernel: one thread per element, C[i] = A[i] + B[i].
__global__ void vecAddKernel(double* A, double* B, double* C, int n);

int main()
{
    double *h_A, *h_B, *h_C;
    int i;
    long N=10000;
    int size=N*sizeof(double);

    h_A=(double*)malloc(size);
    h_B=(double*)malloc(size);
    h_C=(double*)malloc(size);
    if(h_A==NULL||h_B==NULL||h_C==NULL)
    {
        printf("malloc failed!");
        exit(1);
    }

    for(i=0;i<N;i++)
    {
        h_A[i]=i*2;
        h_B[i]=i*3;
    }

    vectorAdd(h_A,h_B,h_C,N);

    // spot-check the first ten sums
    for(i=0;i<10;i++)
    {
        printf("h_C[%d] is %f,should be %f\n",i,h_C[i],h_A[i]+h_B[i]);
    }

    // BUG FIX: release the host buffers (previously leaked).
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}

void vectorAdd(double* A, double* B,double* C,int n)
{
    double *d_A=NULL, *d_B=NULL, *d_C=NULL;
    int size=sizeof(double)*n;

    // BUG FIX: check the cudaMalloc return codes directly; testing the
    // pointers for NULL afterwards is not a reliable failure check.
    cudaError_t err = cudaMalloc((void**)&d_A,size);
    if(err == cudaSuccess) err = cudaMalloc((void**)&d_B,size);
    if(err == cudaSuccess) err = cudaMalloc((void**)&d_C,size);
    if(err != cudaSuccess)
    {
        printf("device allocate memory failed!\n");
        cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
        return;
    }

    cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);

    // 1024-thread blocks; ceil-div grid so the tail is covered.
    vecAddKernel<<<ceil(n/1024.0),1024>>>(d_A,d_B,d_C,n);

    // blocking copy back (also synchronizes with the kernel)
    cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
    cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}

__global__ void vecAddKernel(double* A, double* B, double* C, int n)
{
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<n)
    {
        C[i]=A[i]+B[i];
    }
}

//add nvcc -arch compute_13 to enable double
/*
Device 0: "Quadro K600"
CUDA Driver Version / Runtime Version          5.5 / 5.5
CUDA Capability Major/Minor version number:    3.0
Total amount of global memory:                 1024 MBytes (1073414144 bytes)
( 1) Multiprocessors, (192) CUDA Cores/MP:     192 CUDA Cores
GPU Clock rate:                                876 MHz (0.88 GHz)
Memory Clock rate:                             891 Mhz
Memory Bus Width:                              128-bit
L2 Cache Size:                                 262144 bytes
Maximum Texture Dimension Size (x,y,z)         1D=(65536), 2D=(65536, 65536), 3D=(4096, 4096, 4096)
Maximum Layered 1D Texture Size, (num) layers  1D=(16384), 2048 layers
Maximum Layered 2D Texture Size, (num) layers  2D=(16384, 16384), 2048 layers
Total amount of constant memory:               65536 bytes
Total amount of shared memory per block:       49152 bytes
Total number of registers available per block: 65536
Warp size:                                     32
Maximum number of threads per multiprocessor:  2048
Maximum number of threads per block:           1024
Max dimension size of a thread block (x,y,z):  (1024, 1024, 64)
Max dimension size of a grid size (x,y,z):     (2147483647, 65535, 65535)
Maximum memory pitch:                          2147483647 bytes
Texture alignment:                             512 bytes
Concurrent copy and kernel execution:          Yes with 1 copy engine(s)
Run time limit on kernels:                     Yes
Integrated GPU sharing Host Memory:            No
Support host page-locked memory mapping:       Yes
Alignment requirement for Surfaces:            Yes
Device has ECC support:                        Disabled
Device supports Unified Addressing (UVA):      Yes
Device PCI Bus ID / PCI location ID:           5 / 0
Compute Mode:
< Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >
deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 5.5, CUDA Runtime Version = 5.5, NumDevs = 1, Device0 = Quadro K600
Result = PASS
*/
1,189
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>

#define Shared_Mem_Size 16*16*4

// Tiled square matrix multiplication: c = a * b, all n x n, row-major.
// Requires blockDim.x == blockDim.y == tile_size and n % tile_size == 0
// (no bounds checks are performed).
__global__ void tile_MatrixMul(int* a, int* b, int* c, int n, int tile_size) {
    // statically-sized shared-memory tiles
    __shared__ int A[Shared_Mem_Size];
    __shared__ int B[Shared_Mem_Size];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // global row and column positions for this thread
    int row = by * tile_size + ty;
    int col = bx * tile_size + tx;

    // intermediate sum for the element being written
    int temp_val = 0;

    // sweep tiles over the entire matrix
    for (int i = 0; i < (n / tile_size); i++) {
        /*
          Every thread in a threadblock loads one element into shared memory.
          The element location in shared memory corresponds to the thread's
          position in the threadblock (e.g. thread[0,0] loads for
          A[0 * tile_size + 0] and B[0 * tile_size + 0]).

          Indexing parameters:
          A: row*n indexes the global row (loop invariant); i*tile_size
             indexes a new set of columns each iteration; tx indexes the
             column within that set.
          B: col indexes the global column (loop invariant); i*tile_size*n
             indexes the next set of rows each iteration; ty*n indexes the
             row within that set.
        */
        A[(ty * tile_size) + tx] = a[row * n + (i * tile_size + tx)];
        B[(ty * tile_size) + tx] = b[(i * tile_size * n + ty * n) + col];

        // ensure all threads have loaded their data before proceeding
        __syncthreads();

        // accumulate the partial products for this tile
        for (int j = 0; j < tile_size; j++) {
            temp_val += A[(ty * tile_size) + j] * B[(j * tile_size) + tx];
        }

        // ensure no thread races ahead and stomps the shared tiles
        __syncthreads();
    }

    c[(row * n) + col] = temp_val;
}

// Fill an n x n matrix with random values in [0, 100).
void Mat_init(int* a, int n) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            a[i * n + j] = rand() % 100;
        }
    }
}

// Verify the GPU product against a straightforward CPU triple loop.
void check_answer(int* a, int* b, int* c, int n) {
    // BUG FIX: the reference buffer was malloc'd and then accumulated into
    // with +=, reading indeterminate memory; calloc zero-initializes it.
    int* result = (int*)calloc((size_t)n * n, sizeof(int));
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            for (int k = 0; k < n; k++) {
                result[i * n + j] += a[i * n + k] * b[k * n + j];
            }
        }
    }
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            assert(c[i * n + j] == result[i * n + j]);
        }
    }
    // BUG FIX: the reference buffer was previously leaked.
    free(result);
}

int main() {
    // matrix of size 1024 x 1024
    int n = 1 << 10;

    // host memory pointers
    int* h_a, * h_b, * h_c;
    // allocation size for all matrices
    size_t bytes = sizeof(int) * n * n;
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);

    // device memory pointers
    int* d_a, * d_b, * d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    // initialize a and b with random values between 0 and 99
    Mat_init(h_a, n);
    Mat_init(h_b, n);

    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // threadblock size
    int BLOCKS = 16;
    // BUG FIX: ceil(n / BLOCKS) applied to an already-truncated integer
    // division was a no-op; use integer ceil-div so n values that are not
    // multiples of BLOCKS still get a covering grid.
    int GRID = (n + BLOCKS - 1) / BLOCKS;

    dim3 grid(GRID, GRID);
    dim3 threads(BLOCKS, BLOCKS);

    // launch kernel on the default stream (no dynamic shared memory)
    tile_MatrixMul <<<grid, threads >>> (d_a, d_b, d_c, n, BLOCKS);

    // copy result back to host (blocking; also waits for the kernel)
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    // check result for errors
    check_answer(h_a, h_b, h_c, n);

    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    printf("COMPLETED SUCCESFULLY\n");
    return 0;
}
1,190
#include <stdio.h>
// BUG FIX: EXIT_SUCCESS is declared in <stdlib.h>; relying on a transitive
// include is not portable.
#include <stdlib.h>

const int N = 16;

// Adds b[i] into the character a[i]; with the arrays below this rewrites
// "Hello " into "World!".
__global__ void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int cuda_test()
{
    char a[N] = "Hello \0\0\0\0\0\0";
    int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    char* ad = NULL;
    int* bd = NULL;

    printf("%s", a);

    cudaMalloc((void**)&ad, sizeof(a));
    cudaMalloc((void**)&bd, sizeof(b));
    cudaMemcpy(ad, a, sizeof(a), cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, sizeof(b), cudaMemcpyHostToDevice);

    // one block of N threads, one thread per character
    dim3 dimBlock(N, 1);
    dim3 dimGrid(1, 1);
    hello<<<dimGrid, dimBlock>>>(ad, bd);

    // blocking copy back (also synchronizes with the kernel)
    cudaMemcpy(a, ad, sizeof(a), cudaMemcpyDeviceToHost);
    cudaFree(ad);
    cudaFree(bd);

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
1,191
#include <stdio.h>
#include <stdlib.h>

// Kernel: for each thread tid with tid+2 < *b and tid < *b/2, exchanges
// a[tid] with a[tid+2] (n holds the old a[tid] during the swap).
// NOTE(review): for larger *b, threads tid and tid+2 can both satisfy the
// guard and touch a[tid+2] concurrently — looks like a data race; confirm
// the intended semantics before reusing this with n > 5.
__global__ void add(int *a,int *b)
{
    int tid = threadIdx.x;
    int n=a[tid];
    if(tid+2<*b && tid<(*b)/2)
    {
        a[tid]=a[tid+2];
        a[tid+2]=n;
    }
}

int main(void)
{
    int n,a[20],c[20];
    // NOTE(review): the prompt suggests interactive input, but n is
    // hard-coded to 5 just below — presumably left over from testing.
    printf("Enter value of N:");
    n=5;
    printf("Enter array elements of array A\n");
    // a is filled with 0..n-1 rather than read from stdin
    for(int i=0;i<n;i++)
    {
        a[i]=i;
    }
    int *d_a,*d_b,*d;  // NOTE(review): d is declared but never used
    int size = sizeof(int);
    cudaMalloc((void **)&d_a,size*n);
    cudaMalloc((void **)&d_b,size);
    cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,&n,size,cudaMemcpyHostToDevice);
    // one block of n threads; each thread considers one element
    add<<<1,n>>>(d_a,d_b);
    // blocking copy of the (partially swapped) array back to the host
    cudaMemcpy(c,d_a,size*n,cudaMemcpyDeviceToHost);
    printf("array is :");
    for(int i=0;i<n;i++)
    {
        printf("%d ",c[i]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
1,192
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
#include <limits>

int main()
{
    // Read one price per line from stdin into host memory.
    thrust::host_vector<double> host;
    // BUG FIX: the old loop tested cin.good() before reading, which pushed
    // one junk value after the final successful extraction.
    double t;
    while (std::cin >> t) {
        host.push_back(t);
    }

    thrust::device_vector<double> dev(host);

    // 1. Mean price over the whole series (10 years).
    // BUG FIX: the init value must be 0.0 (double); with the int literal 0,
    // thrust::reduce accumulates in int and truncates every partial sum.
    double media_gpu = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>()) / dev.size();

    // 2. Mean price over the last year (final 365 samples).
    // BUG FIX: divide by the 365 reduced elements, not the whole series.
    double media_gpu_ua = thrust::reduce(dev.end() - 365, dev.end(), 0.0, thrust::plus<double>()) / 365.0;

    // 3. Largest and smallest price of the whole series and of the last year.
    // BUG FIX: use -inf/+inf as identity values (0 is wrong for all-positive
    // or all-negative data), and use maximum<> for the maxima — max_ua
    // previously used minimum<> by copy-paste.
    const double inf = std::numeric_limits<double>::infinity();
    double max = thrust::reduce(dev.begin(), dev.end(), -inf, thrust::maximum<double>());
    double min = thrust::reduce(dev.begin(), dev.end(), inf, thrust::minimum<double>());
    double max_ua = thrust::reduce(dev.end() - 365, dev.end(), -inf, thrust::maximum<double>());
    double min_ua = thrust::reduce(dev.end() - 365, dev.end(), inf, thrust::minimum<double>());

    std::cout << dev[0] << "\n";
}
1,193
// Row-major 3D linear index: element (_i, _j, _k) of an _nx x _ny x nz grid.
#define Index3D(_nx,_ny,_i,_j,_k) ((_i)+_nx*((_j)+_ny*(_k)))

// 7-point stencil, x-coarsened by a factor of 2: each thread produces TWO
// output columns per z-slice, at x positions i and i2 = i + blockDim.x.
// The kernel streams over slices k = 1 .. nz-2, keeping the current slice
// in (dynamic) shared memory and the slices below/above in the registers
// bottom/top.  Launch expectations visible from the indexing: a 2D grid,
// blocks of blockDim.x * blockDim.y threads, and dynamic shared memory of
// at least 2 * blockDim.x * blockDim.y floats.
__global__ void block2D_hybrid_coarsen_x(float c0,float c1,float *A0,float *Anext, int nx, int ny, int nz)
{
    // global x positions of this thread's two columns, and the y position
    const int i = blockIdx.x*blockDim.x*2+threadIdx.x;
    const int i2= blockIdx.x*blockDim.x*2+threadIdx.x+blockDim.x;
    const int j = blockIdx.y*blockDim.y+threadIdx.y;

    // shared-memory slots for the two columns (row width is 2*blockDim.x)
    const int sh_id=threadIdx.x + threadIdx.y*blockDim.x*2;
    const int sh_id2=threadIdx.x +blockDim.x+ threadIdx.y*blockDim.x*2;

    // shared memory holds the current z-slice for the whole (coarsened) tile
    extern __shared__ float sh_A0[];
    sh_A0[sh_id]=0.0f;
    sh_A0[sh_id2]=0.0f;
    __syncthreads();

    // interior-domain predicates: only write where all 6 neighbours exist
    const bool w_region =  i>0 && j>0 &&(i<(nx-1)) &&(j<(ny-1)) ;
    const bool w_region2 =  j>0 &&(i2<nx-1) &&(j<ny-1) ;
    // tile-boundary predicates: at an edge, the neighbour is not in shared
    // memory and must be fetched from global memory instead
    const bool x_l_bound = (threadIdx.x==0);
    const bool x_h_bound = ((threadIdx.x+blockDim.x)==(blockDim.x*2-1));
    const bool y_l_bound = (threadIdx.y==0);
    const bool y_h_bound = (threadIdx.y==(blockDim.y-1));

    // per-column register copies of the slices below (bottom) and above (top)
    float bottom=0.0f,bottom2=0.0f,top=0.0f,top2=0.0f;

    // prime the pipeline: slice 0 into bottom*, slice 1 into shared memory
    if((i<nx) &&(j<ny))
    {
        bottom=A0[Index3D (nx, ny, i, j, 0)];
        sh_A0[sh_id]=A0[Index3D (nx, ny, i, j, 1)];
    }
    if((i2<nx) &&(j<ny))
    {
        bottom2=A0[Index3D (nx, ny, i2, j, 0)];
        sh_A0[sh_id2]=A0[Index3D (nx, ny, i2, j, 1)];
    }
    __syncthreads();

    // march through the interior slices
    for(int k=1;k<nz-1;k++)
    {
        float a_left_right,a_up,a_down;

        // prefetch the slice above the current one for column i
        if((i<nx) &&(j<ny))
            top=A0[Index3D (nx, ny, i, j, k+1)];

        if(w_region)
        {
            // neighbours come from shared memory unless this thread sits on
            // a tile boundary, in which case they come from global memory
            a_up        =y_h_bound?A0[Index3D (nx, ny, i, j+1, k )]:sh_A0[sh_id+2*blockDim.x];
            a_down      =y_l_bound?A0[Index3D (nx, ny, i, j-1, k )]:sh_A0[sh_id-2*blockDim.x];
            a_left_right=x_l_bound?A0[Index3D (nx, ny, i-1, j, k )]:sh_A0[sh_id-1];
            // 7-point update: sum of the 6 neighbours times c1 minus the
            // centre times c0
            Anext[Index3D (nx, ny, i, j, k)] = (top + bottom + a_up + a_down +  sh_A0[sh_id+1] +a_left_right)*c1
                - sh_A0[sh_id]*c0;
        }

        // same for the second (coarsened) column i2
        if((i2<nx) &&(j<ny))
            top2=A0[Index3D (nx, ny, i2, j, k+1)];

        if(w_region2)
        {
            a_up        =y_h_bound?A0[Index3D (nx, ny, i2, j+1, k )]:sh_A0[sh_id2+2*blockDim.x];
            a_down      =y_l_bound?A0[Index3D (nx, ny, i2, j-1, k )]:sh_A0[sh_id2-2*blockDim.x];
            a_left_right=x_h_bound?A0[Index3D (nx, ny, i2+1, j, k )]:sh_A0[sh_id2+1];
            Anext[Index3D (nx, ny, i2, j, k)] = (top2 + bottom2 + a_up + a_down +  a_left_right +sh_A0[sh_id2-1])*c1
                - sh_A0[sh_id2]*c0;
        }

        // rotate the slice pipeline: shared slice becomes bottom, prefetched
        // top becomes the new shared slice; barriers bracket the swap so no
        // thread reads a half-updated tile
        __syncthreads();
        bottom=sh_A0[sh_id];
        sh_A0[sh_id]=top;
        bottom2=sh_A0[sh_id2];
        sh_A0[sh_id2]=top2;
        __syncthreads();
    }
}
1,194
#include <stdio.h>

// Busy-wait kernel used only to keep a device occupied for a while.
// NOTE(review): the loop has no observable side effects, so the compiler
// may optimize it away entirely — verify the delay actually happens if the
// overlap behaviour matters.
__global__ void dummy()
{
    int j = 0;
    for(int i = 0; i < 1000000; i++)
        j++;
}

// Experiment: interleave peer-to-peer copies from device 0 to device 1 with
// kernel launches on both devices, then drain both devices.
int main()
{
    cudaStream_t stream1, stream2;  // NOTE(review): stream2 is created but never used
    double *A, *B, *C, *D;

    // C and D live on device 1; A and B on device 0
    cudaSetDevice(1);
    cudaMalloc((void **) &C, 100000000 * sizeof(double));
    cudaMalloc((void **) &D, 10000000 * sizeof(double));
    cudaSetDevice(0);
    cudaMalloc((void **) &A, 100000000 * sizeof(double));
    cudaMalloc((void **) &B, 10000000 * sizeof(double));

    // streams are created while device 0 is current
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);

    // enable access from device 0 to device 1's memory
    // NOTE(review): peer access is one-directional and is only enabled in
    // this direction — confirm that is sufficient for both copies below.
    cudaDeviceEnablePeerAccess(1, 0);

    dummy<<<1, 1>>>();
    cudaSetDevice(0);
    // async copy A (device 0) -> C (device 1) on stream1
    cudaMemcpyPeerAsync(C, 1, A, 0, 100000000 * sizeof(double), stream1);
    cudaSetDevice(1);
    // queue work on device 1 while the copy is in flight
    for(int i = 0; i < 10; i++)
        dummy<<<1, 1>>>();
    cudaSetDevice(0);
    // copy B (device 0) -> D (device 1); no stream argument, so this goes
    // on the default stream
    cudaMemcpyPeerAsync(D, 1, B, 0, 10000000 * sizeof(double));

    // drain both devices before exiting
    for(int i = 0; i < 2; i++)
    {
        cudaSetDevice(i);
        cudaDeviceSynchronize();
    }
    return 0;
}
1,195
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

// Work in progress:
// Odd/Even sort.

// Sorts 16 values in v using 8 passes of odd/even transposition sort, run
// by 16 cooperating threads (one element per thread).  xrefs is initialized
// to the identity permutation; the permutation-tracking variant is still
// commented out below.
// NOTE(review): in the first compare-exchange, tid==0 reads v[-1] and
// tid==15 reads v[16] — out-of-bounds unless the caller pads v; confirm.
// NOTE(review): there is no __syncthreads()/__syncwarp() between the
// compare-exchange phases, so correctness appears to rely on implicit warp
// synchrony — verify on architectures with independent thread scheduling.
__device__ void sortColors3(float * v, float3 * colors, int * xrefs)
{
    int tid = threadIdx.x;

    xrefs[tid] = tid;

    float e;
    // threads 0 and 15 skip the second (odd) phase at the array ends
    const bool active = (tid != 0) && (tid != 15);

    for (int i = 0; i < 8; i++)
    {
        // even phase: pairs (0,1), (2,3), ...
        if (tid & 1) e = min(v[tid], v[tid+1]);
        else e = max(v[tid-1], v[tid]);
        v[tid] = e;

        // odd phase: pairs (1,2), (3,4), ...
        if (active)
        {
            if (tid & 1) e = max(v[tid-1], v[tid]);
            else e = min(v[tid], v[tid+1]);
            v[tid] = e;
        }
    }

    /*for (int i = 0; i < 8; i++)
    {
        int x;
        {
            int odd = tid & 1;
            int x0 = xrefs[tid ^ odd];
            int x1 = xrefs[tid ^ !odd];
            int cmp = v[x0] > v[x1];
            if (cmp ^ odd) x = x0;
            else x = x0;
            xrefs[tid] = x;
        }
        if (active)
        {
            if (tid & 1) e = max(v[xrefs[tid-1]], v[xrefs[tid]]);
            else e = min(v[xrefs[tid]], v[xrefs[tid+1]]);
            v[xrefs[tid]] = e;
        }
    }*/

    // float3 tmp = colors[tid];
    // colors[cmp[tid]] = tmp;
}
1,196
#include <iostream>
#include <stdio.h>
#include <math.h>

// kernels transpose a tile of TILE_DIM x TILE_DIM elements
// using a TILE_DIM x BLOCK_ROWS thread block, so that each thread
// transposes TILE_DIM/BLOCK_ROWS elements.
// TILE_DIM must be an integral multiple of BLOCK_ROWS, and rows/cols must
// be multiples of TILE_DIM (the kernel performs no bounds checks).
#define SIZE 10016
#define TILE_DIM 32
#define BLOCK_ROWS 8

__global__ void cuTranspose(int rows, int cols, const float *input, float *output)
{
    // +1 column of padding avoids shared-memory bank conflicts on the
    // column-wise reads in the write-out loop (no behavioural change).
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];

    // input coordinates of this thread's tile element
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int index_in = x + (y)*rows;

    // transposed coordinates for the output
    x = blockIdx.y * TILE_DIM + threadIdx.x;
    y = blockIdx.x * TILE_DIM + threadIdx.y;
    int index_out = x + (y)*cols;

    // stage a tile in shared memory; each thread copies
    // TILE_DIM/BLOCK_ROWS rows
    for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
        tile[threadIdx.y+i][threadIdx.x] = input[index_in+i*rows];
    }

    // all loads must complete before any transposed reads
    __syncthreads();

    for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
        output[index_out + i*cols] = tile[threadIdx.x][threadIdx.y + i];
    }
    return;
}

// Returns 1 when output is the transpose of input, 0 otherwise
// (assumes a square matrix, as used below).
int check(int nrows, int ncols, float* input, float* output)
{
    for (int i = 0; i < nrows; i++){
        for(int j = 0; j < ncols; j++) {
            if (input[i*ncols + j] != output[j*ncols + i]) {
                printf("Input at (%i, %i) not equal to output at (%i, %i) with %f != %f\n",
                       i, j, j, i, input[i*ncols + j], output[j*ncols + i]);
                return 0;
            }
        }
    }
    return 1;
}

int main(void)
{
    int r = SIZE; // rows
    int c = SIZE; // columns
    int N = r*c;  // total matrix entries

    float *input, *output;
    dim3 dimGrid(r/TILE_DIM, c/TILE_DIM, 1);
    dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);

    // Allocate Unified Memory - accessible from CPU or GPU
    cudaMallocManaged(&input, N*sizeof(float));
    cudaMallocManaged(&output, N*sizeof(float));

    // BUG FIX: the old loops started j at i, leaving the lower triangle of
    // both matrices uninitialized (check() then compared indeterminate
    // values), and the input value did not depend on j, so a broken
    // transpose could still pass.  Fill every element with a value that
    // depends on both indices and is exactly representable as a float.
    for (int i = 0; i < r; i++) {
        for(int j = 0; j < c; j++) {
            input[i*c + j] = (float)((i * 31 + j) % 8191);
            output[i*c + j] = 5.0f;
        }
    }

    // Transpose the matrix
    cuTranspose<<<dimGrid, dimBlock>>>(r, c, input, output);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    if (!check(r, c, input, output)) {
        printf("Matrix entries not equal\n");
    }
    else {
        printf("Matrix transpose successful\n");
    }

    // Free memory
    cudaFree(input);
    cudaFree(output);

    return 0;
}
1,197
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Prints the 3D thread indices of every thread in the launch
// (device printf; output order is unspecified).
__global__ void print_threadIds()
{
    printf("threadIdx.x : %d threadIdx.y : %d threadIdx.z : %d\n",
           threadIdx.x, threadIdx.y, threadIdx.z);
}

int main()
{
    // 16 x 16 problem covered by a grid of 8 x 8 thread blocks
    const int nx = 16;
    const int ny = 16;

    dim3 block(8, 8);
    dim3 grid(nx / block.x, ny / block.y);

    print_threadIds <<<grid, block>>> ();

    // wait for all device printf output before tearing the context down
    cudaDeviceSynchronize();
    cudaDeviceReset();
}
1,198
#include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
//#define debug
typedef unsigned long long int UINT;
using namespace std;

// Substitution score: +3 for a match, -3 for a mismatch.
__device__ int s(int a, int b){
    return a==b?3:-3;
}

// One wavefront step of the Smith-Waterman DP table: each thread computes
// one cell on the current anti-diagonal.  Cell score is
// max(left-2, up-2, diag + s(x, y), 0) — i.e. gap penalty 2, local
// alignment floored at 0.  idx walks down-left along the diagonal
// (+rowsize-1 per thread); dev_table points past the one-cell padding
// row/column, so idx-1, idx-rowsize and idx-rowsize-1 are always valid.
__global__ void GPU(int *dev_table, int *dev_arr1, int *dev_arr2, int startIdx, int curjobs, const int rowsize, int startx, int starty){
    int thread = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread < curjobs){
        int idx = startIdx + (thread * rowsize - thread);
        int x = startx - thread;
        int y = starty + thread;
        dev_table[idx] = max(dev_table[idx-1]-2,
                             max(dev_table[idx-rowsize]-2,
                                 max(dev_table[idx-rowsize-1] + s(dev_arr1[x], dev_arr2[y]), 0)));
    }
    // __threadfence();
}

// Aborts with a diagnostic if a CUDA call failed.
void checkGPUError(cudaError err){
    if (cudaSuccess != err){
        printf("CUDA error in file %s, in line %i: %s\n",
               __FILE__, __LINE__, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Smith-Waterman scoring over arr1 (length n1) and arr2 (length n2) on the
// GPU, sweeping the DP table one anti-diagonal per kernel launch.
// Returns the value of the table's last cell.
// NOTE(review): the comments below assume n2 is the longer array — confirm
// callers guarantee this, since the level bookkeeping depends on it.
int SW(int n1, int n2, int *arr1, int *arr2){
    int last;
    int paddsize = 1;                   // one padding row/column of zeros
    int rowsize = paddsize + n2;
    int colsize = paddsize + n1;
    int *dev_table, *dev_arr1, *dev_arr2;
    int *table;                         // host copy, used only under #ifdef debug
    table = new int[colsize * rowsize];

    size_t freeMem, totalMem;
    cudaMemGetInfo(&freeMem, &totalMem);
    int tablesize = colsize * rowsize;
    cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
    cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;

    cudaError err = cudaMalloc(&dev_table, tablesize * sizeof(int));
    checkGPUError(err);
    cudaMalloc(&dev_arr1, n1*sizeof(int));
    cudaMalloc(&dev_arr2, n2*sizeof(int));

    // zero the whole table (covers the padding row/column) and upload inputs
    cudaMemset(dev_table, 0, tablesize * sizeof(int));
    cudaMemcpy(dev_arr1, arr1, n1*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_arr2, arr2, n2*sizeof(int), cudaMemcpyHostToDevice);

    int maxthreads = min(n1, n2);       // NOTE(review): computed but unused
    int maxlevel = n1 + n2 - 1;         // number of anti-diagonals
    int curlevel = 1;
    int curjobs = 1;                    // cells on the current anti-diagonal
    int startx, starty;
    int threadPerBlock = 32, blockPerGrid;
    cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
    //suppose n2 is the row size and the longer array
    while(curlevel <= maxlevel){
        // cout << "level: " << curlevel << endl;
        int startIdx;
        // first cell and job count for this diagonal, before/after the
        // diagonal reaches the right edge of the table
        if (curlevel <= n2){
            startIdx = curlevel - 1;
            curjobs = curlevel;
            startx = startIdx;
            starty = 0;
        }
        else{
            startIdx = n2 - 1 + rowsize * (curlevel - n2);
            curjobs = 2 * n2 - curlevel;
            startx = n2 - 1;
            starty = curlevel - n2;
        }
        // round the thread count up to a multiple of the warp size
        int numthreads = (curjobs + 31) / 32;
        numthreads *= 32;
        blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
        // base pointer offset past the padding row and column
        GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], dev_arr1, dev_arr2, startIdx, curjobs, rowsize, startx, starty);
        // each diagonal depends on the previous one, so synchronize between
        // launches
        cudaDeviceSynchronize();
        curlevel++;
    }

    // the final cell of the table is the result
    cudaMemcpy(&last, &dev_table[tablesize-1], sizeof(int), cudaMemcpyDeviceToHost);

#ifdef debug
    cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
    //display table
    cout << "full table: " << endl;
    for (int i=0; i<n1+paddsize; i++){
        for (int j=0; j<n2+paddsize; j++){
            cout << table[i * rowsize + j] << " ";
        }
        cout << endl;
    }
#endif

    cudaFree(dev_arr1);
    cudaFree(dev_arr2);
    cudaFree(dev_table);
    delete[] table;
    return last;
}
1,199
#include<cstdio>
#include<cstdlib>
#include<iostream>
#include<cmath>   // ceil() — previously relied on transitive inclusion
#include<ctime>   // clock(), CLOCKS_PER_SEC — previously relied on transitive inclusion
#define DFL_LEN 32
#define MAX_THREADS_PER_BLOCK 1024 //supported by hardware, run ./deviceQuery to determine
//cuda error checking
#define check_error(ans) {cudaCheckError((ans),__FILE__,__LINE__);}
inline void cudaCheckError(cudaError_t e,const char *file,int line,bool abort = true){
    if(e != cudaSuccess){
        fprintf(stderr,"GPUassert: %s\nFile: %s\nLine: %d\n",cudaGetErrorString(e),file,line);
        if(abort) exit(e);
    }
}
//end of error checking
typedef long int g_type;
//struct declarations and global variables begin here
struct vertex{
    g_type number;   // original node id from the input file
    g_type start;    // index of this node's first outgoing edge in `edges`
    int n;           // out-degree
};
struct map{
    g_type node;     // node id
    g_type index;    // dense index of that node in vertex_list
};
struct entry{
    g_type edge;     // destination node id
    double val;      // transition probability (1 / out-degree of the source)
};
// Globals are zero-initialised, so all pointers start out NULL and the
// init_* functions below lazily allocate them.
g_type *edges;               // flat edge-target array (CSR-style), grown by doubling
g_type edges_length;
g_type edges_size;
g_type edges_itr;
struct vertex *vertex_list;  // one record per source node, grown by doubling
g_type vertex_length;
g_type vertex_size;
g_type vertex_itr;
struct entry *transitions;   // per-edge transition probabilities
struct map *node_map;        // node id -> dense index lookup table
double *ranks;               // current rank vector
double *result;              // scratch vector for the serial iteration
//end of struct and global definitions
//start of interface

// Allocates the edge array. Returns 1 on success, 0 if already
// initialised or malloc failed.
int init_edges(){
    if(edges != NULL) return 0;
    edges = (g_type *)malloc(DFL_LEN * sizeof(g_type));
    if(edges == NULL){
        fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    edges_size = DFL_LEN;
    edges_length = 0;
    edges_itr = 0;
    return 1;
}

// Frees the edge array and resets its bookkeeping.
void delete_edges(){
    edges_length = 0;
    edges_size = DFL_LEN;
    edges_itr = 0;
    if(edges != NULL) free(edges);
}

// Appends one edge target, doubling the backing array when full.
// Returns 1 on success, 0 on error.
int add_edge(int edge){
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    if(edges_length == edges_size){
        edges_size *= 2;
        edges = (g_type *)realloc(edges,edges_size * sizeof(g_type));
        if(edges == NULL){
            fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
            return 0;
        }
    }
    edges[edges_length] = edge;
    edges_length++;
    return 1;
}

// Reads the edge at the internal cursor into *e and advances the cursor.
// NOTE(review): the cursor wraps on edges_size (capacity), not
// edges_length, so it can walk into unused slots — confirm intended.
int get_edge(g_type *e){
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    if(e == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    g_type val = edges[edges_itr];
    edges_itr++;
    if(edges_itr >= edges_size){
        edges_itr = edges_itr % edges_size;
    }
    *e = val;
    return 1;
}

// Rewinds the edge cursor to the beginning.
void reset_edge(){
    edges_itr = 0;
}

// Moves the edge cursor to an absolute index.
void move_edge(g_type index){
    edges_itr = index;
}

// Allocates the vertex array. Returns 1 on success, 0 if already
// initialised or malloc failed.
int init_vertices(){
    if(vertex_list != NULL) return 0;
    vertex_list = (struct vertex *)malloc(DFL_LEN * sizeof(struct vertex));
    if(vertex_list == NULL){
        fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    vertex_length = 0;
    vertex_size = DFL_LEN;
    vertex_itr = 0;
    return 1;
}

// Frees the vertex array and resets its bookkeeping.
void delete_vertices(){
    vertex_itr = 0;
    vertex_length = 0;
    vertex_size = 0;
    if(vertex_list != NULL) free(vertex_list);
}

// Appends a copy of v, doubling the backing array when full.
// Returns the index of the new vertex (0 also doubles as the error
// return — callers here always pass valid state, but beware).
int add_vertex(struct vertex v){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    if(vertex_length == vertex_size){
        vertex_size *= 2;
        vertex_list = (struct vertex *)realloc(vertex_list,vertex_size * sizeof(struct vertex));
        if(vertex_list == NULL){
            fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
            return 0;
        }
    }
    vertex_list[vertex_length].number = v.number;
    vertex_list[vertex_length].n = v.n;
    vertex_list[vertex_length].start = v.start;
    g_type temp = vertex_length;
    vertex_length++;
    return temp;
}

// Reads the vertex at the internal cursor into *v and advances the
// cursor (wraps on capacity like get_edge — see note there).
int get_vertex(struct vertex *v){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return 0;
    }
    v->number = vertex_list[vertex_itr].number;
    v->start = vertex_list[vertex_itr].start;
    v->n = vertex_list[vertex_itr].n;
    vertex_itr++;
    if(vertex_itr >= vertex_size){
        vertex_itr = vertex_itr % vertex_size;
    }
    return 1;
}

// Rewinds the vertex cursor to the beginning.
void reset_vertex(){
    vertex_itr = 0;
}

// Moves the vertex cursor to an absolute index.
void move_vertex(g_type index){
    vertex_itr = index;
}

// Reads "<from> <to>" pairs from fp and builds the CSR-style graph.
// Edges of the same source must be consecutive in the file: a new vertex
// record is opened whenever `from` differs from the current one.
void build_graph(FILE *fp){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(fp == NULL){
        fprintf(stderr,"File pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    g_type from,to;
    int seen = 0;
    g_type cur = -1;
    // Fixed: require both fields to match (== 2). The old `!= -1` test
    // spun forever on a malformed line (fscanf returning 0 or 1).
    while(fscanf(fp,"%ld %ld",&from,&to) == 2){
        // Fixed: check vertex_length FIRST — on the first record cur is
        // still -1, and the old operand order dereferenced
        // vertex_list[-1] before the length guard could short-circuit.
        if(vertex_length != 0 && from == vertex_list[cur].number){
            seen = 1;
        }
        else{
            seen = 0;
        }
        if(!seen){
            struct vertex temp;
            temp.number = from;
            temp.start = edges_length;   // this vertex's edges start here
            temp.n = 0;
            cur = add_vertex(temp);
        }
        add_edge(to);
        vertex_list[cur].n++;
    }
}

// Builds the node-id -> dense-index lookup table from vertex_list.
void create_map(){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(node_map == NULL){
        node_map = (struct map *)malloc(vertex_length * sizeof(struct map));
        if(node_map == NULL){
            fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
            return ;
        }
    }
    for(int i=0;i<vertex_length;i++){
        node_map[i].node = vertex_list[i].number;
        node_map[i].index = i;
    }
}

// Linear-scan lookup of a node id; returns its dense index or -1 if the
// node has no entry (e.g. a dangling edge target with no out-edges).
// O(V) per call — quadratic overall; fine for small graphs.
g_type search_map(g_type node){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return -1;
    }
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return -1;
    }
    if(node_map == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return -1;
    }
    for(int i=0;i<vertex_length;i++){
        if(node_map[i].node == node) return node_map[i].index;
    }
    return -1;
}

// Device-side counterpart of search_map; returns -1 when not found.
__device__ g_type search_dmap(struct map *d_map,g_type *d_vlength,g_type node){
    if(d_map == NULL){
        return -1;
    }
    g_type len = *d_vlength;
    for(g_type i=0;i<len;i++){
        if(d_map[i].node == node) return d_map[i].index;
    }
    return -1;
}

// Frees the lookup table.
void delete_map(){
    if(node_map != NULL) free(node_map);
}

// Allocates the rank vector and initialises every entry to 0.25.
// NOTE(review): the hard-coded 0.25 is only uniform for a 4-node graph;
// a general uniform start would be 1.0/vertex_length — confirm intended.
void init_ranks(){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(ranks == NULL){
        ranks = (double *)malloc(vertex_length * sizeof(double));
        if(ranks == NULL){
            fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
            return ;
        }
    }
    for(int i=0;i<vertex_length;i++){
        ranks[i] = 0.25;
    }
}

// Frees the rank vector.
void delete_ranks(){
    if(ranks != NULL) free(ranks);
}

// Allocates the per-edge transition table: each edge of vertex i gets
// probability 1/out-degree(i).
void init_transitions(){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(transitions == NULL){
        transitions = (struct entry *)malloc(edges_length * sizeof(struct entry));
        if(transitions == NULL){
            fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
            return ;
        }
    }
    for(g_type i=0;i<vertex_length;i++){
        g_type start = vertex_list[i].start;
        g_type j = start;
        int n = vertex_list[i].n;
        while(j < start + n){
            transitions[j].edge = edges[j];
            transitions[j].val = 1.0 / vertex_list[i].n;
            j++;
        }
    }
}

// Frees the transition table.
void delete_transitions(){
    if(transitions != NULL) free(transitions);
}

// Allocates the serial-path scratch vector and zeroes it.
void init_result(){
    if(vertex_list == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(edges == NULL){
        fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__);
        return ;
    }
    if(result == NULL){
        result = (double *)malloc(vertex_length * sizeof(double));
        if(result == NULL){
            fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__);
            return ;
        }
    }
    for(int i=0;i<vertex_length;i++){
        result[i] = 0.0;
    }
}

// Frees the scratch vector.
void delete_result(){
    if(result != NULL) free(result);
}

// One serial power-method step: result += P^T * ranks, scattered edge by
// edge. Always returns 1.
int pagerank(){
    for(int i=0;i<vertex_length;i++){
        for(int j = vertex_list[i].start; j < vertex_list[i].start + vertex_list[i].n; j++){
            double temp = transitions[j].val * ranks[i];
            g_type index = search_map(transitions[j].edge);
            // Fixed: skip edges whose target has no map entry (dangling
            // target) — index is -1 there and result[-1] is out of bounds.
            if(index >= 0) result[index] += temp;
        }
    }
    return 1;
}

// Copies result into ranks and re-zeroes result for the next step.
void update_ranks(){
    for(int i=0;i<vertex_length;i++){
        ranks[i] = result[i];
        result[i] = 0.0;
    }
}
//end of interface
//CUDA kernels

// Portable atomic add for doubles: CAS loop on the 64-bit integer
// representation, so it also works on pre-SM60 devices where the native
// double-precision atomicAdd overload does not exist.
__device__ double atomicAddDouble(double *address, double val){
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do{
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    }while(assumed != old);
    return __longlong_as_double(old);
}

// One parallel power-method step: thread t scatters vertex t's rank
// along its out-edges into d_tempranks.
// Fixed vs. the original: the d_ranks[threadId] load is now inside the
// bounds check (it was an out-of-bounds read for surplus threads), and
// the scatter uses an atomic add — the old load/__syncthreads()/store
// sequence was a data race on shared targets and placed __syncthreads()
// in divergent control flow.
__global__ void multiply_kernel(struct vertex *d_vertices,struct entry *d_transitions,struct map *d_map,double *d_ranks,double *d_tempranks,g_type *d_vlength){
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    g_type len = *d_vlength;
    if(threadId < len){
        double b = d_ranks[threadId];
        for(g_type i = d_vertices[threadId].start;i < d_vertices[threadId].start + d_vertices[threadId].n;i++){
            double a = d_transitions[i].val;
            g_type index = search_dmap(d_map,d_vlength,d_transitions[i].edge);
            // skip dangling targets (index == -1) instead of writing OOB
            if(index >= 0) atomicAddDouble(&d_tempranks[index], a * b);
        }
    }
}

//deprecated — no live caller (the launch in main is commented out);
//updated to the same atomic scatter as multiply_kernel for consistency
__global__ void add_kernel(struct vertex *d_vertices,struct entry *d_transitions,double *d_res,struct map *d_map,double *d_tempranks,g_type *d_vlength){
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    g_type len = *d_vlength;
    if(threadId < len){
        for(g_type i = d_vertices[threadId].start;i < d_vertices[threadId].start + d_vertices[threadId].n;i++){
            g_type index = search_dmap(d_map,d_vlength,d_transitions[i].edge);
            if(index >= 0) atomicAddDouble(&d_tempranks[index], d_res[i]);
        }
    }
}

// Copies the freshly accumulated ranks back into d_ranks.
__global__ void update_kernel(double *d_tempranks,double *d_ranks,g_type *d_vlength){
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    g_type len = *d_vlength;
    if(threadId < len){
        d_ranks[threadId] = d_tempranks[threadId];
    }
}
//end of CUDA kernels
//main program begins here

// Usage: <pathToGraph> <numIterations> <serial = 0/parallel = 1>.
// Builds the graph, runs `iterations` power-method steps either on the
// GPU or serially, prints the rank vector and the elapsed CPU time.
int main(int argc,char **argv){
    if(argc != 4){
        fprintf(stderr,"Correct usage: %s <pathToGraph> <numIterations> <serial = 0/parallel = 1>\n",argv[0]);
        exit(1);
    }
    FILE *fp = fopen(argv[1],"r");
    const int iterations = atoi(argv[2]);
    const int mode = atoi(argv[3]);
    init_vertices();
    init_edges();
    build_graph(fp);
    create_map();
    init_ranks();
    init_transitions();
    if(mode == 1){
        //initializing device memory
        g_type *d_elength;   // NOTE(review): allocated/copied but unused by the kernels
        check_error(cudaMalloc((void **)&d_elength,sizeof(g_type)));
        check_error(cudaMemcpy(d_elength,&edges_length,sizeof(g_type),cudaMemcpyHostToDevice));
        g_type *d_vlength;
        check_error(cudaMalloc((void **)&d_vlength,sizeof(g_type)));
        check_error(cudaMemcpy(d_vlength,&vertex_length,sizeof(g_type),cudaMemcpyHostToDevice));
        struct vertex *d_vertices;
        check_error(cudaMalloc((void **)&d_vertices,vertex_length * sizeof(struct vertex)));
        check_error(cudaMemcpy(d_vertices,vertex_list,vertex_length * sizeof(struct vertex),cudaMemcpyHostToDevice));
        struct entry *d_transitions;
        check_error(cudaMalloc((void **)&d_transitions,edges_length * sizeof(struct entry)));
        check_error(cudaMemcpy(d_transitions,transitions,edges_length * sizeof(struct entry),cudaMemcpyHostToDevice));
        struct map *d_map;
        check_error(cudaMalloc((void **)&d_map,vertex_length * sizeof(struct map)));
        check_error(cudaMemcpy(d_map,node_map,vertex_length * sizeof(struct map),cudaMemcpyHostToDevice));
        double *d_ranks;
        check_error(cudaMalloc((void **)&d_ranks,vertex_length * sizeof(double)));
        check_error(cudaMemcpy(d_ranks,ranks,vertex_length * sizeof(double),cudaMemcpyHostToDevice));
        double *d_res;
        check_error(cudaMalloc((void **)&d_res,edges_length * sizeof(double)));
        double *d_tempranks;
        check_error(cudaMalloc((void **)&d_tempranks,vertex_length * sizeof(double)));
        //pagerank iterations begin here: Power method
        // one thread per vertex; grow the grid once a block isn't enough
        int blocks = 1;
        int threads = vertex_length;
        if(vertex_length > MAX_THREADS_PER_BLOCK){
            blocks = (int)ceil(vertex_length / (double)MAX_THREADS_PER_BLOCK);
            threads = MAX_THREADS_PER_BLOCK;
        }
        int counter = 0;
        clock_t begin = clock();   // NOTE: clock() measures CPU time, not wall time
        while(counter < iterations){
            // cudaMemset's value parameter is an int (fills bytes);
            // 0 is the only portable "double zero" here
            check_error(cudaMemset(d_res,0,edges_length * sizeof(double)));
            check_error(cudaMemset(d_tempranks,0,vertex_length * sizeof(double)));
            multiply_kernel<<<blocks,threads>>>(d_vertices,d_transitions,d_map,d_ranks,d_tempranks,d_vlength);
            cudaDeviceSynchronize();
            //add_kernel<<<blocks,threads>>>(d_vertices,d_transitions,d_res,d_map,d_tempranks,d_vlength);
            //cudaDeviceSynchronize();
            update_kernel<<<blocks,threads>>>(d_tempranks,d_ranks,d_vlength);
            cudaDeviceSynchronize();
            counter++;
        }
        clock_t end = clock();
        //end of pagerank iterations
        double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
        double *res;
        res = (double *)malloc(vertex_length * sizeof(double));
        check_error(cudaMemcpy(res,d_ranks,vertex_length * sizeof(double),cudaMemcpyDeviceToHost));
        for(int i = 0;i<vertex_length;i++){
            printf("%lf\n",res[i]);
        }
        free(res);
        printf("%lf s\n",time_spent);
        check_error(cudaFree(d_elength));
        check_error(cudaFree(d_vlength));
        check_error(cudaFree(d_vertices));
        check_error(cudaFree(d_transitions));
        check_error(cudaFree(d_map));
        check_error(cudaFree(d_ranks));
        check_error(cudaFree(d_res));
        check_error(cudaFree(d_tempranks));
    }
    else{
        // serial reference implementation
        clock_t begin = clock();
        init_result();
        int counter = 0;
        while(counter < iterations){
            if(!pagerank()){
                fprintf(stderr,"Pagerank failed in iteration: %d\n",counter);
                break;
            }
            update_ranks();
            counter++;
        }
        clock_t end = clock();
        double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
        for(int i = 0;i<vertex_length;i++){
            printf("%lf\n",ranks[i]);
        }
        printf("%lf s\n",time_spent);
    }
    //end of device memory initialization
    delete_edges();
    delete_vertices();
    delete_ranks();
    delete_transitions();
    delete_map();
    delete_result();
    return 0;
}
1,200
#include "includes.h"

// One half-step of odd-even transposition ("bubble") sort.
// Each thread inspects a single adjacent pair of `tab` and swaps the two
// floats when they are out of ascending order. `parity` (0 or 1) selects
// whether the even- or odd-based pairs are handled by this launch;
// `length` is the element count of `tab`. Thread k covers the pair
// (2k + parity, 2k + parity + 1), so ceil(length/2) threads suffice.
extern "C"
__global__ void bubble(unsigned int length, unsigned int parity, float* tab)
{
    // base element of this thread's pair
    unsigned int lo = 2u * (threadIdx.x + blockDim.x * blockIdx.x) + parity;
    unsigned int hi = lo + 1u;

    // tail guard: the pair would run past the end of the array
    if (hi >= length)
        return;

    float a = tab[lo];
    float b = tab[hi];
    if (b < a) {
        // out of order — exchange the two elements
        tab[lo] = b;
        tab[hi] = a;
    }
}