serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
24,101
#include "includes.h" static unsigned int GRID_SIZE_N; static unsigned int GRID_SIZE_4N; static unsigned int MAX_STATE_VALUE; __global__ static void cudaSumTIGammaKernel(unsigned char *tipX1, double *x2, double *tipVector, double *sumtable, int limit) { const int n = blockIdx.x * blockDim.x + threadIdx.x; if (n >= limit) { return; } const int i = n / 4, l = n % 4; double *left = &(tipVector[4 * tipX1[i]]); double *right = &(x2[16 * i + l * 4]); double *sum = &sumtable[i * 16 + l * 4]; #pragma unroll for (int k = 0; k < 4; k++) { sum[k] = left[k] * right[k]; } }
24,102
#include<stdio.h> __global__ void kernel(int * a, int * b) { *b=*a+*b; } int main(void) { int h_in,h_out; int *d_out,*d_in; h_in=2; h_out=7; cudaMalloc((void **)&d_out,sizeof(int)); cudaMalloc((void **)&d_in,sizeof(int)); cudaMemcpy(d_in,&h_in,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_out,&h_out,sizeof(int),cudaMemcpyHostToDevice); kernel<<<1,1>>>(d_in,d_out); cudaMemcpy(&h_out,d_out,sizeof(int),cudaMemcpyDeviceToHost); printf("%d\n",h_out); cudaFree(d_in); cudaFree(d_out); return 0; }
24,103
#include "includes.h" #define TB 128 #define GS(x) (((x) - 1) / TB + 1) __global__ void downsample_(float *input, float *output, int factor, int size3, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dim3 = id % size3; int dim2 = id / size3; atomicAdd(output + ((dim2 / factor) * (size3 / factor) + (dim3 / factor)), input[id] / (factor * factor)); } }
24,104
#include <vector> #include <iostream> #include <string> #include <iomanip> #include <sys/time.h> #include <cuda.h> #include <cstdio> #include <cmath> const int MAXITER = 1024; const int DIVISOR = 512; enum Color { red, black }; #define AT(mtx, width, row, column) \ mtx[(row) * (width) + (column)] inline double nowSec() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } void printMtx(float *mtx, int size) { for (unsigned i=0; i<size; i++) { for (unsigned j=0; j<size; j++) std::cout<<std::fixed<<std::setprecision(4)<<AT(mtx,size,i,j)<<"\t"; std::cout<<std::endl; } } __global__ void global_cellSolve(float *mtx, int dim, int M, float* itr) { float tot = (M-2)*(M-2); Color c = red; int i = (M-2)-1 - (blockIdx.y * blockDim.y + threadIdx.y) + 1; //si somma 1 perchè ci sono N*N thread int j = blockIdx.x * blockDim.x + threadIdx.x + 1; do { if ((i % 2 == 0 && c == red) || (i % 2 == 1 && c == black)) { AT(mtx,M,i,j) = 0.25*(AT(mtx,M,i-1,j)+AT(mtx,M,i+1,j)+AT(mtx,M,i,j-1)+AT(mtx,M,i,j+1)); } c = c == red ? 
black : red; atomicAdd(itr, 0.5); //aver computato un solo colore corrisponde a mezza iterazione __syncthreads(); } while(MAXITER > *itr/tot); } int main(int argc, char **argv) { float* mtx; float* itr; if (argc != 2) { std::cout<<"Usage:"<<std::endl<< "pdeSolver [matrix size]"<<std::endl; return -1; } int N = std::atoi(argv[1]); int M = N + 2; if (N % DIVISOR) { std::cerr << "N deve essere un multiplo di " << DIVISOR <<std::endl; return -1; } int dim = N/DIVISOR; cudaMallocManaged(&mtx, M*M*sizeof(float)); cudaMallocManaged(&itr, sizeof(float)); cudaDeviceSynchronize(); for(unsigned i=0; i<M; i++) { AT(mtx,M,i,0) = 1; AT(mtx,M,0,i) = 1; AT(mtx,M,M-1,i) = 1; AT(mtx,M,i,M-1) = 1; } dim3 blockPerGrid(dim, dim, 1); dim3 threadPerBlock(DIVISOR, DIVISOR, 1); double t_begin = nowSec(); global_cellSolve <<< blockPerGrid, threadPerBlock >>> (mtx, dim, M, itr); cudaDeviceSynchronize(); double t_end = nowSec(); #ifdef PRINT printMtx(mtx, M); #endif std::cout<<"Elapsed time: "<<(t_end-t_begin)<<"sec"<<std::endl; cudaFree (mtx); cudaFree (itr); return 0; }
24,105
#include <cstdio> // main program for the CPU: compiled by MS-VC++ int main(void) { // host-side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = { 0 }; // make a, b matrices for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { a[y][x] = y * 10 + x; b[y][x] = (y * 10 + x) * 100; } } // calculate for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { c[y][x] = a[y][x] + b[y][x]; } } // print the result for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { printf("%5d", c[y][x]); } printf("\n"); } // done return 0; }
24,106
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define MAXBLOCKS 10 #define MAXTHREADS 1 //__global__ (paralellized method) __global__ void VectorAdd(int *a, int *b, int*c, int n) { int i = blockIdx.x; //Assign each c element to a single block c[i] = a[i] + b[i]; } int main() { int *a, *b, *c; //CPU int *d_a, *d_b, *d_c;//GPU //Allocate CPU memory a = (int*)malloc(MAXBLOCKS*sizeof(int)); b = (int*)malloc(MAXBLOCKS*sizeof(int)); c = (int*)malloc(MAXBLOCKS*sizeof(int)); //Allocate GPU memory cudaMalloc(&d_a, MAXBLOCKS*sizeof(int)); cudaMalloc(&d_b, MAXBLOCKS*sizeof(int)); cudaMalloc(&d_c, MAXBLOCKS*sizeof(int)); for (int i = 0; i < MAXBLOCKS; ++i) //Populate array { a[i] = i; b[i] = i; c[i] = 0; } //Copy data to GPU cudaMemcpy(d_a, a, MAXBLOCKS*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, MAXBLOCKS*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, MAXBLOCKS*sizeof(int), cudaMemcpyHostToDevice); VectorAdd<<< MAXBLOCKS, MAXTHREADS >>>(d_a, d_b, d_c, MAXBLOCKS); //Run GPU using MAXBLOCK number of blocks and MAXTHREADS number of threads //Copy result back to CPU cudaMemcpy(c, d_c, MAXBLOCKS*sizeof(int), cudaMemcpyDeviceToHost); printf("\nMAXBLOCKS (%d) VECTOR ADDITION USING CUDA\n\n", MAXBLOCKS); printf("c[i] = a[i] + b[i]\n"); printf("======================================\n"); for (int i = 0; i < MAXBLOCKS; ++i) printf("a[%d] = %d, b[%d] = %d, c[%d] = %d\n", i, a[i], i, b[i], i, c[i]); //Free CPU memory free(a); free(b); free(c); //Free GPU memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
24,107
#include "includes.h" __global__ void ExactResampleKernel_Nto1(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int size = outputWidth * outputHeight; if (id < size) { //output point coordinates int px = id % outputWidth; int py = id / outputWidth; int xRatio = inputWidth / outputWidth; int yRatio = inputHeight / outputHeight; float sum = 0; for (int sx = 0; sx < xRatio; sx++) { for (int sy = 0; sy < yRatio; sy++) { //corresponding coordinates in the original image int x = px * xRatio + sx; int y = py * yRatio + sy; sum += input[y * inputWidth + x]; } } output[py * outputWidth + px] = sum / (float)(xRatio * yRatio); } }
24,108
#include <cmath> #include <cstdlib> #include <cstdio> #include <sys/time.h> #define M 1024 __global__ void matmul(float *A, float *B, float *C, int N) { int i = blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; float sum = 0.0f; __shared__ float s_A[M]; for (int ks=0; ks<N; ks+=M) { __syncthreads(); s_A[threadIdx.x] = A[N*i+ks+threadIdx.x]; __syncthreads(); for (int k=ks; k<ks+M; k++) { sum += s_A[k-ks] * B[N*k+j]; } } C[N*i+j] = sum; } int main(int argc, char **argv) { int N = atoi(argv[1]); float * h_A = new float [N*N]; float * h_B = new float [N*N]; float * h_C = new float [N*N]; float *d_A, *d_B, *d_C; int size = N * N * sizeof(float); cudaMalloc((void **) &d_A, size); cudaMalloc((void **) &d_B, size); cudaMalloc((void **) &d_C, size); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { h_A[N*i+j] = drand48(); h_B[N*i+j] = drand48(); h_C[N*i+j] = 0; } } cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice); dim3 grid(N/M, N); struct timeval tic, toc; gettimeofday(&tic, NULL); matmul<<<grid,M>>>(d_A, d_B, d_C, N); cudaDeviceSynchronize(); gettimeofday(&toc, NULL); double time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6; printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); gettimeofday(&tic, NULL); #pragma omp parallel for for (int i=0; i<N; i++) { for (int k=0; k<N; k++) { for (int j=0; j<N; j++) { h_C[N*i+j] -= h_A[N*i+k] * h_B[N*k+j]; } } } gettimeofday(&toc, NULL); time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6; printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); float err = 0; for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { err += fabs(h_C[N*i+j]); } } printf("error: %f\n",err/N/N); delete[] h_A; delete[] h_B; delete[] h_C; }
24,109
int main(){ float * tst; cudaMalloc((void **) &tst, sizeof(float) * 10); }
24,110
#include <cstdio> #include <cstdlib> #include <cuda_runtime.h> #include <cuda.h> #include "rbm_cuda.cuh" __global__ void trainKernel(int* train_vec_in_batch, int* movies_in_batch, int* ratings_in_batch, float* Vzeros, float* Vts, float* Hzeros, float* Hts, float* W, float* BV, float* BH, float* W_inc, float* BV_inc, float* BH_inc, int batch_size, int num_movies_in_this_batch, const int i_batch_start, const bool update_weights) { unsigned int user = blockIdx.x * blockDim.x + threadIdx.x; while (user < batch_size) { int start = train_vec_in_batch[2 * user]; int end = train_vec_in_batch[2 * user + 1]; int size = end - start; start -= i_batch_start; if (size != 0) { float* V0 = Vzeros + K * start; float* Vt = Vts + K * start; float* H0 = Hzeros + user * F; float* Ht = Hts + user * F; int* u_movies = movies_in_batch + start; int* u_ratings = ratings_in_batch + start; // initialize V0 for (int i = 0; i < size; i++) { V0[i * K + u_ratings[i] - 1] = 1; } //////////////// positive phase //////////////// for (int i = 0; i < size; i++) { float* W_user = W + u_movies[i] * (K * F); for (int j = 0; j < F; j++) { for (int k = 0; k < K; k++) { H0[j] += W_user[j * K + k] * V0[i * K + k]; } } } // add bias and logistic function on H0 for (int j = 0; j < F; j++) { H0[j] += BH[j]; H0[j] = 1.0 / (1 + exp(-H0[j])); } if (update_weights) { //////////////// negative phase //////////////// for (int i = 0; i < size; i++) { float* W_user = W + u_movies[i] * (K * F); for (int j = 0; j < F; j++) { for (int k = 0; k < K; k++) { Vt[i * K + k] += H0[j] * W_user[j * K + k]; } } // normalize Vt float sum_k = 0.0; for (int k = 0; k < K; k++) { Vt[i * K + k] += BV[u_movies[i] * K + k]; // add bias Vt[i * K + k] = exp(Vt[i * K + k]); // exponential sum_k += Vt[i * K + k]; } for (int k = 0; k < K; k++) { Vt[i * K + k] /= sum_k; } } // compute Ht for (int i = 0; i < size; i++) { float* W_user = W + u_movies[i] * (K * F); for (int j = 0; j < F; j++) { for (int k = 0; k < K; k++) { Ht[j] += W_user[j * K + 
k] * Vt[i * K + k]; } } } // add bias and logistic function on Ht for (int j = 0; j < F; j++) { Ht[j] += BV[j]; Ht[j] = 1.0 / (1 + exp(-Ht[j])); } //////////////// update weight increments //////////////// // update BV_inc for (int i = 0; i < size; i++) { for (int k = 0; k < K; k++) { BV_inc[u_movies[i] * K + k] += (V0[i * K + k] - Vt[i * K + k]); } } // update W_inc for (int i = 0; i < size; i++) { for (int j = 0; j < F; j++) { for (int k = 0; k < K; k++) { W_inc[u_movies[i] * K * F + j * K + k] += (H0[j] * V0[i * K + k] - Ht[j] * Vt[i * K + k]); } } } // update BH_inc for (int j = 0; j < F; j++) { BH_inc[user * F + j] = (H0[j] - Ht[j]); } } // end update weights } user += blockDim.x * gridDim.x; } } __global__ void updateW_kernel(float* W, float* W_inc, const unsigned int M, const float lrate) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < K * F * M) { W[i] += lrate * W_inc[i]; i += blockDim.x * gridDim.x; } } __global__ void updateBV_kernel(float* BV, float* BV_inc, const unsigned int M, const float lrate) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < K * M) { BV[i] += lrate * BV_inc[i]; i += blockDim.x * gridDim.x; } } __global__ void updateBH_kernel(float* BH, float* BH_inc, const float lrate_BH, const int batch_size) { extern __shared__ float sBH_inc[]; unsigned int tid = threadIdx.x; sBH_inc[tid] = 0; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < batch_size) { sBH_inc[tid] += BH_inc[i * F]; i += blockDim.x * gridDim.x; } __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) { sBH_inc[tid] += sBH_inc[tid + s]; } __syncthreads(); } if (tid < 32) { atomicAdd(&sBH_inc[tid], sBH_inc[tid + 32]); atomicAdd(&sBH_inc[tid], sBH_inc[tid + 16]); atomicAdd(&sBH_inc[tid], sBH_inc[tid + 8]); atomicAdd(&sBH_inc[tid], sBH_inc[tid + 4]); atomicAdd(&sBH_inc[tid], sBH_inc[tid + 2]); atomicAdd(&sBH_inc[tid], sBH_inc[tid + 1]); } if (tid == 0) atomicAdd(BH, lrate_BH * sBH_inc[0]); } 
__global__ void predictKernel(int* test_vec_in_batch, int* test_movies_in_batch, int* test_ratings_in_batch, float* Hzeros, float* Vts, float* W, float* BV, int batch_size, const int num_test_movies_in_this_batch, const int i_test_batch_start, float* results_in_batch) { unsigned int user = blockIdx.x * blockDim.x + threadIdx.x; while (user < batch_size) { int start = test_vec_in_batch[2 * user]; int end = test_vec_in_batch[2 * user + 1]; int size = end - start; start -= i_test_batch_start; if (size != 0) { float* H0 = Hzeros + user * F; float* Vt = Vts + K * start; int* u_movies = test_movies_in_batch + start; //////////////// negative phase //////////////// for (int i = 0; i < size; i++) { float* W_user = W + u_movies[i] * (K * F); for (int j = 0; j < F; j++) { for (int k = 0; k < K; k++) { Vt[i * K + k] += H0[j] * W_user[j * K + k]; } } // normalize Vt float sum_k = 0.0; for (int k = 0; k < K; k++) { Vt[i * K + k] += BV[u_movies[i] * K + k]; // add bias Vt[i * K + k] = exp(Vt[i * K + k]); // exponential sum_k += Vt[i * K + k]; } for (int k = 0; k < K; k++) { Vt[i * K + k] /= sum_k; } // update results float score = 0; for (int k = 0; k < K; k++) { score += (k + 1) * Vt[i * K + k]; } results_in_batch[start + i] = score; } } user += blockDim.x * gridDim.x; } } void train(int* train_vec_in_batch, int* movies_in_batch, int* ratings_in_batch, int* test_vec_in_batch, int* test_movies_in_batch, int* test_ratings_in_batch, float* Vzeros, float* Vts, float* Hzeros, float* Hts, float* W, float* BV, float* BH, float* W_inc, float* BV_inc, float* BH_inc, int batch_size, int num_movies_in_this_batch, const int i_batch_start, const int num_test_movies_in_this_batch, const int i_test_batch_start, const unsigned int M, const float lrate, const float lrate_BH, float* results_in_batch, const bool update_weights, int blocks, int threadsPerBlock) { if (update_weights) { trainKernel<<<blocks, threadsPerBlock>>> (train_vec_in_batch, movies_in_batch, ratings_in_batch, Vzeros, Vts, 
Hzeros, Hts, W, BV, BH, W_inc, BV_inc, BH_inc, batch_size, num_movies_in_this_batch, i_batch_start, true); unsigned int Wblocks = min(blocks, (int)ceil(K * F * M / (float)threadsPerBlock)); updateW_kernel<<<Wblocks, threadsPerBlock>>>(W, W_inc, M, lrate); unsigned int BVblocks = min(blocks, (int)ceil(K * M / (float)threadsPerBlock)); updateBV_kernel<<<BVblocks, threadsPerBlock>>>(BV, BV_inc, M, lrate); unsigned int BHblocks = min(blocks, (int)ceil(batch_size / (float)threadsPerBlock)); for (int j = 0; j < F; j++) { updateBH_kernel<<<BHblocks, threadsPerBlock, threadsPerBlock * sizeof(float)>>> (BH + j, BH_inc + j, lrate_BH, batch_size); } } // in prediction stage else { trainKernel<<<blocks, threadsPerBlock>>> (train_vec_in_batch, movies_in_batch, ratings_in_batch, Vzeros, Vts, Hzeros, Hts, W, BV, BH, W_inc, BV_inc, BH_inc, batch_size, num_movies_in_this_batch, i_batch_start, false); // TODO: update Vt, and compute results predictKernel<<<blocks, threadsPerBlock>>> (test_vec_in_batch, test_movies_in_batch, test_ratings_in_batch, Hzeros, Vts, W, BV, batch_size, num_test_movies_in_this_batch, i_test_batch_start, results_in_batch); } }
24,111
#include "includes.h" __global__ void g_One_wgrad_Add( float* _WgradTmp, float* Wgrad, float* w, int rows, int cols, int channels, float lambda) { extern __shared__ float _sum[]; int channel = blockIdx.x; int col = blockIdx.y; int tid = threadIdx.x; _sum[tid] = 0; __syncthreads(); for(int i = 0; i < rows; i += blockDim.x){ int row = i + threadIdx.x; if(row < rows){ _sum[threadIdx.x] += _WgradTmp[channel * rows * cols + row * cols + col]; } } __syncthreads(); int len = rows; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(tid < (len >> 1)) { _sum[tid] += _sum[tid + skip]; } len = (len + 1) >> 1; } __syncthreads(); if(tid == 0) { Wgrad[channel * cols + col] = _sum[0] / rows + w[channel * cols + col] * lambda; } }
24,112
#include "includes.h" __global__ void sobel( int width_d, int height_d, int threshold_d, unsigned int *pic_d , int *final_res) { int row_1 = blockIdx.y * blockDim.y + threadIdx.y; int col_1 = blockIdx.x * blockDim.x + threadIdx.x; int tx = threadIdx.y; int ty = threadIdx.x; int width_Tile = TILE_SIZE; int id, id1; __shared__ int sharedTile[TILE_SIZE * TILE_SIZE]; int magnitude, sum1, sum2; // Shared Tile Initialization sharedTile[tx * width_Tile + ty] = 0; __syncthreads(); // Copying Data from Global to Shared Memory sharedTile[tx * width_Tile + ty] = pic_d[row_1 * (width_d) + col_1]; __syncthreads(); // Output if ((row_1 < height_d) && (col_1 < width_d)) { final_res[row_1 * width_d + col_1] = 0; } __syncthreads(); if (row_1 > 0 && col_1 > 0 && row_1 < height_d - 1 && col_1 < width_d - 1) { // Applying Sobel Filter on the Tile Stored in the Shared Memory if ((tx > 0) && (tx < width_Tile - 1) && (ty > 0) && (ty < width_Tile - 1)) { id = row_1 * width_d + col_1; sum1 = sharedTile[ width_Tile * (tx-1) + ty+1] - sharedTile[ width_Tile * (tx-1) + ty-1 ] + 2 * sharedTile[ width_Tile * (tx) + ty+1 ] - 2 * sharedTile[ width_Tile*(tx) + ty-1 ] + sharedTile[ width_Tile * (tx+1) + ty+1] - sharedTile[ width_Tile*(tx+1) + ty-1 ]; sum2 = sharedTile[ width_Tile * (tx-1) + ty-1 ] + 2 * sharedTile[ width_Tile * (tx-1) + ty ] + sharedTile[ width_Tile * (tx-1) + ty+1] - sharedTile[width_Tile * (tx+1) + ty-1 ] - 2 * sharedTile[ width_Tile * (tx+1) + ty ] - sharedTile[ width_Tile * (tx+1) + ty+1]; magnitude = sum1 * sum1 + sum2 * sum2; if (magnitude > threshold_d) { final_res[id] = 255; } else { final_res[id] = 0; } } __syncthreads(); // For the Pixels at the Boundaries of the Block using Global Memory if ((row_1 == blockIdx.y * blockDim.y + blockDim.y - 1) || (col_1 == blockIdx.x * blockDim.x + blockDim.x - 1) || (row_1 == blockIdx.y * blockDim.y) || (col_1 == blockIdx.x * blockDim.x)) { id1 = row_1 * width_d + col_1; sum1 = pic_d[ width_d * (row_1-1) + col_1+1] - pic_d[ width_d * 
(row_1-1) + col_1-1 ] + 2 * pic_d[ width_d * (row_1) + col_1+1 ] - 2 * pic_d[ width_d*(row_1) + col_1-1 ] + pic_d[ width_d * (row_1+1) + col_1+1] - pic_d[ width_d*(row_1+1) + col_1-1 ]; sum2 = pic_d[ width_d * (row_1-1) + col_1-1 ] + 2 * pic_d[ width_d * (row_1-1) + col_1 ] + pic_d[ width_d * (row_1-1) + col_1+1] - pic_d[width_d * (row_1+1) + col_1-1 ] - 2 * pic_d[ width_d * (row_1+1) + col_1 ] - pic_d[ width_d * (row_1+1) + col_1+1]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > threshold_d) { final_res[id1] = 255; } else { final_res[id1] = 0; } } __syncthreads(); } }
24,113
#include "includes.h" __global__ void kernel_update_velocities(float4* d_uv, float4* d_velocities_buffer, int numel) { size_t col = threadIdx.x + blockIdx.x * blockDim.x; if (col >= numel) { return; } d_velocities_buffer[col] = make_float4( d_uv[col].x, d_uv[col].y, 0, 0 ); __syncthreads(); }
24,114
#include "includes.h" __device__ void updateCMax(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax) { *d_cMax = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; double ro, p, u; __shared__ double c; for (int i = index; i < nbrOfGrids; i += stride){ if (d_u1[i] == 0) continue; ro = d_u1[i]; u = d_u2[i] / ro; p = (d_u3[i] - ro * u * u / 2) * (*d_gama - 1); c = sqrt(*d_gama * abs(p) / ro); if (*d_cMax < c + abs(u)) *d_cMax = c + abs(u); } } __global__ void updateTau(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax, const double *d_h, const double *d_cfl, double *d_tau) { updateCMax(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax); *d_tau = *d_cfl * *d_h / *d_cMax; }
24,115
/* //Serial version #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <time.h> #define SEED 921 #define NUM_ITER 1000000000 int main(int argc, char * argv[]) { int count = 0; double x, y, z, pi; srand(SEED); // Important: Multiply SEED by "rank" when you introduce MPI! // Calculate PI following a Monte Carlo method for (int iter = 0; iter < NUM_ITER; iter++) { // Generate random (X,Y) points x = (double) random() / (double) RAND_MAX; y = (double) random() / (double) RAND_MAX; z = sqrt((x * x) + (y * y)); // Check if point is in unit circle if (z <= 1.0) { count++; } } // Estimate Pi and display the result pi = ((double) count / (double) NUM_ITER) * 4.0; printf("The result is %f\n", pi); return 0; } */ //SAXPY - Single-Precision A*X Plus Y #include <stdio.h> #include <sys/time.h> #include <curand_kernel.h> #include <curand.h> #define BLOCK_SIZE 256 #define NUM_ITER 1000 #define ARRAY_SIZE 10000 __global__ void pi_kernel(int *count, curandState *states){ const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < ARRAY_SIZE){ curand_init(i, i, 0, &states[i]); double x, y, z; for(int j = 0; j < NUM_ITER; j++){ x = curand_uniform(&states[i]); y = curand_uniform(&states[i]); z = sqrt((x * x) + (y * y)); if (z <= 1.0){ count[i]++; } } } } int main(){ int *count_d = 0; int count_h[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { count_h[i] = 0; } int TB = (ARRAY_SIZE + BLOCK_SIZE -1)/BLOCK_SIZE; curandState *dev_random; cudaMalloc((void**)&dev_random, BLOCK_SIZE*TB*sizeof(curandState)); cudaMalloc(&count_d, ARRAY_SIZE*sizeof(int)); cudaMemcpy(count_d, count_h, ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice); pi_kernel <<<TB, BLOCK_SIZE>>>(count_d, dev_random); cudaDeviceSynchronize(); cudaMemcpy(count_h, count_d, ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost); int count = 0; for(int i = 0; i < ARRAY_SIZE; i++) { count += count_h[i]; } //printf("%d, %d", count, ARRAY_SIZE*NUM_ITER); double pi = ((double) count / 
(double) (ARRAY_SIZE*NUM_ITER)) * 4.0; printf("The result is %f\n", pi); return 0; }
24,116
#include "cuda.h" #include "stdio.h" #include <sys/time.h> #include <sys/resource.h> double dwalltime(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; } int cant = 512; int cant_elem = cant * cant; // arreglos usados como matrices int* arreglo_A; int* arreglo_B; int* arreglo_C; int* d_arreglo_A; int* d_arreglo_B; int* d_arreglo_C; void printi(int i){ printf("%d\n", i); } void init_CPU_array(int array[], int n){ for(int i = 0; i < n; i++) { array[i] = i; } } void print_CPU_array(int array[], int n){ for(int i = 0; i < n; i++) { printi(array[i]); } } // calcula la transpuesta in-place __global__ void transposeador(int* arreglo_b, int N){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int i = int((1+sqrtf(1+8*tid))/2); int j = tid - (i*(i-1)/2); int aux; if((i<N) && (j<N)){ aux = arreglo_b[i*N+j]; arreglo_b[i*N+j] = arreglo_b[j*N+i]; arreglo_b[j*N+i] = aux; } } // copia B en C __global__ void copiador(int* arreglo_b, int* arreglo_c, int N){ int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < N) arreglo_c[tid] = arreglo_b[tid]; } // C += A __global__ void sumador(int* arreglo_a, int* arreglo_c, int N){ int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < N) arreglo_c[tid] += arreglo_a[tid]; } // C += A * B^t __global__ void multiplicador(int* arreglo_a, int* arreglo_b_trans, int* arreglo_c, int N, int total_elem){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int i = (int)tid / N; // columna int j = (int)tid % N; // fila int k; int cuenta = 0; if(tid < total_elem) { for (k=0; k< N; k++){ cuenta += arreglo_a[i*N+k] * arreglo_b_trans[k*N+j]; } arreglo_c[tid] += cuenta; } } void solucion_CPU(){ int* arreglo_at; int* arreglo_bt; int* arreglo_a_por_b; int* arreglo_res; int numBytes = sizeof(int) * cant_elem; //bytes a alocar arreglo_at = (int *) malloc(numBytes); arreglo_bt = (int *) malloc(numBytes); arreglo_a_por_b = (int *) malloc(numBytes); // resultado de A * B^t arreglo_res = (int *) 
malloc(numBytes); double timetick; timetick = dwalltime(); // guardamos en arreglo_bt y arreglo_at los datos que van a ser transpuestos for (int i = 0; i < cant_elem; ++i) { arreglo_bt[i] = arreglo_B[i]; arreglo_at[i] = arreglo_A[i]; } // calculamos la transpuesta de B for (int i = 0; i < (cant * (cant+1))/2; ++i) { int col = int((1+sqrtf(1+8*i))/2); // columna int row = i - (col*(col-1)/2); // fila int aux; if((col<cant) && (row<cant)){ aux = arreglo_bt[col*cant+row]; arreglo_bt[col*cant+row] = arreglo_bt[row*cant+col]; arreglo_bt[row*cant+col] = aux; } } // calculamos la transpuesta de A for (int i = 0; i < (cant * (cant+1))/2; ++i) { int col = int((1+sqrtf(1+8*i))/2); // columna int row = i - (col*(col-1)/2); // fila int aux; if((col<cant) && (row<cant)){ aux = arreglo_at[col*cant+row]; arreglo_at[col*cant+row] = arreglo_at[row*cant+col]; arreglo_at[row*cant+col] = aux; } } for (int i = 0; i < cant_elem; i++) { int col = i / cant; // columna int row = i % cant; // fila int mul = 0; for (int k=0; k< cant; k++){ mul += arreglo_A[col*cant+k] * arreglo_bt[k*cant+row]; } arreglo_a_por_b[i] = mul; } for (int i = 0; i < cant_elem; i++){ arreglo_res[i] = 0; } // C = B + A * B^t + A^t for (int i = 0; i < cant_elem; i++){ arreglo_res[i] += arreglo_B[i] + arreglo_a_por_b[i] + arreglo_at[i]; } printf("-> Tiempo transcurrido en la CPU %f\n", dwalltime() - timetick); // printf("%s\n", ""); // printf("%s\n", "Resultados CPU:"); // for (int i = 0; i < cant_elem; i++){ // printf("%d\n", arreglo_res[i]); // } free(arreglo_at); free(arreglo_bt); free(arreglo_a_por_b); free(arreglo_res); } int main(int argc, char** argv){ int numBytes = sizeof(int) * cant_elem; //bytes a alocar arreglo_A = (int *) malloc(numBytes); arreglo_B = (int *) malloc(numBytes); arreglo_C = (int *) malloc(numBytes); // llenamos los arreglos init_CPU_array(arreglo_A, cant_elem); init_CPU_array(arreglo_B, cant_elem); init_CPU_array(arreglo_C, cant_elem); // allocamos memoria en la gpu cudaMalloc(&d_arreglo_A, 
numBytes); cudaMalloc(&d_arreglo_B, numBytes); cudaMalloc(&d_arreglo_C, numBytes); // copiamos los datos de la cpu a la gpu cudaMemcpy(d_arreglo_A, arreglo_A, numBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_arreglo_B, arreglo_B, numBytes, cudaMemcpyHostToDevice); double timetick; timetick = dwalltime(); dim3 miGrid1D(512,1); dim3 miBloque1D(512,1); dim3 miBloque1D_transposeador((cant * (cant + 1))/2,1); // (N*(N+1))/2 // C = B copiador<<<miGrid1D, miBloque1D>>>(d_arreglo_B, d_arreglo_C, cant_elem); // B^t transposeador<<<miGrid1D, miBloque1D_transposeador>>>(d_arreglo_B, cant); // C += A * B^t multiplicador <<<miGrid1D, miBloque1D>>>(d_arreglo_A, d_arreglo_B, d_arreglo_C, cant, cant_elem); // A^t transposeador<<<miGrid1D, miBloque1D_transposeador>>>(d_arreglo_A, cant); // C += A^t sumador<<<miGrid1D, miBloque1D>>>(d_arreglo_A, d_arreglo_C, cant_elem); // esperamos a que termine la ejecucion cudaThreadSynchronize(); printf("-> Tiempo transcurrido en la GPU %f\n", dwalltime() - timetick); // nos traemos los resultados de la gpu a la cpu cudaMemcpy(arreglo_C, d_arreglo_C, numBytes, cudaMemcpyDeviceToHost); // imprimimos los resultados // printf("%s\n", ""); // printf("%s\n", "Resultados GPU:"); // print_CPU_array(arreglo_C, cant_elem); solucion_CPU(); // liberamos memoria free(arreglo_A); free(arreglo_B); free(arreglo_C); cudaFree (d_arreglo_A); cudaFree (d_arreglo_B); cudaFree (d_arreglo_C); }
24,117
#include <curand_kernel.h> __device__ float gamma(float k, curandState_t* state_ptr){ // gamma distribution float x; if(k<1){ // Weibull algorithm float c=1/k; float d=(1-k)*powf(k, 1/(c-1)); float z; float e; do{ z=-logf(curand_uniform(state_ptr)); e=-logf(curand_uniform(state_ptr)); x=powf(z, c); } while(z+e<d+x); }else{ // Cheng's algorithm float b=k-logf(4.0f); float l=sqrtf(2*k-1); float c=1+logf(4.5f); float u, v, y, z, r; do{ u=curand_uniform(state_ptr); v=curand_uniform(state_ptr); y=-logf(1/v-1)/l; x=k*expf(y); z=u*v*v; r=b+(k+l)*y-x; } while(r<4.5f*z-c && r<logf(z)); } return x; }
24,118
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cmath> #include <iostream> #include <forward_list> #include <chrono> cudaError_t findSimpleDividersWithCUDA(std::forward_list<long long> *result, long long value, int cudaCores); __device__ bool isPrime(long long value) { for (int i = 2; i <= sqrt((double) value); i++) { if (value%i == 0) return false; } return true; } __global__ void getSimpleDividersKernel(char *output, long long from, long long value, int step) { const long long current = threadIdx.x + from + step * blockIdx.x; long long outPos = current - from; output[outPos] = 0; if (value%current == 0 && isPrime(current)) { while (value%current == 0) { output[outPos]++; value /= current; } } } int main() { using namespace std; char *outputArray = nullptr; long long value; std::forward_list<long long> result; std::cout << "Write value:" << std::endl; std::cin >> value; auto begin = chrono::high_resolution_clock::now(); // Add vectors in parallel. cudaError_t cudaStatus = findSimpleDividersWithCUDA(&result, value, 1000); if (cudaStatus != cudaSuccess) { fprintf(stderr, "findSimpleDividersWithCUDA failed!"); return 1; } auto end = chrono::high_resolution_clock::now(); auto run_time = chrono::duration_cast<chrono::milliseconds>(end - begin).count(); cout << "It's done in " << run_time << "ms" << endl; // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } for (auto item : result) { std::cout << item << ' '; } return 0; } cudaError_t findSimpleDividersWithCUDA(std::forward_list<long long> *result, long long value, int cudaCores) { const long long from = 2; const long long buferSize = value - from; const long long blockCount = (buferSize / cudaCores) + (buferSize%cudaCores == 0 ? 
0 : 1); if (buferSize < cudaCores) { cudaCores = buferSize; } char *buffer_output = new char[buferSize]; char *dev_output; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc(&dev_output, buferSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } getSimpleDividersKernel <<<blockCount, cudaCores >>> (dev_output, from, value, cudaCores); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(buffer_output, dev_output, buferSize, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy 260 failed!"); goto Error; } for (long long j = 0; j < buferSize; j++) { int itCount = (int)buffer_output[j]; for (int k = 0; k < itCount; k++) { long long tempValue = from + j; result->push_front(tempValue); } } Error: delete[] buffer_output; cudaFree(dev_output); return cudaStatus; }
24,119
__global__ void reduce_kernel(const int* g_idata, int* g_odata, unsigned int n) { extern __shared__ int arr[]; long tid = threadIdx.x; long idx = (long)blockIdx.x * (long)blockDim.x + tid; if (idx < n) arr[tid] = g_idata[idx]; else arr[tid] = 0; __syncthreads(); for (long i = blockDim.x / 2; i > 0; i >>= 1) { if (tid < i) { if (tid + i < n) // Not needed arr[tid] += arr[tid + i]; } __syncthreads(); } if (tid == 0) g_odata[blockIdx.x] = arr[0]; } __host__ int reduce(const int* arr, unsigned int N, unsigned int threads_per_block) { int num_blocks = (N + threads_per_block - 1) / threads_per_block; int *darr, *dout; cudaMalloc((void**)&darr, N * sizeof(int)); cudaMalloc((void**)&dout, num_blocks * sizeof(int)); // int* tmp = new int[num_blocks]; int* tmp = new int[1]; cudaMemcpy(darr, arr, N * sizeof(int), cudaMemcpyHostToDevice); while (num_blocks > 1) { reduce_kernel<<<num_blocks, threads_per_block, sizeof(int) * threads_per_block>>>(darr, dout, N); cudaDeviceSynchronize(); // cudaMemcpy(tmp, dout, num_blocks * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(darr, dout, num_blocks * sizeof(int), cudaMemcpyDeviceToDevice); N = num_blocks; num_blocks = (num_blocks + threads_per_block - 1) / threads_per_block; // cudaMemcpy(darr, tmp, N * sizeof(int), cudaMemcpyHostToDevice); } reduce_kernel<<<num_blocks, threads_per_block, sizeof(int) * threads_per_block>>>(darr, dout, N); cudaDeviceSynchronize(); cudaMemcpy(tmp, dout, num_blocks * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(darr); cudaFree(dout); int ret = tmp[0]; delete[] tmp; // return tmp[0]; return ret; }
24,120
#include "includes.h"

// Metropolis proposal step over a dim x nwl grid of (coordinate, walker)
// threads: for walker w, only coordinate isb is perturbed by rr[w]; every
// other coordinate of xx is copied into xx1 unchanged. The update is
// written branchlessly so all lanes execute the same instructions.
__global__ void metropolisPoposal2 ( const int dim, const int nwl, const int isb, const float *xx, const float *rr, float *xx1 )
{
  const int coord  = threadIdx.x + blockDim.x * blockIdx.x;  // coordinate index
  const int walker = threadIdx.y + blockDim.y * blockIdx.y;  // walker index

  if ( coord >= dim || walker >= nwl )
  {
    return;  // thread falls outside the dim x nwl domain
  }

  const int flat = coord + walker * dim;  // column-major flattening, dim rows
  // (coord == isb) is 0 or 1, so only the selected coordinate moves.
  xx1[flat] = xx[flat] + ( coord == isb ) * rr[walker];
}
24,121
#include<iostream>
#include<ctime>
#include<cstring>
#define Size 512
using namespace std;

// Vector addition with 4x manual unrolling: each thread handles four
// elements spaced blockDim.x apart (one coalesced segment per step).
// A per-element tail path now guards the final partial chunk, so n need
// not be a multiple of 4 * blockDim.x (the original silently skipped it).
// BlockSize is kept as a template parameter for interface compatibility.
template<typename T, unsigned int BlockSize>
void __global__ add(const T* lhs, const T* rhs, T* sum, const unsigned int n)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
    if (idx + 3 * blockDim.x < n)
    {
        // Fast path: all four strided elements are in range.
        sum[idx]                  = lhs[idx]                  + rhs[idx];
        sum[idx + blockDim.x]     = lhs[idx + blockDim.x]     + rhs[idx + blockDim.x];
        sum[idx + blockDim.x * 2] = lhs[idx + blockDim.x * 2] + rhs[idx + blockDim.x * 2];
        sum[idx + blockDim.x * 3] = lhs[idx + blockDim.x * 3] + rhs[idx + blockDim.x * 3];
    }
    else
    {
        // Tail path: guard each of the four elements individually.
        for (unsigned int step = 0; step < 4; ++step)
        {
            unsigned int i = idx + step * blockDim.x;
            if (i < n) sum[i] = lhs[i] + rhs[i];
        }
    }
}

int main()
{
    int N = 1 << 19;
    dim3 block(Size, 1);
    // Each block consumes Size*4 elements; round up so odd N is covered.
    dim3 grid((N + Size * 4 - 1) / (Size * 4), 1);

    // Heap allocation: three float[1<<19] arrays are ~6 MiB, which would
    // overflow the default thread stack if declared as locals.
    float *a = new float[N], *b = new float[N], *c = new float[N];
    float *a_dev, *b_dev, *c_dev;
    clock_t start, end;

    memset(c, 0, N * sizeof(float));
    auto init = [&](float* p, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
        {
            p[i] = (float)(rand() & 0xff) / 100.0f;
        }
    };
    init(a, N);
    init(b, N);

    cudaMalloc((void**)&a_dev, sizeof(float) * N);
    cudaMalloc((void**)&b_dev, sizeof(float) * N);
    cudaMalloc((void**)&c_dev, sizeof(float) * N);
    cudaMemcpy(a_dev, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_dev, b, N * sizeof(float), cudaMemcpyHostToDevice);

    start = clock();
    add<float, Size><<<grid, block>>>(a_dev, b_dev, c_dev, N);
    cudaDeviceSynchronize();
    end = clock();
    cout << "sum time on gpu:" << end - start << endl;

    cudaMemcpy(c, c_dev, N * sizeof(float), cudaMemcpyDeviceToHost);

    // Single float additions are performed identically on host and
    // device, so exact comparison is acceptable here.
    for (int i = 0; i < N; i++)
    {
        if (a[i] + b[i] != c[i])
        {
            cout << "failed" << endl;
        }
    }

    // Release device and host buffers (the original leaked all of them).
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
24,122
/*
 * file name: matrix.cu
 *
 * matrix.cu contains the code that realize some common used matrix operations in CUDA
 *
 * this is a toy program for learning CUDA, some functions are reusable in other project
 */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>

#define BLOCK_SIZE 16

/*
 *********************************************************************
 function name: gpu_square_matrix_mult

 description: tiled product C = A * B on the GPU for a general
              m x n times n x k problem.

 FIX: the original version took a single square "width" but was launched
 with width = n on an m x n by n x k problem, so the result was wrong
 whenever m, n, k differed. The kernel now takes all three dimensions
 (its only caller, main below, was updated to match).

 parameters:
     &d_M  GPU pointer to an m x n matrix (A)
     &d_N  GPU pointer to an n x k matrix (B)
     &d_result GPU output pointer to an m x k matrix (C)

 Note: grid and block should be configured as:
     dim3 dim_grid((k - 1) / BLOCK_SIZE + 1, (m - 1) / BLOCK_SIZE + 1, 1);
     dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);

 return: none
 *********************************************************************
 */
__global__ void gpu_square_matrix_mult(int *d_M, int *d_N, int *d_result,
                                       int m, int n, int k)
{
    __shared__ int Mds[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int Nds[BLOCK_SIZE][BLOCK_SIZE];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = blockIdx.y * BLOCK_SIZE + ty;  // row of A / C
    int col = blockIdx.x * BLOCK_SIZE + tx;  // column of B / C

    int p_val = 0;
    int phases = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;  // tiles along the inner dim

    for (int ph = 0; ph < phases; ++ph) {
        // Load one tile of A and one tile of B, padding with 0 outside
        // the matrix so the inner product below needs no bounds checks.
        int a_col = ph * BLOCK_SIZE + tx;
        Mds[ty][tx] = (row < m && a_col < n) ? d_M[row * n + a_col] : 0;

        int b_row = ph * BLOCK_SIZE + ty;
        Nds[ty][tx] = (b_row < n && col < k) ? d_N[b_row * k + col] : 0;

        __syncthreads();  // tiles fully loaded before anyone reads them

        for (int t = 0; t < BLOCK_SIZE; ++t) {
            p_val += Mds[ty][t] * Nds[t][tx];
        }

        __syncthreads();  // tiles fully consumed before the next load
    }

    if (row < m && col < k) {
        d_result[row * k + col] = p_val;
    }
}

/*
 *********************************************************************
 function name: cpu_matrix_mult

 description: dot product of two matrices (not only square) on the CPU,
              for validating GPU results

 parameters:
     &h_a CPU host pointer to a m X n matrix (A)
     &h_b CPU host pointer to a n X k matrix (B)
     &h_result CPU host output pointer to a m X k matrix (C)

 return: none
 *********************************************************************
 */
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            int tmp = 0;  // was "int tmp = 0.0;" — pointless double literal
            for (int h = 0; h < n; ++h) {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}

int main(int argc, char const *argv[])
{
    int m, n, k;
    /* Fixed seed for illustration */
    srand(3333);
    printf("please type in m n and k\n");
    scanf("%d %d %d", &m, &n, &k);

    // allocate pinned memory in host RAM; h_cc stores the CPU reference
    int *h_a = NULL, *h_b, *h_c, *h_cc;
    cudaError_t err = cudaMallocHost((void **) &h_a, sizeof(int)*m*n);
    printf("Error status is %s\n", cudaGetErrorString(err));
    printf("%p\n", h_a);
    cudaMallocHost((void **) &h_b, sizeof(int)*n*k);
    cudaMallocHost((void **) &h_c, sizeof(int)*m*k);
    cudaMallocHost((void **) &h_cc, sizeof(int)*m*k);

    // random initialize matrix A
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }

    // random initialize matrix B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }

    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;

    // some events to count the execution time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // start to count execution time of GPU version
    cudaEventRecord(start, 0);

    // Allocate memory space on the device
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*m*n);
    cudaMalloc((void **) &d_b, sizeof(int)*n*k);
    cudaMalloc((void **) &d_c, sizeof(int)*m*k);

    // copy matrix A and B from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);

    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);

    // Launch kernel with the full m/n/k geometry (see kernel comment)
    gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);

    // Transfer results from device to host (cudaMemcpy blocks until done)
    cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    // time counting terminate
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    // compute time elapse on GPU computing
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);

    // start the CPU version
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);

    // validate results computed by GPU
    int all_ok = 1;
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            if (h_cc[i*k + j] != h_c[i*k + j]) {
                printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]);
                all_ok = 0;
            }
        }
        printf("\n");
    }

    // roughly compute speedup
    if (all_ok) {
        printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    } else {
        printf("incorrect results\n");
    }

    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
24,123
#include "stdio.h"

// Abort (via exit) with file/line context whenever a CUDA API call fails.
#define CUDA_ERR_CHECK(x) \
  do{ cudaError_t err = x; \
    if (err != cudaSuccess) { \
      fprintf(stderr, "Error \"%s\" at %s:%d \n", \
        cudaGetErrorString(err), __FILE__, __LINE__);\
      exit(0);\
    } \
  } while(0)

#define DGX 3
#define DGY 2
#define DBX 2
#define DBY 2
#define DBZ 2
#define N (DBX*DBY*DBZ*DGX*DGY)

// Debug kernel: prints the block/thread coordinates of every thread.
// Currently not launched from main (the launch below is commented out).
__global__ void gpu_kernel()
{
  /*
  int block_idx, grid_dim;
  block_idx = blockIdx.x;  // block index along x
  grid_dim = gridDim.x;    // total number of blocks along x
  */
  printf("\nThread:\n block_x # %d\t block_y # %d\t block_z # %d\n thread_x # %d\t thread_y # %d\t thread_z # %d\n",
         blockIdx.x, blockIdx.y, blockIdx.z,
         threadIdx.x, threadIdx.y, threadIdx.z);
}

// Queries the current device and dumps its properties.
// FIX: size_t fields are now printed with %zu — %zd is the conversion for
// the *signed* ssize_t and is undefined behavior for size_t values.
int main (void){
  dim3 grid(DGX, DGY);     // kept for the (commented-out) kernel launch
  dim3 block(DBX,DBY,DBZ);

  int dev;
  cudaDeviceProp prop;
  CUDA_ERR_CHECK( cudaGetDevice( &dev ) );
  CUDA_ERR_CHECK( cudaGetDeviceProperties(&prop, dev) );

  printf("name\t\t\t%s\n", prop.name);
  printf("totalGlobalMem\t\t%zu\n", prop.totalGlobalMem);
  printf("sharedMemPerBlock\t%zu\n", prop.sharedMemPerBlock);
  printf("regsPerBlock\t\t%d\n", prop.regsPerBlock);
  printf("warpSize\t\t%d\n", prop.warpSize);
  printf("memPitch\t\t%zu\n", prop.memPitch);
  printf("maxThreadsPerBlock\t%d\n", prop.maxThreadsPerBlock);
  printf("maxThreadsDim.x\t\t%d\n", prop.maxThreadsDim[0]);
  printf("maxThreadsDim.y\t\t%d\n", prop.maxThreadsDim[1]);
  printf("maxThreadsDim.z\t\t%d\n", prop.maxThreadsDim[2]);
  printf("maxGridSize.x\t\t%d\n", prop.maxGridSize[0]);
  printf("maxGridSize.y\t\t%d\n", prop.maxGridSize[1]);
  printf("maxGridSize.z\t\t%d\n", prop.maxGridSize[2]);
  printf("totalConstMem\t\t%zu\n", prop.totalConstMem);
  printf("major\t\t\t%d\n", prop.major);
  printf("minor\t\t\t%d\n", prop.minor);
  printf("clockRate\t\t%d\n", prop.clockRate);
  printf("textureAlignment\t%zu\n", prop.textureAlignment);
  printf("deviceOverlap\t\t%d\n", prop.deviceOverlap);
  printf("multiProcessorCount\t%d\n", prop.multiProcessorCount);
  printf("kernelExecTimeoutEnabled %d\n", prop.kernelExecTimeoutEnabled);
  printf("integrated\t\t%d\n", prop.integrated);
  printf("canMapHostMemory\t%d\n", prop.canMapHostMemory);
  printf("computeMode\t\t%d\n", prop.computeMode);
  printf("concurrentKernels\t%d\n", prop.concurrentKernels);
  printf("ECCEnabled\t\t%d\n", prop.ECCEnabled);
  printf("pciBusID\t\t%d\n", prop.pciBusID);
  printf("pciDeviceID\t\t%d\n", prop.pciDeviceID);
  printf("tccDriver\t\t%d\n", prop.tccDriver);

  printf("cudaComputeMode:\n");
  printf("cudaComputeModeDefault: %d\n", cudaComputeModeDefault);
  printf("cudaComputeModeExclusive: %d\n", cudaComputeModeExclusive);
  printf("cudaComputeModeProhibited: %d\n", cudaComputeModeProhibited);

  //gpu_kernel<<<grid, block>>>();
  CUDA_ERR_CHECK( cudaGetLastError() );
  CUDA_ERR_CHECK( cudaDeviceSynchronize() );

  return 0;
}
24,124
/*
 Program name: HelloGPU.cu
 Author name: Dr. Nileshchandra Pikle

 A minimal CUDA "hello world": helloCPU() runs on the host, while
 helloGPU() is a __global__ kernel launched with 2 blocks of 32 threads,
 so its message is printed once per thread.

 Build:  nvcc -arch=sm_35 1_HelloGPU.cu
 Run:    ./a.out
*/
#include <stdio.h>

// Host-side greeting.
void helloCPU()
{
  printf("Hello from the CPU.\n");
}

// Device-side greeting; each launched thread prints one line.
__global__ void helloGPU()
{
  printf("Hello also from the GPU.\n");
}

int main()
{
  helloCPU();

  // <<<number of thread blocks, threads per block>>>
  helloGPU<<<2, 32>>>();

  // Device printf output is only guaranteed to appear once the host
  // synchronizes with the device.
  cudaDeviceSynchronize();
  return 0;
}
24,125
#include <iostream>
#include <vector>
#include <cstdio>
#include <cmath>
#include <string>
#include <stdexcept>
#include <exception>

/** macro to throw a runtime error with file/line context */
#define THROW(fmt, ...)                                                   \
  do {                                                                    \
    std::string msg;                                                      \
    char errMsg[2048];                                                    \
    std::sprintf(errMsg, "Exception occured! file=%s line=%d: ", __FILE__,\
                 __LINE__);                                               \
    msg += errMsg;                                                        \
    std::sprintf(errMsg, fmt, ##__VA_ARGS__);                             \
    msg += errMsg;                                                        \
    throw std::runtime_error(msg);                                        \
  } while (0)

/** macro to check for a conditional and assert on failure */
#define ASSERT(check, fmt, ...)                                           \
  do {                                                                    \
    if (!(check)) THROW(fmt, ##__VA_ARGS__);                              \
  } while (0)

/** check for cuda runtime API errors and assert accordingly */
#define CUDA_CHECK(call)                                                  \
  do {                                                                    \
    cudaError_t status = call;                                            \
    ASSERT(status == cudaSuccess, "FAIL: call='%s'. Reason:%s\n", #call,  \
           cudaGetErrorString(status));                                   \
  } while (0)

// Fills x[0..N) with a and y[0..N) with b. Expects a 1-D launch that
// covers at least N threads; extra threads fall out on the guard.
__global__ void init_xy(double* x, double* y, double a, double b, int N) {
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  const int idx = bid*blockDim.x + tid;
  if (idx < N) {
    x[idx] = a;
    y[idx] = b;
  }
}

// z = a*x + y, element-wise, for N elements (1-D launch, guarded tail).
__global__ void saxpy(const double* x, const double* y, const double a,
                      const int N, double* z) {
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  const int idx = bid*blockDim.x + tid;
  if (idx < N) {
    z[idx] = a*x[idx] + y[idx];
  }
}

// Copies d_z back to the host and verifies every element equals a*1 + 2
// (x was initialized to 1, y to 2; all values are exactly representable,
// so exact comparison is safe here). Returns 0 on success, -1 on mismatch.
int checkResults(double* d_z, double a, const int N) {
  std::vector<double> h_z(N);
  CUDA_CHECK(cudaMemcpy(h_z.data(), d_z, sizeof(double)*N,
                        cudaMemcpyDeviceToHost));
  int i = 0;
  for (auto zi : h_z) {
    if (zi != a*1 + 2) {
      // FIX: the original format string ended in "%fn" (missing the
      // backslash), printing a literal 'n' instead of a newline.
      std::printf("ERROR: z[%d]=%f != %f\n", i, zi, a*1 + 2);
      return -1;
    }
    i++;
  }
  return 0;
}

int main() {
  const int N = 100;
  double* d_x;
  double* d_y;
  double* d_z;
  CUDA_CHECK(cudaMalloc(&d_x, sizeof(double)*N));
  CUDA_CHECK(cudaMalloc(&d_y, sizeof(double)*N));
  CUDA_CHECK(cudaMalloc(&d_z, sizeof(double)*N));

  const int threads_per_block = 64;
  const int num_blocks = ceil((double)N/threads_per_block);

  init_xy<<<num_blocks, threads_per_block>>>(d_x, d_y, 1.0, 2.0, N);
  CUDA_CHECK(cudaGetLastError());

  double a = 42.0;
  saxpy<<<num_blocks, threads_per_block>>>(d_x, d_y, a, N, d_z);
  CUDA_CHECK(cudaGetLastError());

  int rc = checkResults(d_z, a, N);

  // FIX: release the device buffers (the original leaked all three).
  CUDA_CHECK(cudaFree(d_x));
  CUDA_CHECK(cudaFree(d_y));
  CUDA_CHECK(cudaFree(d_z));
  return rc;
}
24,126
// counting Hamilton cycle, CUDA acceleration
//
// NOTE(review): the structure looks like an inclusion-exclusion count over
// vertex subsets (closed walks of length n restricted to each subset,
// alternating signs over subset sizes in main) -- confirm against the
// original write-up before relying on this description.
#include<stdio.h>
#include<stdlib.h>

#define MAX_BLOCK_SIZE 1024
#define MAX_ARRAY_SIZE (1024*8)

// any 2 <= mod <= 2^31 should work
// Overflow-free modular addition. When called with mod == 0 (as main
// does), c >= 0 is always true and the function returns c - 0: plain
// unsigned addition mod 2^32.
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) {
  unsigned c = a+b;
  return c >= mod ? c-mod : c;
}

// One "virtual block" = one (threadIdx.y, blockIdx.x) pair; it walks a
// strided sequence of vertex subsets starting from part[bid]. Within a
// virtual block, blockDim.x == k threads cooperate, one per set bit of
// the current subset s.
//
// Launch shape: <<<gridSize, dim3(k, blockSize)>>> from main, so
//   blockDim.x = k   (subset size, k <= 32)
//   blockDim.y       (virtual blocks per CUDA block)
// Shared limits: ai[32] caps n at 32 vertices; qc[1024] caps
// blockDim.x * blockDim.y at MAX_BLOCK_SIZE.
__global__ void ha2(int n, int work, unsigned *part, int *adj, unsigned *ret, unsigned int mod) {
  __shared__ unsigned qc[1024];  // per-lane partial walk counts
  __shared__ unsigned ai[32];    // adjacency rows packed as bitmasks
  int k = blockDim.x;            // subset size
  int tid = threadIdx.x;         // lane within the virtual block
  int sha = threadIdx.y * k;     // this virtual block's slice of qc[]
  int bid = threadIdx.y + blockIdx.x * blockDim.y;  // virtual block id
  int gridSize = blockDim.y * gridDim.x;            // total virtual blocks
  unsigned s = part[bid];        // starting subset (bitmask over vertices)
  unsigned mask = (1u<<k) - 1;
  unsigned total = 0;

  // Pack each adjacency row i into bitmask ai[i] (bit j = edge i->j).
  for (int i = tid+sha; i < n; i += blockDim.y * k) {
    unsigned aa = 0;
    for (int j = 0; j < n; j++) {
      aa = aa | adj[i * n + j] << j;
    }
    ai[i] = aa;
  }
  __syncthreads();

  // Each iteration processes one subset; s advances by gridSize subsets
  // in enumeration order (Gosper-style next-combination at loop bottom).
  for (int runs = 0; runs < work; runs += gridSize) {
    // first transition
    // Lane tid owns the tid-th set bit of s ("at" = its vertex index);
    // "row" collects, over the other members of s, whether vertex "at"
    // is adjacent to them (compressed to k bits in lane order).
    unsigned row = s;
    for (int i = 0; i < tid; i++) {
      row = row & (row-1);  // clear tid lowest set bits
    }
    unsigned at = __ffs(row)-1;
    row = 0;
    {
      unsigned me = ai[at];
      for (int i = n-2; i >= 0; i--) {
        if (s>>i & 1) {
          row = row + row + (me>>i & 1);
        }
      }
      // Seed: walks start from vertex n-1 (which is outside s).
      qc[tid+sha] = (me >> (n-1)) & 1;
      __syncthreads();
    }

    // calculate each transition, uses GPU SIMD feature
    // n-2 matrix-vector steps of the k x k restricted transfer matrix;
    // the double barrier keeps read/write phases of qc[] separated.
    for (int t = 1; t < n-1; t++) {
      unsigned sum = 0;
      for (int i = 0; i < k; i++) {
        sum = mod_sum(sum, qc[i+sha] * (row>>i & 1), mod);
      }
      __syncthreads();
      qc[tid+sha] = sum;
      __syncthreads();
    }

    // last transition
    // Close the walk back to vertex n-1, then reduce over lanes.
    {
      if (!(ai[n-1] >> at & 1)) qc[tid+sha] = 0;
      __syncthreads();
      unsigned count = 0;
      for (int i = 0; i < k; i++) {
        count = mod_sum(count, qc[i+sha], mod);
      }
      //if (tid==0) printf("[%d:%d],", s, count);
      // Guard: the last stride may overshoot the "work" subsets.
      if (runs + bid < work) {
        total = mod_sum(count, total, mod);
      }
    }

    // Advance s by one combination (same popcount), Gosper-like:
    // add lowest set bit, then refill the low bits that were carried out.
    unsigned bit = s & (-s);
    s += bit;
    s |= mask >> __popc(s);
    __syncthreads();
  }

  if (tid == 0) {
    // output total for this block
    ret[bid] = total;
  }
}

int n;                           // number of vertices (<= 32, see ai[32])
int adj[1024];                   // n x n adjacency matrix, row-major
unsigned part[MAX_ARRAY_SIZE];   // starting subset per virtual block
unsigned ret[MAX_ARRAY_SIZE];    // per-virtual-block partial totals
int nCr[33][33];                 // binomial coefficient table

// Decodes the idx-th combination (in the enumeration order implied by
// the nCr table) of r set bits among n-1 positions, as a bitmask.
unsigned getComb(int idx, int n, int r) {
  unsigned ans = 0;
  n -= 1;
  while (r > 0) {
    if (idx < nCr[n][r]) n -= 1;
    else {
      ans |= 1u<<(n);
      idx -= nCr[n][r];
      n -= 1;
      r -= 1;
    }
  }
  return ans;
}

int main() {
  int *gpu_adj;
  unsigned *gpu_part, *gpu_ret;
  scanf("%d", &n);

  // NOTE(review): this random fill is immediately overwritten by the
  // scanf loop below; its only lasting effect is advancing rand().
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      if (i != j) adj[i*n+j] = rand()>>5&1;
    }
  }
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) scanf("%d", &adj[i*n+j]);
  }

  // Pascal's triangle for nCr.
  for (int i = 0; i <= 32; i++) {
    nCr[i][0] = nCr[i][i] = 1;
    for (int j = 1; j < i; j++) nCr[i][j] = nCr[i-1][j-1] + nCr[i-1][j];
  }

  cudaMalloc(&gpu_part, sizeof part);
  cudaMalloc(&gpu_adj, sizeof adj);
  cudaMalloc(&gpu_ret, sizeof ret);
  cudaMemcpy(gpu_adj, adj, sizeof adj, cudaMemcpyHostToDevice);

  unsigned ans = 0;
  unsigned mod = 0;  // 0 => arithmetic is plain unsigned mod 2^32

  // One pass per subset size k over the first n-1 vertices.
  for (int k = 1; k <= n-1; k++) {
    int wo = nCr[n-1][k];  // number of k-subsets to process
    int blockSize = wo;
    if (blockSize > MAX_BLOCK_SIZE / k) blockSize = MAX_BLOCK_SIZE / k;
    int gridSize = wo / blockSize;
    if (blockSize * gridSize > MAX_ARRAY_SIZE) gridSize = MAX_ARRAY_SIZE / blockSize;
    int totSize = blockSize * gridSize;  // virtual blocks actually launched
    printf("block size = (%d,%d,1) grid size = (%d,1,1)\n", k, blockSize, gridSize);
    //for (int j = 0; j < wo; j++) printf("%d,", getComb(j, n-1, k));

    // Give each virtual block an evenly spaced starting combination.
    for (int j = 0; j < totSize; j++) {
      int step = wo / totSize * j;
      if (j < wo % totSize) step += j;
      else step += wo % totSize;
      //printf("step=%d\n", step);
      part[j] = getComb(step, n-1, k);
    }
    cudaMemcpy(gpu_part, part, sizeof(int) * totSize, cudaMemcpyHostToDevice);
    ha2<<<gridSize, dim3(k, blockSize)>>>(n, wo, gpu_part, gpu_adj, gpu_ret, mod);
    cudaDeviceSynchronize();
    cudaMemcpy(ret, gpu_ret, sizeof(int) * totSize, cudaMemcpyDeviceToHost);

    unsigned sum = 0;
    for (int j = 0; j < totSize; j++) {
      sum = mod_sum(sum, ret[j], 0);
    }
    printf("sum = %u\n", sum);

    // Inclusion-exclusion sign by parity of the excluded-set size.
    if ((n-k)%2 == 1) ans = mod_sum(ans, sum, mod);
    else if (sum != 0) ans = mod_sum(ans, mod-sum, mod);
  }
  printf("ans = %u\n", ans);

  cudaFree(gpu_ret);
  cudaFree(gpu_adj);
  cudaFree(gpu_part);
}
24,127
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>   // std::cout
#include <algorithm>  // std::sort
#include <vector>     // std::vector
#include <time.h>

using namespace std;

#define PINNED 1
#define THREADS 1000

// A 2-D data point.
struct Point
{
    float x, y; // Co-ordinate of point
};

// Sorts the distance array ascending while keeping the label array (val)
// aligned with it.
// FIX: the original used int temporaries for the swap, truncating the
// float distances and labels and corrupting the sort result for both the
// CPU and GPU code paths.
void selectionSort(float *result_prediction_host, float *ref_points_host_val, int n)
{
    for (int i = 0; i < n - 1; i++)
    {
        int min = i;
        for (int j = i + 1; j < n; j++)
            if (result_prediction_host[j] < result_prediction_host[min])
                min = j;

        float temp  = result_prediction_host[i];
        float temp2 = ref_points_host_val[i];
        result_prediction_host[i] = result_prediction_host[min];
        ref_points_host_val[i]    = ref_points_host_val[min];
        result_prediction_host[min] = temp;
        ref_points_host_val[min]    = temp2;
    }
}

/**
 * Sequential KNN classifier (reference implementation).
 *
 * @param arr refence points
 * @param val labels (0 or 1) aligned with arr
 * @param n   number of reference points
 * @param k   number of points we want to use for the prediction
 * @param p   point we want to predict
 * @return the majority label among the k nearest neighbours (0 or 1)
 */
int classifyAPoint(Point arr[], float *val, int n, int k, Point p)
{
    float distance[n];

    // Fill distances of all points from p
    for (int i = 0; i < n; i++)
        distance[i] = sqrt((arr[i].x - p.x) * (arr[i].x - p.x) +
                           (arr[i].y - p.y) * (arr[i].y - p.y));

    // Sort the Points by distance from p
    selectionSort(distance, val, n);

    // Now consider the first k elements and only two groups
    int freq1 = 0; // Frequency of group 0
    int freq2 = 0; // Frequency of group 1
    for (int i = 0; i < k; i++)
    {
        if (val[i] == 0)
            freq1++;
        else if (val[i] == 1)
            freq2++;
    }

    printf ("freq1 is %d.\n", freq1);
    printf ("freq2 is %d.\n", freq2);

    return (freq1 > freq2 ? 0 : 1);
}

// Copies the reference points into the flat host arrays used for the
// device transfers (struct-of-arrays layout for coalesced access).
void InitHostInput(Point arr[], float *val, int n, Point p,
                   float *ref_points_host_x, float *ref_points_host_y,
                   float *ref_points_host_val)
{
    for (int i = 0; i < n; i++)
    {
        ref_points_host_x[i]   = arr[i].x;
        ref_points_host_y[i]   = arr[i].y;
        ref_points_host_val[i] = val[i];
    }
}

// Zeroes the two frequency accumulators.
void InitHostFreq(unsigned int *freq1_host, unsigned int *freq2_host)
{
    freq1_host[0] = 0;
    freq2_host[0] = 0;
}

// Euclidean distance from p to every reference point.
// FIX: added the i < n guard — the grid is rounded up to a multiple of
// the block size, so trailing threads must not write out of bounds.
__global__ void calculateDistance(int n, Point p,
                                  float *ref_points_dev_x, float *ref_points_dev_y,
                                  float *result_prediction_dev)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        result_prediction_dev[i] =
            sqrt((ref_points_dev_x[i] - p.x) * (ref_points_dev_x[i] - p.x) +
                 (ref_points_dev_y[i] - p.y) * (ref_points_dev_y[i] - p.y));
    }
}

// Counts label frequencies among the first k (sorted) labels.
__global__ void calculateFreq(int k, float *ref_points_host_val,
                              unsigned int *freq1_dev, unsigned int *freq2_dev)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < k)
    {
        if (ref_points_host_val[i] == 0)
        {
            atomicAdd(&freq1_dev[0], 1);
        }
        else if (ref_points_host_val[i] == 1)
        {
            atomicAdd(&freq2_dev[0], 1);
        }
    }
}

// CUDA KNN classifier: distances on the GPU, sort on the host, label
// frequencies on the GPU. Same contract as classifyAPoint.
int classifyAPointCUDA(Point arr[], float *val, int n, int k, Point p)
{
    unsigned int numBytes;
    unsigned int nBlocks, nThreads;

    float TiempoKernelDistance, TiempoSort, TiempoKernelFreq, TiempoAllOperations, TiempoProva;
    cudaEvent_t E0, E1, E2, E3, E4, E5, E6, E7;
    cudaEventCreate(&E0); cudaEventCreate(&E1);
    cudaEventCreate(&E2); cudaEventCreate(&E3);
    cudaEventCreate(&E4); cudaEventCreate(&E5);
    cudaEventCreate(&E6); cudaEventCreate(&E7);
    cudaEventRecord(E6, 0);

    float *ref_points_dev_x = NULL;
    float *ref_points_dev_y = NULL;
    float *ref_points_dev_val = NULL;
    float *result_prediction_dev = NULL;
    float *ref_points_host_x = NULL;
    float *ref_points_host_y = NULL;
    float *ref_points_host_val = NULL;
    float *result_prediction_host = NULL;
    unsigned int *freq1_dev = NULL;
    unsigned int *freq2_dev = NULL;
    unsigned int *freq1_host = NULL;
    unsigned int *freq2_host = NULL;

    // numero de Threads
    nThreads = THREADS;
    // numero de Blocks en cada dimension (round up so every point is covered)
    nBlocks = (n + nThreads - 1) / nThreads;
    printf("nBlocks = %d \n", nBlocks);
    numBytes = nBlocks * nThreads * sizeof(float);
    printf("numBytes = %d \n", numBytes);

    if (PINNED) {
        // Obtiene Memoria [pinned] en el host
        cudaMallocHost((float**)&ref_points_host_x, numBytes);
        cudaMallocHost((float**)&ref_points_host_y, numBytes);
        cudaMallocHost((float**)&ref_points_host_val, numBytes);
        cudaMallocHost((float**)&result_prediction_host, numBytes);
        cudaMallocHost((unsigned int**)&freq1_host, sizeof(unsigned int));
        cudaMallocHost((unsigned int**)&freq2_host, sizeof(unsigned int));
    }
    else {
        // Obtener Memoria en el host
        ref_points_host_x = (float*) malloc(numBytes);
        ref_points_host_y = (float*) malloc(numBytes);
        ref_points_host_val = (float*) malloc(numBytes);
        result_prediction_host = (float*) malloc(numBytes);
        freq1_host = (unsigned int*) malloc(sizeof(unsigned int));
        freq2_host = (unsigned int*) malloc(sizeof(unsigned int));
    }

    InitHostInput(arr, val, n, p, ref_points_host_x, ref_points_host_y, ref_points_host_val);
    InitHostFreq(freq1_host, freq2_host);

    // Obtener Memoria en el device
    cudaMalloc((float**)&ref_points_dev_x, numBytes);
    cudaMalloc((float**)&ref_points_dev_y, numBytes);
    cudaMalloc((float**)&ref_points_dev_val, numBytes);
    cudaMalloc((float**)&result_prediction_dev, numBytes);
    cudaMalloc((unsigned int**)&freq1_dev, sizeof(unsigned int));
    cudaMalloc((unsigned int**)&freq2_dev, sizeof(unsigned int));

    // Copiar datos desde el host en el device
    cudaMemcpy(ref_points_dev_x, ref_points_host_x, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(ref_points_dev_y, ref_points_host_y, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(ref_points_dev_val, ref_points_host_val, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(result_prediction_dev, result_prediction_host, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(freq1_dev, freq1_host, sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(freq2_dev, freq2_host, sizeof(unsigned int), cudaMemcpyHostToDevice);

    // FIX: the original decremented nBlocks here ("nBlocks = nBlocks-1"),
    // which — combined with the missing bounds guard — left the last
    // chunk of distances uncomputed.
    cudaEventRecord(E0, 0);

    // Ejecutar el kernel
    calculateDistance<<<nBlocks, nThreads>>>(n, p, ref_points_dev_x, ref_points_dev_y, result_prediction_dev);

    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    cudaEventElapsedTime(&TiempoKernelDistance, E0, E1);

    // Obtener el resultado desde el host
    cudaMemcpy(result_prediction_host, result_prediction_dev, numBytes, cudaMemcpyDeviceToHost);

    // Liberar Memoria del device
    cudaFree(ref_points_dev_x);
    cudaFree(ref_points_dev_y);
    cudaFree(result_prediction_dev);

    cudaEventRecord(E4, 0);
    // Sort the Points by distance from p
    selectionSort(result_prediction_host, ref_points_host_val, n);
    cudaEventRecord(E5, 0);
    cudaEventSynchronize(E5);
    cudaEventElapsedTime(&TiempoSort, E4, E5);

    // FIX: push the *sorted* labels back to the device. The original ran
    // calculateFreq on the unsorted copy uploaded before the sort, so the
    // frequencies were taken over k arbitrary labels, not the k nearest.
    cudaMemcpy(ref_points_dev_val, ref_points_host_val, n * sizeof(float), cudaMemcpyHostToDevice);

    cudaEventRecord(E2, 0);
    // Ejecutar el kernel
    calculateFreq<<<k, 1>>>(k, ref_points_dev_val, freq1_dev, freq2_dev);
    cudaEventRecord(E3, 0);
    cudaEventSynchronize(E3);
    cudaEventElapsedTime(&TiempoKernelFreq, E2, E3);

    TiempoAllOperations = TiempoKernelDistance + TiempoSort + TiempoKernelFreq;

    cudaMemcpy(freq1_host, freq1_dev, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaMemcpy(freq2_host, freq2_dev, sizeof(unsigned int), cudaMemcpyDeviceToHost);

    cudaFree(ref_points_dev_val);
    cudaFree(freq1_dev);
    cudaFree(freq2_dev);

    int result = -1;
    if (freq1_host[0] > freq2_host[0]) result = 0;
    else result = 1;

    printf ("freq1 is %d.\n", freq1_host[0]);
    printf ("freq2 is %d.\n", freq2_host[0]);
    printf ("The value classified to unknown point"
            " is %d.\n", result);

    printf("Invocació Kernel <<<nBlocks, nKernels>>> (N): <<<%d, %d>>> (%d)\n", nBlocks, nThreads, n);
    printf("Tiempo Kernel calculo distancia (00): %4.6f milseg\n", TiempoKernelDistance);
    printf("Tiempo Kernel calculo freq (00): %4.6f milseg\n", TiempoKernelFreq);
    printf("Tiempo Sort (00): %4.6f milseg\n", TiempoSort);
    printf("Tiempo todas las operaciones (00): %4.6f milseg\n", TiempoAllOperations);
    if (PINNED) printf("Usando Pinned Memory\n");
    else printf("NO usa Pinned Memory\n");

    if (PINNED) {
        cudaFreeHost(ref_points_host_x); cudaFreeHost(ref_points_host_y);
        cudaFreeHost(ref_points_host_val); cudaFreeHost(result_prediction_host);
        cudaFreeHost(freq1_host); cudaFreeHost(freq2_host);
    }
    else {
        free(ref_points_host_x); free(ref_points_host_y);
        free(ref_points_host_val); free(result_prediction_host);
        free(freq1_host); free(freq2_host);
    }

    cudaEventRecord(E7, 0);
    cudaEventSynchronize(E7);
    cudaEventElapsedTime(&TiempoProva, E6, E7);
    printf("Temps total CUDA: %4.6f milseg\n", TiempoProva);

    return result;
}

// Default number of neighbours considered.
void InitKDefecte(int *k)
{
    // Parameter to decide group of the testing point
    (*k) = 15;
}

// Default test point coordinates.
void InitTestPointDefecte(struct Point *p)
{
    //Test Point
    p->x = 2.5;
    p->y = 7;
}

void InitDefecte(int *k, struct Point *p)
{
    InitKDefecte(k);
    InitTestPointDefecte(p);
}

int main(int argc, char** argv)
{
    srand(time(0));

    //Es declaren les variables
    int n, k;
    struct Point p;

    //S'inicialitza la K, i les coordenades del Testing point
    if (argc == 1)      { InitDefecte(&k, &p); }
    else if (argc == 2) { k = atoi(argv[1]); InitTestPointDefecte(&p); }
    else if (argc == 4) { k = atoi(argv[1]); p.x = atof(argv[2]); p.y = atof(argv[3]); }
    else {
        printf("Usage: ./exe k TestPointCoordenadaX TestPointCoordenadaY\n");
        exit(0);
    }

    //Es crea l'estructura sobre la qual es vol fer la predicció
    n = 10000; // Number of data points
    Point arr[n];
    float val[n];
    float val_cuda[n];

    for (int i = 0; i < n; ++i)
    {
        arr[i].x = rand() % 100;
        arr[i].y = rand() % 100;
        val[i] = rand() % 2;
        val_cuda[i] = val[i];
    }

    printf("k = %d \n", k);
    printf("The Testing Point values are:");
    printf(" x = %f", p.x);
    printf(" and");
    printf(" y = %f", p.y);
    printf("\n");

    printf("\n");
    printf("Programa Seqüencial -------------------------------------------------- \n");
    printf("\n");

    // Calculate the time taken by the sequential code: classifyAPoint function
    clock_t t;
    t = clock();
    int result = classifyAPoint(arr, val, n, k, p);
    t = clock() - t;
    float time_taken = ((float)t)/(CLOCKS_PER_SEC/1000); // in mseconds

    printf ("The value classified to unknown point"
            " is %d.\n", result);
    printf ("Temps total seqüencial:"
            " %lf milseg.\n", time_taken);

    printf("\n");
    printf("Programa CUDA -------------------------------------------------------- \n");
    printf("\n");

    int result2 = classifyAPointCUDA(arr, val_cuda, n, k, p);
    printf ("The value classified to unknown point"
            " is %d.\n", result2);
}
24,128
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <ctype.h> #include <sys/types.h> //----------------------------------------------------------------------------// //----------------------------------ppmFile.c----------------------------------// //----------------------------------------------------------------------------// typedef struct Image { int width; int height; unsigned char *data; } Image; /************************ private functions ****************************/ /* die gracelessly */ static void die(char const *message) { fprintf(stderr, "ppm: %s\n", message); exit(1); } /* check a dimension (width or height) from the image file for reasonability */ static void checkDimension(int dim) { if (dim < 1 || dim > 6000) die("file contained unreasonable width or height"); } /* read a header: verify format and get width and height */ static void readPPMHeader(FILE *fp, int *width, int *height) { char ch; int maxval; if (fscanf(fp, "P%c\n", &ch) != 1 || ch != '6') die("file is not in ppm raw format; cannot read"); /* skip comments */ ch = getc(fp); while (ch == '#') { do { ch = getc(fp); } while (ch != '\n'); /* read to the end of the line */ ch = getc(fp); } if (!isdigit(ch)) die("cannot read header information from ppm file"); ungetc(ch, fp); /* put that digit back */ /* read the width, height, and maximum value for a pixel */ fscanf(fp, "%d%d%d\n", width, height, &maxval); if (maxval != 255) die("image is not true-color (24 bit); read failed"); checkDimension(*width); checkDimension(*height); } /************************ exported functions ****************************/ __host__ Image * ImageCreate(int width, int height) { Image *image = (Image *) malloc(sizeof(Image)); if (!image) die("cannot allocate memory for new image"); image->width = width; image->height = height; image->data = (unsigned char *) malloc(width * height * 3); if (!image->data) die("cannot allocate memory for new image"); return image; } __host__ Image * 
ImageRead(char const *filename) { int width, height, num, size; //unsigned *p; Image *image = (Image *) malloc(sizeof(Image)); FILE *fp = fopen(filename, "rb"); if (!image) die("cannot allocate memory for new image"); if (!fp) die("cannot open file for reading"); readPPMHeader(fp, &width, &height); size = width * height * 3; image->data = (unsigned char*) malloc(size); image->width = width; image->height = height; if (!image->data) die("cannot allocate memory for new image"); num = fread((void *) image->data, 1, (size_t) size, fp); if (num != size) die("cannot read image data from file"); fclose(fp); return image; } __host__ void ImageWrite(Image *image, char const *filename) { int num; int size = image->width * image->height * 3; FILE *fp = fopen(filename, "wb"); if (!fp) die("cannot open file for writing"); fprintf(fp, "P6\n%d %d\n%d\n", image->width, image->height, 255); num = fwrite((void *) image->data, 1, (size_t) size, fp); if (num != size) die("cannot write image data to file"); fclose(fp); } __host__ int ImageWidth(Image *image) { return image->width; } __host__ int ImageHeight(Image *image) { return image->height; } __host__ void ImageClear(Image *image, unsigned char red, unsigned char green, unsigned char blue) { int i; int pix = image->width * image->height; unsigned char *data = image->data; for (i = 0; i < pix; i++) { *data++ = red; *data++ = green; *data++ = blue; } } __device__ void ImageSetPixel(unsigned char* data, int x, int y, int chan, unsigned char val,int width) // changed for data use { int offset = (y * width + x) * 3 + chan; data[offset] = val; } __device__ unsigned char ImageGetPixel(unsigned char* data, int x, int y, int chan, int width) //changed for data use { int offset = (y * width + x) * 3 + chan; return data[offset]; } //========================================================================================// //============================== MY CODE ===============================// 
//========================================================================================//

/* Accumulator for one RGB pixel (int to avoid unsigned char overflow while summing). */
typedef struct pix{
    int r,g,b;
}pix;

//--------------------------------KERNEL FUNCTION---------------------//

/*
 * Average the (2r+1)x(2r+1) box centred on (x, y), clamped to the image
 * borders.  Bounds are now inclusive on both sides so the window is
 * symmetric around the centre pixel; the original excluded the x+r / y+r
 * edge whenever it was not clamped (off-by-one).
 */
__device__ pix getAvg(unsigned char* data,int w,int h,int r,int x, int y){
    pix avg = {0};

    // define bounds, clamped to the image
    int xMin = x - r, xMax = x + r;
    int yMin = y - r, yMax = y + r;
    if (xMin < 0)     xMin = 0;
    if (yMin < 0)     yMin = 0;
    if (xMax > w - 1) xMax = w - 1;
    if (yMax > h - 1) yMax = h - 1;

    // i is y, j is x: row-first iteration is efficient for cache
    for (int i = yMin; i <= yMax; i++) {
        for (int j = xMin; j <= xMax; j++) {
            avg.r += ImageGetPixel(data, j, i, 0, w);
            avg.g += ImageGetPixel(data, j, i, 1, w);
            avg.b += ImageGetPixel(data, j, i, 2, w);
        }
    }

    int num = (yMax - yMin + 1) * (xMax - xMin + 1);  // pixels actually summed
    avg.r = avg.r / num;
    avg.g = avg.g / num;
    avg.b = avg.b / num;
    return avg;
}

//--------------------------KERNEL---------------------------//

/*
 * Box-blur kernel: each thread walks the image with a 2-D grid-stride loop,
 * reading from oldData and writing the blurred pixel to newData, so any
 * image size is covered regardless of launch geometry.
 */
__global__ void myKernel(unsigned char* oldData, unsigned char* newData,int WIDTH, int HEIGHT, int r){
    int indx = blockIdx.x * blockDim.x + threadIdx.x;
    int indy = blockIdx.y * blockDim.y + threadIdx.y;
    int stride_x = gridDim.x * blockDim.x;
    int stride_y = gridDim.y * blockDim.y;

    for (int i = indy; i < HEIGHT; i += stride_y) {
        for (int j = indx; j < WIDTH; j += stride_x) {
            pix avg = getAvg(oldData, WIDTH, HEIGHT, r, j, i);
            ImageSetPixel(newData, j, i, 0, avg.r, WIDTH);
            ImageSetPixel(newData, j, i, 1, avg.g, WIDTH);
            ImageSetPixel(newData, j, i, 2, avg.b, WIDTH);
        }
    }
}

//------------------------------------MAIN----------------------------------//

/* Usage: prog <radius> <input.ppm> <output.ppm> */
int main(int argc, char *argv[]){
    //--------------Handle Input Arguments
    if (argc != 4) {  // guard against missing arguments (original indexed argv blindly)
        fprintf(stderr, "usage: %s <radius> <input.ppm> <output.ppm>\n", argv[0]);
        return 1;
    }
    int r = atoi(argv[1]);
    char const * inFile = argv[2];
    char const * outFile = argv[3];

    //--------------Create: read input, allocate output of the same size
    Image* oldPic = ImageRead(inFile);
    int WIDTH = ImageWidth(oldPic);
    int HEIGHT = ImageHeight(oldPic);
    Image* newPic = ImageCreate(WIDTH, HEIGHT);
    printf("Processing...\n");

    //------------------cuda init----------------//
    dim3 blockDim(32,32);  // 1024 threads per block
    dim3 gridDim(20,20);   // fixed grid; the kernel's grid-stride loop covers any size
    int dsize = WIDTH * HEIGHT * 3;  // size of pixel data in bytes

    unsigned char* oldDataDevice;  // device copies of the pixel buffers
    unsigned char* newDataDevice;
    cudaMalloc(&oldDataDevice, dsize);
    cudaMalloc(&newDataDevice, dsize);
    cudaMemcpy(oldDataDevice, oldPic->data, dsize, cudaMemcpyHostToDevice);

    //----------------------KERNEL--------------------//
    myKernel<<<gridDim,blockDim>>>(oldDataDevice, newDataDevice, WIDTH, HEIGHT, r);
    cudaError_t err = cudaGetLastError();  // catch launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceSynchronize();

    //----------------------post proccess------------------------//
    cudaMemcpy(newPic->data, newDataDevice, dsize, cudaMemcpyDeviceToHost);  // copy back
    ImageWrite(newPic, outFile);

    cudaFree(oldDataDevice);
    cudaFree(newDataDevice);
    printf("New picture written to: %s\n", outFile);
    return 0;
}
24,129
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include "cuda_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <random>
#include <chrono>
#include <math.h>

using namespace std;

// Define error values
#define MEMORY_ERROR -2
#define INPUT_ERROR -1

// Define Constant values
#define MIN_VALUE -100.0
#define MAX_VALUE 100.0
#define WARP_SIZE 32
#define MAX_BLOCK_THREADS 512
#define MAX_WARPS_BLOCK_Y 16
#define MIN_WARPS_BLOCK_Y 1
//#define VERBOSE
#define ERROR_THRESHOLD 500

const char Result[2][10] = {"SUCCEDED", "FAILED"};

// Kernel launch geometry chosen by findBestSplit().
typedef struct{
    dim3 block;   // threads per block (x: points, y: dimensions)
    dim3 grid;    // blocks per grid
    int s_mem;    // dynamic shared-memory bytes per block
}BestSplit;

BestSplit findBestSplit(int n, int d);
void printData(float *dataset, int n, int d);
void calculateDistances(float *distances, float *dataset, float *point, int n, int d);
void parallelDistance(float *distances, float *data, int n, int d);
__global__ void cudaReduce(float *temp, float *distances, int n, int d, int r);
__global__ void cudaDotProduct(float *dataset, float *point, float *product, int n, int d, int r);
// void serialReduce(float *sum, float *data, int n, int d);
// __global__ void deviceCalculatedDistances(float *dist, float *data, float *point, int n, int d);

/*
 * Generates n random d-dimensional points, computes distances to the first
 * point serially (CPU reference) and in parallel (GPU), then compares the
 * two within ERROR_THRESHOLD. Note the GPU path produces SQUARED distances,
 * which is why the validation squares the CPU result.
 */
int main(int argc, char *argv[]){
    // show cuda gpu details
    int d; // number of dimensions
    int n; // number of points
    // host variables
    float *h_dataset = NULL;
    float *h_distances = NULL;
    float *h_test_distances = NULL;
    // device variables
    float *d_point = NULL;
    float *d_dataset = NULL;
    float *d_distances = NULL;
    if(argc != 3){
        cout << "Wrong number of arguments. Aborting ..." << endl;
        return INPUT_ERROR;
    }else{
        // assign values
        n = atoi(argv[1]);
        d = atoi(argv[2]);
        cout << "Initializing " << n << " random data points of " << d << " dimensions" << endl;
    }
    // creating the Random dataset
    h_dataset = (float *)malloc(n * d * sizeof(float));
    if(h_dataset == NULL){
        cout << "Not enough memory. Aborting ..." << endl;
        free(h_dataset);
        return MEMORY_ERROR;
    }else{
        // initialize random generator engine (time-seeded, so runs differ)
        unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
        default_random_engine generator(seed);
        uniform_real_distribution<float> distribution(MIN_VALUE, MAX_VALUE);
        int length = n * d;
        for(int i = 0; i < length; i++){
            h_dataset[i] = distribution(generator);
        }
    }
    // serial stuff: CPU reference timing
    h_test_distances = (float *)malloc(n * sizeof(float));
    h_distances = (float *)malloc(n * sizeof(float));
    auto start = std::chrono::high_resolution_clock::now();
    calculateDistances(h_test_distances, h_dataset, &h_dataset[0], n, d);
    auto finish = std::chrono::high_resolution_clock::now();
    auto cpu_time = std::chrono::duration_cast<std::chrono::nanoseconds>(finish-start).count();
    // cuda stuff
    cudaMalloc(&d_dataset, n * d * sizeof(float));
    cudaMalloc(&d_point, d * sizeof(float));
    cudaMalloc(&d_distances, n * sizeof(float));
    // copy the dataset to the device
    cudaMemcpy(d_dataset, h_dataset, n * d * sizeof(float), cudaMemcpyHostToDevice);
    // NOTE(review): &d_dataset[0] is a DEVICE pointer but the copy kind is
    // HostToDevice — this looks wrong (likely meant h_dataset, or
    // cudaMemcpyDeviceToDevice). d_point is also never used afterwards;
    // parallelDistance passes &data[0] itself. Verify intent.
    cudaMemcpy(d_point, &d_dataset[0], d * sizeof(float), cudaMemcpyHostToDevice);
    start = std::chrono::high_resolution_clock::now();
    parallelDistance(d_distances ,d_dataset, n , d);
    finish = std::chrono::high_resolution_clock::now();
    auto gpu_time = std::chrono::duration_cast<std::chrono::nanoseconds>(finish-start).count();
    // copy the calculated distances back to the host
    cudaMemcpy(h_distances, d_distances, n * sizeof(float), cudaMemcpyDeviceToHost);
#ifdef VERBOSE
    cout << "CPU Time: " << cpu_time << endl;
    cout << "GPU Time: " << gpu_time << endl;
#endif
    cout << "Total Speed up: " << (float)cpu_time / gpu_time << endl;
    // validation: GPU returns squared distances, so square the CPU reference
    bool fail = 0;
    for(int i = 0; i < n; i++){
        float error = abs(pow(h_test_distances[i],2) - h_distances[i]);
        if(error > ERROR_THRESHOLD){
            cout << "Error exeeded threshold("<< ERROR_THRESHOLD << "): " << error << endl;
            fail = 1;
            break;
        }
    }
    cout << "Program: " << Result[fail] << endl;
    cudaFree(d_dataset);
    cudaFree(d_distances);
    free(h_distances);
    free(h_dataset);
    return 0;
}

/*
 * Function that reduces a sum in parallel efficiently by managing kernel assignments.
 * Searches for a divisor of the required y-warps so each block's y extent
 * (dimensions) fits within [MIN_WARPS_BLOCK_Y, MAX_WARPS_BLOCK_Y] warps,
 * minimizing the rounding error of the split.
 */
BestSplit findBestSplit(int n, int d){
    BestSplit split;
    // warps needed to cover d dimensions along y (ceiling division)
    int req_warps_y = d / WARP_SIZE + (d % WARP_SIZE != 0);
    float min_error = 100000.0;
    int best_dividor = -1;
    int num_of_warps = -1;
    // NOTE(review): the loop variable shadows the outer `split` struct —
    // legal but confusing; consider renaming.
    for(int split = 1; split < 1000; split++){
        float warps_in_block_y = (float)req_warps_y / split;
        if(MIN_WARPS_BLOCK_Y <= warps_in_block_y && warps_in_block_y <= MAX_WARPS_BLOCK_Y){
            float split_error = warps_in_block_y - (int)(req_warps_y / split);
            if(min_error > split_error){
                min_error = split_error;
                best_dividor = split;
                num_of_warps = warps_in_block_y;
            }
        }else if(warps_in_block_y < 1){
            break; // further divisors only shrink warps_in_block_y
        }
    }
    split.block.y = num_of_warps * WARP_SIZE;
    split.block.x = MAX_BLOCK_THREADS / split.block.y;
    split.grid.x = n / split.block.x + (n % split.block.x != 0);   // ceil(n / block.x)
    split.grid.y = best_dividor + (min_error > 0);                 // extra block row for the remainder
    // +1 column of padding for the per-block copy of the query point
    split.s_mem = (split.block.x + 1) * split.block.y * sizeof(float);
#ifdef VERBOSE
    cout << "Block: " << split.block.x << ", " << split.block.y << ", " << (split.block.x + 1) * split.block.y << endl;
    cout << "Grid: " << split.grid.x << ", " << split.grid.y << endl;
    cout << "Occupancy: in x: " << (float)n / (split.block.x * split.grid.x) << ", in y: " << (float)d / (split.block.y * split.grid.y) << endl;
#endif
    return split;
}

/*
 * Host driver for the two-stage squared-distance computation:
 *  1) cudaDotProduct: per-block partial sums of (x - p)^2 over a slice of
 *     the dimensions, written to d_product_temp (grid.y partials per point).
 *  2) cudaReduce: sums the grid.y partials per point into distances.
 * `distances` and `data` are device pointers.
 */
void parallelDistance(float *distances, float *data, int n, int d){
    float *d_product_temp = NULL;
    BestSplit b_split = findBestSplit(n, d);
    // calculate temp block & grid sizes for the second (reduction) kernel
    dim3 temp_block;
    dim3 temp_grid;
    int temp_size_x = b_split.block.x * b_split.grid.x;
    int temp_size_y = b_split.grid.y;
    // round the y extent up to a multiple of 8 threads
    temp_block.y = (temp_size_y / 8 + (temp_size_y % 8 != 0)) * 8;
    temp_block.x = MAX_BLOCK_THREADS / temp_block.y;
    temp_grid.x = n / temp_block.x + (n % temp_block.x != 0);
    temp_grid.y = 1;
    int shared_mem = temp_block.x * temp_block.y * sizeof(float);
#ifdef VERBOSE
    cout << "Temp Block: " << temp_block.x << ", " << temp_block.y << endl;
    cout << "Temp Grid: " << temp_grid.x << ", " << temp_grid.y << endl;
#endif
    int temp_product_size = temp_size_x * temp_size_y * sizeof(float);
    cudaMalloc(&d_product_temp, temp_product_size);
    // r = block.y rounded up to a power of two (reduction stride start)
    int r = pow(2,ceil(log2(b_split.block.y)));
    cudaDotProduct<<<b_split.grid, b_split.block, b_split.s_mem>>>(data, &data[0], d_product_temp, n, d, r);
    r = pow(2,ceil(log2(temp_block.y)));
    // cout << r << endl;
    cudaReduce<<<temp_grid, temp_block, shared_mem>>>(d_product_temp, distances, n, b_split.grid.y, r);
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();  // NOTE(review): unused
#ifdef VERBOSE
    if (errSync != cudaSuccess) printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
    if (errAsync != cudaSuccess) printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
    if (errSync == cudaSuccess && errAsync == cudaSuccess) printf("Kernals succesfully finished without any errors!\n");
#endif
    // NOTE(review): d_product_temp is never cudaFree'd — leak per call.
}

/*
 * Stage 1 kernel: computes (x - p)^2 = x^2 - 2xp + p^2 per element, then
 * tree-reduces along y (dimensions) within the block. Shared layout:
 * s_data[block.x * block.y] followed by s_point[block.y].
 * r must be the power-of-two ceiling of blockDim.y.
 */
__global__ void cudaDotProduct(float *data, float *point, float *product, int n, int d, int r){
    // copy dataset and point to shared memory
    extern __shared__ float s_mem[];
    int thread_id = threadIdx.x * blockDim.y + threadIdx.y;
    int point_offset = blockDim.x * blockDim.y;
    float *s_point = &s_mem[point_offset];
    float *s_data = &s_mem[0];
    int pos_x = blockIdx.x * blockDim.x + threadIdx.x;  // point index
    int pos_y = blockIdx.y * blockDim.y + threadIdx.y;  // dimension index
    // IF the thread belongs to the first row and is inside the block
    // dimensions copy the data, else if it is outside just zero initialize
    if(thread_id < blockDim.y && pos_y < d){
        s_point[threadIdx.y] = point[pos_y];
    }else if(thread_id < blockDim.y){
        s_point[threadIdx.y] = 0.0;
    }
    // copy data (zero-padded outside the dataset bounds)
    if(pos_x < n && pos_y < d){
        s_data[thread_id] = data[pos_x * d + pos_y];
    }else{
        s_data[thread_id] = 0.0;
    }
    __syncthreads();
    // calculate the squared difference in place: x^2 - 2xp + p^2
    s_data[thread_id] = s_data[thread_id] * s_data[thread_id] - 2 * s_data[thread_id] * s_point[threadIdx.y] + s_point[threadIdx.y] * s_point[threadIdx.y];
    __syncthreads();
    // reduce sum in parallel along y; the else branch keeps the add
    // non-divergent in shape (no-op) so all threads hit the barrier
    for(int s = r / 2; s > 0; s >>= 1){
        if(threadIdx.y < s && (threadIdx.y + s) < d){
            s_data[thread_id] += s_data[thread_id + s];
        }else{
            s_data[thread_id] += 0.0;
        }
        __syncthreads();
    }
    // copy data back: one partial per (point, y-block), laid out so that
    // each point's gridDim.y partials are contiguous
    pos_x = blockIdx.x * (blockDim.x * gridDim.y);
    pos_y = blockIdx.y;
    if(threadIdx.y == 0){
        product[pos_x + pos_y + threadIdx.x * gridDim.y] = s_data[thread_id];
    }
}

/*
 * Stage 2 kernel: sums each point's d (= gridDim.y of stage 1) partial sums
 * into a single squared distance. r must be the power-of-two ceiling of
 * blockDim.y.
 */
// complete unroll these iterations
__global__ void cudaReduce(float *temp, float *distances, int n, int d, int r){
    extern __shared__ float s_data[];
    int thread_id = threadIdx.x * blockDim.y + threadIdx.y;
    int pos_x = blockIdx.x * blockDim.x + threadIdx.x;  // point index
    int pos_y = threadIdx.y;                            // partial index
    // copy data to shared memory (zero-padded)
    if(pos_x < n && pos_y < d){
        s_data[thread_id] = temp[pos_x * d + pos_y];
    }else{
        s_data[thread_id] = 0.0;
    }
    __syncthreads();
    for(int s = r / 2; s > 0; s >>= 1){
        if(threadIdx.y < s && (threadIdx.y + s) < d){
            s_data[thread_id] += s_data[thread_id + s];
        }
        __syncthreads();
    }
    // copy back the data: lane y==0 holds the block-row total
    if(threadIdx.y == 0 && pos_x < n){
        distances[pos_x] = s_data[thread_id];
    }
}

/*
 * CPU reference: Euclidean distance (NOT squared — the caller squares it
 * when validating against the GPU result) from each point to `point`.
 */
void calculateDistances(float *distances, float *dataset, float *point, int n, int d){
    float temp;
    for(int i = 0; i < n; i++){
        temp = 0.0;
        for(int j = 0; j < d; j++){
            temp += pow(dataset[i * d + j] - point[j], 2);
        }
        distances[i] = pow(temp, 0.5);
    }
}

/* Debug helper: dump the dataset, one point per line. */
void printData(float *dataset, int n, int d){
    cout << "============ Dataset ============" << endl;
    for(int i = 0; i < n; i++){
        cout << i + 1 << ") ";
        for(int j = 0; j < d - 1; j++){
            cout << dataset[i * d + j] << ", ";
        }
        cout << dataset[(i + 1) * d - 1] << endl;
    }
}
24,130
/**
 * Global Memory (Symbol)
 * Demonstrates:
 * - Communication between host and device
 * - Method in which host accesses global memory via cudaMemcpyTo/FromSymbol
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_ELEMENTS 5

// Device-resident global array, addressed from the host by symbol.
__device__ int result[NUM_ELEMENTS];

// Report the most recent CUDA error, if any.
void check_cuda_errors()
{
    cudaError_t rc = cudaGetLastError();
    if (rc != cudaSuccess) {
        printf("Last CUDA error %s\n", cudaGetErrorString(rc));
    }
}

// One thread per element: bump each slot of the device-global array.
__global__ void incrementor()
{
    result[threadIdx.x]++;
}

int main(int argc, char **argv)
{
    int start[NUM_ELEMENTS];
    int host_result[NUM_ELEMENTS];
    cudaError_t rc;

    // Seed our RNG (fixed seed: deterministic inputs)
    srand(0);

    printf("Incrementor input:\n");
    for (int i = 0; i < NUM_ELEMENTS; i++) {
        start[i] = rand() % 100;
        printf("start[%d] = %d\n", i, start[i]);
    }

    /**
     * Push the initial values into the device symbol.
     */
    rc = cudaMemcpyToSymbol(result, &start, sizeof(start));
    if (rc != cudaSuccess) {
        printf("Could not copy to device. Reason: %s\n", cudaGetErrorString(rc));
    }

    incrementor<<<1, NUM_ELEMENTS>>>();
    check_cuda_errors();

    // Pull the incremented values back out of the device symbol.
    rc = cudaMemcpyFromSymbol(&host_result, result, sizeof(start));
    if (rc != cudaSuccess) {
        printf("Could not copy from device. Reason: %s\n", cudaGetErrorString(rc));
    }

    printf("Incrementor results:\n");
    for (int i = 0; i < NUM_ELEMENTS; i++) {
        printf("result[%d] = %d\n", i, host_result[i]);
    }

    return 0;
}
24,131
#include "includes.h"

/*
 * Tiled square matrix multiply: d_P = d_M * d_N, all Width x Width,
 * launched with TILE_WIDTH x TILE_WIDTH blocks.
 *
 * Fix: when Width is not a multiple of TILE_WIDTH, out-of-range tile slots
 * are now zero-filled. Previously they kept stale values from the prior
 * phase (or were uninitialized on the first phase), and the inner product
 * loop multiplied that garbage into Pvalue for valid edge elements.
 */
__global__ void MatrixMulKernelV3(float* d_M, float* d_N, float* d_P, int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; // tile of d_M
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; // tile of d_N

    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;

    float Pvalue = 0;
    for (int ph = 0; ph < ceil(Width/(float)TILE_WIDTH); ++ph){
        // Cooperative load of the phase's tiles; zero-pad outside the matrix
        // so the full TILE_WIDTH inner loop stays correct on partial tiles.
        if ((Row < Width) && (ph*TILE_WIDTH + tx) < Width)
            Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
        else
            Mds[ty][tx] = 0.0f;
        if ((ph*TILE_WIDTH + ty) < Width && Col < Width)
            Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col];
        else
            Nds[ty][tx] = 0.0f;
        __syncthreads(); // tiles fully loaded before use

        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += Mds[ty][k] * Nds[k][tx];
        __syncthreads(); // all reads done before the next phase overwrites
    }

    if ((Row < Width) && (Col < Width))
        d_P[Row*Width + Col] = Pvalue;
}
24,132
/*
 * Module    : Twine
 * Copyright : [2016..2017] Trevor L. McDonell
 * License   : BSD3
 *
 * Maintainer: Trevor L. McDonell <tmcdonell@cse.unsw.edu.au>
 * Stability : experimental
 * Portability: non-portable (GHC extensions)
 *
 * Convert between Accelerate's Struct-of-Array representation of complex
 * numbers and the Array-of-Struct representation necessary for CUBLAS.
 */

#include <cuda.h>
#include <cuComplex.h>

#ifdef __cplusplus
extern "C" {
#endif

/* SoA (real[], imag[]) -> AoS cuFloatComplex[]; grid-stride over `size`. */
__global__ void interleave
(
    cuFloatComplex * __restrict__ cplx,
    const float * __restrict__ real,
    const float * __restrict__ imag,
    const int size
)
{
    const int stride = blockDim.x * gridDim.x;

    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride) {
        cplx[i] = make_cuFloatComplex(real[i], imag[i]);
    }
}

/* AoS cuFloatComplex[] -> SoA (real[], imag[]); grid-stride over `size`. */
__global__ void deinterleave
(
    float * __restrict__ real,
    float * __restrict__ imag,
    const cuFloatComplex * __restrict__ cplx,
    const int size
)
{
    const int stride = blockDim.x * gridDim.x;

    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride) {
        const cuFloatComplex c = cplx[i];
        real[i] = cuCrealf(c);
        imag[i] = cuCimagf(c);
    }
}

#ifdef __cplusplus
}
#endif
24,133
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define MAX_THREADS 1024

// declarations
cudaError_t matchWithGPU(int* results, double* points, int* signs, double* w, int numOfPoints, int k);
cudaError_t updateLocationsWithGPU(double* locations, double* velocity, int numOfPoints, int k, double t);
void checkError_locations(cudaError_t cudaStatus, double* dev_locations, double* dev_velocity, const char* errorMessage);
void freeCudaMemory_locations(double* dev_locations, double* dev_velocity);
void freeCudaMemory_match(int* dev_results, double* dev_points, int* dev_signs, double* dev_w);
void checkError_match(cudaError_t cudaStatus, int* dev_results, double* dev_points, int* dev_signs, double* dev_w, const char* errorMessage);
__device__ int signGPU(double f);
__device__ double fGPU(const double* p, int offset, const double* w, int k);

// implementations

// changes results at a given index to 1 if the test fails
// (i.e. the sign of w . point disagrees with the expected sign).
// NOTE(review): no `index < numOfPoints` guard — when numOfPoints exceeds
// MAX_THREADS and is not a multiple of it, the last block's tail threads
// read/write out of bounds; confirm launch geometry or add a bounds check.
__global__ void matchKernel(int* results, const double* points, const int* signs, const double* w, int numOfPoints, int k)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x; //calculate index of each element in array
    int value = signGPU(fGPU(points, index*k, w, k));
    if (value != signs[index])
        results[index] = 1;
}

// updates the location at index based on location = location0 + velocity * t
// NOTE(review): same missing bounds guard as matchKernel (see above).
__global__ void updateLocationsKernel(double* location, const double* velocity, double t)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x; //calculate index of each element in array
    location[index] = location[index] + velocity[index] * t;
}

// function to free all arrays related to the match part of CUDA
void freeCudaMemory_match(int* dev_results, double* dev_points, int* dev_signs, double* dev_w)
{
    cudaFree(dev_results);
    cudaFree(dev_points);
    cudaFree(dev_signs);
    cudaFree(dev_w);
}

// function responsible to handle errors in the match part of CUDA
// NOTE(review): on error this frees the buffers but does NOT return or exit,
// so the caller keeps running with freed pointers and frees them again at
// the end — a use-after-free / double-free hazard worth fixing.
void checkError_match(cudaError_t cudaStatus, int* dev_results, double* dev_points, int* dev_signs, double* dev_w, const char* errorMessage)
{
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, errorMessage);
        fprintf(stderr, "\n");
        freeCudaMemory_match(dev_results, dev_points, dev_signs, dev_w);
    }
}

// tests all points to see if they're in the place they should be:
// copies points/signs/w to the device, runs matchKernel, and writes a 1
// into results[i] for every misclassified point.
cudaError_t matchWithGPU(int* results, double* points, int* signs, double* w, int numOfPoints, int k)
{
    char errorBuffer[100];
    int* dev_results = 0;
    double* dev_points = 0;
    int* dev_signs = 0;
    double* dev_w = 0;
    int extra = 0;
    int numOfBlocks, numOfThreads;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");

    // Allocate GPU buffers for every array
    cudaStatus = cudaMalloc((void**)&dev_results, numOfPoints * sizeof(int));
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMalloc failed!");
    cudaStatus = cudaMalloc((void**)&dev_points, numOfPoints * k * sizeof(double));
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMalloc failed!");
    cudaStatus = cudaMalloc((void**)&dev_signs, numOfPoints * sizeof(int));
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMalloc failed!");
    cudaStatus = cudaMalloc((void**)&dev_w, k * sizeof(double));
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMalloc failed!");

    // Copy input arrays from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_points, points, numOfPoints * k * sizeof(double), cudaMemcpyHostToDevice);
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMemcpy failed!");
    cudaStatus = cudaMemcpy(dev_signs, signs, numOfPoints * sizeof(int), cudaMemcpyHostToDevice);
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMemcpy failed!");
    cudaStatus = cudaMemcpy(dev_w, w, k * sizeof(double), cudaMemcpyHostToDevice);
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMemcpy failed!");

    // Calculate the number of blocks and threads needed (ceiling division).
    extra = numOfPoints % MAX_THREADS != 0 ? 1 : 0;
    numOfBlocks = (numOfPoints / MAX_THREADS + extra);
    numOfThreads = MAX_THREADS>numOfPoints ? numOfPoints : MAX_THREADS;

    // Launch a kernel on the GPU with one thread for each element.
    matchKernel<<<numOfBlocks, numOfThreads >>>(dev_results, dev_points, dev_signs, dev_w, numOfPoints, k);

    // Check for any errors launching the kernel.
    // NOTE(review): errorBuffer is formatted BEFORE knowing whether there is
    // an error; harmless, but it passes a %-containing string as a format to
    // fprintf inside checkError_match (format-string hazard).
    cudaStatus = cudaGetLastError();
    sprintf(errorBuffer, "matchKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, errorBuffer);

    // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    sprintf(errorBuffer, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, errorBuffer);

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(results, dev_results, numOfPoints * sizeof(int), cudaMemcpyDeviceToHost);
    checkError_match(cudaStatus, dev_results, dev_points, dev_signs, dev_w, "cudaMemcpy failed!");

    freeCudaMemory_match(dev_results, dev_points, dev_signs, dev_w);
    return cudaStatus;
}

// checks the sign of a given double (zero maps to +1)
__device__ int signGPU(double f)
{
    if (f < 0)
        return -1;
    return 1;
}

// calculates f based on the algorithm: dot product of w with the point
// starting at p[offset], over k dimensions
__device__ double fGPU(const double* p, int offset, const double* w, int k)
{
    int i;
    double result = 0;
    for (i = 0; i < k; i++)
    {
        result += w[i] * p[offset + i];
    }
    return result;
}

// updates the locations vector to its new position based on time and velocity:
// copies locations/velocity to the device, runs updateLocationsKernel over
// all numOfPoints*k coordinates, and copies the result back over `locations`.
cudaError_t updateLocationsWithGPU(double* locations, double* velocity, int numOfPoints, int k, double t)
{
    char errorBuffer[100];
    double* dev_locations = 0;
    double* dev_velocity = 0;
    int extra;
    int numOfBlocks, numOfThreads;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");

    // Allocate GPU buffers for the arrays
    cudaStatus = cudaMalloc((void**)&dev_locations, numOfPoints * k * sizeof(double));
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMalloc failed!");
    cudaStatus = cudaMalloc((void**)&dev_velocity, numOfPoints * k * sizeof(double));
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMalloc failed!");

    // Copy input vector from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_locations, locations, numOfPoints * k * sizeof(double), cudaMemcpyHostToDevice);
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMemcpy failed!");
    cudaStatus = cudaMemcpy(dev_velocity, velocity, numOfPoints * k * sizeof(double), cudaMemcpyHostToDevice);
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMemcpy failed!");

    // Calculate the number of blocks and threads needed (ceiling division
    // over all numOfPoints*k scalar coordinates).
    extra = (numOfPoints*k) % MAX_THREADS != 0 ? 1 : 0;
    numOfBlocks = ((numOfPoints*k) / MAX_THREADS + extra);
    numOfThreads = MAX_THREADS>(numOfPoints*k) ? (numOfPoints*k) : MAX_THREADS;

    // Launch a kernel on the GPU with one thread for each element. Each calculating a dimension in a point, going over all points
    updateLocationsKernel<<<numOfBlocks, numOfThreads>>>(dev_locations, dev_velocity, t);

    // Check for any errors launching the kernel.
    // NOTE(review): errorBuffer is formatted but never passed on — the
    // literal "cudaMemcpy failed!" is reported instead; looks like a
    // copy/paste slip from matchWithGPU.
    cudaStatus = cudaGetLastError();
    sprintf(errorBuffer, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMemcpy failed!");

    // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    sprintf(errorBuffer, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMemcpy failed!");

    // Copy output vector from GPU buffer to host memory.
    // In this case, overwrite the locations array on host memory with the updated one
    cudaStatus = cudaMemcpy(locations, dev_locations, numOfPoints * k * sizeof(double), cudaMemcpyDeviceToHost);
    checkError_locations(cudaStatus, dev_locations, dev_velocity, "cudaMemcpy failed!");

    freeCudaMemory_locations(dev_locations, dev_velocity);
    return cudaStatus;
}

// function responsible to handle errors in the update locations part of CUDA
// NOTE(review): same free-and-continue hazard as checkError_match.
void checkError_locations(cudaError_t cudaStatus, double* dev_locations, double* dev_velocity, const char* errorMessage)
{
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, errorMessage);
        fprintf(stderr, "\n");
        freeCudaMemory_locations(dev_locations, dev_velocity);
    }
}

// frees the device arrays used by the update locations part of CUDA
void freeCudaMemory_locations(double* dev_locations, double* dev_velocity)
{
    cudaFree(dev_locations);
    cudaFree(dev_velocity);
}
24,134
#include "includes.h"

#define MAX_THREADS 20
#define pi(x) printf("%d\n",x);
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )
#define th_p_block 256

/*
 * Block-level partial dot product: each thread accumulates a grid-stride
 * slice of vec1 . vec2, the block tree-reduces the partials in shared
 * memory, and thread 0 writes one partial sum per block to
 * vec3[blockIdx.x]. Requires blockDim.x == th_p_block, a power of two
 * (256 is okay), for the halving reduction to be exact.
 */
__global__ void dotPro(long n, double *vec1, double *vec2, double *vec3)
{
    __shared__ double partial[th_p_block];

    const unsigned int lane = threadIdx.x;
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Grid-stride accumulation of this thread's share of the products.
    double acc = 0;
    while (idx < n) {
        acc += vec1[idx] * vec2[idx];
        idx += blockDim.x * gridDim.x;
    }
    partial[lane] = acc;
    __syncthreads();

    // Tree reduction in shared memory; the stride halves each round,
    // which is why the block size must be a power of two.
    for (unsigned int s = blockDim.x / 2; s != 0; s /= 2) {
        if (lane < s) {
            partial[lane] += partial[lane + s];
        }
        __syncthreads(); // sync threads in the current block
    }

    // Thread 0 publishes this block's partial result.
    if (lane == 0) {
        vec3[blockIdx.x] = partial[0];
    }
}
24,135
//
// Created by kindr on 2021/4/28.
//
#include "pinnedMemory.cuh"
#include "../../common/utils.cuh"
#include <cstdio>

/*
 * Round-trip n floats h_a -> d -> h_b across the PCIe bus and verify the
 * data survived intact. Returns true when every element matches.
 */
bool profileCopies(float *h_a, float *h_b, float *d, unsigned int n) {
    const unsigned int byteCount = n * sizeof(float);

    CHECK(cudaMemcpy(d, h_a, byteCount, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(h_b, d, byteCount, cudaMemcpyDeviceToHost));

    for (size_t i = 0; i < n; ++i)
        if (h_a[i] != h_b[i])
            return false;

    return true;
}

/*
 * Compare transfer timing of pageable host memory (malloc) against pinned
 * host memory (cudaMallocHost) for nElements floats, printing whether each
 * round-trip preserved the data.
 */
void pinnedMemory(size_t nElements) {
    const size_t bytes = nElements * sizeof(float);

    // Pageable host buffers.
    float *h_aPageable = (float *) malloc(bytes);
    float *h_bPageable = (float *) malloc(bytes);
    for (size_t i = 0; i < nElements; ++i)
        h_aPageable[i] = static_cast<float>(i);

    // Pinned (page-locked) host buffers with identical contents.
    float *h_aPinned;
    float *h_bPinned;
    CHECK(cudaMallocHost((void **) &h_aPinned, bytes));
    CHECK(cudaMallocHost((void **) &h_bPinned, bytes));
    memcpy(h_aPinned, h_aPageable, bytes);
    memset(h_bPageable, 0, bytes);
    memset(h_bPinned, 0, bytes);

    // One device buffer shared by both experiments.
    float *d_a;
    CHECK(cudaMalloc((void **) &d_a, bytes));

    TIME([&]() {
        bool isSame = profileCopies(h_aPageable, h_bPageable, d_a, nElements);
        printf("Pageable isSame: %s --- ", isSame ? "true" : "false");
    });

    TIME([&]() {
        bool isSame = profileCopies(h_aPinned, h_bPinned, d_a, nElements);
        printf("Pinned isSame: %s --- ", isSame ? "true" : "false");
    });

    // cleanup
    cudaFree(d_a);
    cudaFreeHost(h_aPinned);
    cudaFreeHost(h_bPinned);
    free(h_aPageable);
    free(h_bPageable);
}
24,136
#include "includes.h"

/*
 * Row-per-thread matrix addition: the thread at global y-index `row` sums
 * one full row of M and N element-wise into ans. All matrices are
 * size x size, row-major; launch with at least `size` threads along y
 * (the x dimension is unused).
 */
__global__ void matrixAddKernel2(float* ans, float* M, float* N, int size)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= size)
        return; // tail threads past the last row do nothing

    int base = row * size; // start of this thread's row
    for (int col = 0; col < size; ++col)
        ans[base + col] = M[base + col] + N[base + col];
}
24,137
#include <stdio.h>

/*
 * Simple unimportant kernel: each thread writes a + b + its own index.
 */
__global__ void AplusB(int *ret, int a, int b)
{
    ret[threadIdx.x] = a + b + threadIdx.x;
}

int main()
{
    // Create a managed (unified) buffer addressable from host and device.
    int *ret;
    cudaMallocManaged(&ret, 1000 * sizeof(int));

    // One block of 1000 threads fills the array.
    AplusB<<< 1, 1000 >>>(ret, 10, 100);
    cudaDeviceSynchronize(); // wait before the host touches managed memory

    // Print the results
    for (int i = 0; i < 1000; i++) {
        printf("%d: A+B = %d\n", i, ret[i]);
    }

    // Free the unneeded memory
    cudaFree(ret);
    return 0;
}
24,138
/*
 * Final reduction stage of a filtering pipeline: one block per output pixel
 * p (selected by blockIdx.z) sums that pixel's partial weights (my_w) and
 * partial normalizers (Z), then writes the normalized value
 * filtI[p] = sum(w) / sum(Z).
 *
 * Three variants differ only in the number of partials per pixel and the
 * expected blockDim: 256 partials with blockDim=[16,16,1], 64 with [8,8,1],
 * 16 with [4,4,1].  m and n are unused in all three — presumably the image
 * extents, kept for a uniform signature; TODO confirm.
 *
 * The __syncthreads() barriers sit OUTSIDE the tid-guarded adds, so every
 * thread in the block reaches them — exact placement matters; do not fold
 * them into the conditionals.  The tree stops at stride 2 and the final
 * write combines elements 0 and 1 directly.
 */
__global__ void my_reduce256(float const * const partialSum16x16, float const * const ZpartialSum16x16, float *filtI, int m, int n)
{
    __shared__ float Z[256], my_w[256];
    int p=/*blockDim.z*blockIdx.z+threadIdx.z*/blockIdx.z;   // output pixel index
    int tid = 16*threadIdx.y+threadIdx.x;                    // linear thread id in [0,256)
    // Stage this pixel's 256 partials into shared memory.
    Z[tid]=ZpartialSum16x16[p*256+tid];
    my_w[tid]=partialSum16x16[p*256+tid];
    __syncthreads();
    // Pairwise tree reduction, halving the active range each step.
    if (tid<128) {my_w[tid]+=my_w[tid+128]; Z[tid]+=Z[tid+128]; } __syncthreads();
    if (tid< 64) {my_w[tid]+=my_w[tid+ 64]; Z[tid]+=Z[tid+ 64]; } __syncthreads();
    if (tid< 32) {my_w[tid]+=my_w[tid+ 32]; Z[tid]+=Z[tid+ 32]; } __syncthreads();
    if (tid< 16) {my_w[tid]+=my_w[tid+ 16]; Z[tid]+=Z[tid+ 16]; } __syncthreads();
    if (tid<  8) {my_w[tid]+=my_w[tid+  8]; Z[tid]+=Z[tid+  8]; } __syncthreads();
    if (tid<  4) {my_w[tid]+=my_w[tid+  4]; Z[tid]+=Z[tid+  4]; } __syncthreads();
    if (tid<  2) {my_w[tid]+=my_w[tid+  2]; Z[tid]+=Z[tid+  2]; } __syncthreads();
    // Thread 0 combines the last two partials and normalizes.
    if (tid<  1) { filtI[p]=(my_w[0]+my_w[1])/(Z[0]+Z[1]); }
}

/* 64-partial variant: blockDim=[8,8,1]; see my_reduce256 for details. */
__global__ void my_reduce64(float const * const partialSum16x16, float const * const ZpartialSum16x16, float *filtI, int m, int n)
{
    __shared__ float Z[64], my_w[64];
    int p=/*blockDim.z*blockIdx.z+threadIdx.z*/blockIdx.z;   // output pixel index
    int tid = 8*threadIdx.y+threadIdx.x;                     // linear thread id in [0,64)
    Z[tid]=ZpartialSum16x16[p*64+tid];
    my_w[tid]=partialSum16x16[p*64+tid];
    __syncthreads();
    if (tid< 32) {my_w[tid]+=my_w[tid+ 32]; Z[tid]+=Z[tid+ 32]; } __syncthreads();
    if (tid< 16) {my_w[tid]+=my_w[tid+ 16]; Z[tid]+=Z[tid+ 16]; } __syncthreads();
    if (tid<  8) {my_w[tid]+=my_w[tid+  8]; Z[tid]+=Z[tid+  8]; } __syncthreads();
    if (tid<  4) {my_w[tid]+=my_w[tid+  4]; Z[tid]+=Z[tid+  4]; } __syncthreads();
    if (tid<  2) {my_w[tid]+=my_w[tid+  2]; Z[tid]+=Z[tid+  2]; } __syncthreads();
    if (tid<  1) { filtI[p]=(my_w[0]+my_w[1])/(Z[0]+Z[1]); }
}

/* 16-partial variant: blockDim=[4,4,1]; see my_reduce256 for details. */
__global__ void my_reduce16(float const * const partialSum16x16, float const * const ZpartialSum16x16, float *filtI, int m, int n)
{
    __shared__ float Z[16], my_w[16];
    int p=/*blockDim.z*blockIdx.z+threadIdx.z*/blockIdx.z;   // output pixel index
    int tid = 4*threadIdx.y+threadIdx.x;                     // linear thread id in [0,16)
    Z[tid]=ZpartialSum16x16[p*16+tid];
    my_w[tid]=partialSum16x16[p*16+tid];
    __syncthreads();
    if (tid<  8) {my_w[tid]+=my_w[tid+  8]; Z[tid]+=Z[tid+  8]; } __syncthreads();
    if (tid<  4) {my_w[tid]+=my_w[tid+  4]; Z[tid]+=Z[tid+  4]; } __syncthreads();
    if (tid<  2) {my_w[tid]+=my_w[tid+  2]; Z[tid]+=Z[tid+  2]; } __syncthreads();
    if (tid<  1) { filtI[p]=(my_w[0]+my_w[1])/(Z[0]+Z[1]); }
}
24,139
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <math.h>
#include <bits/stdc++.h>
using namespace std;

// True when (X, Y) lies inside an M x N image. Relies on M and N being in
// scope at the expansion site; here the image is square (16x16) so the
// X-vs-Y / M-vs-N pairing at call sites is not observable — verify before
// using with non-square images.
#define isValid(X, Y) (X >= 0 && Y>=0 && X < M && Y < N)

/*
 * 3x3 box blur of an M x N RGB image: each thread averages its pixel's
 * valid neighbourhood per channel, reading from a and writing to b
 * (interleaved RGB, row-major). Launched with exactly one thread per pixel
 * (grid 1x1, block 16x16 below), so there is no bounds guard on the output.
 */
__global__ void image_bluring(float* a, float* b, int M, int N)
{
    //__shared__ float[16][16][3];
    int global_x = blockDim.x * blockIdx.x + threadIdx.x ;
    int global_y = blockDim.y * blockIdx.y + threadIdx.y ;
    float channel1 = 0, channel2 = 0, channel3 = 0;
    int count = 0; // valid neighbours actually summed (edge pixels have fewer)
    for(int i = global_x - 1; i <= global_x + 1; i++)
    {
        for(int j = global_y - 1; j <= global_y + 1; j++){
            if(isValid(j,i))
            {
                //printf("%f\n",a[(j*N+i)*3]);
                channel1 += a[(j*N + i)*3];
                channel2 += a[(j*N + i)*3 + 1];
                channel3 += a[(j*N + i)*3 + 2];
                count++;
            }
        }
    }
    channel1 = channel1 / count;
    channel2 = channel2 / count;
    channel3 = channel3 / count;
    //printf("%f\n",channel1);
    b[(global_y * N + global_x)*3 ] = channel1;
    b[(global_y * N + global_x)*3 + 1] = channel2;
    b[(global_y * N + global_x)*3 + 2] = channel3;
    // NOTE(review): debug print left in — serializes the kernel; remove for
    // real runs.
    printf("%f\n",b[(global_y*N+global_x)*3]);
}

// Abort the program if a CUDA call failed.
void handle_error(cudaError_t error)
{
    if (error != cudaSuccess) {
        std::cout << "Cuda Error. Exiting...";
        exit(0);
    }
}

// Fill an M x N x 3 image with 1.0 in every channel.
void initialise_matrix(int M, int N, float A[])
{
    for(int i = 0; i < M; i++)
    {
        for (int j = 0; j < N; j++)
        {
            for (int k = 0; k<3; k++)
            {
                A[(i*N + j )*3 + k ] = 1.0;
            }
        }
    }
}

// Fill a 3x3 averaging kernel (all 1/9).
// NOTE(review): main() builds this but never passes it to the GPU — the
// device kernel hard-codes the box average instead.
void get_kernel(float K[][3])
{
    for(int i=0; i < 3; i++)
    {
        for(int j=0; j < 3;j++)
        {
            K[i][j] = 1.0/9.0;
        }
    }
}

// Print a 16x16x3 image, one channel plane at a time.
void print(float a[])
{
    for(int k=0;k<3;k++)
    {
        for(int i=0;i<16;i++)
        {
            for(int j=0;j<16;j++)
                cout<<a[(i*16+j)*3+k]<< " " ;
            cout<<endl;
        }
        cout<<endl;
    }
    cout<<endl<<endl;
}

/*
 * Demo driver: blurs a constant 16x16 image on the GPU and prints the
 * result. The final cudaMemcpy implicitly synchronizes with the kernel.
 * NOTE(review): I and R are never cudaFree'd.
 */
int main()
{
    float image[16*16*3];
    float result[16*16*3];
    float kernel[3][3];
    initialise_matrix(16,16,image);
    get_kernel(kernel);
    float *I, *R;
    size_t size = 16 * 16 * 3 * sizeof(float);
    handle_error(cudaMalloc((void**) &I, size));
    handle_error(cudaMalloc((void**) &R, size));
    cudaMemcpy(I,image,size,cudaMemcpyHostToDevice);
    dim3 grid_dim(1,1,1);
    dim3 block_dim(16,16,1); // exactly one thread per pixel
    image_bluring<<<grid_dim, block_dim>>> (I, R, 16, 16);
    cudaMemcpy(result, R,size,cudaMemcpyDeviceToHost);
    print(result);
}
24,140
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

/*
 * Square matrix multiplication benchmark: C = A * B, width x width doubles.
 *
 * Timings collected on a GeForce GT 1030 (full nvprof logs in VCS history):
 *   sequential CPU            ~69 s
 *   multicore CPU (OpenMP)    ~17 s
 *   GPU via OpenMP offload    ~5.4 s   (72 warps,     86.81% warp exec. efficiency)
 *   GPU via CUDA (this file)  ~2.8 s   (127008 warps, 99.21% warp exec. efficiency)
 */

/*
 * One thread per output element: thread (i, j) computes C[i][j] as the dot
 * product of row i of A and column j of B.  Guarded so partial edge blocks
 * are safe for any width.
 */
__global__ void mm_cuda(double* a, double* b, double* c, int width)
{
    int j = blockIdx.y*blockDim.y+threadIdx.y;
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if((i < width) && (j < width))
    {
        double sum = 0;
        for (int k = 0; k < width; k++)
        {
            sum += a[i * width + k] * b[k * width + j];
        }
        c[i * width + j] = sum;
    }
}

int main()
{
    int width = 2000;
    // size_t instead of int: width*width*sizeof(double) overflows int for
    // widths not much larger than this one.
    size_t size = (size_t)width * width * sizeof(double);

    double *a = (double*) malloc (size);
    double *b = (double*) malloc (size);
    double *c = (double*) malloc (size);
    if (!a || !b || !c) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    // A[i][j] = i, B[i][j] = j, so the expected result is easy to verify.
    for(int i = 0; i < width; i++)
    {
        for(int j = 0; j < width; j++)
        {
            a[i*width+j] = i;
            b[i*width+j] = j;
            c[i*width+j] = 0;
        }
    }

    double *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_b, size);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_c, size);

    int block_size = 32;
    dim3 dimGrid((width-1)/block_size+1, (width-1)/block_size+1, 1);
    dim3 dimBlock(block_size, block_size, 1);
    mm_cuda<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, width);

    // Kernel launches report nothing by themselves; query explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "mm_cuda launch failed: %s\n", cudaGetErrorString(err));

    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Host arrays were previously leaked.
    free(a);
    free(b);
    free(c);
    return 0;
}
24,141
#include "MatrixUtilities.cuh"

// Reads a matrix from a text file laid out as:
//   <height> <width>
//   row-major float values, whitespace-separated
// Aborts the whole process if the file cannot be opened.
HostMatrix MatrixUtilities::loadFromFile(const char* fileName)
{
    std::ifstream fin;
    fin.open(fileName);
    if (!fin.is_open()) {
        std::cerr << "Could not open " << fileName << "." << std::endl;
        exit(EXIT_FAILURE);
    }
    unsigned int height = 0;
    unsigned int width = 0;
    fin >> height >> width;
    HostMatrix matrix(height, width);
    // NOTE(review): individual reads are not checked; a short/garbled file
    // silently leaves trailing elements at 0.0f.
    for (unsigned int i = 0; i < height; ++i) {
        for (unsigned int j = 0; j < width; ++j) {
            float value = 0.0f;
            fin >> value;
            matrix.setElement(i, j, value);
        }
    }
    fin.close();
    return matrix;
}

// Writes a matrix in the same text format loadFromFile() reads,
// with 6 fixed decimal places per value.  Aborts on open failure.
void MatrixUtilities::saveToFile(const HostMatrix& matrix, const char* fileName)
{
    std::ofstream fout;
    fout.open(fileName);
    if (!fout.is_open()) {
        std::cerr << "Could not open " << fileName << "." << std::endl;
        exit(EXIT_FAILURE);
    }
    fout.setf(std::ios::fixed, std::ios::floatfield);
    fout.precision(6);
    fout << matrix.getHeight() << " " << matrix.getWidth() << std::endl;
    for (unsigned int i = 0; i < matrix.getHeight(); ++i) {
        for (unsigned int j = 0; j < matrix.getWidth(); ++j) {
            fout << matrix.getElement(i, j) << " ";
        }
        fout << std::endl;
    }
    fout.close();
}

// Element-wise comparison with absolute tolerance `epsilon`.
// Returns false on any dimension mismatch or out-of-tolerance element.
bool MatrixUtilities::compare(const HostMatrix& matrixA, const HostMatrix& matrixB, float epsilon)
{
    if (matrixA.getHeight() != matrixB.getHeight() || matrixA.getWidth() != matrixB.getWidth()) {
        return false;
    } else {
        for (unsigned int i = 0; i < matrixA.getHeight(); ++i) {
            for (unsigned int j = 0; j < matrixA.getWidth(); ++j) {
                if (fabs(matrixA.getElement(i, j) - matrixB.getElement(i, j)) > epsilon) {
                    return false;
                }
            }
        }
        return true;
    }
}

// Copies any matrix (host- or device-resident) into a new HostMatrix,
// picking the matching cudaMemcpy direction.
// NOTE(review): the cudaMemcpy return value is not checked here.
HostMatrix MatrixUtilities::copyToHost(const Matrix& matrix)
{
    HostMatrix matrixCopy(matrix.getHeight(), matrix.getWidth());
    size_t count = matrix.getHeight() * matrix.getWidth() * sizeof(float);
    cudaMemcpyKind kind = matrix.isOnDevice()? cudaMemcpyDeviceToHost: cudaMemcpyHostToHost;
    cudaMemcpy(matrixCopy.getElements(), matrix.getElements(), count, kind);
    return matrixCopy;
}

// Copies any matrix (host- or device-resident) into a new DeviceMatrix,
// picking the matching cudaMemcpy direction.
DeviceMatrix MatrixUtilities::copyToDevice(const Matrix& matrix)
{
    DeviceMatrix matrixCopy(matrix.getHeight(), matrix.getWidth());
    size_t count = matrix.getHeight() * matrix.getWidth() * sizeof(float);
    cudaMemcpyKind kind = matrix.isOnDevice()? cudaMemcpyDeviceToDevice: cudaMemcpyHostToDevice;
    cudaMemcpy(matrixCopy.getElements(), matrix.getElements(), count, kind);
    return matrixCopy;
}

// Same-space clone: host matrix to a fresh host matrix.
template <>
HostMatrix MatrixUtilities::copy<HostMatrix>(const HostMatrix& matrix)
{
    HostMatrix matrixCopy(matrix.getHeight(), matrix.getWidth());
    size_t count = matrix.getHeight() * matrix.getWidth() * sizeof(float);
    cudaMemcpy(matrixCopy.getElements(), matrix.getElements(), count, cudaMemcpyHostToHost);
    return matrixCopy;
}

// Same-space clone: device matrix to a fresh device matrix.
template <>
DeviceMatrix MatrixUtilities::copy<DeviceMatrix>(const DeviceMatrix& matrix)
{
    DeviceMatrix matrixCopy(matrix.getHeight(), matrix.getWidth());
    size_t count = matrix.getHeight() * matrix.getWidth() * sizeof(float);
    cudaMemcpy(matrixCopy.getElements(), matrix.getElements(), count, cudaMemcpyDeviceToDevice);
    return matrixCopy;
}
24,142
#include <stdio.h>
#include <cuda_runtime.h>

// Abort-on-error helper for CUDA runtime calls (was present but commented out).
#define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(1); } }

// Fill an int array with 0, 1, 2, ... size-1.
void initialInt(int *ip, int size)
{
    for (int i = 0; i < size; i++)
    {
        ip[i] = i;
    }
}

// Print an ny x nx int matrix, row by row.
void printMatrix(int *C, const int nx, const int ny)
{
    int *ic = C;
    for (int i = 0; i < ny; i++)
    {
        for (int j = 0; j < nx; j++)
        {
            printf("%3d", ic[j]);
        }
        ic += nx;
        printf("\n");
    }
    printf("\n");
}

// One thread per matrix cell: print this thread's block/thread coordinates,
// its (ix, iy) matrix coordinates, the flattened index, and the value there.
__global__ void printThreadIndex(int *A, const int nx, const int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = ix + (iy * nx);
    printf("thread_id (%d, %d) block_id (%d, %d) coordinates (%d, %d) global index %2d ival %2d\n",
           threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]);
}

// Report and select the device with the given id.
void devConfig(const int devId)
{
    cudaDeviceProp devProp;
    CHECK(cudaGetDeviceProperties(&devProp, devId));
    printf("Using Device %d: %s\n", devId, devProp.name);
    CHECK(cudaSetDevice(devId));
}

int main(int argc, char *argv[])
{
    devConfig(0);

    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    // Was sizeof(float): the matrix holds ints.  Correct today only because
    // sizeof(int) == sizeof(float) on this platform.
    int nBytes = nxy * sizeof(int);

    int *h_A;
    h_A = (int *)malloc(nBytes);
    initialInt(h_A, nxy);
    printMatrix(h_A, nx, ny);

    int *d_MatA;
    CHECK(cudaMalloc((void **)&d_MatA, nBytes));
    CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));

    dim3 block(4, 2);
    dim3 grid((nx + (block.x - 1)) / block.x, (ny + (block.y - 1)) / block.y);
    printThreadIndex <<< grid, block >>>(d_MatA, nx, ny);
    CHECK(cudaGetLastError());          // surface launch errors
    CHECK(cudaDeviceSynchronize());     // flush device printf before exit

    CHECK(cudaFree(d_MatA));
    free(h_A);
    cudaDeviceReset();
    return (0);
}
24,143
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;

// Thin C-callable wrappers around CUDA runtime calls, so code compiled
// without nvcc can still allocate/free device memory and synchronize.
// NOTE(review): none of these check the cudaError_t return value; failures
// here surface only at a later CUDA call.

// Enable mapped (zero-copy) host allocations; must run before any context work.
void cuSetDeviceFlags(){
    cudaSetDeviceFlags(cudaDeviceMapHost);
}

// Allocate a managed (unified-memory) r x c image buffer with `channel`
// bytes per pixel element (sized for unsigned char data).
void cuMallocManaged(void** h_img, int r, int c, int channel){
    cudaMallocManaged(h_img,sizeof(unsigned char)*r*c * channel);
}

// Allocate a device-only r x c float buffer.
void cuMalloc(void** h_img, int r, int c){
    cudaMalloc(h_img, sizeof(float)*r*c);
}

// Block the host until all queued device work completes.
void cuDeviceSynchronize(){
    cudaDeviceSynchronize();
}

// Release memory obtained from cuMalloc/cuMallocManaged.
void cuFree(void* mem){
    cudaFree(mem);
}
24,144
/** * @file * @author answeror <answeror@gmail.com> * @date 2012-04-05 * * @section DESCRIPTION * * */ //#include <boost/range/algorithm/fill.hpp>
24,145
/* Name: Daniyal Manair Student Number: 20064993 */
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <vector>
#include <stdio.h>
#include <random>
#include <algorithm>
#include <chrono>
#include <map>

/*
 * Shared implementation for square tiled matrix multiplication C = A * B
 * (N x N floats).  Assumes blockDim == (TILE, TILE) and N is a multiple of
 * TILE (true for every size/tile combination used by main below), so no
 * bounds guards are needed.  The five public kernels below only differ in
 * tile size; templating removes the copy-paste.
 */
template <int TILE>
__device__ void tiledMatrixMul(float* A, float* B, float* C, const int N)
{
    __shared__ float t_A[TILE][TILE];
    __shared__ float t_B[TILE][TILE];
    const unsigned int tx = threadIdx.x;
    const unsigned int ty = threadIdx.y;
    const unsigned int row = blockIdx.y * TILE + ty;
    const unsigned int col = blockIdx.x * TILE + tx;
    float cValue = 0.0f;
    for (int i = 0; i < (N / TILE); i++)
    {
        // Stage one TILE x TILE tile of A and of B into shared memory.
        t_A[ty][tx] = A[row*N + i*TILE + tx];
        t_B[ty][tx] = B[(i*TILE + ty)*N + col];
        __syncthreads();            // tiles fully written before any reads
        for (int j = 0; j < TILE; j++)
            cValue += t_A[ty][j] * t_B[j][tx];
        __syncthreads();            // all reads done before next overwrite
    }
    C[row*N + col] = cValue;
}

__global__ void TiledMatrixMulGPU2(float* A, float* B, float* C, const int N)  { tiledMatrixMul<2>(A, B, C, N); }
__global__ void TiledMatrixMulGPU4(float* A, float* B, float* C, const int N)  { tiledMatrixMul<4>(A, B, C, N); }
__global__ void TiledMatrixMulGPU10(float* A, float* B, float* C, const int N) { tiledMatrixMul<10>(A, B, C, N); }
__global__ void TiledMatrixMulGPU20(float* A, float* B, float* C, const int N) { tiledMatrixMul<20>(A, B, C, N); }
__global__ void TiledMatrixMulGPU25(float* A, float* B, float* C, int N)       { tiledMatrixMul<25>(A, B, C, N); }

// Fill with pseudo-random values in [0, 25.5].
void initialData(float* matrix, const int size)
{
    for (int i = 0; i < size; i++)
        matrix[i] = (float)(rand() & 0xFF) / 10.0f;
}

// Reference serial multiply; accumulates into C (C must be zeroed by caller).
void MatrixMulCPU(float* A, float* B, float* C, const int N)
{
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            for (int k = 0; k < N; k++)
                C[i * N + j] += A[i * N + k] * B[k * N + j];
        }
    }
}

// Element-wise comparison of `size` floats with absolute tolerance.
void checkResult(float* CPU, float* GPU, const int size)
{
    double epsilon = 1.0E-8;
    for (int i = 0; i < size; i++)
    {
        if (abs(CPU[i] - GPU[i]) > epsilon)
        {
            printf("CPU %f GPU %f ", CPU[i], GPU[i]);
            printf("Arrays do not match.\n\n");
            return;
        }
    }
    printf("Test PASSED\n\n");
}

// Debug helper: dump an array as a bracketed list.
void printArr(float* matrix, const int size)
{
    printf("[");
    for (int i = 0; i < size; i++)
        printf("%f,", matrix[i]);
    printf("\b]\n");
}

/*
 * Run one N x N multiplication on the GPU with the given tile size, time it
 * with CUDA events, verify against the CPU result, and append a CSV row.
 */
void GPUtest(float* C_A, float* C_B, float* CPUResult, const int tileSize, const int N)
{
    cudaEvent_t gStart, gEnd;
    float timeDuration;
    float *G_A, *G_B, *G_C, *GPUResult;
    size_t size = N * N * sizeof(float);

    cudaMalloc((void**)&G_A, size);
    cudaMalloc((void**)&G_B, size);
    cudaMalloc((void**)&G_C, size);
    GPUResult = (float*)malloc(size);
    memset(GPUResult, 0, size);     // was memset(..., 0.0, ...): double literal
    cudaEventCreate(&gStart);
    cudaEventCreate(&gEnd);

    cudaMemcpy(G_A, C_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(G_B, C_B, size, cudaMemcpyHostToDevice);

    // One thread per output element, tiles of tileSize x tileSize.
    dim3 block(tileSize, tileSize, 1);
    dim3 grid((int)ceil((N + block.x - 1) / block.x), (int)ceil((N + block.y - 1) / block.y), 1);

    cudaEventRecord(gStart);
    if (tileSize == 2)
        TiledMatrixMulGPU2 <<<grid, block>>> (G_A, G_B, G_C, N);
    else if (tileSize == 4)
        TiledMatrixMulGPU4 <<<grid, block>>> (G_A, G_B, G_C, N);
    else if (tileSize == 10)
        TiledMatrixMulGPU10 <<<grid, block>>> (G_A, G_B, G_C, N);
    else if (tileSize == 20)
        TiledMatrixMulGPU20 <<<grid, block>>> (G_A, G_B, G_C, N);
    else if (tileSize == 25)
        TiledMatrixMulGPU25 <<<grid, block >>> (G_A, G_B, G_C, N);
    cudaEventRecord(gEnd);
    cudaEventSynchronize(gEnd);
    cudaEventElapsedTime(&timeDuration, gStart, gEnd);

    cudaMemcpy(GPUResult, G_C, size, cudaMemcpyDeviceToHost);
    printf("The GPU took %f to perform the computation with tile size %d.\n", timeDuration, tileSize);
    checkResult(CPUResult, GPUResult, N*N);

    cudaFree(G_A);
    cudaFree(G_B);
    cudaFree(G_C);
    free(GPUResult);

    FILE *fp;
    fp=fopen("machineProblem4.csv","a");
    fprintf(fp,"%d,%d,%f\n",N,tileSize,timeDuration);
    fclose(fp);
}

// Multiply two random N x N matrices on the CPU once, then on the GPU with
// every tile size, verifying and logging each run.
void computeMatrix(const int N)
{
    printf("------------------------------------------------------------------------\n\n");
    printf("%dx%d matrix multiplication.\n\n", N, N);

    float *C_A, *C_B, *C_C;
    size_t size = N * N * sizeof(float);
    C_A = (float*)malloc(size);
    C_B = (float*)malloc(size);
    C_C = (float*)malloc(size);

    initialData(C_A, N*N);
    initialData(C_B, N*N);
    memset(C_C, 0, size);           // reference accumulates, so start at zero

    MatrixMulCPU(C_A, C_B, C_C, N);

    int tileSizes [] = {2, 4, 10, 20, 25};
    for (int i = 0; i < 5; i++)
        GPUtest(C_A, C_B, C_C, tileSizes[i], N);

    free(C_A);
    free(C_B);
    free(C_C);
    cudaDeviceReset();
}

// --------------------BONUS-----------------------------
#define BONUSTILE_C 8
#define BONUSTILE_R 14

/*
 * Rectangular multiply C(MxK) = A(MxN) * B(NxK) with a non-square
 * BONUSTILE_R x BONUSTILE_C thread tile; out-of-range tile elements are
 * zero-filled so arbitrary shapes work.  (Earlier draft of the kernel
 * below; kept for reference/comparison, currently unused by main.)
 */
__global__ void TiledMatrixMulGPUBonus1(float* A, float* B, float* C, int M, int N, int K)
{
    int Brows = N;
    int Crows = M;
    int Ccols = K;
    float cValue = 0;
    unsigned int bx = blockIdx.x;
    unsigned int by = blockIdx.y;
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    int row = by*BONUSTILE_R + ty;
    int col = bx*BONUSTILE_C + tx;
    __shared__ float t_A[BONUSTILE_R][BONUSTILE_C];
    __shared__ float t_B[BONUSTILE_R][BONUSTILE_C];
    // ceil(N / BONUSTILE_C) tiles along the k dimension.  The original
    // divided by 2, running ~4x the necessary iterations (harmless only
    // because out-of-range loads are zero-filled).
    for (int i = 0; i < (N + BONUSTILE_C - 1) / BONUSTILE_C; i++)
    {
        if (i*BONUSTILE_C + tx < N && row < M)
            t_A[ty][tx] = A[row*N + i*BONUSTILE_C + tx];
        else
            t_A[ty][tx] = 0.0;
        if (i*BONUSTILE_C + ty < Brows && col < K)
            t_B[ty][tx] = B[(i*BONUSTILE_C + ty)*K + col];
        else
            t_B[ty][tx] = 0.0;
        __syncthreads();
        for (int j = 0; j < BONUSTILE_C; j++)
            cValue += t_A[ty][j] * t_B[j][tx];
        __syncthreads();
    }
    if (row < Crows && col < Ccols)
        C[row*Ccols + col] = cValue;
}

// Same as above, in its final form; this is the kernel main launches.
__global__ void TiledMatrixMulGPUBonus(float* A, float* B, float* C, int M, int N, int K)
{
    float cValue = 0;
    unsigned int bx = blockIdx.x;
    unsigned int by = blockIdx.y;
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    int row = by*BONUSTILE_R + ty;
    int col = bx*BONUSTILE_C + tx;
    __shared__ float t_A[BONUSTILE_R][BONUSTILE_C];
    __shared__ float t_B[BONUSTILE_R][BONUSTILE_C];
    // ceil(N / BONUSTILE_C) tiles along the k dimension (was / 2 -- see note
    // in TiledMatrixMulGPUBonus1).
    for (int i = 0; i < (N + BONUSTILE_C - 1) / BONUSTILE_C; i++)
    {
        if (i*BONUSTILE_C + tx < N && row < M)
            t_A[ty][tx] = A[row*N + i*BONUSTILE_C + tx];
        else
            t_A[ty][tx] = 0.0;
        if (i*BONUSTILE_C + ty < N && col < K)
            t_B[ty][tx] = B[(i*BONUSTILE_C + ty)*K + col];
        else
            t_B[ty][tx] = 0.0;
        __syncthreads();
        for (int j = 0; j < BONUSTILE_C; j++)
            cValue += t_A[ty][j] * t_B[j][tx];
        __syncthreads();
    }
    if (row < M && col < K)
        C[row*K + col] = cValue;
}

// Serial reference for the rectangular case: C(MxK) = A(MxN) * B(NxK).
void MatrixMulCPUBonus(float* A, float* B, float* C, const int M, const int N, const int K)
{
    float cValue = 0.0;
    for (int i = 0; i < M; i++)
    {
        for (int j = 0; j < K; j++)
        {
            cValue = 0.0;
            for (int k = 0; k < N; k++)
                cValue += A[i * N + k] * B[k * K + j];
            C[i * K + j] = cValue;
        }
    }
}

// Run the rectangular bonus multiply on CPU and GPU and verify.
void computeMatrixBonus(const int M, const int N, const int K)
{
    printf("------------------------------------------------------------------------\n\n");
    printf("%dx%d and %dx%d matrix multiplication with %dx%d tile size.\n\n", M, N, N, K, BONUSTILE_R, BONUSTILE_C);

    float *C_A, *C_B, *C_C, *GPUResult;
    size_t sizeA = M * N * sizeof(float);
    size_t sizeB = N * K * sizeof(float);
    size_t sizeC = M * K * sizeof(float);
    C_A = (float*)malloc(sizeA);
    C_B = (float*)malloc(sizeB);
    C_C = (float*)malloc(sizeC);
    GPUResult = (float*)malloc(sizeC);

    initialData(C_A, M * N);
    initialData(C_B, N * K);
    memset(C_C, 0, sizeC);
    memset(GPUResult, 0, sizeC);

    MatrixMulCPUBonus(C_A, C_B, C_C, M, N, K);

    float *G_A, *G_B, *G_C;
    cudaMalloc((void**)&G_A, sizeA);
    cudaMalloc((void**)&G_B, sizeB);
    cudaMalloc((void**)&G_C, sizeC);
    cudaMemcpy(G_A, C_A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(G_B, C_B, sizeB, cudaMemcpyHostToDevice);

    dim3 block(BONUSTILE_C, BONUSTILE_R, 1);
    dim3 grid((K + block.x - 1) / block.x, (M + block.y - 1) / block.y, 1);
    TiledMatrixMulGPUBonus <<<grid, block>>> (G_A, G_B, G_C, M, N, K);

    cudaMemcpy(GPUResult, G_C, sizeC, cudaMemcpyDeviceToHost);
    // Was checkResult(..., N*N): C has M*K elements, so N*N both under- or
    // over-ran the buffers (heap OOB read for the 2x3x4 case).
    checkResult(C_C, GPUResult, M*K);

    free(C_A);
    free(C_B);
    free(C_C);
    free(GPUResult);
    cudaFree(G_A);
    cudaFree(G_B);
    cudaFree(G_C);
    cudaDeviceReset();
}
// ------------------------------------------------------

int main()
{
    FILE *fp;
    fp=fopen("machineProblem4.csv","w");
    fprintf(fp,"matrixSize,tileSize,time\n");
    fclose(fp);

    int matrixWidths [] = {100, 200, 500, 1000, 1500, 5000};
    for (int i = 0; i < 6; i++)
        computeMatrix(matrixWidths[i]);

    printf("------------------------------------------------------------------------\n");
    printf("BONUS\n");
    computeMatrixBonus(2, 3, 4);
    computeMatrixBonus(250, 300, 450);
    printf("------------------------------------------------------------------------\n\n");
    return 0;
}
24,146
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <string>
//===> FINITE DIFFERENCES PARAMETERS <===//
#define DT 0.05f //->Time in milliseconds
#define DX ( 12.0f / MODELSIZE_X ) //->Displacement in x
#define DY ( 12.0f / MODELSIZE_Y ) //->Displacement in y
//===> CONSTANTES <===//
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
//===> INITIAL CONDITIONS <===//
#define v0 0.5f
#define VOLT0 3.0f
//==> DISCRETE DOMAIN <==//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0
#endif
#define MODELSIZE_X (MODEL_WIDTH)
#define MODELSIZE_Y (MODEL_WIDTH)
#define MODELSIZE_Z 1
#define MODELSIZE2D ( MODELSIZE_X*MODELSIZE_Y )
//==> CUDA THREAD BLOCK <==//
//#define TILESIZE 32
//#define BLOCKDIM_X ( TILESIZE )
//#define BLOCKDIM_Y ( TILESIZE )
#ifndef BLOCKDIM_X
#define BLOCKDIM_X 32
#endif
#ifndef BLOCKDIM_Y
#define BLOCKDIM_Y 32
#endif
#define BLOCKDIM_Z 1
#define BLOCKDIM2D ( BLOCKDIM_X*BLOCKDIM_Y )
//==> CUDA GRID <==//
#define GRIDDIM_X ( ( MODELSIZE_X / BLOCKDIM_X ) + ( ( MODELSIZE_X % BLOCKDIM_X ) > 0 ) )
#define GRIDDIM_Y ( ( MODELSIZE_Y / BLOCKDIM_Y ) + ( ( MODELSIZE_Y % BLOCKDIM_Y ) > 0 ) )
#define GRIDDIM_Z 1
//////////////////////////////////////////////////////////////////////////
// Abort with file/line context when a CUDA runtime call fails.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err != cudaSuccess)
    {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//////////////////////////////////////////////////////////////////////////
// One explicit finite-difference time step of the 2-D reaction-diffusion
// model: reads voltIN, updates the recovery field v in place, and writes the
// new voltage into voltOUT.  Each 32x32 block stages its tile of voltIN into
// shared memory with a one-cell halo; boundary cells mirror inward (no-flux).
//
// NOTE(review): __syncthreads() sits inside the `if (x < ... && y < ...)`
// branch -- safe only when MODEL_WIDTH is a multiple of the block dims so all
// threads of every block take the branch; otherwise this is UB.  Confirm the
// build always satisfies that.  The halo loads after the barrier appear safe
// because each halo cell is read only by the thread that wrote it.
__global__ void timeStep( const float *voltIN, float *v, float *voltOUT )
{
    int x = blockIdx.x*BLOCKDIM_X + threadIdx.x;
    int y = blockIdx.y*BLOCKDIM_Y + threadIdx.y;
    __shared__ float U[BLOCKDIM_X+2][BLOCKDIM_Y+2];
    if ( x < MODELSIZE_X && y < MODELSIZE_Y )
    {
        //
        int idx = y*MODELSIZE_X + x;
        int i = threadIdx.x+1;
        int j = threadIdx.y+1;
        U[i][j] = voltIN[idx];
        __syncthreads();
        float rv = v[idx];
        // Edge threads also fetch the halo row/column; the (y>0)-(y==0)
        // arithmetic mirrors the boundary cell inward at domain edges.
        if ( threadIdx.y == 0 )
            U[i][0] = voltIN[(idx - ((y>0)-(y==0))*MODELSIZE_X)];
        else if ( threadIdx.y == (BLOCKDIM_Y-1) )
            U[i][(BLOCKDIM_Y+1)] = voltIN[(idx + ((y<MODELSIZE_Y-1)-(y==MODELSIZE_Y-1))*MODELSIZE_X)];
        if ( threadIdx.x == 0 )
            U[0][j] = voltIN[(idx - (x>0) + (x==0))];
        else if ( threadIdx.x == (BLOCKDIM_X-1) )
            U[(BLOCKDIM_X+1)][j] = voltIN[(idx + (x<MODELSIZE_X-1)-(x==MODELSIZE_X-1))];
        // Reaction terms.
        float Rn = ( 1.0f / ( 1.0f - expf(-Re) ) ) - rv;
        float p = ( U[i][j] > En ) * 1.0f;   // branchless threshold gate
        float dv = ( Rn * p - ( 1.0f - p ) * rv ) / tauN;
        float Dn = rv * rv;
        float hE = ( 1.0f - tanh(U[i][j] - Eh) ) * U[i][j] * U[i][j] / 2.0f;
        float du = ( ( ( East - Dn ) * hE ) - U[i][j] ) / tauE;
        // 5-point Laplacian from the shared tile (+halo).
        float xlapr = U[i+1][j] - U[i][j];
        float xlapl = U[i][j] - U[i-1][j];
        float xlapf = U[i][j+1] - U[i][j];
        float xlapb = U[i][j] - U[i][j-1];
        float lap = xlapr - xlapl + xlapf - xlapb;
        voltOUT[idx] = ( U[i][j] + ( du * DT ) + ( lap * DT * gam / ( DX * DX ) ) );
        v[idx] = rv + dv*DT;
    }
}

// Driver: loads the initial voltage map from entrada.txt, runs `nsteps`
// kernel steps ping-ponging between two voltage buffers, and prints the
// elapsed GPU time as "[0,<ms>]".
int main( int argc, char *argv[] )
{
    int nsteps = 3; //8000;
    // if ( argc > 1 )
    // {
    // char *p;
    // long conv = strtol(argv[1], &p, 10);
    // //
    // // Check for errors: e.g., the string does not represent an integer
    // // or the integer is larger than int
    // if (*p != '\0' || conv > INT_MAX)
    // {
    // printf("Error with argument 1!");
    // return 3;
    // }
    // else
    // nsteps = int(conv/DT);
    // }
    if (argc > 1)
    {
        nsteps = atoi(argv[1]);
    }
    //
    cudaEvent_t dstart,dstop;
    cudaEventCreate( &dstart );
    cudaEventCreate( &dstop );
    //
    long start, end;
    struct timeval timecheck;
    gettimeofday(&timecheck, NULL);
    start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
    //
    float *hvolt, *hv;
    hvolt = (float*) malloc( MODELSIZE2D*sizeof(float) );
    hv = (float*) malloc( MODELSIZE2D*sizeof(float) );
    // int x, y, idx;
    // for( y = 0; y < MODELSIZE_Y; y++ )
    // {
    // for( x = 0; x < MODELSIZE_X; x++ )
    // {
    // idx = y*MODELSIZE_X + x;
    // //
    // hv[idx] = 0.5f;
    // //
    // if ( y < 10*(MODELSIZE_Y/20) && y > 8*(MODELSIZE_Y/20) && x < 10*(MODELSIZE_Y/20) && x > 8*(MODELSIZE_Y/20))
    // hvolt[idx] = VOLT0;
    // else
    // hvolt[idx] = 0.0f;
    // //
    // }
    // }
    // Initial conditions come from a file; recovery field starts uniform.
    // NOTE(review): fopen/fscanf results are unchecked -- a missing
    // entrada.txt crashes on the first fscanf.
    FILE *arq;
    arq = fopen("entrada.txt", "rt");
    for(int i=0;i<MODELSIZE_X;i++)
        for(int j=0;j<MODELSIZE_Y;j++)
        {
            hv[i+j*MODELSIZE_X] = 0.5f;
            int temp;
            fscanf(arq," %d",&temp);
            hvolt[i+j*MODELSIZE_X] = temp;
        }
    fclose(arq);
    // FILE *prof;
    // char fpname[100];
    // sprintf(fpname, "./profiles_%d_k2D_shared.csv",MODELSIZE_X);
    // prof = fopen(fpname,"w");
    // fprintf(prof,"index,timestep,P\n");
    // fprintf(prof,"0,%6.4f",0.0);
    // fclose(prof);
    dim3 point;
    //int pointIdx;
    point.x = MODELSIZE_X/2;
    point.y = MODELSIZE_Y/2;
    point.z = 0;
    // pointIdx = point.y*MODELSIZE_X + point.x;
    //fprintf(prof,",%6.4f\n",hvolt[pointIdx]);
    float *dvoltA, *dvoltB, *dv;
    HANDLE_ERROR( cudaMalloc( (void**)&dvoltA, MODELSIZE2D*sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dvoltB, MODELSIZE2D*sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dv , MODELSIZE2D*sizeof(float) ) );
    HANDLE_ERROR( cudaMemcpy( dvoltA, hvolt, MODELSIZE2D*sizeof(float), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dvoltB, hvolt, MODELSIZE2D*sizeof(float), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dv , hv , MODELSIZE2D*sizeof(float), cudaMemcpyHostToDevice ) );
    free( hv );
    dim3 blocks(GRIDDIM_X,GRIDDIM_Y,GRIDDIM_Z);
    dim3 threads(BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z);
    //int nsamples = (nsteps >= 2000)*2000 + (nsteps < 2000)*nsteps;
    //int j = nsteps/nsamples;
    cudaDeviceSynchronize();
    cudaEventRecord( dstart, 0 );
    int i=0;
    // Ping-pong: even steps read A / write B, odd steps read B / write A.
    for (i = 0; i < nsteps; i++ )
    {
        if ( (i%2) == 0 ) //==> EVEN
            timeStep<<<blocks, threads>>>( dvoltA, dv, dvoltB );
        else //==> ODD
            timeStep<<<blocks, threads>>>( dvoltB, dv, dvoltA );
        //
        /*if ( (i%j) == 0 ) { if ( (i%2) == 0 ) //==> EVEN
        HANDLE_ERROR( cudaMemcpy( hvolt, dvoltB, MODELSIZE3D*sizeof(float), cudaMemcpyDeviceToHost ) );
        else //==> ODD
        HANDLE_ERROR( cudaMemcpy( hvolt, dvoltA, MODELSIZE3D*sizeof(float), cudaMemcpyDeviceToHost ) );
        // fprintf(prof,"%d,%6.4f,%6.4f\n", (i+1), ((i+1)*DT), hvolt[pointIdx]);
        }*/
        cudaError_t err = cudaSuccess;
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
        }
    }
    cudaDeviceSynchronize();
    cudaEventRecord( dstop, 0 );
    cudaEventSynchronize ( dstop );
    float elapsed;
    cudaEventElapsedTime( &elapsed, dstart, dstop );
    //printf("GPU elapsed time: %f s (%f milliseconds)\n", (elapsed/1000.0), elapsed);
    //arq = fopen("TempoExecucaoOrig12000.txt", "a");
    //printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
    //fprintf (arq,"[%d,%.5f],\n",MODEL_WIDTH,elapsed);
    printf ("[%d,%.5f]",0,elapsed);
    //fclose(arq);
    // if ( (i%2) == 0 )
    // HANDLE_ERROR( cudaMemcpy( hvolt, dvoltA, MODELSIZE2D*sizeof(float), cudaMemcpyDeviceToHost ) );
    // else
    // HANDLE_ERROR( cudaMemcpy( hvolt, dvoltB, MODELSIZE2D*sizeof(float), cudaMemcpyDeviceToHost ) );
    // arq = fopen("resultado.txt", "wt");
    // for(int i=0;i<MODELSIZE_X;i++)
    // {
    // for(int j=0;j<MODELSIZE_Y;j++)
    // {
    // fprintf(arq," %6.4f",hvolt[i+j*MODELSIZE_X]);
    // }
    // fprintf(arq,"\n");
    // }
    // fclose(arq);
    //fclose( prof );
    free( hvolt );
    cudaFree( dvoltA );
    cudaFree( dvoltB );
    cudaFree( dv );
    //
    // cudaDeviceSynchronize();
    // gettimeofday(&timecheck, NULL);
    // end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
    //printf("CPU elapsed time: %f s (%ld milliseconds)\n", ((end - start)/1000.0), (end - start));
    //
    cudaEventDestroy( dstart );
    cudaEventDestroy( dstop );
    cudaDeviceReset();
    //
    return 0;
}
24,147
#include "includes.h"

/*
 * Builds the inverse of `ilist`: for each of the nloc list positions, store
 * that position into i_idx at the index named by the list entry, i.e.
 * i_idx[ilist[p]] = p.  Launch with at least nloc threads in x.
 */
__global__ void get_i_idx_se_r(const int nloc, const int * ilist, int * i_idx)
{
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Tail threads of the last block fall outside the list.
    if (tid < nloc) {
        i_idx[ilist[tid]] = tid;
    }
}
24,148
/**
 * @brief Hello World with an empty kernel.
 *
 * This is a basic Hello World example where we use our first
 * kernel, albeit empty.
 *
 * From: http://www.nvidia.com/docs/io/116711/sc11-cuda-c-basics.pdf
 */
#include <iostream>
#include <string>

/**
 * @brief Empty kernel
 */
__global__ void mykernel(void) {
}

int main(void) {
    mykernel<<<1,1>>>();
    // A kernel launch is asynchronous and reports no errors by itself:
    // check the launch, then synchronize so the process does not exit
    // with device work still outstanding.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        err = cudaDeviceSynchronize();
    }
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    std::cout << "Hello world!" << std::endl;
    return 0;
}
24,149
#include "includes.h"
/*
 ============================================================================
 Name        :
 Author      : Peter Whidden
 Version     :
 Copyright   :
 Description :
 ============================================================================
 */
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)

/*
 * Device kernel that compares the provided PSF distribution to the distribution
 * around each pixel in the provided image
 *
 * Expected launch: 32x32 thread blocks, one thread per pixel (x, y).
 * Only image/layer 0 is read and written; imageCount is currently unused.
 */
__global__ void convolvePSF(int width, int height, int imageCount, short *image, short *results, float *psf, int psfRad, int psfDim)
{
    // Find bounds of image
    const int x = blockIdx.x*32+threadIdx.x;
    const int y = blockIdx.y*32+threadIdx.y;
    // Clamp the PSF window to the image; dx/dy are the window extents.
    const int minX = max(x-psfRad, 0);
    const int minY = max(y-psfRad, 0);
    const int maxX = min(x+psfRad, width);
    const int maxY = min(y+psfRad, height);
    const int dx = maxX-minX;
    const int dy = maxY-minY;
    if (dx < 1 || dy < 1) return;
    // NOTE(review): there is no `x < width && y < height` guard -- threads a
    // little past the right/bottom edge can still have dx,dy >= 1 and write
    // out of bounds; confirm the grid never over-covers the image.
    // Read Image
    // NOTE(review): fixed 13x13 scratch -- overflows if psfDim > 13
    // (presumably psfRad <= 6 is guaranteed by the caller; verify).
    /*__shared__*/ float convArea[13][13]; //convArea[dx][dy];
    // When the window was clamped at the low edge, the PSF must be read
    // offset from its high edge instead.
    int xCorrection = x-psfRad < 0 ? 0 : psfDim-dx;
    int yCorrection = y-psfRad < 0 ? 0 : psfDim-dy;
    // Pass 1: copy the window and accumulate its total flux.
    float sum = 0.0;
    for (int i=0; i<dx; ++i)
    {
        for (int j=0; j<dy; ++j)
        {
            float value = float(image[0*width*height+(minX+i)*height+minY+j]);
            sum += value;
            convArea[i][j] = value;
        }
    }
    // Pass 2: L1 distance between the normalized window and the PSF.
    float sumDifference = 0.0;
    for (int i=0; i<dx; ++i)
    {
        for (int j=0; j<dy; ++j)
        {
            sumDifference += abs(convArea[i][j]/sum - psf[(i+xCorrection)*psfDim+j+yCorrection] );
        }
    }
    // Scale by 1000 to survive the short truncation.
    results[0*width*height+x*height+y] = int(1000.0*sumDifference);//*/convArea[psfRad][psfRad]);
}
24,150
#include "includes.h"

/*
 * Square matrix multiply C = A * B (n x n floats), one thread per output
 * element; launch with a 2-D grid covering at least n x n threads.
 *
 * Rewritten to remove a data race: the previous version had every thread in
 * a block copy its OWN row of A and column of B into the SAME shared arrays
 * (A_S[k]/B_S[k]) between barriers, so after __syncthreads() each thread read
 * whichever thread's data happened to land last -- results depended on
 * scheduling.  Shared staging only works when all threads of a block load
 * disjoint parts of a tile they all consume; here each thread simply reads
 * its operands from global memory (row reads are broadcast-friendly, column
 * reads coalesce across threads of a warp).
 */
__global__ void matrixMulCUDA5(float *C, float *A, float *B, unsigned int n)
{
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard: partial edge blocks must not write out of bounds.
    if (row >= n || col >= n)
        return;

    // Accumulate in a register, write C once at the end.
    float sum = 0.0f;
    for (unsigned int k = 0; k < n; ++k)
    {
        sum += A[row * n + k] * B[k * n + col];
    }
    C[row * n + col] = sum;
}
24,151
#include<iostream>

// Returns the address of element (i, j, k) of a flattened 3-D array whose
// fastest axis is i, with row stride m and plane stride m*b.
// m arrives as a double and is truncated to int for the index arithmetic.
__device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b){
    const int stride = (int)m;
    // Layout: k selects a (stride*b)-sized plane, j a stride-sized row, i the column.
    return &matrix[stride * b * k + stride * j + i];
}

// Returns the address of element (i, j) of a flattened 2-D array with row
// stride b.  The m parameter is unused; it is kept so both index helpers
// share the same call shape.
__device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){
    return &vector[b * i + j];
}
24,152
#include <stdio.h>
#include <time.h>

/*
 * Print the current Unix timestamp (seconds since the epoch) followed by
 * its human-readable form.
 */
int main(void)
{
    time_t t = time(NULL);
    if (t == (time_t)-1) {
        fprintf(stderr, "time() failed\n");
        return 1;
    }
    /* time_t has no portable printf specifier; cast explicitly. */
    printf("%ld\n", (long)t);
    /* Was printf(ctime(&t)): never pass non-literal data as a format
     * string.  ctime()'s result already ends with '\n'. */
    printf("%s", ctime(&t));
    return 0;
}
24,153
#include <cuda.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//----------------------------------------------
// Sorts N float elements (N=1~1025).
// Total memory used is SIZE bytes.
// Uses a single block only; block size is N/2.
//----------------------------------------------
#define N 1024
#define SIZE (N*sizeof(float))
#define GRID 1
#define BLOCK (N/2)
#define testLoop 1000 // number of iterations when benchmarking
//----------------------------------------------
// Swap helper (usable from both host and kernel
// code because it carries both the __host__ and
// __device__ qualifiers).
//----------------------------------------------
inline __host__ __device__ void swap(float& a, float& b){
	float c=a;
	a=b;
	b=c;
}
//----------------------------------------------
// Odd-even transposition ("bubble") sort kernel:
// sorts the N elements of a into r, ascending.
// Single-block launch; each thread owns one pair.
//----------------------------------------------
__global__ void bubble(float *r, float *a){
	//*** blockDim=N/2 ***
	int j=threadIdx.x;   //j=0,1,2,...blockDim-1
	int k=2*threadIdx.x; //k=0,2,4,...2*(blockDim-1), base index of this thread's pair

	// Shared-memory staging buffer. Padded past N; the pad also absorbs the
	// speculative s[k+2] read below for the last even-N thread.
	__shared__ float s[N+20];

	// Load data into shared memory.
	__syncthreads(); // synchronize threads to speed up the (coalesced) load
	s[j]=a[j];          // all threads load the first half  (0~N/2-1)
	s[j+N/2]=a[j+N/2];  // all threads load the second half (N/2~N-1)
	if(j==0){ // for odd N there is one trailing element; only thread 0 loads it
		s[N-1]=a[N-1];
	}

	// Odd-even transposition sort proper.
	for(int loop=0; loop<=N/2; loop++){
		// Compare/swap the 0-based pairs (0,1) (2,3) (4,5) ...
		__syncthreads(); // ensure prior shared-memory writes are visible
		if(s[k]>s[k+1]){
			swap(s[k],s[k+1]);
		}
		// Compare/swap the 1-based pairs (1,2) (3,4) (5,6) ...
		__syncthreads(); // ensure prior shared-memory writes are visible
		if(s[k+1]>s[k+2]){
			// NOTE(review): s[k+2] is compared BEFORE the k<N-2 guard; for
			// k==N-2 that reads the (uninitialized) pad area. The guard keeps
			// s[0..N-1] from being corrupted, but the comparison itself is on
			// garbage — consider hoisting the guard around the whole if.
			if(k<N-2) // for even N the last thread must not swap here
				swap(s[k+1],s[k+2]);
		}
	}

	// Write the sorted data back to global memory.
	__syncthreads();
	r[j]=s[j];
	r[j+N/2]=s[j+N/2];
	if(j==0){
		r[N-1]=s[N-1];
	}
}
//----------------------------------------------
// Host reference: the same odd-even sort on CPU.
// Copies a into r, then sorts r ascending.
//----------------------------------------------
void bubble_host(float *r, float *a){
	// copy the input
	for(int k=0; k<N; k++){
		r[k]=a[k];
	}
	for(int loop=0; loop<=N/2; loop++){
		// compare/swap the 0-based pairs
		for(int k=0; k<N-1; k+=2){
			if(r[k]>r[k+1]){
				swap(r[k],r[k+1]);
			}
		}
		// compare/swap the 1-based pairs
		for(int k=1; k<N-1; k+=2){
			if(r[k]>r[k+1]){
				swap(r[k],r[k+1]);
			}
		}
	}
}
//----------------------------------------------
// Main program: benchmarks GPU vs CPU sort and
// verifies both results against the identity
// permutation 0..N-1.
//----------------------------------------------
int main(){
	// allocate host memory
	float *a=(float*)malloc(SIZE);
	float *b=(float*)malloc(SIZE);
	float *c=(float*)malloc(SIZE);
	// initialize: a holds 0..N-1, c is a zero buffer
	for(int k=0; k<N; k++){
		a[k]=k;
		c[k]=0;
	}
	// shuffle array a
	srand(time(0));
	for(int k=0; k<2*N; k++){
		int i=rand()%N;
		int j=rand()%N;
		swap(a[i],a[j]);
	}
	// allocate device memory
	float *ga, *gc;
	cudaMalloc((void**)&ga, SIZE);
	cudaMalloc((void**)&gc, SIZE);
	// upload (c is uploaded too, just to clear the device output buffer)
	cudaMemcpy(ga, a, SIZE, cudaMemcpyHostToDevice);
	cudaMemcpy(gc, c, SIZE, cudaMemcpyHostToDevice);
	// benchmark the kernel
	double t0=(double)clock()/CLOCKS_PER_SEC;
	for(int k=0; k<testLoop; k++){
		// launch the kernel (single-block version)
		bubble<<<1,BLOCK>>>(gc,ga);
		// synchronize so an unfinished launch is not timed
		cudaThreadSynchronize();
	}
	t0=((double)clock()/CLOCKS_PER_SEC-t0)/testLoop;
	// benchmark the host version
	double t1=(double)clock()/CLOCKS_PER_SEC;
	for(int k=0; k<testLoop; k++){
		bubble_host(b,a);
	}
	t1=((double)clock()/CLOCKS_PER_SEC-t1)/testLoop;
	// report timings and the speedup ratio
	printf("time[gpu]: %g ms\n",t0*1000);
	printf("time[host]: %g ms\n",t1*1000);
	printf("ratio: %g x\n",t1/t0);
	// download the device result
	cudaMemcpy(c, gc, SIZE, cudaMemcpyDeviceToHost);
	// verify the device result
	printf("------------------------\n");
	bool flag=true;
	for(int k=0; k<N; k++){
		if(c[k]!=k){
			flag=false;
			break;
		}
	}
	printf("test[gpu]: %s\n",flag?"pass":"fail");
	// verify the host result
	flag=true;
	for(int k=0; k<N; k++){
		if(b[k]!=k){
			flag=false;
			break;
		}
	}
	printf("test[host]: %s\n",flag?"pass":"fail");
	// free memory
	cudaFree(ga);
	cudaFree(gc);
	free(a);
	free(b);
	free(c);
	return 0;
}
24,154
#include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>

// In the following section, define the model Parameters
#define N_AR 3
#define START_X 0.800, 0.900, 1.100
#define PHI -0.315415, 0.427606, 0.189134
#define C 1.500
// End model parameters

unsigned int N_SIMS, N_BLK, N_THRD, N_BYTES, T_MAX;
const unsigned int MAX_THREADS = 512;             // max threads per block
__constant__ float c_phi[N_AR];                   // autoregressive parameters as constant
unsigned int N_BYTES_PARM = N_AR * sizeof(float); // size of parameter constant

// Calculate and return mean of an array of floats
float calcMean(float *arr, unsigned int const n)
{
    float sum = 0.0;
    for (int i=0; i<n; i++) {
        sum += arr[i];
    }
    return sum / n;
}

// Create and record a CUDA event on the default stream (used for timing).
__host__ cudaEvent_t get_time(void)
{
    cudaEvent_t time;
    cudaEventCreate(&time);
    cudaEventRecord(time);
    return time;
}

// Simulate an AR(3) process, keeping the lag state in registers.
// One simulation path per thread; final value written to x_out[tid].
__global__ void sim_register(float *x0, float *x1, float *x2, float *x_out,
                             const unsigned int N, const unsigned int T)
{
    unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < N) {
        curandState_t state;            // initialize rand state
        curand_init(tid, 0, 0, &state); // set seed to thread index
        float r_x0 = x0[tid];           // copy lag values of X into registers
        float r_x1 = x1[tid];
        float r_x2 = x2[tid];
        float r_x = r_x0;               // FIX: was uninitialized when T == 0
        float w;                        // white noise for AR process
        for (int t=0; t < T; t++) {     // simulate for T periods
            w = curand_normal(&state) / 2; // w ~ Normal(0, 0.5)
            r_x = C + c_phi[2]*r_x2 + c_phi[1]*r_x1 + c_phi[0]*r_x0 + w;
            r_x2 = r_x1;                // shift the lag window
            r_x1 = r_x0;
            r_x0 = r_x;
        }
        x_out[tid] = r_x;               // save final x as output
    }
}

// Simulate an AR(3) process, reading/writing global memory every step.
// Functionally identical to sim_register; exists to benchmark the difference.
__global__ void sim_gmem(float *x0, float *x1, float *x2, float *x_out,
                         const unsigned int N, const unsigned int T)
{
    unsigned int const i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < N) {
        curandState_t state;          // initialize rand state
        curand_init(i, 0, 0, &state); // set seed to thread index
        float w;                      // white noise for AR process
        for (int t=0; t < T; t++) {   // simulate for T periods
            w = curand_normal(&state) / 2; // w ~ Normal(0, 0.5)
            x_out[i] = C + c_phi[2]*x2[i] + c_phi[1]*x1[i] + c_phi[0]*x0[i] + w;
            x2[i] = x1[i];            // shift the lag window in global memory
            x1[i] = x0[i];
            x0[i] = x_out[i];
        }
    }
}

// Run one full simulation pass and report the mean result and timing.
// type == 1 -> register kernel; anything else -> global-memory kernel.
void simulate(const unsigned int type)
{
    float *h_x0, *h_x1, *h_x2, *h_x;
    h_x0 = (float*) malloc(N_BYTES); // allocate input
    h_x1 = (float*) malloc(N_BYTES); // allocate input
    h_x2 = (float*) malloc(N_BYTES); // allocate input
    h_x  = (float*) malloc(N_BYTES); // allocate output
    float start_x [N_AR] = {START_X};
    for (int i = 0; i < N_SIMS; i++) { // set all host Xs to the same start values
        h_x0[i] = start_x[0];
        h_x1[i] = start_x[1];
        h_x2[i] = start_x[2];
    }
    float *d_x0, *d_x1, *d_x2, *d_out;       // device memory for storing X
    cudaMalloc((void **)&d_x0, N_BYTES);     // allocate device input
    cudaMalloc((void **)&d_x1, N_BYTES);     // allocate device input
    cudaMalloc((void **)&d_x2, N_BYTES);     // allocate device input
    cudaMalloc((void **)&d_out, N_BYTES);    // allocate device output
    float h_phi [N_AR] = {PHI};              // AR parameters
    cudaMemcpyToSymbol(c_phi, h_phi, N_BYTES_PARM); // copy params to constant memory

    /**** Simulation *****/
    const char *typeName; // FIX: string literals are const char*, not char*
    cudaEvent_t start = get_time(); // start time
    cudaMemcpy(d_x0, h_x0, N_BYTES, cudaMemcpyHostToDevice); // copy to device
    cudaMemcpy(d_x1, h_x1, N_BYTES, cudaMemcpyHostToDevice); // copy to device
    cudaMemcpy(d_x2, h_x2, N_BYTES, cudaMemcpyHostToDevice); // copy to device
    if (type == 1){ // simulating with registers
        typeName = "registers";
        sim_register<<<N_BLK, N_THRD>>>(d_x0, d_x1, d_x2, d_out, N_SIMS, T_MAX);
    } else {        // simulating with global memory
        typeName = "global mem";
        sim_gmem<<<N_BLK, N_THRD>>>(d_x0, d_x1, d_x2, d_out, N_SIMS, T_MAX);
    }
    cudaMemcpy(h_x, d_out, N_BYTES, cudaMemcpyDeviceToHost ); // copy back (blocks until done)
    cudaEvent_t stop = get_time(); // stop time
    cudaEventSynchronize(stop);

    // Calculate and print simulation results and timing
    float x_mu = calcMean(h_x, N_SIMS);
    float dur = 0;
    cudaEventElapsedTime(&dur, start, stop);
    printf("\twith %s, result=%f, %.3f ms taken, \n", typeName, x_mu, dur);

    // Free up memory.
    // FIX: the previous version also called cudaFree(c_phi); a __constant__
    // symbol is not cudaMalloc'd memory and must never be passed to cudaFree.
    cudaFree(d_x2);
    cudaFree(d_x1);
    cudaFree(d_x0);
    cudaFree(d_out);
    free(h_x0);
    free(h_x1);
    free(h_x2);
    free(h_x);
}

int main(int argc, char* argv[])
{
    if (argc == 3) { // get number of simulations based on CMDLINE input
        N_SIMS = atoi(argv[1]);
        T_MAX = atoi(argv[2]);
    } else {
        printf("Usage: %s [nSimulations] [maxTimePeriods].\n", argv[0]);
        return EXIT_FAILURE;
    }
    N_BLK = N_SIMS / MAX_THREADS + 1;        // min of one block
    N_THRD = std::min(N_SIMS, MAX_THREADS);  // num of threads per block
    N_BYTES = N_SIMS * sizeof(float);        // size of array
    printf("Running %u simulations over %u time periods...\n", N_SIMS, T_MAX);
    simulate(1); // simulating with registers
    simulate(2); // simulating with global memory
    return EXIT_SUCCESS;
}
24,155
#include <stdio.h>

// Abort-on-error wrapper for CUDA runtime calls.
#define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"cudaAssert: %s at %s:%d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Maps the 0/1 capability flags from cudaDeviceProp to readable strings.
const char *boolStrings[2] = {"NO", "YES"};

// Enumerates every visible CUDA device and prints its key properties.
int main(void)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    for (int dev = 0; dev < deviceCount; dev++)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);

        // Peak bandwidth = 2 (DDR) * clock * bus-width-in-bytes, scaled to GB/s.
        double peakBandwidth = 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6;

        printf("Device Number: %d\n", dev);
        printf("  Device Name: %s\n", prop.name);
        printf("  Compute Capability: %d.%d\n", prop.major, prop.minor);
        printf("  Number of SMs: %d\n", prop.multiProcessorCount);
        printf("  Core Clock Rate (KHz): %d\n", prop.clockRate);
        printf("  Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("  Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf("  Peak Memory Bandwidth (GB/s): %f\n", peakBandwidth);
        printf("  Device Overlap Supported: %s\n", boolStrings[prop.deviceOverlap]);
        printf("  Concurrent Kernels Supported: %s\n", boolStrings[prop.concurrentKernels]);
        printf("  Managed Memory Supported: %s\n", boolStrings[prop.managedMemory]);
        printf("  Concurrent Managed Memory Access Supported: %s\n\n", boolStrings[prop.concurrentManagedAccess]);
    }

    return 0;
}
24,156
// A "hello world" style CUDA program
#include <stdio.h>
#include <stdlib.h>

// Kernel: element-wise C[i] = A[i] + B[i].
// One thread per element; launched with a single block of N threads,
// so threadIdx.x alone identifies the element.
__global__ void vector_add(float* A, float* B, float* C)
{
    // Our ID is unique to our thread
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    // This is the size of our vectors, and the number of threads
    int N = 10;

    // These will be our vectors on the host
    float* host_A;
    float* host_B;
    float* host_C;

    // Use this for indices
    int i;

    // Define our vectors on the host
    host_A = (float*) malloc(N*sizeof(float));
    host_B = (float*) malloc(N*sizeof(float));
    host_C = (float*) malloc(N*sizeof(float));

    // Initialise them
    for (i = 0; i < N; i++)
    {
        host_A[i] = (float)i;
        host_B[i] = 2.0 * (float)i;
        host_C[i] = 0.0;
    }

    // Define our vectors on the GPU
    float* device_A;
    float* device_B;
    float* device_C;
    cudaMalloc((void**) &device_A, sizeof(float)*N);
    cudaMalloc((void**) &device_B, sizeof(float)*N);
    cudaMalloc((void**) &device_C, sizeof(float)*N);

    // Transfer data to the GPU
    cudaMemcpy(device_A, host_A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(device_B, host_B, sizeof(float)*N, cudaMemcpyHostToDevice);
    //cudaMemcpy(device_C, host_C, sizeof(float)*N, cudaMemcpyHostToDevice);

    // Call our function; second number is how many threads to use
    // The first number is to do with thread blocks...
    vector_add<<<1, N>>>(device_A, device_B, device_C);

    // Copy memory back (cudaMemcpy blocks until the kernel has finished)
    cudaMemcpy(host_C, device_C, sizeof(float)*N, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(device_A);
    cudaFree(device_B);
    cudaFree(device_C);

    // Output our results
    printf("A = [");
    for (i = 0; i < N; i++)
    {
        printf("%G,", host_A[i]);
    }
    printf("]\n");
    printf("B = [");
    for (i = 0; i < N; i++)
    {
        printf("%G,", host_B[i]);
    }
    printf("]\n");
    printf("C = [");
    for (i = 0; i < N; i++)
    {
        printf("%G,", host_C[i]);
    }
    printf("]\n");

    // FIX: free the host buffers (previously leaked).
    free(host_A);
    free(host_B);
    free(host_C);

    return 0;
}
24,157
#include "includes.h"
// Raises each of the first n elements of v to the power e, in place.
// One thread per element; the grid may be larger than n.
__global__ void pow_kernel(float *v, int n, float e)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n)
    {
        v[idx] = ::pow(v[idx], e);
    }
}
24,158
#include<stdio.h>

#define MIN(a,b) (a<b?a:b)
#define MAX(a,b) (a>b?a:b)

// Device strlen: counts bytes up to (not including) the NUL terminator.
__device__ size_t string_len(const char *str){
    const char *s;
    for(s=str; *s; ++s);
    return (s-str);
}

// Device strncpy-alike: copies up to n bytes of src into dest, padding the
// remainder with NULs. Note: like strncpy, dest is NOT NUL-terminated when
// src is n or more bytes long.
__device__ char* string_copy(char *dest, const char *src, size_t n){
    size_t k;
    for(k=0; k < n && src[k] != '\0'; k++){
        dest[k] = src[k];
    }
    for(; k < n; k++){
        dest[k] = '\0';
    }
    return dest;
}

// Longest common substring of two strings packed into params as:
//   [int len1][int len2][char s1[len1+1]][char s2[len2+1]][char out[...]]
// The result is written NUL-terminated into the out region (empty string
// when there is no common substring or the sizes are out of range 1..32).
__device__ void LCSubstring(void *params){
    int *s1_size = (int*)params;
    int *s2_size = s1_size + 1;
    char *s1 = (char*)(s2_size+1);
    char *s2 = s1 + *s1_size + 1;
    char *ret = s2 + *s2_size + 1;
    *ret = '\0'; // FIX: was *ret = NULL (a pointer constant assigned to char)
    if(*s1_size > 32 || *s2_size > 32 || *s1_size <= 0 || *s2_size <= 0){
        return;
    }

    int len1 = *s1_size;
    int len2 = *s2_size;
    int i, j;

    // FIX: the DP table must be fully zero-initialized. The previous version
    // cleared only row (threadIdx.x % max), so table[i-1][j-1] later read
    // uninitialized stack memory whenever the characters did not match.
    int table[32][32];
    for(i=0; i<len1; i++){
        for(j=0; j<len2; j++){
            table[i][j] = 0;
        }
    }

    // Classic O(len1*len2) longest-common-substring dynamic program:
    // table[i][j] = length of the common suffix ending at s1[i], s2[j].
    // FIX: iterate i over s1 and j over s2 directly; the previous version
    // iterated i over MAX(len1,len2) and j over MIN(len1,len2), reading past
    // the end of the shorter string whenever the lengths differed.
    int longest = 0;
    for(i=0; i<len1; i++){
        for(j=0; j<len2; j++){
            if(s1[i] == s2[j]){
                if(i==0 || j==0){
                    table[i][j] = 1;
                }
                else{
                    table[i][j] = table[i-1][j-1] + 1;
                }
                if(table[i][j] > longest){
                    longest = table[i][j];
                    string_copy(ret, &s1[i-longest+1], longest);
                }
            }
        }
    }
    ret[longest] = '\0'; // ensure the copied result is terminated
}
24,159
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <thrust/sort.h>

using namespace std;

/**********************************************************
***********************************************************
error checking stufff
***********************************************************
***********************************************************/
// Enable this for error checking
#define CUDA_CHECK_ERROR

#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )

// Aborts the program with a file/line message when a CUDA API call
// (wrapped via CudaSafeCall) returns anything but cudaSuccess.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        if ( cudaSuccess != err )
        {
            fprintf( stderr,"cudaSafeCall() failed at %s:%i : %s\n",file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif // CUDA_CHECK_ERROR
    return;
}

// Checks for a pending asynchronous CUDA error (e.g. from a kernel launch);
// intended to be called right after a kernel via CudaCheckError().
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        cudaError_t err = cudaGetLastError();
        if ( cudaSuccess != err )
        {
            fprintf( stderr,"cudaCheckError() failed at %s:%i : %s.\n",file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
        // More careful checking. However, this will affect performance.
        // Comment if not needed.
        err = cudaThreadSynchronize();
        if( cudaSuccess != err )
        {
            fprintf( stderr,"cudaCheckError() with sync failed at %s:%i : %s.\n",file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif // CUDA_CHECK_ERROR
    return;
}
/***************************************************************
***************************************************************
end of error checking stuff
****************************************************************
***************************************************************/

// function takes an array pointer, and the number of rows and cols in the array, and
// allocates and intializes the array to a bunch of random numbers
// Note that this function creates a 1D array that is a flattened 2D array
// to access data item data[i][j], you must can use data[(i*rows) + j]
// NOTE(review): the caller owns the returned buffer (allocated with new[]);
// it is never delete[]d in main below.
int * makeRandArray( const int size, const int seed )
{
    srand( seed );
    int * array = new int[ size ];
    for( int i = 0; i < size; i ++ )
    {
        array[i] = std::rand() % 1000000;
    }
    return array;
}

//*******************************//
// your kernel here!!!!!!!!!!!!!!!!!
//*******************************//
// Placeholder kernel left intentionally empty — the assignment sorts with
// thrust::sort on the host instead (see main below).
__global__ void matavgKernel()
{
}

// Parses [size] [seed] [printSorted] from the command line, fills an array
// with pseudo-random ints, sorts it (currently host-side thrust::sort),
// times the whole operation with CUDA events, and optionally prints it.
int main( int argc, char * argv[] )
{
    int * array; // the poitner to the array of rands
    int size, seed; // values for the size of the array
    bool printSorted = false;
    // and the seed for generating
    // random numbers
    // check the command line args
    if( argc < 4 ){
        std::cerr << "usage: " << argv[0] << " [amount of random nums to generate] [seed value for rand]" << " [1 to print sorted array, 0 otherwise]" << std::endl;
        exit( -1 );
    }

    // convert cstrings to ints
    {
        std::stringstream ss1( argv[1] );
        ss1 >> size;
    }
    {
        std::stringstream ss1( argv[2] );
        ss1 >> seed;
    }
    {
        int sortPrint;
        std::stringstream ss1( argv[3] );
        ss1 >> sortPrint;
        if( sortPrint == 1 )
            printSorted = true;
    }

    // get the random numbers
    array = makeRandArray( size, seed );

    //print out initial array
    //for(int index = 0; index < size-1; index++)
    //{
    //   printf("%d, ", array[index]);
    //}
    //printf("%d\n", array[size-1]);

    /***********************************
     create a cuda timer to time execution
     **********************************/
    cudaEvent_t startTotal, stopTotal;
    float timeTotal;
    cudaEventCreate(&startTotal);
    cudaEventCreate(&stopTotal);
    cudaEventRecord( startTotal, 0 );
    /***********************************
     end of cuda timer creation
     **********************************/

    /////////////////////////////////////////////////////////////////////
    ///////////////////////    YOUR CODE HERE     ///////////////////////
    /////////////////////////////////////////////////////////////////////
    // NOTE(review): this sorts on the HOST — thrust dispatches to a serial/
    // parallel CPU backend for raw host pointers; no kernel is launched.
    thrust::sort(array, array+size);

    /*
     You need to implement your kernel as a function at the top of this file.
     Here you must
     1) allocate device memory
     2) set up the grid and block sizes
     3) call your kenrnel
     4) get the result back from the GPU

     to use the error checking code, wrap any cudamalloc functions as follows:
     CudaSafeCall( cudaMalloc( &pointer_to_a_device_pointer,
     length_of_array * sizeof( int ) ) );
     Also, place the following function call immediately after you call your kernel
     ( or after any other cuda call that you think might be causing an error )
     CudaCheckError();
     */

    /***********************************
     Stop and destroy the cuda timer
     **********************************/
    cudaEventRecord( stopTotal, 0 );
    cudaEventSynchronize( stopTotal );
    cudaEventElapsedTime( &timeTotal, startTotal, stopTotal );
    cudaEventDestroy( startTotal );
    cudaEventDestroy( stopTotal );
    /***********************************
     end of cuda timer destruction
     **********************************/

    std::cerr << "Total time in seconds: " << timeTotal / 1000.0 << std::endl;

    if( printSorted ){
        //print out sorted array
        for(int index = 0; index < size-1; index++)
        {
            printf("%d, ", array[index]);
        }
        printf("%d\n", array[size-1]);
    }
}
24,160
#include "includes.h"
// Square matrix multiply: c = a * b for m x m row-major int matrices.
// One thread computes one output element; out-of-range threads do nothing.
__global__ void kernelMultMat(int *a, int *b, int *c,int m){
    const int col = blockDim.x*blockIdx.x + threadIdx.x;
    const int row = blockDim.y*blockIdx.y + threadIdx.y;

    if (row < m && col < m)
    {
        // Dot product of row `row` of a with column `col` of b.
        int acc = 0;
        for (int k = 0; k < m; ++k)
        {
            acc += a[row*m + k] * b[k*m + col];
        }
        c[row*m + col] = acc;
    }
}
24,161
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <chrono>

#define BLOCK_SIZE 16

// Kernel: naive matrix multiply C = A * B for n x n row-major matrices.
// One thread per output element; assumes n is a multiple of BLOCK_SIZE
// (enforced in main), so no bounds check is needed.
__global__ void matrixMult(const double *A, const double *B, double *C, int n)
{
	int ai = n * (blockDim.y * blockIdx.y + threadIdx.y); // start index of this thread's row of A
	int bj = blockDim.x * blockIdx.x + threadIdx.x;       // column index into B
	double sum = 0;
	for (int k = 0; k < n; k++)
		sum += A[ai + k] * B[k * n + bj];
	int index = n * (blockDim.y * blockIdx.y + threadIdx.y) + blockDim.x * blockIdx.x + threadIdx.x; // index of the C element being computed
	C[index] = sum;
}

// Kernel: tiled matrix multiply using shared memory. Each block walks the
// BLOCK_SIZE-wide sub-matrices of A and B that contribute to its output tile.
__global__ void matrixMultShared(double* A, double* B, double* C, int n)
{
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int aBegin = n * BLOCK_SIZE * by; // index of the first sub-matrix of A processed by this block
	int aEnd = aBegin + n - 1;        // index of the last sub-matrix of A
	int aStep = BLOCK_SIZE;           // step size used to iterate the sub-matrices of A
	int bBegin = BLOCK_SIZE * bx;     // index of the first sub-matrix of B processed by this block
	int bStep = BLOCK_SIZE * n;       // step size used to iterate the sub-matrices of B
	double Csub = 0;
	for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
	{
		// Tiles of A and B staged in shared memory
		// (declared with the __shared__ qualifier).
		__shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
		__shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];
		As[ty][tx] = A[a + n * ty + tx];
		Bs[ty][tx] = B[b + n * ty + tx];
		__syncthreads(); // wait until both tiles are fully loaded
		for (int k = 0; k < BLOCK_SIZE; ++k)
			Csub += As[ty][k] * Bs[k][tx];
		__syncthreads(); // wait before the tiles are overwritten next iteration
	}
	int c = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
	C[c + n * ty + tx] = Csub;
}

// Allocates and fills an n x n matrix with uniform random values in [0, 1].
double * generateRandMatrix(int n, size_t sizeMatrix) {
	double * matrix = (double *)malloc(sizeMatrix);
	for (int i = 0; i < n * n; i++) {
		matrix[i] = (double)rand() / (double)RAND_MAX;
	}
	return matrix;
}

// Prints an n x n matrix (debug helper).
void printMatrix(double * matrix, int n) {
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			printf("%4.1lf ", matrix[i*n + j]);
		}
		printf("\n");
	}
}

// Sequential reference implementation of the matrix multiplication.
void matrixMultCPU(double* A, double* B, double * C, int n) {
	for (int i = 0; i<n; i++) {
		for (int j = 0; j<n; j++) {
			for (int k = 0; k<n; k++) {
				C[i*n + j] += A[i*n + k] * B[k*n + j];
			}
		}
	}
}

// Verifies the GPU result against the CPU result element-wise
// within a fixed absolute tolerance.
bool checkMult(double * C1, double * C2, int n) {
	double accuracy = 1.e-6;
	for (int i = 0; i < n*n; i++) {
		if (abs(C1[i] - C2[i]) >= accuracy)
			return false;
	}
	return true;
}

// Usage: prog <N> <flag_s>. N must be a multiple of BLOCK_SIZE; flag_s
// selects the shared-memory kernel (non-zero) or the naive kernel (0).
// Times CPU vs GPU and verifies the GPU result.
int main(int argc, char *argv[])
{
	int N = atoi(argv[1]);
	int flag_s = atoi(argv[2]);
	if (N % 16 != 0) {
		printf("The number is not a multiple of the block size. The program will be closed.\n");
		system("pause");
		exit(1);
	}
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	srand(time(NULL));
	size_t sizeMatrix = sizeof(double) * N * N;
	double * h_A = generateRandMatrix(N, sizeMatrix);
	double * h_B = generateRandMatrix(N, sizeMatrix);
	double * h_C = (double *)malloc(sizeMatrix);
	double * h_C_seq = (double *)malloc(sizeMatrix);
	for (int i = 0; i<N*N; i++) {
		h_C_seq[i] = 0; // matrixMultCPU accumulates, so C must start at zero
	}
	using namespace std::chrono;
	high_resolution_clock::time_point t1 = high_resolution_clock::now();
	matrixMultCPU(h_A, h_B, h_C_seq, N);
	high_resolution_clock::time_point t2 = high_resolution_clock::now();
	duration<double, std::milli> time_span = t2 - t1;
	double cpu_time = time_span.count();
	printf("The time: %f milliseconds\n", cpu_time);
	double *d_A;
	cudaMalloc((void **)&d_A, sizeMatrix);
	double *d_B;
	cudaMalloc((void **)&d_B, sizeMatrix);
	double * d_C;
	cudaMalloc((void **)&d_C, sizeMatrix);
	cudaMemcpy(d_A, h_A, sizeMatrix, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, sizeMatrix, cudaMemcpyHostToDevice);
	dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
	dim3 blocksPerGrid = dim3(N / BLOCK_SIZE, N / BLOCK_SIZE); // exact: N % BLOCK_SIZE == 0
	if (flag_s) {
		cudaEventRecord(start, 0);
		matrixMultShared<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
	}
	else {
		cudaEventRecord(start, 0);
		matrixMult<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
	}
	float KernelTime;
	cudaEventElapsedTime(&KernelTime, start, stop);
	printf("KernelTime: %f milliseconds\n", KernelTime);
	double S = cpu_time / KernelTime;
	printf("Acceleration: %f\n", S);
	cudaMemcpy(h_C, d_C, sizeMatrix, cudaMemcpyDeviceToHost);
	if (checkMult(h_C, h_C_seq, N))
		printf("The multiplication results are correct.\n");
	else
		printf("Multiplication results are NOT correct.\n");
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	free(h_A);
	free(h_B);
	free(h_C);
	free(h_C_seq);
	return 0;
}
24,162
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<random>

#define cudaCheck(x) _cudaCheck(x, #x ,__FILE__, __LINE__)

// Prints the failing call, location and CUDA error string, then exits.
template<typename T>
void _cudaCheck(T e, const char* func, const char* call, const int line){
    if(e != cudaSuccess){
        printf("\"%s\" at %d in %s\n\treturned %d\n-> %s\n", func, line, call, (int)e, cudaGetErrorString(e));
        exit(EXIT_FAILURE);
    }
}

// Number of elements worked by one thread
long nelem;

// Each thread works on nelem elements of a pair of sz-long vectors (x += y).
// PARAMETER pattern: 0 = each thread takes nelem adjacent elements,
//                    1 = threads walk the vector cyclically with stride inc.
__global__ void add_krnl(float *x, float *y, long sz, long nelem)
{
    int pattern = 1;
    long i;
    long inc = (sz + nelem-1) / nelem;
    long start = blockIdx.x*1024 + threadIdx.x;
    if (pattern == 0) {
        start = start*nelem;
        long end = start+nelem;
        for(i = start; i < end && i < sz; i++) {
            x[i] += y[i];
        }
    } else {
        for (i = start; i < sz; i += inc) {
            x[i] += y[i];
        }
    }
}

// Adds y into x in place on the CPU, performs the same addition on the GPU,
// prints the kernel time, and returns 1 iff the two results agree within
// 1e-5 for every element (0 otherwise).
long func_add(float *x, float *y, long sz)
{
    long i;

    // GPU input copies are taken BEFORE the CPU loop mutates x.
    // FIX: the previous version copied x to the device AFTER the in-place
    // CPU addition and never copied y at all, so the kernel added
    // uninitialized device memory to an already-summed vector.
    float *dx, *dy;
    cudaCheck(cudaMalloc((float **) &dx, sz*sizeof(float)));
    cudaCheck(cudaMalloc((float **) &dy, sz*sizeof(float)));
    cudaCheck(cudaMemcpy(dx, x, sz*sizeof(float), cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(dy, y, sz*sizeof(float), cudaMemcpyHostToDevice));

    // CPU reference calculation (in place, as before)
    for (i = 0; i < sz; i++)
        x[i] += y[i];

    // Timing using cudaEvent
    cudaEvent_t start, stop;
    float et;
    cudaCheck(cudaEventCreate(&start));
    cudaCheck(cudaEventCreate(&stop));

    // Time event start
    cudaCheck(cudaEventRecord(start));
    {
        long n_threads = (sz + nelem-1) / nelem;
        long n_blocks = (n_threads + 1023) / 1024;
        add_krnl<<<n_blocks, 1024>>>(dx, dy, sz, nelem);
    }
    cudaCheck(cudaGetLastError());

    // Time event end
    cudaCheck(cudaEventRecord(stop));
    cudaCheck(cudaEventSynchronize(stop));
    cudaCheck(cudaEventElapsedTime(&et, start, stop));
    cudaCheck(cudaEventDestroy(start));
    cudaCheck(cudaEventDestroy(stop));
    printf("\t%0.3f", et);

    // Copy the GPU result back and free GPU memory
    float * d_x = (float *) malloc(sz * sizeof(float));
    cudaCheck(cudaMemcpy(d_x, dx, sz*sizeof(float), cudaMemcpyDeviceToHost));
    cudaFree(dx);
    cudaFree(dy);

    // Compare CPU and GPU output to see if it is within error tolerance
    for (i = 0; i < sz; i++) {
        if (fabsf(d_x[i] - x[i]) > 1e-5) {
            free(d_x);
            return 0;
        }
    }
    free(d_x);
    return 1;
}

// Sweeps vector sizes 10..1e9 (x10) and per-thread work 1..512 (x2),
// printing a table of kernel times.
int main(int argc, char **argv)
{
    float *a, *b;
    long j;
    long i;

    std::random_device rd;
    std::mt19937_64 mt(rd());
    std::uniform_real_distribution<float> u(0, 1);

    // Print title row.
    // FIX: nelem and j are long; "%d" was undefined behavior — use "%ld".
    printf("sz");
    for (nelem = 1; nelem < 513; nelem *= 2)
        printf("\t%ld", nelem);
    printf("\n");

    for (j = 10; j <= 1000000000; j *= 10) {
        a = (float *) malloc(sizeof(float) * j);
        b = (float *) malloc(sizeof(float) * j);

        // Initialize with random number generator
        for (i = 0; i < j; i++) {
            a[i] = u(mt);
            b[i] = u(mt);
        }

        printf("%ld", j);
        for (nelem = 1; nelem < 513; nelem *= 2)
            if (!func_add(a, b, j))
                printf("failed to add\n");
        printf("\n");

        free(a);
        free(b);
    }
    return 0;
}
24,163
#include "includes.h"
// Matrix-vector product: results[row] = dot(vec, data[row]) where data is a
// row-major matrix with cuda_features columns (cuda_features is defined
// elsewhere in the project). One thread per output row; no tail guard, so
// the launch must cover exactly the number of rows.
__global__ void mult(int* results, int* data, int* vec)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;

    int acc = 0;
    for (int f = 0; f < cuda_features; ++f)
    {
        acc += vec[f] * data[(row * cuda_features) + f];
    }
    results[row] = acc;
}
24,164
#include "includes.h"
// Scales mat by alpha into dest: dest[i] = alpha * mat[i] for i in [0, len).
// Uses a grid-stride loop, so any launch configuration covers the array.
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len)
{
    const unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += stride)
    {
        dest[i] = alpha * mat[i];
    }
}
24,165
#include "includes.h"
#define THREADS_PER_BLOCK 1024
#define TIME 3600000
// Initializes the first arraySize elements of a_d and b_d: element 0 is set
// to the boundary value 200.0, all other in-range elements to 0.0.
// c_d is accepted for signature compatibility but not written here.
__global__ void initialize(float *a_d, float *b_d, float *c_d, int arraySize)
{
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;

    if (ix == 0)
    {
        // boundary condition at the left edge
        a_d[ix] = 200.0f;
        b_d[ix] = 200.0f;
    }
    else if (ix < arraySize)
    {
        a_d[ix] = 0.0f;
        b_d[ix] = 0.0f;
    }
}
24,166
#include <iostream>
#include <stdio.h>
#include <sys/time.h>

#define CUDA_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess) std::cout << cudaGetErrorString(error) << std::endl;}

// Extracts the E field along the y line through the centre of the global
// volume into sliceDataField. Launched with grid(128, 8, 1), block(8, 8, 4);
// blockIdx.x encodes both the x and z supercell coordinates (16 z-supercells
// per x step), offset by one guard supercell in each dimension.
__global__ void kernelLineSliceFields(cudaPitchedPtr fieldE, cudaPitchedPtr fieldB,
                                      float3 *sliceDataField, dim3 globalCellIdOffset,
                                      dim3 globalNrOfCells, dim3 superCellSize,
                                      int guardingSuperCells)
{
    // Decode the supercell index from the flattened block index
    // (superCellIdx mirrors mapper.getSuperCellIndex(blockIdx) upstream).
    dim3 superCellIdx;
    superCellIdx.x = blockIdx.x/16 + 1;
    superCellIdx.y = blockIdx.y + 1;
    superCellIdx.z = blockIdx.x%16 + 1;

    if((threadIdx.x==0)&&(threadIdx.y==0)&&(threadIdx.z==0)){
        //printf("Block %d %d %d => Supercell %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z, superCellIdx.x, superCellIdx.y, superCellIdx.z);
    }
    __syncthreads();

    // Local (guard-inclusive) cell coordinate of this thread.
    dim3 localCell;
    localCell.x = superCellIdx.x * superCellSize.x + threadIdx.x;
    localCell.y = superCellIdx.y * superCellSize.y + threadIdx.y;
    localCell.z = superCellIdx.z * superCellSize.z + threadIdx.z;

    if((threadIdx.x==0)&&(threadIdx.y==0)&&(threadIdx.z==0)){
        //printf("Supercell %d %d %d => Localcell %d %d %d\n", superCellIdx.x, superCellIdx.y, superCellIdx.z, localCell.x, localCell.y, localCell.z);
    }
    if((localCell.x==64)&&(localCell.z==64)){
        printf("X==64 && Z==64 : %d %d %d => %d %d %d => %d %d %d\n",
            superCellIdx.x, superCellIdx.y, superCellIdx.z,
            threadIdx.x, threadIdx.y, threadIdx.z,
            localCell.x, localCell.y, localCell.z);
    }

    // Pitched 3-D read of E at (x, y, z): slice by z, row by y (row stride is
    // the pitch in BYTES, hence the char* arithmetic).
    char *fieldEPtr = (char *)fieldE.ptr;
    size_t eSlicePitch = fieldE.pitch * fieldE.ysize;
    char *eSlice = fieldEPtr + localCell.z * eSlicePitch;
    float3 *eRow = (float3 *)((char *)eSlice + localCell.y * fieldE.pitch);
    float3 e = eRow[localCell.x];

    // Same pitched read for B (read but not emitted into the slice).
    char *fieldBPtr = (char *)fieldB.ptr;
    size_t bSlicePitch = fieldB.pitch * fieldB.ysize;
    char *bSlice = fieldBPtr + localCell.z * bSlicePitch;
    float3 *bRow = (float3 *)((char *)bSlice + localCell.y * fieldB.pitch);
    float3 b = bRow[localCell.x];

    // Strip the guard supercells to get the without-guard local coordinate.
    dim3 localCellWG;
    localCellWG.x = localCell.x - superCellSize.x * guardingSuperCells;
    localCellWG.y = localCell.y - superCellSize.y * guardingSuperCells;
    localCellWG.z = localCell.z - superCellSize.z * guardingSuperCells;

    // Translate to the global cell coordinate.
    dim3 globalCell;
    globalCell.x = localCellWG.x + globalCellIdOffset.x;
    globalCell.y = localCellWG.y + globalCellIdOffset.y;
    globalCell.z = localCellWG.z + globalCellIdOffset.z;

    // Only threads on the centre line (x and z at the volume midpoint)
    // contribute one float3 each to the output line.
    if(globalCell.x == globalNrOfCells.x /2){
        if(globalCell.z == globalNrOfCells.z /2){
            printf("%d %d %d %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z);
            sliceDataField[localCellWG.y] = e;
        }
    }
    __syncthreads();
}

int main(){
    cudaExtent extent;
    cudaPitchedPtr d_field_e, d_field_b;
    extent.width = 960 * sizeof(float3);
    extent.height = 80;
    extent.depth = 72;
    CUDA_CHECK(cudaMalloc3D(&d_field_e, extent));//960 80 72
    CUDA_CHECK(cudaMalloc3D(&d_field_b, extent));//960 80 72
    CUDA_CHECK(cudaMemset3D(d_field_e, 0, extent));
    CUDA_CHECK(cudaMemset3D(d_field_b, 0, extent));

    float3 *d_slice_data_field;
    size_t slice_data_field_pitch = 1;
    CUDA_CHECK(cudaMallocPitch(&d_slice_data_field, &slice_data_field_pitch, 64 * sizeof(float3), 1));

    dim3 grid(128, 8, 1), block(8, 8, 4);
    dim3 global_cell_id_offset(0, 0, 0), global_nr_of_cells(128, 128, 128), super_cell_size(8, 8, 4);
    int guarding_super_cells = 1;
    struct timeval start, end;
    for(int i=0; i<25; i++){ //-s 25 -lslice.period 1
        //wait_for();
        gettimeofday(&start, NULL);
        kernelLineSliceFields<<<grid, block>>>(d_field_e, d_field_b, d_slice_data_field,
            global_cell_id_offset, global_nr_of_cells, super_cell_size, guarding_super_cells);
        CUDA_CHECK(cudaDeviceSynchronize());
        gettimeofday(&end, NULL);
        // FIX: compute elapsed microseconds properly. The previous expression
        // (end.tv_sec + end.tv_usec - start.tv_sec - start.tv_usec) mixed
        // unscaled seconds with microseconds and was meaningless.
        long elapsed_us = (end.tv_sec - start.tv_sec) * 1000000L
                        + (end.tv_usec - start.tv_usec);
        std::cout << "kernelLineSliceFields execution time (in us) : " << elapsed_us << std::endl;
        //release();
    }
    return 0;
}
24,167
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#ifndef N
#define N 4096
#endif
#ifndef FLOAT
#define FLOAT double
#endif
#define sqrt_of_array_cell(x,j) ((FLOAT)sqrt(x[j]))
#define FLOAT_N 3214212.01f
#define EPS 0.005f

/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1

// Column means of the N x N row-major `data` matrix; one thread per column.
// Normalizes by the benchmark constant FLOAT_N rather than N (kept as in
// the original source).
__global__ void mean_kernel(FLOAT *mean, FLOAT *data)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < N) {
        mean[j] = 0.0f;
        int i;
        for (i = 0; i < N; i++) {
            mean[j] += data[i*N + j];
        }
        mean[j] /= (FLOAT)FLOAT_N;
    }
}

// Column standard deviations; columns with std <= EPS are clamped to 1 so
// the later normalization does not divide by ~0.
__global__ void std_kernel(FLOAT *mean, FLOAT *std, FLOAT *data)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < N) {
        std[j] = 0.0f;
        int i;
        for (i = 0; i < N; i++) {
            std[j] += (data[i*N + j] - mean[j]) * (data[i*N + j] - mean[j]);
        }
        std[j] /= (FLOAT_N);
        std[j] = FLOAT(sqrt(std[j]));
        if (std[j] <= EPS) {
            std[j] = 1.0f;
        }
    }
}

// Centers and scales each element in place: data -= mean; data /= sqrt(FLOAT_N)*std.
// One thread per element on a 2D grid.
__global__ void reduce_kernel(FLOAT *mean, FLOAT *std, FLOAT *data)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if ((i < N) && (j < N)) {
        data[i*N + j] -= mean[j];
        data[i*N + j] /= FLOAT(sqrt(FLOAT_N)) * std[j];
    }
}

// Correlation matrix of the normalized data. Thread j1 fills row j1 for
// j2 > j1 and mirrors it; only j1 < N-1 is handled here, so the host must
// set symmat[(N-1)*N + (N-1)] = 1.0 after the copy back.
__global__ void corr_kernel(FLOAT *symmat, FLOAT *data)
{
    int j1 = blockIdx.x * blockDim.x + threadIdx.x;
    int i, j2;
    if (j1 < (N-1)) {
        symmat[j1*N + j1] = 1.0;
        for (j2 = (j1 + 1); j2 < N; j2++) {
            symmat[j1*N + j2] = 0.0;
            for (i = 0; i < N; i++) {
                symmat[j1*N + j2] += data[i*N + j1] * data[i*N + j2];
            }
            symmat[j2*N + j1] = symmat[j1*N + j2];
        }
    }
}

int main()
{
    int i;
    FLOAT *data   = (FLOAT *)malloc(N*N*sizeof(FLOAT));
    FLOAT *symmat = (FLOAT *)malloc(N*N*sizeof(FLOAT));
    FLOAT *mean   = (FLOAT *)malloc(N*sizeof(FLOAT));
    FLOAT *stddev = (FLOAT *)malloc(N*sizeof(FLOAT));

    srand(5497);
    for (i = 0; i < N*N; i++)
        data[i] = (FLOAT)rand() / (FLOAT)RAND_MAX;

    FLOAT *data_gpu;
    FLOAT *stddev_gpu;
    FLOAT *mean_gpu;
    FLOAT *symmat_gpu;
    cudaMalloc((void **)&data_gpu,   sizeof(FLOAT) * N * N);
    cudaMalloc((void **)&symmat_gpu, sizeof(FLOAT) * N * N);
    cudaMalloc((void **)&stddev_gpu, sizeof(FLOAT) * N);
    cudaMalloc((void **)&mean_gpu,   sizeof(FLOAT) * N);

    // Only `data` carries meaningful input. The original also uploaded the
    // uninitialized host symmat/mean/stddev buffers; those device buffers
    // are fully written by the kernels before being read, so the copies
    // are dropped.
    cudaMemcpy(data_gpu, data, sizeof(FLOAT) * N * N, cudaMemcpyHostToDevice);

    // BUG FIX: the original wrote ceil((float)(N)) / block, i.e. ceil was
    // applied to N *before* the division, so the grid was truncated for any
    // N that is not a multiple of the block size. Apply ceil to the quotient.
    dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
    dim3 grid1((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);
    dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
    dim3 grid2((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_2_X), 1);
    dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
    dim3 grid3((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_X),
               (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_Y));
    dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
    dim3 grid4((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_4_X), 1);

    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // drop-in replacement.
    mean_kernel<<< grid1, block1 >>>(mean_gpu, data_gpu);
    cudaDeviceSynchronize();
    std_kernel<<< grid2, block2 >>>(mean_gpu, stddev_gpu, data_gpu);
    cudaDeviceSynchronize();
    reduce_kernel<<< grid3, block3 >>>(mean_gpu, stddev_gpu, data_gpu);
    cudaDeviceSynchronize();
    corr_kernel<<< grid4, block4 >>>(symmat_gpu, data_gpu);
    cudaDeviceSynchronize();

    cudaMemcpy(symmat, symmat_gpu, sizeof(FLOAT) * N * N, cudaMemcpyDeviceToHost);
    // BUG FIX: corr_kernel never writes the last diagonal entry (it only
    // handles j1 < N-1), so the original printed uninitialized memory here.
    // A variable's correlation with itself is 1 by definition.
    symmat[(N-1)*N + (N-1)] = 1.0;

    for (i = 0; i < N*N; i++)
        printf("%.15f,", symmat[i]);

    // Release device and host memory (the original leaked everything).
    cudaFree(data_gpu);
    cudaFree(symmat_gpu);
    cudaFree(stddev_gpu);
    cudaFree(mean_gpu);
    free(data);
    free(symmat);
    free(mean);
    free(stddev);
    return 0;
}
24,168
#include<stdlib.h>
#include<stdio.h>

// Writes, into every element of a 2D array, the linear index of the thread
// block that owns that element (blockIdx.y * gridDim.x + blockIdx.x).
__global__ void kernel(int* array)
{
    // Global 2D coordinates of this thread.
    const int gx = blockDim.x * blockIdx.x + threadIdx.x;
    const int gy = blockDim.y * blockIdx.y + threadIdx.y;

    // Flatten (gx, gy) into a row-major offset across the whole grid.
    const int row_pitch = gridDim.x * blockDim.x;
    const int offset = gy * row_pitch + gx;

    // Every thread of a block stores the same value: its linear block id.
    array[offset] = gridDim.x * blockIdx.y + blockIdx.x;
}

int main(void)
{
    const int width  = 16;  // elements along x
    const int height = 16;  // elements along y
    const int num_bytes = width * height * sizeof(int);

    // One buffer on each side of the PCIe bus.
    int* host_array = (int*)malloc(num_bytes);
    int* device_array = 0;
    cudaMalloc((void**)&device_array, num_bytes);

    // 4x4 thread blocks tiling a 4x4 grid covers the 16x16 array exactly.
    dim3 block_size(4, 4);
    dim3 grid_size(width / block_size.x, height / block_size.y);

    kernel<<<grid_size, block_size>>>(device_array);

    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);

    // Print the block-ownership map row by row.
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col)
            printf("%2d ", host_array[row * width + col]);
        printf("\n");
    }
    printf("\n");

    free(host_array);
    cudaFree(device_array);
}
24,169
#include "includes.h"

// Computes the mixed second derivative psi_xy on an nx x ny row-major grid
// (row stride nx) from the precomputed first derivatives psi_x (temppsix)
// and psi_y (temppsiy), writing into temppsixy.
//   - interior points: average of d(psi_y)/dx and d(psi_x)/dy central
//     differences;
//   - top/bottom edges: central difference of psi_y along x;
//   - left/right edges: central difference of psi_x along y;
//   - corners: one-sided 0.75/-0.25 blends of forward differences.
// One thread per grid point; blocks tile the domain TileSize x TileSize
// (TileSize is assumed to equal blockDim.x and blockDim.y — TODO confirm).
__global__ void update_mixed_derivatives(double *temppsix, double *temppsiy, double *temppsixy, unsigned int nx, unsigned int ny, double dx, double dy, unsigned int TileSize)
{
    unsigned int bx = blockIdx.x;
    unsigned int by = blockIdx.y;
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    unsigned int index_x = bx * TileSize + tx;
    unsigned int index_y = by * TileSize + ty;
    unsigned int indexToWrite = index_y * nx + index_x;

    // BUG FIX: guard against threads outside the domain. Without this, any
    // launch whose grid over-covers nx x ny reads and writes out of bounds.
    if (index_x >= nx || index_y >= ny)
        return;

    if ((index_y == 0 || index_y == ny - 1) && (index_x != 0 && index_x != nx - 1))
        // Top/bottom edge (not a corner): d(psi_y)/dx central difference.
        temppsixy[indexToWrite] = (temppsiy[indexToWrite+1] - temppsiy[indexToWrite-1])/(2 * dx);
    else if ((index_y != 0 && index_y != ny - 1) && (index_x == 0 || index_x == nx - 1))
        // Left/right edge (not a corner): d(psi_x)/dy central difference.
        temppsixy[indexToWrite] = (temppsix[indexToWrite + nx] - temppsix[indexToWrite - nx])/(2 * dy);
    else if((index_y == 0 || index_y == ny - 1) && (index_x == 0 || index_x == nx - 1)){
        // Corners: blend one-sided differences taken at the corner and at
        // its diagonal neighbour.
        if(index_y == 0 && index_x == 0){
            double d1 = (temppsiy[1] - temppsiy[0])/dx;
            double d2 = (temppsix[nx] - temppsix[0])/dy;
            double d3 = (temppsix[nx+1] - temppsix[1])/dy;
            double d4 = (temppsiy[nx+1] - temppsiy[nx])/dx;
            temppsixy[indexToWrite] = 0.75 * (d1 + d2) - 0.25 * (d3 + d4);
        }
        else if(index_y == 0 && index_x == nx-1){
            double d1 = (temppsiy[nx-1] - temppsiy[nx-2])/dx;
            double d2 = (temppsix[nx+nx-2] - temppsix[nx-2])/dy;
            double d3 = (temppsix[nx+nx-1] - temppsix[nx-1])/dy;
            double d4 = (temppsiy[nx+nx-1] - temppsiy[nx+nx-2])/dx;
            temppsixy[indexToWrite] = 0.75 * (d1 + d3) - 0.25 * (d2 + d4);
        }
        else if(index_y == ny-1 && index_x == 0){
            double d1 = (temppsiy[nx *(ny-2) + 1] - temppsiy[nx *(ny-2)])/dx;
            double d2 = (temppsix[nx *(ny-1)] - temppsix[nx *(ny-2)])/dy;
            // NOTE(review): d3 differences column 0 against column 1
            // (nx*(ny-1) vs nx*(ny-2)+1), unlike the other corners where
            // both terms share a column — confirm this asymmetry is intended.
            double d3 = (temppsix[nx *(ny-1)] - temppsix[nx *(ny-2) + 1])/dy;
            double d4 = (temppsiy[nx *(ny-1) + 1] - temppsiy[nx *(ny-1)])/dx;
            temppsixy[indexToWrite] = 0.75 * (d2 + d4) - 0.25 * (d3 + d1);
        }
        else if(index_y == ny-1 && index_x == nx-1){
            double d1 = (temppsiy[nx *(ny-2) + nx - 1] - temppsiy[nx *(ny-2) + nx - 2])/dx;
            double d2 = (temppsix[nx *(ny-1) + nx - 2] - temppsix[nx *(ny-2) + nx - 2])/dy;
            double d3 = (temppsix[nx *(ny-1) + nx - 1] - temppsix[nx *(ny-2) + nx - 1])/dy;
            double d4 = (temppsiy[nx *(ny-1) + nx - 1] - temppsiy[nx *(ny-1) + nx - 2])/dx;
            temppsixy[indexToWrite] = 0.75 * (d3 + d4) - 0.25 * (d1 + d2);
        }
    }
    else{
        // Interior: average the two independent central-difference estimates.
        double dxy1 = (temppsiy[indexToWrite+1] - temppsiy[indexToWrite-1])/(2 * dx);
        double dxy2 = (temppsix[indexToWrite + nx] - temppsix[indexToWrite - nx])/(2 * dy);
        temppsixy[indexToWrite] = (dxy1 + dxy2)/2.0;
    }
}
24,170
// Elementwise matrix addition C = A + B, one thread per row of an m x n
// row-major matrix; each thread walks its entire row.
__global__ void per_row_kernel(int m,int n,int *A,int *B,int *C){
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= m)
        return;
    const int base = row * n;
    for (int col = 0; col < n; ++col)
        C[base + col] = A[base + col] + B[base + col];
}

// Elementwise matrix addition with one thread per column; the 2D thread
// block is flattened into a linear column id. Data is indexed with stride m
// per column (column-major layout of the same m x n matrices).
__global__ void per_column_kernel(int m,int n,int *A,int *B,int *C){
    const int threadsPerBlock = blockDim.x * blockDim.y;
    const int localId = blockDim.x * threadIdx.y + threadIdx.x;
    const int col = threadsPerBlock * blockIdx.x + localId;
    if (col >= n)
        return;
    const int base = col * m;
    for (int row = 0; row < m; ++row)
        C[base + row] = A[base + row] + B[base + row];
}

// Elementwise matrix addition with one thread per element; both the 2D
// thread block and the 2D grid are flattened into a single linear index.
__global__ void per_element_kernel(int m,int n,int *A,int *B,int *C){
    const int localId = threadIdx.x + blockDim.x * threadIdx.y;
    const int blockId = blockIdx.x + gridDim.x * blockIdx.y;
    const int idx = blockId * (blockDim.x * blockDim.y) + localId;
    if (idx < m * n)
        C[idx] = A[idx] + B[idx];
}