serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
13,301
#include <stdio.h> #include <iostream> #include <iostream> #include <fstream> #define WIDTH 8192 #define LENGHT 8192 #define N_PARTICLES 5000 #define INF 999999.999 #define RADIO 100 #define CELLS_FOR_THREAD 8 using namespace std; // __constant__ float x_part_dev[N_PARTICLES]; // __constant__ float y_part_dev[N_PARTICLES]; #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ cout << cudaGetErrorString(error) << endl; \ } \ } while (0) __device__ float dist(float x1, float y1, float x2, float y2) { float dist; dist = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1); //dist = sqrtf(powf(x2-x1, 2) + powf(y2-y1, 2)); if(dist != 0) return 1/dist; else return -1; } __global__ void charge(float l, float *map,float *X,float *Y) { int idx = blockIdx.x*blockDim.x + threadIdx.x; float rowParticle,colParticle,rowCell,colCell; for (int i = idx*CELLS_FOR_THREAD; i<idx*CELLS_FOR_THREAD+CELLS_FOR_THREAD; i++) { if (i<l) { for (size_t j = 0; j < N_PARTICLES; j++) { rowParticle = Y[j]; colParticle = X[j]; rowCell = (i / WIDTH); colCell = (i % WIDTH); //float distancia = rowCell-colCell; float distancia = 1;//(dist(rowParticle,colParticle,rowCell,colCell); if (distancia != -1) { map[i] += distancia; } } //map[i] = 1; } } } __global__ void chargeWithRadio(int l, float *map,float *X,float *Y) { float d; int idx = blockIdx.x*blockDim.x + threadIdx.x; int rowPartcile,colParticle,rowCell,colCell; if (idx < l) { for (size_t i = 0; i < N_PARTICLES; i++) { rowPartcile = Y[i]; colParticle = X[i]; rowCell = (idx / WIDTH)+1; colCell = (idx % WIDTH)+1; d = dist(rowPartcile,colParticle,rowCell,colCell); map[idx] += (d<RADIO)?d:0.0; } } } __global__ void minReduction(float *in, float *out) { __shared__ float sharedData[256]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + tid; // blockSize = 256 sharedData[tid] = in[i] + in[i+blockDim.x]; __syncthreads(); for (unsigned int s = 
blockDim.x/2; s>32; s>>=1) { if(tid<s) { sharedData[tid] = (sharedData[tid]<sharedData[tid+s])?sharedData[tid]:sharedData[tid+s]; } __syncthreads(); } if (tid < 32) { sharedData[tid] = (sharedData[tid]<sharedData[tid+32])?sharedData[tid]:sharedData[tid+32]; sharedData[tid] = (sharedData[tid]<sharedData[tid+16])?sharedData[tid]:sharedData[tid+16]; sharedData[tid] = (sharedData[tid]<sharedData[tid+8])?sharedData[tid]:sharedData[tid+8]; sharedData[tid] = (sharedData[tid]<sharedData[tid+4])?sharedData[tid]:sharedData[tid+4]; sharedData[tid] = (sharedData[tid]<sharedData[tid+2])?sharedData[tid]:sharedData[tid+2]; sharedData[tid] = (sharedData[tid]<sharedData[tid+1])?sharedData[tid]:sharedData[tid+1]; } if(tid==0) { out[blockIdx.x] = sharedData[0]; } } int main(int argc, char *argv[]){ // Load data string input_file_name; if (argc > 1) { input_file_name = argv[1]; } else { cout << "faltó un argumento" << endl; exit(0); } ifstream infile; cout << "Reading: " << input_file_name.c_str() << endl; infile.open(input_file_name.c_str()); int nP; float *x_part, *y_part; infile >> nP; cout << "nP: "<<nP << endl; x_part = (float *)malloc(nP * sizeof(float)); y_part = (float *)malloc(nP * sizeof(float)); for (int i = 0; i<nP; i++) { infile >> x_part[i] >> y_part[i]; } // Get memory for structures float *cells, *d_cells,*outData,*out2,*out3,y[4]; float *x_part_dev, *y_part_dev; cells = (float*)malloc(WIDTH*LENGHT*sizeof(float)); // Initialization grid with 0 for (int i = 0; i < WIDTH*LENGHT; i++) { cells[i] = 0.0; } // Define sizes of GPU int blockSize = 256; // # threads int gridSize = ((WIDTH*LENGHT)/256)/CELLS_FOR_THREAD +1; // # blocks cout << "gridSize: " << gridSize << endl; // Get memory in GPU for structures // data for charge function //cudaMalloc(&x_dev, nP * sizeof(float)); // X cord for particles //cudaMalloc(&y_dev, nP * sizeof(float)); // Y cord for particles CUDA_CHECK(cudaMalloc(&d_cells, WIDTH*LENGHT*sizeof(float))); // 1D array representation for grid 2D 
CUDA_CHECK(cudaMalloc(&x_part_dev, N_PARTICLES*sizeof(float))); CUDA_CHECK(cudaMalloc(&y_part_dev, N_PARTICLES*sizeof(float))); // data for reduction function CUDA_CHECK(cudaMalloc(&outData, gridSize*sizeof(float))); CUDA_CHECK(cudaMalloc(&out2, (gridSize/blockSize)*sizeof(float))); CUDA_CHECK(cudaMalloc(&out3, ((gridSize/blockSize)/blockSize)*sizeof(float))); // Copy data from CPU to GPU CUDA_CHECK(cudaMemcpy(d_cells, cells, WIDTH*LENGHT*sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(x_part_dev, x_part, N_PARTICLES * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(y_part_dev, y_part, N_PARTICLES * sizeof(float), cudaMemcpyHostToDevice)); //cudaMemcpy(x_dev, &x_part, nP * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(y_dev, &y_part, nP * sizeof(float), cudaMemcpyHostToDevice); cudaEvent_t ct1, ct2; float dt, dt2; // time before kernel cudaEventCreate(&ct1); cudaEventCreate(&ct2); cudaEventRecord(ct1); // Charge grid charge<<<gridSize,blockSize>>>(WIDTH*LENGHT, d_cells, x_part_dev, y_part_dev); cudaDeviceSynchronize(); //Time after charge kernel cudaEventRecord(ct2); cudaEventSynchronize(ct2); cudaEventElapsedTime(&dt, ct1, ct2); float time1 = dt; std::cout << "Time GPU computing cells charges: " << time1 << "[ms]" << std::endl; CUDA_CHECK(cudaMemcpy(cells, d_cells, WIDTH*LENGHT*sizeof(float), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); // check for errors cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error)); } for (size_t i = 0; i < 100; i++) { cout << cells[i] << ' '; } cout << endl; float suma = 0; for (int i = 0; i < WIDTH*LENGHT; i++) { if (cells[i] == 0) { cout << "i: " << i << " = 0"<< endl; break; } suma += cells[i]; } cout << "Suma: " << suma << endl; cout << "\n \n primera parte exitosa (?)" << endl; // time before kernel min cudaEventCreate(&ct1); cudaEventCreate(&ct2); cudaEventRecord(ct1); // Search min load 
minReduction<<<gridSize,blockSize>>>(d_cells,outData); // First reduction 8192*8192 -> (8192*8192+255)/ 256 = 262.144 cudaDeviceSynchronize(); minReduction<<<gridSize/blockSize,blockSize>>>(outData,out2); // Second reduction 262.144 -> 262.144/256 = 1024 cudaDeviceSynchronize(); minReduction<<<(gridSize/blockSize)/blockSize,blockSize>>>(out2,out3); // Third reduction 262.144 -> 4 :) cudaDeviceSynchronize(); //Time after min kernel cudaEventRecord(ct2); cudaEventSynchronize(ct2); cudaEventElapsedTime(&dt2, ct1, ct2); float time2 = dt2; std::cout << "Time GPU computing minimum value: " << time2 << "[ms]" << std::endl; // check for errors error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error)); } // Escribiendo resultado en archivo ofstream times_file; times_file.open("results_tarea_4_2.txt", ios_base::app); times_file << input_file_name.c_str() << endl; times_file << "Tiempo en charge kernel: "<< dt << "[ms]" << endl; times_file << "Tiempo en min kernel: "<< dt2 << "[ms]" << endl; cudaMemcpy(y, out3, 4*sizeof(float), cudaMemcpyDeviceToHost); int min=INF; // min load for (size_t i = 0; i < 4; i++) { min = (y[i]<min)?y[i]:min; } cout << min << endl; //cudaFree(x_dev); //cudaFree(y_dev); cudaFree(d_cells); cudaFree(outData); cudaFree(out2); cudaFree(out3); free(cells); free(x_part); free(y_part); return 0; }
13,302
#include <stdio.h>
#include <stdlib.h>

#define NUMBER 100

// Each of the first NUMBER threads adds its own index to arr[tid], NUMBER
// times. Threads only ever touch their own element, so no cross-thread
// synchronization is needed.
// BUG FIX: the original placed __syncthreads() inside the divergent
// `if (threadIdx.x < NUMBER)` branch while launching 512 threads — a barrier
// not reached by all threads of the block is undefined behavior.
__global__ void demo(int *arr){
    int tid = threadIdx.x;
    if (tid < NUMBER) {
        for (int i = 0; i < NUMBER; i++) {
            arr[tid] = arr[tid] + tid;
        }
    }
}

int main(int argc, char **argv){
    int *arr;
    cudaError_t err;

    err = cudaMalloc((void**)&arr, NUMBER*sizeof(int));
    if (err != cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    // cudaMalloc does not zero memory; the kernel accumulates into arr,
    // so give it a defined starting value.
    err = cudaMemset(arr, 0, NUMBER*sizeof(int));
    if (err != cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    dim3 dimGrid(1,1);
    dim3 dimBlock(512,1);
    demo<<<dimGrid,dimBlock>>>(arr);

    // Kernel launches are asynchronous: surface launch-config errors and
    // wait for completion before freeing the buffer.
    err = cudaGetLastError();
    if (err != cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    err = cudaFree(arr);
    if (err != cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 0;
}
13,303
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime_api.h"
#include <stdint.h>
#include <stdlib.h>

//DO NOT EDIT THIS!!!
//This is the working matrix multiplication code - very basic
/* Additional: can now print matrix in a more pleasant manner */

// START of Auxiliary functions

// Kernel: computes P = M * N for square Width x Width matrices.
// Launch layout: a single block of (Width, Width) threads, one thread per
// output element — so Width*Width must not exceed the per-block thread limit.
__global__ void MatrixMulKernel(int *Md, int *Nd, int *Pd, int Width)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int Pvalue = 0;
    for (int k = 0; k < Width; ++k) {
        int Mdelement = Md[ty * Width + k];
        int Ndelement = Nd[k * Width + tx];
        Pvalue += Mdelement * Ndelement;
    }
    Pd[ty * Width + tx] = Pvalue;
}
//End of kernel multiplication

// Host wrapper: copies M and N to the device, launches the kernel, copies
// the product back into P, then prints each element of the result.
void MatrixMul(int *M, int *N, int *P, int Width)
{
    int size = Width * Width * sizeof(int);
    int *Md, *Nd, *Pd;

    cudaMalloc((void**)&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Pd, size);

    dim3 dimBlock(Width, Width);
    dim3 dimGrid(1, 1);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);

    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    for (int w = 0; w < Width * Width; w++) {
        printf("\n");
        // BUG FIX: P holds ints; the original used %f, which is undefined
        // behavior and printed garbage values.
        printf(" %d: %d ", w, P[w]);
        printf("\n");
    }

    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
//End of Matrix multiplication function

// Prints a row-major rows x columns integer matrix.
void printMatrix(const int *M, int rows, int columns)
{
    printf("\n %s: \n", "M");
    for (int v = 0; v < rows; v++) {
        for (int w = 0; w < columns; w++) {
            printf(" %03d ", M[v * columns + w]);
        }
        printf(" \n ");
    }
}//End of printMatrix function

//END of Auxiliary functions

//START of Main function
int main(void)
{
    // const so the stack arrays below are standard C++; the original used
    // a non-const Width, making them non-standard variable-length arrays.
    const int Width = 4;

    int A[Width * Width];
    for (int x = 0; x < Width * Width; x++) {
        A[x] = 2;
    }
    int B[Width * Width];
    for (int z = 0; z < Width * Width; z++) {
        B[z] = 2;
    }
    int C[Width * Width];

    MatrixMul(A, B, C, Width);
    printMatrix(C, Width, Width);
}
//END of Main function
13,304
#include <iterator>
#include <iostream>
#include <functional>
#include <cstdio>
#include <cuda.h>

// sample of functor passed to kernel

// Abort main() with a diagnostic on any failing CUDA runtime call.
#define cudaCheck(stmt) do { \
        cudaError_t err = stmt; \
        if( err != cudaSuccess ) { \
            std::clog << "Failed to run stmt [" << #stmt << "] (err=" << err << "): " << cudaGetErrorString(err) << '\n'; \
            cudaDeviceReset(); \
            return -1; \
        } \
    } while(0)

// Device-side binary functors handed to the kernel by value.
struct Add {
    __device__ float operator()( float a, float b ) const { return a+b; }
};
struct Sub {
    __device__ float operator()( float a, float b ) const { return a-b; }
};

// Applies `op` element-wise over the first `len` entries of both inputs.
// One thread per element; extra threads in the block do nothing.
template < typename Operation >
__global__ void do_op( float* input, float* input2, float* output, int len , Operation op )
{
    const int tid = threadIdx.x;
    if( tid < len ) {
        printf( "Thread %d: [%f + %f]\n", tid, input[tid], input2[tid] );
        output[tid] = op( input[tid], input2[tid] );
    }
}

int main( int argc, char** argv )
{
    const int n = 6;
    float i1[] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
    float i2[] = { .1, .2, .3, .4, .5, .6 };
    float i3[n] = { 0 };   // zero-initialized, same as the original memset

    cudaDeviceReset();

    float *d1, *d2, *d3;
    cudaCheck(cudaMalloc( (void**)&d1, n * sizeof(float) ));
    cudaCheck(cudaMalloc( (void**)&d2, n * sizeof(float) ));
    cudaCheck(cudaMalloc( (void**)&d3, n * sizeof(float) ));

    cudaCheck(cudaMemcpy( d1, i1, n*sizeof(float), cudaMemcpyHostToDevice ));
    cudaCheck(cudaMemcpy( d2, i2, n*sizeof(float), cudaMemcpyHostToDevice ));

    for( int i = 0; i < n; ++i )
        std::cout << "i1[" << i << "] = " << i1[i] << std::endl;

    dim3 grid( 1, 1, 1 );
    dim3 block( n, 1, 1 );
    do_op<<< grid, block >>>( d1, d2, d3, n, Sub() );
    cudaCheck( cudaPeekAtLastError() );

    std::cout << "Before:\n";
    std::copy( i3, i3+n, std::ostream_iterator<float>(std::cout,"\n") );

    cudaCheck(cudaMemcpy( i3, d3, n*sizeof(float), cudaMemcpyDeviceToHost ));

    std::cout << "After:\n";
    std::copy( i3, i3+n, std::ostream_iterator<float>(std::cout,"\n") );

    cudaCheck(cudaFree( d1 ));
    cudaCheck(cudaFree( d2 ));
    cudaCheck(cudaFree( d3 ));
    cudaDeviceReset();
    return 0;
}
13,305
#include<stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define NUM_ROWS 4
#define NUM_COLS 4

// Element-wise addition of two row-major matrices stored in linear memory.
// Expects the 2D grid/block launch to cover exactly NUM_ROWS*NUM_COLS threads.
__global__ void add2(int* da, int* db, int* dc)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int idy = blockDim.y * blockIdx.y + threadIdx.y;
    int id = idy * gridDim.x * blockDim.x + idx;
    dc[id] = da[id] + db[id];
}

int main()
{
    int ha[NUM_ROWS][NUM_COLS];
    int hb[NUM_ROWS][NUM_COLS];
    int hc[NUM_ROWS][NUM_COLS];
    int *da = NULL, *db = NULL, *dc = NULL;
    int iSize = NUM_ROWS * NUM_COLS * sizeof(int);
    cudaError_t cuError = cudaSuccess;

    // 2x2 grid of 2x2 blocks -> exactly NUM_ROWS*NUM_COLS threads.
    dim3 dimGrid (NUM_ROWS/2, NUM_COLS/2, 1);
    dim3 dimBlock (NUM_ROWS/2, NUM_COLS/2, 1);

    for (int i = 0; i < NUM_ROWS; i++) {
        for (int j = 0; j < NUM_COLS; j++) {
            ha[i][j] = rand() % 10 + 1;
            hb[i][j] = rand() % 10 + 1;
        }
    }

    cuError = cudaMalloc((void**)&da, iSize);
    if (cudaSuccess != cuError) {
        printf ("Failed to allocate memory\n");
        return 1;
    }
    cuError = cudaMemcpy(da, ha, iSize, cudaMemcpyHostToDevice);
    if (cudaSuccess != cuError) {
        cudaFree (da);
        printf ("Failed in Memcpy 1\n");
        return 1;
    }
    cuError = cudaMalloc((void**)&db, iSize);
    if (cudaSuccess != cuError) {
        cudaFree (da);              // BUG FIX: da was leaked on this path
        printf ("Failed to allocate memory\n");
        return 1;
    }
    cuError = cudaMemcpy(db, hb, iSize, cudaMemcpyHostToDevice);
    if (cudaSuccess != cuError) {
        cudaFree (da);              // BUG FIX: da was leaked on this path
        cudaFree (db);
        printf ("Failed in Memcpy 1\n");
        return 1;
    }
    cuError = cudaMalloc((void**)&dc, iSize);
    if (cudaSuccess != cuError) {
        cudaFree (da);              // BUG FIX: da and db were leaked here
        cudaFree (db);
        printf ("Failed to allocate memory\n");
        return 1;
    }

    add2<<<dimGrid, dimBlock>>>(da, db, dc);
    cuError = cudaGetLastError();
    if (cudaSuccess != cuError) {
        printf ("Failed in kernel launch and reason is %s\n", cudaGetErrorString(cuError));
        cudaFree (da);              // BUG FIX: all three buffers leaked here
        cudaFree (db);
        cudaFree (dc);
        return 1;
    }

    cuError = cudaMemcpy(hc, dc, iSize, cudaMemcpyDeviceToHost);
    if (cudaSuccess != cuError) {
        cudaFree (da);              // BUG FIX: only dc was freed before
        cudaFree (db);
        cudaFree (dc);
        printf ("Failed in Memcpy 2\n");
        return 1;
    }

    bool success = true;
    for (int i = 0; i < NUM_ROWS; i++) {
        for (int j = 0; j < NUM_COLS; j++) {
            if ((ha[i][j] + hb[i][j]) != hc[i][j]) {
                printf( "Error: %d + %d != %d\n", ha[i][j], hb[i][j], hc[i][j] );
                success = false;
            }
        }
    }
    if (success)
        printf( "We did it!\n" );

    cudaFree (da);
    cudaFree (db);
    cudaFree (dc);
    return 0;
}
13,306
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Converts each character of the input string to its binary representation,
// encoded as a decimal integer whose digits are the bits
// (e.g. 'a' == 97 -> 1100001). One thread per character; the launch uses
// exactly strlen(str) threads, so no bounds guard is needed.
__global__ void convertToBinary(char *str, int *arr)
{
    int i = threadIdx.x;
    int num = str[i], p = 1, temp = num, ans = 0;
    while (temp > 0) {
        ans = ans + p * (temp % 2);
        temp /= 2;
        p *= 10;
    }
    arr[i] = ans;
}

int main()
{
    char *str = (char *) calloc(BUFSIZ, sizeof(char)), *dStr;
    printf("Enter the string\n");
    // BUG FIX: check the read so a failed scanf does not leave str empty
    // and fall through to an invalid kernel launch.
    if (scanf("%[^\n]%*c", str) != 1) {
        free(str);
        return 1;
    }
    int len = strlen(str), *dArr;
    if (len == 0) {
        // A <<<1, 0>>> launch is an invalid configuration; nothing to do.
        free(str);
        return 0;
    }
    int *arr = (int *) calloc(len, sizeof(int));
    cudaMalloc(&dStr, len);
    cudaMalloc(&dArr, sizeof(int) * len);
    cudaMemcpy(dStr, str, len, cudaMemcpyHostToDevice);
    // Note: no H2D copy of arr — the kernel overwrites every element,
    // so the original upload of the zeroed buffer was redundant.
    convertToBinary<<<1, len>>>(dStr, dArr);
    cudaMemcpy(arr, dArr, sizeof(int) * len, cudaMemcpyDeviceToHost);
    printf("The binary values of each character is\n");
    for (int i = 0; i < len; i++) {
        printf("%i ", arr[i]);
    }
    printf("\n");
    cudaFree(dStr);
    cudaFree(dArr);   // BUG FIX: dArr was previously leaked
    free(str);        // BUG FIX: host buffers were previously leaked
    free(arr);
}
13,307
#include <iostream>
#include <fstream>
#include <sstream>
#include <chrono>
#include <vector>
#include <cmath>
#include <dirent.h>
#include <cstring>

using namespace std;

#define N_REPEAT 3

// Complex numbers data type
typedef float2 Cplx;

// Complex addition: (a.x + b.x, a.y + b.y).
static __device__ __host__ inline Cplx CplxAdd(Cplx a, Cplx b) {
    Cplx c;
    c.x = a.x + b.x;
    c.y = a.y + b.y;
    return c;
}

// Additive inverse (negation) of a complex number.
static __device__ __host__ inline Cplx CplxInv(Cplx a) {
    Cplx c;
    c.x = -a.x;
    c.y = -a.y;
    return c;
}

// Complex multiplication: (a.x*b.x - a.y*b.y, a.x*b.y + a.y*b.x).
// BUG FIX: the real part was previously computed as a.x*b.x - a.y + b.y,
// which is not complex multiplication and corrupts every FFT butterfly.
static __device__ __host__ inline Cplx CplxMul(Cplx a, Cplx b) {
    Cplx c;
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;
    return c;
}

/**
 * Reorders array by bit-reversing the indexes.
 * NOTE(review): there is no bounds guard, so the launch must use exactly n
 * threads (gridDim.x * nthr == n); a rounded-up grid writes out of bounds.
 */
__global__ void bitrev_reorder(Cplx* __restrict__ r, Cplx* __restrict__ d, int s, size_t nthr) {
    int id = blockIdx.x * nthr + threadIdx.x;
    r[__brev(id) >> (32 - s)] = d[id];
}

/**
 * Inner part of FFT loop. Contains the butterfly procedure itself.
 * Guarded so threads past the end of the data do nothing.
 */
__device__ void inplace_fft_inner(Cplx* __restrict__ r, int j, int k, int m, int n) {
    if (j + k + m / 2 < n) {
        Cplx t, u;
        // Twiddle factor e^{-2*pi*i*k/m} via fast device trig intrinsics.
        t.x = __cosf((2.0 * M_PI * k) / (1.0 * m));
        t.y = -__sinf((2.0 * M_PI * k) / (1.0 * m));
        u = r[j + k];
        t = CplxMul(t, r[j + k + m / 2]);
        r[j + k] = CplxAdd(u, t);
        r[j + k + m / 2] = CplxAdd(u, CplxInv(t));
    }
}

/**
 * Middle part of FFT for small scope paralelism: one thread per butterfly k
 * within a fixed sub-transform j.
 */
__global__ void inplace_fft(Cplx* __restrict__ r, int j, int m, int n, size_t nthr) {
    int k = blockIdx.x * nthr + threadIdx.x;
    inplace_fft_inner(r, j, k, m, n);
}

/**
 * Outer part of FFT for large scope paralelism: one thread per sub-transform
 * j, looping over its m/2 butterflies sequentially.
 */
__global__ void inplace_fft_outer(Cplx* __restrict__ r, int m, int n, size_t nthr) {
    int j = (blockIdx.x * nthr + threadIdx.x) * m;
    for (int k = 0; k < m / 2; k++) {
        inplace_fft_inner(r, j, k, m, n);
    }
}

/**
 * Runs in-place Iterative Fast Fourier Transformation.
*/ void fft(Cplx* __restrict__ d, size_t n, size_t threads, int balance) { size_t data_size = n * sizeof(Cplx); Cplx *r, *dn; // Copy data to GPU cudaMalloc((void**)&r, data_size); cudaMalloc((void**)&dn, data_size); cudaMemcpy(dn, d, data_size, cudaMemcpyHostToDevice); // Bit-reversal reordering int s = log2(n); bitrev_reorder<<<ceil(n / threads), threads>>>(r, dn, s, threads); // Synchronize cudaDeviceSynchronize(); // Iterative FFT (with loop paralelism balancing) for (int i = 1; i <= s; i++) { int m = 1 << i; if (n/m > balance) { inplace_fft_outer<<<ceil((float)n / m / threads), threads>>>(r, m, n, threads); } else { for (int j = 0; j < n; j += m) { float repeats = m / 2; inplace_fft<<<ceil(repeats / threads), threads>>>(r, j, m, n, threads); } } } // Copy data from GPU & free the memory blocks Cplx* result; result = (Cplx*)malloc(data_size / 2); cudaMemcpy(result, r, data_size / 2, cudaMemcpyDeviceToHost); cudaFree(r); cudaFree(dn); } /** * Reads numeric data from a file. */ void read_file(const char* filename, vector<Cplx>& out) { ifstream file; file.open(filename); if (file.is_open()) { while (!file.eof()) { Cplx val; if (file >> val.x) { val.y = 0; out.push_back(val); } } } else { cerr << "Can't open file " << filename << " for reading." << endl; } file.close(); } /** * Saves the result data to an output file. 
*/ void save_results(const char* filename, Cplx* result, size_t count, int sample_rate) { char* outfilename = new char[512]; // Compose the output filename strcpy(outfilename, filename); strcat(outfilename, ".out"); // Create the file ofstream outfile; outfile.open (outfilename); outfile.precision(4); // Save the data outfile << "frequency, value" << endl; for (int i = 0; i < count / 2; i++) { outfile << i * ((float)sample_rate/count) << "," << result[i].x << endl; } outfile.close(); } void compute_file(const char* filename, int sample_rate, size_t threads, int balance) { vector<Cplx> buffer; // Read the file read_file(filename, buffer); int count = buffer.size(); // Display active computation cout << filename << "," << count << "," << sample_rate << "," << threads << "," << balance; cout.flush(); // Start the stopwatch auto start = chrono::high_resolution_clock::now(); // Run FFT algorithm with loaded data fft(&buffer[0], count, threads, balance); // Log the elapsed time auto finish = chrono::high_resolution_clock::now(); auto microseconds = chrono::duration_cast<std::chrono::microseconds>(finish-start); cout << "," << microseconds.count() << endl; // Save the computed data save_results(filename, &buffer[0], count, sample_rate); } int main(int argc, char** argv) { srand (time(NULL)); // Deal with program arguments if (argc < 2) { cerr << "Usage: " << argv[0] << " [input_folder]"; return 2; } // Initialize CUDA cudaFree(0); // Print out the CSV header cout << "file,samples,sample_rate,threads,balance,elapsed_us" << endl; // Read the folder DIR* dirp = opendir(argv[1]); struct dirent *epdf; // Compute all files in the folder while ((epdf = readdir(dirp)) != NULL) { size_t len = strlen(epdf->d_name); // Pick only .dat files if (strcmp(epdf->d_name,".") != 0 && strcmp(epdf->d_name,"..") != 0 && strcmp(&epdf->d_name[len-3], "dat") == 0) { stringstream fname(epdf->d_name); string samples, sr; // Read file properties getline(fname, samples, '@'); getline(fname, sr, '.'); 
char* fold = new char[512]; strcpy(fold, argv[1]); int smp = atoi(samples.c_str()); // Compute for all set parameters for (int th = 0; th <= 1024; th <<= 1) { if (th == 0) th = 1; for (int bal = 2; bal <= smp / 2; bal <<= 1) for (int r = 0; r < N_REPEAT; r++) { char fname[512]; strcpy(fname, fold); strcat(strcat(fname, "/"), epdf->d_name); compute_file(fname, atoi(sr.c_str()), th, bal); } } } } closedir(dirp); return 0; }
13,308
// #include "gpu_runtime.h" // // the shape of bn_scale/bias 1*C*1*1 // int CuDNN_DLGpuRelu(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){ // int dev_id = (input_X->ctx).device_id; // cudaSetDevice(dev_id); // cudnn_init(dev_id, stream_handle); // int input_N = input->shape[0]; // int input_C = input->shape[1]; // int input_H = input->shape[2]; // int input_W = input->shape[3]; // if(p != NULL){ // int size_input = 1, size_output = 1; // for(int i = 0; i < input -> ndim; i++) // size_input *= input -> shape[i]; // for(int i = 0; i < output -> ndim; i++) // size_output *= output -> shape[i]; // p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024; // p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024; // p -> workspace_memory = 0; // cudnnTensorDescriptor_t input_desc; // CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc)); // CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W)); // cudnnTensorDescriptor_t output_desc; // CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc)); // CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W)); // cudnnActivationDescriptor_t activation_desc; // CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc)); // CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv // float *input_data = (float *)(input->data); // float *output_data = (float *)(output->data); // // Insert the begin and end event. 
// cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventRecord(start,0); // float alpha = 1.0f; // float beta = 0.0f; // CUDNN_CALL(cudnnActivationForward(cudnn_map[dev_id], activation_desc, &alpha, // input_desc, input_data, // &beta, // output_desc, output_data)); // float elapsedTime; // cudaEventCreate(&stop); // cudaEventRecord(stop,0); // cudaEventSynchronize(stop); // cudaEventElapsedTime(&elapsedTime, start,stop); // p->time = elapsedTime; // CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc)); // CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc)); // CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc)); // CUDNN_CALL(cudnnDestroy(cudnn_handle)); // }else{ // // input // cudnnTensorDescriptor_t input_desc; // CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc)); // CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W)); // cudnnTensorDescriptor_t output_desc; // CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc)); // CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W)); // cudnnActivationDescriptor_t activation_desc; // CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc)); // CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv // float *input_data = (float *)(input->data); // float *output_data = (float *)(output->data); // float alpha = 1.0f; // float beta = 0.0f; // CUDNN_CALL(cudnnActivationForward(cudnn_handle, activation_desc, &alpha, // input_desc, input_data, // &beta, // output_desc, output_data)); // CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc)); // CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc)); // CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc)); // CUDNN_CALL(cudnnDestroy(cudnn_handle)); // } // return 0; // } // int CuDNN_DLGpuReluGradient(const DLArrayHandle input, const 
DLArrayHandle in_grad, // DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL) // int dev_id = (input_X->ctx).device_id; // cudaSetDevice(dev_id); // cudnn_init(dev_id, stream_handle); // int input_N = input->shape[0]; // int input_C = input->shape[1]; // int input_H = input->shape[2]; // int input_W = input->shape[3]; // if(p != NULL){ // int size_input = 1, size_output = 1; // for(int i = 0; i < input -> ndim; i++) // size_input *= input -> shape[i]; // for(int i = 0; i < output -> ndim; i++) // size_output *= output -> shape[i]; // p -> input_memory = 2.0 * (size_input) * sizeof(float) / 1024 / 1024; // p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024; // p -> workspace_memory = 0; // cudnnTensorDescriptor_t input_desc; // CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc)); // CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W)); // cudnnTensorDescriptor_t output_desc; // CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc)); // CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W)); // cudnnActivationDescriptor_t activation_desc; // CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc)); // CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv // float *input_data = (float *)(input->data); // float *output_data = (float *)(output->data); // // Insert the begin and end event. 
// cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventRecord(start,0); // float alpha = 1.0f; // float beta = 0.0f; // CUDNN_CALL(cudnnActivationForward(cudnn_map[dev_id], activation_desc, &alpha, // input_desc, input_data, // &beta, // output_desc, output_data)); // float elapsedTime; // cudaEventCreate(&stop); // cudaEventRecord(stop,0); // cudaEventSynchronize(stop); // cudaEventElapsedTime(&elapsedTime, start,stop); // p->time = elapsedTime; // CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc)); // CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc)); // CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc)); // CUDNN_CALL(cudnnDestroy(cudnn_handle)); // } // return 0; // }
13,309
//pass
//--blockDim=32 --gridDim=2
#include <cuda.h>

// Minimal kernel exercising a device-wide memory fence (verifier test case).
__global__ void foo() {
    __threadfence();
}

int main(){
    foo<<<1,2>>>();
    //ESBMC_verify_kernel(foo,1,2);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent with identical blocking semantics.
    cudaDeviceSynchronize();
    return 0;
}
13,310
#include<stdio.h> #include<string.h> #include<vector> #include<iostream> #include<utility> #include<algorithm> #include <time.h> #include<math.h> #define block_size 256 #define thread_size 256 using namespace std; __device__ int bit_count(unsigned int i) { i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } __global__ void gpu_inter(unsigned int * query,unsigned int** bank,unsigned int** d_result,int *d_count,int start,int max,int size) { const int tid = threadIdx.x; const int bid = blockIdx.x; int i,j; //move the query on the the sharded memory //use parella to compute all the result for(i=bid + start ; i<size ; i+= gridDim.x) for(j=tid ; j<max ; j+=blockDim.x) d_result[i][j] = query[j] & bank[i][j]; for(i=bid + gridDim.x*tid + start ; i<size ; i+= gridDim.x*blockDim.x) { d_count[i] = 0; for(j=0;j<max;j++) d_count[i] += bit_count(d_result[i][j]); } } class ECLAT{ public: int min_sup; FILE* output; unsigned int **d_data; unsigned int **h_data; unsigned int **data; unsigned int **h_result; unsigned int **d_result; unsigned int *d_query; int *h_count; int *d_count; int *pre; int max,size; vector<unsigned int**> result; vector<int*> count; ECLAT(void){ } ECLAT(vector< pair< int , vector<int> > > &input_data,int max,double min_sup,char* output_file){ this->min_sup = int(ceil(min_sup)); this->output = fopen(output_file,"w"); this->max = (max+31)/32; this->init(input_data); } void print(unsigned *x) { for(int j=0;j<this->max*32;j++) { if(x[j/32] & 1UL<<(j%32)) printf("1"); else printf("0"); if(j && j%32==0) printf(" "); } printf("\n"); fflush(stdout); } void init(vector< pair< int , vector<int> > > &input_data) { //here we first filter out the un sup data vector< pair<int,unsigned int*> > data_temp; //finst parsing the data for(int i=0;i<input_data.size();i++) if(input_data[i].second.size()>=this->min_sup) { unsigned int *temp = new unsigned int[this->max]; 
memset(temp,0,this->max*sizeof(int)); for(int j=0;j<input_data[i].second.size();j++) temp[input_data[i].second[j]/32] |= (1UL << (input_data[i].second[j]%32)); data_temp.push_back( make_pair(input_data[i].first,temp) ); } //put the data into cpu memory this->size = data_temp.size(); this->pre = new int[data_temp.size()]; this->data = new unsigned int*[data_temp.size()]; for(int i=0 ; i<data_temp.size() ; i++) { this->pre[i] = data_temp[i].first; this->data[i] = data_temp[i].second; //printf("%3d:",this->pre[i]); //print(this->data[i]); } //we should alloc all the memory first XD this->h_data = new unsigned int*[data_temp.size()]; this->h_result = new unsigned int*[data_temp.size()]; //alloc memory to 2d array cudaMalloc(&(this->d_data), data_temp.size()*sizeof(unsigned int*)); cudaMalloc(&(this->d_result), data_temp.size()*sizeof(unsigned int*)); cudaMemcpy(this->d_data, this->data, data_temp.size()*sizeof(unsigned int*), cudaMemcpyHostToDevice); printf("finish 2d\n"); for(int i=0; i<data_temp.size(); i++){ //alloc memory to 1d array cudaMalloc(&(this->h_data[i]), (this->max)*sizeof(unsigned int)); cudaMalloc(&(this->h_result[i]), (this->max)*sizeof(unsigned int)); cudaMemcpy(this->h_data[i], this->data[i], (this->max)*sizeof(unsigned int) , cudaMemcpyHostToDevice); cudaMemcpy(&(this->d_data[i]), &(this->h_data[i]), sizeof(unsigned int*), cudaMemcpyHostToDevice); cudaMemcpy(&(this->d_result[i]), &(this->h_result[i]), sizeof(unsigned int*), cudaMemcpyHostToDevice); } printf("finish 1d\n"); cudaMalloc((void**)&(this->d_query), this->max*sizeof(unsigned int)); cudaMalloc((void**)&(this->d_count), this->size* sizeof(int)); printf("max:%d this->size:%d\n",this->max,this->size); } // use_gpu( bit , now, result, h_count); void use_gpu( unsigned int *query,int now,unsigned int**result,int* h_count) { //we only copy the data here cudaMemcpy(this->d_query, query, this->max*sizeof(unsigned int), cudaMemcpyHostToDevice); //printf("start count\n"); 
gpu_inter<<<block_size,thread_size,0>>>(this->d_query,this->d_data,this->d_result,this->d_count,now,this->max,this->size); cudaDeviceSynchronize(); //printf("finish count\n"); //move result and count back to the cpu cudaMemcpy(this->h_result,this->d_result, this->size*sizeof(unsigned int*), cudaMemcpyDeviceToHost); for (int i=now; i<this->size; i++) cudaMemcpy(result[i],this->h_result[i], this->max*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(h_count,this->d_count, this->size*sizeof(int), cudaMemcpyDeviceToHost); } void find(vector<int> &arr,int idx, unsigned int* bit,int now) { int i; int* count_temp; unsigned int** result_temp; //printf("idx:%d now %d\n",idx,now); while(arr.size()<=idx) { arr.push_back(0); count_temp = new int[this->size]; result_temp = new unsigned int*[this->size]; for(i=0;i<this->size;i++) result_temp[i] = new unsigned int[this->max]; this->result.push_back(result_temp); this->count.push_back(count_temp); } //printf("use gpu\n"); use_gpu( bit , now, this->result[idx], this->count[idx]); /* printf("query: "); print(bit); for(i=0;i<this->size;i++) { printf("%3d %3d:",h_count[i],this->pre[i]); print(result[i]); } */ for(;now<this->size;now++) { //printf("in\n"); //since we share the memory if( this->count[idx][now] >= this->min_sup) { arr[idx] = this->pre[now]; for(i=0 ; i<idx+1 ; i++) fprintf(this->output,"%d ",arr[i]+1); fprintf(this->output,"(%d)\n",this->count[idx][now]); find(arr,idx+1,this->result[idx][now],now+1); } //printf("out\n"); } } void finish() { cudaFree(this->d_query); cudaFree(this->d_count); int i,j; for(i=0; i<this->size; i++){ cudaFree(this->h_data[i]); cudaFree(this->h_result[i]); } cudaFree(this->d_data); cudaFree(this->d_result); for(i=0;i<this->result.size();i++) { for(j=0;j<this->size;j++) delete(this->result[i][j]); delete(this->result[i]); delete(this->count[i]); } } }; int main(int argc,char * argv[]) { time_t start, end; start = clock(); int id,i; char str[4096]; char* fir; vector< pair< int , 
vector<int> > > data; FILE *in; //here we first deal with the input data i = 0; in = fopen(argv[1],"r"); int max = 0; printf("parsing data\n"); while(fgets(str,4096,in)) { fir = strtok(str," "); while(fir != NULL) { sscanf(fir,"%d",&id); while(data.size()<id) data.push_back( make_pair(data.size(),vector<int>())); data[id-1].second.push_back(i); if(max<i) max=i; fir = strtok(NULL," "); } i++; } double min_sup; sscanf(argv[2],"%lf",&min_sup); printf("initial\n"); ECLAT eclat(data,max,min_sup*i,argv[3]); pair< vector<int> , unsigned int* > head; head.first.clear(); head.second = new unsigned int[eclat.max]; printf("eclat.max %d\n",eclat.max); for(int i=0;i<eclat.max;i++) head.second[i] = 0xFFFFFFFF; //cout << head.second; printf("find freq\n"); eclat.find(head.first,0,head.second,0); printf("finish"); eclat.finish(); delete(head.second); end = clock(); double diff = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Time = %f\n", diff); }
13,311
#include <stdio.h>
#include <math.h>

/* Print the SoA lattice: 4 direction planes of rows*cols ints each,
 * with a '|' marker after every 9th printed value. */
void printMatrix(const int *A, int rows, int cols) {
    for (int i = 0; i < rows*cols*4; i++) {
        printf("%d ", A[i]);
        printf(" ");
        if ((i+1)%9 == 0) {
            printf("|");
        }
    }
    printf("\n");
};

/* Read "rows cols" followed by rows*cols*4 ints (structure-of-arrays:
 * one plane per lattice direction) from 'filename'.
 * The caller owns the malloc'd buffer returned through *Soa. */
void readInput_soa(const char *filename, int **Soa, int *rows, int *cols) {
    FILE *file;
    file = fopen(filename, "r");
    fscanf(file, "%d %d", rows, cols);
    int * A_F0 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
    for (int i = 0; i < *rows*(*cols)*(4); i++) {
        fscanf(file, "%d ", &A_F0[i]);
    }
    fclose(file);
    *Soa = A_F0;
};

/* One lattice-gas propagation step with periodic boundaries.
 * 'buffer' holds the previous state, 'array' receives the new one.
 * Plane p (p = 0..3) of each rows*cols block holds one direction. */
__global__ void step_periodic(int * array, int *buffer, int rows, int cols) {
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < rows*cols) {
        int x = tId%(cols);
        /* BUG FIX: the row index is tId/cols, not tId/rows — the two only
         * coincide for square lattices. */
        int y = (int) tId/cols;

        /* plane 0: take from the left neighbour (x-1, wrapped) */
        int c_aux = x - 1;
        if (c_aux < 0) {
            c_aux = cols - 1;
        }
        if (buffer[(y*cols + c_aux) + rows*cols] == 1 && buffer[(y*cols + c_aux) + 3*rows*cols] == 1) {
            array[tId] = 1;
        } else if (buffer[(y*cols + c_aux)] == 1 && buffer[(y*cols + c_aux) + 2*rows*cols] == 1) {
            array[tId] = 0;
        } else if (buffer[(y*cols + c_aux)] == 1) {
            array[tId] = 1;
        } else if (buffer[(y*cols + c_aux)] == 0) {
            array[tId] = 0;
        }

        /* plane 2: take from the right neighbour (x+1, wrapped) */
        c_aux = x + 1;
        if (c_aux == cols) {
            c_aux = 0;
        }
        if (buffer[(y*cols + c_aux) + rows*cols] == 1 && buffer[(y*cols + c_aux) + 3*rows*cols] == 1) {
            array[tId+2*rows*cols] = 1;
        } else if (buffer[(y*cols + c_aux)] == 1 && buffer[(y*cols + c_aux) + 2*rows*cols] == 1) {
            array[tId+2*rows*cols] = 0;
        } else if (buffer[(y*cols + c_aux) + 2*rows*cols] == 1) {
            array[tId+2*rows*cols] = 1;
        } else if (buffer[(y*cols + c_aux) + 2*rows*cols] == 0) {
            array[tId+2*rows*cols] = 0;
        }

        /* plane 1: take from the row above (y-1, wrapped).
         * NOTE: the original had a dead "c_aux = y - 1; if (c_aux < 0) ..."
         * wrap-around here that was immediately overwritten by the modulo
         * expression below; it has been removed. */
        c_aux = (((y-1)%rows)+rows)%rows*cols;
        if (buffer[(c_aux + x)] == 1 && buffer[(c_aux + x) + 2*rows*cols] == 1) {
            array[tId+rows*cols] = 1;
        } else if (buffer[(c_aux + x)+rows*cols] == 1 && buffer[(c_aux + x) + 3*rows*cols] == 1) {
            array[tId+rows*cols] = 0;
        } else if (buffer[(c_aux + x) + rows*cols] == 1) {
            array[tId+rows*cols] = 1;
        } else if (buffer[(c_aux + x) + rows*cols] == 0) {
            array[tId+rows*cols] = 0;
        }

        /* plane 3: take from the row below (y+1, wrapped) */
        c_aux = (((y+1)%rows)*cols);
        if (buffer[(c_aux + x)] == 1 && buffer[(c_aux + x) + 2*rows*cols] == 1) {
            array[tId+3*rows*cols] = 1;
        } else if (buffer[(c_aux + x)+rows*cols] == 1 && buffer[(c_aux + x) + 3*rows*cols] == 1) {
            array[tId+3*rows*cols] = 0;
        } else if (buffer[(c_aux + x) + 3*rows*cols] == 1) {
            array[tId+3*rows*cols] = 1;
        } else if (buffer[(c_aux + x) + 3*rows*cols] == 0) {
            array[tId+3*rows*cols] = 0;
        }
    }
}

int main(int argc, char const *argv[]) {
    int rows, cols;
    int *array;
    int *d_array;
    int *d_buffer;
    readInput_soa("../initial.txt", &array, &rows, &cols);
    int n = (int)(rows*cols);
    int block_size = 256;
    int grid_size = (int) ceil((float) n / block_size);
    cudaMalloc(&d_array, 4*rows * cols * sizeof(int));
    cudaMalloc(&d_buffer, 4*rows*cols*sizeof(int));
    cudaMemcpy(d_array, array, 4* rows * cols * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_buffer, array, 4* rows * cols * sizeof(int), cudaMemcpyHostToDevice);
    /* 1000 steps: the kernel reads d_buffer and writes d_array, then the
     * new state is copied back so the next step reads it. */
    for (int k = 0; k < 1000; k++) {
        step_periodic<<<grid_size, block_size>>>(d_array, d_buffer, rows, cols);
        cudaMemcpy(d_buffer, d_array, 4*rows*cols * sizeof(int), cudaMemcpyDeviceToDevice);
    }
    cudaMemcpy(array, d_array, 4*rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_array);
    cudaFree(d_buffer);
    free(array);  /* fix: host buffer from readInput_soa was leaked */
    return (0);
}
13,312
// Tests that CUDA attributes are warnings when compiling C files, but not when
// compiling CUDA files.
//
// RUN: %clang_cc1 -fsyntax-only -verify %s
// RUN: %clang_cc1 -fsyntax-only -fcuda-is-device -verify %s

// Now pretend that we're compiling a C file. There should be warnings.
// RUN: %clang_cc1 -DEXPECT_WARNINGS -fsyntax-only -verify -x c %s

#if defined(EXPECT_WARNINGS)
// expected-warning@+12 {{'device' attribute ignored}}
// expected-warning@+12 {{'global' attribute ignored}}
// expected-warning@+12 {{'constant' attribute ignored}}
// expected-warning@+12 {{'shared' attribute ignored}}
// expected-warning@+12 {{'host' attribute ignored}}
//
// NOTE: IgnoredAttr in clang which is used for the rest of
// attributes ignores LangOpts, so there are no warnings.
#else
// expected-no-diagnostics
#endif

__attribute__((device)) void f_device();
__attribute__((global)) void f_global();
__attribute__((constant)) int* g_constant;
__attribute__((shared)) float *g_shared;
__attribute__((host)) void f_host();
__attribute__((device_builtin)) void f_device_builtin();
typedef __attribute__((device_builtin)) const void *t_device_builtin;
enum __attribute__((device_builtin)) e_device_builtin {E};
__attribute__((device_builtin)) int v_device_builtin;
__attribute__((cudart_builtin)) void f_cudart_builtin();
__attribute__((nv_weak)) void f_nv_weak();
__attribute__((device_builtin_surface_type)) unsigned long long surface_var;
__attribute__((device_builtin_texture_type)) unsigned long long texture_var;

// NOTE(review): the expected-warning@+N offsets above count lines relative to
// each comment's own position; do not insert or remove lines between those
// comments and the attributed declarations they point at.
13,313
//--blockDim=1024 --gridDim=1

/*
 * Increment each element of 'A' with the element that sits
 * 'offset' positions ahead of it.
 *
 * The barrier separates all reads of A from all writes, so
 * no read-write data race can occur.
 */
__global__ void add_neighbour(int *A, int offset) {
  unsigned idx = threadIdx.x;
  // snapshot the neighbour before any thread starts writing
  int neighbour = A[idx + offset];
  // every thread has finished reading once the barrier is passed
  __syncthreads();
  A[idx] += neighbour;
}

int main() {
  return 0;
}
13,314
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

const int size = 1000;

/* Error-check every CUDA runtime call (fix: the original ignored all
 * return codes, so allocation/copy failures passed silently). */
#define CHECK(call)                                                     \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess) {                                      \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__,         \
                    __LINE__, cudaGetErrorString(err_));                \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

void fillVectors(float *A);
void printVector(float *V);
void sumVectors(float *A, float *B, float *C);

int main(int argc, char const *argv[]) {
    float *A = (float *) malloc(size * sizeof(float));
    float *B = (float *) malloc(size * sizeof(float));
    float *C = (float *) malloc(size * sizeof(float));

    fillVectors(A);
    fillVectors(B);
    sumVectors(A, B, C);

    printVector(A);
    printVector(B);
    printVector(C);

    free(A);
    free(B);
    free(C);
    return 0;
}

/* Fill A with 1, 2, ..., size. */
void fillVectors(float *A) {
    for (int i = 0; i < size; i++) {
        A[i] = i + 1;
    }
}

/* Print V (truncated to ints, matching the original output format). */
void printVector(float *V) {
    for (int i = 0; i < size; i++)
        printf("%d ", (int)V[i]);
    printf("\n");
}

/* C[i] = A[i] + B[i], one thread per element, guarded for the
 * rounded-up grid. */
__global__ void vecAddKernel(float *A, float *B, float *C) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < size)
        C[i] = A[i] + B[i];
}

/* Host wrapper: copy A and B to the device, launch the add kernel,
 * copy the result back into C. */
void sumVectors(float *A, float *B, float *C) {
    int n = size * sizeof(float);
    float *d_A, *d_B, *d_C;

    // Allocate device memory and copy the inputs over
    CHECK(cudaMalloc((void**)&d_A, n));
    CHECK(cudaMemcpy(d_A, A, n, cudaMemcpyHostToDevice));
    CHECK(cudaMalloc((void**)&d_B, n));
    CHECK(cudaMemcpy(d_B, B, n, cudaMemcpyHostToDevice));
    CHECK(cudaMalloc((void**)&d_C, n));

    // Run ceil(size/256) blocks of 256 threads each
    vecAddKernel <<< ceil(size/256.0), 256 >>> (d_A, d_B, d_C);
    CHECK(cudaGetLastError());  // catch bad launch configurations

    // Copy C from the device (blocking, so it also syncs the kernel)
    CHECK(cudaMemcpy(C, d_C, n, cudaMemcpyDeviceToHost));

    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
}
13,315
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define NROWS 2048
#define NCOLS 1024
#define NUM_THREADS 32

/* Matrix addition kernel: C = A + B, one thread per element of an
 * NROWS x NCOLS row-major matrix; edge blocks are guarded. */
void __global__ matrixAdd(float *A, float *B, float *C, int numRows, int numCols) {
    unsigned int index;
    // Bounds checks for the partial blocks at the right/bottom edges
    if (blockIdx.y * blockDim.y + threadIdx.y >= numRows) return;
    if (blockIdx.x * blockDim.x + threadIdx.x >= numCols) return;
    index = (blockIdx.y * blockDim.y + threadIdx.y) * numCols + blockIdx.x * blockDim.x + threadIdx.x;
    C[index] = A[index] + B[index];
}

int main() {
    unsigned i, j;

    // Check if matrix is large enough
    if (NROWS < NUM_THREADS || NCOLS < NUM_THREADS) {
        printf("%d or %d < %d\n", NROWS, NCOLS, NUM_THREADS);
        exit(0);
    }

    // Allocate memory on CPU
    float *A, *B, *C;
    A = (float *) malloc(NROWS * NCOLS * sizeof(float));
    B = (float *) malloc(NROWS * NCOLS * sizeof(float));
    C = (float *) malloc(NROWS * NCOLS * sizeof(float));

    // Allocate memory on GPU
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, NROWS * NCOLS * sizeof(float));
    cudaMalloc((void **) &d_B, NROWS * NCOLS * sizeof(float));
    cudaMalloc((void **) &d_C, NROWS * NCOLS * sizeof(float));

    // Initialise data: every row of A and B holds 0..NCOLS-1
    for (i = 0; i < NROWS; i++)
        for (j = 0; j < NCOLS; j++)
            A[i * NCOLS + j] = B[i * NCOLS + j] = j;

    // Allocate CUDA events for timing
    cudaEvent_t event_start, event_stop;
    float timestamp;
    cudaEventCreate(&event_start);
    cudaEventCreate(&event_stop);

    // Copy data to GPU
    cudaEventRecord(event_start, 0);
    cudaMemcpy(d_A, A, NROWS * NCOLS * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, NROWS * NCOLS * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, NROWS * NCOLS * sizeof(float), cudaMemcpyHostToDevice);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied data to GPU in: %lf ms\n", timestamp);

    // Configure and launch kernel
    cudaEventRecord(event_start, 0);
    dim3 numThreads(NUM_THREADS, NUM_THREADS);
    dim3 blockSize(ceil(NCOLS / ((float) NUM_THREADS)), ceil(NROWS / ((float) NUM_THREADS)));
    printf("Num threads: %d, blocks: (%d, %d)\n", NUM_THREADS, blockSize.x, blockSize.y);
    matrixAdd<<<blockSize, numThreads>>>(d_A, d_B, d_C, NROWS, NCOLS);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Performed matrix addition in: %lf ms\n", timestamp);

    // Wait for kernel (fix: cudaThreadSynchronize is deprecated)
    cudaDeviceSynchronize();

    // Get result
    cudaEventRecord(event_start, 0);
    cudaMemcpy(C, d_C, NROWS * NCOLS * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Copied results from GPU in: %lf ms\n", timestamp);

    // Check result (fix: the original printed "Success!" even when
    // mismatches had been reported; also %u for the unsigned indices)
    int ok = 1;
    for (i = 0; i < NROWS; i++)
        for (j = 0; j < NCOLS; j++)
            if (C[i * NCOLS + j] != A[i * NCOLS + j] + B[i * NCOLS + j]) {
                printf("ERROR: %u %u (%f != %f)\n", i, j,
                       A[i * NCOLS + j] + B[i * NCOLS + j], C[i * NCOLS + j]);
                ok = 0;
            }
    if (ok)
        printf("Success!\n");

    // Cleanup (fix: all host and device memory was leaked)
    cudaEventDestroy(event_start);
    cudaEventDestroy(event_stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
13,316
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>

// Row-major index of element (i, j) when each row holds 'rowlength' entries
#define IDX2C(rowlength,i,j) ((rowlength*i)+j)
#define m 2
#define k 3
#define n 2

using namespace std;

// Element-wise addition: thread tid accumulates c[tid] into d[tid]
__global__ void add(int *c , int *d){
    int tid=threadIdx.x;
    d[tid]+=c[tid];
}

// Matrix multiply d += a*b, parallelised with one thread per result row
// (threadIdx.x) and one block per result column (blockIdx.x)
__global__ void multi(int *a , int *b,int *d){
    int tid=threadIdx.x;
    int bid=blockIdx.x;
    for(int l=0;l<k; l++){
        d[IDX2C(n,tid,bid)]+=a[IDX2C(k,tid,l)]*b[IDX2C(n,l,bid)];
    }
}

int main(){
    int *a;
    int *b;
    int *c;
    int *d;
    int i,j;
    // device memory
    int *d_a,*d_b,*d_c,*d_d;

    // allocate host matrices
    a=(int*)malloc( m*k * sizeof(int) );
    b=(int*)malloc( k*n * sizeof(int) );
    c=(int*)malloc( m*n * sizeof(int) );
    d=(int*)malloc( m*n * sizeof(int) );

    int value_a[m*k]={1,0,-3,-2,4,1};
    int value_b[k*n]={2,-1,3,0,-5,2};
    int value_c[m*n]={3,-1,-2,2};

    // fill a, b, c and echo them
    cout<<"a:\n";
    for(i=0; i<m*k; i++){ a[i]=value_a[i]; }
    for(i=0; i<m; i++){
        for(j=0;j<k;j++){ cout<<(a[ IDX2C(k,i,j) ])<<" "; }
        cout<<endl;
    }
    cout<<"b:\n";
    for(i=0; i<k*n; i++){ b[i]=value_b[i]; }
    for(i=0; i<k; i++){
        for(j=0;j<n;j++){ cout<<(b[ IDX2C(n,i,j) ])<<" "; }
        cout<<endl;
    }
    cout<<"c:\n";
    for(i=0; i<m*n; i++){ c[i]=value_c[i]; }
    for(i=0; i<m; i++){
        for(j=0;j<n;j++){ cout<<(c[ IDX2C(n,i,j) ])<<" "; }
        cout<<endl;
    }

    // initialize result to 0
    for(i=0; i<m*n; i++){ d[i]=0; }

    // allocate cuda memory
    cudaMalloc( (void**)&d_a , m*k*sizeof(int) ) ;
    cudaMalloc( (void**)&d_b , k*n*sizeof(int) ) ;
    cudaMalloc( (void**)&d_c , m*n*sizeof(int) ) ;
    cudaMalloc( (void**)&d_d , m*n*sizeof(int) ) ;

    // copy matrices to the device
    cudaMemcpy( d_a,a,m*k*sizeof(int),cudaMemcpyHostToDevice );
    cudaMemcpy( d_b,b,k*n*sizeof(int),cudaMemcpyHostToDevice );
    cudaMemcpy( d_c,c,m*n*sizeof(int),cudaMemcpyHostToDevice );
    // BUG FIX: multi accumulates with +=, but cudaMalloc does not zero
    // memory; copy the zeroed host array so d_d starts at 0
    cudaMemcpy( d_d,d,m*n*sizeof(int),cudaMemcpyHostToDevice );

    // one block per result COLUMN, one thread per result ROW.
    // BUG FIX: the original launched <<<k,m>>> (k=3 blocks), but the kernel
    // uses blockIdx.x as a column index 0..n-1, so block 2 wrote past d_d.
    multi<<<n,m>>>(d_a,d_b,d_d);

    // copy the result back
    cudaMemcpy( d,d_d,m*n*sizeof(int),cudaMemcpyDeviceToHost );
    cout<<"A*B 결과 d:\n";
    for(i=0; i<m; i++){
        for(j=0;j<n;j++){ cout<<(d[ IDX2C(n,i,j) ])<<" "; }
        cout<<endl;
    }

    // one thread per element for the final addition
    add<<<1,m*n>>>(d_c,d_d);

    // copy the result back
    cudaMemcpy( d,d_d,m*n*sizeof(int),cudaMemcpyDeviceToHost );
    cout<<"A*B+C 결과 d:\n";
    for(i=0; i<m; i++){
        for(j=0;j<n;j++){ cout<<(d[ IDX2C(n,i,j) ])<<" "; }
        cout<<endl;
    }

    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_d);
    // free host memory
    free(a);
    free(b);
    free(c);
    free(d);
    return 0;
}
13,317
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <chrono>
#include <random> // For kernel generation
#include <algorithm>
#include <list>

using namespace std;

#pragma region Cuda

const int WARP_SIZE = 32;

// Crude benchmarks suggest 256 is better than 512 and 1024
// TODO: Autotune/use better heuristics, improve speed more.
const int MAX_BLOCK_SIZE = 256;

const int CUDA_NUM_THREADS = 1024;

// Ceil-divide N by the thread count to size a 1-D grid.
int GET_BLOCKS(const int N) {
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

static int getGradParamsNumThreads(int batchSize){
    //warp per item in a batch, up to a maximum
    return std::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE);
}

/* Row pass: one block per input row (gridDim.x == input_dim, 1 thread).
 * For each horizontal 3-window it writes three combinations
 * (l1+l2+l3, l1-l2+l3, l1+l2-l3) into three stacked
 * input_dim x result_dim result planes. */
__global__ void ConvolutionRowwise(const float *input, float *rowwiseResults,
                                   int batch_ix, int channel_ix,
                                   int input_dim, int result_dim) {
    float* res1 = rowwiseResults + (blockIdx.x * result_dim);
    float* res2 = res1 + (input_dim * result_dim);
    float* res3 = res2 + (input_dim * result_dim);
    input = input + (blockIdx.x * input_dim);
    float l1 = input[0], l2 = input[1], l3 = input[2];
    for (int i = 3; i < input_dim; ++i) {
        *res1 = (l1 + l2 + l3); ++res1;
        *res2 = (l1 - l2 + l3); ++res2;
        *res3 = (l1 + l2 - l3); ++res3;
        l1 = l2;
        l2 = l3;
        l3 = input[i];
    }
    *res1 = (l1 + l2 + l3);
    *res2 = (l1 - l2 + l3);
    *res3 = (l1 + l2 - l3);
}

/* Column pass over the three row-pass planes.
 * blockIdx.z selects the rowwiseResults plane {0,1,2},
 * blockIdx.x the top row of the vertical 3-window, blockIdx.y the column.
 * Produces 9 result planes of resultDim x resultDim. */
__global__ void ConvolutionColwise(const float *rowwiseResults, float *colwiseResults,
                                   int inputDim, int resultDim) {
    int topCell = (blockIdx.z *inputDim*resultDim) + (blockIdx.x * resultDim) + blockIdx.y;
    float l1 = rowwiseResults[topCell];
    float l2 = rowwiseResults[topCell + resultDim];
    float l3 = rowwiseResults[topCell + resultDim + resultDim];
    topCell = (blockIdx.z * resultDim * resultDim * 3) + (blockIdx.x * resultDim) + blockIdx.y;
    colwiseResults[topCell] = l1 + l2 + l3;
    topCell += resultDim * resultDim;
    colwiseResults[topCell] = l1 - l2 + l3;
    topCell += resultDim * resultDim;
    colwiseResults[topCell] = l1 + l2 - l3;
}

#pragma endregion

#pragma region Misc

std::default_random_engine randomGeneratorEngine;
std::uniform_real_distribution<float> randomGenerator;

/* Allocate and fill a managed array with 1..size, echoing the values.
 * BUG FIX: cudaMallocManaged takes a size in BYTES; the original passed
 * the element count, so any size > sizeof(float) overran the allocation. */
float *CreateArray(int size) {
    int i;
    float *arr;
    cudaMallocManaged(&arr, size * sizeof(float));
    for (i = 0; i < size; ++i) {
        arr[i] = i + 1;
        cout << arr[i] << " ";
        //arr[i] = (int)(randomGenerator(randomGeneratorEngine) * 10);
    }
    cout << endl;
    return arr;
}

/* Print a rows x cols row-major matrix, one bracketed row per line. */
void PrintMat(float *mat, int rows, int cols) {
    for (int i = 0; i < rows; ++i) {
        cout << "[";
        for (int j = 0; j < cols - 1; ++j) {
            cout << mat[i*cols + j] << " ";
        }
        cout << mat[i*cols + (cols - 1)] << "]";
        if (i < rows - 1) cout << endl;
    }
    cout << endl;
}

/* Invoke 'function' for every combination of the four parameter lists. */
template <typename Function>
void zip(const vector<int> &batchSizes, const vector<int> &inputChannels,
         const vector<int> &outputChannels, const vector<int> &inputDims,
         Function function) {
    for (int batchSize : batchSizes)
        for (int inputChannel : inputChannels)
            for (int outputChannel : outputChannels)
                for (int inputDim : inputDims)
                    function(batchSize, inputChannel, outputChannel, inputDim);
}

#pragma endregion

int main() {
    const vector<int> batchSizes = { 1 };
    const vector<int> inputChannels = { 1 };
    const vector<int> outputChannels = { 1 };
    const vector<int> inputDims = { 10 }; // 16, 32, 64, 128, 256, 512, 650, 1024, 1280, 1500

    std::cout << std::setfill('0') << std::setw(5) << std::fixed << std::setprecision(1);

    zip(batchSizes, inputChannels, outputChannels, inputDims,
        [](int batchIndex, int inputChannel, int outputChannel, int inputDim) {
        float *rowwiseResults;
        int resultDim = inputDim - 2;
        int rowwiseResultsSize = 3 * inputDim * resultDim;
        // BUG FIX: size must be in bytes (see CreateArray)
        cudaMallocManaged(&rowwiseResults, rowwiseResultsSize * sizeof(float));
        float *arr = CreateArray(inputDim * inputDim);

        dim3 grid(inputDim);
        ConvolutionRowwise <<< grid, 1 >>> (arr, rowwiseResults, batchIndex,
                                            inputChannel, inputDim, resultDim);
        cudaDeviceSynchronize();
        PrintMat(rowwiseResults, inputDim, resultDim);
        cout << endl;
        PrintMat(rowwiseResults + inputDim * resultDim, inputDim, resultDim);
        cout << endl;
        PrintMat(rowwiseResults + (2 * (inputDim * resultDim)), inputDim, resultDim);
        cout << endl;

        float *colResults;
        // BUG FIX: bytes, not elements
        cudaMallocManaged(&colResults, 9 * resultDim * resultDim * sizeof(float));
        grid = dim3(resultDim, resultDim, 3);
        ConvolutionColwise <<< grid, 1 >>> (rowwiseResults, colResults, inputDim, resultDim);
        cudaDeviceSynchronize();
        for (int i = 0; i < 9; i++) {
            // BUG FIX: each colwise plane is resultDim x resultDim; printing
            // inputDim rows read past the end of the allocation
            PrintMat(colResults + (i*resultDim*resultDim), resultDim, resultDim);
            cout << endl;
        }

        // fix: managed buffers were leaked on every iteration
        cudaFree(arr);
        cudaFree(rowwiseResults);
        cudaFree(colResults);
    });

    return 0;
}
13,318
#include <iostream>
#include <device_launch_parameters.h>

/**
 * query the gpu info: enumerate all CUDA devices and print a summary of
 * each one's capacity (memory, SMs, per-block limits).
 */
int main1(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount); // pass the address of deviceCount
    std::cout<<"device count:"<<deviceCount<<std::endl;
    for(int i=0;i<deviceCount;i++){
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop,i);
        std::cout<<"device :"<<i<<":"<<prop.name<<std::endl;
        std::cout<<"global mem :"<<prop.totalGlobalMem/1024/1024<<"MB"<<std::endl;
        std::cout<<"sm number :"<<prop.multiProcessorCount<<std::endl;
        std::cout<<"shared mem per thread block :"<<prop.sharedMemPerBlock/1024<<"KB"<<std::endl;
        std::cout<<"max thread num per thread block :"<<prop.maxThreadsPerBlock<<std::endl;
        std::cout<<"register num per thred block :"<<prop.regsPerBlock<<std::endl;
        std::cout<<"max thread num per multi processor :"<<prop.maxThreadsPerMultiProcessor<<std::endl;
        // 32 threads per warp, so this is the warp capacity of one SM
        std::cout<<"max wrap per multi processor :"<<prop.maxThreadsPerMultiProcessor/32<<std::endl;
    }
    return 0;
}
13,319
#include "iostream"

#define N 257

/* arr3[i] = arr1[i] + arr2[i].
 * BUG FIX: the grid is rounded up to a multiple of 256, so threads
 * 257..511 existed with no guard and wrote out of bounds. */
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        arr3[i] = arr1[i] + arr2[i];
}

/* Fill 'arr' with pseudo-random values. */
void initialize_array(float *arr, int size) {
    for (int i = 0; i < size; i++) {
        arr[i] = (float) random();
    }
}

int main() {
    float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
    size_t n_byte = N * sizeof(float);

    arr1 = (float *) malloc(n_byte);
    arr2 = (float *) malloc(n_byte);
    arr3 = (float *) malloc(n_byte);
    initialize_array(arr1, N);
    initialize_array(arr2, N);
    initialize_array(arr3, N);

    printf("start cudaMalloc\n");
    /* BUG FIX: the original allocated N BYTES (room for only 64 floats);
     * the copies below then wrote n_byte bytes past the allocations. */
    cudaMalloc((void **) &d_arr1, n_byte);
    cudaMalloc((void **) &d_arr2, n_byte);
    cudaMalloc((void **) &d_arr3, n_byte);
    printf("finish cudaMalloc\n");

    printf("start cudaMemcpy\n");
    cudaMemcpy(d_arr1, arr1, n_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr2, arr2, n_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr3, arr3, n_byte, cudaMemcpyHostToDevice);
    printf("finish cudaMemcpy\n");

    printf("start kernel function\n");
    sum_of_array<<<(N + 255) / 256, 256>>>(d_arr1, d_arr2, d_arr3);
    printf("finish kernel function\n");

    /* blocking copy: also synchronizes with the kernel above */
    cudaMemcpy(arr3, d_arr3, n_byte, cudaMemcpyDeviceToHost);
    printf("%f", *arr3);

    /* fix: all host and device memory was leaked */
    cudaFree(d_arr1);
    cudaFree(d_arr2);
    cudaFree(d_arr3);
    free(arr1);
    free(arr2);
    free(arr3);
    return 0;
}
13,320
#include <iostream>
#include <cuda.h>

// Report how many CUDA-capable devices are visible.
// Returns 0 on success, 1 if the device count cannot be queried.
extern "C" int use_cuda(void)
{
    int nDevices = 0;
    if (cudaGetDeviceCount(&nDevices) != cudaSuccess) {
        std::cerr << "Failed to retrieve the number of CUDA enabled devices"
                  << std::endl;
        return 1;
    }
    std::cout << "Found " << nDevices << " CUDA enabled devices" << std::endl;
    return 0;
}
13,321
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* c[i] = a[i] + b[i].
 * BUG FIX: N was passed but never checked; with a rounded-up grid the
 * trailing threads wrote out of bounds. */
__global__ void kernel(double *a, double *b, double *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        c[i] = a[i] + b[i];
}

int main(int argc, char **argv)
{
    int N = 1000;
    int sz_in_bytes = N*sizeof(double);

    double *h_a, *h_b, *h_c;
    double *d_a, *d_b, *d_c;

    h_a = (double*)malloc(sz_in_bytes);
    h_b = (double*)malloc(sz_in_bytes);
    h_c = (double*)malloc(sz_in_bytes);

    // Initiate values on h_a and h_b
    for(int i = 0 ; i < N ; i++)
    {
        h_a[i] = 1./(1.+i);
        h_b[i] = (i-1.)/(i+1.);
    }

    cudaMalloc((void**)&d_a, sz_in_bytes);
    /* BUG FIX: d_b was allocated with 0 bytes, so the copy and the kernel
     * accessed unallocated device memory. */
    cudaMalloc((void**)&d_b, sz_in_bytes);
    cudaMalloc((void**)&d_c, sz_in_bytes);

    cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice);

    dim3 dimBlock(64, 1, 1);
    /* BUG FIX: a fixed grid of 10 blocks (640 threads) left elements
     * 640..999 uncomputed; size the grid with a ceiling division. */
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x, 1, 1);
    kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);

    cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Verifying
    double err = 0, norm = 0;
    for(int i = 0 ; i < N ; i++)
    {
        double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
        err += err_loc;
        norm += fabs(h_c[i]);
    }
    if (err/norm < 1.e-16)
    {
        printf("SUCCESS (Relative error : %.3e)\n", err/norm);
    }
    else
    {
        printf("ERROR (Relative error : %.3e)\n", err/norm);
    }

    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
13,322
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <cuda.h>

using namespace std;

/* Blelloch-style exclusive prefix sum over one block of n elements.
 * Each of the n/2 threads loads two elements into shared memory, the
 * up-sweep builds partial sums, the block total is saved to sums[bid],
 * and the down-sweep converts the tree into an exclusive scan. */
__global__ void block_scan(int *output, int *input, int *sums, int n) {
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    int blockOff = bid * n;
    extern __shared__ int buffer[];
    // each thread stages two consecutive elements of its block
    buffer[2 * tid] = input[blockOff + (2 * tid)];
    buffer[2 * tid + 1] = input[blockOff + (2 * tid) + 1];
    int offset = 1;
    // up-sweep (reduce) phase
    for (int d = n >> 1; d > 0; d >>= 1) {
        __syncthreads();
        if (tid < d) {
            int a = offset * (2 * tid + 1) - 1;
            int b = offset * (2 * tid + 2) - 1;
            buffer[b] += buffer[a];
        }
        offset *= 2;
    }
    __syncthreads();
    // record the block total, then clear the root for the exclusive scan
    if (tid == 0) {
        sums[bid] = buffer[n - 1];
        buffer[n - 1] = 0;
    }
    // down-sweep phase
    for (int d = 1; d < n; d *= 2) {
        offset >>= 1;
        __syncthreads();
        if (tid < d) {
            int a = offset * (2 * tid + 1) - 1;
            int b = offset * (2 * tid + 2) - 1;
            int t = buffer[a];
            buffer[a] = buffer[b];
            buffer[b] += t;
        }
    }
    __syncthreads();
    output[blockOff + (2 * tid)] = buffer[2 * tid];
    output[blockOff + (2 * tid) + 1] = buffer[2 * tid + 1];
}

/* Add the per-block increment n[blockId] to every element of the
 * block's slice of 'output' (uniform-add step of the scan). */
__global__ void add(int *output, int length, int *n) {
    int blockId = blockIdx.x;
    int tid = threadIdx.x;
    int blockOffset = blockId * length;
    output[blockOffset + tid] += n[blockId];
}

/* predicates[idx] = 1 when bit 'bit' of input[idx] is CLEAR, else 0
 * (so the scan of predicates counts the "zero bit" elements). */
__global__ void markBit(int *input, int *predicates, int bit, int length) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= length) {
        return;
    }
    int mask = 1 << bit;
    predicates[idx] = input[idx] & mask ? 0 : 1;
}

/* Split step of the radix sort: elements with the bit clear
 * (predicate==1) are scattered to the front using the scanned index f,
 * elements with the bit set go behind them via t.
 * NOTE(review): f[length] reads one element PAST the scanned array —
 * d_f holds 'size' ints (indices 0..length-1). The total number of
 * clear-bit elements is presumably f[length-1] + predicate[length-1];
 * verify against the intended layout of d_f. */
__global__ void compact(int *input, int *output, int *f, int *t, int *predicate, int length) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= length) {
        return;
    }
    t[idx] = idx - f[idx] + f[length];
    if (predicate[idx] == 0) {
        int address = t[idx];
        output[address] = input[idx];
    } else {
        int address = f[idx];
        output[address] = input[idx];
    }
}

/* Radix sort driver: reads comma-separated ints from inp.txt, sorts by
 * the 10 low-order bits with scan + compact passes, writes q4.txt. */
int main() {
    vector<int> data;
    ifstream infile;
    infile.open("inp.txt");
    if (infile.is_open()) {
        while (infile.good()) {
            // NOTE(review): cNum is 10 bytes but getline is told 256 —
            // a token longer than 9 chars overflows the buffer; verify.
            char cNum[10];
            infile.getline(cNum, 256, ',');
            int num = atoi(cNum);
            data.push_back(num);
            // cout << num << " ";
        }
        infile.close();
    } else {
        cout << "Error opening file";
    }

    int size = data.size();
    int size1 = size * sizeof(int);

    int *d_f;
    int *d_true;
    int *d_input;
    int *d_output;
    int *d_predicates;
    int *d_result;
    int *d_dummy_blocks_sums;
    int *d_sums;
    int *d_inc;
    int *output = (int *)malloc(size1);

    // number of 1024-element blocks, rounded up
    int blocks = size / 1024;
    if (size % 1024 != 0) {
        blocks += 1;
    }

    cudaMalloc((void **)&d_f, size1);
    cudaMalloc((void **)&d_true, size1);
    cudaMalloc((void **)&d_input, size1);
    cudaMalloc((void **)&d_output, size1);
    cudaMalloc((void **)&d_predicates, size1);
    cudaMalloc((void **)&d_result, size1);
    cudaMalloc((void **)&d_sums, blocks * sizeof(int));
    cudaMalloc((void **)&d_inc, blocks * sizeof(int));
    cudaMalloc((void **)&d_dummy_blocks_sums, blocks * sizeof(int));

    cudaMemcpy(d_output, data.data(), size1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_input, data.data(), size1, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    // two ints per thread for the 512-thread block_scan launches
    const int sharedSize = 2 * 1024 * sizeof(int);
    // one split pass per bit; only the 10 low-order bits are sorted
    // (assumes all input values fit in 10 bits — TODO confirm)
    for (int i = 0; i < 10; i++) {
        markBit<<<blocks, 1024>>>(d_input, d_predicates, i, size);
        block_scan<<<blocks, 512, sharedSize>>>(d_f, d_predicates, d_sums, 1024);
        // NOTE(review): this launch passes n=1024 with only (blocks+1)/2
        // threads, so most of the shared buffer is never written and the
        // up/down sweeps read uninitialized shared memory when
        // blocks < 1024 — verify intended thread count vs n.
        block_scan<<<1, (blocks + 1) / 2, sharedSize>>>(d_inc, d_sums, d_dummy_blocks_sums, 1024);
        add<<<blocks, 1024>>>(d_f, 1024, d_inc);
        compact<<<blocks, 1024>>>(d_input, d_result, d_f, d_true, d_predicates, size);
        cudaMemcpy(d_input, d_result, size1, cudaMemcpyDeviceToDevice);
        cudaDeviceSynchronize();
    }

    cudaMemcpy(output, d_input, size1, cudaMemcpyDeviceToHost);

    ofstream outfile;
    outfile.open("q4.txt");
    if (outfile.is_open()) {
        for (int i = 0; i < size; i++) {
            outfile << output[i] << ", ";
        }
        outfile.close();
    } else {
        cout << "Error opening file";
    }

    cudaFree(d_f);
    cudaFree(d_true);
    cudaFree(d_input);
    cudaFree(d_sums);
    cudaFree(d_inc);
    cudaFree(d_predicates);
    cudaFree(d_dummy_blocks_sums);
    free(output);
}
13,323
/*************************************
 * Simple CUDA kernel for matrix sum *
 *************************************/

#include <stdio.h>
#include <math.h>   /* fix: ceil/fabs were used without declaring them */

#define CUDA_SAFE_CALL( call ) {                                            \
    cudaError_t err = call;                                                 \
    if( cudaSuccess != err ) {                                              \
        fprintf(stderr,"CUDA: error occurred in cuda routine. Exiting...\n"); \
        exit(err);                                                          \
    } }

#define A(i,j)     A[ (j) + ((i)*(n)) ]
#define B(i,j)     B[ (j) + ((i)*(n)) ]
#define C(i,j)     C[ (j) + ((i)*(n)) ]
#define C_gpu(i,j) C_gpu[ (j) + ((i)*(n)) ]
#define C_cpu(i,j) C_cpu[ (j) + ((i)*(n)) ]
#define d_A(i,j)   d_A[ (j) + ((i)*(n)) ]
#define d_B(i,j)   d_B[ (j) + ((i)*(n)) ]
#define d_C(i,j)   d_C[ (j) + ((i)*(n)) ]

/* This kernel computes a matrix addition. Each thread executing this
   kernel performs a single matrix element sum.
   x indexes COLUMNS (j), y indexes ROWS (i); edge threads are guarded. */
__global__ void compute_kernel( unsigned int m, unsigned int n,
                                float *d_A, float *d_B, float *d_C )
{
    /* Obtain the global matrix index accessed by the thread executing this kernel */
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;

    /* Guard: the grid is rounded up, so not every thread has an element */
    if(i < m && j < n){
        d_C( i,j ) = d_A( i,j ) + d_B( i,j );
    }
}

/* Host wrapper: copies A and B to the device, launches one thread per
   element with block_rows x block_cols thread blocks, copies C back. */
int cu_matrix_sum( unsigned int m, unsigned int n,
                   unsigned int block_rows, unsigned int block_cols,
                   float *h_A, float *h_B, float *h_C )
{
    // Allocate device memory
    unsigned int mem_size = m * n * sizeof(float);
    float *d_A, *d_B, *d_C;
    CUDA_SAFE_CALL( cudaMalloc((void **) &d_A, mem_size ) );
    CUDA_SAFE_CALL( cudaMalloc((void **) &d_B, mem_size ) );
    CUDA_SAFE_CALL( cudaMalloc((void **) &d_C, mem_size ) );

    // Copy host memory to device
    CUDA_SAFE_CALL( cudaMemcpy( d_A, h_A, mem_size, cudaMemcpyHostToDevice ) );
    CUDA_SAFE_CALL( cudaMemcpy( d_B, h_B, mem_size, cudaMemcpyHostToDevice ) );

    int row_blocks = (int) ceil( (float) m / (float) block_rows );
    int col_blocks = (int) ceil( (float) n / (float) block_cols );

    /* BUG FIX: the kernel maps x -> columns and y -> rows, so the grid and
       block x-dimensions must be sized from the COLUMN counts (and y from
       the rows). The original passed (row_blocks, col_blocks) and
       (block_rows, block_cols), which is wrong for every m != n. */
    dim3 dimGrid( col_blocks, row_blocks );
    dim3 dimBlock( block_cols, block_rows );
    compute_kernel<<< dimGrid, dimBlock >>>( m, n, d_A, d_B, d_C );

    // Copy device memory to host
    CUDA_SAFE_CALL( cudaMemcpy( h_C, d_C, mem_size, cudaMemcpyDeviceToHost ) );

    // Deallocate device memory
    CUDA_SAFE_CALL( cudaFree(d_A) );
    CUDA_SAFE_CALL( cudaFree(d_B) );
    CUDA_SAFE_CALL( cudaFree(d_C) );

    return EXIT_SUCCESS;
}

/* CPU reference implementation of the same element-wise sum. */
int matrix_sum( unsigned int m, unsigned int n, float *A, float *B, float *C )
{
    unsigned int i, j;
    for( i=0; i<m; i++ ) {
        for( j=0; j<n; j++ ) {
            C( i, j ) = A( i, j ) + B( i, j );
        }
    }
    return EXIT_SUCCESS;
}

int main( int argc, char *argv[] )
{
    unsigned int m, n;
    unsigned int block_rows, block_cols;
    unsigned int i, j;

    /* Generating input data */
    if( argc<5 ) {
        printf("Usage: %s n_rows n_cols block_rows block_cols \n",argv[0]);
        exit(-1);
    }
    /* fix: the destinations are unsigned, so scan with %u */
    sscanf(argv[1],"%u",&m);
    sscanf(argv[2],"%u",&n);
    sscanf(argv[3],"%u",&block_rows);
    sscanf(argv[4],"%u",&block_cols);

    float *A = (float *) malloc( m*n*sizeof(float) );
    float *B = (float *) malloc( m*n*sizeof(float) );

    printf("%s: Generating two random matrices of size %dx%d...\n",argv[0],m,n);
    for( i=0; i<m; i++ ) {
        for( j=0; j<n; j++ ) {
            A( i, j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
            B( i, j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
        }
    }

    printf("%s: Adding matrices in CPU...\n",argv[0]);
    float *C_cpu = (float *) malloc( m*n*sizeof(float) );
    matrix_sum( m, n, A, B, C_cpu );

    printf("%s: Adding matrices in GPU...\n",argv[0]);
    float *C_gpu = (float *) malloc( m*n*sizeof(float) );
    cu_matrix_sum( m, n, block_rows, block_cols, A, B, C_gpu );

    /* Check for correctness */
    float error = 0.0f;
    for( i=0; i<m; i++ ) {
        for( j=0; j<n; j++ ) {
            error += fabs( C_gpu( i, j ) - C_cpu( i, j ) );
        }
    }
    printf("Error CPU/GPU = %.3e\n",error);

    free(A);
    free(B);
    free(C_cpu);
    free(C_gpu);
    return 0;
}
13,324
#include <cmath>

// Replace each entry of 'value' with its cube root, one thread per element.
__global__ void cu_cbrt(double* value)
{
    const unsigned idx = threadIdx.x;
    value[idx] = std::cbrt(value[idx]);
}
13,325
#include "cuda_runtime.h"
#include <stdio.h>

/* Print the current CUDA device, then select the device that best
 * matches compute capability 1.3 and make it current. */
int main(void)
{
    cudaDeviceProp prop;
    int dev;

    cudaGetDevice(&dev);
    /* fix: message typo "currrent" -> "current" */
    printf("ID of current CUDA device: %d\n", dev);

    /* fill a property template that only constrains the compute capability */
    memset(&prop, 0, sizeof(cudaDeviceProp));
    prop.major = 1;
    prop.minor = 3;
    cudaChooseDevice(&dev, &prop);
    printf("ID of CUDA device closest to revision 1.3: %d\n", dev);
    cudaSetDevice(dev);

    return 0;
}
13,326
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_DATA 1024000 // << 1024*100
/* BUG FIX: the original "#define MAX_THREAD_IN_SINGLE_BLOCK = 1024"
 * defined the macro as the token sequence "= 1024", which would not
 * compile anywhere it was used. */
#define MAX_THREAD_IN_SINGLE_BLOCK 1024

/* _c[i] = _a[i] + _b[i]; guarded so a rounded-up grid stays in bounds. */
__global__ void vecAdd(int *_a, int *_b, int *_c) {
    int tID = blockIdx.x*blockDim.x+threadIdx.x;
    if (tID < NUM_DATA)
        _c[tID] = _a[tID] + _b[tID];
}

int main(void){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int memSize = sizeof(int)*NUM_DATA;
    printf("%d elements, memSize = %d bytes\n", NUM_DATA, memSize);

    a = new int[NUM_DATA]; memset(a, 0, memSize);
    b = new int[NUM_DATA]; memset(b, 0, memSize);
    c = new int[NUM_DATA]; memset(c, 0, memSize);

    for (int i = 0; i < NUM_DATA; i++){
        a[i] = rand() % 10;
        b[i] = rand() % 10;
    }

    cudaMalloc(&d_a, memSize);
    cudaMalloc(&d_b, memSize);
    cudaMalloc(&d_c, memSize);

    // cudaMemcpy blocks the host until the copy completes, so no extra
    // synchronization is needed before the kernel launch.
    cudaMemcpy(d_a, a, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, memSize, cudaMemcpyHostToDevice);

    // Kernel call: full 1024-thread blocks, grid rounded up to cover NUM_DATA
    dim3 dimGrid((NUM_DATA + MAX_THREAD_IN_SINGLE_BLOCK - 1) / MAX_THREAD_IN_SINGLE_BLOCK, 1, 1);
    dim3 dimBlock(MAX_THREAD_IN_SINGLE_BLOCK, 1, 1);
    vecAdd<<<dimGrid, dimBlock >>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();

    cudaMemcpy(c, d_c, memSize, cudaMemcpyDeviceToHost);

    //check results
    bool result = true;
    for (int i = 0; i < NUM_DATA; i++) {
        if((a[i] + b[i]) != c[i]){
            printf("[%d] The results is not matched! (%d, %d)\n", i, a[i] + b[i], c[i]);
            result = false;
        }
    }
    if(result)
        printf("GPU works well!\n");

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete [] a;
    delete [] b;
    delete [] c;
    return 0;
}
13,327
#include "bmp.cuh"

/* Initialize the given BMP header for a 24-bit image of the provided size.
 * Rows of raw pixel data are padded to a 4-byte boundary, per the BMP spec. */
void init_bmp_header(bmp_header_t *header, uint32_t width, uint32_t height)
{
    uint32_t bmp_size = 54;              /* header bytes before pixel data */
    uint32_t raw_row_size = width * 3;   /* 3 bytes per pixel (BGR) */
    uint32_t raw_pixel_data_size;
    uint32_t rest = raw_row_size % 4;
    if (rest) {
        raw_row_size += 4 - rest;        /* round row up to multiple of 4 */
    }
    raw_pixel_data_size = raw_row_size * height;
    header->bm = 'B' + ('M' << 8);       /* "BM" magic, little-endian */
    header->size = bmp_size + raw_pixel_data_size;
    header->reserved = 0;
    header->pixel_offset = bmp_size;
    header->bitmap_info_header_size = 40;
    header->pixel_width = width;
    header->pixel_height = height;
    header->color_planes_num = 1;
    header->bits_per_pixel = 24;
    header->compression_enabled = 0;
    header->pixel_data_raw_size = raw_pixel_data_size;
    header->horiz_res = 2835;            /* 72 DPI in pixels/metre */
    header->vert_res = 2835;
    header->colors_num = 0;
    header->important_colors = 0;
}

/* Save the given RGB color array (floats in [0,1], 3 per pixel, row-major)
 * into the file given by handle fout.
 * NOTE(review): rows are written top-to-bottom while a positive BMP height
 * conventionally means bottom-up storage — confirm the intended orientation. */
void write_bmp(FILE *fout, float *colors, uint32_t width, uint32_t height)
{
    uint32_t padding = (width * 3) % 4;
    if (padding)
        padding = 4 - padding;
    uint8_t header[54];
    /* assumes bmp_header_t is packed to exactly 54 bytes — the cast below
       relies on it (declared in bmp.cuh) */
    init_bmp_header((bmp_header_t *)header, width, height);
    fwrite(header, 1, 54, fout);
    /* each line */
    for (uint32_t y = 0; y < height; y++) {
        for (uint32_t x = 0; x < width; x++) {
            float *elem = &colors[(y * width + x) * 3];
            int c = (int)(elem[2] * 255.0);  /* B */
            fputc(c, fout);
            c = (int)(elem[1] * 255.0);      /* G */
            fputc(c, fout);
            c = (int)(elem[0] * 255.0);      /* R */
            fputc(c, fout);
        }
        /* pad the row to a 4-byte boundary */
        for (uint32_t x = 0; x < padding; x++) {
            fputc(0, fout);
        }
    }
}
13,328
#include <iostream>
#include <math.h>
#include <vector>
#include <iomanip>
#include <sstream>
#include <string>
#include <fstream>
#include <thread>
#include <ctime>
#include <stdio.h>

// --- Java-Random-compatible 48-bit LCG helpers (device side) ---

__device__ static inline void setSeed(int64_t *seed)
{
    *seed = (*seed ^ 0x5deece66d) & ((1LL << 48) - 1);
}

__device__ static inline int next(int64_t *seed, const int bits)
{
    *seed = (*seed * 0x5deece66d + 0xb) & ((1LL << 48) - 1);
    return (int) (*seed >> (48 - bits));
}

// Java Random.nextInt(n): uniform integer in [0, n) with rejection sampling.
__device__ static inline int nextInt(int64_t *seed, const int n)
{
    int bits, val;
    const int m = n - 1;
    if ((m & n) == 0)  // n is a power of two: single draw, no bias
        return (int) ((n * (int64_t) next(seed, 31)) >> 31);
    do {
        bits = next(seed, 31);
        val = bits % n;
    } while (bits - val + m < 0);
    return val;
}

struct Pos { int x, z; };

// Axis-aligned box on the X/Z plane (the Y arguments are accepted but unused).
// fixed: "__device__ class BoundingBox" — the qualifier does not apply to a
// class definition; the member functions carry it instead.
class BoundingBox {
public:
    Pos start;
    Pos end;

    __device__ static BoundingBox getBoundingBox(int minx, int miny, int minz,
                                                 int maxx, int maxy, int maxz)
    {
        BoundingBox box;
        box.start.x = minx;
        box.start.z = minz;
        box.end.x = maxx;
        box.end.z = maxz;
        return box;
    }

    __device__ bool intersectsWith(BoundingBox box)
    {
        return this->end.x >= box.start.x && this->start.x <= box.end.x &&
               this->end.z >= box.start.z && this->start.z <= box.end.z;
    }
};

#define BLOCK_SIZE (128)
#define WORK_SIZE_BITS 16
#define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE))

#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s (code %d) %s %d\n",
                cudaGetErrorString(code), code, file, line);
        exit(code);
    }
}

FILE *fp;
uint64_t total = 0;
uint64_t current = 0;
__device__ BoundingBox guessBox;   // target box, set once by setupGuessBox
uint64_t* buffer;                  // managed: matching seeds found this batch
uint32_t* counter;                 // managed: number of entries in buffer

//__device__ uint64_t hardcoded = 8682522807148012UL * 181783497276652981UL;

// One thread = one candidate seed. Replays the seeded random walk of the
// spawn point and records the seed if the walk ever lands inside guessBox.
__global__ __launch_bounds__(BLOCK_SIZE, 2)
static void threadWork(uint64_t offset, uint32_t* counter, uint64_t* buffer)
{
    uint64_t seed = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
    //int64_t structureSeed = hardcoded ^ seed;
    int64_t structureSeed = seed;
    BoundingBox spawnBox;
    Pos spawn;
    spawn.x = 0;
    spawn.z = 0;
    // fixed: spawnBox was read uninitialized by the first evaluation of the
    // loop condition below; start the box at the initial spawn position.
    spawnBox.start = spawn;
    spawnBox.end = spawn;
    int count = 0;
    setSeed(&structureSeed);
    nextInt(&structureSeed, 12000);
    for (spawn.z = 0; !spawnBox.intersectsWith(guessBox) && count <= 150;
         spawn.z += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64)) {
        spawn.x += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64);
        spawnBox.start = spawn;
        spawnBox.end = spawn;
        count++;
    }
    if (spawnBox.intersectsWith(guessBox)) {
        buffer[atomicAdd(counter, 1)] = seed;
    }
}

// Copies the host-side guess box into the __device__ global.
__global__ __launch_bounds__(1, 1)
static void setupGuessBox(Pos guessMin, Pos guessMax)
{
    guessBox.start = guessMin;
    guessBox.end = guessMax;
}

int main(int argc, char **argv)
{
    time_t start = time(NULL);
    fp = fopen("seananners.txt", "w+");
    std::cout << "Begin loading threads" << std::endl;

    uint64_t startValue = 0;
    total = 100000000000;

    GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(uint64_t) * SEEDS_PER_CALL));
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaMallocManaged(&counter, sizeof(uint32_t)));
    GPU_ASSERT(cudaPeekAtLastError());

    Pos guessMin;
    Pos guessMax;
    guessMin.x = 1710; guessMin.z = 276;
    guessMax.x = 1734; guessMax.z = 348;
    setupGuessBox<<<1, 1>>>(guessMin, guessMax);
    cudaSetDevice(0);
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaDeviceSynchronize());

    uint64_t countOut = 0;
    for (uint64_t offset = startValue; offset <= total; offset += SEEDS_PER_CALL) {
        threadWork<<<1ULL << WORK_SIZE_BITS, BLOCK_SIZE>>>(offset, counter, buffer);
        GPU_ASSERT(cudaPeekAtLastError());
        GPU_ASSERT(cudaDeviceSynchronize());
        // Managed memory is host-readable after the synchronize above.
        for (int i = 0; i < *counter; i++) {
            uint64_t seed = buffer[i];
            if (seed != 0)
                // fixed: "%lld" with uint64_t is undefined; cast + "%llu"
                fprintf(fp, "%llu\n", (unsigned long long) seed);
        }
        *counter = 0;
        // Progress report roughly every 5e9 seeds.
        if (countOut >= 5000000000) {
            time_t tempTime = time(NULL);
            uint64_t tempDiff = tempTime - start;
            double sps = (double)(offset - startValue) / tempDiff;
            double percent = ((double) offset / (double) total) * 100.0;
            printf("Seeds Per Second: %f\tProgress: %f\n", sps, percent);
            countOut = 0;
        }
        countOut += SEEDS_PER_CALL;
    }

    time_t end = time(NULL);
    uint64_t diff = end - start;
    double seedsPerSec = (double) total / (double) diff;
    printf("Time taken: %llu\nSeeds per second: %15.9f",
           (unsigned long long) diff, seedsPerSec);
    fclose(fp);
    return 0;
}
13,329
#include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <cmath>

// Debug helper: print a row-major float matrix as truncated integers.
void printMatrix(float* matrix, int rowCount, int columnCount)
{
    for (int y = 0; y < rowCount; y++) {
        for (int x = 0; x < columnCount; x++) {
            printf("%d ", int(matrix[(y * columnCount) + x]));
        }
        printf("\n");
    }
    printf("\n");
}

// Fill a row-major matrix with random values in [0, 25).
void initializeMatrix(float* matrix, int rowCount, int columnCount)
{
    for (int y = 0; y < rowCount; y++) {
        for (int x = 0; x < columnCount; x++) {
            matrix[(y * columnCount) + x] = rand() % 25;
        }
    }
}

// out = a + b, one thread per element. Here blockIdx.x/threadIdx.x select the
// row and blockIdx.y/threadIdx.y the column; the guard handles ragged grids.
__global__ void addMatrix(float* out, float* a, float* b, int rowCount, int columnCount)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;  // row index
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;  // column index
    int index = (x * columnCount) + y;
    if (x < rowCount && y < columnCount) {
        out[index] = a[index] + b[index];
    }
}

int main(void)
{
    srand(time(NULL));
    int matrixRowCount = 1 << 5;
    int matrixColumnCount = 1 << 5;
    int memorySize = matrixRowCount * matrixColumnCount * sizeof(float);

    float* h_a = (float*)malloc(memorySize);
    float* h_b = (float*)malloc(memorySize);
    float* h_c = (float*)malloc(memorySize);
    initializeMatrix(h_a, matrixRowCount, matrixColumnCount);
    initializeMatrix(h_b, matrixRowCount, matrixColumnCount);

    float *d_a, *d_b, *d_c;
    cudaMalloc((float**)&d_a, memorySize);
    cudaMalloc((float**)&d_b, memorySize);
    cudaMalloc((float**)&d_c, memorySize);
    cudaMemcpy(d_a, h_a, memorySize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, memorySize, cudaMemcpyHostToDevice);

    // Ceil-division grid so any matrix size is covered.
    dim3 block(32, 32);
    dim3 grid((matrixRowCount + block.x - 1) / block.x,
              (matrixColumnCount + block.y - 1) / block.y);
    addMatrix<<<grid, block>>>(d_c, d_a, d_b, matrixRowCount, matrixColumnCount);
    cudaDeviceSynchronize();
    cudaMemcpy(h_c, d_c, memorySize, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // printMatrix(h_a, matrixRowCount, matrixColumnCount);
    // printMatrix(h_b, matrixRowCount, matrixColumnCount);
    // printMatrix(h_c, matrixRowCount, matrixColumnCount);

    // fixed: host buffers were leaked.
    free(h_a);
    free(h_b);
    free(h_c);

    cudaDeviceReset();
    return 0;
}
13,330
// ***********************************************************************
//
// Demo program for the APPS course (10/2012)
// Petr Olivka, Department of Computer Science, FEI, VSB-TU Ostrava
// email: petr.olivka@vsb.cz
//
// Example of using CUDA technology: thread organization in blocks.
// Every thread prints its position within its block and the
// block's position within the grid.
//
// ***********************************************************************

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>

// Demo kernel that prints the thread hierarchy.
// NOTE: device-side printf is available from compute capability 2.x.
__global__ void thread_hierarchy()
{
    // Built-in variables identifying this thread:
    //   gridDim   - grid dimensions
    //   blockIdx  - position of the block within the grid
    //   blockDim  - block dimensions
    //   threadIdx - position of the thread within the block
    printf( "Block{%d,%d}[%d,%d] Thread{%d,%d}[%d,%d]\n",
        gridDim.x, gridDim.y, blockIdx.x, blockIdx.y,
        blockDim.x, blockDim.y, threadIdx.x, threadIdx.y );
}

void run_cuda()
{
    cudaError_t cerr;

    // The internal printf FIFO can be enlarged with the following call:
    /*cerr = cudaDeviceSetLimit( cudaLimitPrintfFifoSize, required_size );
    if ( cerr != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
    */

    // Launch the kernel: the first dim3 is the grid size,
    // the second dim3 is the block size.
    thread_hierarchy<<< dim3( 2, 2 ), dim3( 3, 3 )>>>();
    if ( ( cerr = cudaGetLastError() ) != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );

    // printf output is buffered in device memory; synchronize so that it
    // is flushed and becomes visible on the host.
    cudaDeviceSynchronize();
}
13,331
#include <stdio.h>

// Kernel: cube every input element (one thread per element, single block).
// fixed: the body computed num * num (the square) despite the kernel being
// named and documented as "cube".
__global__ void cube( float *d_out, float *d_in )
{
    int idx = threadIdx.x;
    float num = d_in[idx];
    d_out[idx] = num * num * num;
}

int main( int argc, char ** argv )
{
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof( float );

    // generate the input array on the host
    float h_in[ ARRAY_SIZE ];
    for( int i = 0; i < ARRAY_SIZE; i++ ){
        h_in[i] = float(i);
    }
    float h_out[ ARRAY_SIZE ];

    // declare GPU memory pointers
    float *d_in;
    float *d_out;

    // allocate GPU memory
    cudaMalloc( (void **) &d_in, ARRAY_BYTES );
    cudaMalloc( (void **) &d_out, ARRAY_BYTES );

    // transfer the array to the GPU
    cudaMemcpy( d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice );

    // launch one block of ARRAY_SIZE threads
    cube<<<1, ARRAY_SIZE>>>( d_out, d_in );

    // copy back the result array to the CPU
    cudaMemcpy( h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost );

    // print out the resulting array, four values per row
    for( int i = 0; i < ARRAY_SIZE; i++ ){
        printf("%f", h_out[i]);
        printf(( (i%4) != 3 )? "\t" : "\n" );
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
13,332
#include<iostream>
#include<vector>
#include<fstream>
#include<sstream>
#include<string>
#include<iterator>
#include<ctype.h>
#include <iomanip>
#include <math.h>
#include <fstream>
#include<cuda_runtime.h>
using namespace std;

// Tokenized contents of the *.par parameter file and the *.in particle file.
vector<string> parFile;
vector<string> particleConfig;

// Velocity-Verlet velocity half-step: v += (F_old + F_new) * dt / (2 m).
// One block per particle (Idx = blockIdx.x); arrays are xyz-interleaved.
__global__ void updateVel (double* d_pPos, double delTime, double* d_pVel,
                           double* d_calculatedData, int* d_pMass, double* d_oldForce)
{
    int Idx = blockIdx.x;
    d_pVel[Idx*3+0]+=(d_oldForce[Idx*3+0]+d_calculatedData[Idx*3+0])*delTime/(2.0*d_pMass[Idx]);
    d_pVel[Idx*3+1]+=(d_oldForce[Idx*3+1]+d_calculatedData[Idx*3+1])*delTime/(2.0*d_pMass[Idx]);
    d_pVel[Idx*3+2]+=(d_oldForce[Idx*3+2]+d_calculatedData[Idx*3+2])*delTime/(2.0*d_pMass[Idx]);
}

// Velocity-Verlet position step with periodic boundary wrap-around.
// A tolerance of 0.00995 keeps reappearing particles just inside the domain.
__global__ void updatePos (double*d_pPos, double delTime, double* d_pVel,
                           double* d_calculatedData, int* d_pMass,
                           int x_max, int y_max, int z_max)
{
    int Idx = blockIdx.x;
    d_pPos[Idx*3+0]+=delTime*d_pVel[Idx*3+0]+(d_calculatedData[Idx*3+0]*pow(delTime,2)/(2.0*d_pMass[Idx]));
    d_pPos[Idx*3+1]+=delTime*d_pVel[Idx*3+1]+(d_calculatedData[Idx*3+1]*pow(delTime,2)/(2.0*d_pMass[Idx]));
    d_pPos[Idx*3+2]+=delTime*d_pVel[Idx*3+2]+(d_calculatedData[Idx*3+2]*pow(delTime,2)/(2.0*d_pMass[Idx]));

    // PBC: a particle leaving one face re-enters at the opposite face.
    if(d_pPos[Idx*3+0]<= 0.0)        {d_pPos[Idx*3+0] = x_max-0.00995;}
    else if(d_pPos[Idx*3+0]>= x_max) {d_pPos[Idx*3+0] = 0.00995;}
    if(d_pPos[Idx*3+1]<= 0.0)        {d_pPos[Idx*3+1] = y_max-0.00995;}
    else if(d_pPos[Idx*3+1]>= y_max) {d_pPos[Idx*3+1] = 0.00995;}
    if(d_pPos[Idx*3+2]<= 0.0)        {d_pPos[Idx*3+2] = z_max-0.00995;}
    else if(d_pPos[Idx*3+2]>= z_max) {d_pPos[Idx*3+2] = 0.00995;}
}

// Rebuild the linked-cell lists: each cell stores the index of its head
// particle, each particle stores the index of the next particle in its cell.
// NOTE(review): the plain "= -1" store below races with concurrent
// atomicExch calls on the same cell and can drop earlier list heads.
__global__ void updateCells(double* d_pPos, int* d_vec_particles, int* d_vec_cells,
                            double len_x, double len_y, double len_z,
                            double x_n, double y_n)
{
    int Idx = blockIdx.x;
    int x_coord = 0, y_coord = 0, z_coord = 0;
    x_coord = (int)(d_pPos[Idx*3+0]/len_x);
    y_coord = (int)(d_pPos[Idx*3+1]/len_y);
    z_coord = (int)(d_pPos[Idx*3+2]/len_z);
    d_vec_cells[int(x_coord+(y_coord*x_n)+(z_coord*x_n*y_n))] = -1;
    d_vec_particles[Idx] = atomicExch(&d_vec_cells[int(x_coord+(y_coord*x_n)+(z_coord*x_n*y_n))], Idx);
}

// Lennard-Jones force on particle Idx from all other particles, written to
// d_calculatedData. Uses the minimum-image correction for pairs that sit on
// opposite boundary cells of the (assumed unit-cell) periodic domain.
__global__ void forceTwoParticles(int nParticles, int* d_pMass, double* d_pPos,
                                  double* d_pVel, double* d_calculatedData,
                                  double* d_eps, double* d_sigma, double d_rCutOff)
{
    int Idx = blockIdx.x;
    int l_sys = 1;  // cell/system length used by the boundary correction
    double separation_magnitude = 0.0;
    d_calculatedData [Idx*3+0]=0; d_calculatedData [Idx*3+1]=0; d_calculatedData [Idx*3+2]=0;
    for(int i=0; i<nParticles; ++i)
    {
        if(Idx!=i)
        {
            // NOTE(review): "d_pPos[...]/1==0" compares a double against an
            // integer without truncation, so these boundary tests only fire
            // for exact values — probably "(int)(pos/1)" was intended.
            if((d_pPos[Idx*3+0]/1==0 && d_pPos[i*3+0]/1==2) || (d_pPos[Idx*3+1]/1==0 && d_pPos[i*3+1]/1==2) || (d_pPos[Idx*3+2]/1==0 && d_pPos[i*3+2]/1==2) || (d_pPos[Idx*3+0]/1==2 && d_pPos[i*3+0]/1==0) || (d_pPos[Idx*3+1]/1==2 && d_pPos[i*3+1]/1==0) || (d_pPos[Idx*3+2]/1==2 && d_pPos[i*3+2]/1==0))
            {
                // Particles on opposite boundary cells: fold the separation
                // back into the primary image before measuring it.
                double separation [3] = {d_pPos[Idx*3+0]-d_pPos[i*3+0], d_pPos[Idx*3+1]-d_pPos[i*3+1], d_pPos[Idx*3+2]-d_pPos[i*3+2]};
                double separation_correction [3] = {separation[0]-l_sys*int(separation[0]/l_sys),separation[1]-l_sys*int(separation[1]/l_sys), separation[2]-l_sys*int(separation[2]/l_sys)};
                separation_magnitude = sqrt (pow(separation_correction[0],2)+pow(separation_correction[1],2)+pow(separation_correction[2],2));
            }
            else  // interior pair: plain Euclidean distance
            {
                double separation [3] = {d_pPos[Idx*3+0]-d_pPos[i*3+0], d_pPos[Idx*3+1]-d_pPos[i*3+1], d_pPos[Idx*3+2]-d_pPos[i*3+2]};
                separation_magnitude = sqrt (pow(separation[0],2)+pow(separation[1],2)+pow(separation[2],2));
            }
            double force_LJ = 0;
            // Lennard-Jones magnitude, truncated at the cut-off radius.
            if(separation_magnitude <= d_rCutOff)
                force_LJ = 24*(*d_eps/(pow(separation_magnitude,2)))*pow(*d_sigma/separation_magnitude,6)*(2*pow(*d_sigma/separation_magnitude,6)-1);
            d_calculatedData [Idx*3+0] += force_LJ*(d_pPos[Idx*3+0]-d_pPos[i*3+0]);
            d_calculatedData [Idx*3+1] += force_LJ*(d_pPos[Idx*3+1]-d_pPos[i*3+1]);
            d_calculatedData [Idx*3+2] += force_LJ*(d_pPos[Idx*3+2]-d_pPos[i*3+2]);
        }
    }
}

int main(int argc, char* argv[])
{
    // ---- read the *.par parameter file, tokenizing on spaces ----
    ifstream parFileName (argv[1]);
    if (parFileName.is_open())
    {
        string str;
        while(getline(parFileName, str))
        {
            stringstream temp(str);
            string str1;
            while (getline(temp, str1,' '))
            {
                if(str1.length() != 0)
                {
                    // fixed: the original test "str1[str1.size()]=='\0' || '\n'"
                    // was always true and chopped the last character of every
                    // token; now it mirrors the *.in parser below.
                    if(str1[str1.size()-1]=='\0' || str1[str1.size()-1]=='\n')
                    {
                        str1.pop_back();
                    }
                    parFile.push_back(str1);
                }
            }
        }
    }
    else
    {
        cout<<"*.par file not found"<< endl;
    }
    cout<<"parFile contents: "<<endl;
    for(int i=0; i<parFile.size(); ++i)
    {
        cout<<"i: "<<i<<'\t'<<"strLength: "<<parFile[i].length()<<'\t'<<parFile[i]<<endl;
    }

    // ---- read the *.in particle configuration file named in the .par file ----
    ifstream particleConfigFile (parFile[1]);
    if (particleConfigFile.is_open())
    {
        string str;
        while(getline(particleConfigFile,str))
        {
            stringstream temp(str);
            string str1;
            while (getline(temp, str1,' '))
            {
                if(str1[str1.size()-1]=='\0')
                {
                    str1.pop_back();
                }
                particleConfig.push_back(str1);
            }
        }
    }
    else
    {
        cout<<"*.in file not found"<< endl;
    }
    for(int i=0;i<particleConfig.size();++i)
    {
        cout<<"i: "<<i<<" "<<setprecision(10)<<(particleConfig[i])<<" size: "<<particleConfig[i].size()<<endl;
    }

    // ---- unpack per-particle data: 7 tokens per particle (m, x, y, z, vx, vy, vz) ----
    int nParticles = stoi(particleConfig[0]);
    particleConfig.erase(particleConfig.begin());
    vector<int> pMass (nParticles);
    vector<double> pPos (nParticles*3);
    vector<double> pVel (nParticles*3);
    for(int i=0; i<nParticles; ++i)
    {
        pMass[i]=stoi(particleConfig[i*7]);
    }
    int i=0;
    for(int j=0; j<particleConfig.size(); ++j)
    {
        if((i*7+3)<=particleConfig.size())
        {
            pPos[j]=stod(particleConfig[i*7+1]);
            pPos[j+1]=stod(particleConfig[i*7+2]);
            pPos[j+2]=stod(particleConfig[i*7+3]);
            j+=2;
            ++i;
        }
        else break;
    }
    i=0;
    for(int j=0; j<particleConfig.size(); ++j)
    {
        if((i*7+6)<=particleConfig.size())
        {
            pVel[j]=stod(particleConfig[i*7+4]);
            pVel[j+1]=stod(particleConfig[i*7+5]);
            pVel[j+2]=stod(particleConfig[i*7+6]);
            j+=2;
            ++i;
        }
        else break;
    }

    // ---- simulation parameters (positions in parFile are fixed by the format) ----
    double delTime = (int)(stod(parFile[5])*1000.0)/1000.0, endTime = stod(parFile[3]), eps = stod(parFile[7]), sigma = stod(parFile[9]);
    string baseFile = parFile[13];
    double initTime = delTime;
    cout<<"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ "<<"delTime: "<<fixed<<setprecision(35)<<delTime<<", "<<"endTIme: "<<endTime<<", "<<"initTIme: "<<initTime<<endl;

    // computational domain and linked-cell grid
    double x_min = stoi(parFile[27]), x_max = stoi(parFile[29]), y_min = stoi(parFile[31]), y_max = stoi(parFile[33]), z_min = stoi(parFile[35]), z_max = stoi(parFile[37]);
    double x_n = stoi(parFile[39]), y_n = stoi(parFile[41]), z_n = stoi(parFile[43]);
    double rCutOff = stod(parFile[45]);
    int nCells = (int)(x_n * y_n * z_n);
    cout<<"celllssss: "<<nCells<<endl;
    double len_x = (x_max - x_min) / x_n, len_y = (y_max - y_min) / y_n, len_z = (z_max - z_min) / z_n;
    cout<<"###"<<endl;
    cout<<"x_max: "<<x_max<<"\t"<<"x_min: "<<x_min<<"\t"<<"x_n: "<<x_n<<endl;

    vector<double> calculatedData(3*nParticles);  // freshly computed forces

    // ---- device allocations and initial uploads ----
    int* d_pMass;
    double* d_pPos;
    double* d_pVel;
    double* d_calculatedData;
    double* d_eps;
    double* d_sigma;
    double* d_delTime;
    cudaMalloc (&d_pMass, nParticles*sizeof(int));
    cudaMalloc (&d_pPos, nParticles*3*sizeof(double));
    cudaMalloc (&d_pVel, nParticles*3*sizeof(double));
    cudaMalloc (&d_calculatedData, 3*nParticles*sizeof(double));
    cudaMalloc (&d_eps, sizeof(double));
    cudaMalloc (&d_sigma, sizeof(double));
    cudaMalloc (&d_delTime, sizeof(double));
    cudaMemcpy (d_pMass, &pMass[0],nParticles*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy (d_pPos, &pPos[0],nParticles*3*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_pVel, &pVel[0],nParticles*3*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_calculatedData, &calculatedData[0],3*nParticles*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_eps,&eps,sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_sigma,&sigma,sizeof(double), cudaMemcpyHostToDevice);
    // fixed: the source copied &d_delTime (the address of the device pointer
    // variable itself) instead of the host value &delTime.
    cudaMemcpy (d_delTime,&delTime,sizeof(double), cudaMemcpyHostToDevice);

    vector<double> oldForce;
    oldForce.resize(3*nParticles);
    double* d_oldForce;
    cudaMalloc (&d_oldForce, 3*nParticles*sizeof(double));

    vector<int> vec_cells (nCells,-1);       // cell heads, -1 = empty
    vector<int> vec_particles (nParticles);  // next-particle links
    int* d_vec_cells;
    int* d_vec_particles;
    cudaMalloc (&d_vec_cells, nCells*sizeof(int));
    cudaMalloc (&d_vec_particles, nParticles*sizeof(int));

    // initial forces at t = 0
    forceTwoParticles<<<nParticles, 1>>>(nParticles, d_pMass, d_pPos, d_pVel, d_calculatedData, d_eps, d_sigma, rCutOff);
    cudaMemcpy (&calculatedData[0], d_calculatedData, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);

    int n=0;
    int vtk_count = 1;
    int out_count = 1;
    bool write_vtk = true;
    bool write_out = true;
    int temp_vtk = 0;
    int temp_out = 0;

    // ---- write the initial configuration (t = 0) as .vtk and .out ----
    ofstream myfile;
    myfile.open(baseFile+"0"+".vtk");
    myfile<<"# vtk DataFile Version 4.0\nhesp visualization file\nASCII\nDATASET UNSTRUCTURED_GRID\n";
    myfile<<"POINTS"<<" "<<nParticles<<" "<<"double"<<'\n';
    for(int i=0;i<nParticles;++i)
    {
        myfile<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2];
        myfile<<'\n';
    }
    myfile<<"CELLS"<<" "<<0<<" "<<0<<'\n';
    myfile<<"CELL_TYPES"<<" "<<0<<'\n';
    myfile<<"POINT_DATA"<<" "<<nParticles<<'\n';
    myfile<<"SCALARS"<<" "<<"m"<<" "<<"double"<<'\n';
    myfile<<"LOOKUP_TABLE"<<" "<<"default"<<'\n';
    for (int i=0; i<nParticles;++i)
    {
        myfile<<pMass[i]<<'\n';
    }
    myfile<<"VECTORS"<<" "<<"v"<<" "<<"double"<<'\n';
    for(int i=0;i<nParticles;++i)
    {
        myfile<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2];
        myfile<<'\n';
    }
    myfile.close();
    myfile.open(baseFile+"0"+".out");
    myfile<<nParticles<<'\n';
    for(int i=0; i<nParticles; ++i)
    {
        myfile<<pMass[i]<<" "<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2]<<" "<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2]<<'\n';
    }
    myfile.close();
    temp_vtk += 1;
    write_vtk=false;
    temp_out += 1;
    write_out=false;

    // ---- main time-stepping loop (velocity Verlet) ----
    while (initTime<=endTime)
    {
        // output cadence: parFile[15] = vtk interval, parFile[11] = out interval
        if (temp_vtk % stoi(parFile[15]) == 0)
            write_vtk = true;
        if (temp_out % stoi(parFile[11]) == 0)
            write_out = true;
        if (temp_vtk % stoi(parFile[15]) != 0)
            write_vtk=false;
        if (temp_out % stoi(parFile[11]) != 0)
            write_out=false;
        cout<<"\n####\tIteration: "<<n<<"\tTime: "<<fixed<<setprecision(20)<<initTime<<"\t####\n";

        // fresh (shadowing) cell lists every step
        vector<int> vec_cells (nCells,-1);
        vector<int> vec_particles (nParticles);
        cudaMemcpy(d_vec_cells, &vec_cells[0], nCells*sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_vec_particles, &vec_particles[0], nParticles*sizeof(int), cudaMemcpyHostToDevice);

        updatePos<<<nParticles , 1>>>(d_pPos, delTime, d_pVel, d_calculatedData,d_pMass, x_max, y_max, z_max);
        cudaMemcpy (&pPos[0], d_pPos, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        cudaMemcpy (d_pPos, &pPos[0], 3*nParticles*sizeof(double), cudaMemcpyHostToDevice);

        updateCells<<<nParticles , 1>>> (d_pPos, d_vec_particles, d_vec_cells, len_x, len_y, len_z, x_n, y_n);
        cudaMemcpy (&vec_cells[0], d_vec_cells, nCells*sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy (d_vec_cells, &vec_cells[0], nCells*sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy (&vec_particles[0], d_vec_particles, nParticles*sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy (d_vec_particles, &vec_particles[0], nParticles*sizeof(int), cudaMemcpyHostToDevice);
        for(int i=0; i<nCells; ++i)
        {
            cout<<vec_cells[i]<<", ";
        }
        cout<<endl;
        for(int i=0; i<nParticles; ++i)
        {
            cout<<vec_particles[i]<<", ";
        }
        cout<<endl;

        // stash the forces used for this position step as "old" forces
        cudaMemcpy (&calculatedData[0], d_calculatedData, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        for(int i=0; i<oldForce.size();++i)
        {
            oldForce[i]=calculatedData[i];
        }
        cudaMemcpy (d_oldForce, &oldForce[0], 3*nParticles*sizeof(double), cudaMemcpyHostToDevice);

        // new forces at the updated positions, then the velocity half-step
        forceTwoParticles<<<nParticles , 1>>>(nParticles, d_pMass, d_pPos, d_pVel, d_calculatedData, d_eps, d_sigma, rCutOff);
        cudaMemcpy (&calculatedData[0], d_calculatedData, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        updateVel<<<nParticles , 1>>>(d_pPos, delTime, d_pVel, d_calculatedData,d_pMass, d_oldForce);
        cudaMemcpy (&pPos[0], d_pPos, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        cudaMemcpy (&pVel[0], d_pVel, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);

        // ---- periodic output ----
        if(write_vtk == true)
        {
            myfile.open(baseFile+to_string(vtk_count)+".vtk");
            myfile<<"# vtk DataFile Version 4.0\nhesp visualization file\nASCII\nDATASET UNSTRUCTURED_GRID\n";
            myfile<<"POINTS"<<" "<<nParticles<<" "<<"double"<<'\n';
            for(int i=0;i<nParticles;++i)
            {
                myfile<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2];
                myfile<<'\n';
            }
            myfile<<"CELLS"<<" "<<0<<" "<<0<<'\n';
            myfile<<"CELL_TYPES"<<" "<<0<<'\n';
            myfile<<"POINT_DATA"<<" "<<nParticles<<'\n';
            myfile<<"SCALARS"<<" "<<"m"<<" "<<"double"<<'\n';
            myfile<<"LOOKUP_TABLE"<<" "<<"default"<<'\n';
            for (int i=0; i<nParticles;++i)
            {
                myfile<<pMass[i]<<'\n';
            }
            myfile<<"VECTORS"<<" "<<"v"<<" "<<"double"<<'\n';
            for(int i=0;i<nParticles;++i)
            {
                myfile<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2];
                myfile<<'\n';
            }
            myfile.close();
            vtk_count+=1;
        }
        if(write_out == true)
        {
            myfile.open(baseFile+to_string(out_count)+".out");
            myfile<<nParticles<<'\n';
            for(int i=0; i<nParticles; ++i)
            {
                myfile<<pMass[i]<<" "<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2]<<" "<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2]<<'\n';
            }
            myfile.close();
            out_count+=1;
        }
        temp_vtk += 1;
        temp_out += 1;
        initTime+=delTime;
        ++n;
    }
}
13,333
/* Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#ixzz4CtH09yed */
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
using namespace std;

// Generate random floats between 0 and UP_BOUND.
// fixed: the original "#define UP_BOUND 100;" carried a trailing semicolon,
// injecting a stray empty statement wherever the macro was used.
#define UP_BOUND 100

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;
    int height;
    int stride;
    float* elements;
} Matrix;

// Thread block size
#define BLOCK_SIZE 20

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[row * A.stride + col] = value;
}

// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is located
// col sub-matrices to the right and row sub-matrices down from A's
// upper-left corner.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return Asub;
}

// Matrix multiplication - host code. C = A * B.
// The kernel guards ragged edges, so dimensions need not be multiples
// of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy A to device: " << cudaGetErrorString(err) << "\n" << endl;

    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    err = cudaMalloc(&d_B.elements, size);
    cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy B to device: " << cudaGetErrorString(err) << "\n" << endl;

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    err = cudaMalloc(&d_C.elements, size);
    cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl;

    // Invoke kernel.
    // fixed: the original floor division dropped the ragged tail that the
    // guarded kernel was written to handle; use ceil-division instead.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // fixed: cudaThreadSynchronize is deprecated
    err = cudaDeviceSynchronize();
    cout << "Run kernel: " << cudaGetErrorString(err) << endl;

    // Read C from device memory
    err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cout << "Copy C off of device: " << cudaGetErrorString(err) << "\n" << endl;

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul().
// Tiled shared-memory algorithm; out-of-range tiles are zero-padded so
// arbitrary (non-multiple-of-BLOCK_SIZE) dimensions are correct.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Shared memory used to store Asub and Bsub respectively
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;

    // Each thread accumulates one element of C.
    float Cvalue = 0.0;
    for (int i = 0; i < (A.width - 1) / BLOCK_SIZE + 1; ++i) {
        int temp = i * BLOCK_SIZE + threadIdx.x;
        if (row < A.height && temp < A.width)
            As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;
        temp = i * BLOCK_SIZE + threadIdx.y;
        if (col < B.width && temp < B.height)
            Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();  // tiles loaded before use
        for (int j = 0; j < BLOCK_SIZE; ++j)
            Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        __syncthreads();  // done with tiles before the next load
    }
    if (row < C.height && col < C.width)
        C.elements[row * C.width + col] = Cvalue;
}

int main(int argc, char const *argv[])
{
    clock_t t;
    Matrix A, B, C;
    int a1, a2, b1, b2;
    int i, j;
    srand(time(NULL));

    if (argc < 4) {
        cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
        return 1;  // fixed: execution previously fell through to argv[1..3]
    }

    // Get dimensions of A and B, e.g. $ ./matrixMul 1 1000000 400
    a1 = atoi(argv[1]);  // A's height
    a2 = atoi(argv[2]);  // A's width
    b1 = a2;             // B's height
    b2 = atoi(argv[3]);  // B's width

    A.height = a1;
    A.width = A.stride = a2;
    A.elements = new float[A.width * A.height];
    B.height = b1;
    B.width = B.stride = b2;
    B.elements = new float[B.width * B.height];
    C.height = A.height;
    C.width = C.stride = B.width;
    C.elements = new float[C.width * C.height];

    // Fill A and B with random floats
    for (i = 0; i < A.height; ++i)
        for (j = 0; j < A.width; ++j)
            A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    for (i = 0; i < B.height; ++i)
        for (j = 0; j < B.width; ++j)
            B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;

    // Call MatMul(), and therefore MatMulKernel(), timing the whole thing.
    t = clock();
    MatMul(A, B, C);
    t = clock() - t;
    cout << "It took me " << fixed << ((float)t) / CLOCKS_PER_SEC;
    cout << " seconds.\n" << endl;

    // Print the top-left corners of A, B, and C.
    for (i = 0; i < min(10, A.height); ++i) {
        for (j = 0; j < min(10, A.width); ++j)
            cout << fixed << A.elements[i * A.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, B.height); ++i) {
        for (j = 0; j < min(10, B.width); ++j)
            cout << fixed << B.elements[i * B.width + j] << "\t";
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, C.height); ++i) {
        for (j = 0; j < min(10, C.width); ++j)
            cout << fixed << C.elements[i * C.width + j] << "\t";
        cout << endl;
    }
    cout << endl;

    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
13,334
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>

#define BUFFSIZE 4096
#define WINDOW_LENGTH 25
#define AXIS 3
#define Y_INDEX 1
#define Z_INDEX 2

/*
 Computes per-window statistics (magnitude, intensity, per-axis mean,
 standard deviation, max and min) over sliding WINDOW_LENGTH-sample
 windows of the x/y/z accelerometer streams.
 Was intentionally NOT broken up into multiple functions for performance
 reasons, however is pretty well commented.

 Expected launch: 1-D grid/blocks, at least numOfLines threads.
 NOTE(review): phase 1 (window flattening) and phase 2 (statistics) are
 separated only by __syncthreads(), which is a BLOCK-local barrier; a
 window whose samples were flattened by a thread in a different block is
 a cross-block race.  Splitting the two phases into two kernel launches
 would make this safe -- TODO confirm intent.
*/
__global__ void cudaMagic( int* mag, int* in, int* means, float* sd,
                           int* max, int* min,
                           int* x, int* y, int* z,
                           int* xcoords, int* ycoords, int* zcoords,
                           int numOfLines, int length )
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int count, tempCount, avg = 0, sdx = 0, sdy = 0, sdz = 0;
    int sumx = 0, sumy = 0, sumz = 0, absumx = 0, absumy = 0, absumz = 0;
    int xmax = 0, ymax = 0, zmax = 0, xmin = 0, ymin = 0, zmin = 0;
    int v;

    /* Phase 1: flatten the WINDOW_LENGTH samples ending at i into the
       window arrays; window w occupies [w*WINDOW_LENGTH, (w+1)*WINDOW_LENGTH). */
    if( i >= WINDOW_LENGTH && i <= numOfLines ){
        for( count = i - WINDOW_LENGTH, tempCount = 0 ; count < i ; count++, tempCount++ ){
            x[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = xcoords[count];
            y[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = ycoords[count];
            z[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = zcoords[count];
        }
    }
    __syncthreads();

    /* Phase 2: one thread computes the statistics of one window. */
    if( i < length ){
        /* Initialize the max and min values to the first sample of the window. */
        xmax = xmin = x[i * WINDOW_LENGTH];
        ymax = ymin = y[i * WINDOW_LENGTH];
        zmax = zmin = z[i * WINDOW_LENGTH];

        /* BUG FIX: the window offset must run 0..WINDOW_LENGTH-1; the old
           loop started at `i`, indexing x[i*WINDOW_LENGTH + i] and beyond,
           i.e. reading past this thread's window.
           Also replaced fabsf/fmaxf/fminf (float intrinsics applied to
           ints, with silent conversions) by integer operations. */
        for( count = 0 ; count < WINDOW_LENGTH ; count++ ){
            v = x[i * WINDOW_LENGTH + count];
            absumx += abs( v );       /* sum of absolute values */
            sumx += v;                /* plain sum */
            avg += v;                 /* running total over all three axes */
            if( v > xmax ) xmax = v;
            if( v < xmin ) xmin = v;

            v = y[i * WINDOW_LENGTH + count];
            absumy += abs( v );
            sumy += v;
            avg += v;
            if( v > ymax ) ymax = v;
            if( v < ymin ) ymin = v;

            v = z[i * WINDOW_LENGTH + count];
            absumz += abs( v );
            sumz += v;
            avg += v;
            if( v > zmax ) zmax = v;
            if( v < zmin ) zmin = v;
        }

        /* Extra loop to calculate standard deviation because it relies on
           the results of sumx, sumy and sumz.
           NOTE(review): sdx/sdy/sdz are ints, so each powf() term is
           truncated on accumulation -- kept from the original; confirm
           whether float accumulation was intended.
           BUG FIX: the __syncthreads() that used to sit before this loop
           was inside this divergent branch (undefined behavior); nothing
           is exchanged between threads here, so it was removed. */
        for( count = 0 ; count < WINDOW_LENGTH ; count++ ){
            sdx += powf( (x[i * WINDOW_LENGTH + count] - (sumx/WINDOW_LENGTH)), 2 );
            sdy += powf( (y[i * WINDOW_LENGTH + count] - (sumy/WINDOW_LENGTH)), 2 );
            sdz += powf( (z[i * WINDOW_LENGTH + count] - (sumz/WINDOW_LENGTH)), 2 );
        }

        /* Writes all the results to their appropriate arrays; the per-axis
           arrays are laid out [x | y | z], each segment `length` entries. */
        mag[i] = (absumx + absumy + absumz) / WINDOW_LENGTH;
        in[i] = avg / WINDOW_LENGTH;
        means[i] = sumx / WINDOW_LENGTH;
        means[Y_INDEX * length + i] = sumy / WINDOW_LENGTH;
        means[Z_INDEX * length + i] = sumz / WINDOW_LENGTH;
        sd[i] = sqrtf( sdx );
        sd[Y_INDEX * length + i] = sqrtf( sdy );
        sd[Z_INDEX * length + i] = sqrtf( sdz );
        max[i] = xmax;
        max[Y_INDEX * length + i] = ymax;
        max[Z_INDEX * length + i] = zmax;
        min[i] = xmin;
        min[Y_INDEX * length + i] = ymin;
        min[Z_INDEX * length + i] = zmin;
    }
}

/* Allocates a device array of `size` bytes with error checking (INT). */
__host__ void allocate_dev( int** array, const int size )
{
    cudaError_t err = cudaSuccess;
    err = cudaMalloc((void **)array, size );
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate array on device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Allocates a device array of `size` bytes with error checking (FLOAT). */
__host__ void allocate_devf( float** array, const int size )
{
    cudaError_t err = cudaSuccess;
    err = cudaMalloc((void **)array, size );
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate array on device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Allocates the local arrays to copy the result arrays from the device to.
   mag/in hold one entry per window; means/sd/max/min hold AXIS segments of
   `length` entries each.  Exits on allocation failure; returns 0 on success. */
__host__ int allocateLocal( int** mag, int** in, int** means, float** sd,
                            int** max, int** min, const int length )
{
    (*mag) = (int*)malloc( length * sizeof( int ) );
    (*in) = (int*)malloc( length * sizeof( int ) );
    (*means) = (int*)malloc( length * AXIS * sizeof( int ) );
    (*sd) = (float*)malloc( length * AXIS * sizeof( float ) );
    (*max) = (int*)malloc( length * AXIS * sizeof( int ) );
    (*min) = (int*)malloc( length * AXIS * sizeof( int ) );
    if( *mag == NULL || *in == NULL || *means == NULL || *sd == NULL ||
        *max == NULL || *min == NULL ){
        fprintf( stderr, "Malloc of local array failed!\n" );
        exit( EXIT_FAILURE );
    }
    return 0;
}

/* Returns the number of '\n' characters in the file pointed to by *fp,
   then rewinds the stream so it can be re-read. */
__host__ int getLineCount( FILE** const fp )
{
    int count = 0;
    /* BUG FIX: was `char c`; where char is unsigned, (char)EOF can never
       equal EOF and the loop never terminates.  getc() returns int. */
    int c;
    for( c = getc( *fp ) ; c != EOF ; c = getc( *fp ) ){
        if( c == '\n' ){
            count++;
        }
    }
    rewind( *fp );
    return count;
}

/* Populates the local x/y/z arrays with the comma-separated accelerometer
   data from "sheep_imu_data.csv".  Allocates *x, *y, *z (caller frees).
   Returns 0 on success, 1 if the file cannot be opened. */
__host__ int readData( int** x, int** y, int** z, int* numOfLines )
{
    char* token;
    char line[BUFFSIZE];
    int count;
    const char del[2] = ",";
    FILE* input;

    if( ( input = fopen( "sheep_imu_data.csv", "r" ) ) == NULL ){
        fprintf( stderr, "Failed to open file!" );
        return 1;
    }
    *numOfLines = getLineCount( &input );
    *x = (int*)malloc( (*numOfLines) * sizeof(int) );
    *y = (int*)malloc( (*numOfLines) * sizeof(int) );
    *z = (int*)malloc( (*numOfLines) * sizeof(int) );
    if( *x == NULL || *y == NULL || *z == NULL ){
        fprintf( stderr, "Malloc of local array failed!\n" );
        exit( EXIT_FAILURE );
    }
    count = 0;
    /* NOTE(review): assumes every line has at least three comma-separated
       fields; a malformed line would make strtok return NULL -- confirm
       the input format is guaranteed. */
    while( fgets( line, BUFFSIZE, input ) ){
        token = strtok( line, del );
        (*x)[count] = atoi( token );
        token = strtok( NULL, del );
        (*y)[count] = atoi( token );
        token = strtok( NULL, del );
        (*z)[count] = atoi( token );
        count++;
    }
    fclose( input );
    return 0;
}

/* Writes the result arrays to `filename` as CSV, one row per window:
   mag, intensity, means(x,y,z), sd(x,y,z), max(x,y,z), min(x,y,z). */
__host__ void writeCSV( const char* const filename, int** mag, int** intensity,
                        int** means, float** sd, int** max, int** min,
                        const int length )
{
    FILE* fp;
    int count;  /* int, to match the signed `length` it is compared with */
    if( ( fp = fopen( filename, "w+" ) ) == NULL ){
        fprintf( stderr, "Failed to open or create new file!" );
        return;  /* BUG FIX: previously fell through and wrote to a NULL FILE* */
    }
    for( count = 0 ; count < length ; count++ ){
        fprintf( fp, "%d, %d, %d, %d, %d, %f, %f, %f, %d, %d, %d, %d, %d, %d\n",
                 (*mag)[count], (*intensity)[count],
                 (*means)[count], (*means)[Y_INDEX*length+count], (*means)[Z_INDEX*length+count],
                 (*sd)[count], (*sd)[Y_INDEX*length+count], (*sd)[Z_INDEX*length+count],
                 (*max)[count], (*max)[Y_INDEX*length+count], (*max)[Z_INDEX*length+count],
                 (*min)[count], (*min)[Y_INDEX*length+count], (*min)[Z_INDEX*length+count] );
    }
    fclose( fp );
}
13,335
/*
 * Copyright 2015 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/equal.h>   // thrust::equal is used below; do not rely on transitive includes
#include <cstdio>           // printf is used below
#include <cstdlib>

/* Benchmarks thrust::sort of 32M random ints on the device against the
   same sort on the host, prints both rates, and verifies the two sorted
   arrays are identical.  Returns 0 unconditionally. */
int main(void)
{
    // BUG FIX: 32<<20 is an int, but was passed to printf for a %ld
    // conversion (undefined behavior).  Use a long constant throughout.
    const long numElements = 32L << 20;   // 32M

    // generate 32M random numbers on the host
    thrust::host_vector<int> h_vec( numElements );
    thrust::generate( h_vec.begin(), h_vec.end(), rand );

    // replicate input on another host vector (for the CPU sort)
    thrust::host_vector<int> h_vec1 = h_vec;

    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;

    // create timers
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // sort data on the device
    cudaEventRecord( start, 0 );
    thrust::sort( d_vec.begin(), d_vec.end() );
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );

    float GPUelapsedTime;
    cudaEventElapsedTime( &GPUelapsedTime, start, stop );
    GPUelapsedTime /= 1000.0f;   // ms -> s
    printf("sort of %ld in %f seconds\n", numElements, GPUelapsedTime );
    printf("Sort of %f M / sec\n", (double)numElements / (double)GPUelapsedTime * 1e-6);

    // transfer data back to host
    thrust::copy( d_vec.begin(), d_vec.end(), h_vec.begin() );

    // sort data on host; the host sort runs synchronously between the two
    // event records, so the event pair works as a coarse wall-clock timer.
    cudaEventRecord( start, 0 );
    thrust::sort(h_vec1.begin(), h_vec1.end() );
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );

    float CPUelapsedTime;
    cudaEventElapsedTime( &CPUelapsedTime, start, stop );
    CPUelapsedTime /= 1000.0f;
    printf("sort of %ld in %f seconds\n", numElements, CPUelapsedTime );
    printf("Sort of %f M / sec\n", (double)numElements / (double)CPUelapsedTime * 1e-6);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    printf("GPU is %5.2fX faster than CPU\n", CPUelapsedTime/GPUelapsedTime );

    // both sorts ran on identical input, so the results must match
    if ( thrust::equal( h_vec1.begin(), h_vec1.end(), h_vec.begin() ) )
        printf("The arrays are equal\n");
    else
        printf("The arrays are different!\n");

    return 0;
} /* end main */
13,336
#include "includes.h" __global__ void shared1R8C1W8C1G(float *A, float *B, float *C, const int N) { // compilador é esperto e aproveita o valor de i, mas faz 1W, 2 R nas outras posições da Shared __shared__ float Smem[512]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { Smem[((threadIdx.x+1)*8)%512] = i; C[i] = Smem[(threadIdx.x*8)%512]; } }
13,337
/*
 * Elementwise double-precision math kernels.
 *
 * Each kernel maps one CUDA thread to one array element:
 *   result[i] = f(x[i])          (unary)
 *   result[i] = f(x[i], y[i])    (binary)
 * Launch with a 1-D grid such that gridDim.x * blockDim.x >= n;
 * out-of-range threads do nothing.
 *
 * The kernels are generated by macros so that every instance shares the
 * exact same indexing and bounds check; the emitted symbols and behavior
 * are identical to writing each kernel out by hand.
 */

#define DEFINE_UNARY_MATH_KERNEL(FN)                                       \
    extern "C" __global__ void math_##FN(size_t n, double *result,         \
                                         double *x)                       \
    {                                                                     \
        int i = blockIdx.x * blockDim.x + threadIdx.x;                    \
        if (i < n)                                                        \
        {                                                                 \
            result[i] = FN(x[i]);                                         \
        }                                                                 \
    }

DEFINE_UNARY_MATH_KERNEL(acos)
DEFINE_UNARY_MATH_KERNEL(acosh)
DEFINE_UNARY_MATH_KERNEL(asin)
DEFINE_UNARY_MATH_KERNEL(asinh)
DEFINE_UNARY_MATH_KERNEL(atan)
DEFINE_UNARY_MATH_KERNEL(atanh)
DEFINE_UNARY_MATH_KERNEL(cbrt)
DEFINE_UNARY_MATH_KERNEL(ceil)
DEFINE_UNARY_MATH_KERNEL(cos)
DEFINE_UNARY_MATH_KERNEL(cosh)
DEFINE_UNARY_MATH_KERNEL(cospi)
DEFINE_UNARY_MATH_KERNEL(cyl_bessel_i0)
DEFINE_UNARY_MATH_KERNEL(cyl_bessel_i1)
DEFINE_UNARY_MATH_KERNEL(erf)
DEFINE_UNARY_MATH_KERNEL(erfc)
DEFINE_UNARY_MATH_KERNEL(erfcinv)
DEFINE_UNARY_MATH_KERNEL(erfcx)
DEFINE_UNARY_MATH_KERNEL(erfinv)
DEFINE_UNARY_MATH_KERNEL(exp)
DEFINE_UNARY_MATH_KERNEL(exp10)
DEFINE_UNARY_MATH_KERNEL(exp2)
DEFINE_UNARY_MATH_KERNEL(expm1)
DEFINE_UNARY_MATH_KERNEL(fabs)
DEFINE_UNARY_MATH_KERNEL(floor)
DEFINE_UNARY_MATH_KERNEL(j0)
DEFINE_UNARY_MATH_KERNEL(j1)
DEFINE_UNARY_MATH_KERNEL(lgamma)
DEFINE_UNARY_MATH_KERNEL(log)
DEFINE_UNARY_MATH_KERNEL(log10)
DEFINE_UNARY_MATH_KERNEL(log1p)
DEFINE_UNARY_MATH_KERNEL(log2)
DEFINE_UNARY_MATH_KERNEL(logb)
DEFINE_UNARY_MATH_KERNEL(nearbyint)
DEFINE_UNARY_MATH_KERNEL(normcdf)
DEFINE_UNARY_MATH_KERNEL(normcdfinv)
DEFINE_UNARY_MATH_KERNEL(rcbrt)
DEFINE_UNARY_MATH_KERNEL(rint)
DEFINE_UNARY_MATH_KERNEL(round)
DEFINE_UNARY_MATH_KERNEL(rsqrt)
DEFINE_UNARY_MATH_KERNEL(sin)
DEFINE_UNARY_MATH_KERNEL(sinh)
DEFINE_UNARY_MATH_KERNEL(sinpi)
DEFINE_UNARY_MATH_KERNEL(sqrt)
DEFINE_UNARY_MATH_KERNEL(tan)
DEFINE_UNARY_MATH_KERNEL(tanh)
DEFINE_UNARY_MATH_KERNEL(tgamma)
DEFINE_UNARY_MATH_KERNEL(trunc)
DEFINE_UNARY_MATH_KERNEL(y0)
DEFINE_UNARY_MATH_KERNEL(y1)

#undef DEFINE_UNARY_MATH_KERNEL

#define DEFINE_BINARY_MATH_KERNEL(FN)                                      \
    extern "C" __global__ void math_##FN(size_t n, double *result,         \
                                         double *x, double *y)            \
    {                                                                     \
        int i = blockIdx.x * blockDim.x + threadIdx.x;                    \
        if (i < n)                                                        \
        {                                                                 \
            result[i] = FN(x[i], y[i]);                                   \
        }                                                                 \
    }

DEFINE_BINARY_MATH_KERNEL(atan2)
DEFINE_BINARY_MATH_KERNEL(copysign)
DEFINE_BINARY_MATH_KERNEL(fdim)
DEFINE_BINARY_MATH_KERNEL(fmin)
DEFINE_BINARY_MATH_KERNEL(fmod)
DEFINE_BINARY_MATH_KERNEL(nextafter)
DEFINE_BINARY_MATH_KERNEL(pow)
DEFINE_BINARY_MATH_KERNEL(remainder)
DEFINE_BINARY_MATH_KERNEL(rhypot)

#undef DEFINE_BINARY_MATH_KERNEL
13,338
#include <stdio.h> #include <cuda.h> __device__ volatile int counter = 0; //Launching kernel with <<< (4,4) , (7,7) >>> gridDim.x =4 gridDim.y =4 __global__ void stencil(int* d_input, int M, int N) { // TODO: Your implementation goes here int tot_blocks = gridDim.x*gridDim.y; // =16 int thidx = blockDim.x*blockIdx.x + threadIdx.x; int thidy = blockDim.y*blockIdx.y + threadIdx.y; int write_this; if (thidx>0 && thidy >0 && thidx <N-1 && thidy < M-1) write_this = 0.2*(d_input[thidy*N+thidx] + d_input[(thidy+1)*N+thidx] + d_input[(thidy-1)*N+thidx] + d_input[thidy*N+thidx+1] + d_input[thidy*N+thidx- 1]); __syncthreads(); if(threadIdx.x==0 && threadIdx.y==0) atomicAdd((int *)&counter,1); while(counter<tot_blocks); // Waits here for infinite time. if (thidx>0 && thidy >0 && thidx <N-1 && thidy < M-1) d_input[thidy*N+thidx] = write_this; } int main() { int *arr; const int M = 16, N = 16; cudaMalloc(&arr, M * N * sizeof(int)); stencil<<<M, N>>>(arr, M, N); cudaDeviceSynchronize(); }
13,339
#include<stdio.h>
#include<iostream>
#include<cstdlib>
#include<time.h>
#include<cuda.h>
#define TILE_WIDTH 32
using namespace std;

// Prints an n x m row-major matrix.
// BUG FIX: rows were indexed with A[n*i+j]; the row stride of an n x m
// row-major matrix is m, so non-square matrices printed garbage.
void print(int *A, int n, int m)
{
    for (int i=0; i<n; i++) {
        for (int j=0; j<m; j++) {
            cout<<A[m*i+j]<<" | ";
        }
        cout<<endl;
    }
}

// Fills the n x m matrix `mat` (stored as a flat vector) with `value`.
void fillMatrix (int *mat, int value, int n, int m)
{
    int size = n*m;
    for (int i=0; i<size; i++) {
        mat[i] = value;
    }
}

//=====================================================================================
// Sequential reference: C = A (n x m) * B (m x o), all row-major flat vectors.
void multMatrixsequential (int *h_matA, int *h_matB, int *h_matC, int n, int m, int o)
{
    // Row*Width+Col addresses element (Row, Col) of a row-major matrix.
    for (int i=0; i<n; i++) {
        for (int j=0; j<o; j++) {
            int sum = 0;
            for (int k=0; k<m; k++) {
                sum += h_matA[m*i+k]*h_matB[o*k+j];
            }
            h_matC[o*i+j] = sum;
        }
    }
}

// Naive kernel: one thread per output element, global memory only.
__global__ void matrixMultKernel (int *d_matA, int *d_matB, int *d_matC, int n, int m, int o)
{
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    int Col = blockIdx.x*blockDim.x+threadIdx.x;
    if ((Row<n)&&(Col<o)) {
        int temp = 0;
        for (int i=0; i<m; i++) {
            temp += d_matA[Row*m+i]*d_matB[i*o+Col];
        }
        d_matC[Row*o+Col] = temp;
    }
}

// Tiled kernel: stages TILE_WIDTH x TILE_WIDTH tiles of A and B in shared
// memory.  Requires square TILE_WIDTH x TILE_WIDTH thread blocks.
__global__ void matrixMulKernelTiled(int *d_matA, int *d_matB, int *d_matC, int n, int m, int o)
{
    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = blockIdx.y * TILE_WIDTH + ty;
    int col = blockIdx.x * TILE_WIDTH + tx;
    // BUG FIX: accumulator was float for int matrices; int keeps it exact.
    int Pvalue = 0;
    for (int k = 0; k < (m+TILE_WIDTH-1)/TILE_WIDTH; ++k) {
        // Guarded loads: partial edge tiles read zeros instead of going
        // out of bounds.  Every thread loads, so the barriers below are
        // reached uniformly by the whole block.
        Mds[ty][tx] = (k*TILE_WIDTH + tx < m && row < n) ? d_matA[row * m + k*TILE_WIDTH + tx] : 0;
        Nds[ty][tx] = (k*TILE_WIDTH + ty < m && col < o) ? d_matB[(k*TILE_WIDTH + ty) * o + col] : 0;
        __syncthreads();
        // Inner index renamed `e`: it previously shadowed the outer `k`.
        for (int e = 0; e < TILE_WIDTH; ++e) {
            Pvalue += Mds[ty][e] * Nds[e][tx];
        }
        __syncthreads();
    }
    if (row < n && col < o) {
        d_matC[row * o + col] = Pvalue;
    }
}

// Host wrapper for the TILED multiplication: C = A (n x m) * B (m x o).
void multMatrixParallelTiled(int *A, int *B, int *C, int n, int m, int o)
{
    const int blockSize = 32;   // must equal TILE_WIDTH for the tiled kernel
    int *d_matA, *d_matB, *d_matC;
    //1. Allocate device memory
    cudaMalloc(&d_matA, n * m * sizeof(int));
    cudaMalloc(&d_matB, m * o * sizeof(int));
    cudaMalloc(&d_matC, n * o * sizeof(int));
    //2. Copy inputs host -> device
    cudaMemcpy(d_matA, A, n * m * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matB, B, m * o * sizeof(int), cudaMemcpyHostToDevice);
    dim3 threads(blockSize, blockSize, 1);
    dim3 blocks((o + blockSize - 1)/blockSize, (n + blockSize - 1)/blockSize, 1);
    //3. Kernel launch
    // BUG FIX: this wrapper previously launched the untiled matrixMultKernel,
    // so the "tiled" timing in main() never exercised the tiled kernel.
    matrixMulKernelTiled<<<blocks,threads>>>(d_matA,d_matB,d_matC,n,m,o);
    cudaMemcpy (C, d_matC, n * o * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
}

// Host wrapper for the tile-less (naive) multiplication kernel.
void multMatrixParallel(int *A, int *B, int *C, int n, int m, int o)
{
    const int blockSize = 32;
    int *d_matA, *d_matB, *d_matC;
    //1. Allocate device memory
    cudaMalloc(&d_matA, n * m * sizeof(int));
    cudaMalloc(&d_matB, m * o * sizeof(int));
    cudaMalloc(&d_matC, n * o * sizeof(int));
    //2. Copy inputs host -> device
    cudaMemcpy(d_matA, A, n * m * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matB, B, m * o * sizeof(int), cudaMemcpyHostToDevice);
    dim3 threads(blockSize, blockSize, 1);
    dim3 blocks((o + blockSize - 1)/blockSize, (n + blockSize - 1)/blockSize, 1);
    //3. Kernel launch
    matrixMultKernel<<<blocks,threads>>>(d_matA,d_matB,d_matC,n,m,o);
    cudaMemcpy (C, d_matC, n * o * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
}

// Compares two n x m matrices element-wise and prints the verdict.
// Always returns 0 (the return value is ignored by the callers in main()).
int compareMatrix (int *A, int *B, int n, int m)
{
    int size = n*m;
    for (int i=0; i<size; i++ ) {
        if (A[i]!=B[i]) {
            cout<<"## sequential and Parallel results are NOT equal ##"<<endl;
            return 0;
        }
    }
    cout<<"== sequential and Parallel results are equal =="<<endl;
    return 0;
}

//========================================== MAIN =====================================
// Times sequential vs. naive-parallel vs. tiled-parallel multiplication of
// two all-ones matrices (2x4 * 4x8) and checks all three results agree.
int main()
{
    clock_t start, finish;
    double elapsedsequential, elapsedParallel, elapsedParallelTiles, optimizationP, optimizationT;
    int n=2;
    int m=4;
    int o=8;
    int *matA = (int *) malloc(n * m * sizeof(int));
    int *matB = (int *) malloc(m * o * sizeof(int));
    int *matCS = (int *) malloc(n * o * sizeof(int));
    int *matCP = (int *) malloc(n * o * sizeof(int));
    int *matCPT = (int *) malloc(n * o * sizeof(int));
    fillMatrix(matA,1,n,m);
    fillMatrix(matB,1,m,o);
    fillMatrix(matCS,0,n,o);
    fillMatrix(matCP,0,n,o);
    fillMatrix(matCPT,0,n,o);

    start = clock();
    multMatrixsequential(matA,matB,matCS,n,m,o);
    finish = clock();
    elapsedsequential = (((double) (finish - start)) / CLOCKS_PER_SEC );
    cout<< "The sequential process took: " << elapsedsequential << " seconds to execute "<< endl<< endl;

    start = clock();
    multMatrixParallel(matA,matB,matCP,n,m,o);
    finish = clock();
    elapsedParallel = (((double) (finish - start)) / CLOCKS_PER_SEC );
    cout<< "The parallel process took: " << elapsedParallel << " seconds to execute "<< endl<< endl;

    start = clock();
    multMatrixParallelTiled(matA,matB,matCPT,n,m,o);
    finish = clock();
    elapsedParallelTiles = (((double) (finish - start)) / CLOCKS_PER_SEC );
    cout<< "The parallel using Tiles process took: " << elapsedParallelTiles << " seconds to execute "<< endl<< endl;

    optimizationP = elapsedsequential/elapsedParallel;
    cout<< "The acceleration we've got without using Tiles: " << optimizationP << "X" <<endl;
    optimizationT = elapsedsequential/elapsedParallelTiles;
    cout<< "The acceleration we've got using Tiles: " << optimizationT << "X" <<endl;

    cout<< "Comparing Serial vs Parallel result " <<endl;
    compareMatrix(matCS,matCP,n,o);
    cout<< "Comparing Serial vs Parallel with Tiles result " <<endl;
    compareMatrix(matCS,matCPT,n,o);

    free (matA);
    free (matB);
    free (matCS);
    free (matCP);
    free (matCPT);
    return 0;
}
13,340
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>   // malloc/free/atoi (previously pulled in transitively)
#include <math.h>

/* Parses `fname`: the first two whitespace-separated tokens are N and the
   iteration count, then the N x N matrix A (row-major) and -- after one
   ignored "solution" marker token -- the N-entry RHS vector b.
   Allocates *A and *b with malloc (caller frees).
   On open failure sets *N and *iter to 0 and returns without allocating. */
void readFile(const char* fname, int* N, int* iter, float** A, float** b)
{
    FILE *fp;
    char buf[100];
    int i, j;
    fp = fopen(fname, "r");
    if(!fp)
    {
        *N = 0;
        *iter = 0;
        printf("Stale File Handle\n");
        return;
    }
    /* BUG FIX: all scans use %99s instead of %s so a long token can never
       overflow buf[100]. */
    if(fscanf(fp, "%99s", buf) > 0) *N = atoi(buf);
    if(fscanf(fp, "%99s", buf) > 0) *iter = atoi(buf);
    printf("N = %d\nIterations = %d\n", *N, *iter);
    *b = (float*) malloc(*N*sizeof(float));
    *A = (float*) malloc((*N)*(*N)*sizeof(float));
    /* NOTE(review): scans inside the loops are unchecked, matching the
       original behavior on truncated files -- confirm inputs are trusted. */
    for(i = 0; i < *N; i++)
    {
        for(j = 0; j < *N; j++)
        {
            fscanf(fp, "%99s", buf);
            (*A)[ ((*N*i)+j) ] = (float)atoi(buf);
        }
    }
    fscanf(fp, "%99s", buf); // Ignore the "solution" in the text
    for(i = 0; i < *N; i++)
    {
        fscanf(fp, "%99s", buf);
        (*b)[i] = (float)atoi(buf);
    }
    fclose(fp);
}

/* Classic Jacobi iteration: solves A x = b approximately with *iter sweeps,
   starting from x = 0.  Allocates *x (caller frees). */
void jacobi(float** A, float** b, int* N, int* iter, float** x)
{
    int i, j, k;
    float t;
    float *y;
    *x = (float*) malloc(*N*sizeof(float));
    y = (float*) malloc(*N*sizeof(float));
    for(i = 0; i < *N; i++)
    {
        (*x)[i] = 0.0f; // Initial Guess
    }
    for(k = 0; k < *iter; k++)
    {
        for(i = 0; i < *N; i++)
        {
            t = 0;
            for(j = 0; j < *N; j++)
            {
                if(i != j)
                {
                    t += (((*A)[ ((*N*i)+j) ]) * ((*x)[j]));
                }
            }
            y[i] = (((*b)[i]) - t)/((*A)[ ((*N*i)+i) ]);
        }
        for(i = 0; i < *N; i++)
        {
            (*x)[i] = y[i];
        }
    }
    free(y);  /* BUG FIX: the scratch vector was leaked (free was commented out) */
}

/* One Jacobi sweep: y[i] = (b[i] - sum_{j != i} A[i][j] * x[j]) / A[i][i]. */
void iloop(float* A, float* b, int N, float* x, float* y)
{
    float t;
    for(int i = 0; i < N; i++)
    {
        t = 0.0f;
        for(int j = 0; j < N; j++)
        {
            if(i != j)
            {
                t = t + (( A[ ((N*i)+j) ] ) * x[j]);
            }
        }
        y[i] = ((b[i] - t)/( A[ ((N*i)+i) ]));
    }
}

/* Reads the system from the input file, runs the Jacobi iteration on the
   CPU (timed with CUDA events), then reports max |A x - b|. */
int main(int argc, char* argv[])
{
    float time = 0.0f;
    float maxError = 0.0f;
    int k;
    float* A;
    float* b;
    float* x;
    float* y;
    float* c;
    const char* fname;  /* const: may point at a string literal */
    int N, iter, i, j;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    if(argc == 2) fname = argv[1];
    else fname = "../inputs/8.txt";
    readFile(fname, &N, &iter, &A, &b);
    printf("CUDA : Parsed file%s\n", fname);

    x = (float*) malloc(N*sizeof(float));
    y = (float*) malloc(N*sizeof(float));
    for(i = 0; i < N; i++)
    {
        x[i] = 0.0f;
        y[i] = 0.0f;
    }

    /* The host loop runs synchronously between the two event records, so
       the event pair works as a coarse wall-clock timer. */
    cudaEventRecord(start);
    for(k = 0; k < iter; k++)
    {
        iloop(A, b, N, x, y);
        for(j = 0; j < N; j++) x[j] = y[j];
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("CUDA : Done Computing Jacobi on CPU\n");

    /* Residual check: c = A x, maxError = max_i |c[i] - b[i]|. */
    c = (float*) malloc(N*sizeof(float));
    for(i = 0; i < N; i++)
    {
        c[i] = 0;
        for(j = 0; j < N; j++)
        {
            c[i] += A[ ((N*i)+j) ] * x[j];
        }
        maxError = fmax(maxError, fabs(c[i] - b[i]));
    }
    printf("\nCUDA : Time %f ms\n", time);
    printf("CUDA : MaxError = %f\n\n\n", maxError);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(A);
    free(b);
    free(x);
    free(y);
    free(c);
    return 0;
}
13,341
#include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <ctype.h> #include <fcntl.h> #include <unistd.h> #include <sys/mman.h> #include <time.h> #include <sys/time.h> #include "deflate_kernel.cu" // defined in deflate_kernel.cu // define CHUNK_SIZE 32768 // define THREAD_NUM 1024 // Input: Filename int main(int argc, char *argv[]) { int i; int f_handle; char *f_in; char *f_out; struct stat finfo; char * inputfname; char * outputfname; if (argc < 3) { printf("USAGE: %s <input filename> <output filename>\n", argv[0]); exit(1); } inputfname = argv[1]; outputfname = argv[2]; f_handle = open(inputfname, O_RDONLY); fstat(f_handle, &finfo); f_in = (char*) malloc(finfo.st_size); f_out = (char*) malloc(finfo.st_size); unsigned int data_bytes = (unsigned int)finfo.st_size; printf("This file has %d bytes data\n", data_bytes); read (f_handle, f_in, data_bytes); //Set the number of blocks and threads dim3 grid(1, 1, 1); dim3 block(THREAD_NUM, 1, 1); char* d_in; cudaMalloc((void**) &d_in, data_bytes); cudaMemcpy(d_in, f_in, data_bytes, cudaMemcpyHostToDevice); char* d_out; cudaMalloc((void**) &d_out, data_bytes); cudaMemset(d_out, 0, data_bytes); struct timeval start_tv, end_tv; time_t sec; time_t ms; time_t diff; gettimeofday(&start_tv, NULL); deflatekernel<<<grid, block>>>(data_bytes, d_in, d_out); cudaThreadSynchronize(); gettimeofday(&end_tv, NULL); sec = end_tv.tv_sec - start_tv.tv_sec; ms = end_tv.tv_usec - start_tv.tv_usec; diff = sec * 1000000 + ms; printf("%10s:\t\t%fms\n", "Time elapsed", (double)((double)diff/1000.0)); cudaMemcpy(f_out, d_out, data_bytes, cudaMemcpyDeviceToHost); // Inflate data_out using zlib // Meh // Compare inflated data with input // whatever FILE *writeFile; writeFile = fopen(outputfname,"w+"); for(i = 0; i < data_bytes; i++) fprintf(writeFile,"%c", f_out[i]); fclose(writeFile); return 0; }
13,342
__global__ void bloom_gpu() {}
13,343
#define t_max 1
#define t 1
/* Stencil the kernel below appears to have been generated from (machine-generated
   code, presumably from a stencil compiler -- TODO confirm toolchain):
   (u[0][0][0][0][0]=((alpha*(ux[1][0][0][0][1]-ux[-1][0][0][0][1]))+((beta*(uy[0][1][0][0][2]-uy[0][-1][0][0][2]))+(gamma*(uz[0][0][1][0][3]-uz[0][0][-1][0][3]))))) */

// Computes u = alpha*(ux[x+1]-ux[x-1]) + beta*(uy[y+1]-uy[y-1]) + gamma*(uz[z+1]-uz[z-1])
// (central differences) into u_0_0. Each thread handles a run of `c` consecutive
// x-cells; blockIdx.y encodes BOTH the y tile and the z tile (decoded via
// size_1_2 = z_max/blockDim.z), so the launch grid's y extent must equal
// (y_max/blockDim.y)*(z_max/blockDim.z) -- confirm against the host launcher.
// Note `t` is the macro above (== 1), so every `2*t` term is the constant 2.
// NOTE(review): u_0_0_out, size_1_1 and all thdblks_* variables are computed
// but never read in this body; they look like generator residue.
__global__ void divergence(float * * u_0_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
    /* float * const u__u_0[16] = { u_0_0 } ; float * const u__ux_1[16] = { ux_1_0 } ; float * const u__uy_2[16] = { uy_2_0 } ; float * const u__uz_3[16] = { uz_3_0 } ; */
    int _idx0;
    int _idx1;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int chunk_idx_x;
    int chunk_idx_x_max;
    int chunk_idx_y;
    int chunk_idx_y_max;
    int chunk_idx_z;
    int chunk_idx_z_max;
    int idx_1_2;
    int size_1_1;
    int size_1_2;
    //int t;
    int thd_idx_x;
    int thd_idx_y;
    int thd_idx_z;
    int thdblks_idx_x;
    int thdblks_idx_x_max;
    int thdblks_idx_y;
    int thdblks_idx_y_max;
    int thdblks_idx_z;
    int thdblks_idx_z_max;
    int tmp;

    /* Initializations: decode the fused blockIdx.y into a (y tile, z tile)
       pair and derive this thread's chunk of cells. */
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
    chunk_idx_x_max=(chunk_idx_x+c);
    chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
    chunk_idx_y_max=(chunk_idx_y+1);
    chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    chunk_idx_z_max=(chunk_idx_z+1);
    thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
    thdblks_idx_x_max=(thdblks_idx_x+tbx);
    thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
    thdblks_idx_y_max=(thdblks_idx_y+tby);
    thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
    thdblks_idx_z_max=(thdblks_idx_z+tbz);

    /* Implementation */
    /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
        /* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
        /* for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... } */
        {
            /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
            thd_idx_z=chunk_idx_z;
            thd_idx_y=chunk_idx_y;
            // Walk this thread's c-long run of x-cells; the _idxN values are
            // flattened offsets into arrays padded by 2 ghost cells per axis
            // (the `2*t` == 2 terms) -- derived incrementally from each other.
            for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
            {
                /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
                /* u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0]) */
                /* _idx0 = (((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(thd_idx_y*x_max))+((2*t)*thd_idx_y))+thd_idx_x)+2) */
                _idx0=(((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(thd_idx_y*x_max))+((2*t)*thd_idx_y))+thd_idx_x)+2);
                /* _idx1 = ((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(thd_idx_y*x_max))+((2*t)*thd_idx_y))+thd_idx_x) */
                _idx1=(_idx0-2);
                /* _idx2 = ((((thd_idx_z*x_max)*y_max)+(((((2*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+thd_idx_x) */
                _idx2=(((_idx1-(((2*t)*thd_idx_z)*y_max))+((((2*t)*thd_idx_z)+2)*x_max))-((2*t)*thd_idx_y));
                /* _idx3 = ((((thd_idx_z*x_max)*y_max)+((((2*t)*thd_idx_z)+thd_idx_y)*x_max))+thd_idx_x) */
                _idx3=(_idx2-(2*x_max));
                /* _idx4 = (((((thd_idx_z+2)*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
                _idx4=((_idx3+((2*x_max)*y_max))-(((2*t)*thd_idx_z)*x_max));
                /* _idx5 = ((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
                _idx5=(_idx4-((2*x_max)*y_max));
                u_0_0[_idx5]=((alpha*(ux_1_0[_idx0]-ux_1_0[_idx1]))+((beta*(uy_2_0[_idx2]-uy_2_0[_idx3]))+(gamma*(uz_3_0[_idx4]-uz_3_0[_idx5]))));
            }
        }
    }
}

// Companion initialization kernel: fills the pairs of stencil sample points
// for each cell with fixed seed values (ux=0.2, uy=0.30000000000000004,
// uz=0.4, u=0.1). Uses the same fused-blockIdx.y / chunked-x decomposition as
// divergence above; `(t-1)` is the constant 0, so only plane [0] of each
// single-entry pointer table is ever touched.
// NOTE(review): the 0.2 / 0.30000000000000004 / 0.4 / 0.1 literals are doubles
// narrowed to float on store; size_1_1 and thdblks_* are again unused.
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
    float * const u__u_0[16] = { u_0_0 } ;
    float * const u__ux_1[16] = { ux_1_0 } ;
    float * const u__uy_2[16] = { uy_2_0 } ;
    float * const u__uz_3[16] = { uz_3_0 } ;
    int _idx0;
    int _idx1;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int chunk_idx_x;
    int chunk_idx_x_max;
    int chunk_idx_y;
    int chunk_idx_y_max;
    int chunk_idx_z;
    int chunk_idx_z_max;
    int idx_1_2;
    int size_1_1;
    int size_1_2;
    //int t;
    int thd_idx_x;
    int thd_idx_y;
    int thd_idx_z;
    int thdblks_idx_x;
    int thdblks_idx_x_max;
    int thdblks_idx_y;
    int thdblks_idx_y_max;
    int thdblks_idx_z;
    int thdblks_idx_z_max;
    int tmp;

    /* Initializations: identical decomposition to divergence(). */
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
    chunk_idx_x_max=(chunk_idx_x+c);
    chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
    chunk_idx_y_max=(chunk_idx_y+1);
    chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    chunk_idx_z_max=(chunk_idx_z+1);
    thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
    thdblks_idx_x_max=(thdblks_idx_x+tbx);
    thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
    thdblks_idx_y_max=(thdblks_idx_y+tby);
    thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
    thdblks_idx_z_max=(thdblks_idx_z+tbz);

    /* Implementation */
    /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
        /* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
        /* for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... } */
        {
            /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
            thd_idx_z=chunk_idx_z;
            thd_idx_y=chunk_idx_y;
            for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
            {
                /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
                /* u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0]) */
                /* _idx0 = ((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(thd_idx_y*x_max))+((2*t)*thd_idx_y))+thd_idx_x) */
                _idx0=((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(thd_idx_y*x_max))+((2*t)*thd_idx_y))+thd_idx_x);
                u__ux_1[(t-1)][_idx0]=0.2;
                /* _idx1 = ((((thd_idx_z*x_max)*y_max)+((((2*t)*thd_idx_z)+thd_idx_y)*x_max))+thd_idx_x) */
                _idx1=(((_idx0-(((2*t)*thd_idx_z)*y_max))+(((2*t)*thd_idx_z)*x_max))-((2*t)*thd_idx_y));
                u__uy_2[(t-1)][_idx1]=0.30000000000000004;
                /* _idx2 = ((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
                _idx2=(_idx1-(((2*t)*thd_idx_z)*x_max));
                u__uz_3[(t-1)][_idx2]=0.4;
                /* _idx3 = (((((thd_idx_z+2)*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
                _idx3=(_idx2+((2*x_max)*y_max));
                u__uz_3[(t-1)][_idx3]=0.4;
                /* _idx4 = ((((thd_idx_z*x_max)*y_max)+(((((2*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+thd_idx_x) */
                _idx4=(_idx1+(2*x_max));
                u__uy_2[(t-1)][_idx4]=0.30000000000000004;
                /* _idx5 = (((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(thd_idx_y*x_max))+((2*t)*thd_idx_y))+thd_idx_x)+2) */
                _idx5=(_idx0+2);
                u__ux_1[(t-1)][_idx5]=0.2;
                u__u_0[(t-1)][_idx2]=0.1;
            }
        }
    }
}
13,344
#include <stdio.h>
#include <cuda.h>

#define N 1024
#define BLOCK_SIZE 32

// Tiled matrix multiply: C = A * B for square, row-major N x N matrices.
// Expected launch layout: grid (N/BLOCK_SIZE, N/BLOCK_SIZE), block
// (BLOCK_SIZE, BLOCK_SIZE). N must be a multiple of BLOCK_SIZE -- there are
// no bounds guards; each thread produces exactly one element of C.
__global__ void mm(double *A, double *B, double *C)
{
    // Block index
    const uint bx = blockIdx.x;
    const uint by = blockIdx.y;
    // Thread index
    const uint tx = threadIdx.x;
    const uint ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    const uint aBegin = N * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    const uint aEnd = aBegin + N - 1;
    // Step size used to iterate through the sub-matrices of A
    const uint aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    const uint bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    const uint bStep = BLOCK_SIZE * N;
    // The element of the block sub-matrix that is computed by the thread
    double Csub = 0;

    // Loop over all the sub-matrices of A and B required to compute the
    // block sub-matrix.
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        // Shared-memory tiles for the current sub-matrices of A and B.
        __shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Each thread loads one element of each tile.
        As[ty][tx] = A[a + N * ty + tx];
        Bs[ty][tx] = B[b + N * ty + tx];

        // Make sure both tiles are fully loaded before using them.
        __syncthreads();

        // Each thread accumulates one element of the block sub-matrix.
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += As[ty][k] * Bs[k][tx];

        // Make sure the computation is done before the tiles are overwritten
        // in the next iteration.
        __syncthreads();
    }

    // Write the block sub-matrix to global memory; one element per thread.
    const uint c = N * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + N * ty + tx] = Csub;
}

// Host driver: fills A and B with uniform [0,1) values, runs the kernel five
// times (each launch fully overwrites C, so this is a benchmark loop, not an
// accumulation), and prints one sampled element.
int main ()
{
    double *a, *b, *c;
    double *d_a, *d_b, *d_c;
    // Byte count of one N x N matrix. The original declared this as `double`,
    // which silently floating->integral converts at every malloc/cudaMalloc
    // call site; size_t is the correct type for an allocation size.
    const size_t size = sizeof(double) * N * N;

    dim3 grid(N/BLOCK_SIZE, N/BLOCK_SIZE);
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);

    a = (double *) malloc (size);
    b = (double *) malloc (size);
    c = (double *) malloc (size);

    cudaMalloc ((void**)&d_a, size);
    cudaMalloc ((void**)&d_b, size);
    cudaMalloc ((void**)&d_c, size);

    // rand()/(RAND_MAX+1.0) yields values in [0, 1).
    for( int i = 0; i < N*N; i++ )
    {
        a[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
        b[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
        c[i] = 0;
    }

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);

    for (int i=0; i<5; i++)
    {
        mm <<<grid, block>>> (d_a, d_b, d_c);
    }
    cudaDeviceSynchronize();

    cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost);

    //for( int i=0; i < N*N; i++ )
    //{
    //  printf("%f\t%f\t%f\n", a[i], b[i], c[i]);
    //}
    printf("%f\n", c[N/2 * N/2]);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
13,345
// Semi-Lagrangian-style upwind advection of a level set field on a MAC grid.
// One thread per cell; expects 16x16 thread blocks (T_THREADS) covering the
// whole domain, with x/y velocities stored with one extra column/row
// (pitch blockDim.x*gridDim.x + 1, per this kernel's own indexing).
// NOTE(review): d_mask is accepted but never read (see "todo remove" in the
// original); apron cells of blocks at the domain border are never loaded, so
// upwind reads there touch uninitialized shared memory -- confirm the caller
// guarantees inflow-free domain borders.
__global__ void advectLevelset(const float dt,
                               const float inv_dx,
                               const unsigned char * d_mask,
                               const float * d_levelsetIn,
                               float * d_levelsetOut,
                               const float * d_velIn_x,
                               const float * d_velIn_y)
{
    //todo remove
    const int T_THREADS = 16;

    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    const int j = threadIdx.y + blockDim.y * blockIdx.y;
    const int g_idx = i + j * blockDim.x * gridDim.x;

    // Shared tile for the level set, +2 in each dimension for the apron.
    __shared__ float s_phi[(T_THREADS + 2) * (T_THREADS + 2)];

    // Load inner phi.
    int s_idx = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2);
    s_phi[s_idx] = d_levelsetIn[g_idx];

    // Load phi at the apron (only from neighboring blocks, see NOTE above).
    // Left boundary
    if (threadIdx.x == 0 && blockIdx.x != 0)
    {
        s_idx = (threadIdx.y + 1) * (blockDim.x + 2);
        s_phi[s_idx] = d_levelsetIn[g_idx - 1];
    }
    // Right boundary
    if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
    {
        s_idx = (threadIdx.y + 1) * (blockDim.x + 2) + threadIdx.x + 2;
        s_phi[s_idx] = d_levelsetIn[g_idx + 1];
    }
    // Bottom boundary
    if (threadIdx.y == 0 && blockIdx.y != 0)
    {
        s_idx = threadIdx.x + 1;
        s_phi[s_idx] = d_levelsetIn[g_idx - gridDim.x * blockDim.x];
    }
    // Top boundary
    if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1)
    {
        s_idx = (threadIdx.y + 2) * (blockDim.x + 2) + threadIdx.x + 1;
        s_phi[s_idx] = d_levelsetIn[g_idx + gridDim.x * blockDim.x];
    }

    // All phi loads must land before any thread reads a neighbor's value.
    __syncthreads();

    // Shared tiles for the staggered velocities, +1 for the extra face.
    __shared__ float s_vel_x[(T_THREADS + 1)*(T_THREADS + 1)];
    __shared__ float s_vel_y[(T_THREADS + 1)*(T_THREADS + 1)];

    s_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x;

    // Because of the MAC grid, global memory has one extra component per row.
    // BUG FIX: the original computed i * j * (...), which collapses to index 0
    // for the entire first row and column; the flattened index is i + j*pitch.
    int g_idx_vel = i + j * (blockDim.x * gridDim.x + 1);

    // Load inner velocities.
    s_vel_x[s_idx] = d_velIn_x[g_idx_vel];
    s_vel_y[s_idx] = d_velIn_y[g_idx_vel];

    // Load boundary velocities.
    // Right boundary: the extra x-face needed by the x-interpolation below.
    if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1)
    {
        s_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x + 1;
        s_vel_x[s_idx] = d_velIn_x[g_idx_vel + 1];
    }
    // Top boundary: the extra y-face needed by the y-interpolation below.
    // BUG FIX: the original filled s_vel_x from d_velIn_x here, leaving the
    // top row of s_vel_y -- the only out-of-tile value actually read --
    // uninitialized.
    if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1)
    {
        s_idx = (threadIdx.y + 1) * (blockDim.x + 1) + threadIdx.x;
        s_vel_y[s_idx] = d_velIn_y[g_idx_vel + blockDim.x * gridDim.x + 1];
    }

    // All velocity loads must land before interpolation.
    __syncthreads();

    // Average the two faces of each cell to get the cell-centered velocity.
    int vel_idx = threadIdx.x + threadIdx.y * (blockDim.x + 1);
    float vel_x = (s_vel_x[vel_idx] + s_vel_x[vel_idx + 1]) * 0.5f;
    float vel_y = (s_vel_y[vel_idx] + s_vel_y[vel_idx + blockDim.x + 1]) * 0.5f;

    // First-order upwind differences, biased against the flow direction.
    float dphidx, dphidy;
    int phi_idx = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2);
    float phi = s_phi[phi_idx];
    if (vel_x > 0.0f)
    {
        dphidx = (phi - s_phi[phi_idx - 1]) * inv_dx;
    }
    else
    {
        dphidx = (s_phi[phi_idx + 1] - phi) * inv_dx;
    }
    if (vel_y > 0.0f)
    {
        dphidy = (phi - s_phi[phi_idx - (blockDim.x + 2)]) * inv_dx;
    }
    else
    {
        dphidy = (s_phi[phi_idx + (blockDim.x + 2)] - phi) * inv_dx;
    }

    // Explicit Euler step: phi_new = phi - dt * (v . grad(phi)).
    d_levelsetOut[g_idx] = phi - dt * (dphidx * vel_x + dphidy * vel_y);
}

// Host-side wrapper that forwards all arguments to the kernel with the given
// launch configuration. No synchronization or error checking is performed.
void advectLevelset(dim3 blocks, dim3 threads,
                    const float dt,
                    const float inv_dx,
                    const unsigned char * d_mask,
                    const float * d_levelsetIn,
                    float * d_levelsetOut,
                    const float * d_velIn_x,
                    const float * d_velIn_y)
{
    advectLevelset<<<blocks,threads>>>(dt, inv_dx, d_mask, d_levelsetIn,
                                       d_levelsetOut, d_velIn_x, d_velIn_y);
}
13,346
//#include "Chunk.cuh" //#include <bitset> //#include <iostream> //#include <cstdio> // //using namespace std; //namespace FFF{ //__constant__ idx_t p_Mod[max_nonzero_coefs_in_mod]; //__constant__ idx_t p_ModLen; //__constant__ Element element_mul; //__constant__ Chunk c; // // //__device__ void a_chunkToNormal(Chunk *d_a, Elements_Chunk *d_b, idx_t idx) //{ // cell_t ans = 0; // idx_t element_idx = idx & andMask(Chunk::log_elements_in_chunk); // idx_t cell_idx = idx >> Chunk::log_elements_in_chunk; // for(unsigned int i = cell_idx<<Element::log_bits_in_cell ; i < ((cell_idx+1)<<Element::log_bits_in_cell); ++i) // ans^=(((cell_t)(((d_a->v[i])>>(element_idx))&1))<<(i-(cell_idx<<Element::log_bits_in_cell))); // d_b->e[element_idx].c[cell_idx]=ans; //} //__global__ void k_chunkToNormal(Chunk *d_a,Elements_Chunk *d_b , len_t len) //{ // const unsigned int threads_in_chunk = Chunk::elements_in_chunk * Element::element_len; // __shared__ Chunk input[max_block_size / threads_in_chunk]; // idx_t idx = threadIdx.x + blockDim.x*blockIdx.x; // if(idx >= len*threads_in_chunk) // return; // idx_t chunkIdx = (idx) / (Element::element_len*Chunk::elements_in_chunk); // idx_t in_chunkIdx = (idx & (Element::element_len * Chunk::elements_in_chunk - 1)); // idx_t chunks_in_block = blockDim.x / Chunk::cells_in_chunk; // idx_t inBlockChunkIdx = chunkIdx & (threads_in_chunk-1); // for(unsigned int i = 0 ; i < sizeof(cell_t)/sizeof(chunk_cell_t) ; ++i){ // input[inBlockChunkIdx].v[in_chunkIdx + i*threads_in_chunk] = d_a[chunkIdx].v[in_chunkIdx+i*threads_in_chunk]; // } // a_chunkToNormal(&(input[inBlockChunkIdx]), &(d_b[chunkIdx]),in_chunkIdx); //} //__host__ void Chunk::chunkToNormal(Chunk(*h_a), Elements_Chunk(*h_b), len_t len, bool copy) //{ // //Declare device variables // Chunk (*d_a); // Elements_Chunk (*d_b); // // const unsigned int num_element = len*elements_in_chunk; // const unsigned int threads = Element::element_len * num_element; // // //Define Block and Grid Size. 
// dim3 blockSize(max_block_size,1,1); // dim3 gridSize(sizeCiel(threads,max_block_size),1,1); // if(copy){ // //Allocate Memory on GPU. (global) // cudaMalloc(&d_a,sizeof(Chunk)*len); // cudaMalloc(&d_b,sizeof(Elements_Chunk)*len); // // //Copy memory to GPU. // cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); // } else { // d_a = h_a; // d_b = h_b; // } // // //Launch Kernel // k_chunkToNormal<<<gridSize,blockSize>>>(d_a,d_b,len); // if(copy){ // //Copy results back to memory // cudaMemcpy(h_b,d_b,sizeof(Elements_Chunk)*len,cudaMemcpyDeviceToHost); // // //Free allocated memory. // cudaFree(d_a); // cudaFree(d_b); // } //} //__device__ void a_normalToChunk(Elements_Chunk *d_a, Chunk *d_b, idx_t idx) //{ // chunk_cell_t ans = 0; // idx_t cell_idx = idx>>Element::log_bits_in_cell; // for(unsigned int i = 0 ; i < Chunk::elements_in_chunk ; ++i) // ans^=((((d_a->e[i].c[cell_idx])>>(idx& andMask(Element::log_bits_in_cell)))&1)<<i); // d_b->v[idx]=ans; //} //__global__ void k_normalToChunk(Elements_Chunk *d_a,Chunk *d_b , len_t len) //{ // idx_t idx = threadIdx.x + blockDim.x*blockIdx.x; // if(idx >= (len<<Chunk::log_cells_in_chunk)) // return; // idx_t chunkIdx = (idx) >> Chunk::log_cells_in_chunk; // idx_t in_chunkIdx = (idx & andMask(Chunk::log_cells_in_chunk)); // a_normalToChunk(&(d_a[chunkIdx]),&(d_b[chunkIdx]),in_chunkIdx); //} //__host__ void Chunk::normalToChunk(Elements_Chunk(*h_a), Chunk (*h_b), len_t len,bool copy) //{ // // //Declare device variables // Elements_Chunk (*d_a); // Chunk (*d_b); // // const unsigned int threads = len<<Chunk::log_cells_in_chunk; // // //Define Block and Grid Size. // dim3 blockSize(max_block_size,1,1); // dim3 gridSize(sizeCiel(threads,max_block_size),1,1); // // //Allocate Memory on GPU. (global) // if(copy){ // cudaMalloc(&d_a,sizeof(Elements_Chunk)*len); // cudaMalloc(&d_b,sizeof(Chunk)*len); // // //Copy memory to GPU. 
// cudaMemcpy(d_a,h_a,sizeof(Elements_Chunk)*len,cudaMemcpyHostToDevice); // } else{ // d_a = h_a; // d_b = h_b; // } // //Launch Kernel // k_normalToChunk<<<gridSize,blockSize>>>(d_a,d_b,len); // // //Copy results back to memory // if(copy){ // cudaMemcpy(h_b,d_b,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); // //Free allocated memory. // cudaFree(d_a); // cudaFree(d_b); // } // //} //__host__ void Chunk::setMod(){ // cudaMemcpyToSymbol(p_Mod,&(Element::irr_poly_index[ord>>log_warp_size]),sizeof(idx_t)*max_nonzero_coefs_in_mod); // cudaMemcpyToSymbol(p_ModLen,&(Element::mod_len[ord>>log_warp_size]),sizeof(idx_t)); //} //__device__ void Chunk::chunk_reduce_xor(Chunk *a, Chunk *c_bottom, Chunk*c_top, idx_t idx) //{ // chunk_cell_t ans=c_bottom->v[idx]; // unsigned int temp_idx; // for(idx_t i = 0 ; i < p_ModLen ; ++i) // { // for(idx_t j = 0 ; j < p_ModLen ; ++j) // { // temp_idx = idx+(ord<<1)-p_Mod[i]-p_Mod[j]; // if(temp_idx >= (ord<<1)-p_Mod[j] && temp_idx < (ord<<1)) // ans^=c_top->v[temp_idx-ord]; // } // } // a->v[idx]^=ans; //} //__device__ void Chunk::chunk_xor(Chunk *a, Chunk* b, idx_t idx){ // a->v[idx]^=b->v[idx]; //} //__device__ void Chunk::chunk_reduce_xor(Chunk *a, Chunk *c_bottom, idx_t idx,Chunk* to_xor ,int shift) //{ // unsigned int k = p_ModLen; // for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) // for(unsigned int j = 0 ; j+1 < k ; ++j) // { // c_bottom->v[(ord>>1)+idx+i+p_Mod[j]]^=c_bottom->v[(ord>>1)+ord+idx+i]; // } // for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) // for(unsigned int j = 0 ; (j+1) < k ; ++j) // { // c_bottom->v[idx+i+p_Mod[j]]^=c_bottom->v[ord+idx+i]; // } // for(unsigned int i = 0 ; i < ord ; i+=warp_size){ // to_xor->v[idx+i]^=(c_bottom->v[idx+i]>>shift); // } //} // //__device__ void Chunk::chunk_reduce(Chunk *a, Chunk *c_bottom, idx_t idx) //{ // unsigned int k = p_ModLen; // for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) // for(unsigned int j = 0 ; j+1 < k ; ++j) // { // 
c_bottom->v[(ord>>1)+idx+i+p_Mod[j]]^=c_bottom->v[(ord>>1)+ord+idx+i]; // } // for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) // for(unsigned int j = 0 ; (j+1) < k ; ++j) // { // c_bottom->v[idx+i+p_Mod[j]]^=c_bottom->v[ord+idx+i]; // } // for(unsigned int i = 0 ; i < ord ; i+=warp_size){ // a->v[idx+i]=c_bottom->v[idx+i]; // } //} //__device__ void Chunk::chunkClmul(Chunk (*a), Element (*e), idx_t idx, Chunk (*c)) //{ // chunk_cell_t my_ans[2][(ord>>(log_warp_size))]={0}; // for(unsigned int k = 0 ; k < ord ; ++k) // { // if(EXTRACT_BIT(e->c,k)) // for(unsigned int t = 0 ; t < (ord>>log_warp_size); ++t) // { // int b = (k>(idx+warp_size*t)); // my_ans[b][t]^=a->v[idx+warp_size*t+(b<<log_ord)-k]; // } // } // for(unsigned int i = 0 ; i < (ord>>log_warp_size); ++i) // { // c->v[idx+i*warp_size] = my_ans[0][i]; // c->v[ord+idx+i*warp_size] = my_ans[1][i]; // } //} //__device__ void Chunk::aux_k_clmul(Chunk *a, Element* e, len_t len,Chunk* c_shared) //{ // // idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; // const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); // const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); // const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); // Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); // for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size) // my_shared_chunk->v[in_chunk_idx+i]=a[chunk_idx].v[in_chunk_idx+i]; // Chunk::chunkClmul(my_shared_chunk,e,in_chunk_idx,my_shared_chunk); // Chunk::chunk_reduce(a+chunk_idx,my_shared_chunk,in_chunk_idx); //} //__global__ void k_clmul(Chunk *a,Element *e,len_t len ) //{ // const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; // __shared__ Chunk c_shared[shared_len<<1]; // idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; // if(idx>=(len<<Chunk::log_threads_in_chunk)) // return; // Chunk::aux_k_clmul(a,e,len,c_shared); //} //__host__ void Chunk::mul(Chunk (*h_a),Element (*h_e),len_t len, 
Chunk (*h_res)){ // //#ifdef __MEASURE // cudaEvent_t start,stop; // float time; // cudaEventCreate(&start); // cudaEventCreate(&stop); //#endif // //Declare device variables // Chunk (*d_a); // Element (*d_e); // // //Define Block and Grid Size. // dim3 blockSize(max_block_size,1,1); // dim3 gridSize(sizeCiel(len<<Chunk::log_threads_in_chunk,max_block_size),1,1); // // //Allocate Memory on GPU. (global) // cudaMalloc(&d_a,sizeof(Chunk)*len); // cudaMalloc(&d_e,sizeof(Element)); // // //Copy memory to GPU. // cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); // cudaMemcpy(d_e,h_e,sizeof(Element),cudaMemcpyHostToDevice); // // //Set Mod // setMod(); //// setElementMul(h_e); // //Launch Kernel //#ifdef __MEASURE // cudaEventRecord(start,0); //#endif // k_clmul<<<gridSize,blockSize>>>(d_a,d_e,len); //#ifdef __MEASURE // cudaEventRecord(stop,0); //#endif // // //Copy results to host // cudaMemcpy(h_res,d_a,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); // //Free allocated memory. // cudaFree(d_a); // cudaFree(d_e); //#ifdef __MEASURE // cudaEventElapsedTime(&time,start,stop); // printf("Time for the mul: %f ms on %d chunks \n",time,len); //#endif //} // //__global__ void k_add(Chunk (*a), Chunk (*b), len_t l) //{ // unsigned int idx = threadIdx.x+blockIdx.x*blockDim.x; // if(idx>=l*Chunk::cells_in_chunk) // return; // ((chunk_cell_t*)a)[idx]^=((chunk_cell_t*)b)[idx]; //} //__host__ void Chunk::add(Chunk (*h_a),Chunk (*h_b),len_t len) //{ // // //Declare device variables // Chunk (*d_a); // Chunk (*d_b); // // //Define Block and Grid Size. // dim3 blockSize(max_block_size,1,1); // dim3 gridSize(sizeCiel(max_block_size,len),1,1); // // //Allocate Memory on GPU. (global) // cudaMalloc(&d_a,sizeof(Chunk)*len); // cudaMalloc(&d_b,sizeof(Chunk)*len); // // //Copy memory to GPU. 
// cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); // cudaMemcpy(d_b,h_b,sizeof(Chunk)*len,cudaMemcpyHostToDevice); // // //Launch Kernel // k_add<<<gridSize,blockSize>>>(d_a,d_b,len); // // //Copy results to CPU memory // cudaMemcpy(h_a,d_a,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); // // //Free allocated memory. // cudaFree(d_a); // cudaFree(d_b); //} //__host__ void Chunk::print() const { // for(unsigned int i = 0 ; i < cells_in_chunk ; ++i){ // cout << bitset<bits_in_byte*sizeof(chunk_cell_t)>(this->v[i])<<endl; // } //} //__host__ void Elements_Chunk::print() const{ // for(unsigned int i = 0 ; i < elements_in_elements_chunk ; ++i){ // Element::printElement(this->e[i]); // cout<<endl; // } // } ////Mul chunk by another chunk //__device__ void Chunk::clmul_by_chunk(Chunk& a, Chunk& e, idx_t idx, Chunk* c){ // chunk_cell_t my_ans[2][(ord>>(log_warp_size))]={0}; // for(unsigned int k = 0 ; k < ord ; ++k) // for(unsigned int t = 0 ; t < (ord>>log_warp_size); ++t) // { // int b = (k>(idx+warp_size*t)); // my_ans[b][t]^=a.v[idx+warp_size*t+(b<<log_ord)-k]&e.v[k]; // } // for(unsigned int i = 0 ; i < (ord>>log_warp_size); ++i) // { // c->v[idx+i*warp_size] = my_ans[0][i]; // c->v[ord+idx+i*warp_size] = my_ans[1][i]; // } //} ////Mul chunk by another chunk //__global__ void k_mul_chunk(Chunk* cs, Chunk* c, len_t cs_len) //{ // const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; // __shared__ Chunk c_shared[shared_len<<1]; // idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; // if(idx>=(cs_len<<Chunk::log_threads_in_chunk)) // return; // const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); // const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); // const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); // Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); // for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ // 
my_shared_chunk->v[in_chunk_idx+i]=cs[chunk_idx].v[in_chunk_idx+i]; // my_shared_chunk[1].v[in_chunk_idx+i]=c->v[in_chunk_idx+i]; // } // Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); // Chunk::chunk_reduce(cs+chunk_idx,my_shared_chunk,in_chunk_idx); //} //__global__ void k_mul_chunk_xor(Chunk* cs, Chunk* c, len_t cs_len,Chunk* to_xor, int shift = 0) //{ // const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; // __shared__ Chunk c_shared[shared_len<<1]; // idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; // if(idx>=(cs_len<<Chunk::log_threads_in_chunk)) // return; // const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); // const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); // const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); // Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); // for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ // my_shared_chunk->v[in_chunk_idx+i]=cs[chunk_idx].v[in_chunk_idx+i]; // my_shared_chunk[1].v[in_chunk_idx+i]=c->v[in_chunk_idx+i]; // } // Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); // Chunk::chunk_reduce_xor(cs+chunk_idx,my_shared_chunk,in_chunk_idx,to_xor,shift); //} ////Mul a chunk by a chunk //void Chunk::chunk_mul(Chunk (* h_a), Chunk (*h_b) , len_t len, Chunk (*h_res), bool copy, bool do_xor, int shift){ //#ifdef __MEASURE // cudaEvent_t start,stop; // float time; // cudaEventCreate(&start); // cudaEventCreate(&stop); //#endif // //Declare device variables // Chunk (*d_a); // Chunk (*d_b); // // //Define Block and Grid Size. // dim3 blockSize(max_block_size,1,1); // dim3 gridSize(sizeCiel(len<<Chunk::log_threads_in_chunk,max_block_size),1,1); // // if(copy){ // //Allocate Memory on GPU. (global) // cudaMalloc(&d_a,sizeof(Chunk)*len); // cudaMalloc(&d_b,sizeof(Chunk)); // //Copy memory to GPU. 
// cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); // cudaMemcpy(d_b,h_b,sizeof(Chunk),cudaMemcpyHostToDevice); // } else { // d_a = h_a; // d_b = h_b; // } // // // //Set Mod // setMod(); //// setElementMul(h_e); // //Launch Kernel //#ifdef __MEASURE // cudaEventRecord(start,0); //#endif // if(do_xor) // k_mul_chunk_xor<<<gridSize,blockSize>>>(d_a,d_b,len,d_a,shift); // else // k_mul_chunk<<<gridSize,blockSize>>>(d_a,d_b,len); //#ifdef __MEASURE // cudaEventRecord(stop,0); //#endif // // if(copy){ // //Copy results to host // cudaMemcpy(h_res,d_a,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); // //Free allocated memory. // cudaFree(d_a); // cudaFree(d_b); // } //#ifdef __MEASURE // cudaEventElapsedTime(&time,start,stop); // printf("Time for the mul: %f ms on %d chunks \n",time,len); //#endif //} //}
13,347
#include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
#include <cstring>
#include <stdio.h>

#define N 1000000
#define SIZE 100

//Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() {                                                       \
        cudaError_t e=cudaGetLastError();                                        \
        if(e!=cudaSuccess) {                                                     \
            printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
            exit(0);                                                             \
        }                                                                        \
    }

// Scale factor applied by vectorAdd. Note it is initialized to 0, so
// vectorAdd as written always produces zeros (it is not launched by main).
__constant__ int factor = 0;

// Element-wise c[i] = factor*(a[i] + b[i]). No bounds guard: the launch
// configuration must cover exactly the array length. Unused by main().
__global__ void vectorAdd(int *a, int *b, int *c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    c[i] = factor*(a[i] + b[i]);
}

// Element-wise 2D matrix addition over pointer-to-pointer matrices.
// No bounds guards. Unused by main().
__global__ void matrixAdd(int **a,int **b, int**c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    c[i][j] = a[i][j] + b[i][j];
}

#define PRINT(x) \
    std::cout << #x " = " << x << std::endl

// Debug helper: prints a C string. Unused by main().
void func(const char* ptr)
{
    std::cout << "ptr = " << ptr << std::endl;
}

#define INF 2e10f

// A sphere in the scene: color (r,g,b), radius, and center (x,y,z).
struct Sphere
{
    float r,b,g;
    float radius;
    float x,y,z;

    // Ray-sphere hit test for a ray fired along -z from pixel (ox, oy).
    // Returns the z of the hit point (and writes the shading factor into *n),
    // or -INF when the ray misses.
    // NOTE(review): the printf calls here are debug spam -- one per thread per
    // sphere -- and dominate runtime; consider deleting them.
    __device__ float hit(float ox, float oy, float *n)
    {
        float dx = ox - x;
        float dy = oy - y;
        printf("%f %f %f %f\n", x, y, dx, dy);
        printf("%f %f %f\n", dx*dx, dy*dy, radius*radius);
        if (dx*dx + dy*dy < radius*radius)
        {
            float dz = sqrtf(radius*radius - dx*dx - dy*dy);
            *n = dz / sqrtf(radius*radius);
            printf("n = %f\n", *n);
            return dz + z;
        }
        return -INF;
    }
};

#define rnd(x) (x*rand() / RAND_MAX)
#define SPHERES 20

// Scene data, uploaded once from the host via cudaMemcpyToSymbol.
__constant__ Sphere s[SPHERES];

// One thread per pixel: finds the closest sphere along -z and writes 'x'
// for a lit pixel, ' ' otherwise.
__global__ void kernel(char *image)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y*blockDim.x*gridDim.x;
    float ox = (x - SIZE/2);
    float oy = (y - SIZE/2);

    float r=0, g=0, b=0;
    float maxz = -INF;
    for (int i=0; i<SPHERES; i++)
    {
        float n;
        float t = s[i].hit(ox, oy, &n);
        printf("t = %f\n", t);  // NOTE(review): debug spam, consider removing
        if (t > maxz)
        {
            float fscale = n;
            r = s[i].r * fscale;
            g = s[i].g * fscale;
            b = s[i].b * fscale;
            printf("%f\n %f\n", s[i].r, fscale);
            // BUG FIX: the original never updated maxz, so the depth test
            // always compared against -INF and the LAST sphere hit won
            // instead of the one nearest the viewer.
            maxz = t;
        }
    }

    image[offset] = r>0 && g>0 && b>0 ? 'x' : ' ';
}

// Prints the SIZE x SIZE character image row by row.
void printImage(char *ptr)
{
    for (auto i=0; i<SIZE; i++)
    {
        char cpyPtr[SIZE+1];
        std::memcpy((void*)cpyPtr, (void*)(ptr + SIZE*i), SIZE);
        cpyPtr[SIZE] = '\0';  // rows in the source buffer are not terminated
        printf("%s\n", cpyPtr);
    }
    printf("\n");
}

// Driver: dumps device properties, builds a random sphere scene in constant
// memory, renders it with kernel<<<>>>, times the GPU work with CUDA events,
// and prints the ASCII image.
int main(int argc, char** argv)
{
    // start time
    auto startTime = std::chrono::high_resolution_clock::now();
    printf("Hello World\n");

    // get the number of devices
    int numDevices;
    cudaGetDeviceCount(&numDevices);
    cudaCheckError();
    PRINT(numDevices);

    cudaDeviceProp prop;
    for (auto i=0 ; i<numDevices; i++)
    {
        cudaGetDeviceProperties(&prop, i);
        PRINT(prop.name);
        PRINT(prop.totalGlobalMem);
        PRINT(prop.sharedMemPerBlock);
        PRINT(prop.regsPerBlock);
        PRINT(prop.warpSize);
        PRINT(prop.memPitch);
        PRINT(prop.maxThreadsPerBlock);
        PRINT(prop.maxThreadsDim[0]);
        PRINT(prop.maxThreadsDim[1]);
        PRINT(prop.maxThreadsDim[2]);
        PRINT(prop.maxGridSize[0]);
        PRINT(prop.maxGridSize[1]);
        PRINT(prop.maxGridSize[2]);
        PRINT(prop.totalConstMem);
        PRINT(prop.major);
        PRINT(prop.minor);
        PRINT(prop.clockRate);
        PRINT(prop.textureAlignment);
        PRINT(prop.deviceOverlap);
        PRINT(prop.multiProcessorCount);
        PRINT(prop.kernelExecTimeoutEnabled);
        PRINT(prop.integrated);
        PRINT(prop.canMapHostMemory);
        PRINT(prop.computeMode);
        PRINT(prop.maxTexture1D);
        PRINT(prop.maxTexture2D[0]);
        PRINT(prop.maxTexture2D[1]);
        PRINT(prop.maxTexture3D[0]);
        PRINT(prop.maxTexture3D[1]);
        PRINT(prop.maxTexture3D[2]);
        // PRINT(prop.maxTexture2DArray[0]);
        // PRINT(prop.maxTexture2DArray[1]);
        // PRINT(prop.maxTexture2DArray[2]);
        PRINT(prop.concurrentKernels);
    }

    // capture the start/stop time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaCheckError();

    char *d_image;
    cudaMalloc(&d_image, SIZE*SIZE*sizeof(char));
    cudaCheckError();

    // Build a random scene; constant memory cannot be written by the host
    // directly, hence the staging buffer + cudaMemcpyToSymbol.
    Sphere *h_s = new Sphere[SPHERES];
    for (auto i=0; i<SPHERES; i++)
    {
        h_s[i].r = rnd(1.0f);
        h_s[i].g = rnd(1.0f);
        h_s[i].b = rnd(1.0f);
        h_s[i].x = rnd((float)SIZE) - SIZE/2;
        h_s[i].y = rnd((float)SIZE) - SIZE/2;
        h_s[i].z = rnd((float)SIZE) - SIZE/2;
        h_s[i].radius = rnd((float)SIZE/10) + 2;
    }
    cudaMemcpyToSymbol(s, h_s, sizeof(Sphere) * SPHERES);
    cudaCheckError();
    // cudaMemcpy(s, h_s, sizeof(Sphere) * SPHERES,
    //            cudaMemcpyHostToDevice);
    delete [] h_s;

    dim3 grids(SIZE/16, SIZE/16);
    dim3 threads(16,16);
    kernel<<<grids, threads>>>(d_image);
    cudaCheckError();

    char h_image[SIZE*SIZE];
    // Blocking copy: also synchronizes with the kernel launch above.
    cudaMemcpy(h_image, d_image, SIZE*SIZE*sizeof(char),
               cudaMemcpyDeviceToHost);
    cudaCheckError();

    cudaFree(d_image);
    cudaCheckError();

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, stop);
    printf("elapsed = %f\n", elapsed);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    printImage(h_image);

    // stop time
    auto stopTime = std::chrono::high_resolution_clock::now();
    PRINT((stopTime - startTime).count());
    printf("Goodbye World\n");
}
13,348
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>

// Pointer naming convention: d_ prefix = device (GPU) memory, h_ prefix = host (CPU) memory.

// Abort with a readable message when a CUDA runtime call fails. Kernel
// launches return no status directly, so launch errors are fetched
// separately with cudaGetLastError() right after the launch.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(1);                                                          \
        }                                                                     \
    } while (0)

// Cube every element: d_out[i] = d_in[i]^3.
// Launched as a single block, so threadIdx.x alone indexes the array;
// the caller must keep the element count <= 1024 (max threads per block).
__global__ void kernel(float *d_out, float *d_in)
{
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f * f;
}

int main()
{
    // Element count (must stay <= 1024, see kernel comment) and byte size.
    const int ARRAY_SIZE = 1000;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Build the input 0.0, 1.0, 2.0, ... on the host.
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // Allocate device buffers and copy the input over.
    float *d_in;
    float *d_out;
    CUDA_CHECK(cudaMalloc((void **)&d_in, ARRAY_BYTES));
    CUDA_CHECK(cudaMalloc((void **)&d_out, ARRAY_BYTES));
    CUDA_CHECK(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));

    // One block of ARRAY_SIZE threads: one thread per element.
    kernel<<<1, ARRAY_SIZE>>>(d_out, d_in);
    CUDA_CHECK(cudaGetLastError());  // catches a bad launch configuration

    // The blocking D2H copy also waits for the kernel to finish.
    CUDA_CHECK(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost));

    // Print the results, four per line.
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    CUDA_CHECK(cudaFree(d_in));
    CUDA_CHECK(cudaFree(d_out));

    system("pause");  // Windows-only pause; a harmless failed command elsewhere
    return 0;
}
13,349
/*
 * "sample.cu"
 *
 * An example for CUDA: summing vectors
 * c[i] = a[i]+b[i], where a[i]=i, b[i]=2*i
 *
 * This program implements the case above on both
 * CPU and GPU, checks their results and also
 * compares their performances.
 *
 * Prefixes:
 *   "d_" indicates device (GPU) memory pointer;
 *   "h_" indicates host (CPU) memory pointer.
 *
 * The same event-based timing is used in both cases.
 */
#include <stdio.h>
#include <stdlib.h>

// includes CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define N (1000*1000)
#define block_size 512                              // no more than 512
#define block_num ((N+block_size-1)/block_size)     // ceil-div; parenthesized so the macro is safe in expressions

// Return type of a CUDA kernel must always be "void".
// Fills a[tid]=tid, b[tid]=2*tid and writes the element-wise sum into c.
__global__ void kernel_sum(int *d_a, int *d_b, int *d_c, int num){
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    if(tid<num){                 // guard: grid may overshoot num
        d_a[tid] = tid;
        d_b[tid] = tid*2;
        d_c[tid] = d_a[tid]+d_b[tid];
    }
}

// Runs the sum on the GPU, copies the result into `result` (host array of N
// ints) and returns the kernel time in milliseconds.
double gpusum(int *result){
    // create cudaEvents for timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // allocate memory on device (GPU)
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, N*sizeof(int));
    cudaMalloc((void **)&d_b, N*sizeof(int));
    cudaMalloc((void **)&d_c, N*sizeof(int));

    cudaEventRecord(start, 0);   // record start
    // everything to be timed goes between the two records
    kernel_sum<<<block_num, block_size>>>(d_a, d_b, d_c, N);
    cudaEventRecord(stop, 0);    // record stop
    cudaEventSynchronize(stop);

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // copy data from device (GPU) to host (CPU); this blocking copy also
    // guarantees the kernel has finished writing d_c
    cudaMemcpy(result, d_c, N*sizeof(int), cudaMemcpyDeviceToHost);

    // BUGFIX: the original leaked all three device buffers
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return double(elapsedTime);
}

// Same computation on the CPU, timed with the same CUDA event mechanism so
// the two numbers are comparable. Returns milliseconds.
double cpusum(int *result){
    int *h_a = new int[N];
    int *h_b = new int[N];
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);   // record start
    for(int i=0; i<N; i++){
        h_a[i] = i;
        h_b[i] = i*2;
        result[i] = h_a[i]+h_b[i];
    }
    cudaEventRecord(stop, 0);    // record stop
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    delete [] h_a;
    delete [] h_b;
    return double(elapsedTime);
}

// Element-wise comparison of two int vectors of length num.
bool check_result(int *vec_x, int *vec_y, int num){
    for(int i=0; i<num; i++)
        if(vec_x[i] != vec_y[i])
            return false;
    return true;
}

int main(){
    int *result_cpu = new int[N];
    int *result_gpu = new int[N];
    double time_cpu = cpusum(result_cpu);
    double time_gpu = gpusum(result_gpu);
    if(check_result(result_cpu, result_gpu, N)){
        printf("Results are correct!\n");
        printf("CPU Time:\t%f ms.\n", time_cpu);
        printf("GPU Time:\t%f ms.\n", time_gpu);
        printf("Speedup:\t%f.\n", time_cpu/time_gpu);
    }
    else{
        printf("Results are incorrect!\n");
    }
    delete [] result_cpu;
    delete [] result_gpu;
    return 0;
}
13,350
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#define pi 3.141592653589793
using namespace std;

// Training-accumulation kernel (RAISR-style hashing, judging by the bucket
// names — TODO confirm against the host code): for each interior pixel (x,y)
//   1. builds the 2x2 gradient structure tensor over a (2*p_hsize+1)^2 window,
//   2. hashes the pixel into an (angle, strength, coherence) bucket plus a
//      CFA phase `pos` derived from (x,y) mod types_size,
//   3. atomically accumulates the local patch outer-product into Q and
//      patch*target products into V for that bucket, and counts hits in mark.
// Launched on a 2D grid covering a row x col image stored with row stride
// `width`. stre/cohe are the quantization thresholds (lengths Qstrength,
// Qcoherence). in_GX/in_GY are the precomputed x/y gradients.
__global__ void remosaic_cuda(float *quad_raw, float *raw, float *in_GX, float *in_GY,int h_hsize,int f_hsize,int types_size, int Qangle, int Qstrength, int Qcoherence, float *stre, float * cohe, float *Q, float *V,float *mark,int row,int col,int width)
{
    int x = threadIdx.x+ blockIdx.x* blockDim.x;
    int y = threadIdx.y+ blockIdx.y* blockDim.y;
    // use the larger of the two half-sizes as the safety margin at the borders
    int p_hsize = max(h_hsize, f_hsize);
    //int tid = j1 + i1*col;
    if(x < p_hsize ||x >= col - p_hsize) return;
    if(y < p_hsize ||y >= row - p_hsize) return;
    // CFA phase index of this pixel within a types_size x types_size tile
    int pos = y % types_size * types_size + x % types_size;
    float dx_val=0.0;   // NOTE: shadowed by the loop-local dx_val/dy_val below
    float dy_val=0.0;
    int rect_w=2*p_hsize+1;   // window extent; currently unused
    int rect_h=2*p_hsize+1;
    // structure tensor entries: [g00 g01; g01 g11]
    float g00 = 0.0;
    float g01 = 0.0;
    float g11 = 0.0;
    // Python reference this loop was ported from:
    //for i in range(0-p_hsize,p_hsize+1):
    //  for j in range(0-p_hsize,p_hsize+1):
    //    dx_val = dx[(y+i),(x+j)]
    //    dy_val = dy[(y+i),(x+j)]
    //    g00 += dx_val * dx_val
    //    g01 += dx_val * dy_val
    //    g11 += dy_val * dy_val
    for(int i = 0-p_hsize;i<p_hsize+1;i++)
    {
        for(int j= 0-p_hsize;j<p_hsize+1;j++)
        {
            float dx_val = in_GX[(y+i)*width+x+j];
            float dy_val = in_GY[(y+i)*width+x+j];
            g00 += dx_val * dx_val;
            g01 += dx_val * dy_val;
            g11 += dy_val * dy_val;
        }
    }
    // closed-form eigenvalues of the symmetric 2x2 tensor:
    // S1/S2 = (trace +- sqrt(trace_diff^2 + 4*g01^2)) / 2
    float tmp1 = g00 + g11;
    float tmp2 = sqrt((g00 - g11) * (g00 - g11) + 4 * g01 * g01);
    float S1 = (tmp1 + tmp2) / 2;
    float S2 = (tmp1 - tmp2) / 2;
    // dominant-eigenvector angle in degrees, folded into [0, 180)
    float theta = 0;
    if(fabs(g01)>1e-9)
    {
        theta =atan((g00-S1)/(-g01))/pi*180;
    }
    else if(g00>g11)
    {
        theta = 90;
    }
    else{
        theta = 0;
    }
    if(theta<0){
        theta = theta + 180;
    }
    // quantize the angle into Qangle buckets (1-based ceil, then shifted to 0-based)
    int Q_theta = ceil(theta/180*Qangle);
    if(Q_theta==0){
        Q_theta = 1;
    }
    Q_theta= Q_theta -1;
    // strength = sqrt of dominant eigenvalue; coherence u in [0,1)
    // (tiny epsilon keeps the division defined when both eigenvalues are 0)
    float lamda = sqrt(S1);
    float u = (sqrt(S1) - sqrt(S2))/(sqrt(S1) + sqrt(S2) + 0.00000000000000001);
    // Python reference for the threshold search below:
    //Q_lamda = len(stre)
    //Q_u = len(cohe)
    //for k in reversed(range(0,len(stre))):
    //  if(lamda < stre[k]):
    //    Q_lamda = k
    //for k in reversed(range(0,len(cohe))):
    //  if(u < cohe[k]):
    //    Q_u = k
    // find the first threshold the value falls below (branchless select,
    // scanning from the top so the lowest matching index wins)
    int Q_lamda = Qstrength-1;
    int Q_u = Qcoherence -1;
    for(int k=Qstrength-1; k>=0; k--){
        Q_lamda = lamda < stre[k] ? k : Q_lamda;
    }
    for(int k=Qcoherence-1; k>=0; k--){
        Q_u = u < cohe[k] ? k : Q_u;
    }
    // flat bucket index within one CFA phase
    int dim0 = Qangle*Qstrength * Qcoherence;
    int index = Q_theta * Qstrength * Qcoherence + Q_lamda * Qcoherence + Q_u;
    // count one hit for this (phase, bucket); many threads may hit the same
    // bucket, hence the atomic
    atomicAdd(mark + pos*dim0+index, 1);
    // patch vector length: (2*h_hsize+1)^2
    int patchL_shape_2 = (2*h_hsize+1)*(2*h_hsize+1);
    // V is [pos][bucket][patch], Q is [pos][bucket][patch][patch]
    int V_offset = pos*dim0*patchL_shape_2+index*patchL_shape_2;
    //int Q_offset = pos*dim0*patchL_shape_2*patchL_shape_2 + index*patchL_shape_2*patchL_shape_2;
    int Q_offset = V_offset*patchL_shape_2;
    float *Q_ptr = Q+Q_offset;
    float *V_ptr = V+V_offset;
    int patchL_dim0 = 0;   // running index into V (one per patch element)
    int patchL_dim1 = 0;   // running index into Q (one per element pair)
    // accumulate Q += p * p^T and V += p * raw(y,x), where p is the flattened
    // (2*h_hsize+1)^2 patch of quad_raw around (x,y)
    for(int i=y-h_hsize;i<y+h_hsize+1;i++)
    {
        for(int j=x-h_hsize; j< x+h_hsize+1;j++)
        {
            float val = quad_raw[i*width+j];
            for(int k= y-h_hsize;k<y+h_hsize+1;k++)
            {
                for(int m= x-h_hsize;m<x+h_hsize+1;m++)
                {
                    float ret=val*quad_raw[k*width+m];
                    atomicAdd(Q_ptr + patchL_dim1, ret);
                    patchL_dim1+=1;
                }
            }
            float b1 = val*raw[y*width+x];
            atomicAdd(V_ptr+patchL_dim0, b1);
            patchL_dim0 +=1;
        }
    }
};
13,351
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>

// Element-wise in-place sum: a[i] += b[i] for i < count.
// One thread per element; the guard handles the grid overshoot.
__global__ void AddArraysAtDevice(int *a, int *b, int count){
    int t_id = blockIdx.x * blockDim.x + threadIdx.x;
    if(t_id < count){
        a[t_id] += b[t_id];
    }
}

int main(){
    int count = 1000;
    int *h_a = (int*)malloc(sizeof(int) * count);
    int *h_b = (int*)malloc(sizeof(int) * count);
    // a[i] = i, b[i] = count - i, so every sum should equal count
    for(int i=0; i<count; i++){
        h_a[i] = i;
        h_b[i] = count-i;
    }

    int *d_a = NULL, *d_b = NULL;
    // On any failure: release whatever was acquired and report failure via
    // the exit status (the original returned 0 — success — on every error
    // path and leaked the earlier allocations).
    if(cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess){
        printf("Problem with memory allocation\n");
        free(h_a); free(h_b);
        return 1;
    }
    if(cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess){
        printf("Problem with memory allocation\n");
        cudaFree(d_a);
        free(h_a); free(h_b);
        return 1;
    }
    if(cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
        printf("Problem with copying from host to device\n");
        cudaFree(d_a); cudaFree(d_b);
        free(h_a); free(h_b);
        return 1;
    }
    if(cudaMemcpy(d_b, h_b, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
        printf("Problem with copying from host to device\n");
        cudaFree(d_a); cudaFree(d_b);
        free(h_a); free(h_b);
        return 1;
    }

    // ceil-div grid so every element gets a thread
    AddArraysAtDevice<<<(count + 255) / 256, 256>>>(d_a, d_b, count);
    if(cudaGetLastError() != cudaSuccess){   // catch bad launch configuration
        printf("Problem with kernel launch\n");
        cudaFree(d_a); cudaFree(d_b);
        free(h_a); free(h_b);
        return 1;
    }

    // blocking copy; also waits for the kernel to finish
    if(cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess){
        printf("Problem with copying from device to host\n");
        cudaFree(d_a); cudaFree(d_b);
        free(h_a); free(h_b);
        return 1;
    }

    printf("Numbers added on GPU!!\n");
    for(int i=0; i<count; i++){
        printf("Num %d: %d\n", i+1, h_a[i]);
    }

    free(h_a);
    free(h_b);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
13,352
#include <stdio.h>
// #include <cuda.h>
#include <iostream>
#include <random>
#include <chrono>

#define NB_OF_ELEM 16777216          // DIM * DIM
#define DIM 4096
#define MAX_NB_THREADS 1024
// number of runs averaged for the timing measurement
#define SAMPLE_SIZE 10

// Dense DIM x DIM integer matrix product c = a * b (row-major).
// One thread per output element; the launch MUST cover exactly NB_OF_ELEM
// threads (there is no bounds guard).
__global__ void multiply(int* a, int* b, int* c)
{
    int index_c = threadIdx.x + blockIdx.x * blockDim.x;
    int index_a = ((int)(index_c / DIM)) * DIM;  // start of the row of a
    int index_b = index_c % DIM;                 // top of the column of b
    c[index_c] = 0;
    for(int i = 0 ; i < DIM ; ++i)
    {
        c[index_c] += a[index_a] * b[index_b];
        ++index_a;        // step 1 along the row
        index_b += DIM;   // step DIM down the column
    }
}

// Fills `a` with `size` pseudo-random ints in [0, 1000).
void randomInts(int* a, int size)
{
    std::random_device random;
    for (int i = 0 ; i < size ; ++i)
        a[i] = random() % 1000;
}

// Prints the top-left 3x3 corner of a dim x dim matrix (debug aid).
void printMatrix(int* m, int dim)
{
    printf("matrix %p :\n", m);
    for(int i = 0 ; i < 3 ; ++i)
    {
        for(int j = 0 ; j < 2 ; ++j)
            printf("%d\t", m[i+j*dim]);
        printf("%d...\n", m[i+2*dim]);
    }
    printf("...\n\n");
}

// Runs one full multiply and prints corners of the operands and the result
// so correctness can be eyeballed.
void resultTest()
{
    printf("DEBUT TEST RESULTAT\n");
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = NB_OF_ELEM * sizeof(int);

    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);

    a = (int*)malloc(size); randomInts(a, NB_OF_ELEM);
    b = (int*)malloc(size); randomInts(b, NB_OF_ELEM);
    c = (int*)malloc(size);

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    multiply<<<NB_OF_ELEM/MAX_NB_THREADS,MAX_NB_THREADS>>>(d_a, d_b, d_c);

    // blocking copy also waits for the kernel
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printMatrix(a, DIM);
    printMatrix(b, DIM);
    printMatrix(c, DIM);

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    printf("FIN TEST RESULTAT\n\n");
}

// Times the kernel for a given launch configuration, averaged over
// SAMPLE_SIZE runs. nbOfBlocks * nbThreadsPerBlock must equal NB_OF_ELEM.
void speedTest(int nbOfBlocks, int nbThreadsPerBlock)
{
    printf("%d BLOCS ET %d THREADS/BLOC\n", nbOfBlocks, nbThreadsPerBlock);
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = NB_OF_ELEM * sizeof(int);

    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);

    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);

    // long long: SAMPLE_SIZE sums of nanosecond timings overflow a 32-bit int
    long long t_ns = 0;
    for(int i = 1 ; i <= SAMPLE_SIZE ; ++i)
    {
        randomInts(a, NB_OF_ELEM);
        randomInts(b, NB_OF_ELEM);
        cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

        auto t1 = std::chrono::high_resolution_clock::now();
        multiply<<<nbOfBlocks,nbThreadsPerBlock>>>(d_a, d_b, d_c);
        // BUGFIX: kernel launches are asynchronous — without this sync the
        // original measured only the (microsecond-scale) launch overhead.
        cudaDeviceSynchronize();
        auto t2 = std::chrono::high_resolution_clock::now();

        auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(t2 - t1).count();
        std::cout<<ns<<std::endl;
        t_ns += ns;
    }
    std::cout<<"done in "<<t_ns / SAMPLE_SIZE <<" ns (in average)"<<std::endl<<std::endl;

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
}

// nvcc -o bin/exo3 src/exo3.cu
int main(void)
{
    resultTest();
    printf("DEBUT TESTS DE VITESSE\n");
    speedTest(NB_OF_ELEM, 1);
    speedTest(524288, 32);
    speedTest(NB_OF_ELEM/MAX_NB_THREADS, MAX_NB_THREADS);
    printf("FIN TESTS DE VITESSE\n");
    return 0;
}
13,353
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <ctime>
#include <cuda_runtime.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/swap.h>

#define Max(a,b) ((a)>(b)?(a):(b))

// Abort on any failing CUDA runtime call with file/line context.
#define CUDA_SAFE_CALL(call)\
do {\
    cudaError_t err = call;\
    if (cudaSuccess != err) {\
        printf("Cuda error in file '%s' in line %i: %s\n",\
               __FILE__, __LINE__, cudaGetErrorString(err));\
        exit(1);\
    }\
} while (false)\

FILE *in;
int TRACE = 0;            // trace period (0 = off), read from data3.in
double EPS;
int M, N, K, MID, ITMAX;  // grid dims, split plane, iteration cap
double MAXEPS = 0.1;      // convergence threshold
int devCount = 0;
// The M x N x K volume is split along i at MID into two overlapping halves,
// one per (logical) device: A[0] holds i in [0, MID], A[1] holds [MID-1, M-1].
double **A, **A_GPU;
#define A(d, i, j, k) A[d][((i)*N+(j))*K+(k)]

// Analytic solution used for boundary conditions and verification.
double solution(int i, int j, int k)
{
    double x = 10.*i / (M - 1), y = 10.*j / (N - 1), z = 10.*k / (K - 1);
    return 2.*x*x - y*y - z*z;
}

double jac(double **a, int mm, int nn, int kk, int mid, int itmax, double maxeps);

int main(int an, char **as)
{
    int i, j, k;
    CUDA_SAFE_CALL(cudaGetDeviceCount(&devCount));
    in = fopen("data3.in", "r");
    if (in == NULL) { printf("Can not open 'data3.in' "); exit(1); }
    i = fscanf(in, "%d %d %d %d %d", &M, &N, &K, &ITMAX, &TRACE);
    if (i < 4) { printf("Wrong 'data3.in' (M N K ITMAX TRACE)"); exit(2); }

    MID = M / 2;
    A = (double **) malloc(2 * sizeof(double *));
    A[0] = (double *) malloc((MID + 1)*N*K * sizeof(double));
    A[1] = (double *) malloc((M - MID + 1)*N*K * sizeof(double));

    // Boundary cells get the analytic solution, interior cells start at 0.
    for (i = 0; i <= MID; i++)
        for (j = 0; j <= N - 1; j++)
            for (k = 0; k <= K - 1; k++)
            {
                A(0, i, j, k) = (i == 0 || i == M - 1 || j == 0 || j == N - 1 || k == 0 || k == K - 1)
                                ? solution(i, j, k) : 0.;
            }
    for (i = MID - 1; i <= M - 1; i++)
        for (j = 0; j <= N - 1; j++)
            for (k = 0; k <= K - 1; k++)
            {
                A(1, i - MID + 1, j, k) = (i == 0 || i == M - 1 || j == 0 || j == N - 1 || k == 0 || k == K - 1)
                                          ? solution(i, j, k) : 0.;
            }

    int vecSize[] = {(MID + 1)*N*K, (M - MID + 1)*N*K};
    A_GPU = (double **) malloc(2 * sizeof(double *));
    // device % devCount lets the two halves share one physical GPU
    for (int device = 0; device < 2; ++device)
    {
        CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
        CUDA_SAFE_CALL(cudaMalloc(&A_GPU[device], vecSize[device] * sizeof(double)));
        CUDA_SAFE_CALL(cudaMemcpy(A_GPU[device], A[device], vecSize[device] * sizeof(double), cudaMemcpyHostToDevice));
    }

    clock_t t = clock();
    EPS = jac(A_GPU, M, N, K, MID, ITMAX, MAXEPS);
    t = clock() - t;
    double elapsed = 1.0 * t / CLOCKS_PER_SEC;
    printf("%dx%dx%d x %d\t<", M, N, K, ITMAX);
    printf("%3.5f s.>\teps=%.4g\n", elapsed, EPS);

    for (int device = 0; device < 2; ++device)
    {
        CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
        CUDA_SAFE_CALL(cudaMemcpy(A[device], A_GPU[device], vecSize[device] * sizeof(double), cudaMemcpyDeviceToHost));
    }

    if (TRACE)
    {
        // max deviation from the analytic solution over both halves
        EPS = 0.;
        for (i = 0; i <= MID; i++)
            for (j = 0; j <= N - 1; j++)
                for (k = 0; k <= K - 1; k++)
                    EPS = Max(fabs(A(0, i, j, k) - solution(i, j, k)), EPS);
        for (i = MID - 1; i <= M - 1; i++)
            for (j = 0; j <= N - 1; j++)
                for (k = 0; k <= K - 1; k++)
                    EPS = Max(fabs(A(1, i - MID + 1, j, k) - solution(i, j, k)), EPS);
        printf("delta=%.4g\n", EPS);
    }

    for (int device = 0; device < 2; ++device)
    {
        free(A[device]);
        CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
        CUDA_SAFE_CALL(cudaFree(A_GPU[device]));
    }
    free(A);
    free(A_GPU);
    return 0;
}

#define a(i,j,k) a[((i)*nn+(j))*kk+(k)]
#define diff(i,j,k) diff[((i)*(nn - 2)+(j))*(kk - 2)+(k)]
#define border(j,k) border[(j)*kk+(k)]

// One Jacobi sweep over the interior of an mm x nn x kk half-volume.
// Writes per-cell |change| into diff (for the eps reduction) and copies the
// plane adjacent to the split into `border` for exchange with the other half.
__global__ void jac_kernel(double *a, int mm, int nn, int kk, double *diff, double *border, bool isLeft)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int i = blockIdx.z * blockDim.z + threadIdx.z;
    if (i >= 1 && i <= mm - 2 && j >= 1 && j <= nn - 2 && k >= 1 && k <= kk - 2)
    {
        double tmp = (a(i - 1, j, k) + a(i + 1, j, k)
                    + a(i, j - 1, k) + a(i, j + 1, k)
                    + a(i, j, k - 1) + a(i, j, k + 1)) / 6.;
        // BUGFIX: original used fabsf on doubles, truncating to float precision
        diff(i - 1, j - 1, k - 1) = fabs(a(i, j, k) - tmp);
        a(i, j, k) = tmp;
        // publish the plane that the neighbouring half needs next iteration
        if (isLeft && i == mm - 2) {
            border(j, k) = tmp;
        } else if (!isLeft && i == 1) {
            border(j, k) = tmp;
        }
    }
}

void run_jac_kernel(double *a, int mm, int nn, int kk, double *diff, double *border, bool isLeft)
{
    dim3 gridDim = dim3((kk + 15) / 16, (nn + 15) / 16, mm);
    dim3 blockDim = dim3(16, 16, 1);
    jac_kernel<<<gridDim, blockDim>>>(a, mm, nn, kk, diff, border, isLeft);
    CUDA_SAFE_CALL(cudaGetLastError());
}

// Writes the exchanged border plane into slab i == borderIdx.
// NOTE(review): the guard is j >= 0 while jac_kernel only fills j >= 1 —
// looks intentional-but-odd; preserved as-is.
__global__ void writeBorder_kernel(double *a, int nn, int kk, double *border, int borderIdx)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int i = borderIdx;
    if (j >= 0 && j <= nn - 2 && k >= 1 && k <= kk - 2)
    {
        a(i, j, k) = border(j, k);
    }
}

void writeBorder(double *a, int nn, int kk, double *border, int borderIdx)
{
    dim3 gridDim = dim3((kk + 15) / 16, (nn + 15) / 16);
    dim3 blockDim = dim3(16, 16);
    writeBorder_kernel<<<gridDim, blockDim>>>(a, nn, kk, border, borderIdx);
    CUDA_SAFE_CALL(cudaGetLastError());
}

// Synchronize both (logical) devices.
void barrier()
{
    for (int device = 0; device < 2; ++device)
    {
        CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
        CUDA_SAFE_CALL(cudaDeviceSynchronize());
    }
}

// Jacobi iteration driver across the two halves. Each iteration:
// sweep both halves, reduce eps, exchange the split-plane borders via the
// host, then write the received plane into each half. Returns the final eps.
double jac(double **a_gpu, int mm, int nn, int kk, int mid, int itmax, double maxeps)
{
    int it;
    double eps;
    int vecSize[] = {(mid - 1)*(nn - 2)*(kk - 2), (mm - mid - 1)*(nn - 2)*(kk - 2)};
    int borderSize = nn * kk;

    double **diff = (double **) malloc(2 * sizeof(double *));
    double **borders = (double **) malloc(2 * sizeof(double *));
    for (int device = 0; device < 2; ++device)
    {
        CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
        CUDA_SAFE_CALL(cudaMalloc(&diff[device], vecSize[device] * sizeof(double)));
        CUDA_SAFE_CALL(cudaMalloc(&borders[device], borderSize * sizeof(double)));
    }
    // host staging buffers for the device-to-device border exchange
    double **bordersTmp = (double **) malloc(2 * sizeof(double *));
    bordersTmp[0] = (double *) malloc(borderSize * sizeof(double));
    bordersTmp[1] = (double *) malloc(borderSize * sizeof(double));

    int mms[] = {mid + 1, mm - mid + 1};   // slab counts per half (incl. overlap)
    int borderIdxs[] = {mid, 0};           // where the received plane lands
    double epsD[2] = {0.0, 0.0};

    for (it = 1; it <= itmax; it++)
    {
        for (int device = 0; device < 2; ++device)
        {
            CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
            run_jac_kernel(a_gpu[device], mms[device], nn, kk, diff[device], borders[device], device == 0);
        }
        for (int device = 0; device < 2; ++device)
        {
            CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
            // BUGFIX: seed with a double (0.0) — the original 0.0f made
            // thrust accumulate the double diffs in float precision
            epsD[device] = thrust::reduce(
                thrust::device_pointer_cast<double>(diff[device]),
                thrust::device_pointer_cast<double>(diff[device]) + vecSize[device],
                0.0,
                thrust::maximum<double>()
            );
        }
        for (int device = 0; device < 2; ++device)
        {
            CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
            CUDA_SAFE_CALL(cudaMemcpyAsync(bordersTmp[device], borders[device], borderSize * sizeof(double), cudaMemcpyDeviceToHost));
        }
        barrier();   // both D2H copies done before the cross-exchange
        for (int device = 0; device < 2; ++device)
        {
            CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
            // each half receives the OTHER half's border plane
            CUDA_SAFE_CALL(cudaMemcpyAsync(borders[device], bordersTmp[1 - device], borderSize * sizeof(double), cudaMemcpyHostToDevice));
            writeBorder(a_gpu[device], nn, kk, borders[device], borderIdxs[device]);
        }
        eps = Max(epsD[0], epsD[1]);
        if (TRACE && it%TRACE == 0)
            printf("IT=%d eps=%.4g\n", it, eps);
        if (eps < maxeps)
            break;
    }

    for (int device = 0; device < 2; ++device)
    {
        CUDA_SAFE_CALL(cudaSetDevice(device % devCount));
        CUDA_SAFE_CALL(cudaFree(diff[device]));
        CUDA_SAFE_CALL(cudaFree(borders[device]));
    }
    free(diff);
    free(borders);
    free(bordersTmp[0]);
    free(bordersTmp[1]);
    free(bordersTmp);
    return eps;
}
13,354
#include <stdio.h>
#include <time.h>
#include <malloc.h>

// Abort on any failing CUDA runtime call with file/line context.
#define CUDA_CHECK_RETURN(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }\
}

// Initializes both vectors to vector[i] = i; guard handles grid overshoot.
__global__ void gInitVectors(long long n, double* vector1, double* vector2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n)
        return;
    vector1[i] = (double)i;
    vector2[i] = (double)i;
}

// Benchmark: repeatedly allocate, initialize and free two device vectors.
// Usage: <prog> vector_size threads_per_block
int main(int argc, char *argv[])
{
    // prefer L1 cache over shared memory for this kernel (it uses no shared mem)
    cudaFuncSetCacheConfig(gInitVectors, cudaFuncCachePreferL1);

    if (argc < 3)
    {
        printf("Error: run program with 2 args: vector size, threads per block\n");
        return 1;
    }

    long long vector_size, threads;
    // atoll: atoi would truncate sizes above INT_MAX
    vector_size = atoll(argv[1]);
    threads = atoll(argv[2]);
    if (vector_size <= 0 || threads <= 0)
    {
        printf("Error: vector size and threads per block must be positive\n");
        return 1;
    }

    double *vector1_d, *vector2_d;
    for (int i = 0; i < 10; i++)
    {
        CUDA_CHECK_RETURN(cudaMalloc((void**)&vector1_d, vector_size * sizeof(double)));
        CUDA_CHECK_RETURN(cudaMalloc((void**)&vector2_d, vector_size * sizeof(double)));

        // BUGFIX: the original used vector_size / threads, which leaves the
        // tail of the vectors uninitialized whenever threads does not divide
        // vector_size; ceil-div covers every element (kernel guards the rest).
        gInitVectors <<< (vector_size + threads - 1) / threads, threads >>> (vector_size, vector1_d, vector2_d);
        cudaDeviceSynchronize();
        CUDA_CHECK_RETURN(cudaGetLastError());

        cudaFree(vector1_d);
        cudaFree(vector2_d);
    }
    return 0;
}
13,355
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "max_reduction.cuh"

// Global input array shared by the helpers below; filled by
// generate_integers() and freed by main().
int* arr;

static void generate_integers(const int ARR_SIZE);
static int sequential(const int ARR_SIZE);

// Driver: builds a random int array, then runs the sequential baseline and
// the three GPU reduction variants declared in max_reduction.cuh
// (presumably increasingly optimized — confirm against that header).
// Usage: <prog> BLOCK_SIZE [ARRAY_SIZE]; ARRAY_SIZE defaults to DEF_ARR_SIZE.
int main(int argc, char* argv[]) {
    if (argc != 2 && argc != 3) {
        fprintf(stderr, "Usage: %s BLOCK_SIZE [ARRAY_SIZE]\n", argv[0]);
        return 1;
    }
    const int BLOCK_SIZE = atoi(argv[1]);
    const int ARR_SIZE = (argc == 3 ? atoi(argv[2]) : DEF_ARR_SIZE);
    generate_integers(ARR_SIZE);
    sequential(ARR_SIZE);
    reduction_divergent(arr, ARR_SIZE);
    reduction_opt_1(arr, ARR_SIZE);
    // only this variant takes the user-provided block size
    reduction_opt_2(arr, ARR_SIZE, BLOCK_SIZE);
    free(arr);
    return 0;
}

// Allocates the global `arr` and fills it with ARR_SIZE random ints.
// Exits the process on allocation failure.
static void generate_integers(const int ARR_SIZE) {
    if ((arr = (int*) malloc(sizeof(int) * ARR_SIZE)) == NULL) {
        fprintf(stderr, "Error while allocating memory\n");
        exit(1);
    }
    srand(time(NULL));   // unseeded reruns would repeat the same data
    for (int i = 0; i < ARR_SIZE; i++) {
        arr[i] = rand();
    }
}

// CPU baseline: linear scan for the maximum, timed with GET_TIME (defined in
// max_reduction.cuh). Prints the result and returns the maximum.
static int sequential(const int ARR_SIZE) {
    double start, end;
    GET_TIME(start);
    int mx = arr[0];
    for (int i = 1; i < ARR_SIZE; i++) {
        mx = max(mx, arr[i]);
    }
    GET_TIME(end);
    printf("[reduction_sequential]\tMaximum: %d\tTime: %fs\n", mx, end - start);
    return mx;
}
13,356
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <cstring>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
//#include "CSCIx229.h"
//#include <SDL.h>
//#include <SDL_opengl.h>
//#include "objects.h"

using namespace std;

////////////////////
int compMode = 0;
// 0 = single thread
// 1 = multi thread
// 2 = GPU
///////////////////////////////////

// One time step of the amino-acid chain simulation.
// Amino layout (8 floats each): [x, y, z, vx, vy, vz, hydrophobicity, charge].
// Double-buffered: reads aminosPrev, writes aminosNext; positions for this
// frame are also appended to `history` (3 floats per amino per frame).
// One thread per amino; the i<n guard handles grid overshoot.
// NOTE(review): force constants use double literals (1.0, 0.1, ...) in a
// float kernel; kept as-is to preserve numeric results exactly.
__global__ void physics(const int n, const int frame, const float* aminosPrev, float* aminosNext, float* history)
{
    float k = 1.0;    // Bond Spring Constant
    float ke = -0.01; // Electrostatic Constant
    float kh = -0.2;  // Hydrophobicity Constant
    float kc = 1.0;   // Collision Force Constant

    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
    {
        float hx = 0.0, hy = 0.0, hz = 0.0;  // hydrophobic force
        float ex = 0.0, ey = 0.0, ez = 0.0;  // electrostatic force
        float cx = 0.0, cy = 0.0, cz = 0.0;  // collision force
        float fx = 0.0, fy = 0.0, fz = 0.0;  // total force accumulator

        // pairwise forces against every other amino
        for (int j=0; j < n; ++j)
        {
            if (i != j)
            {
                float x1 = aminosPrev[8*i + 0];
                float y1 = aminosPrev[8*i + 1];
                float z1 = aminosPrev[8*i + 2];
                float x2 = aminosPrev[8*j + 0];
                float y2 = aminosPrev[8*j + 1];
                float z2 = aminosPrev[8*j + 2];
                float dx = x2-x1;
                float dy = y2-y1;
                float dz = z2-z1;
                float dist = sqrt(dx*dx + dy*dy + dz*dz);
                float h1 = aminosPrev[8*i + 6];
                float e1 = aminosPrev[8*i + 7];
                float h2 = aminosPrev[8*j + 6];
                float e2 = aminosPrev[8*j + 7];
                // unit vector from i toward j
                float vx = dx/dist;
                float vy = dy/dist;
                float vz = dz/dist;

                // Hydrophobic forces: Fh = Kh*h1*h2/(r^14 - r^8),
                // distance clamped to >= 1 so the potential stays bounded
                float d = max(dist, 1.0f);
                hx = kh*h1*h2*(pow(d,-14) - pow(d,-8)) * vx;
                hy = kh*h1*h2*(pow(d,-14) - pow(d,-8)) * vy;
                hz = kh*h1*h2*(pow(d,-14) - pow(d,-8)) * vz;

                // Electrostatic forces: Fe = k*q1*q2/r^2, r^2 clamped to <= 1
                ex = ke*e1*e2/min(dist*dist, 1.0f) * vx;
                ey = ke*e1*e2/min(dist*dist, 1.0f) * vy;
                ez = ke*e1*e2/min(dist*dist, 1.0f) * vz;

                // Collision forces: soft collisions, spring force model,
                // only when overlapping (dist < 1)
                cx = 0.0;
                cy = 0.0;
                cz = 0.0;
                if (dist < 1.0)
                {
                    cx = kc*(1.0-dist) * vx;
                    cy = kc*(1.0-dist) * vy;
                    cz = kc*(1.0-dist) * vz;
                }

                fx += hx + ex - cx;
                fy += hy + ey - cy;
                fz += hz + ez - cz;
            }
        }

        // spring tension toward the previous neighbor in the chain
        if (i > 0)
        {
            float x1 = aminosPrev[8*i + 0];
            float y1 = aminosPrev[8*i + 1];
            float z1 = aminosPrev[8*i + 2];
            float x2 = aminosPrev[8*(i-1) + 0];
            float y2 = aminosPrev[8*(i-1) + 1];
            float z2 = aminosPrev[8*(i-1) + 2];
            float dx = x2-x1;
            float dy = y2-y1;
            float dz = z2-z1;
            float dist = sqrt(dx*dx + dy*dy + dz*dz);
            fx += k*(dist-1.0) * dx/dist;
            fy += k*(dist-1.0) * dy/dist;
            fz += k*(dist-1.0) * dz/dist;
        }
        // spring tension toward the next neighbor in the chain
        if (i < n-1)
        {
            float x1 = aminosPrev[8*i + 0];
            float y1 = aminosPrev[8*i + 1];
            float z1 = aminosPrev[8*i + 2];
            float x2 = aminosPrev[8*(i+1) + 0];
            float y2 = aminosPrev[8*(i+1) + 1];
            float z2 = aminosPrev[8*(i+1) + 2];
            float dx = x2-x1;
            float dy = y2-y1;
            float dz = z2-z1;
            float dist = sqrt(dx*dx + dy*dy + dz*dz);
            fx += k*(dist-1.0) * dx/dist;
            fy += k*(dist-1.0) * dy/dist;
            fz += k*(dist-1.0) * dz/dist;
        }

        // update velocities
        aminosNext[8*i+3] = aminosPrev[8*i+3] + fx;
        aminosNext[8*i+4] = aminosPrev[8*i+4] + fy;
        aminosNext[8*i+5] = aminosPrev[8*i+5] + fz;
        // damping
        aminosNext[8*i+3] *= 0.9995;
        aminosNext[8*i+4] *= 0.9995;
        aminosNext[8*i+5] *= 0.9995;
        // update positions (fixed time step 0.1)
        aminosNext[8*i+0] = aminosPrev[8*i+0] + 0.1*aminosNext[8*i+3];
        aminosNext[8*i+1] = aminosPrev[8*i+1] + 0.1*aminosNext[8*i+4];
        aminosNext[8*i+2] = aminosPrev[8*i+2] + 0.1*aminosNext[8*i+5];
        // record this frame's position in the history buffer
        history[3*n*frame + 3*i + 0] = aminosNext[8*i+0];
        history[3*n*frame + 3*i + 1] = aminosNext[8*i+1];
        history[3*n*frame + 3*i + 2] = aminosNext[8*i+2];
    }
}

///////////////////////////////////
// (A commented-out CPU reference implementation, `dummy`, was removed here;
// see version control history if the CPU path is ever revived.)

// Usage: sim [-m|-g] infile outfile [num_frames]
// Reads the chain description (count, then hydrophobicity/charge pairs),
// simulates num_frames steps on the GPU, and writes a binary trajectory.
int main(int argc, char *argv[])
{
    // flags: -m (multi-thread) / -g (GPU) are parsed out of argv in place
    for (int i=0; i < argc; ++i)
    {
        if (strcmp(argv[i],"-m") == 0)
        {
            compMode = 1;
            for (int j=i+1; j < argc; ++j) { argv[j-1] = argv[j]; }
            i--; argc--;
        }
        if (strcmp(argv[i],"-g") == 0)
        {
            compMode = 2;
            for (int j=i+1; j < argc; ++j) { argv[j-1] = argv[j]; }
            i--; argc--;
        }
    }

    // args
    if (argc != 3 && argc != 4)
    {
        cerr << "Usage: sim infile outfile [num_frames]\n";
        return 1;
    }

    // Initialize
    int num_frames = 6000;
    if (argc == 4)
        num_frames = stoi(argv[3]);

    ifstream infile(argv[1]);
    if (!infile.is_open())
    {
        cerr << "could not open file " << argv[1] << endl;
        return 1;
    }
    string line;
    getline(infile, line);
    int nAminos = stoi(line);
    cout << nAminos << endl;

    // chain starts as a straight line along x, centered on the origin
    float* aminos = new float[nAminos*8];
    for (int i=0; i < nAminos; ++i)
    {
        aminos[8*i + 0] = nAminos/2.0 - i; // x coordinate
        aminos[8*i + 1] = 0.0;             // y coordinate
        aminos[8*i + 2] = 0.0;             // z coordinate
        aminos[8*i + 3] = 0.0;             // x velocity
        aminos[8*i + 4] = 0.0;             // y velocity
        aminos[8*i + 5] = 0.0;             // z velocity
        getline(infile, line);
        aminos[8*i + 6] = stof(line);      // hydrophobicity
        getline(infile, line);
        aminos[8*i + 7] = stof(line);      // electrostatic charge
    }
    infile.close();

    // small perturbation of the first amino so the chain doesn't stay
    // in the unstable straight-line equilibrium
    aminos[1] = 0.1;
    aminos[5] = 0.01;
    //aminos[nAminos*8-7] = -0.1;
    //aminos[nAminos*8-3] = -0.01;

    float* history = new float[3*nAminos*num_frames];
    int frames = 0;

    float* g_aminos1 = NULL;
    float* g_aminos2 = NULL;
    float* g_history = NULL;
    cudaMalloc(&g_aminos1, 8*nAminos*sizeof(float));
    cudaMalloc(&g_aminos2, 8*nAminos*sizeof(float));
    cudaMalloc(&g_history, 3*num_frames*nAminos*sizeof(float));
    cudaMemcpy(g_aminos1, aminos, 8*nAminos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(g_aminos2, aminos, 8*nAminos*sizeof(float), cudaMemcpyHostToDevice);

    ////////Main Loop////////
    while (frames < num_frames)
    {
        // BUGFIX: the original launched max(1, nAminos/128) blocks, which
        // silently skipped the tail aminos whenever nAminos was not a
        // multiple of 128; ceil-div covers all (kernel guards the overshoot).
        physics<<<(nAminos + 127)/128, 128>>>(nAminos, frames, g_aminos1, g_aminos2, g_history);
        frames += 1;
        // swap the double buffers
        float* tmp = g_aminos1;
        g_aminos1 = g_aminos2;
        g_aminos2 = tmp;
    }

    // blocking copy waits for all queued kernels, then brings back the history
    cudaMemcpy(history, g_history, 3*num_frames*nAminos*sizeof(float), cudaMemcpyDeviceToHost);

    // write to file
    ofstream outfile(argv[2], ofstream::binary);
    if (!outfile.is_open())
        cerr << "could not open file: " << argv[2] << endl;
    else
    {
        // vector instead of the original non-standard runtime-sized array
        vector<float> aminoList(2*nAminos);
        for (int i=0; i < nAminos; ++i)
        {
            aminoList[2*i]   = aminos[8*i + 6];
            aminoList[2*i+1] = aminos[8*i + 7];
        }
        // format: count, frame count, (h,e) pairs, then xyz history
        outfile.write((char*)&nAminos, sizeof(int));
        outfile.write((char*)&frames, sizeof(int));
        outfile.write((char*)aminoList.data(), 2*nAminos*sizeof(float));
        outfile.write((char*)history, 3*frames*nAminos*sizeof(float));
        outfile.close();
    }

    //cout << "Shutting Down\n";
    delete[] aminos;
    delete[] history;
    cudaFree(g_aminos1);
    cudaFree(g_aminos2);
    cudaFree(g_history);
    return 0;
}
13,357
/** \file Definitions for fast_heap for Cuda implementation. */ #include "fast_heap.cf"
13,358
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include<algorithm>
#include<time.h>
#include<cuda.h>

using namespace std;

// Fills input (input_h_size x input_w_size, row-major) with random digits 0..9.
void Init_input(float* input, int input_h_size, int input_w_size)
{
    srand(time(NULL));
    for (int h = 0; h < input_h_size; h++) {
        for (int w = 0; w < input_w_size; w++) {
            input[(h * input_w_size) + w] = rand() % 10;
        }
    }
}

// Average pooling over a row-major input: one block per pooled cell
// (blockIdx.x = pooled column, blockIdx.y = pooled row).
// `sum` and `avg` are kept as parameters only for interface compatibility;
// they are reset locally before use (the host used to pass them uninitialized).
// NOTE: partial edge windows still divide by the full window size, matching
// the original behavior.
__global__ void Avg_pooling(int pooled_h, int pooled_w, int pool_h_stride, int pool_w_stride, int pool_h_size, int input_h_size, int pool_w_size, int input_w_size, int sum, float avg, float* gpu_input, float* gpu_output_data)
{
    int i = blockIdx.x;   // pooled column index
    int j = blockIdx.y;   // pooled row index

    int w_start = i * pool_w_stride;
    int h_start = j * pool_h_stride;
    int w_end = min(w_start + pool_w_size, input_w_size);
    int h_end = min(h_start + pool_h_size, input_h_size);
    w_start = max(w_start, 0);
    h_start = max(h_start, 0);

    sum = 0;
    avg = 0;
    for (int h = h_start; h < h_end; h++) {
        for (int w = w_start; w < w_end; w++) {
            sum += gpu_input[(h * input_w_size) + w];
        }
    }

    // Write the cell once, after the whole window is summed (the original
    // recomputed and stored avg on every row of the window).
    if (w_start < w_end && h_start < h_end) {
        avg = (float)sum / (pool_h_size * pool_w_size);
        gpu_output_data[(j * pooled_w) + i] = avg;
    }
}

// Prints an h_size x w_size row-major matrix.
void print(float* data, int h_size, int w_size)
{
    for (int h = 0; h < h_size; h++) {
        for (int w = 0; w < w_size; w++) {
            printf("%.2f ", data[(h * w_size) + w]);
        }
        cout << endl;
    }
    cout << endl;
    cout << endl;
}

int main()
{
    int sum = 0;       // BUG FIX: was passed to the kernel uninitialized
    float avg = 0.0f;  // BUG FIX: likewise
    int input_h_size = 6;
    int input_w_size = 6;
    /* pool => window size */
    int pool_w_size = 3;
    int pool_h_size = 3;
    int pool_w_stride = 3;
    int pool_h_stride = 3;
    /* pooled output dimensions */
    int pooled_h = ((input_h_size - pool_h_size) / pool_h_stride) + 1;
    int pooled_w = ((input_w_size - pool_w_size) / pool_w_stride) + 1;

    // BUG FIX: the original wrote new float[n * sizeof(float)], multiplying
    // the element count by sizeof(float) a second time (4x over-allocation).
    float* input = new float[input_h_size * input_w_size];
    float* cpu_output_data = new float[input_h_size * input_w_size];

    Init_input(input, input_h_size, input_w_size);
    printf("=====Matrix Initial value====\n");
    print(input, input_h_size, input_w_size);

    float* gpu_input;
    float* gpu_output_data;
    cudaMalloc((void**)&gpu_input, input_h_size * input_w_size * sizeof(float));
    cudaMalloc((void**)&gpu_output_data, input_h_size * input_w_size * sizeof(float));
    cudaMemcpy(gpu_input, input, input_h_size * input_w_size * sizeof(float), cudaMemcpyHostToDevice);

    // BUG FIX: launch exactly one block per pooled cell; the original
    // launched input_h_size x input_w_size blocks, most of which were idle.
    dim3 dimGrid(pooled_w, pooled_h);
    dim3 dimBlock(1, 1);
    Avg_pooling<<< dimGrid, dimBlock >>>(pooled_h, pooled_w, pool_h_stride, pool_w_stride, pool_h_size, input_h_size, pool_w_size, input_w_size, sum, avg, gpu_input, gpu_output_data);

    // cudaMemcpy is blocking, so no explicit synchronize is needed here.
    cudaMemcpy(cpu_output_data, gpu_output_data, input_h_size * input_w_size * sizeof(float), cudaMemcpyDeviceToHost);
    printf("====GPU Pooling Result value=====\n");
    print(cpu_output_data, pooled_h, pooled_w);

    cudaFree(gpu_input);
    cudaFree(gpu_output_data);
    // BUG FIX: array new must be paired with delete[], not delete.
    delete[] input;
    delete[] cpu_output_data;
    return 0;
}
13,359
__global__ void count_newlines(char *arr, long n, int *result) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; long chars_per_thread = (n+stride-1) / stride; long start = index * chars_per_thread; long end = start + chars_per_thread; int count = 0; for (long i = start; i < end && i < n; i += 1) { if (arr[i] == '\n') { count += 1; } } result[index] = count; }
13,360
#include <pthread.h> #include <stdio.h> const int N = 1 << 20; __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } } void *launch_kernel(void *dummy) { float *data; cudaMalloc(&data, N * sizeof(float)); kernel<<<1, 64>>>(data, N); cudaStreamSynchronize(0); return NULL; } int main() { const int num_threads = 8; pthread_t threads[num_threads]; for (int i = 0; i < num_threads; i++) { if (pthread_create(&threads[i], NULL, launch_kernel, 0)) { fprintf(stderr, "Error creating threadn"); } } for (int i = 0; i < num_threads; i++) { if(pthread_join(threads[i], NULL)) { fprintf(stderr, "Error joining threadn"); return 2; } } cudaDeviceReset(); return 0; }
13,361
/* calculating pi via area under the curve
 * This code uses an algorithm fairly easily ported to all parallel methods.
 * Since it calculates pi, it is easy to verify that results are correct.
 * It can also be used to explore accuracy of results and techniques for managing error.
 *
 * Area of the unit circle is pi; y = sqrt(1-x^2) is the semicircle from -1 to 1,
 * so summing midpoint rectangles under it approximates pi/2.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define NUMRECT 10000000

// constants useful to CUDA
const int threadsPerBlock = 256;
const int blocksPerGrid = 32;
const int totalThreads = threadsPerBlock * blocksPerGrid;
const float overallWidth = 2.0f;
const float block_width = overallWidth / blocksPerGrid;

// Each thread integrates its slice of y = sqrt(1 - x^2) with midpoint
// rectangles; the block then tree-reduces its threads' partial sums in
// shared memory and writes the block total to partPiByBlock[blockIdx.x].
// Requires threadsPerBlock to be a power of two for the reduction.
__global__ void calcArea(int *d_rectPerThread, float *d_width, float *partPiByBlock)
{
    __shared__ float partPiByThread[threadsPerBlock];
    int reduce_i = blockDim.x / 2;   // index for reducing thread results to a single block value
    float width = *d_width;
    int rectPerThread = *d_rectPerThread;

    // Left edge of this thread's slice, backed off half a width so the
    // first `x += width` lands on the first rectangle's midpoint.
    float x = -1.0f + (overallWidth * blockIdx.x) / blocksPerGrid
                    + (block_width * threadIdx.x) / threadsPerBlock
                    - width / 2;
    float partPi = 0.0f;
    for (int i = 0; i < rectPerThread; i++) {
        x += width;
        partPi += width * sqrtf(1.0f - x * x);
    }
    partPiByThread[threadIdx.x] = partPi;

    // reduce all threads in the block to a single block value
    while (reduce_i != 0) {
        __syncthreads();
        if (threadIdx.x < reduce_i)
            partPiByThread[threadIdx.x] += partPiByThread[threadIdx.x + reduce_i];
        reduce_i /= 2;
    }

    // store block result in the correct spot for reducing on the CPU side
    if (threadIdx.x == 0)
        partPiByBlock[blockIdx.x] = partPiByThread[0];
}

int main(int argc, char **argv)
{
    int numRect;                                  // number of rectangles
    int *d_rectPerThread, rectPerThread;          // number of rectangles per thread
    int i;                                        // loop index
    float *d_width, width;                        // width of each rectangle
    float *d_partPiByBlock, h_partPiByBlock[blocksPerGrid];  // per-block partial sums
    float pi, halfPI = 0.0;                       // sum of rectangle areas gives pi/2

    numRect = argc == 2 ? atoi(argv[1]) : NUMRECT;
    rectPerThread = numRect / totalThreads;
    numRect = rectPerThread * totalThreads;       // round down to a multiple of totalThreads
    width = overallWidth / numRect;

    cudaMalloc((void**)&d_rectPerThread, sizeof(int));
    // BUG FIX: the original allocated sizeof(int) here; allocate the float
    // this buffer actually holds.
    cudaMalloc((void**)&d_width, sizeof(float));
    cudaMalloc((void**)&d_partPiByBlock, sizeof(float) * blocksPerGrid);
    cudaMemcpy(d_rectPerThread, &rectPerThread, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_width, &width, sizeof(float), cudaMemcpyHostToDevice);

    calcArea <<<blocksPerGrid, threadsPerBlock>>> (d_rectPerThread, d_width, d_partPiByBlock);

    cudaMemcpy(h_partPiByBlock, d_partPiByBlock, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost);
    for (i = 0; i < blocksPerGrid; ++i)
        halfPI += h_partPiByBlock[i];
    pi = 2.0 * halfPI;   /* the sum is pi/2, so double it (comment said pi/4 before) */

    printf ("\n==\n==\t%20s = %15.10f\n", "pi", pi);
    printf ("==\t%20s = %15d\n", "total rectangles", numRect);
    printf ("==\t%20s = %15d\n==\n\n", "CUDA threads", totalThreads);

    // BUG FIX: free device allocations (the original leaked all three).
    cudaFree(d_rectPerThread);
    cudaFree(d_width);
    cudaFree(d_partPiByBlock);
    return 0;
}
13,362
#include <math.h>
#include <cuda.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <iostream>

#define BLOCK_WIDTH 512

/* Typed accessor for dynamic shared memory so kernels templated on T
   (int / float / double) can share the single extern __shared__ array
   declaration without knowing T in advance. */
template<class T>
struct SharedMemory
{
    __device__ inline operator T *()
    {
        extern __shared__ int __smem[];
        return (T *) __smem;
    }

    __device__ inline operator const T *() const
    {
        extern __shared__ int __smem[];
        return (T *) __smem;
    }
};

/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: histogram via atomics directly on global memory.
// Values are bucketed into bins of width 100; the max value 1000 is folded
// into the last bin (index 9).
/////////////////////////////////////////////////////////////////////////////
template<class T, int blockSize>
__global__ void countGlobalMem(T *g_idata, int *g_odata, int N)
{
    unsigned int i = blockSize * blockIdx.x + threadIdx.x;
    if (i < N) {
        if (g_idata[i] == 1000) {
            atomicAdd(&g_odata[9], 1);
        } else {
            int gi = (int) g_idata[i] / 100;
            atomicAdd(&g_odata[gi], 1);
        }
    }
}

/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: per-block histogram in shared memory. Each block counts its
// slice into sdata[0..numBarrel) and writes the partial histogram to
// g_odata[blockIdx.x * numBarrel + bin]; the host sums the partials.
/////////////////////////////////////////////////////////////////////////////
template<class T, int blockSize>
__global__ void countSharedMem(T *g_idata, int *g_odata, int N, int maxNum, int barrelSize)
{
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    int numBarrel = maxNum / barrelSize;

    // BUG FIX: shared memory is uninitialized; zero the bins before counting.
    if (tid < numBarrel)
        sdata[tid] = 0;
    __syncthreads();

    unsigned int i = blockSize * blockIdx.x + threadIdx.x;
    if (i < N) {
        if (g_idata[i] == maxNum) {
            atomicAdd(&sdata[numBarrel - 1], 1);
        } else {
            int gi = (int) g_idata[i] / barrelSize;
            atomicAdd(&sdata[gi], 1);
        }
    }
    // wait until sdata[0..numBarrel) is complete for this block
    __syncthreads();

    if (tid < numBarrel)
        g_odata[blockIdx.x * numBarrel + tid] = sdata[tid];
}

// Round x up to the next power of two (bit-smearing trick).
int nextPowerOf2(int x)
{
    --x;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    return ++x;
}

/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: naive (Hillis-Steele) inclusive scan of one block's data.
/////////////////////////////////////////////////////////////////////////////
__global__ void scan(int *d_idata, int *d_odata, int N)
{
    extern __shared__ int sdata[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: pad tail threads with 0 instead of leaving SMEM uninitialized.
    sdata[threadIdx.x] = (i < N) ? d_idata[i] : 0;

    // BUG FIX: the original looped `stride <= threadIdx.x`, which placed
    // __syncthreads() inside divergent control flow (undefined behavior).
    // All threads now run every iteration; only the update is guarded.
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        __syncthreads();
        int in1 = (threadIdx.x >= stride) ? sdata[threadIdx.x - stride] : 0;
        __syncthreads();
        sdata[threadIdx.x] += in1;
    }
    __syncthreads();
    if (i < N)
        d_odata[i] = sdata[threadIdx.x];  // BUG FIX: was d_odata[threadIdx.x]
}

///////////////////////////////////////////////////////////////////////////////
// Wrapper for countGlobalMem
///////////////////////////////////////////////////////////////////////////////
template<class T>
void countGMWrapper(int threads, int blocks, T *g_idata, int *g_odata, int N)
{
    /* 1D block and 1D grid */
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    int smemSize = threads * sizeof(T);
    countGlobalMem<T, BLOCK_WIDTH><<<dimGrid, dimBlock, smemSize>>>(g_idata, g_odata, N);
}

///////////////////////////////////////////////////////////////////////////////
// Wrapper for countSharedMem
///////////////////////////////////////////////////////////////////////////////
template<class T>
void countSWrapper(int threads, int blocks, T *g_idata, int *g_odata, int N, int maxNum, int barrelSize)
{
    /* 1D block and 1D grid */
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    int smemSize = threads * sizeof(T);
    countSharedMem<T, BLOCK_WIDTH><<<dimGrid, dimBlock, smemSize>>>(g_idata, g_odata, N, maxNum, barrelSize);
}

/////////////////////////////////////////////////////////////////////////////////
// Instantiate Template
/////////////////////////////////////////////////////////////////////////////////
template void countGMWrapper<int>(int threads, int blocks, int *g_idata, int *g_odata, int N);
template void countGMWrapper<float>(int threads, int blocks, float *g_idata, int *g_odata, int N);
template void countGMWrapper<double>(int threads, int blocks, double *g_idata, int *g_odata, int N);
template void countSWrapper<int>(int threads, int blocks, int *g_idata, int *g_odata, int N, int maxNum, int barrelSize);

//////////////////////////////////////////////////////////////////////////////////
// Test Function: reads integers from `filename`, histograms them on the GPU
// twice (global-memory and shared-memory kernels), scans the histogram, and
// appends results to q2a.txt / q2b.txt / q2c.txt.
//////////////////////////////////////////////////////////////////////////////////
void myCountTest(const char* filename)
{
    int numBarrel = 10;

    // Parse whitespace/comma-separated integers from the input file.
    std::vector<int> data;
    std::string line_;
    std::ifstream file_(filename);
    if (file_.is_open()) {
        while (getline(file_, line_)) {
            std::stringstream ss(line_);
            int i;
            while (ss >> i) {
                data.push_back(i);
                if (ss.peek() == ',' || ss.peek() == ' ') {
                    ss.ignore();
                }
            }
        }
        file_.close();
    }
    int num_els = data.size();
    int numBlocks = num_els / BLOCK_WIDTH + 1;

    // ---- Kernel a: global-memory histogram ----
    int *d_in = NULL;
    int *d_out = NULL;
    cudaMalloc((void **) &d_in, num_els * sizeof(int));
    cudaMalloc((void **) &d_out, numBarrel * sizeof(int));
    // BUG FIX: the original malloc'ed `in`, re-pointed it at the vector's
    // storage (leaking the malloc), and later free'd the vector's buffer
    // (undefined behavior). Use the containers directly instead.
    std::vector<int> out(numBarrel, 0);
    cudaMemcpy(d_in, data.data(), num_els * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, out.data(), numBarrel * sizeof(int), cudaMemcpyHostToDevice);
    countGMWrapper(BLOCK_WIDTH, numBlocks, d_in, d_out, num_els);
    cudaMemcpy(out.data(), d_out, numBarrel * sizeof(int), cudaMemcpyDeviceToHost);

    std::ofstream fout1("q2a.txt", std::ios::app);
    for (int i = 0; i < numBarrel; i++) {
        if (fout1.is_open()) {
            fout1 << "\n Count[" << i << "]: " << out[i];
        }
    }
    fout1.close();
    fout1.clear();
    cudaFree(d_out);
    // d_in is kept alive for the next kernel.

    // ---- Kernel b: shared-memory histogram ----
    int *d_out_b = NULL;
    cudaMalloc((void **) &d_out_b, numBarrel * numBlocks * sizeof(int));
    std::vector<int> out_b(numBarrel * numBlocks, 0);
    cudaMemcpy(d_out_b, out_b.data(), numBarrel * numBlocks * sizeof(int), cudaMemcpyHostToDevice);
    // BUG FIX: the original passed d_out (already freed) instead of d_out_b.
    countSWrapper(BLOCK_WIDTH, numBlocks, d_in, d_out_b, num_els, 1000, 100);
    cudaMemcpy(out_b.data(), d_out_b, numBarrel * numBlocks * sizeof(int), cudaMemcpyDeviceToHost);

    // Sum the per-block partial histograms on the host.
    std::ofstream fout2("q2b.txt", std::ios::app);
    for (int i = 0; i < numBarrel; i++) {
        int out_b_all = 0;
        for (int j = 0; j < numBlocks; j++)
            out_b_all += out_b[i + j * numBarrel];
        if (fout2.is_open()) {
            fout2 << "\n Count[" << i << "]: " << out_b_all;
        }
    }
    fout2.close();
    fout2.clear();
    cudaFree(d_out_b);
    cudaFree(d_in);

    // ---- Kernel c: prefix scan of the kernel-a histogram ----
    int n3 = nextPowerOf2(numBarrel);
    std::vector<int> in_c(n3, 0);
    std::vector<int> out_c(n3, 0);
    for (int i = 0; i < numBarrel; i++)
        in_c[i] = out[i];

    int *d_in_c = NULL;
    int *d_out_c = NULL;
    cudaMalloc((void **) &d_in_c, n3 * sizeof(int));
    cudaMalloc((void **) &d_out_c, n3 * sizeof(int));
    cudaMemcpy(d_in_c, in_c.data(), n3 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out_c, out_c.data(), n3 * sizeof(int), cudaMemcpyHostToDevice);
    scan<<<1, n3, n3 * sizeof(int)>>>(d_in_c, d_out_c, n3);
    cudaMemcpy(out_c.data(), d_out_c, n3 * sizeof(int), cudaMemcpyDeviceToHost);

    std::ofstream fout3("q2c.txt", std::ios::app);
    for (int i = 0; i < numBarrel; i++) {
        if (fout3.is_open()) {
            fout3 << "\n prescan[" << i << "]: " << out_c[i];
        }
    }
    fout3.close();
    fout3.clear();
    // BUG FIX: the original leaked d_in_c / d_out_c.
    cudaFree(d_in_c);
    cudaFree(d_out_c);
}

int main(int argc, char **argv)
{
    myCountTest("inp.txt");
    return 0;
}
13,363
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>

//You can change the dimension, program will produce two matrices.
#define M 600
#define N 800

#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
    printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
    printf("  %s\n", cudaGetErrorString(cudaGetLastError())); \
    exit(EXIT_FAILURE);}}

// Element-wise M x N matrix add: one thread per element over a 2D grid,
// guarded for the partial tiles at the right/bottom edges.
__global__ void matrixAdd(int d_x[][N], int d_y[][N], int d_z[][N])
{
    // BUG FIX: include blockIdx so a multi-block grid covers the matrix;
    // the original used threadIdx alone with a single (oversized) block.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // row
    int idy = blockIdx.y * blockDim.y + threadIdx.y;  // column
    if (idx < M && idy < N) {
        d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy];
    }
}

int main()
{
    int size = (M * N) * sizeof(int);
    // NOTE(review): ~5.7 MB of stack arrays — close to typical stack limits;
    // consider heap allocation if M/N grow.
    int h_x[M][N], h_y[M][N], h_z[M][N];
    int(*d_x)[N], (*d_y)[N], (*d_z)[N];
    int i = 0;

    // Initialize matrices
    for (i = 0; i < M; i++) {
        for (int j = 0; j < N; j++) {
            h_x[i][j] = M;
            h_y[i][j] = N;
            h_z[i][j] = 0;
        }
    }

    // Time the CPU reference addition.
    cudaEvent_t startC, stopC;
    float elapsed_time_msC;
    cudaEventCreate( &startC );
    cudaEventCreate( &stopC );
    cudaEventRecord( startC, 0 );
    for (i = 0; i < M; i++) {
        for (int j = 0; j < N; j++) {
            h_z[i][j] = h_x[i][j] + h_y[i][j];
        }
    }
    cudaEventRecord( stopC, 0 );
    cudaEventSynchronize( stopC );
    cudaEventElapsedTime( &elapsed_time_msC, startC, stopC );
    printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC);

    CUDA_CALL(cudaMalloc(&d_x, size));
    CUDA_CALL(cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMalloc(&d_y, size));
    CUDA_CALL(cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMalloc(&d_z, size));

    // BUG FIX: the original used dimBlock(M, N) = 480000 threads per block,
    // far beyond the 1024-thread limit, so the kernel never launched.
    // Use 16x16 blocks and enough blocks to cover the matrix.
    dim3 dimBlock(16, 16);
    dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x,
                 (N + dimBlock.y - 1) / dimBlock.y);

    cudaEvent_t start, stop;
    float elapsed_time_ms;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );
    matrixAdd <<< dimGrid, dimBlock >>> (d_x, d_y, d_z);
    CUDA_CALL(cudaGetLastError());   // surface launch-configuration errors
    CUDA_CALL(cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost));
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsed_time_ms, start, stop );
    printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms);

    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);

    printf("Output of Summation\n");
    // for (i = 0; i<M; i++) {
    //     for (int j = 0; j<N; j++) {
    //         printf("%d\t", h_z[i][j]);
    //     }
    //     printf("\n");
    // }
    printf("\n");
    return 0;
}
13,364
#include <stdio.h> __global__ void matAdd(double *a_D, double *c_D) { int t_rank; t_rank = threadIdx.y*blockDim.x + threadIdx.x; c_D[t_rank] = t_rank+1; } int main(int argc, char **argv) { int i, j; int size, block_size = 8, grid_size = 1; double *c_H, *c_D, *a_H, *a_D; size = block_size*block_size*sizeof(double); a_H = (double *)malloc(size); c_H = (double *)malloc(size); cudaMalloc((void **)&a_D, size); cudaMalloc((void **)&c_D, size); for (i=0; i<block_size; i++) for (j=0; j<block_size; j++) a_H[(i*block_size)+j]=(i*block_size)+j; for(i = 0; i < block_size; i++, printf("\n")) for (j=0; j < block_size; j++) //printf("matric[%d][%d] is %f\n", i, j, c_H[(i*block_size)+j]); printf(" %4.1f", a_H[(i*block_size)+j]); printf("\n"); cudaMemcpy(a_D, a_H, size, cudaMemcpyHostToDevice); dim3 Block(block_size, block_size); dim3 Grid(grid_size, grid_size); matAdd<<<Grid, Block>>>(a_D, c_D); cudaMemcpy(c_H, c_D, size, cudaMemcpyDeviceToHost); for(i = 0; i < block_size; i++, printf("\n")) for (j=0; j < block_size; j++) //printf("matric[%d][%d] is %f\n", i, j, c_H[(i*block_size)+j]); printf(" %4.1f", c_H[(i*block_size)+j]); free(a_H); free(c_H); cudaFree(a_D); cudaFree(c_D); return 0; }
13,365
#ifndef __DI_TRACER_CU__ #define __DI_TRACER_CU__ #endif // !__DI_TRACER_CU__
13,366
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <sys/time.h>

#define SIZE 102400
#define MOD 102399
#define STEP 128

/* ARRAY A INITIALIZER: every cell starts at 1. */
void init_a(int * a)
{
    int i;
    for (i = 0; i < SIZE; i++) {
        a[i] = 1;
    }
}

/* ARRAY B INITIALIZER: fills b with a permutation of 0..SIZE-1
   (STEP and MOD are coprime, so j visits every residue exactly once). */
void init_b(int * b)
{
    int i, j;
    j = 0;
    for (i = 0; i < SIZE - 1; i++) {
        b[j] = i;
        j = (j + STEP) % MOD;
    }
    b[SIZE - 1] = SIZE - 1;
}

/* CHECKING A VALUES: after the kernel, a[i] must equal i+1. */
int check_a(int * a)
{
    int i;
    int correct = 1;
    for (i = 0; i < SIZE; i++) {
        if (a[i] != (i + 1)) {
            correct = 0;
        }
    }
    return correct;
}

// First version of the function (the original one): a single block
// strides over the whole array.
__global__ void mykernel1(int * a, int * b, int N)
{
    for (int i = threadIdx.x; i < N; i += blockDim.x) {
        int v = b[i];
        a[v] = a[v] + v;
    }
}

// Second version of the function (question #3): grid-stride loop so
// every block participates.
__global__ void mykernel2(int * a, int * b, int *count, int N)
{
    // blockIdx.x: position of the block within the grid.
    // blockDim.x: dimension of a block (relatively to the direction "x").
    // threadIdx.x: position of the thread relatively to the block.
    printf("blockIdx.x:%d * blockDim.x:%d + threadIdx.x:%d => %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockIdx.x * blockDim.x + threadIdx.x);
    // BUG FIX: `*count = *count + 1` was a data race across all threads,
    // so the reported loop count was wrong; atomicAdd makes it exact.
    atomicAdd(count, 1);
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = index; i < N; i += gridDim.x * blockDim.x) {
        int v = b[i];
        a[v] = a[v] + v;
    }
}

#define Q 2

int main(int argc, char * argv[])
{
    struct timeval start;
    struct timeval stop;
    int * a = (int *)malloc(sizeof(int)*SIZE);
    int * b = (int *)malloc(sizeof(int)*SIZE);
    int count = 0;
    init_a(a);
    init_b(b);

    /* CUDA allocation and host-to-device copies */
    int * d_a, * d_b, *d_count;
    cudaMalloc(&d_a, sizeof(int)*SIZE);
    cudaMalloc(&d_b, sizeof(int)*SIZE);
    cudaMalloc(&d_count, sizeof(int));
    cudaMemcpy(d_a, a, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice);

    dim3 nBlocks;
    dim3 nThperBlock;
    // Q == 1: 1024 threads in a single block (not a good solution).
    // Otherwise: 16 blocks of 1024 threads.
    if (1 == Q) {
        nBlocks.x = 1;          // number of blocks in the grid
        nThperBlock.x = 1024;   // number of threads per block
    } else {
        nBlocks.x = 16;         // number of blocks in the grid
        nThperBlock.x = 1024;   // number of threads per block
    }

    gettimeofday(&start, nullptr);
    if (1 == Q) {
        mykernel1<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE);
    } else {
        printf("nBlocks = %d\n", nBlocks.x);
        printf("nThperBlock = %d\n", nThperBlock.x);
        mykernel2<<< nBlocks , nThperBlock >>>(d_a, d_b, d_count, SIZE);
    }
    // The kernel executes asynchronously relative to the CPU, so wait for
    // it before reading the clock and copying back.
    cudaDeviceSynchronize();
    gettimeofday(&stop, nullptr);
    printf("Execution duration: %ld (s) %ld (us)\n", stop.tv_sec - start.tv_sec, stop.tv_usec - start.tv_usec);

    // Copy results back: cudaMemcpy(dst, src, count, kind).
    cudaMemcpy(a, d_a, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
    cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Total number of loops: %d\n", count);

    int correct = check_a(a);

    // BUG FIX: release device and host memory (the original leaked both).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_count);
    free(a);
    free(b);

    // BUG FIX: return 0 on success (the original always returned 1).
    if (0 == correct) {
        printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n");
        return 1;
    }
    printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n");
    return 0;
}
13,367
#include "includes.h" __device__ double digamma(double x) { double result = 0.0, xx, xx2, xx4; for ( ; x < 7.0; ++x) { /* reduce x till x<7 */ result -= 1.0/x; } x -= 1.0/2.0; xx = 1.0/x; xx2 = xx*xx; xx4 = xx2*xx2; result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4; return result; } __global__ void kernel_evaluatenu(int Nd, double qsum, double *q, double deltanu,double nulow) { unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; if (tid<Nd) { double thisnu=(nulow+((double)tid)*deltanu); double dgm=digamma(thisnu*0.5+0.5); q[tid]=dgm-log((thisnu+1.0)*0.5); /* psi((nu+1)/2)-log((nu+1)/2) */ dgm=digamma(thisnu*0.5); q[tid]+=-dgm+log((thisnu)*0.5); /* -psi((nu)/2)+log((nu)/2) */ q[tid]+=-qsum+1.0; /* -(-sum(ln(w_i))/N+sum(w_i)/N)+1 */ } }
13,368
#include "stdio.h" #define COLUMNS 4 #define ROWS 3 __global__ void add(int* a, int* c) { int column = threadIdx.x; int total = 0; for(int i = 0; i < ROWS; ++i){ total += a[(COLUMNS*i) + column]; } c[column]=total; } int main() { int a[ROWS][COLUMNS], c[COLUMNS]; int* dev_a, * dev_c; cudaMalloc((void**)&dev_a, ROWS * COLUMNS * sizeof(int)); cudaMalloc((void**)&dev_c, COLUMNS * sizeof(int)); for (int y = 0; y < ROWS; y++) // Fill Arrays for (int x = 0; x < COLUMNS; x++) a[y][x] = 7; cudaMemcpy(dev_a, a, ROWS * COLUMNS * sizeof(int), cudaMemcpyHostToDevice); add <<<1, COLUMNS >>> (dev_a, dev_c); cudaMemcpy(c, dev_c, COLUMNS * sizeof(int), cudaMemcpyDeviceToHost); int total = 0; for(int i = 0; i < COLUMNS; ++i){ total += c[i]; } printf("Total sum of all elements is: %d\n", total); cudaFree(dev_a); cudaFree(dev_c); return 0; }
13,369
#include <stdio.h> /* * compile with `gcc -o test test.c * run `./test` */ int main(int argc, char **argv){ const int A_SIZE = 1 << 21; const short INT_SIZE = sizeof(int); printf("Array Size: \t%d\nInt Size: \t%d\n", A_SIZE, INT_SIZE); return 0; }
13,370
#include <iostream> using namespace std; __global__ void dkernel() { printf("Hello World from GPU!\n"); } int main() { dkernel<<<1,332>>>(); cudaDeviceSynchronize(); return 0; }
13,371
#include <iostream>
#include <chrono>

const size_t size = 1 << 20;

// Out-of-place transpose of a square row-major matrix.
// Launch: 2D grid covering the full matrix; width = gridDim.x * blockDim.x.
__global__ void transpose(float_t *matrixOrigin, float_t *matrixRes)
{
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    size_t width = gridDim.x * blockDim.x;
    matrixRes[x + y * width] = matrixOrigin[y + x * width];
}

// vectorA = vectorA * alpha + vectorB, one element per thread.
// Launch configuration must cover the vector exactly (no bounds guard).
__global__ void saxpy(float_t *vectorA, float_t *vectorB, float_t alpha)
{
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    vectorA[index] = vectorA[index] * alpha + vectorB[index];
}

int32_t main()
{
    std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    cudaStream_t stream0;
    const size_t num = 32;
    const size_t Nx = 1 << 10;
    const size_t Ny = 1 << 10;
    cudaStreamCreate(&stream0);

    // ---- Transpose, fully ordered on stream0 ----
    float_t *matrix, *matrix_dev_origin, *matrix_dev_res;
    cudaHostAlloc((void **) &matrix, size * sizeof(float_t), cudaHostAllocDefault);
    for (int64_t i = 0; i < size; ++i)
        matrix[i] = i;
    cudaMalloc((void **) &matrix_dev_origin, sizeof(float_t) * size);
    cudaMalloc((void **) &matrix_dev_res, sizeof(float_t) * size);
    cudaMemcpyAsync(matrix_dev_origin, matrix, sizeof(float_t) * size, cudaMemcpyHostToDevice, stream0);
    // BUG FIX: launch on stream0 so the kernel is explicitly ordered after
    // the async upload (the original launched on the default stream and
    // relied on legacy default-stream serialization).
    transpose <<< dim3(Nx / num, Ny / num), dim3(num, num), 0, stream0 >>>(matrix_dev_origin, matrix_dev_res);
    cudaMemcpyAsync(matrix, matrix_dev_res, sizeof(float_t) * size, cudaMemcpyDeviceToHost, stream0);
    cudaStreamSynchronize(stream0);

    std::chrono::high_resolution_clock::time_point stop = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double_t> time_span = std::chrono::duration_cast<std::chrono::duration<double_t>>( stop - start);
    std::cout << "Transpose time (s) - " << time_span.count() << std::endl;

    cudaFree(matrix_dev_origin);
    cudaFree(matrix_dev_res);
    cudaFreeHost(matrix);

    // ---- SAXPY, halves split across stream0 and stream1 ----
    start = std::chrono::high_resolution_clock::now();
    float_t *vecA, *vecB, *vecA_device, *vecB_device;
    cudaStream_t stream1;
    cudaStreamCreate(&stream1);
    cudaHostAlloc((void **) &vecA, size * sizeof(float_t), cudaHostAllocDefault);
    cudaHostAlloc((void **) &vecB, size * sizeof(float_t), cudaHostAllocDefault);
    for (int64_t i = 0; i < size; ++i) {
        vecA[i] = i;
        vecB[i] = i * 2 - 1;
    }
    cudaMalloc((void **) &vecA_device, sizeof(float_t) * size);
    cudaMalloc((void **) &vecB_device, sizeof(float_t) * size);

    // BUG FIX: copy each half of BOTH vectors on the stream that consumes
    // it. The original uploaded all of vecB on stream1 while stream0's
    // saxpy read vecB's first half with no cross-stream ordering (a race).
    // Also use sizeof(float_t), not sizeof(int), for the copy sizes.
    cudaMemcpyAsync(vecA_device, vecA, sizeof(float_t) * size / 2, cudaMemcpyHostToDevice, stream0);
    cudaMemcpyAsync(vecB_device, vecB, sizeof(float_t) * size / 2, cudaMemcpyHostToDevice, stream0);
    cudaMemcpyAsync(vecA_device + size / 2, vecA + size / 2, sizeof(float_t) * size / 2, cudaMemcpyHostToDevice, stream1);
    cudaMemcpyAsync(vecB_device + size / 2, vecB + size / 2, sizeof(float_t) * size / 2, cudaMemcpyHostToDevice, stream1);

    saxpy <<< size / 2 / 1024, 1024, 0, stream0 >>>(vecA_device, vecB_device, 2.25);
    saxpy <<< size / 2 / 1024, 1024, 0, stream1 >>>(vecA_device + size / 2, vecB_device + size / 2, 2.25);

    // BUG FIX: these copies go device -> host; the original passed
    // cudaMemcpyDeviceToDevice with a host destination.
    cudaMemcpyAsync(vecA, vecA_device, sizeof(float_t) * size / 2, cudaMemcpyDeviceToHost, stream0);
    cudaMemcpyAsync(vecA + size / 2, vecA_device + size / 2, sizeof(float_t) * size / 2, cudaMemcpyDeviceToHost, stream1);
    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);

    stop = std::chrono::high_resolution_clock::now();
    time_span = std::chrono::duration_cast<std::chrono::duration<double_t>>(stop - start);
    std::cout << "SAXPY time (s) - " << time_span.count() << std::endl;

    cudaFree(vecA_device);
    cudaFree(vecB_device);
    cudaFreeHost(vecA);
    cudaFreeHost(vecB);
    // BUG FIX: destroy the streams (the original leaked them, plus an
    // unused stream_m0 that has been removed).
    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
}
13,372
#include <stdio.h> int main(){ cudaDeviceProp devProps; cudaGetDeviceProperties(&devProps,0); printf("Device 0 name: %s\n",devProps.name); printf("Compute capability %d.%d\n",devProps.major,devProps.minor); }
13,373
#include "includes.h" __global__ void backward_maxpool_depth_layer_kernel(int n, int w, int h, int c, int batch, float *delta, float *prev_delta, int *indexes) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; int index = indexes[id]; prev_delta[index] += delta[id]; }
13,374
// Last update: 2018/12/01
#include <stdio.h>
#include <stdlib.h>   // malloc/free/atoi/rand/EXIT_SUCCESS (was relied on transitively)

// Abort on any CUDA runtime error, reporting file/line and the error string.
#define CHECK(call)                                                  \
{                                                                    \
    const cudaError_t error = call;                                  \
    if (error != cudaSuccess)                                        \
    {                                                                \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);       \
        fprintf(stderr, "code: %d, reason: %s\n", error,             \
                cudaGetErrorString(error));                          \
        exit(1);                                                     \
    }                                                                \
}

// Small CUDA-event stopwatch; times work submitted to the default stream.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    void Start()
    {
        cudaEventRecord(start, 0);
    }

    void Stop()
    {
        cudaEventRecord(stop, 0);
    }

    float Elapsed()
    {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};

// Sequential inclusive scan on the host; the reference for correctness checks.
// Precondition: n >= 1.
void scanByHost(int * in, int * out, int n)
{
    GpuTimer timer;
    timer.Start();

    out[0] = in[0];
    for (int i = 1; i < n; i++)
    {
        out[i] = out[i - 1] + in[i];
    }

    timer.Stop();
    printf("Time of scanByHost: %.3f ms\n\n", timer.Elapsed());
}

/*
Scan within each block's data (work-inefficient Hillis-Steele), write results
to "out", and write each block's sum to "blkSums" if "blkSums" is not NULL.
Launch with blockDim.x * sizeof(int) bytes of dynamic shared memory.
*/
__global__ void scanBlks1(int * in, int * out, int n, int * blkSums)
{
    extern __shared__ int s_data[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Zero-pad the tail block so the scan below needs no bounds checks.
    if (i < n)
        s_data[threadIdx.x] = in[i];
    else
        s_data[threadIdx.x] = 0;
    __syncthreads();

    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        // BUG FIX: the original called __syncthreads() inside the
        // "threadIdx.x >= stride" branch. A barrier inside divergent control
        // flow is undefined behavior (threads below the stride never reach
        // it — typically a hang). Pattern: all threads read, ALL threads
        // barrier, all threads write.
        int neededVal = 0;
        if (threadIdx.x >= stride)
            neededVal = s_data[threadIdx.x - stride];
        __syncthreads();
        if (threadIdx.x >= stride)
            s_data[threadIdx.x] += neededVal;
        __syncthreads();
    }

    if (i < n)
        out[i] = s_data[threadIdx.x];
    // One thread suffices to publish the block total (last scanned element).
    if (blkSums != NULL && threadIdx.x == 0)
        blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}

/*
Scan within each block's data (work-efficient Blelloch-style inclusive scan),
write results to "out", and write each block's sum to "blkSums" if "blkSums"
is not NULL. Each block processes 2 * blockDim.x elements; launch with
2 * blockDim.x * sizeof(int) bytes of dynamic shared memory.
*/
__global__ void scanBlks2(int * in, int * out, int n, int * blkSums)
{
    // 1. Each block loads data from GMEM to SMEM.
    extern __shared__ int s_data[];
    int i1 = blockIdx.x * 2 * blockDim.x + threadIdx.x;
    int i2 = i1 + blockDim.x;
    // FIX: zero-pad out-of-range slots — the original left them
    // uninitialized, so the tail block's reduction read garbage shared
    // memory (harmless for in-range outputs, but UB and noisy under
    // compute-sanitizer; also made the last blkSums entry garbage).
    s_data[threadIdx.x] = (i1 < n) ? in[i1] : 0;
    s_data[threadIdx.x + blockDim.x] = (i2 < n) ? in[i2] : 0;
    __syncthreads();

    // 2. Each block does scan with data on SMEM.
    // 2.1. Reduction (up-sweep) phase.
    for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
    {
        int s_dataIdx = (threadIdx.x + 1) * 2 * stride - 1; // To avoid warp divergence
        if (s_dataIdx < 2 * blockDim.x)
            s_data[s_dataIdx] += s_data[s_dataIdx - stride];
        __syncthreads();
    }
    // 2.2. Post-reduction (down-sweep) phase.
    for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
    {
        int s_dataIdx = (threadIdx.x + 1) * 2 * stride - 1 + stride;
        if (s_dataIdx < 2 * blockDim.x)
            s_data[s_dataIdx] += s_data[s_dataIdx - stride];
        __syncthreads();
    }

    // 3. Each block writes results from SMEM to GMEM.
    if (i1 < n)
        out[i1] = s_data[threadIdx.x];
    if (i2 < n)
        out[i2] = s_data[threadIdx.x + blockDim.x];
    if (blkSums != NULL && threadIdx.x == 0)
        blkSums[blockIdx.x] = s_data[2 * blockDim.x - 1];
}

// Add the scanned sum of all preceding data blocks to every element of data
// blocks 1..numBlks-1 (block 0 already holds final results). Grid block b
// updates data block b + 1; launch with blkDataSize threads per block.
__global__ void addPrevSum(int * blkSumsScan, int * blkScans, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x + blockDim.x;
    if (i < n)
    {
        blkScans[i] += blkSumsScan[blockIdx.x];
    }
}

// Full device scan: per-block scan, host scan of block sums, then fix-up.
// kernelType 1 -> scanBlks1 (block covers blkSize elements),
// kernelType 2 -> scanBlks2 (block covers 2*blkSize elements; note the
// fix-up launch then uses 2*blkSize threads, so blkSize must be <= 512).
void scanByDevice(int * in, int * out, int n, int kernelType, int blkSize)
{
    GpuTimer timer;
    timer.Start();

    // Allocate device memories.
    int *d_in, *d_out;
    size_t bytes = n * sizeof(int);
    CHECK(cudaMalloc(&d_in, bytes));
    CHECK(cudaMalloc(&d_out, bytes));

    int blkDataSize;
    if (kernelType == 1)
        blkDataSize = blkSize;
    else
        blkDataSize = 2 * blkSize;

    int * d_blkSums;
    int numBlks = (n - 1) / blkDataSize + 1;
    CHECK(cudaMalloc(&d_blkSums, numBlks * sizeof(int)));

    // Copy data to device memories.
    CHECK(cudaMemcpy(d_in, in, bytes, cudaMemcpyHostToDevice));

    // Call kernel to scan within each block's input data.
    if (kernelType == 1)
        scanBlks1<<<numBlks, blkSize, blkDataSize * sizeof(int)>>>(d_in, d_out, n, d_blkSums);
    else // kernelType == 2
        scanBlks2<<<numBlks, blkSize, blkDataSize * sizeof(int)>>>(d_in, d_out, n, d_blkSums);
    cudaDeviceSynchronize();
    CHECK(cudaGetLastError());

    // Scan "d_blkSums" (by host).
    int * blkSums;
    blkSums = (int *)malloc(numBlks * sizeof(int));
    CHECK(cudaMemcpy(blkSums, d_blkSums, numBlks * sizeof(int), cudaMemcpyDeviceToHost));
    for (int i = 1; i < numBlks; i++)
        blkSums[i] += blkSums[i-1];
    CHECK(cudaMemcpy(d_blkSums, blkSums, numBlks * sizeof(int), cudaMemcpyHostToDevice));
    free(blkSums);

    // Add each block's preceding total to its scan result.
    // FIX: guard numBlks > 1 — the original launched numBlks-1 blocks
    // unconditionally, which is an invalid zero-block launch when the whole
    // input fits in a single block.
    if (numBlks > 1)
    {
        addPrevSum<<<numBlks - 1, blkDataSize>>>(d_blkSums, d_out, n);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
    }

    // Copy result from device memories.
    CHECK(cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost));

    // Free device memories.
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    CHECK(cudaFree(d_blkSums));

    timer.Stop();
    printf("Time of scanByDevice (kernelType=%d): %.3f ms\n\n", kernelType, timer.Elapsed());
}

// Dump basic properties of CUDA device 0.
void printDeviceInfo()
{
    cudaDeviceProp devProv;
    CHECK(cudaGetDeviceProperties(&devProv, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", devProv.name);
    printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
    printf("Num SMs: %d\n", devProv.multiProcessorCount);
    printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
    printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
    printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
    printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
    printf("****************************\n\n");
}

// Element-wise comparison of device result vs. host reference.
bool checkCorrectness(int * out, int * correctOut, int n)
{
    for (int i = 0; i < n; i++)
        if (out[i] != correctOut[i])
            return false;
    return true;
}

int main(int argc, char ** argv)
{
    // PRINT OUT DEVICE INFO
    printDeviceInfo();

    // SET UP INPUT SIZE (deliberately not a multiple of the block size)
    int n = (1 << 24) + 1;
    printf("Input size: %d\n\n", n);

    // ALLOCATE MEMORIES
    size_t bytes = n * sizeof(int);
    int * in = (int *)malloc(bytes);
    int * out = (int *)malloc(bytes);        // Device result
    int * correctOut = (int *)malloc(bytes); // Host result

    // SET UP INPUT DATA
    for (int i = 0; i < n; i++)
        in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128]

    // DETERMINE BLOCK SIZE
    int blockSize1 = 512; // Default for "scanBlks1"
    int blockSize2 = 512; // Default for "scanBlks2"
    if (argc == 2)
    {
        blockSize1 = blockSize2 = atoi(argv[1]);
    }
    else if (argc == 3)
    {
        blockSize1 = atoi(argv[1]);
        blockSize2 = atoi(argv[2]);
    }

    // SCAN BY HOST
    scanByHost(in, correctOut, n);

    // SCAN BY DEVICE, KERNEL 1
    int kernelType = 1;
    scanByDevice(in, out, n, kernelType, blockSize1);
    if (checkCorrectness(out, correctOut, n) == false)
        printf("scanByDevice (kernelType=%d) is INCORRECT!\n\n", kernelType);

    // SCAN BY DEVICE, KERNEL 2
    memset(out, 0, bytes); // Reset output
    kernelType = 2;
    scanByDevice(in, out, n, kernelType, blockSize2);
    if (checkCorrectness(out, correctOut, n) == false)
        printf("scanByDevice (kernelType=%d) is INCORRECT!\n\n", kernelType);

    // FREE MEMORIES
    free(in);
    free(out);
    free(correctOut);

    return EXIT_SUCCESS;
}
13,375
/* Non-local-means partial sums for a 7x7 patch window.
 *
 * blockDim=[16, 16, 1]
 *
 * i refers to x coordinate of the j or y pixel
 * j refers to y coordinate of the j or y pixel
 * k refers to x coordinate of the i or x pixel
 * l refers to y coordinate of the i or x pixel
 *
 * Each thread compares the 7x7 patch around its own pixel (i, j) with the
 * 7x7 patch around the reference pixel (k, l) selected by blockIdx.z, then
 * the block reduces the 256 per-thread weights to one partial sum.
 * NOTE(review): A appears to be an image padded by 3 pixels on each side,
 * row stride (m+6) — TODO confirm padded height against the caller.
 */
__global__ void calc_partial_sums7(float const * const A, float *partialSum16x16, float *ZpartialSum16x16, float patchSigma, float filtSigmaSquared, int m, int n)
{
    // Per-block scratch: similarity weights Z and weighted pixel values my_w.
    __shared__ float Z[256], my_w[256];
    // Gaussian-weighted patch differences, one 7x7 window per thread.
    float differences_of_areaG[49];
    int i=blockDim.x*blockIdx.x+threadIdx.x+3;
    int j=blockDim.y*blockIdx.y+threadIdx.y+3;
    // blockIdx.z enumerates the reference pixel (k, l) in the padded image.
    int k=(blockIdx.z)%(m+6)+3;
    int l=(blockIdx.z)/(m+6)+3;
    // Linear thread id within the 16x16 block.
    int tid=threadIdx.y*16+threadIdx.x;
    for (int u=-3; u<=3; u++)
    {
        for (int v=-3; v<=3; v++)
        {
            // Row stride 7 matches the 7x7 window (indices 0..48).
            differences_of_areaG[7*(u+3)+v+3]=(A[(l+u)*(m+6)+k+v]-A[(j+u)*(m+6)+i+v])*expf(-(u*u+v*v)/(2*patchSigma)); // The first A is the same for the whole block.
        }
    }
    // Similarity weight: exp(-||weighted diff||^2 / (2*filtSigma^2)).
    Z[tid]=expf(-powf(normf(49, differences_of_areaG),2)/(2*filtSigmaSquared));
    my_w[tid]=Z[tid]*A[i+j*(m+6)];

    // Reduction algorithm: tree-reduce the 256 entries of Z and my_w.
    __syncthreads();
    if (tid<128) {Z[tid]+=Z[tid+128]; my_w[tid]+=my_w[tid+128]; }
    __syncthreads();
    if (tid< 64) {Z[tid]+=Z[tid+ 64]; my_w[tid]+=my_w[tid+ 64]; }
    __syncthreads();
    if (tid< 32) {Z[tid]+=Z[tid+ 32]; my_w[tid]+=my_w[tid+ 32]; }
    __syncthreads();
    if (tid< 16) {Z[tid]+=Z[tid+ 16]; my_w[tid]+=my_w[tid+ 16]; }
    __syncthreads();
    if (tid<  8) {Z[tid]+=Z[tid+  8]; my_w[tid]+=my_w[tid+  8]; }
    __syncthreads();
    if (tid<  4) {Z[tid]+=Z[tid+  4]; my_w[tid]+=my_w[tid+  4]; }
    __syncthreads();
    if (tid<  2) {Z[tid]+=Z[tid+  2]; my_w[tid]+=my_w[tid+  2]; }
    __syncthreads();
    if (tid<  1)
    {
        // Pass the value to global variable (final pair combined inline).
        ZpartialSum16x16[gridDim.x*gridDim.y*blockIdx.z+gridDim.x*blockIdx.y+blockIdx.x]=Z[0]+Z[1];
        partialSum16x16[gridDim.x*gridDim.y*blockIdx.z+gridDim.x*blockIdx.y+blockIdx.x]=my_w[0]+my_w[1];
    }
}

/* blockDim=[16, 16, 1]
 *
 * i refers to x coordinate of the j or y pixel
 * j refers to y coordinate of the j or y pixel
 * k refers to x coordinate of the i or x pixel
 * l refers to y coordinate of the i or x pixel
 *
 */
/* Non-local-means partial sums for a 5x5 patch window. blockDim=[16, 16, 1].
 * Each thread compares the 5x5 patch around its pixel (i, j) with the patch
 * around the reference pixel (k, l) chosen by blockIdx.z; the block then
 * reduces its 256 weights to one partial sum per output slot.
 *
 * BUG FIX: the difference array was indexed with row stride 7 (copied from
 * the 7x7 kernel). For a 5x5 window the stride must be 5: with stride 7 the
 * maximum index was 7*4+4 = 32, overrunning the 25-element local array, and
 * normf(25, ...) read elements that were never written.
 */
__global__ void calc_partial_sums5(float const * const A, float *partialSum16x16, float *ZpartialSum16x16, float patchSigma, float filtSigmaSquared, int m, int n)
{
    // Per-block scratch: similarity weights Z and weighted pixel values my_w.
    __shared__ float Z[256], my_w[256];
    // Gaussian-weighted patch differences, one 5x5 window per thread.
    float differences_of_areaG[25];
    int i=blockDim.x*blockIdx.x+threadIdx.x+2;
    int j=blockDim.y*blockIdx.y+threadIdx.y+2;
    // blockIdx.z enumerates the reference pixel (k, l); padding is 2 pixels,
    // row stride (m+4).
    int k=(blockIdx.z)%(m+4)+2;
    int l=(blockIdx.z)/(m+4)+2;
    int tid=threadIdx.y*16+threadIdx.x;
    for (int u=-2; u<=2; u++)
    {
        for (int v=-2; v<=2; v++)
        {
            // Row stride 5 for the 5x5 window (indices 0..24).
            differences_of_areaG[5*(u+2)+v+2]=(A[(l+u)*(m+4)+k+v]-A[(j+u)*(m+4)+i+v])*expf(-(u*u+v*v)/(2*patchSigma)); // The first A is the same for the whole block.
        }
    }
    // Similarity weight: exp(-||weighted diff||^2 / (2*filtSigma^2)).
    Z[tid]=expf(-powf(normf(25, differences_of_areaG),2)/(2*filtSigmaSquared));
    my_w[tid]=Z[tid]*A[i+j*(m+4)];

    // Tree reduction of the 256 entries of Z and my_w.
    __syncthreads();
    if (tid<128) {Z[tid]+=Z[tid+128]; my_w[tid]+=my_w[tid+128]; }
    __syncthreads();
    if (tid< 64) {Z[tid]+=Z[tid+ 64]; my_w[tid]+=my_w[tid+ 64]; }
    __syncthreads();
    if (tid< 32) {Z[tid]+=Z[tid+ 32]; my_w[tid]+=my_w[tid+ 32]; }
    __syncthreads();
    if (tid< 16) {Z[tid]+=Z[tid+ 16]; my_w[tid]+=my_w[tid+ 16]; }
    __syncthreads();
    if (tid<  8) {Z[tid]+=Z[tid+  8]; my_w[tid]+=my_w[tid+  8]; }
    __syncthreads();
    if (tid<  4) {Z[tid]+=Z[tid+  4]; my_w[tid]+=my_w[tid+  4]; }
    __syncthreads();
    if (tid<  2) {Z[tid]+=Z[tid+  2]; my_w[tid]+=my_w[tid+  2]; }
    __syncthreads();
    if (tid<  1)
    {
        // Pass the value to global variable (final pair combined inline).
        ZpartialSum16x16[gridDim.x*gridDim.y*blockIdx.z+gridDim.x*blockIdx.y+blockIdx.x]=Z[0]+Z[1];
        partialSum16x16[gridDim.x*gridDim.y*blockIdx.z+gridDim.x*blockIdx.y+blockIdx.x]=my_w[0]+my_w[1];
    }
}

/* Non-local-means partial sums for a 3x3 patch window. blockDim=[16, 16, 1].
 *
 * BUG FIX: same stride bug as the 5x5 variant — the difference array was
 * indexed with stride 7; for a 3x3 window the stride must be 3 (with 7 the
 * maximum index was 16, overrunning the 9-element array).
 */
__global__ void calc_partial_sums3(float const * const A, float *partialSum16x16, float *ZpartialSum16x16, float patchSigma, float filtSigmaSquared, int m, int n)
{
    __shared__ float Z[256], my_w[256];
    // Gaussian-weighted patch differences, one 3x3 window per thread.
    float differences_of_areaG[9];
    int i=blockDim.x*blockIdx.x+threadIdx.x+1;
    int j=blockDim.y*blockIdx.y+threadIdx.y+1;
    // Padding is 1 pixel, row stride (m+2).
    int k=(blockIdx.z)%(m+2)+1;
    int l=(blockIdx.z)/(m+2)+1;
    int tid=threadIdx.y*16+threadIdx.x;
    for (int u=-1; u<=1; u++)
    {
        for (int v=-1; v<=1; v++)
        {
            // Row stride 3 for the 3x3 window (indices 0..8).
            differences_of_areaG[3*(u+1)+v+1]=(A[(l+u)*(m+2)+k+v]-A[(j+u)*(m+2)+i+v])*expf(-(u*u+v*v)/(2*patchSigma)); // The first A is the same for the whole block.
        }
    }
    Z[tid]=expf(-powf(normf(9, differences_of_areaG),2)/(2*filtSigmaSquared));
    my_w[tid]=Z[tid]*A[i+j*(m+2)];

    // Tree reduction of the 256 entries of Z and my_w.
    __syncthreads();
    if (tid<128) {Z[tid]+=Z[tid+128]; my_w[tid]+=my_w[tid+128]; }
    __syncthreads();
    if (tid< 64) {Z[tid]+=Z[tid+ 64]; my_w[tid]+=my_w[tid+ 64]; }
    __syncthreads();
    if (tid< 32) {Z[tid]+=Z[tid+ 32]; my_w[tid]+=my_w[tid+ 32]; }
    __syncthreads();
    if (tid< 16) {Z[tid]+=Z[tid+ 16]; my_w[tid]+=my_w[tid+ 16]; }
    __syncthreads();
    if (tid<  8) {Z[tid]+=Z[tid+  8]; my_w[tid]+=my_w[tid+  8]; }
    __syncthreads();
    if (tid<  4) {Z[tid]+=Z[tid+  4]; my_w[tid]+=my_w[tid+  4]; }
    __syncthreads();
    if (tid<  2) {Z[tid]+=Z[tid+  2]; my_w[tid]+=my_w[tid+  2]; }
    __syncthreads();
    if (tid<  1)
    {
        // Pass the value to global variable (final pair combined inline).
        ZpartialSum16x16[gridDim.x*gridDim.y*blockIdx.z+gridDim.x*blockIdx.y+blockIdx.x]=Z[0]+Z[1];
        partialSum16x16[gridDim.x*gridDim.y*blockIdx.z+gridDim.x*blockIdx.y+blockIdx.x]=my_w[0]+my_w[1];
    }
}
13,376
/* Batched pairwise sequence-comparison kernel.
 *
 * NOTE(review): the recurrence D[i+1][j+1] = min(min(E, F), D_Sim) with a
 * match/mismatch cost (K4_S1/K4_S2) and gap cost (K4_S3) looks like an
 * edit-distance / alignment DP over sequence pairs — TODO confirm intended
 * semantics with the K3/K4 pipeline.
 *
 * Work distribution: each grid block handles one "Sub_Block" owned by this
 * rank (MyProc), and each thread walks one (Start_A, Start_B) sequence pair
 * in [Start_Th1, End_Th1), filling its private DP tile inside D and writing
 * the final cell to Kernel_4_output.
 */
__global__ void Kernel_4 (int* B,int* D, int* Kernel_4_output, int* Start_A, int* Start_B, int* Length_Seq_K4, int K3_Length, int K3_Report, int K3_Safety, int K_3_R, int MyProc, int Start_Th1, int End_Th1,int K4_S1, int K4_S2,int K4_S3)
{
    // Grid-stride loop over this rank's sub-blocks.
    for(int Sub_Block =(MyProc * K_3_R + blockIdx.x); Sub_Block < ((MyProc+1)*K_3_R); Sub_Block += gridDim.x)
    {
        // Block-stride loop over the sequence pairs assigned to this block.
        for (int Sub_Thread1=Start_Th1+threadIdx.x; Sub_Thread1<End_Th1; Sub_Thread1+=blockDim.x)
        {
            // Thread-local slot index used to address this thread's DP tile.
            int Sub_Thread = Sub_Thread1 % blockDim.x;
            // Byte offsets of the two sequences inside the packed buffer B.
            int A_Loc = Start_A[Sub_Thread1] * K3_Safety * K3_Length + Sub_Block * K3_Safety * K3_Length * K3_Report;
            int B_Loc = Start_B[Sub_Thread1] * K3_Safety * K3_Length + Sub_Block * K3_Safety * K3_Length * K3_Report;
            // Actual lengths of the two sequences.
            int End_A = Length_Seq_K4[Start_A[Sub_Thread1] + Sub_Block * K3_Report];
            int End_B = Length_Seq_K4[Start_B[Sub_Thread1] + Sub_Block * K3_Report];
            for (int i = 0; i<End_B; i++ )
            {
                for (int j = 0; j<End_A; j++)
                {
                    int D_Sim;
                    // Num  = cell (i, j) of this thread's (K3_Length+1)^2 tile;
                    // Num1 = cell (i+1, j) — the row below.
                    int Num = i *(K3_Length+1) + j + Sub_Thread*(K3_Length+1)*(K3_Length+1) + Sub_Block * (K3_Length+1)*(K3_Length+1)*(blockDim.x+1);
                    int Num1 = (i+1)*(K3_Length+1) + j + Sub_Thread*(K3_Length+1)*(K3_Length+1) + Sub_Block * (K3_Length+1)*(K3_Length+1)*(blockDim.x+1);
                    // Diagonal move: match (K4_S1) or substitution (K4_S2) cost.
                    if (B[A_Loc + j]==B[B_Loc + i]) D_Sim = D[Num]+K4_S1;
                    else D_Sim=D[Num]+K4_S2;
                    // Horizontal / vertical moves with gap cost K4_S3.
                    int F = D[Num+1] + K4_S3;
                    int E = D[Num1] + K4_S3;
                    D[Num1+1] = min(min(E,F),D_Sim);
                }
            }
            // Emit the bottom-right DP cell as this pair's score.
            int Index_1 = Sub_Thread1 + Sub_Block * K3_Report * K3_Report;
            int Num1 = (End_B)*(K3_Length+1) + End_A-1 + Sub_Thread*(K3_Length+1)*(K3_Length+1) + Sub_Block * (K3_Length+1)*(K3_Length+1)*(blockDim.x+1);
            Kernel_4_output[Index_1] = D[Num1+1];
        }
    }
}
// __syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
13,377
/*
 * This code is courtesy of, and copyright 2015,
 * Tomas Oppelstrup, Livermore National Lab. Please
 * do not redistribute without his approval.
 *
 * Counting-sort ("radix by box index") of n particles into nc boxes, staged
 * as seven kernels: zero counts, count + per-item rank, per-group sums,
 * scan of group sums, exclusive box offsets, global item positions, and
 * finally the data shuffle.
 */

#define NTHREADS_RADIX 128
#define NBLOCKS_RADIX 56

// Stage 1: zero the per-box counters (grid-stride over nc boxes).
__global__ static void boxsum_stage1(int nc,int count[]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x, nb = gridDim.x;
  const int pid = bid*nt + tid, np = nb*nt;
  int i;
  for(i = pid; i<nc; i+=np)
    count[i] = 0;
}

// Stage 2: for each item i, count it in its box v[i] and record its rank
// within that box in listid[i]. The else-branch scrambles which thread
// issues each atomic (permutation by 19 mod nt) — presumably to spread
// atomic traffic across boxes; TODO confirm that intent.
__global__ static void boxsum_stage2(int n,int v[],int listid[],int count[]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x, nb = gridDim.x;
  const int pid = bid*nt + tid, np = nb*nt;
  int i,vi,x;
  volatile __shared__ struct { int vi[NTHREADS_RADIX],x[NTHREADS_RADIX]; } shm;
  if(0) {
    // Simple reference version (disabled).
    for(i = pid; i<n; i+=np) {
      // Compute index within box.
      vi = v[i];
      x = atomicAdd(&count[vi],1);
      listid[i] = x;
    }
  } else {
    // Loop bound n+tid keeps whole warps iterating so __syncthreads() below
    // is reached by every thread of the block.
    for(i = pid; i<n+tid; i+=np) {
      // Compute index within box.
      if(n-(i-tid) < nt) {
        // Tail chunk: fall back to the direct form (no barriers here).
        if(i < n) {
          vi = v[i];
          x = atomicAdd(&count[vi],1);
          listid[i] = x;
        }
      } else {
        shm.vi[tid] = v[i];
        __syncthreads();
        // Requirement is that gcd(19,nt) = 1  (19 generates a permutation of 0..nt-1)
        shm.x[(19*tid)%nt] = atomicAdd(&count[shm.vi[(19*tid)%nt]],1);
        __syncthreads();
        listid[i] = shm.x[tid];
      }
    }
    /* int i0,i1; { int q = n/np; int r = n%np; if(pid >= r) { i0 = q*pid + r; i1 = i0 + q; } else { i0 = q*pid + pid; i1 = i0 + q + 1; } } for(i = i0; i<i1; i++) { vi = v[i]; x = atomicAdd(&count[vi],1); listid[i] = x; } */
  }
}

// Stage 3: sum the counts of each group of nt=128 consecutive boxes into
// psum[group] using a shared-memory tree reduction.
__global__ static void boxsum_stage3(int nboxes,int count[],int psum[]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x, nb = gridDim.x;
  const int pid = bid*nt + tid, np = nb*nt;
  int i,x;
  volatile __shared__ struct { int x[NTHREADS_RADIX]; } shm;
  // Loop bound nboxes+tid keeps all threads in step for the barriers.
  for(i = pid; i<nboxes+tid; i+=np) {
    __syncthreads();
    x = 0;
    if(i < nboxes) x = count[i];
    shm.x[tid] = x;
    __syncthreads();
    if(tid < 64) shm.x[tid] += shm.x[tid+64];
    __syncthreads();
    // Final warp: volatile shared memory, no barriers (single-warp reduce).
    if(tid < 32) {
      shm.x[tid] += shm.x[tid+32];
      shm.x[tid] += shm.x[tid+16];
      shm.x[tid] += shm.x[tid+ 8];
      shm.x[tid] += shm.x[tid+ 4];
      shm.x[tid] += shm.x[tid+ 2];
      shm.x[tid] += shm.x[tid+ 1];
    }
    if(tid == 0) psum[i/nt] = shm.x[0];
  }
}

// Stage 4: single-block inclusive scan of the n group sums in psum,
// carrying the running total s across chunks of nt elements.
__global__ static void boxsum_stage4(int n,int psum[]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x;
  int i,j,x,s;
  volatile __shared__ int xshare[NTHREADS_RADIX];
  s = 0;
  if(bid == 0)
    for(i = tid; i<n+tid; i+=nt) {
      __syncthreads();
      x = 0;
      if(i < n) x = psum[i];
      xshare[tid] = x;
      __syncthreads();
      // Make cumulative summation of columns in type!
      j = 1;
      while(j < nt) {
        if(tid >= j) x += xshare[tid-j];
        __syncthreads();
        xshare[tid] = x;
        j = j*2;
        __syncthreads();
      }
      if(i < n) psum[i] = xshare[tid] + s;
      s = s + xshare[nt-1];
    }
}

// Stage 5: convert per-box counts into exclusive start offsets using the
// scanned group sums. The [0] planes of shm.x/shm.y are pre-zeroed so that
// the deliberately negative indices (tid-1, tid-2, ...) in the ladder below
// read zeros instead of garbage.
__global__ static void boxsum_stage5(int nboxes,int count[],int psum[]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x, nb = NBLOCKS_RADIX;
  const int pid = bid*nt + tid, np = nb*nt;
  int i,x,x1;
  volatile __shared__ struct { int psum,x[2][NTHREADS_RADIX],y[2][NTHREADS_RADIX]; } shm;
  shm.x[0][tid] = 0;
  shm.y[0][tid] = 0;
  for(i = pid; i<nboxes+tid; i+=np) {
    __syncthreads();
    if(tid == 0) shm.psum = psum[i/nt];
    x = 0;
    if(i < nboxes) x = count[i];
    x1 = x;
    // Hillis-Steele inclusive scan within the 128-box group; ping-pongs
    // between the x[1] and y[1] planes each step.
    shm.x[1][tid] = x; __syncthreads();
    x += shm.x[1][tid- 1]; shm.y[1][tid] = x; __syncthreads();
    x += shm.y[1][tid- 2]; shm.x[1][tid] = x; __syncthreads();
    x += shm.x[1][tid- 4]; shm.y[1][tid] = x; __syncthreads();
    x += shm.y[1][tid- 8]; shm.x[1][tid] = x; __syncthreads();
    x += shm.x[1][tid-16]; shm.y[1][tid] = x; __syncthreads();
    x += shm.y[1][tid-32]; shm.x[1][tid] = x; __syncthreads();
    x += shm.x[1][tid-64]; shm.y[1][tid] = x; __syncthreads();
    // Shift from inclusive-within-group to global offsets: add scanned group
    // total up to this group, subtract this group's own total.
    x += shm.psum - shm.y[1][nt-1];
    //if(i == nboxes-1) { x=0; x1=0; }
    // x - x1 is the exclusive prefix (start offset) of box i.
    if(i < nboxes) count[i] = x-x1;
  }
}

// Stage 6: turn each item's within-box rank into its global output slot by
// adding its box's start offset (count[0] is 0 after stage 5, hence the
// bno > 0 shortcut).
__global__ static void boxsum_stage6(int n,int v[],int listid[],int count[]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x, nb = NBLOCKS_RADIX;
  const int pid = bid*nt + tid, np = nb*nt;
  int i,lid,bno,idx;
  for(i = pid; i<n; i+=np) {
    lid = listid[i];
    bno = v[i];
    idx = 0;
    if(bno > 0) idx = count[bno];
    listid[i] = lid + idx;
  }
}

// Stage 7: scatter the payload (box ids plus two float4-sized records per
// item) to the sorted positions. The float loop strides over 4*xend scalars,
// treating xx/vv rows as contiguous, and routes each scalar through the
// shared idx[] table staged per chunk of nt items.
__global__ static void boxsum_stage7(int n,int listid[],int bin[],float xx[][4], float vv[][4], int bout[],float xxout[][4], float vvout[][4]) {
  const int tid = threadIdx.x, nt = NTHREADS_RADIX;
  const int bid = blockIdx.x, nb = NBLOCKS_RADIX;
  const int pid = bid*nt + tid, np = nb*nt;
  int i,j,xend;
  float x,v;
  volatile __shared__ struct { int idx[NTHREADS_RADIX]; } shm;
  for(i = pid; i<n+tid; i+=np) {
    j = 0;
    if(i < n) j = listid[i];
    // Number of valid items in this chunk (tail chunk may be short).
    xend = min(nt,n-(i-tid));
    __syncthreads();
    shm.idx[tid] = j;
    __syncthreads();
    if(i < n) bout[j] = bin[i];
    for(j = tid; j<4*xend; j+=nt) {
      x = xx[i-tid][j];
      v = vv[i-tid][j];
      xxout[shm.idx[j/4]][j%4] = x;
      vvout[shm.idx[j/4]][j%4] = v;
    }
  }
}

// Host driver: sorts n items into nc boxes, reusing statically cached device
// scratch buffers (psum_g, listid_g) that grow on demand. Calling with
// n <= 0 or nc <= 0 releases the cached buffers.
// NOTE(review): launches are unchecked and asynchronous; callers presumably
// synchronize before consuming the *_out buffers — confirm at call sites.
void rsort_card(int n,int nc, int *xin_g,float (*data1in_g)[4],float (*data2in_g)[4], int *xout_g,float (*data1out_g)[4],float (*data2out_g)[4], int *count_g) {
  static int n_init = 0, nc_init = 0;
  static int *psum_g,*listid_g;
  int ns = (nc+NTHREADS_RADIX-1)/NTHREADS_RADIX;
  if(n <= 0 || nc <= 0) {
    // Teardown call: free cached scratch space.
    if(n_init > 0) { cudaFree(listid_g); cudaFree(psum_g); }
    n_init = 0;
    nc_init = 0;
  } else if(n > n_init || nc > nc_init) {
    // Grow the cached scratch buffers.
    if(n_init > 0) { cudaFree(listid_g); cudaFree(psum_g); }
    cudaMalloc((void **) &psum_g,sizeof(int) * ns);
    cudaMalloc((void **) &listid_g,sizeof(int) * n);
    n_init = n;
    nc_init = nc;
  }
  if(n > 0 && nc > 0) {
    /* int *listid = (int *) malloc(sizeof(int) * n); int *count = (int *) malloc(sizeof(int) * nc); int *count2 = (int *) malloc(sizeof(int) * nc); int *psum = (int *) malloc(sizeof(int) * ns); int *psum2 = (int *) malloc(sizeof(int) * ns); int i,s; */
    boxsum_stage1<<<NBLOCKS_RADIX,NTHREADS_RADIX>>>(nc,count_g);
    /* cudaThreadSynchronize(); s = 0; for(i = 0; i<nc; i++) s += abs(count[i]); if(s != 0) printf("count not zeroed, s=%d\n",s); */
    boxsum_stage2<<<NBLOCKS_RADIX,NTHREADS_RADIX>>>(n,xin_g,listid_g,count_g);
    /* cudaThreadSynchronize(); cudaMemcpy(listid,listid_g,sizeof(int) * n,cudaMemcpyDeviceToHost); cudaMemcpy(count,count_g,sizeof(int) * nc,cudaMemcpyDeviceToHost); s = 0; for(i = 0; i<nc; i++) s += count[i]; if(s != n) printf("Error in count, s=%d, n=%d\n",s,n); */
    boxsum_stage3<<<NBLOCKS_RADIX,NTHREADS_RADIX>>>(nc,count_g,psum_g);
    /* cudaThreadSynchronize(); cudaMemcpy(psum,psum_g,sizeof(int) * ns,cudaMemcpyDeviceToHost); for(i = 0; i<ns; i++) { int j; s = 0; for(j = 0; j<NTHREADS_RADIX; j++) if(i*NTHREADS_RADIX+j < nc) s += count[i*NTHREADS_RADIX+j]; if(s != psum[i]) printf("psum error, i=%d ns=%d s=%d psum=%d\n",i,ns,s,psum[i]); } */
    boxsum_stage4<<<1,NTHREADS_RADIX>>>(ns,psum_g);
    /* cudaThreadSynchronize(); cudaMemcpy(psum2,psum_g,sizeof(int) * ns,cudaMemcpyDeviceToHost); s = 0; for(i = 0; i<ns; i++) { s += psum[i]; if(s != psum2[i]) printf("cumsum error in psum: s=%d psum2=%d i=%d ns=%d\n", s,psum2[i],i,ns); } */
    boxsum_stage5<<<NBLOCKS_RADIX,NTHREADS_RADIX>>>(nc,count_g,psum_g);
    /* cudaThreadSynchronize(); cudaMemcpy(count2,count_g,sizeof(int) * nc,cudaMemcpyDeviceToHost); s = 0; for(i = 0; i<nc; i++) { s += count[i]; if(s != count2[i]) printf("cumsum error in count: s=%d count2=%d i=%d nc=%d\n", s,count2[i],i,nc); } */
    boxsum_stage6<<<NBLOCKS_RADIX,NTHREADS_RADIX>>>(n,xin_g,listid_g,count_g);
    boxsum_stage7<<<NBLOCKS_RADIX,NTHREADS_RADIX>>>(n,listid_g,xin_g, data1in_g,data2in_g,xout_g, data1out_g,data2out_g);
  }
}
13,378
#define NUM 512

// Exchange the contents of two integer references (device-side helper).
// Alternative swap doesn't use a temporary register:
// a ^= b; b ^= a; a ^= b;
__device__ inline void swap(int & a, int & b)
{
    int hold = a;
    a = b;
    b = hold;
}

// Sort one NUM-element tile per block, in place, with a bitonic network.
// Launch with NUM threads per block and NUM * sizeof(int) bytes of dynamic
// shared memory; block b sorts values[b*NUM .. b*NUM+NUM-1] into results.
__global__ static void bitonicSort(int * values, int *results)
{
    extern __shared__ int tile[];

    const unsigned int lane  = threadIdx.x;
    const unsigned int block = blockIdx.x;

    // Stage this block's slice of the input into shared memory.
    tile[lane] = values[block * NUM + lane];
    __syncthreads();

    // Build bitonic sequences of growing length k, merging each one.
    for (unsigned int k = 2; k <= NUM; k *= 2)
    {
        // Bitonic merge with shrinking compare distance j.
        for (unsigned int j = k / 2; j > 0; j /= 2)
        {
            const unsigned int partner = lane ^ j;
            // Each pair is handled once, by its lower-index lane.
            if (partner > lane)
            {
                if ((lane & k) == 0)
                {
                    // Ascending half of the sequence.
                    if (tile[lane] > tile[partner])
                        swap(tile[lane], tile[partner]);
                }
                else
                {
                    // Descending half of the sequence.
                    if (tile[lane] < tile[partner])
                        swap(tile[lane], tile[partner]);
                }
            }
            __syncthreads();
        }
    }

    // Publish the sorted tile.
    results[block * NUM + lane] = tile[lane];
}
13,379
#include <stdio.h>
#include <cuda_runtime.h>

// Minimal host-only smoke test: print a greeting and exit successfully.
int main(void)
{
    printf("HOLA MUNDO\n");
    return 0;
}
13,380
#include<stdio.h>
#include<iostream>

#define WARP_SIZE 32
#define GROUPSET 16
#define NUMFACES 3
#define fouralpha 1.82
#define fouralpha4 5.82
// Flattened 3-index accessor into a Connect array (3 x mC x zones layout).
#define Connect(a,b,c) Connect[ a + 3 * ( b + mC * c ) ]

extern "C"
{
    // Transport sweep kernel (defined elsewhere in the project).
    __global__ void GPU_sweep(
        int size_maxCorner, int size_maxcf, int nAngle, int nzones,
        int ncornr, int Groups, int nbelem, int* AngleOrder,
        double* soa_omega, int* nextZ, int* next, int* soa_nCorner,
        int* soa_nCFaces, int* soa_c0, double* soa_STotal,
        double* STimeBatch, double* soa_SigtInv, double* soa_Volume,
        double* soa_Sigt, double* soa_A_fp, double* soa_A_ez,
        int* soa_Connect, double* psic, double* psib,
        double* omega_A_fp, double* omega_A_ez, int* Connect_ro,
        int* passZ, bool calcSTime, double tau );

    // Face-geometry precompute kernel (defined elsewhere in the project).
    __global__ void GPU_fp_ez_hplane(
        int size_maxCorner, int size_maxcf, int nzones, int ncornr,
        int Groups, int nbelem, int* AngleOrder, double* soa_omega,
        int* nextZ, int* next, int* soa_nCorner, int* soa_nCFaces,
        int* soa_c0, double* soa_A_fp, double* soa_A_ez,
        double* omega_A_fp, double* omega_A_ez, int* soa_Connect,
        int* soa_Connect_reorder, int* passZ );

    // C wrapper (Fortran-callable: all scalars passed by pointer) that
    // launches GPU_fp_ez_hplane for one batch of angles on `streamid`.
    // NOTE(review): several locals (nZ, nA, NangBin) are unused here, and
    // the commented-out "nZ," argument below means the call passes one
    // fewer scalar than the kernel declaration above lists — confirm this
    // matches the kernel's actual definition in the project.
    void fp_ez_c ( int *anglebatch, int *numzones, int *numgroups, int *ncornr, int *numAngles, int *d_AngleOrder, int *maxcorners, int *maxfaces, int *NangBin, int *nbelem, double *d_omega, int *d_nCorner, int *d_nCFaces, int *d_c0, double *d_A_fp, double *d_omega_A_fp, double *d_A_ez, double* d_omega_A_ez, int *d_Connect, int* d_Connect_reorder, int *d_next, int *d_nextZ, int *d_passZ, cudaStream_t streamid )
    {
        int nZ = *numzones;
        int nA = *numAngles; // will need this for large problems
        int nAbatch = *anglebatch;
        int mC = *maxcorners;
        int mF = *maxfaces;
        int nG = *numgroups;
        int nC = *ncornr;
        int nBe = *nbelem;
        // One block per angle in the batch, 128 threads per block.
        GPU_fp_ez_hplane<<<dim3(nAbatch,1,1),128,0,streamid>>>( mC, mF, // nZ,
            nC, nG, nBe,
            d_AngleOrder, d_omega,
            d_nextZ, d_next,
            d_nCorner, d_nCFaces, d_c0,
            d_A_fp, d_A_ez,
            d_omega_A_fp, d_omega_A_ez,
            d_Connect, d_Connect_reorder, d_passZ );
    }

    // C wrapper (Fortran-callable) that launches one batched GPU_sweep.
    // `octant` (=binRecv) and NangBin are accepted but unused here.
    void snswp3d_c ( int *anglebatch, int *numzones, int *numgroups, int *ncornr, int *numAngles, int *d_AngleOrder, int *maxcorners, int *maxfaces, int *octant, //=binRecv
        int *NangBin, int *nbelem, double *d_omega, int *d_nCorner, int *d_nCFaces, int *d_c0, double *d_A_fp, double *d_omega_A_fp, double *d_A_ez, double* d_omega_A_ez, int *d_Connect, int* d_Connect_reorder, double *d_STotal, double *d_STimeBatch, double *d_Volume, double *d_psic, double *d_psib, int *d_next, int *d_nextZ, double *d_Sigt, double *d_SigtInv, int *d_passZ, bool *calcSTime, double *tau, cudaStream_t streamid )
    {
        static int dump_cnt=0;
        //int zone,ic;
        //static double* d_omega_A_fp;
        //static double* d_omega_A_ez;
        //static int* d_Connect_reorder;
        int nZ = *numzones;
        int nA = *numAngles; // will need this for large problems
        int nAbatch = *anglebatch;
        int mC = *maxcorners;
        int mF = *maxfaces;
        int nG = *numgroups;
        int nC = *ncornr;
        int nBe = *nbelem;
        {
            int groupsize=32;
            // NOTE(review): nG / groupsize is integer division, so ceil() is
            // a no-op; correct only because the guard below restricts nG to
            // multiples of groupsize.
            int nGG = ceil(nG / groupsize);
            //printf("nGG=%d\n",nGG);
            if (nG%groupsize != 0) {printf("current version must use groups of multiple of %d!!! sorry \n",groupsize); exit(0);}
            // shared memory needs are (8+3+3*blockDim.x+3)*blockDim.y;
            //GPU_sweep<<<dim3(*anglebatch,nGG,2),dim3(32,16,1),(8+3+3*32+3)*16*sizeof(double),streamid>>>(
            // Grid: (angles in batch) x (group chunks); block: 32x32 threads.
            // NOTE(review): "nA," is commented out of the argument list —
            // verify the arity against the GPU_sweep declaration above.
            GPU_sweep<<<dim3(*anglebatch,nGG,1),dim3(groupsize,32,1),(8+3+3*groupsize+3)*32*sizeof(double),streamid>>>( mC, mF, // nA,
                nZ, nC, nG, nBe,
                d_AngleOrder, d_omega,
                d_nextZ, d_next,
                d_nCorner, d_nCFaces, d_c0,
                d_STotal, d_STimeBatch, d_SigtInv, d_Volume, d_Sigt,
                d_A_fp, d_A_ez, d_Connect,
                d_psic, d_psib,
                d_omega_A_fp, d_omega_A_ez, d_Connect_reorder, d_passZ,
                *calcSTime, *tau );
            //printf("Completed a batch sweep\n");
            dump_cnt++;
            //std::cout<<"dump_cnt="<<dump_cnt<<std::endl;
        }
    }
} // extern "C"
13,381
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

typedef unsigned long long bignum;

// Device-side primality test by trial division over odd divisors.
// BUG FIX: the original returned 1 for x == 1 (the loop never ran) and 0
// for x == 2 (caught by the even test), so 1 was counted as prime. Handle
// x < 2 and x == 2 explicitly before the even/odd-divisor logic.
__device__ int isPrime(bignum x)
{
    if (x < 2) return 0;      // 0 and 1 are not prime
    if (x == 2) return 1;     // the only even prime
    if (x % 2 == 0) return 0;
    bignum lim = (bignum)sqrt((float)x) + 1;
    for (bignum i = 3; i < lim; i += 2)
    {
        if (x % i == 0) return 0;
    }
    return 1;
}

// One thread per odd number 1, 3, 5, ...: results[index] = isPrime(2*index+1).
// Note the even prime 2 is deliberately outside the tested set, so the host
// sum counts odd primes only.
__global__ void checkPrimes(int *results, int arr_size)
{
    bignum index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < arr_size)
    {
        bignum number = 2 * index + 1;
        results[index] = isPrime(number);
    }
}

// Count odd primes up to the bound given on the command line.
int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        printf("Usage: prime upbound\n");
        exit(-1);
    }

    // FIX: strtoull instead of atoi — atoi truncated the 64-bit bound to int.
    bignum n = strtoull(argv[1], NULL, 10);

    // Host/device result vectors: one flag per odd number <= n.
    int *h_results;
    int *d_results;

    size_t arr_size = (size_t)ceil((float)((n - 1.0) / 2.0));
    printf("arr_size: %zu\n", arr_size);   // FIX: %zu for size_t (was %ld)

    size_t results_num_bytes = arr_size * sizeof(int);

    h_results = (int *)malloc(results_num_bytes);
    if (h_results == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        exit(-1);
    }
    if (cudaMalloc(&d_results, results_num_bytes) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed\n");
        free(h_results);
        exit(-1);
    }

    // Initialize flags to 0 on host and copy to the device.
    bignum i;
    for (i = 0; i < arr_size; i++)
    {
        h_results[i] = 0;
    }
    cudaMemcpy(d_results, h_results, results_num_bytes, cudaMemcpyHostToDevice);

    int blockSize, gridSize;
    blockSize = 1024;
    // Enough blocks to cover all arr_size odd candidates.
    gridSize = (int)ceil((float)((n + 1.0) / 2.0 / blockSize));
    printf("gridSize: %d\n", gridSize);
    printf("gridSize * blockSize: %d\n", gridSize * blockSize);

    checkPrimes<<<gridSize, blockSize>>>(d_results, arr_size);

    cudaMemcpy(h_results, d_results, results_num_bytes, cudaMemcpyDeviceToHost);

    // Tally the per-number primality flags.
    bignum sum = 0;
    for (i = 0; i < arr_size; i++)
    {
        sum += h_results[i];
    }
    printf("final result: %llu\n", sum);   // FIX: %llu for unsigned long long

    cudaFree(d_results);
    free(h_results);

    return 0;
}
13,382
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include <time.h>

#define Mask_size 3    // filter size
#define Width 1024     // image width
#define Height 1024    // image height
#define N (Width*Height)

//---------------kernel-------------------
// One thread per output pixel; both 3x3 masks are applied in a single
// pass so the input image is read only once.  Out-of-range neighbour
// pixels are skipped (treated as zero).
__global__ void ConvolutionKernel(int *I_input, int *Mask1, int *Mask2,
                                  int *I_output1, int *I_output2)
{
    /* Thread row index */
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    /* Thread column index */
    int Col = blockIdx.x * blockDim.x + threadIdx.x;

    // Accumulators are int: image and masks are int, so float added
    // nothing but rounding risk.
    int value1 = 0;
    int value2 = 0;
    int Index = Row * Width + Col;  // output image index

    /* convolution */
    for (int i = 0; i < Mask_size; i++) {
        for (int j = 0; j < Mask_size; j++) {
            int R_start = i + Row - 1;
            int C_start = j + Col - 1;
            if ((C_start >= 0 && C_start < Width) &&
                (R_start >= 0 && R_start < Height)) {
                value1 += Mask1[i * Mask_size + j] * I_input[R_start * Width + C_start];
                value2 += Mask2[i * Mask_size + j] * I_input[R_start * Width + C_start];
            }
        }
    }
    if ((Row < Height) && (Col < Width)) {
        I_output1[Index] = value1;  // convolved images
        I_output2[Index] = value2;
    }
}

//----------------------------main-----------------------------------
int main(void)
{
    int *Image, *Output1, *Output2;
    int *mask1, *mask2;
    int SIZE = Width * Height * sizeof(int);
    int Row, Col;

    Image   = (int *)malloc(SIZE);
    Output1 = (int *)malloc(SIZE);
    Output2 = (int *)malloc(SIZE);
    mask1 = (int *)malloc(Mask_size * Mask_size * sizeof(int));
    mask2 = (int *)malloc(Mask_size * Mask_size * sizeof(int));

    /* pointers to device memory for input image, masks and outputs */
    int *d_image, *d_mask1, *d_mask2, *d_output1, *d_output2;

    // Test data: all-ones image, zeroed outputs.
    for (Row = 0; Row < Height; Row++)
        for (Col = 0; Col < Width; Col++) {
            Image[Row * Width + Col] = 1;
            Output1[Row * Width + Col] = 0;
            Output2[Row * Width + Col] = 0;
        }
    // All-ones and all-twos masks.
    for (Row = 0; Row < Mask_size; Row++)
        for (Col = 0; Col < Mask_size; Col++) {
            mask1[Row * Mask_size + Col] = 1;
            mask2[Row * Mask_size + Col] = 2;
        }

    /* Device Memory Allocation */
    cudaMalloc(&d_image,   (Width * Height) * sizeof(int));
    cudaMalloc(&d_output1, (Width * Height) * sizeof(int));
    cudaMalloc(&d_output2, (Width * Height) * sizeof(int));
    cudaMalloc(&d_mask1, (Mask_size * Mask_size) * sizeof(int));
    cudaMalloc(&d_mask2, (Mask_size * Mask_size) * sizeof(int));

    cudaEvent_t start, stop;  // Cuda API to measure time for Cuda Kernel Execution.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    /* Copying Input Image to GPU Memory */
    cudaMemcpy(d_image, Image, (Width * Height) * sizeof(int), cudaMemcpyHostToDevice);
    /* Copying Masks to GPU Memory */
    cudaMemcpy(d_mask1, mask1, (Mask_size * Mask_size) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask2, mask2, (Mask_size * Mask_size) * sizeof(int), cudaMemcpyHostToDevice);

    /* Two dimensional blocks with two dimensional threads.
       Ceil-divide so the whole image is covered: Width/Mask_size
       truncates (1024/3 = 341 blocks -> only 1023 threads per axis),
       which left the last row and column unprocessed before. */
    dim3 grid((Width  + Mask_size - 1) / Mask_size,
              (Height + Mask_size - 1) / Mask_size);
    /* Number of threads per block is 3x3=9 */
    dim3 block(Mask_size, Mask_size);

    printf ("GPU Executing Convolution Kernel...\n") ;
    printf("\n");

    /* Kernel launch */
    ConvolutionKernel<<<grid, block>>>(d_image, d_mask1, d_mask2, d_output1, d_output2);

    /* Copying output images to host memory */
    cudaMemcpy(Output1, d_output1, (Width * Height) * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(Output2, d_output2, (Width * Height) * sizeof(int), cudaMemcpyDeviceToHost);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);  // Blocks CPU execution until the device finishes.

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Format strings fixed: the original printed a literal 'n' ("%fn").
    printf("GPU Execution Time for Convolution Kernel: %f\n", milliseconds);
    // N*4 is the total number of bytes transferred; x2 for read input + write output.
    printf("Effective Bandwidth (GB/s): %f\n", N * 4 * 2 / milliseconds / 1e6);
    printf("\n");

    // Events were leaked in the original.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    free(Image);
    free(Output1);
    free(Output2);
    free(mask1);
    free(mask2);
    cudaFree(d_image);
    cudaFree(d_mask1);
    cudaFree(d_mask2);
    cudaFree(d_output1);
    cudaFree(d_output2);
    return 0;
}
13,383
#include <stdlib.h>
#include <stdio.h>

#define TEST_SIZE 35 // (424*520)
#define RAND_RANGE 100
#define BLOCK_WIDTH 4 // 32
#define CEILING_DIVIDE(X, Y) (1 + (((X) - 1) / (Y)))

// Debug helper: copies a device array back to the host and prints it.
void printTest(unsigned int *d_arr, size_t size)
{
    unsigned int *h_arr;
    h_arr = (unsigned int*)malloc(sizeof(unsigned int) * size);
    cudaMemcpy(h_arr, d_arr, sizeof(unsigned int) * size, cudaMemcpyDeviceToHost);
    printf("h_testVals = [ ");
    for (int i = 0; i < size; i++) {
        printf("%2d ", h_arr[i]);
    }
    printf("];\n");
    free(h_arr);
}

// Computes a blockwise exclusive sum scan.
// d_total[bx] receives each block's element sum so the caller can
// later convert the per-block scans into a global scan (see totalScan).
// Precondition: d_out is zero-initialised — block-start elements are
// never written here; mapScan adds the carried block totals to them.
__global__ void partialScan(unsigned int *d_in, unsigned int *d_out,
                            unsigned int *d_total, size_t n)
{
    __shared__ unsigned int sdata[BLOCK_WIDTH];
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int index = BLOCK_WIDTH * bx + tx;

    // Pad the tail block with zeros.
    sdata[tx] = (index < n) ? d_in[index] : 0;
    __syncthreads();

    // Hillis-Steele inclusive scan.  Each round reads the partner
    // value into a register and barriers before writing.  The original
    // in-place `temp[tx + offset] += temp[tx]` was a data race: a
    // thread could read an element while its neighbour was updating it
    // in the same round.
    for (int offset = 1; offset < BLOCK_WIDTH; offset <<= 1) {
        unsigned int addend = (tx >= offset) ? sdata[tx - offset] : 0;
        __syncthreads();
        sdata[tx] += addend;
        __syncthreads();
    }

    // Shift right when copying out so the result is an exclusive scan.
    if (tx + 1 < BLOCK_WIDTH && index + 1 < n) {
        d_out[index + 1] = sdata[tx];
    }
    // One thread is enough; the original had every thread store this.
    if (index == 0) {
        d_out[0] = 0;
    }

    // Store the total sum of each block
    d_total[bx] = sdata[BLOCK_WIDTH - 1];
}

// Adds each block's scanned total to its elements, turning the
// blockwise scans into one global scan.
__global__ void mapScan(unsigned int *d_array, unsigned int *d_total, size_t n)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int index = BLOCK_WIDTH * bx + tx;
    if (index < n) {
        d_array[index] += d_total[bx];
    }
}

// Computes the radix-sort predicates for bit `bit`:
// d_ones[i] = 1 iff the bit is set in d_in[i]; d_zeros is the complement.
__global__ void mapPredicate(unsigned int *d_zeros, unsigned int *d_ones,
                             unsigned int *d_in, unsigned int bit, size_t n)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int index = BLOCK_WIDTH * bx + tx;
    if (index < n) {
        unsigned int isOne = (d_in[index] >> bit) & 1;
        d_ones[index] = isOne;
        d_zeros[index] = 1 - isOne;
    }
}

// Given the scanned addresses, performs the scatter step: zeros keep
// their scan address, ones land after all zeros (offset = zero count).
// Precondition: n >= 1 (offset reads element n-1).
__global__ void scatter(unsigned int *d_inVals, unsigned int *d_outVals,
                        unsigned int *d_inPos, unsigned int *d_outPos,
                        unsigned int *d_zerosScan, unsigned int *d_onesScan,
                        unsigned int *d_zerosPredicate, unsigned int *d_onesPredicate,
                        size_t n)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int index = BLOCK_WIDTH * bx + tx;
    // Total zero count = exclusive scan at n-1 plus the final predicate.
    int offset = d_zerosScan[n - 1] + d_zerosPredicate[n - 1];
    if (index < n) {
        int scatterIdx;
        if (d_zerosPredicate[index]) {
            scatterIdx = d_zerosScan[index];
        } else {
            scatterIdx = d_onesScan[index] + offset;
        }
        if (scatterIdx < n) { // sanity check
            d_outVals[scatterIdx] = d_inVals[index];
            d_outPos[scatterIdx] = d_inPos[index];
        }
    }
}

// Computes an exclusive sum scan for an arbitrary sized array (device
// pointers as input).  When more than one block is needed, recursively
// scans the per-block totals and folds them back in with mapScan.
void totalScan(unsigned int *d_in, unsigned int *d_out, size_t n)
{
    size_t numBlocks = CEILING_DIVIDE(n, BLOCK_WIDTH);
    unsigned int *d_total;
    cudaMalloc(&d_total, sizeof(unsigned int) * numBlocks);
    cudaMemset(d_total, 0, sizeof(unsigned int) * numBlocks);
    partialScan<<<numBlocks, BLOCK_WIDTH>>>(d_in, d_out, d_total, n);
    if (numBlocks > 1) {
        unsigned int *d_total_scanned;
        cudaMalloc(&d_total_scanned, sizeof(unsigned int) * numBlocks);
        cudaMemset(d_total_scanned, 0, sizeof(unsigned int) * numBlocks);
        totalScan(d_total, d_total_scanned, numBlocks);
        mapScan<<<numBlocks, BLOCK_WIDTH>>>(d_out, d_total_scanned, n);
        cudaFree(d_total_scanned);
    }
    cudaFree(d_total);
}

// LSD radix sort: sorts d_inputVals into d_outputVals, 1 bit per pass.
// The associated positions are moved accordingly.  All pointers are
// device memory.
void radix(unsigned int* const d_inputVals,
           unsigned int* const d_inputPos,
           unsigned int* const d_outputVals,
           unsigned int* const d_outputPos,
           const size_t numElems)
{
    unsigned int *d_inVals;
    unsigned int *d_inPos;
    unsigned int *d_zerosPredicate;
    unsigned int *d_onesPredicate;
    unsigned int *d_zerosScan;
    unsigned int *d_onesScan;

    size_t memsize = sizeof(unsigned int) * numElems;
    size_t numBlocks = CEILING_DIVIDE(numElems, BLOCK_WIDTH);

    cudaMalloc(&d_inVals, memsize);
    cudaMalloc(&d_inPos, memsize);
    cudaMalloc(&d_zerosPredicate, memsize);
    cudaMalloc(&d_onesPredicate, memsize);
    cudaMalloc(&d_zerosScan, memsize);
    cudaMalloc(&d_onesScan, memsize);

    cudaMemcpy(d_inVals, d_inputVals, memsize, cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_inPos, d_inputPos, memsize, cudaMemcpyDeviceToDevice);

    for (unsigned int bit = 0; bit < 32; bit++) {
        // Scans must start zeroed (see partialScan precondition).
        cudaMemset(d_zerosScan, 0, memsize);
        cudaMemset(d_onesScan, 0, memsize);

        mapPredicate<<<numBlocks, BLOCK_WIDTH>>>(
            d_zerosPredicate, d_onesPredicate, d_inVals, bit, numElems);

        totalScan(d_zerosPredicate, d_zerosScan, numElems);
        totalScan(d_onesPredicate, d_onesScan, numElems);

        scatter<<<numBlocks, BLOCK_WIDTH>>>(
            d_inVals, d_outputVals, d_inPos, d_outputPos,
            d_zerosScan, d_onesScan, d_zerosPredicate, d_onesPredicate,
            numElems);

        // Output of this pass is input to the next.
        cudaMemcpy(d_inVals, d_outputVals, memsize, cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_inPos, d_outputPos, memsize, cudaMemcpyDeviceToDevice);
    }

    cudaFree(d_inVals);
    cudaFree(d_inPos);
    cudaFree(d_zerosPredicate);
    cudaFree(d_onesPredicate);
    cudaFree(d_zerosScan);
    cudaFree(d_onesScan);
}

////////////////////////////////////////////////////////////////////////////////

// Wrapper for radix (host pointers as input).
void radixHost(unsigned int* const h_inputVals,
               unsigned int* const h_inputPos,
               unsigned int* const h_outputVals,
               unsigned int* const h_outputPos,
               const size_t numElems)
{
    unsigned int *d_inputVals;
    unsigned int *d_inputPos;
    unsigned int *d_outputVals;
    unsigned int *d_outputPos;

    size_t memsize = sizeof(unsigned int) * numElems;

    cudaMalloc(&d_inputVals, memsize);
    cudaMalloc(&d_inputPos, memsize);
    cudaMalloc(&d_outputVals, memsize);
    cudaMalloc(&d_outputPos, memsize);

    cudaMemcpy(d_inputVals, h_inputVals, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inputPos, h_inputPos, memsize, cudaMemcpyHostToDevice);

    radix(d_inputVals, d_inputPos, d_outputVals, d_outputPos, numElems);

    cudaMemcpy(h_outputVals, d_outputVals, memsize, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_outputPos, d_outputPos, memsize, cudaMemcpyDeviceToHost);

    cudaFree(d_inputVals);
    cudaFree(d_inputPos);
    cudaFree(d_outputVals);
    cudaFree(d_outputPos);
}

int main(int argc, char **argv)
{
    unsigned int *h_inVals;
    unsigned int *h_inPos;
    unsigned int *h_outVals;
    unsigned int *h_outPos;

    srand(0);  // fixed seed for reproducibility

    size_t memsize = sizeof(unsigned int) * TEST_SIZE;
    h_inVals  = (unsigned int*)malloc(memsize);
    h_inPos   = (unsigned int*)malloc(memsize);
    h_outVals = (unsigned int*)malloc(memsize);
    h_outPos  = (unsigned int*)malloc(memsize);

    // Test values 1..TEST_SIZE (swap in rand() % RAND_RANGE for random data)
    for (int i = 0; i < TEST_SIZE; i++) {
        h_inVals[i] = i + 1;
    }
    // Test positions 0 ... TEST_SIZE
    for (int i = 0; i < TEST_SIZE; i++) {
        h_inPos[i] = i;
    }

    // Compute
    radixHost(h_inVals, h_inPos, h_outVals, h_outPos, TEST_SIZE);

    // Print input
    printf("h_inVals = [ ");
    for (int i = 0; i < TEST_SIZE; i++) { printf("%2d ", h_inVals[i]); }
    printf("];\nh_inPos = [ ");
    for (int i = 0; i < TEST_SIZE; i++) { printf("%2d ", h_inPos[i]); }
    printf("];\n");

    // Print output
    printf("h_outVals = [ ");
    for (int i = 0; i < TEST_SIZE; i++) { printf("%2d ", h_outVals[i]); }
    printf("];\nh_outPos = [ ");
    for (int i = 0; i < TEST_SIZE; i++) { printf("%2d ", h_outPos[i]); }
    printf("];\n");

    free(h_inVals);
    free(h_inPos);
    free(h_outVals);
    free(h_outPos);
    return 0;
}
13,384
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
#include <thread>
#include <chrono>
#include <atomic>
using namespace std;

// Mapped, pinned buffers shared by the host and both GPUs.
int n;
int *a = NULL;
int *b = NULL;
int *c = NULL;
atomic<int> cnt(0);        // worker ready/done counter
// atomic<bool>: `run` is written by main and polled by two worker
// threads — a plain bool was a data race and gave no happens-before
// edge for the writes to n/a/b/c above.
atomic<bool> run(false);

// Adds a[i] + b[i] into c[i] for global indices in [l, r).
__global__ void kernel(int* a, int* b, int* c, int l, int r){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(l <= i && i < r) c[i] = a[i] + b[i];
}

// Worker thread: binds to GPU `id`, waits for the data to be ready,
// then adds its third of the vectors ([n/3*id, n/3*(id+1))).
void slave(int id){
    cudaSetDevice(id);
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cnt++;  // signal "ready"
    while(!run) this_thread::sleep_for(chrono::milliseconds(20));
    int l = n/3*id;
    int r = n/3*(id+1);
    // Global thread ids must reach r, so size the grid from r with a
    // ceil-divide.  (The original launched n/3+31 *blocks* of 32 —
    // roughly 32x more threads than needed.)
    kernel<<<(r+31)/32, 32>>>(a, b, c, l, r);
    cudaDeviceSynchronize();
    cnt++;  // signal "done"
}

int main(){
    ifstream in("input.txt");
    ofstream out("output.txt");
    thread slave0(slave, 0);
    thread slave1(slave, 1);
    // Wait for both workers to register before touching the devices' data.
    while(cnt != 2) this_thread::sleep_for(chrono::milliseconds(20));
    in >> n;
    cudaHostAlloc(&a, n*sizeof(int), cudaHostAllocMapped);
    cudaHostAlloc(&b, n*sizeof(int), cudaHostAllocMapped);
    cudaHostAlloc(&c, n*sizeof(int), cudaHostAllocMapped);
    for(int i = 0; i < n ; i++) in >> a[i];
    for(int i = 0; i < n ; i++) in >> b[i];
    // Release the workers; the atomic store publishes n/a/b/c.
    run = true;
    // Host handles the final third plus any remainder when 3 does not
    // divide n.
    for(int i = n/3*2; i < n; i++) c[i] = a[i] + b[i];
    if(slave0.joinable()) slave0.join();
    if(slave1.joinable()) slave1.join();
    for(int i = 0; i < n; i++) out << c[i] << ' ';
    // Pinned allocations were leaked before.
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
13,385
/*
###############################################
# Kernel1 -> out = sin(input1) + cos(input2)  #
# Kernel2 -> out = log(input)                 #
# Kernel3 -> out = sqrt(input)                #
# Input data in test.txt                      #
# Kirtan Mali                                 #
###############################################
*/

#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>

// All three kernels flatten a 3-D grid of 3-D blocks into one linear
// element index and guard against running past datasize.

// output[i] = sin(input1[i]) + cos(input2[i])
__global__ void process_kernel1(float *input1, float *input2, float *output, int datasize)
{
    int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
    int i = blockId * blockDim.x * blockDim.y * blockDim.z
          + threadIdx.z * blockDim.x * blockDim.y
          + threadIdx.y * blockDim.x + threadIdx.x;
    if (i < datasize) {
        output[i] = sin(input1[i]) + cos(input2[i]);
    }
}

// output[i] = log(input[i])
__global__ void process_kernel2(float *input, float *output, int datasize)
{
    int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
    int i = blockId * blockDim.x * blockDim.y * blockDim.z
          + threadIdx.z * blockDim.x * blockDim.y
          + threadIdx.y * blockDim.x + threadIdx.x;
    if (i < datasize) {
        output[i] = log(input[i]);
    }
}

// output[i] = sqrt(input[i])
__global__ void process_kernel3(float *input, float *output, int datasize)
{
    int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
    int i = blockId * blockDim.x * blockDim.y * blockDim.z
          + threadIdx.z * blockDim.x * blockDim.y
          + threadIdx.y * blockDim.x + threadIdx.x;
    if (i < datasize) {
        output[i] = sqrt(input[i]);
    }
}

int main()
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // The vector length to be used, and compute its size
    // ===============================
    int numElements = 16384;
    // ===============================
    size_t size = numElements * sizeof(float);

    // Allocate the host vectors
    float *h_input1 = (float *)malloc(size);
    float *h_input2 = (float *)malloc(size);
    float *h_output = (float *)malloc(size);

    // h_output was previously left unchecked.
    if (h_input1 == NULL || h_input2 == NULL || h_output == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors from stdin
    for (int i = 0; i < numElements; ++i) {
        scanf("%f", &h_input1[i]);
    }
    for (int i = 0; i < numElements; ++i) {
        scanf("%f", &h_input2[i]);
    }

    // Allocate the device input vector input1
    float *d_input1 = NULL;
    err = cudaMalloc((void **)&d_input1, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector input2
    float *d_input2 = NULL;
    err = cudaMalloc((void **)&d_input2, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the intermediate device vectors (previously unchecked)
    float *d_output1 = NULL;
    err = cudaMalloc((void **)&d_output1, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device intermediate vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_output2 = NULL;
    err = cudaMalloc((void **)&d_output2, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device intermediate vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector output
    float *d_output = NULL;
    err = cudaMalloc((void **)&d_output, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors to the device
    err = cudaMemcpy(d_input1, h_input1, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_input2, h_input2, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch configurations (each launches exactly 16384 threads:
    // 16 blocks x 1024 threads, in three different 3-D shapes).
    dim3 gridsize1(4, 2, 2);
    dim3 blocksize1(32, 32, 1);
    dim3 gridsize2(2, 8, 1);
    dim3 blocksize2(8, 8, 16);
    dim3 gridsize3(16, 1, 1);
    dim3 blocksize3(128, 8, 1);

    process_kernel1<<<gridsize1, blocksize1>>>(d_input1, d_input2, d_output1, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    process_kernel2<<<gridsize2, blocksize2>>>(d_output1, d_output2, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    process_kernel3<<<gridsize3, blocksize3>>>(d_output2, d_output, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result vector back to the host.
    err = cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);

    // Verify that the result vector is correct
    for (int i = 0; i < numElements; ++i) {
        if (fabs(sqrt(log(sin(h_input1[i]) + cos(h_input2[i]))) - h_output[i]) > 1e-5) {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
        printf("%0.2f ", h_output[i]);
    }
    printf("\n");

    // Release device and host memory (previously leaked).
    cudaFree(d_input1);
    cudaFree(d_input2);
    cudaFree(d_output1);
    cudaFree(d_output2);
    cudaFree(d_output);
    free(h_input1);
    free(h_input2);
    free(h_output);

    return 0;
}
13,386
#include "includes.h"

// Batch-norm style forward pass over a tensor laid out as
// x[(batch * C + plane) * S + n], i.e. (N, C, S).
// One block per channel `plane` (launch with gridDim.x == C); threads
// stride over the spatial dimension S.
//   y = (x - mean[plane]) / sqrt(var[plane] + eps)   (normalized)
//   z = y * gamma + beta                             (scaled/shifted)
// gamma = |weight[plane]| + eps when weight is non-null, else 1;
// beta = bias[plane] when bias is non-null, else 0.
__global__ void forward_kernel(const float *x, const float *mean, const float *var, const float *weight, const float *bias, float *y, float *z, float eps, int N, int C, int S)
{
    int plane = blockIdx.x;
    float _mean = mean[plane];
    float _var = var[plane];
    // Guard against division by zero when both var and eps are 0.
    float invStd = 0;
    if (_var != 0.f || eps != 0.f) {
        // sqrtf keeps the math in float (the original 1 / sqrt(...)
        // promoted to double).
        invStd = 1.0f / sqrtf(_var + eps);
    }
    // fabsf: the original called abs(), which can resolve to the
    // integer overload and truncate the weight to an int.
    float gamma = weight != 0 ? fabsf(weight[plane]) + eps : 1.f;
    float beta = bias != 0 ? bias[plane] : 0.f;
    for (int batch = 0; batch < N; ++batch) {
        for (int n = threadIdx.x; n < S; n += blockDim.x) {
            float _x = x[(batch * C + plane) * S + n];
            float _y = (_x - _mean) * invStd;
            float _z = _y * gamma + beta;
            y[(batch * C + plane) * S + n] = _y;
            z[(batch * C + plane) * S + n] = _z;
        }
    }
}
13,387
#include "library.cuh"

// Minimal no-op kernel; useful as a smoke test that device code can be
// compiled and launched (e.g. hello<<<1, 1>>>()).
__global__ void hello() {}
13,388
#include "includes.h"

// Dual ascent step of a primal-dual total-variation solver on an
// X x Y image: p += sigma * grad(u_), then reprojection so that
// |p| <= lambda pointwise.  One thread per pixel.
__global__ void dual(float* p1, float* p2, const float* u_, const double lambda, const double sigma, const int X, const int Y)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    // Bounds guard: the grid may overshoot the image; without it the
    // excess threads read and write out of bounds (the original only
    // guarded the gradient reads, not the p1/p2 updates).
    if (x >= X || y >= Y) return;

    // center point
    int c = y*X + x;

    // Forward differences; zero at the far image border.
    float nabla_x = 0.0f;
    float nabla_y = 0.0f;
    if (x < X-1) nabla_x = u_[c+1]-u_[c];
    if (y < Y-1) nabla_y = u_[c+X]-u_[c];

    // Alternative (anisotropic) projection, kept for reference:
    //p1[c] = fmaxf(-lambda, fminf(lambda, p1[c] + sigma*nabla_x));
    //p2[c] = fmaxf(-lambda, fminf(lambda, p2[c] + sigma*nabla_y));

    // Ascent step followed by projection onto the ball of radius lambda.
    p1[c] += sigma*nabla_x;
    p2[c] += sigma*nabla_y;
    float denom = fmaxf(1.0f, sqrtf(p1[c]*p1[c] + p2[c]*p2[c])/lambda);
    p1[c] /= denom;
    p2[c] /= denom;
}
13,389
/**
 * @author NageshAC
 * @email nagesh.ac.aralaguppe@fau.de
 * @create date 2021-08-10 12:11:29
 * @modify date 2021-08-10 12:11:29
 * @desc Contains few operators definitions used in this project.
 */

#pragma once
#include<iostream>
#include<cmath>
#include<cuda_runtime.h>

//**************************************************************
// copy function: x[i] = y[i] for i in [0, n)
//**************************************************************
__device__ __host__
inline void copy(double* x, const double* y, int n = 3){
    for(int i=0; i<n; i++) x[i] = y[i];
}

//**************************************************************
// l2 norm
//**************************************************************
__device__
inline double norm(const double* x, int dim = 3){
    double result = 0;
    // x[i]*x[i] instead of pow(x[i], 2): same value, no libm call.
    for(int i=0; i<dim; i++){
        result += x[i]*x[i];
    }
    return sqrt(result);
}

// squared l2 norm
__device__
inline double norm2(const double* x, int dim = 3){
    double result = 0;
    for(int i=0; i<dim; i++){
        result += x[i]*x[i];
    }
    return (result);
}

//**************************************************************
// axpy: y += a * x
// (fixed: the loop previously iterated over a hard-coded 3 and
// ignored the dim parameter)
//**************************************************************
__device__
inline void axpy(const double a, const double* x, double* y, int dim = 3){
    for(int i=0; i<dim; i++)
        y[i] += a * x[i];
}

//**************************************************************
// vector const multiplication: x *= *c elementwise
//**************************************************************
__device__
inline void multiply(const double* c, double* x, int dim = 3){
    for(auto i=0; i<dim; i++) x[i] *= *c;
}

// r = c * x elementwise
__device__
inline void multiply(double* r, const double c, const double* x, int dim = 3){
    for(auto i=0; i<dim; i++) r[i] = x[i] * (c);
}

//**************************************************************
// vector vector subtraction: x -= y
// (fixed: loop now honours dim instead of a hard-coded 3)
//**************************************************************
__device__
inline void subtract(double* x, const double* y, int dim = 3){
    for(int i=0; i<dim; i++) x[i] = x[i] - y[i];
}

// r = x - y  (fixed: loop now honours dim)
__device__
inline void subtract(double* r, const double* x, const double* y, int dim = 3){
    for(int i=0; i<dim; i++) r[i] = x[i] - y[i];
}

//**************************************************************
// vector vector addition: x += y
// (fixed: loop now honours dim)
//**************************************************************
__device__
inline void add(double* x, const double* y, int dim = 3){
    for(int i=0; i<dim; i++) x[i] = x[i] + y[i];
}

// r = x + y  (fixed: loop now honours dim)
__device__
inline void add(double* r, const double* x, const double* y, int dim = 3){
    for(int i=0; i<dim; i++) r[i] = x[i] + y[i];
}
13,390
#include <iostream>
#include <cstdlib>
#include <math.h>
#include <stdio.h>
#include <assert.h>
#include <fstream>
#include <time.h>

// Clamp-to-edge 2-D convolution; one thread per output pixel.
__global__ void image_convolution_kernel(float *input, float *out, float *kernelConv,
                                         int img_width, const int img_height,
                                         const int kernel_width, const int kernel_height)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if ((x < img_width) && (y < img_height)) {
        float sum = 0;
        for (int j = 0; j < kernel_height; j++) {
            for (int i = 0; i < kernel_width; i++) {
                int dX = x + i - kernel_width / 2;
                int dY = y + j - kernel_height / 2;
                // clamp neighbour coordinates to the image border
                if (dX < 0) dX = 0;
                if (dX >= img_width) dX = img_width - 1;
                if (dY < 0) dY = 0;
                if (dY >= img_height) dY = img_height - 1;
                const int idMat = j * kernel_width + i;
                const int idPixel = dY * img_width + dX;
                sum += input[idPixel] * kernelConv[idMat];
            }
        }
        const int idOut = y * img_width + x;
        // fabsf: the original used abs(), which can resolve to the
        // integer overload and truncate.
        out[idOut] = fabsf(sum);
    }
}

// Runs one random r x r convolution over an img_height x img_width
// image on the GPU and reports the elapsed time in gpu_elapsed_time_ms.
void MC(float *input, float *output, int img_height, int img_width, const int r,
        float &gpu_elapsed_time_ms)
{
    int kernel_height = r;
    int kernel_width = r;

    // Random mask values in [1, 10].  (The original built the same
    // data twice — `kernel` then `mask` — and leaked both arrays.)
    float *mask = new float[kernel_height * kernel_width];
    for (int i = 0; i < kernel_height * kernel_width; i++) {
        mask[i] = rand() % 10 + 1;
    }

    float *d_input, *d_output, *d_kernel;
    cudaMalloc(&d_input, img_width * img_height * sizeof(float));
    cudaMalloc(&d_output, img_width * img_height * sizeof(float));
    cudaMalloc(&d_kernel, kernel_height * kernel_width * sizeof(float));

    cudaMemcpy(d_input, input, img_width * img_height * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, mask, kernel_height * kernel_width * sizeof(float), cudaMemcpyHostToDevice);

    dim3 blocksize(16, 16);
    dim3 gridsize;
    gridsize.x = (img_width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (img_height + blocksize.y - 1) / blocksize.y;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    image_convolution_kernel<<<gridsize, blocksize>>>(d_input, d_output, d_kernel,
                                                      img_width, img_height,
                                                      kernel_width, kernel_height);
    cudaMemcpy(output, d_output, img_width * img_height * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);

    // Release per-call resources — the original leaked the device
    // buffers and events on every one of the 500 benchmark iterations.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_input);
    cudaFree(d_output);
    cudaFree(d_kernel);
    delete[] mask;
}

int main()
{
    // open the output file
    std::ofstream ofile;
    // customize output filename
    ofile.open("matrix_conv_gpu_500_points_Quadro.csv");

    // number of instances of data generated
    int NUM = 500;
    for (int iterator = 0; iterator < NUM; iterator++) {
        if (iterator % 10 == 0) std::cout << "iter: " << iterator << std::endl;

        float *in, *out;
        int m = rand() % 1024 + 10;
        int n = rand() % 1024 + 10;
        int is = n * m;
        int r = (rand() % 3 + 1) * 2 + 1;
        // Value-initialise: the sparse branch below only fills a
        // fraction of the cells, and the kernel reads every cell —
        // plain new[] left the rest uninitialised (UB).
        in = new float[is]();
        out = new float[is]();

        // density
        int power;
        double d;
        power = rand() % int((log2(double(m * n)) + 1));
        d = 1 / pow(2, power);

        // initialize matrix A
        if (d <= 0.5) {
            // if A is a sparse matrix
            int count_a = m * n * d;
            for (int it = 0; it < count_a; it++) {
                int i = rand() % m;
                int j = rand() % n;
                in[i * n + j] = rand() % 1024 + 1;
            }
        } else {
            // if A is a dense matrix
            for (int i = 0; i < m * n; i++) {
                in[i] = rand() % 1024 + 1;
            }
        }

        float time;
        // perform kernel operation
        MC(in, out, n, m, r, time);

        int c = (m - r + 1) * (n - r + 1) * r * r;
        ofile << time / 1000;
        ofile << "," << m << "," << n << "," << r << "," << d << "," << c << ",\n";

        // host arrays were leaked each iteration before
        delete[] in;
        delete[] out;
    }
    ofile.close();
    return 0;
}
13,391
#include <stdio.h>
#include <cuda_runtime.h>

#define MatrixWidth 10

// GPU matrix add: one thread per element, single block
// (launch with blockDim = MatrixWidth x MatrixWidth).
__global__ void mat_add(float *A, float *B, float *C, int N)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    C[row*MatrixWidth+col] = A[row*MatrixWidth+col] + B[row*MatrixWidth+col];
}

// GPU matrix sub
__global__ void mat_sub(float *A, float *B, float *C, int N)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    C[row*MatrixWidth+col] = A[row*MatrixWidth+col] - B[row*MatrixWidth+col];
}

// GPU matrix mult: one thread per output element.
__global__ void mat_mult(float *A, float *B, float *C, int N)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    // Accumulate in a register and assign once: the original
    // read-modify-wrote global memory on every iteration and silently
    // relied on C being zeroed by the caller beforehand.
    float sum = 0;
    for (int i = 0; i < MatrixWidth; i++) {
        sum += A[row*MatrixWidth+i] * B[i*MatrixWidth+col];
    }
    C[row*MatrixWidth+col] = sum;
}

// CPU matrix add
void mat_add_serial(float *A, float *B, float *C, int N)
{
    for (int i = 0; i < N; i++) {
        C[i] = A[i] + B[i];
    }
}

// CPU matrix sub
void mat_sub_serial(float *A, float *B, float *C, int N)
{
    for (int i = 0; i < N; i++) {
        C[i] = A[i] - B[i];
    }
}

// CPU matrix mult (reference implementation)
void mat_mult_serial(float *A, float *B, float *C)
{
    for (int i = 0; i < MatrixWidth; i++) {
        for (int j = 0; j < MatrixWidth; j++) {
            float sum = 0;
            for (int k = 0; k < MatrixWidth; k++) {
                sum += A[i*MatrixWidth+k] * B[k*MatrixWidth+j];
            }
            C[i*MatrixWidth+j] = sum;
        }
    }
}

int main(){
    float *A, *B, *C;
    float *d_A, *d_B, *d_C;
    int Size = MatrixWidth * MatrixWidth;

    A = (float *)malloc(Size*sizeof(float));
    B = (float *)malloc(Size*sizeof(float));
    C = (float *)malloc(Size*sizeof(float));

    // Allocate input memory on GPU
    cudaMalloc(&d_A, Size*sizeof(float));
    cudaMalloc(&d_B, Size*sizeof(float));
    cudaMalloc(&d_C, Size*sizeof(float));

    // init A, B and C: A == B == the matrix whose flat element k is k.
    for (int i = 0; i < MatrixWidth; i++) {
        for (int j = 0; j < MatrixWidth; j++) {
            A[i+j*MatrixWidth] = i + j*MatrixWidth;
            B[i+j*MatrixWidth] = i + j*MatrixWidth;
            C[i+j*MatrixWidth] = 0;
        }
    }
    cudaMemcpy(d_A, A, Size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, Size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, Size*sizeof(float), cudaMemcpyHostToDevice);

    // Single block of MatrixWidth x MatrixWidth threads.
    dim3 BlocksPerGrid(1, 1);
    dim3 ThreadsPerBlock(MatrixWidth, MatrixWidth);

    // init for cuda timer
    cudaEvent_t start;
    cudaEvent_t end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start, 0);

    //mat_add<<< BlocksPerGrid,ThreadsPerBlock>>>(d_A,d_B,d_C,Size);
    //mat_sub<<< BlocksPerGrid,ThreadsPerBlock>>>(d_A,d_B,d_C,Size);
    mat_mult<<< BlocksPerGrid,ThreadsPerBlock>>>(d_A,d_B,d_C,Size);
    //mat_add_serial(A,B,C,Size);
    //mat_sub_serial(A,B,C,Size);
    //mat_mult_serial(A,B,C);

    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    float time;
    cudaEventElapsedTime(&time, start, end);

    // Read C from device
    cudaMemcpy(C, d_C, Size*sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < MatrixWidth; i++) {
        for (int j = 0; j < MatrixWidth; j++) {
            printf("%f ", C[i*MatrixWidth+j]);
        }
        printf("\n");
    }
    printf("Time: %f \n\n", time);

    cudaEventDestroy(start);
    cudaEventDestroy(end);
    free(A);
    free(B);
    free(C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    getchar();  // keep the console window open
    return 0;
}
13,392
#include <sys/utsname.h>

// Includes, system
#include <stdio.h>
#include <cassert>

// Includes CUDA
#include <cuda_runtime.h>

const char *sampleName = "simpleAssert";

////////////////////////////////////////////////////////////////////////////////
// Auto-Verification Code
// True iff the kernel produced the expected cudaErrorAssert.
bool testResult = true;

////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
//! Tests the device-side assert function: every thread whose global id
//! is >= N trips the assertion and prints a failure message.
////////////////////////////////////////////////////////////////////////////////
__global__ void testKernel(int N)
{
    int gtid = blockIdx.x * blockDim.x + threadIdx.x;
    assert(gtid < N);
}

// Launches testKernel on a 1-D grid of 2 blocks x 32 threads with
// N == 60, so the last four threads assert; the resulting
// cudaErrorAssert is the expected outcome and sets testResult.
void simpleAssert(void)
{
    // Kernel configuration: one-dimensional grid, one-dimensional blocks.
    dim3 dimGrid(2);
    dim3 dimBlock(32);

    printf("Launch kernel to generate assertion failures\n");
    testKernel<<<dimGrid, dimBlock>>>(60);

    // Synchronize (flushes assert output).
    printf("\n-- Begin assert output\n\n");
    cudaError_t error = cudaDeviceSynchronize();
    printf("\n-- End assert output\n\n");

    // Check for errors and failed asserts in asynchronous kernel launch.
    if (error == cudaErrorAssert) {
        printf("Device assert failed as expected, "
               "CUDA error message is: %s\n\n",
               cudaGetErrorString(error));
    }

    testResult = error == cudaErrorAssert;
}
13,393
#include <stdio.h>
#include <stdlib.h>

//#define DEVICE_ALLOC
#define UVM_ALLOC
//#define HOST_ALLOC

//#define SIZE (2048 * 4)
//#define SIZE (1024 * 1024)
//#define SIZE (1024 * 1024 * 1024)
#define SIZE (1024 * 1024 * 1024L * 2)

// Writes input[i] = i for every element in [0, size); used to measure the
// effective write bandwidth of the selected allocation type.
// 64-bit index avoids overflow for sizes beyond 2^32 elements.
__global__ void kernel(int *input, unsigned long long size)
{
    unsigned long long i =
        (unsigned long long)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size)
        input[i] = (int)i;
}

// Times a single launch of kernel() and returns the elapsed time in ms.
// Creates and destroys its own events (the original re-created events for
// every round without destroying them, leaking event handles).
static float timeKernelMs(int *d_input, unsigned long long n,
                          unsigned long long blocks, unsigned threads)
{
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start, 0);
    kernel<<<blocks, threads>>>(d_input, n);
    cudaEventRecord(end, 0);
    // cudaEventSynchronize blocks until the kernel and the end event have
    // completed; no separate (deprecated) cudaThreadSynchronize is needed.
    cudaEventSynchronize(end);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, end);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    return ms;
}

int main()
{
    int *d_input;

#if defined(DEVICE_ALLOC)
    cudaMalloc(&d_input, SIZE * sizeof(int));
#elif defined(UVM_ALLOC)
    cudaMallocManaged(&d_input, SIZE * sizeof(int));
#elif defined(HOST_ALLOC)
    cudaMallocHost(&d_input, SIZE * sizeof(int));
#else
    return 0;
#endif

    // Initialize the buffer with host data (staged copy for device memory,
    // direct writes for managed/pinned memory).
#if defined(DEVICE_ALLOC)
    int *h_input = (int *)malloc(SIZE * sizeof(int));
    for (unsigned long long i = 0; i < SIZE; i++)
    {
        h_input[i] = rand() % 10;
    }
    cudaMemcpy(d_input, h_input, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    free(h_input); // was leaked in the original
#elif defined(UVM_ALLOC) || defined(HOST_ALLOC)
    for (unsigned long long i = 0; i < SIZE; i++)
    {
        d_input[i] = rand() % 10;
    }
#endif

    unsigned ThreadNum = 256;
    unsigned long long BlockNum = (SIZE - 1) / ThreadNum + 1; // ceil-div

    // Three timed rounds: the first typically includes UVM page-fault /
    // migration cost, the later rounds show steady-state bandwidth.
    float elapsed_time1 = timeKernelMs(d_input, SIZE, BlockNum, ThreadNum);
    float elapsed_time2 = timeKernelMs(d_input, SIZE, BlockNum, ThreadNum);
    float elapsed_time3 = timeKernelMs(d_input, SIZE, BlockNum, ThreadNum);

    double AvgTP1 = (double)SIZE * sizeof(int) / (elapsed_time1 / 1000.0) / 1e9;
    double AvgTP2 = (double)SIZE * sizeof(int) / (elapsed_time2 / 1000.0) / 1e9;
    double AvgTP3 = (double)SIZE * sizeof(int) / (elapsed_time3 / 1000.0) / 1e9;
    printf("Average throughput: %f GB/s, %f GB/s, %f GB/s\n",
           AvgTP1, AvgTP2, AvgTP3);
    printf("Time: %f ms, %f ms, %f ms\n",
           elapsed_time1, elapsed_time2, elapsed_time3);

    // Pinned host memory must be released with cudaFreeHost, not cudaFree.
#if defined(HOST_ALLOC)
    cudaFreeHost(d_input);
#else
    cudaFree(d_input);
#endif
    return 0;
}
13,394
/*
** Hello World using CUDA
**
** The string "Hello World!" is mangled then restored using a common CUDA idiom
**
** Byron Galbraith
** 2009-02-18
*/
#include <cuda.h>
#include <stdio.h>

// Prototypes
__global__ void helloWorld(char*);
void devicenfo(void);

// Host function: mangles "Hello World!" on the host, un-mangles it on the
// device (one thread per character), then prints the restored string.
int main(int argc, char** argv)
{
    int i;

    // Prints out device info
    devicenfo();

    // desired output
    char str[] = "Hello World!";

    // mangle contents of output; the null terminator is left intact
    for (i = 0; i < 12; i++)
        str[i] -= i;

    // allocate memory on the device
    char *d_str;
    size_t size = sizeof(str);
    cudaMalloc((void**)&d_str, size);

    // copy the string to the device
    cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice);

    // 2 blocks x 6 threads = 12 threads, one per character
    dim3 dimGrid(2);
    dim3 dimBlock(6);

    // invoke the kernel
    helloWorld<<< dimGrid, dimBlock >>>(d_str);

    // retrieve the results from the device (blocking copy also waits for
    // the kernel to finish)
    cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost);

    // free up the allocated memory on the device
    cudaFree(d_str);

    // everyone's favorite part
    printf("%s\n", str);

    return 0;
}

// Device kernel: each thread restores one character by adding back the
// offset that main() subtracted.
__global__ void helloWorld(char* str)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    str[idx] += idx;
}

// Device info: queries and prints the capabilities of device 0.
void devicenfo(void)
{
    struct cudaDeviceProp capabilities;

    cudaGetDeviceProperties(&capabilities, 0);

    printf("->CUDA Platform & Capabilities\n");
    printf("Name: %s\n", capabilities.name);
    printf("totalGlobalMem: %.2f MB\n",
           capabilities.totalGlobalMem / 1024.0f / 1024.0f);
    printf("sharedMemPerBlock: %.2f KB\n",
           capabilities.sharedMemPerBlock / 1024.0f);
    printf("regsPerBlock (32 bits): %d\n", capabilities.regsPerBlock);
    printf("warpSize: %d\n", capabilities.warpSize);
    printf("memPitch: %.2f KB\n", capabilities.memPitch / 1024.0f);
    printf("maxThreadsPerBlock: %d\n", capabilities.maxThreadsPerBlock);
    printf("maxThreadsDim: %d x %d x %d\n",
           capabilities.maxThreadsDim[0],
           capabilities.maxThreadsDim[1],
           capabilities.maxThreadsDim[2]);
    printf("maxGridSize: %d x %d\n",
           capabilities.maxGridSize[0], capabilities.maxGridSize[1]);
    printf("totalConstMem: %.2f KB\n", capabilities.totalConstMem / 1024.0f);
    printf("major.minor: %d.%d\n", capabilities.major, capabilities.minor);
    // BUG FIX: clockRate is reported in kHz, so MHz = clockRate / 1000
    // (the original divided by 1024).
    printf("clockRate: %.2f MHz\n", capabilities.clockRate / 1000.0f);
    // BUG FIX: textureAlignment is size_t; "%d" is undefined behavior on
    // LP64 platforms — use %zu.
    printf("textureAlignment: %zu\n", capabilities.textureAlignment);
    printf("deviceOverlap: %d\n", capabilities.deviceOverlap);
    printf("multiProcessorCount: %d\n", capabilities.multiProcessorCount);
}
13,395
#include "includes.h"

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
// Grid-stride loop: each thread starts at its global index and advances by
// the total thread count, so any grid size covers arbitrarily large N.
__global__ void big_add(int *a, int *b, int *c, unsigned int N)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < N;
         idx += stride)
    {
        c[idx] = a[idx] + b[idx];
    }
}
13,396
// filename: integration.cu
// Performs the inner integration loop
extern "C" {

// Evaluates a fixed 36-term bivariate polynomial p(i, j) (mixed powers of i
// and j up to degree 8) using the caller-precomputed powers, and returns 1
// when |p(i, j)| < 1, else 0. The powers isq2..isq8 / jsq2..jsq8 are i^2..i^8
// and j^2..j^8 respectively; passing them in avoids recomputing them for
// every term.
__device__ int innerFunc(const float *coefs, const float i, const float isq2,
                         const float isq3, const float isq4, const float isq5,
                         const float isq6, const float isq7, const float isq8,
                         const float j, const float jsq2, const float jsq3,
                         const float jsq4, const float jsq5, const float jsq6,
                         const float jsq7, const float jsq8)
{
    return abs(coefs[0]*(jsq2) + coefs[1]*(jsq3) + coefs[2]*(jsq4) +
               coefs[3]*(jsq5) + coefs[4]*jsq6 + coefs[5]*jsq7 +
               coefs[6]*jsq8 + coefs[7]*(i) + coefs[8]*(i)*(jsq2) +
               coefs[9]*i*jsq3 + coefs[10]*(i)*(jsq4) + coefs[11]*i*jsq5 +
               coefs[12]*(i)*(jsq6) + coefs[13]*i*jsq7 + coefs[14]*(isq2) +
               coefs[15]*(isq2)*(jsq2) + coefs[16]*isq2*jsq3 +
               coefs[17]*(isq2)*(jsq4) + coefs[18]*isq2*jsq5 +
               coefs[19]*(isq2)*(jsq6) + coefs[20]*(isq3) +
               coefs[21]*(isq3)*(jsq2) + coefs[22]*isq3*jsq3 +
               coefs[23]*(isq3)*(jsq4) + coefs[24]*isq3*jsq5 +
               coefs[25]*(isq4) + coefs[26]*(isq4)*(jsq2) +
               coefs[27]*isq4*jsq3 + coefs[28]*(isq4)*(jsq4) +
               coefs[29]*(isq5) + coefs[30]*(isq5)*(jsq2) +
               coefs[31]*isq5*jsq3+ coefs[32]*(isq6) +
               coefs[33]*(isq6)*(jsq2) + coefs[34]*(isq7) +
               coefs[35]*(isq8))<1;
}

// Counts, per thread, how many sample points of the iArr x jArr grid satisfy
// |p(i, j)| < 1 and stores the count in tmp[index].
//
// The sizei*sizej grid (flattened row-major, j fastest) is split into
// equal chunks of `equalDiv` consecutive cells; each thread processes the
// chunk starting at index*equalDiv+startIdx. startIdx lets a host driver
// shard the work across multiple launches/devices.
__global__ void integration(const float *coefs, const float *iArr,
                            const float *jArr, const int sizei,
                            const int sizej, const int equalDiv,
                            const int startIdx, int *tmp)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int globalIndex = index*equalDiv+startIdx;
    int loopInd;
    float i;
    float j;
    float isq2;
    float isq3;
    float isq4;
    float isq5;
    float isq6;
    float isq7;
    float isq8;
    float jsq2;
    float jsq3;
    float jsq4;
    float jsq5;
    float jsq6;
    float jsq7;
    float jsq8;
    int ans = 0;
    for(loopInd=0;loopInd<equalDiv;loopInd=loopInd+1){
        // Row index selects i, column index selects j (j varies fastest).
        i = iArr[(globalIndex+loopInd)/sizej];
        j = jArr[(globalIndex+loopInd)%sizej];
        // Stop at the end of the flattened grid (last chunk may be short).
        // NOTE(review): iArr/jArr are read one cell before this bounds
        // check — harmless only if the arrays are padded; verify on host.
        if(globalIndex+loopInd >= sizei*sizej){
            break;
        }
        // Powers of i only change when the chunk crosses into a new row
        // (column index wraps to 0) or on the first iteration.
        if((globalIndex+loopInd)%sizej==0 || loopInd==0){
            isq2 = i*i;
            isq3 = i*isq2;
            isq4 = isq2*isq2;
            isq5 = i*isq4;
            isq6 = isq4*isq2;
            isq7 = i*isq6;
            isq8 = isq4*isq4;
        }
        // Powers of j are recomputed every iteration (j changes each cell).
        jsq2 = j*j;
        jsq3 = j*jsq2;
        jsq4 = jsq2*jsq2;
        jsq5 = j*jsq4;
        jsq6 = jsq2*jsq4;
        jsq7 = j*jsq6;
        jsq8 = jsq4*jsq4;
        ans = ans + innerFunc(coefs,i,isq2,isq3,isq4,isq5,isq6,isq7,isq8,
                              j,jsq2,jsq3,jsq4,jsq5,jsq6,jsq7,jsq8);
    }
    tmp[index] = ans;
}

}
13,397
/*#include "device_atomic_functions.h"
__global__ void inner_product1_GPU(double kk, double *a, double *b, int N, int N_ln)
{
    kk = 0.0;
    __shared__ double temp[128];
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    //for ( i = 0; i < N_ln*N_ln; i++)
    if (i < N_ln*N_ln) temp[threadIdx.x] = a[i] * b[i];
    __syncthreads();
    //parallel reduction
    //for (int j = threadIdx.x; j < N_ln*N_ln ; j += blockDim.x)
    return;
}
__global__ void inner_product2_GPU(double kk, double *a, double *b, int N, int N_ln)
{
    kk = 0.0;
    __shared__ double temp[128];
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    const int j = blockIdx.y*blockDim.y + threadIdx.y;
    //for ( i = 0; i < N_ln; i++)
    //for ( j = 0; j < N_ln; j++)
    if (i < N_ln && j < N_ln) kk += a[N * (i + 1) + (j + 1)] * b[N_ln * i + j]
    return;
}
*/

// 5-point Laplacian stencil on the N_ln x N_ln interior of the padded
// N x N grid x; result goes into the unpadded N_ln x N_ln array La.
// Expects a 2D launch covering at least N_ln x N_ln threads.
__global__ void laplacian_GPU(double *La, double *x, double dx, double dy, int N, int N_ln)
{
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < N_ln && c < N_ln)
    {
        // Neighbors in the padded grid: (r, c) interior cell maps to
        // padded index (r + 1, c + 1).
        const double up    = x[N * r + (c + 1)];
        const double down  = x[N * (r + 2) + (c + 1)];
        const double left  = x[N * (r + 1) + c];
        const double right = x[N * (r + 1) + (c + 2)];
        const double mid   = x[N * (r + 1) + (c + 1)];
        La[N_ln * r + c] = (up + down + left + right - 4.0 * mid) / (dx * dy);
    }
    return;
}

// Y += a*X over all N*N elements (1D launch).
__global__ void YPEAX_GPU(double *y, double *x, double a, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N * N)
        y[idx] += a * x[idx];
    return;
}

// Y = a*Y + X on the interior: y is the padded N x N grid, x is the
// unpadded N_ln x N_ln grid (2D launch).
__global__ void YEAYPX_GPU(double *y, double *x, double a, int N, int N_ln)
{
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < N_ln && c < N_ln)
    {
        const int padded = N * (r + 1) + (c + 1);
        y[padded] = a * y[padded] + x[N_ln * r + c];
    }
    return;
}

// Host reference inner product.
// type == 0: plain dot product of two N_ln^2-length vectors.
// otherwise: dot product of the interior of padded a (N x N) with
//            unpadded b (N_ln x N_ln).
double inner_product(double *a, double *b, int type, int N, int N_ln)
{
    double acc = 0.0;
    if (type == 0)
    {
        for (int k = 0; k < N_ln * N_ln; k++)
            acc += a[k] * b[k];
        return acc;
    }
    for (int r = 0; r < N_ln; r++)
        for (int c = 0; c < N_ln; c++)
            acc += a[N * (r + 1) + (c + 1)] * b[N_ln * r + c];
    return acc;
}

// Host reference 5-point Laplacian (same indexing as laplacian_GPU).
void laplacian(double *La, double *x, double dx, double dy, int N, int N_ln)
{
    for (int r = 0; r < N_ln; r++)
    {
        for (int c = 0; c < N_ln; c++)
        {
            La[N_ln * r + c] = (x[N * r + (c + 1)] +
                                x[N * (r + 2) + (c + 1)] +
                                x[N * (r + 1) + c] +
                                x[N * (r + 1) + (c + 2)] -
                                4.0 * x[N * (r + 1) + (c + 1)]) / (dx * dy);
        }
    }
    return;
}

// Host reference Y = a*Y + X on the interior (same indexing as YEAYPX_GPU).
void YEAYPX(double *y, double *x, double a, int N, int N_ln)
{
    for (int r = 0; r < N_ln; r++)
    {
        for (int c = 0; c < N_ln; c++)
        {
            const int padded = N * (r + 1) + (c + 1);
            y[padded] = a * y[padded] + x[N_ln * r + c];
        }
    }
    return;
}
13,398
/* Example of using threads and blocks in a CUDA program */
#include <stdio.h>
#include <time.h>

#define N 16
#define THREADS_PER_BLOCK 8
#define RADIUS 2

// Element-wise vector add; caller must launch exactly one thread per element.
__global__ void add(int *a, int *b, int *c)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    c[index] = a[index] + b[index];
}

// Fills a[0..n) with pseudo-random ints.
void random_ints(int *a, int n)
{
    int i;
    for (i = 0; i < n; ++i)
    {
        a[i] = rand();
    }
}

// 1D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS].
// Fixes vs. the original:
//  - shared tile sized by THREADS_PER_BLOCK (the actual blockDim), not the
//    unrelated BLOCK_SIZE=5 constant (which caused out-of-bounds shared writes)
//  - right halo loaded at offset THREADS_PER_BLOCK for the same reason
//  - __syncthreads() restored between the shared-memory writes and reads
//    (it was commented out, a data race)
//  - halo reads outside [0, N) clamped to 0 instead of reading out of bounds
__global__ void stencil_1d(int *in, long int *out)
{
    __shared__ int temp[THREADS_PER_BLOCK + 2 * RADIUS];
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    int lindex = threadIdx.x + RADIUS;

    // Center element plus cooperative halo load by the first RADIUS threads.
    temp[lindex] = in[gindex];
    if (threadIdx.x < RADIUS)
    {
        temp[lindex - RADIUS] =
            (gindex - RADIUS >= 0) ? in[gindex - RADIUS] : 0;
        temp[lindex + THREADS_PER_BLOCK] =
            (gindex + THREADS_PER_BLOCK < N) ? in[gindex + THREADS_PER_BLOCK] : 0;
    }
    __syncthreads();  // all shared writes must land before any thread reads

    long int result = 0;
    for (int offset = -RADIUS; offset <= RADIUS; offset++)
    {
        result += temp[lindex + offset];
    }
    out[gindex] = result;
}

int main(void)
{
    int *a;
    long int *c;
    int *d_a;
    long int *d_c;
    int size = N * sizeof(int);

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_c, N * sizeof(long int));

    a = (int *)malloc(size);
    c = (long int *)malloc(N * sizeof(long int));

    random_ints(a, N);
    int i;
    for (i = 0; i < N; i++)
    {
        printf("%d\n", a[i]);
    }

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);

    clock_t t = clock();
    stencil_1d<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_c);
    // Kernel launches are asynchronous: synchronize before stopping the clock
    // so the measurement covers the kernel, not just the launch.
    cudaDeviceSynchronize();
    t = clock() - t;
    double time_taken = ((double)t) / CLOCKS_PER_SEC;
    printf("Time taken by function is %f seconds\n", time_taken);

    // BUG FIX: copy N * sizeof(long int) bytes — the original copied only
    // N * sizeof(int), truncating the long int results.
    cudaMemcpy(c, d_c, N * sizeof(long int), cudaMemcpyDeviceToHost);
    for (i = 0; i < N; i++)
    {
        printf("%ld\n", c[i]);
    }

    cudaFree(d_a);
    cudaFree(d_c);
    free(a);
    free(c);
    return 0;
}
13,399
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <memory>
#include <utility>

constexpr auto block_size = 16;

// True when |a - b| is below the absolute tolerance.
// BUG FIX: the original tested `a - b < eps || b - a < eps`, which is true
// for every pair of floats (one of the two differences is always <= 0).
bool almost_equal(float a, float b) { return std::fabs(a - b) < 0.00001f; }

class Matrix;
void swap(Matrix &left, Matrix &right) noexcept;

// Row-major host matrix with an optional mirrored device buffer.
struct Matrix {
  size_t row;
  size_t col;
  std::unique_ptr<float[]> elements;  // host storage, row * col floats
  float *gpu_elements;                // device storage; owned, freed in dtor

  float get(int row, int col) const {
    return *(elements.get() + row * this->col + col);
  }
  void set(int row, int col, float value) {
    *(elements.get() + row * this->col + col) = value;
  }

  ~Matrix() {
    if (gpu_elements) cudaFree(gpu_elements);
  }

  // BUG FIX: gpu_elements was left uninitialized here, so a Matrix that was
  // never uploaded to the GPU destroyed a garbage pointer via cudaFree.
  Matrix(size_t row, size_t col)
      : row(row), col(col), elements(new float[row * col]),
        gpu_elements(nullptr) {}

  Matrix(const Matrix &other)
      : row(other.row), col(other.col),
        elements(new float[other.row * other.col]), gpu_elements(nullptr) {
    std::memcpy(elements.get(), other.elements.get(),
                row * col * sizeof(float));
  }

  // Copy-and-swap assignment; strong exception guarantee.
  Matrix &operator=(const Matrix &other) {
    using std::swap;
    if (this == &other) {
      return *this;
    }
    Matrix mat(other);
    swap(*this, mat);
    return *this;
  }

  // BUG FIX: gpu_elements is now swapped too, so device-buffer ownership
  // follows the host data it mirrors.
  void swap(Matrix &other) noexcept {
    using std::swap;
    swap(row, other.row);
    swap(col, other.col);
    swap(elements, other.elements);
    swap(gpu_elements, other.gpu_elements);
  }
};

void swap(Matrix &left, Matrix &right) noexcept { left.swap(right); }

// Prints the host-side contents of mat, one row per line.
void print_matrix(const Matrix &mat) {
  for (int i = 0; i < mat.row; ++i) {
    for (int j = 0; j < mat.col; ++j) {
      std::cout << mat.get(i, j) << ", ";
    }
    std::cout << std::endl;
  }
}

// Non-owning device-side view; rows are separated by `pitch` bytes
// (as returned by cudaMallocPitch), hence the char* arithmetic.
struct GPUMatrix {
  size_t row;
  size_t col;
  size_t pitch;
  float *elements;

  __device__ float get(int row_idx, int col_idx) const {
    float *row = reinterpret_cast<float *>(
        reinterpret_cast<char *>(elements) + row_idx * pitch);
    return row[col_idx];
  }
  __device__ void set(int row_idx, int col_idx, float value) {
    float *row = reinterpret_cast<float *>(
        reinterpret_cast<char *>(elements) + row_idx * pitch);
    row[col_idx] = value;
  }
};

// Allocates a pitched device buffer (stored in matrix.gpu_elements so the
// Matrix destructor releases it) and uploads the host data.
GPUMatrix load_to_gpu(Matrix &matrix) {
  size_t pitch;
  cudaMallocPitch(&matrix.gpu_elements, &pitch, matrix.col * sizeof(float),
                  matrix.row);
  cudaMemcpy2D(matrix.gpu_elements, pitch, matrix.elements.get(),
               matrix.col * sizeof(float), matrix.col * sizeof(float),
               matrix.row, cudaMemcpyHostToDevice);
  return GPUMatrix{matrix.row, matrix.col, pitch, matrix.gpu_elements};
}

// Downloads a device matrix into a fresh host Matrix (no device buffer).
Matrix copy_to_host(GPUMatrix matrix) {
  Matrix new_matrix(matrix.row, matrix.col);
  cudaError_t err = cudaMemcpy2D(
      new_matrix.elements.get(), matrix.col * sizeof(float), matrix.elements,
      matrix.pitch, matrix.col * sizeof(float), matrix.row,
      cudaMemcpyDeviceToHost);
  std::cout << "cudaMemcpy2D done: " << cudaGetErrorString(err) << std::endl;
  return new_matrix;
}

// Naive matrix multiply: one thread per result element.
// BUG FIX: added a bounds guard so over-provisioned grids do not write
// out of range.
__global__ void matrix_mul_strait(GPUMatrix mat0, GPUMatrix mat1,
                                  GPUMatrix result) {
  int row = blockDim.x * blockIdx.x + threadIdx.x;
  int col = blockDim.y * blockIdx.y + threadIdx.y;
  if (row >= result.row || col >= result.col) return;
  float element = 0;
  for (int i = 0; i < mat0.col; ++i) {
    element += mat0.get(row, i) * mat1.get(i, col);
  }
  result.set(row, col, element);
}

// Tiled matrix multiply using shared memory.
// Precondition: all matrix dimensions are multiples of block_size and
// blockDim == (block_size, block_size); there are no tail guards.
__global__ void matrix_mul_shared(GPUMatrix mat0, GPUMatrix mat1,
                                  GPUMatrix result) {
  const int row_min = blockDim.x * blockIdx.x;
  const int col_min = blockDim.y * blockIdx.y;
  const int target_row = blockDim.x * blockIdx.x + threadIdx.x;
  const int target_col = blockDim.y * blockIdx.y + threadIdx.y;

  // Walk the shared dimension one block_size tile at a time.
  float c_value = 0.0f;
  for (int iter = 0, base = 0; iter < int(mat0.col / blockDim.y);
       ++iter, base += block_size) {
    __shared__ float mat0_submatrix[block_size][block_size];
    __shared__ float mat1_submatrix[block_size][block_size];
    mat0_submatrix[threadIdx.x][threadIdx.y] =
        mat0.get(row_min + threadIdx.x, base + threadIdx.y);
    mat1_submatrix[threadIdx.x][threadIdx.y] =
        mat1.get(base + threadIdx.x, col_min + threadIdx.y);
    __syncthreads();  // tiles fully loaded before use

    for (int i = 0; i < block_size; ++i) {
      c_value += mat0_submatrix[threadIdx.x][i] * mat1_submatrix[i][threadIdx.y];
    }
    __syncthreads();  // finish reads before the next tile overwrites
  }
  result.set(target_row, target_col, c_value);
}

int main() {
  // A (64x128) and B (128x64) are all ones, so C = A*B is all 128.
  Matrix host_matrix_A(64, 128);
  for (int i = 0; i < host_matrix_A.row; ++i) {
    for (int j = 0; j < host_matrix_A.col; ++j) {
      host_matrix_A.set(i, j, 1.0);
    }
  }
  Matrix host_matrix_B(128, 64);
  for (int i = 0; i < host_matrix_B.row; ++i) {
    for (int j = 0; j < host_matrix_B.col; ++j) {
      host_matrix_B.set(i, j, 1.0);
    }
  }
  Matrix host_matrix_C(64, 64);
  for (int i = 0; i < host_matrix_C.row; ++i) {
    for (int j = 0; j < host_matrix_C.col; ++j) {
      host_matrix_C.set(i, j, 0.0);
    }
  }

  GPUMatrix gpu_matrix_A = load_to_gpu(host_matrix_A);
  GPUMatrix gpu_matrix_B = load_to_gpu(host_matrix_B);
  GPUMatrix gpu_matrix_C = load_to_gpu(host_matrix_C);

  dim3 block_dim(16, 16);
  dim3 grid_dim(64 / 16, 64 / 16);
  matrix_mul_shared<<<grid_dim, block_dim>>>(gpu_matrix_A, gpu_matrix_B,
                                             gpu_matrix_C);

  host_matrix_C = copy_to_host(gpu_matrix_C);
  print_matrix(host_matrix_C);
  return 0;
}
13,400
#include "includes.h"

// Swaps rows between `source` and `target` (both nRows x nCols, row-major).
// indices1/indices2 hold float-encoded row indices for the pairs to swap;
// negative values index from the end (Python-style). Any index still outside
// [0, nRows) after wrapping is marked invalid (-1): the valid side of such a
// pair is overwritten with NaN and the invalid side is left untouched.
// Launch: 32 threads per block, one block per group of 32 index pairs.
__global__ void kSwapRows(float* source, float* target, float* indices1, float* indices2, int nRowIs, int nCols, int nRows){
    __shared__ int sourceRowIndices[32], targetRowIndices[32];
    const int startRowI = blockIdx.x * 32;
    const int tid = threadIdx.x;
    const int localNRowIs = min(32, nRowIs-startRowI);

    // Cooperatively load and sanitize up to 32 row-index pairs.
    if (tid < localNRowIs){
        sourceRowIndices[tid] = int(indices1[startRowI + tid]);
        targetRowIndices[tid] = int(indices2[startRowI + tid]);
        if (sourceRowIndices[tid]<0)
            sourceRowIndices[tid] += nRows;
        if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nRows)
            sourceRowIndices[tid] = -1;
        if (targetRowIndices[tid]<0)
            targetRowIndices[tid] += nRows;
        if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nRows)
            targetRowIndices[tid] = -1;
    }
    __syncthreads();

    // Swap the rows one pair at a time; the 32 threads of the block stripe
    // across the columns of each row.
    for (int i=0; i<localNRowIs; i++){
        const int sourceRowI = sourceRowIndices[i], targetRowI = targetRowIndices[i];
        for (int colI=tid; colI<nCols; colI+=32) {
            // BUG FIX: NaN was previously manufactured with the
            // double-precision expression (1.0/0.0 - 1.0/0.0); use the
            // single-precision nanf() instead of double arithmetic.
            const float temp1 = sourceRowI==-1 ? nanf("") : source[sourceRowI * nCols + colI];
            const float temp2 = targetRowI==-1 ? nanf("") : target[targetRowI * nCols + colI];
            if (sourceRowI != -1)
                source[sourceRowI * nCols + colI] = temp2;
            if (targetRowI != -1)
                target[targetRowI * nCols + colI] = temp1;
        }
    }
}