serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
601
|
/* from: https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
* Jialin Liu
* Simple starting cpp cuda program
* Jun 24 2017, Saturday, 2:09pm
* Compile and test on Maeve, a 3GPU single node at NERSC, LBNL, CA.
*/
#include<iostream>
#include<math.h>
using namespace std;
// CUDA kernel to add the elements of two arrays: y[i] += x[i].
// Grid-stride loop: behaves exactly like the original serial loop when
// launched with <<<1,1>>>, but also produces correct results for any launch
// configuration (the original made EVERY thread repeat the full n-element
// loop, racing and double-adding when more than one thread is launched).
__global__
void add (int n, float *x, float * y){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride){
        y[i] = x[i] + y[i];
    }
}
int main(void)
{
    int N= 1<<20; //1 million elements
    float *x, *y;
    // Unified (managed) memory: accessible from both host and device.
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    clock_t t;
    //Initialize x and y arrays on the host
    for (int i=0; i<N; i++){
        x[i] =1.5f;
        y[i] =2.3f;
    }
    //run kernel on 1M elements on the GPU
    t = clock();
    add<<<1, 1>>>(N, x, y);
    // Kernel launches are asynchronous: synchronize BEFORE stopping the
    // timer, otherwise we only measure the launch overhead, not the work.
    // (The original stopped the clock first, reporting a near-zero time.)
    cudaDeviceSynchronize();
    t = clock() -t;
    cout <<(float)t/CLOCKS_PER_SEC<<" seconds"<<endl;
    // Every element should now be 1.5 + 2.3 = 3.8.
    float maxError = 0.0f;
    for (int i =0;i <N;i ++)
        maxError =fmax(maxError, fabs(y[i]-3.8f));
    cout <<"Max error: "<<maxError <<endl;
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
602
|
#include <stdio.h>
/*
//=========== PART-1 ========================
__global__ void hello()
{
}
int main(void)
{
hello<<< 1, 1 >>>();
cudaDeviceSynchronize();
printf("Hello World\n");
return 0;
}
*/
//=========== PART-2 ========================
__device__ const char *STR = "HELLO WORLD!";
const char STR_LENGTH = 12;
// Kernel: each thread prints a single character of STR; the modulo wraps
// thread indexes past the string length back to the beginning.
__global__ void hello()
{
    int pos = threadIdx.x % STR_LENGTH;
    printf("%c\n", STR[pos]);
}
// Launch one block of 16 threads, each printing one character,
// then wait so the device-side printf output is flushed before exit.
int main(void)
{
    hello<<< 1, 16>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
603
|
// Each thread stores its own flat global index into d.
__global__ void exampleDevice( float * d )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    d[idx] = (float)idx;
}
// Fill h[0 .. blockDim*threadDim) with each element's own index, computed
// on the GPU. blockDim/threadDim are the launch geometry, not CUDA builtins.
extern "C" void exampleHost( float * h, int blockDim, int threadDim )
{
    float * d;
    size_t bytes = (size_t)blockDim * threadDim * sizeof( float );
    cudaMalloc( ( void** )&d, bytes );
    exampleDevice<<<blockDim, threadDim>>>( d );
    // cudaMemcpy implicitly synchronizes with the preceding kernel launch.
    cudaMemcpy( h, d, bytes, cudaMemcpyDeviceToHost );
    cudaFree( d );  // was leaked in the original
}
|
604
|
#include "includes.h"
// Fused 512-in / 128-out 1x1 layer: C = ReLU(scale * (A x B) + bias).
// NOTE(review): the launch geometry is implied by the indexing below —
// blockDim must be (128, 4) and the dynamic shared buffer must hold
// 512*4 + 128*64 + 4*128 + 2*128 floats; confirm against the caller.
__global__ void kernel_512_one_128(float *A, float *B, float *bnBias, float *bnScale, float *C) {
    int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y;
    int ind = line*128 + in_channel;  // flat thread id within the block (0..511)
    extern __shared__ float shared_[];
    // Partition the dynamic shared buffer: input tile, weight tile,
    // output accumulators, then bias and scale vectors.
    float *weights = shared_ + 512*4, *output = weights + 128*64, *input = shared_;
    float *bias = output + 4*128, *scale = bias + 128;
    // Stage 4 rows x 512 input channels of A into shared memory.
    for (int i = 0; i < 4; i++)
        input[ind + i*512] = A[tile*2048 + i*512 + ind];
    bias[in_channel] = bnBias[in_channel];
    scale[in_channel] = bnScale[in_channel];
    output[ind] = 0.0f;
    __syncthreads();
    // Accumulate over the 512 input channels in chunks of 64.
    for (int k = 0; k < 512; k += 64) {
        float *B_start = B + k*128;
        // Cooperative load of a 64x128 weight tile (16 elements per thread).
        for (int i = 0; i < 16; i++)
            weights[ind + i*512] = B_start[i*512 + ind];
        __syncthreads();
        float *A_start = input + k;
        for (int p = 0; p < 64; p++) {
            output[ind] += A_start[line*512 + p] * weights[in_channel + p*128];
        }
        __syncthreads();  // weight tile is overwritten next iteration
    }
    // Scale/shift then ReLU before writing back to global memory.
    float *C_start = C + tile*512, res = scale[in_channel] * output[ind] + bias[in_channel];
    C_start[ind] = res > 0 ? res : 0;
}
|
605
|
//Parallel programming for many core GPUs
//Name: Gesu Bal
//Instructor name: Meilin Liu
/*
this is a simple cuda program calculating Tiled Matrix vector multiplication for 2 dimensions on GPU device
I multiplied two double two-dimensional matrices A, B on the device GPU.
After the device matrix multiplication kernel function is invoked, and the multiplication result is transferred back to the CPU.
The program will also compute the multiplication matrix of matrices A and B using the CPU.
Then the program compares the device-computed result with the CPU-computed result.
If it matches (within a certain tolerance, i.e., 0.000001), then it will print out "Test PASSED" to the screen before exiting.
This case is for all matrix sizes and blocksize/tilewidth: 8*8
*/
#include<stdio.h>
#include<cuda.h>
#include <time.h>
int N,blocksize;
//gpu function for multiplication
// Tiled matrix multiply d_c = d_a * d_b using 8x8 shared-memory tiles.
// Assumes blockDim == (8,8) and width is a multiple of 8 (true for every
// size offered in main); there is no bounds check on Row/Col.
__global__ void mul_matrix_gpu(double *d_a, double *d_b, double *d_c, int width)
{
    int TILE_WIDTH=8;
    __shared__ double ds_M[8][8];
    __shared__ double ds_N[8][8];
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    // Identify the row and column of the Pd element to work on
    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;
    double Pvalue = 0;
    // Loop over the Md and Nd tiles required to compute the Pd element
    for (int m = 0; m < width/TILE_WIDTH; m++)
    {
        // Collaborative loading of Md and Nd tiles into shared memory
        ds_M[ty][tx] = d_a[Row*width + m*TILE_WIDTH +tx];
        ds_N[ty][tx] = d_b[Col+(m*TILE_WIDTH+ty)*width];
        __syncthreads();  // tiles fully loaded before they are read
        for (int k = 0; k < TILE_WIDTH; k++)
            Pvalue += ds_M[ty][k] * ds_N[k][tx];
        __syncthreads();  // done reading before the next iteration overwrites
    }
    d_c[Row*width+Col] = Pvalue;
}
//cpu function for multiplication
// Naive O(N^3) reference multiply: cpu_c = a * b for row-major N x N matrices.
void mul_matrix_cpu(double *a, double *b, double *cpu_c, int N)
{
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
        {
            double acc = 0;
            for (int k = 0; k < N; k++)
                acc += a[row*N + k] * b[k*N + col];
            cpu_c[row*N + col] = acc;
        }
    }
}
//cpu and gpu result matching function
// Recompute A*B on the CPU and compare with C element-wise using a
// relative tolerance. Prints TEST PASSED/FAILED and returns the outcome.
bool verify(double *A, double *B, double *C, int width) {
    const double relativeTolerance = 0.000001;
    for(int row = 0; row < width; row++) {
        for(int col = 0; col < width; col++) {
            double sum = 0;
            // Use int consistently (the original mixed unsigned k with int width).
            for(int k = 0; k < width; k++) {
                sum += A[row*width + k]*B[k*width + col];
            }
            double diff = sum - C[row*width + col];
            // Guard the division: when the true value is 0 the original
            // divided by zero and produced NaN, whose comparisons are all
            // false — wrong results at zero entries silently passed.
            double relativeError = (sum != 0.0) ? diff / sum : diff;
            if (relativeError >= relativeTolerance
                || relativeError <= -relativeTolerance)
            {
                printf("TEST FAILED\n\n");
                return false;
            }
        }
    }
    printf("TEST PASSED\n\n");
    return true;
}
//print matrix
// Print an N x N row-major matrix, tab-separated, one row per line.
// Always returns 1.
int printMatrix(double *a,int N)
{
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
            printf("%f\t", a[row*N + col]);
        printf("\n");
    }
    return 1;
}
// Driver: ask the user for a square matrix size, multiply two random
// matrices on CPU and GPU, time both plus the transfers, and verify.
int main()
{
    //user input
    int r, col;
    printf("Select one of the following options: \n");
    printf("Press a for matrix size 8 * 8 \n");
    printf("Press b for matrix size 64 * 64 \n");
    printf("Press c for matrix size 128 * 128 \n");
    printf("Press d for matrix size 512 * 512 \n");
    printf("Press e for matrix size 1024 * 1024 \n");
    printf("Press f for matrix size 4096 * 4096 \n");
    printf("Press any other key for exit \n");
    char ch;
    scanf("%c",&ch);
    // r, col and the global N all hold the chosen dimension.
    switch(ch)
    {
    case 'a':
        r=8;
        col=8;
        N=8;
        printf("Matrix size is 8 * 8 \n");
        break;
    case 'b':
        r=64;
        col=64;
        N=64;
        printf("Matrix size is 64 * 64 \n");
        break;
    case 'c':
        r=128;
        col=128;
        N=128;
        printf("Matrix size is 128 * 128 \n");
        break;
    case 'd':
        r=512;
        col=512;
        N=512;
        printf("Matrix size is 512 * 512 \n");
        break;
    case 'e':
        r=1024;
        col=1024;
        N=1024;
        printf("Matrix size is 1024 * 1024 \n");
        break;
    case 'f':
        r=4096;
        col=4096;
        N=4096;
        printf("Matrix size is 4096 * 4096 \n");
        break;
    default:
        exit(1);
        break;
    }
    //initializing the block size/tile width
    blocksize=8;
    //memory allocation for vectors
    // NOTE(review): a, b, c and cpu_c are never freed before return.
    double *a, *b, *c, *cpu_c, *d_a, *d_b, *d_c;
    int a_size=r*col;
    int b_size=r*col;
    int c_size=r*col;
    int cpu_c_size=r*col;
    a=(double*)malloc(sizeof(double)*a_size);
    b=(double*)malloc(sizeof(double)*b_size);
    c=(double*)malloc(sizeof(double)*c_size);
    cpu_c=(double*)malloc(sizeof(double)*cpu_c_size);
    //matrix initialization
    // Deterministic pseudo-random fill in [-2, 2) via a Lehmer-style
    // sequence (3125*init mod 65536).
    int i,j;
    int init=1325;
    for (i=0;i<N;i++)
    {
        for (j=0;j<N;j++)
        {
            init=3125*init%65536;
            a[i*col+j]=(init-32768.0)/16384.0;
            init=3125*init%65536;
            b[i*col+j]=(init-32768.0)/16384.0;
        }
    }
    //printMatrix(a,N);
    //printf("\n");
    //printMatrix(b,N);
    //printf("\n");
    //allocating memory on device
    // NOTE(review): only the first cudaMalloc's status is checked.
    int cudaret=cudaMalloc((void **)(&d_a),(N*N)*sizeof(double));
    if(cudaret!=cudaSuccess)
    {printf("memory was not allocated on device \n");}
    cudaMalloc((void **)(&d_b),(N*N)*sizeof(double));
    cudaMalloc((void **)(&d_c),(N*N)*sizeof(double));
    //calculating cpu time
    clock_t startCPU, end;
    float cpu_time_used;
    //calling CPU program
    printf("Calculating results for CPU vector multiplication \n");
    printf("---------\n");
    startCPU = clock();
    mul_matrix_cpu(a,b,cpu_c,N);
    end = clock();
    cpu_time_used = ((float) (end - startCPU))*1000;
    cpu_time_used= cpu_time_used/ CLOCKS_PER_SEC;
    printf("CPU computation time (milliseconds) \n");
    printf("%f \t",cpu_time_used);
    printf("\n");
    printf("\n");
    //printMatrix(cpu_c,N);
    //printf("\n");
    //time execution calculation
    cudaEvent_t start,stop;
    float elapsedTime;
    float timeTransfer;
    float timeBack;
    //memory transfer time
    // Time the host-to-device copies with CUDA events.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //copying contents of a and b to device arrays
    cudaMemcpy(d_a,a,(N*N)*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,(N*N)*sizeof(double),cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeTransfer,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //Initializing block count and block size
    dim3 dimBlock(blocksize,blocksize,1);
    int blockCount_x = (N - 1)/(double(blocksize))+1;//Get number of blocks needed per direction.
    int blockCount_y = (N - 1)/(double(blocksize))+1;
    dim3 dimGrid(blockCount_x,blockCount_y,1);
    printf("Block size and tile width for the program is %d\n ",blocksize);
    //call kernel for gpu functioning
    printf("Calling kernel for gpu computations for vector multiplication and calculating results\n");
    printf("---------\n");
    // Time the kernel itself (events on the same stream as the launch,
    // so cudaEventSynchronize(stop) waits for the kernel to finish).
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    mul_matrix_gpu<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,N);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU computation time (milliseconds) \n");
    printf("%f \t",elapsedTime);
    printf("\n");
    // Time the device-to-host copy of the result.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //copying resulting back to cpu
    cudaMemcpy(c,d_c,(N*N)*sizeof(double),cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeBack,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    timeTransfer += timeBack;
    printf("Total Memory transfer time between CPU and GPU (milliseconds)\n");
    printf("%f \t",timeTransfer);
    printf("\n");
    float speedup;
    speedup=cpu_time_used/elapsedTime;
    printf("Speedup: \n");
    printf("%f \t",speedup);
    printf("\n");
    printf("Comparing results for CPU and GPU computations \n");
    printf("---------\n");
    // verify() recomputes A*B internally, so cpu_c is not passed here.
    verify(a,b,c,N);
    //deallocating memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
606
|
#include <stdio.h>
// Allocate matching host/device buffers, zero the device buffer, copy it
// back, and print what actually arrived on the host.
int main() {
    int dimx = 10;
    int num_bytes = dimx * sizeof (int);
    // device and host pointers
    int *d_a = 0;
    int *h_a = 0;
    /* Allocate host memory for n integers */
    h_a = (int*) malloc(num_bytes);
    printf("%i\n", num_bytes);
    /* Allocate device memory for n integers */
    cudaMalloc(&d_a, num_bytes);
    if (0 == h_a || 0 == d_a) {
        printf("couldn't allocate memory\n");
        return 1;
    }
    cudaMemset(d_a, 0, num_bytes);
    cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost);
    // Print the values copied from the device (all zeros). The original
    // printed the loop index i+1 and never looked at h_a at all, so the
    // copy was unobservable.
    for (int i = 0; i < dimx; i++){
        printf("%d ", h_a[i]);
    }
    printf("\n");
    free(h_a);
    cudaFree(d_a);
    return 0;
}
|
607
|
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <cutil_inline.h>
#include "cuda_runtime.h"
//#include "MyFirst_kernel.cu"
// Each thread writes its thread index (within its block) into its own
// global slot of x.
__global__ void my_first_kernel(float *x)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    x[tid] = (float)threadIdx.x;
}
//
// main host code
//
// Launch my_first_kernel, copy the result back, and check that the sum of
// all elements equals nblocks * sum(0..nthreads-1).
int main(int argc, char **argv)
{
    float *h_x, *d_x;
    int nblocks, nthreads, nsize, n;
    // set number of blocks, and threads per block
    nblocks = 2;
    nthreads = 16;
    nsize = nblocks*nthreads;
    // allocate memory for array
    h_x = (float *)malloc(nsize*sizeof(float));
    cudaMalloc((void **)&d_x, nsize*sizeof(float));
    // execute kernel
    my_first_kernel<<<nblocks,nthreads>>>(d_x);
    // copy results from device to host (cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost);
    // print results
    for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
    // check results
    float sumcheck = 0.;
    float sumcheckcorrect = 0.;
    for (int i = 0; i < nblocks * nthreads; ++i) {
        sumcheck += h_x[i];
    }
    // Each block contributes sum(0..nthreads-1).
    for (int j=0; j<nthreads; ++j) {
        sumcheckcorrect += j;
    }
    // Scale by the number of blocks instead of the hard-coded "2" the
    // original used, so the check stays valid if nblocks is changed.
    sumcheckcorrect *= nblocks;
    if (fabs(sumcheck-sumcheckcorrect)<1e-6) {
        printf("PASSED!\n");
    }
    else
    {
        printf("FAILED!\n");
    }
    // free memory
    cudaFree(d_x);
    free(h_x);
    return 0;
}
|
608
|
#include "includes.h"
// Memory-test write pass: each block copies BLOCKSIZE bytes of the pattern
// at _start_ptr into its own BLOCKSIZE-byte slice of _ptr.
// BLOCKSIZE is expected to come from includes.h.
// NOTE(review): `err` is unused here — presumably consumed by a matching
// read/verify kernel; confirm. The guard only checks the slice's BASE
// address, so a slice that starts before end_ptr but extends past it is
// still written in full — assumes the buffer is a multiple of BLOCKSIZE.
__global__ void kernel_test7_write(char* _ptr, char* end_ptr, char* _start_ptr, unsigned int* err)
{
    unsigned int i;
    unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
    unsigned int* start_ptr = (unsigned int*) _start_ptr;
    if (ptr >= (unsigned int*) end_ptr) {
        return;
    }
    for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){
        ptr[i] = start_ptr[i];
    }
    return;
}
|
609
|
/**
* @file CUDA_Hardware.cu
* @brief Report essential characteristics of GPU.
*
*
*/
#include <cstdio>
#include <cstdlib>
#include <iostream>
using namespace std;
// Enumerate all CUDA devices and print their key properties.
int main(int argc, char** argv) {
    int count = 0;
    cudaGetDeviceCount(&count);
    printf("Report on GPU configuration (GPUs: %i).\n", count);
    for(int i=0; i<count; i++) {
        // Report the i-th device. The original always printed "Device:1"
        // and always queried device 0 inside the loop.
        printf("Device:%i/%i\n", i + 1, count);
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Global GPU Memory (GMEM) :\t%li(MB)\n", prop.totalGlobalMem / 1024 /1024);
        printf("Streaming Multiprocessors:\t%i\n", prop.multiProcessorCount);
        printf("L2 cache size :\t%i(kB)\n", prop.l2CacheSize / 1024);
        int thsm = prop.maxThreadsPerMultiProcessor;
        int thbl = prop.maxThreadsPerBlock;
        printf("Max threads per block :\t%i\n", thbl);
        // Crude compute-capability guess from max threads per SM;
        // prop.major/prop.minor would be the authoritative source.
        if (thsm == 2048)
            printf("Compute capability:\t3.0 (Kepler)\n");
        else if (thsm == 1536)
            printf("Compute capability:\t2.0 (Fermi)\n");
        else
            printf("Compute capability:\t1.3\n");
    }
}
|
610
|
// Flatten a 2D (x, y) coordinate into a row-major 1D index.
__device__ int get(int x, int y,int width){
    return x + width * y;
}
// Wrap a coordinate torus-style: -1 maps to width-1 and width maps to 0;
// in-range values pass through unchanged.
__device__ int normeValue(int x, int width){
    if (x == width)
        return 0;
    return (x < 0) ? width - 1 : x;
}
// Compute the flat indexes of the 8 torus-wrapped neighbors of cell (i, j),
// starting east and going counter-clockwise.
// NOTE(review): this returns a pointer to `dir`, a function-LOCAL array —
// that storage is dead the moment the function returns, so any caller
// dereferencing the result reads garbage (undefined behavior). No caller
// is visible in this file (getTotalUpdateFromNeighbors re-derives the
// indexes itself); if a caller exists, it should pass the array in instead.
__device__ int* neighborsIndexes(int i, int j, int width, int height){
    int dir[8];
    dir[0] = get(normeValue(i+1,width), j, width);
    dir[1] = get(normeValue(i+1,width), normeValue(j+1,height),width);
    dir[2] = get(i, normeValue(j+1,height),width);
    dir[3] = get(normeValue(i-1,width), normeValue(j+1,height),width);
    dir[4] = get(normeValue(i-1,width), j, width);
    dir[5] = get(normeValue(i-1,width), normeValue(j-1,height),width);
    dir[6] = get(i, normeValue(j-1,height),width);
    dir[7] = get(normeValue(i+1,width), normeValue(j-1,height),width);
    return dir;
}
// Sum the staged tmp contributions of the 8 torus-wrapped neighbors of
// cell (i, j). Accumulation order matches the original (east, then
// counter-clockwise) so float results are bit-identical.
__device__ float getTotalUpdateFromNeighbors(float* tmp, int i, int j, int width, int height){
    int east  = normeValue(i + 1, width);
    int west  = normeValue(i - 1, width);
    int south = normeValue(j + 1, height);
    int north = normeValue(j - 1, height);
    float total = tmp[get(east, j, width)];
    total += tmp[get(east, south, width)];
    total += tmp[get(i, south, width)];
    total += tmp[get(west, south, width)];
    total += tmp[get(west, j, width)];
    total += tmp[get(west, north, width)];
    total += tmp[get(i, north, width)];
    total += tmp[get(east, north, width)];
    return total;
}
extern "C"
// Diffusion phase 1: each cell gives away values[k]*diffCoef; one eighth
// of that amount is staged in tmp[k] for each neighbor to collect later.
__global__ void DIFFUSION_TO_TMP( int width, int height, float *values, float* tmp, float diffCoef)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    int k = get(i, j, width);
    float give = values[k] * diffCoef;
    float giveToNeighbor = give / 8;
    values[k] -= give;
    tmp[k] = giveToNeighbor;
}
extern "C"
// Diffusion phase 2: each cell collects the contributions its 8 neighbors
// staged in tmp during phase 1.
__global__ void DIFFUSION_UPDATE( int width, int height, float *values, float* tmp)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    values[get(i, j, width)] += getTotalUpdateFromNeighbors(tmp, i, j, width, height);
}
extern "C"
// Fused phase 2 + evaporation: collect the neighbors' staged contributions,
// then evaporate a fraction evapCoef of the updated value.
__global__ void DIFFUSION_UPDATE_THEN_EVAPORATION( int width, int height, float *values, float* tmp, float evapCoef)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    int k = get(i, j, width);
    float total = values[k] + getTotalUpdateFromNeighbors(tmp, i, j, width, height);
    values[k] = total - total * evapCoef;
}
|
611
|
#include <iostream>
#include <complex>
#include <cmath>
#include <iomanip>
#include <string>
#include <fstream>
using namespace std;
// One thread per grid point: iterate z <- z^2 + c (c read from the two
// input matrices) and record how many iterations |z|^2 stays <= 4,
// capped at nb_ite — the classic Mandelbrot escape-time count.
// NOTE(review): no bounds check — the caller rounds the grid up, so it
// must pad the device allocations to cover the trailing threads.
__global__
void z_funct(double * d_mat_re, double * d_mat_im, int *d_img, int nb_ite)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const double c_re = d_mat_re[idx];
    const double c_im = d_mat_im[idx];
    double re = 0, im = 0;
    int count = 0;
    while ((re*re + im*im <= 4) && (count < nb_ite))
    {
        double next_re = re*re - im*im + c_re;
        im = 2*im*re + c_im;
        re = next_re;
        count++;
    }
    d_img[idx] = count;
}
// Parse the sampling window and resolution, build the grid of complex
// points on the host, run the escape-time kernel, and write the iteration
// counts to the file "conv".
int main(int argc, char * argv[])
{
    if (argc == 1)
    {
        cout << "Help" << endl
        << " 8 args :" << endl << endl
        << "arg1 = min real part | arg2 = min imaginary part " << endl
        << "arg3 = max real part | arg4 = max imaginary part " << endl
        << "arg5 = number of points on the real axe | arg6 = number of points on the imaginary axe " << endl
        << "arg7 = nb of iterations | arg8 = limit convergence"
        << endl << endl
        << " 4 args :" << endl << endl
        << "arg1 = number of points on the real axe | arg2 = number of points on the imaginary axe " << endl
        << "arg3 = nb of iterations "
        << endl << endl ;
        return 1;
    }
    double max_re, max_im, min_re, min_im;
    int nb_pts_re, nb_pts_im, nb_ite;
    if (argc == 8)
    {
        try
        {
            min_re = stod(argv[1]);
            min_im = stod(argv[2]);
            max_re = stod(argv[3]);
            max_im = stod(argv[4]);
            nb_pts_re = stoi(argv[5]);
            nb_pts_im = stoi(argv[6]);
            nb_ite = stoi(argv[7]);
        }
        catch (...)
        {
            cout << "Bad Args : see help (type nameofprogram without args)" << endl << endl;
            return 1;
        }
    }
    else if (argc == 4)
    {
        min_re = -2;
        min_im = -1;
        max_re = 1;
        max_im = 1;
        try
        {
            nb_pts_re = stoi(argv[1]);
            nb_pts_im = stoi(argv[2]);
            nb_ite = stoi(argv[3]);
        }
        catch (...)
        {
            cout << "Bad Args : see help (type nameofprogram without args)" << endl << endl;
            return 1;
        }
    }
    else
    {
        // Any other argument count used to fall through with every
        // parameter uninitialized; reject it explicitly instead.
        cout << "Bad Args : see help (type nameofprogram without args)" << endl << endl;
        return 1;
    }
    cout << "Initializing..." << endl;
    int nb_pts = nb_pts_re*nb_pts_im;
    int size_d = sizeof(double)*nb_pts;
    int size_i = sizeof(int)*nb_pts;
    // The kernel has no bounds check and the grid is rounded up to a
    // multiple of 1024 threads, so pad the DEVICE allocations to cover the
    // trailing threads (their results are computed but never read back).
    int nb_threads = (nb_pts/1024 + 1) * 1024;
    int size_d_pad = sizeof(double)*nb_threads;
    int size_i_pad = sizeof(int)*nb_threads;
    double * mat_re = (double *)malloc(size_d);
    double * mat_im = (double *)malloc(size_d);
    // Sample the rectangle from the top-right corner downwards (same
    // orientation as the original).
    double re, im;
    for (int i=0; i<nb_pts_im; i++)
    {
        im = max_im - (max_im-min_im)/nb_pts_im*i;
        for (int j=0; j<nb_pts_re; j++)
        {
            re = max_re - (max_re-min_re)/nb_pts_re*j;
            mat_re[i*nb_pts_re+j] = re;
            mat_im[i*nb_pts_re+j] = im;
        }
    }
    double *d_mat_re, *d_mat_im;
    int *d_img;
    cudaMalloc(&d_mat_re,size_d_pad);
    cudaMalloc(&d_mat_im,size_d_pad);
    cudaMalloc(&d_img,size_i_pad);
    // Zero the padding so the extra threads read defined values.
    cudaMemset(d_mat_re, 0, size_d_pad);
    cudaMemset(d_mat_im, 0, size_d_pad);
    cudaMemcpy(d_mat_re, mat_re, size_d, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat_im, mat_im, size_d, cudaMemcpyHostToDevice);
    cout << "Running on GPU..." << endl;
    dim3 blockDim = 1024;
    dim3 gridDim = nb_pts/1024 + 1;
    z_funct<<<gridDim,blockDim>>>(d_mat_re, d_mat_im, d_img, nb_ite);
    cout << "Fetching datas..." << endl;
    int * img = (int*) malloc(size_i);
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(mat_re, d_mat_re, size_d, cudaMemcpyDeviceToHost);
    cudaMemcpy(mat_im, d_mat_im, size_d, cudaMemcpyDeviceToHost);
    cudaMemcpy(img, d_img, size_i, cudaMemcpyDeviceToHost);
    cout << "Writing on the disk..." << endl;
    ofstream file; file.open("conv");
    for (int i=0; i<nb_pts_im; i++)
    {
        for (int j=0; j<nb_pts_re; j++)
        {
            file << setw(15) << img[i*nb_pts_re+j] ;
        }
        file << endl ;
    }
    file.close();
    // Free host and device memory (all of it was leaked in the original).
    free(mat_re);
    free(mat_im);
    free(img);
    cudaFree(d_mat_re);
    cudaFree(d_mat_im);
    cudaFree(d_img);
    cout << "Done !" << endl;
    return 0;
}
|
612
|
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <algorithm>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// RAII pair of CUDA events for millisecond-resolution GPU timing on the
// default stream.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;
    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }
    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    // Record the start event and wait until it has actually been reached,
    // so earlier pending work is excluded from the measurement.
    void Start()
    {
        cudaEventRecord(start, 0);
        cudaEventSynchronize(start);
    }
    // Record the stop event; Elapsed() does the waiting.
    void Stop()
    {
        cudaEventRecord(stop, 0);
    }
    // Block until the stop event completes, then return elapsed ms.
    float Elapsed()
    {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};
// Reference sort: copy the n uint32 keys to the device, sort ascending
// with Thrust, and copy the sorted keys into out.
void sortByThrust(const uint32_t * in, int n,
        uint32_t * out)
{
    thrust::device_vector<uint32_t> keys(in, in + n);
    thrust::sort(keys.begin(), keys.end());
    thrust::copy(keys.begin(), keys.end(), out);
}
// Build a per-block histogram of the current nBits-wide digit in shared
// memory, then write it to `scan` TRANSPOSED (bin-major:
// scan[bin*gridDim.x + block]) so a single exclusive scan over `scan`
// yields global digit base offsets.
__global__ void computeLocalHist(uint32_t * in, int n, int * scan, int nBins, int bit)
{
    extern __shared__ int s_hist[];
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    // Zero the shared histogram (strided so it works when nBins > blockDim.x).
    for(int stride=0;stride<nBins;stride+=blockDim.x)
        if(threadIdx.x+stride<nBins)
            s_hist[threadIdx.x+stride]=0;
    __syncthreads();
    if(i<n)
    {
        int bin=(in[i]>>bit)&(nBins-1);// extract nBits to find which bin this element belongs to
        atomicAdd(&s_hist[bin], 1);
    }
    __syncthreads();// make sure every element of the block has been counted into s_hist
    // Write the histogram out in the transposed (bin-major) layout.
    for(int stride=0;stride<nBins;stride+=blockDim.x)
        if(threadIdx.x+stride<nBins)
            scan[(threadIdx.x+stride)*gridDim.x+blockIdx.x]=s_hist[threadIdx.x+stride];
    // hist[nBins*blockIdx.x+threadIdx.x+stride]=s_hist[threadIdx.x+stride];
}
// Per-block Kogge-Stone-style scan: writes an exclusive scan of `in` into
// `out` and each block's total into `blkSums`.
// NOTE(review): two suspicious spots to verify against a reference scan —
// (1) the loop bound depends on threadIdx.x, so threads leave the loop at
// different iterations while __syncthreads() is called inside it (barrier
// in divergent control flow is undefined behavior); (2) the guard
// `threadIdx.x < n - stride` looks like it was meant to be
// `threadIdx.x >= stride` (which the loop condition already implies).
// Threads with i >= n also leave value[threadIdx.x] uninitialized but it
// is still read below — assumes n is a multiple of blockDim.x; confirm.
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
    extern __shared__ uint32_t value[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        value[threadIdx.x] = in[i];
    }
    for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
        __syncthreads();
        int tmp;
        if (threadIdx.x < n - stride)
            tmp = value[threadIdx.x-stride];
        else
            tmp = 0;
        __syncthreads();
        value[threadIdx.x] += tmp;
    }
    // Last element of the inclusive scan is the block total.
    blkSums[blockIdx.x] = value[blockDim.x - 1];
    // Shift right by one to convert inclusive -> exclusive.
    if (i<n) {
        if(threadIdx.x==0) out[i]=0;
        else
            out[i]=value[threadIdx.x-1];
    }
}
// In-block LSD radix sort over the nBits-wide digit at `bit`: one stable
// 1-bit partition per pass, then record where each digit's run starts
// within the block into `starts` (for the later scatter kernel).
// NOTE(review): `in` and `out` are swapped LOCALLY after every 1-bit pass,
// so with nBits even the block-sorted data ends up back in the caller's
// `in` buffer — confirm the caller expects that.
__global__ void radixSort1bit(uint32_t * in, int n, uint32_t * out,int nBits, int bit,int nBins, int* starts)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ uint32_t value[];
    __shared__ uint32_t start[256];
    for(int indexbit=0;indexbit<nBits;indexbit++)
    {
        // Flag = the indexbit-th bit of this element's digit.
        if (i < n)
        {
            value[threadIdx.x] = ((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1);
        }
        __syncthreads();
        // Kogge-Stone inclusive scan of the flags within the block.
        for(int stride=1;stride<blockDim.x;stride*=2)
        {
            int temp=0;
            if(threadIdx.x>=stride)
            {
                temp=value[threadIdx.x-stride];// fetch the element stride steps back
            }
            __syncthreads();// make sure the value stride steps back was read before it is overwritten
            if(threadIdx.x>=stride )
            {
                value[threadIdx.x]+=temp;
            }
            __syncthreads();// make sure all the additions have completed
        }
        // Count of zero-flags in this (possibly partial) block: block
        // length minus ones (scan total = value[last-1] + last flag).
        int nZeros=0;
        if(blockIdx.x*blockDim.x+blockDim.x<=n)
            nZeros = blockDim.x - value[blockDim.x-2] -((((in[blockIdx.x*blockDim.x+blockDim.x-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
        else
        {
            if(n%blockDim.x>=2)
                nZeros = n%blockDim.x - value[n%blockDim.x-2] - ((((in[n-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
            else
                nZeros = n%blockDim.x - ((((in[n-1] >> bit) & (nBins - 1)) >> indexbit) & 1);
        }
        // Stable partition: zeros keep relative order at the front of the
        // block, ones go after the nZeros zeros.
        if (i<n)
        {
            if(threadIdx.x==0)
            {
                if (((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1)==0)
                {
                    out[i]=in[i];
                }
                else
                    out[nZeros+blockIdx.x*blockDim.x]=in[i];
            }
            else
            {
                if(((((in[i] >> bit) & (nBins - 1)) >> indexbit) & 1)==0)
                {
                    out[i-value[threadIdx.x-1]]=in[i];
                }
                else
                {
                    out[nZeros+value[threadIdx.x-1]+blockIdx.x*blockDim.x]=in[i];
                }
            }
        }
        __syncthreads();
        // Swap the (local) buffer pointers for the next 1-bit pass.
        uint32_t *tam=in;
        in=out;
        out=tam;
    }
    // Record, for each digit value present in the block, the thread index
    // where its run starts.
    if (i<n)
    {
        if(threadIdx.x==0)
        {
            start[((in[i] >> bit) & (nBins - 1))]=threadIdx.x;
        }
        else
        {
            if(((in[i] >> bit) & (nBins - 1))!=((in[i-1] >> bit) & (nBins - 1)))
            {
                start[((in[i] >> bit) & (nBins - 1))]=threadIdx.x;
                starts[blockIdx.x*nBins+((in[i] >> bit) & (nBins - 1))]=start[((in[i] >> bit) & (nBins - 1))];
            }
        }
    }
    // if(i<n)
    // {
    // int bin = (in[i] >> bit) & (nBins - 1);
    // int rank= histScan[bin*gridDim.x+blockIdx.x]+threadIdx.x-start[bin];
    // // if (rank >= 0)
    // // {
    // // printf("%d\n",rank);
    // out[rank] = in[i];
    // // }
    // }
}
// Scatter pass: each element's final position for this digit is the
// scanned global base offset of its (digit, block) bucket plus its offset
// within the block's run of that digit.
__global__ void scatter(uint32_t * in, int n, uint32_t * out,int nBits, int bit,int nBins, int* start, int* histScan)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    int digit = (in[gid] >> bit) & (nBins - 1);
    int base = histScan[digit*gridDim.x + blockIdx.x];
    int offsetInRun = threadIdx.x - start[blockIdx.x*nBins + digit];
    out[base + offsetInRun] = in[gid];
}
// Add the scanned per-block totals to every element of each block's local
// scan; block 0 needs no correction.
__global__ void addSumScan(int * out, int n, int * blkSums)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n || blockIdx.x == 0)
        return;
    out[gid] += blkSums[blockIdx.x - 1];
}
// Full LSD radix sort on the device: per digit, build transposed block
// histograms, exclusive-scan them (block scans fixed up on the host),
// block-sort with radixSort1bit, then scatter to final positions.
// NOTE(review): addSumScan below is launched with element count `n` (the
// input length) where the scanned array has m*nBins elements — verify.
// Also d_blkSums, d_starts, d_outTmp, histScan, blkSums, starts1D,
// in_tmp and out_tmp are never freed.
void sortByDevice(const uint32_t * in, int n,
        uint32_t * out,
        int nBits, int blockSize)
{
    int nBins = 1 << nBits; // number of bins
    int m=(n - 1) / blockSize + 1;// gridSize
    dim3 blockSizeHist(blockSize);
    dim3 blockSizeScan(blockSize);
    dim3 gridSizeHist((n - 1) / blockSizeHist.x + 1);
    dim3 gridSizeScan((nBins*m - 1) / blockSizeScan.x + 1);
    // allocations
    // scan buffers
    int *d_scan, *d_blkSums, *d_histScan, *d_blkOuts, *d_starts;
    int *histScan = (int *)malloc(m*nBins * sizeof(int));
    int *blkSums = (int *)malloc(m*nBins*sizeof(int));
    int* starts1D=(int *) malloc(m*nBins*sizeof(int));
    CHECK(cudaMalloc(&d_scan, nBins*m * sizeof(int)));
    CHECK(cudaMalloc(&d_blkSums,gridSizeScan.x*sizeof(int)));
    CHECK(cudaMalloc(&d_blkOuts,m*nBins*sizeof(int)));
    CHECK(cudaMalloc(&d_starts,m*nBins*sizeof(int)));
    CHECK(cudaMalloc(&d_histScan,m*nBins*sizeof(int)));
    // start indexes (host-side scratch; unused by the kernels below)
    int **start = (int **)malloc(m * sizeof(int *));
    for (int i=0; i<m; i++)
    {
        start[i] = (int *)malloc(nBins * sizeof(int));
    }
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    uint32_t * d_in,*d_out, *d_outTmp;
    CHECK(cudaMalloc(&d_in,n * sizeof(uint32_t)));
    CHECK(cudaMalloc(&d_out,n * sizeof(uint32_t)));
    CHECK(cudaMalloc(&d_outTmp,n * sizeof(uint32_t)));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;
    size_t bytes = gridSizeScan.x * sizeof(int);
    int * in_tmp = (int *)malloc(bytes);
    int * out_tmp = (int *)malloc(bytes);
    // One pass per nBits-wide digit, ping-ponging src/dst on the host.
    for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
    {
        CHECK(cudaMemcpy(d_in, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice));
        // Compute the local histograms into d_scan
        computeLocalHist<<<gridSizeHist, blockSizeHist, blockSize*sizeof(int)>>>(d_in, n, d_scan, nBins,bit);
        // Compute the exclusive scan into d_histScan
        scanBlkKernel<<<gridSizeScan,blockSizeScan,blockSize*sizeof(int)>>>(d_scan,m*nBins,d_histScan,d_blkSums);
        // Inclusive-scan the per-block totals on the host, then add them back.
        CHECK(cudaMemcpy(in_tmp, d_blkSums, gridSizeScan.x * sizeof(int), cudaMemcpyDeviceToHost));
        out_tmp[0] = in_tmp[0];
        for (int i = 1; i < gridSizeScan.x; i++)
        {
            out_tmp[i] = out_tmp[i - 1] + in_tmp[i];
        }
        CHECK(cudaMemcpy(d_blkOuts, out_tmp, gridSizeScan.x * sizeof(int), cudaMemcpyHostToDevice));
        addSumScan<<<gridSizeScan,blockSizeScan>>>(d_histScan, n, d_blkOuts);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        // Sort within blocks and record per-block digit-run starts.
        radixSort1bit<<<gridSizeHist,blockSizeHist,blockSize*sizeof(uint32_t)>>>(d_in,n,d_out,nBits,bit,nBins, d_starts);
        // Scatter to global positions for this digit.
        scatter<<<gridSizeHist,blockSizeHist,blockSize*sizeof(uint32_t)>>>(d_in,n,d_out,nBits,bit,nBins,d_starts,d_histScan);
        CHECK(cudaMemcpy(dst,d_out,n*sizeof(uint32_t),cudaMemcpyDeviceToHost));
        uint32_t * temp = src;
        src = dst;
        dst = temp;
    }
    // After an even number of passes src points at the sorted data.
    memcpy(out, src, n * sizeof(uint32_t));
    // Free memories
    cudaFree(d_scan);
    // cudaFree(d_blkSums);
    cudaFree(d_histScan);
    cudaFree(d_in);
    cudaFree(d_out);
    for (int i=0; i<m; i++)
    {
        free(start[i]);
    }
    free(start);
    free(originalSrc);
}
// Radix sort
// Dispatch to either the Thrust reference sort or the hand-written device
// sort, and report how long the chosen path took.
void sort(const uint32_t * in, int n,
        uint32_t * out,
        int nBits,
        bool useDevice=false, int blockSizes=512)
{
    GpuTimer timer;
    timer.Start();
    if (useDevice)
    {
        printf("\nRadix sort by device\n");
        sortByDevice(in, n, out, nBits, blockSizes);
    }
    else
    {
        printf("\nRadix sort by Thrust\n");
        sortByThrust(in, n, out);
    }
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
}
// Query device 0 and print its key hardware limits.
void printDeviceInfo()
{
    cudaDeviceProp prop;
    CHECK(cudaGetDeviceProperties(&prop, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", prop.name);
    printf("Compute capability: %d.%d\n", prop.major, prop.minor);
    printf("Num SMs: %d\n", prop.multiProcessorCount);
    printf("Max num threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", prop.maxThreadsPerMultiProcessor / prop.warpSize);
    printf("GMEM: %zu byte\n", prop.totalGlobalMem);
    printf("SMEM per SM: %zu byte\n", prop.sharedMemPerMultiprocessor);
    printf("SMEM per block: %zu byte\n", prop.sharedMemPerBlock);
    printf("****************************\n");
}
// Compare two arrays element-wise and report CORRECT/INCORRECT on stdout.
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
    int mismatch = 0;
    for (int i = 0; i < n; i++)
    {
        if (out[i] != correctOut[i])
        {
            mismatch = 1;
            break;
        }
    }
    printf(mismatch ? "INCORRECT :(\n" : "CORRECT :)\n");
}
// Print the array space-separated, followed by a newline.
void printArray(uint32_t * a, int n)
{
    for (uint32_t *p = a; p != a + n; ++p)
        printf("%i ", *p);
    printf("\n");
}
// Generate random input, sort it with both Thrust (reference) and the
// hand-written device radix sort, and compare the results.
int main(int argc, char ** argv)
{
    // PRINT OUT DEVICE INFO
    printDeviceInfo();
    // SET UP INPUT SIZE (deliberately not a multiple of the block size)
    int n = (1 << 24) + 1;
    // n = 500000;
    printf("\nInput size: %d\n", n);
    // ALLOCATE MEMORIES
    size_t bytes = n * sizeof(uint32_t);
    uint32_t * in = (uint32_t *)malloc(bytes);
    uint32_t * out = (uint32_t *)malloc(bytes); // Device result
    uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
    // SET UP INPUT DATA
    for (int i = 0; i < n; i++)
        in[i] = rand();
    //printArray(in, n);
    // SET UP NBITS
    int nBits = 4; // Default
    if (argc > 1)
        nBits = atoi(argv[1]);
    printf("\nNum bits per digit: %d\n", nBits);
    // DETERMINE BLOCK SIZES
    int blockSizes=512; // One for histogram, one for scan
    if (argc == 3)
    {
        blockSizes = atoi(argv[2]);
    }
    // The original format string started with "\b" (a backspace escape,
    // clearly a typo for a newline) and had no trailing newline.
    printf("\nBlock size: %d\n", blockSizes);
    // SORT BY HOST
    sort(in, n, correctOut, nBits);
    // printArray(correctOut, n);
    // SORT BY DEVICE
    sort(in, n, out, nBits, true, blockSizes);
    // printArray(out,n);
    checkCorrectness(out, correctOut, n);
    // FREE MEMORIES
    free(in);
    free(out);
    free(correctOut);
    return EXIT_SUCCESS;
}
|
613
|
#include "includes.h"
// Fill all n elements of y with alpha using a grid-stride loop, so any
// launch configuration covers the whole array.
__global__ void set_kernel(const int n, const float alpha, float *y) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        y[idx] = alpha;
    }
}
|
614
|
#include <stdio.h>
#include <cuda.h>
typedef unsigned char u8;
// One cell of the 3D cellular automaton: its state plus precomputed
// neighbor indexes into the flat grid.
typedef struct cell {
    u8 state;
    size_t* neighbor;   // indexes of neighboring cells
    u8 neighborSize;    // number of entries in `neighbor`
} cell;
// Rule families selectable from the input file header (1..4).
typedef enum type {life, koNeiman, koNeimanMur, koMur} type;
// Host-side grid state ([x][y][z]) and its dimensions / rule type,
// filled in by readInput().
u8*** hStates;
size_t hX, hY, hZ;
type hT;
// Device-side mirrors of the state, dimensions and rule type.
__device__ u8* dStates;
__device__ size_t *pdX, *pdY, *pdZ;
__device__ type *pdT;
__device__ cell* dCurrent;
__device__ cell* dNext;
__device__ size_t* pdFullSize;
/*
 * Parses the simulation input file into the host globals.
 * First line: "X Y Z type" -- grid dimensions (hX, hY, hZ) plus the
 * simulation type (1..4 -> life/koNeiman/koNeimanMur/koMur in hT).
 * Remaining lines: "state x y z" records; a record whose state is 0
 * terminates parsing (via the goto to `stop`).
 * Allocates and zero-fills the hX*hY*hZ host state cube hStates.
 * Exits the process on open failure, unknown type, or a line with too
 * many words.
 */
void readInput(const char* inputFile)
{
FILE* input = fopen(inputFile, "r");
if (input == NULL) {
printf("Can't open file %s\n", inputFile);
exit(-1);
}
u8 firstLine = 1;
const int LINE_SIZE = 100;
char line[LINE_SIZE];
size_t x, y, z;
u8 cellState;
int typeNumber;
size_t i;
size_t wordStart;
u8 inWord;
u8 separator;
char* word;
size_t wordSize;
size_t wordCount;
/* Hand-rolled tokenizer: scan each line character by character and slice
 * out the space/newline-separated words. */
while (fgets(line, LINE_SIZE, input)) {
wordCount = 0;
wordStart = 0;
inWord = 0;
i = 0;
if (firstLine) {
/* First line carries the dimensions and the simulation type. */
while (line[i] != '\0') {
separator = (line[i] == ' ' || line[i] == '\n') ? 1 : 0;
if (inWord) {
if (separator) {
/* End of a word: copy it out and interpret by position. */
inWord = 0;
wordSize = i - wordStart;
word = (char*) malloc(wordSize + 1);
memcpy(word, line + wordStart, (i - wordStart) * sizeof(char));
word[wordSize] = '\0';
switch (wordCount) {
case 0:
hX = (size_t)atoi(word);
break;
case 1:
hY = (size_t)atoi(word);
break;
case 2:
hZ = (size_t)atoi(word);
break;
case 3:
typeNumber = atoi(word);
switch (typeNumber) {
case 1:
hT = life;
break;
case 2:
hT = koNeiman;
break;
case 3:
hT = koNeimanMur;
break;
case 4:
hT = koMur;
break;
default:
printf("Wrong type of simulation: %d\n", typeNumber);
exit(-1);
}
break;
default:
printf("Too much words in a line: %s\n", line);
exit(-1);
}
free(word);
wordCount++;
}
}
else if (!separator) {
/* Start of a new word. */
inWord = 1;
wordStart = i;
}
i++;
}
firstLine = 0;
/* Dimensions are known now: allocate and zero the state cube. */
hStates = (u8***) malloc(hX * sizeof(u8**));
for (x = 0; x < hX; x++) {
hStates[x] = (u8**) malloc(hY * sizeof(u8*));
for (y = 0; y < hY; y++) {
hStates[x][y] = (u8*) malloc(hZ * sizeof(u8));
for (z = 0; z < hZ; z++)
hStates[x][y][z] = 0;
}
}
}
else {
/* Data line: "state x y z"; state 0 is the end-of-input sentinel. */
while (line[i] != '\0') {
separator = (line[i] == ' ' || line[i] == '\n') ? 1 : 0;
if (inWord) {
if (separator) {
inWord = 0;
wordSize = i - wordStart;
word = (char*) malloc(wordSize + 1);
memcpy(word, line + wordStart, (i - wordStart) * sizeof(char));
word[wordSize] = '\0';
switch (wordCount) {
case 0:
cellState = (u8)atoi(word);
if (cellState == 0)
goto stop;
break;
case 1:
x = (size_t)atoi(word);
break;
case 2:
y = (size_t)atoi(word);
break;
case 3:
z = (size_t)atoi(word);
break;
default:
printf("Too much words in a line: %s\n", line);
exit(-1);
}
free(word);
wordCount++;
}
}
else if (!separator) {
inWord = 1;
wordStart = i;
}
i++;
}
/* NOTE(review): coordinates are not range-checked against hX/hY/hZ --
 * a malformed line writes out of bounds. Confirm input is trusted. */
hStates[x][y][z] = cellState;
}
}
stop:
fclose(input);
}
/*
 * Copies the host state cube and simulation parameters to the device and
 * allocates the current/next cell grids plus per-cell neighbor index arrays.
 * NOTE(review): dStates/pdX/... are declared __device__ yet are used here as
 * ordinary host lvalues (cudaMalloc writes straight into them from host
 * code). The portable pattern is host-side pointer variables plus
 * cudaMemcpyToSymbol -- confirm this builds as intended on the target
 * toolchain.
 */
void passStatesToDevice()
{
    size_t i, j;
    cudaMalloc((void**)&dStates, hX * hY * hZ * sizeof(u8));
    /* Copy one z-row at a time: each hStates[i][j] row is a separate host
     * allocation, so the cube cannot be copied in a single memcpy. */
    for (i = 0; i < hX; i++)
        for (j = 0; j < hY; j++)
            cudaMemcpy(&dStates[i * hY * hZ + j * hZ], hStates[i][j], hZ * sizeof(u8), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&pdX, sizeof(size_t));
    cudaMemcpy(pdX, &hX, sizeof(size_t), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&pdY, sizeof(size_t));
    cudaMemcpy(pdY, &hY, sizeof(size_t), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&pdZ, sizeof(size_t));
    cudaMemcpy(pdZ, &hZ, sizeof(size_t), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&pdT, sizeof(type));
    cudaMemcpy(pdT, &hT, sizeof(type), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&pdFullSize, sizeof(size_t));
    size_t size = hX * hY * hZ;
    cudaMemcpy(pdFullSize, &size, sizeof(size_t), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&dCurrent, size * sizeof(cell));
    cudaMalloc((void**)&dNext, size * sizeof(cell));
    u8 neighborSize = 6;
    for (i = 0; i < size; i++) {
        cudaMemcpy(&dCurrent[i].neighborSize, &neighborSize, sizeof(u8), cudaMemcpyHostToDevice);
        cudaMemcpy(&dNext[i].neighborSize, &neighborSize, sizeof(u8), cudaMemcpyHostToDevice);
        size_t* tmp1;
        size_t* tmp2;
        cudaMalloc((void**)&tmp1, neighborSize * sizeof(size_t));
        cudaMalloc((void**)&tmp2, neighborSize * sizeof(size_t));
        /* BUG FIX: store the device pointer VALUE into the struct field.
         * The old code copied neighborSize*sizeof(size_t) bytes (48) into
         * the 8-byte pointer slot with cudaMemcpyDeviceToDevice, clobbering
         * the adjacent cell fields; tmp1/tmp2 live on the host stack, so the
         * source side of the copy is host memory. */
        cudaMemcpy(&dCurrent[i].neighbor, &tmp1, sizeof(size_t*), cudaMemcpyHostToDevice);
        cudaMemcpy(&dNext[i].neighbor, &tmp2, sizeof(size_t*), cudaMemcpyHostToDevice);
    }
}
/* Flattens (x, y, z) to the linear index i = (x*Y + y)*Z + z. */
__device__ void idx3to1(size_t x, size_t y, size_t z, size_t* i)
{
    *i = ((x * (*pdY)) + y) * (*pdZ) + z;
}
/* Splits a linear index back into (x, y, z) coordinates. */
__device__ void idx1to3(size_t i, size_t* x, size_t* y, size_t* z)
{
    const size_t planeSize = (*pdY) * (*pdZ);
    *x = i / planeSize;
    const size_t rem = i % planeSize;
    *y = rem / (*pdZ);
    *z = rem % (*pdZ);
}
/* Wrap-around increment: i+1, or 0 when i is the last valid index. */
__device__ size_t plus(size_t i, size_t* max)
{
    if (i + 1 == *max)
        return 0;
    return i + 1;
}
/* Wrap-around decrement: i-1, or max-1 when i is 0. */
__device__ size_t minus(size_t i, size_t* max)
{
    if (i == 0)
        return *max - 1;
    return i - 1;
}
/*
 * Copies the raw state bytes into both cell grids and records each cell's
 * 6 face-neighbor indices with periodic (wrap-around) boundaries.
 * Single-block launch; each thread strides over the cube by blockDim.x.
 * BUG FIXES:
 *  - the loop bound was `idx <= *pdFullSize`, touching one cell past the
 *    end of every array;
 *  - the old 2x2x2 loop generated 8 corner neighbors and overflowed the
 *    6-element neighbor arrays allocated in passStatesToDevice; the 6
 *    von Neumann face neighbors (matching neighborSize = 6) are stored
 *    instead.
 */
__global__ void transformStatesIntoCells()
{
    int idx = threadIdx.x;
    while (idx < *pdFullSize) {
        dCurrent[idx].state = dNext[idx].state = dStates[idx];
        size_t x, y, z;
        idx1to3(idx, &x, &y, &z);
        /* Wrapped coordinates one step away on each axis. */
        size_t xm = minus(x, pdX), xp = plus(x, pdX);
        size_t ym = minus(y, pdY), yp = plus(y, pdY);
        size_t zm = minus(z, pdZ), zp = plus(z, pdZ);
        size_t coords[6][3] = {
            {xm, y, z}, {xp, y, z},
            {x, ym, z}, {x, yp, z},
            {x, y, zm}, {x, y, zp}
        };
        size_t neighborIdx;
        for (int n = 0; n < 6; n++) {
            idx3to1(coords[n][0], coords[n][1], coords[n][2], &neighborIdx);
            dCurrent[idx].neighbor[n] = dNext[idx].neighbor[n] = neighborIdx;
        }
        idx += blockDim.x;
    }
    __syncthreads();
}
/*
 * One automaton step. Phase 1: count live neighbors from the read grid
 * (dCurrent) and apply the rules into the write grid (dNext): a live cell
 * with fewer than 4 live neighbors dies; a dead cell with 6 or more live
 * neighbors becomes alive. Phase 2: commit dNext back into dCurrent.
 * Single-block launch; __syncthreads() separates the two phases.
 * BUG FIX: both loop bounds were `<=`, reading/writing one cell past the
 * end of the grids.
 */
__global__ void calc()
{
    int idx = threadIdx.x;
    while (idx < *pdFullSize) {
        u8 s = 0;
        int i;
        for (i = 0; i < dCurrent[idx].neighborSize; i++)
            if (dCurrent[dCurrent[idx].neighbor[i]].state)
                s++;
        if (dCurrent[idx].state) {
            if (s < 4)
                dNext[idx].state = 0;
        }
        else {
            if (s >= 6)
                dNext[idx].state = 1;
        }
        idx += blockDim.x;
    }
    __syncthreads();
    idx = threadIdx.x;
    while (idx < *pdFullSize) {
        dCurrent[idx].state = dNext[idx].state;
        idx += blockDim.x;
    }
    __syncthreads();
}
/*
 * Flattens the current cell grid back into the raw state byte array so the
 * host can copy it out. Single-block launch, strided by blockDim.x.
 * BUG FIX: the loop bound was `<=`, touching one element past the end.
 */
__global__ void transformCellsIntoStates()
{
    int idx = threadIdx.x;
    while (idx < *pdFullSize) {
        dStates[idx] = dCurrent[idx].state;
        idx += blockDim.x;
    }
    __syncthreads();
}
/*
 * Runs transformCellsIntoStates on one block of nThreads threads, then
 * copies the device state cube back into the per-row host buffers.
 * The blocking cudaMemcpy on the default stream also serves as the
 * synchronization point for the kernel.
 * NOTE(review): dStates is a __device__ pointer variable but is read here
 * as a host lvalue; the portable form is cudaMemcpyFromSymbol -- confirm
 * the toolchain accepts this.
 */
void getDataFromDevice(size_t nThreads)
{
transformCellsIntoStates<<<1, nThreads>>>();
size_t i, j;
for (i = 0; i < hX; i++)
for (j = 0; j < hY; j++)
cudaMemcpy(hStates[i][j], &dStates[i * hY * hZ + j * hZ], hZ * sizeof(u8), cudaMemcpyDeviceToHost);
}
/* Appends the whole state cube to outputFile, one "state x y z" line per
 * cell, terminated by a "0 0 0 0" sentinel line. Exits on open failure. */
void print(const char* outputFile)
{
    FILE* output = fopen(outputFile, "a");
    if (output == NULL) {
        printf("Can't open file %s\n", outputFile);
        exit(-1);
    }
    size_t x, y, z;
    for (x = 0; x < hX; x++)
        for (y = 0; y < hY; y++)
            for (z = 0; z < hZ; z++)
                fprintf(output, "%d %ld %ld %ld\n", hStates[x][y][z], x, y, z);
    fprintf(output, "0 0 0 0\n");
    fclose(output);
}
/* Pulls the current device state back to the host, then appends it to the
 * output file. */
void printResults(const char* outputFile, size_t nThreads)
{
    getDataFromDevice(nThreads);
    print(outputFile);
}
/*
 * Releases all device and host allocations.
 * BUG FIX: hStates was allocated with malloc(), so the top-level pointer
 * must be released with free(), not cudaFree().
 * NOTE(review): the per-cell neighbor arrays allocated in
 * passStatesToDevice are never freed -- the commented loop below cannot
 * work from host code because dCurrent/dNext are device memory -- so they
 * leak until process exit.
 */
void clean()
{
    cudaFree(dStates);
    cudaFree(pdX);
    cudaFree(pdY);
    cudaFree(pdZ);
    cudaFree(pdT);
    /*size_t i;
    for (i = 0; i < hX * hY * hZ; i++) {
        cudaFree(dCurrent[i].neighbor);
        cudaFree(dNext[i].neighbor);
    }*/
    cudaFree(dCurrent);
    cudaFree(dNext);
    cudaFree(pdFullSize);
    size_t x, y;
    for (x = 0; x < hX; x++) {
        for (y = 0; y < hY; y++)
            free(hStates[x][y]);
        free(hStates[x]);
    }
    free(hStates);
}
/*
 * Top-level driver: loads the input, moves it to the GPU, builds neighbor
 * lists, then runs nSteps of the automaton, appending the state to
 * outputFile every outputInterval steps.
 * All kernels run in a single block of prop.maxThreadsDim[0] threads that
 * stride over the whole cube.
 * NOTE(review): no kernel launch or CUDA API result is ever checked.
 */
void gameOfLife(const char* inputFile, int nSteps, int outputInterval, const char* outputFile)
{
readInput(inputFile);
printf("Input file has been read\n");
passStatesToDevice();
printf("States have been copied to device\n");
int device;
cudaGetDevice(&device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
transformStatesIntoCells<<<1, prop.maxThreadsDim[0]>>>();
printf("Neighbors have been set\n");
/* Truncate the output file, then append the initial state via print(). */
FILE* output = fopen(outputFile, "w");
if (output == NULL) {
printf("Can't open file %s\n", outputFile);
exit(-1);
}
fclose(output);
print(outputFile);
printf("Output\n");
int i;
for (i = 1; i <= nSteps; i++) {
calc<<<1, prop.maxThreadsDim[0]>>>();
printf("Step %d\n", i);
if (i % outputInterval == 0) {
printResults(outputFile, prop.maxThreadsDim[0]);
printf("Output\n");
}
}
clean();
printf("Memory has been set free\n");
}
/* Entry point. Usage: prog inputFile nSteps outputInterval outputFile */
int main(int argc, const char * argv[])
{
    if (argc != 5) {
        printf("Usage: %s inputFile nSteps outputInterval outputFile\n", argv[0]);
        return 0;
    }
    const char* inputFile = argv[1];
    const char* outputFile = argv[4];
    gameOfLife(inputFile, atoi(argv[2]), atoi(argv[3]), outputFile);
    return 0;
}
|
615
|
#include "includes.h"
/*
 * Block-wide sum reduction with sequential addressing: each block sums
 * blockDim.x elements of g_in into g_out[blockIdx.x].
 * Requires blockDim.x * sizeof(float) bytes of dynamic shared memory and a
 * power-of-two block size (the halving schedule assumes it).
 */
__global__ void reduction_kernel_2(float *g_out, float *g_in, unsigned int size)
{
    extern __shared__ float partial[];
    const unsigned int tid = threadIdx.x;
    const unsigned int gid = blockIdx.x * blockDim.x + tid;
    // Stage one element per thread; pad the out-of-range tail with zeros.
    partial[tid] = (gid < size) ? g_in[gid] : 0.f;
    __syncthreads();
    // Halve the active range each pass; contiguous addressing keeps the
    // shared-memory accesses conflict-free.
    for (unsigned int active = blockDim.x >> 1; active > 0; active >>= 1)
    {
        if (tid < active)
            partial[tid] += partial[tid + active];
        __syncthreads();
    }
    if (tid == 0)
        g_out[blockIdx.x] = partial[0];
}
|
616
|
#include <algorithm>
#include <cctype>
#include <chrono>
#include <cstdio>
#include <iostream>
#include <limits>
#include <random>
#include <vector>

#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
__device__ int cost_index[16] = { 1,-1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,-1,-1,1 };
#define LENGTH_REFERENCE 40000
#define LENGTH_SEQUENCE 500
#define NUM_SEQUENCES 2
__device__
int ascii_2_index[128];
int ascii_to_index[128];
char index_to_ascii[4];
char* reference_string;
char* sequence_strings;
int* offset_cost;
int* matrix;
int seq_length[NUM_SEQUENCES];
/* Three-way integer maximum. */
__device__
int max(int a, int b, int c) {
    int m = (a > b) ? a : b;
    return (m > c) ? m : c;
}
/* Column-major flattening for a LENGTH_SEQUENCE-tall matrix: row i, col j. */
int get1Dindex(int i, int j) {
    return i + j * LENGTH_SEQUENCE;
}
/* Column-major flattening: sequence position = row, reference offset = column. */
__device__
int get1Dindex(int seq_pos, int ref_pos, int len_seq) {
    return seq_pos + ref_pos * len_seq;
}
/* Substitution score via the 4x4 cost_index table: +1 on a base match,
 * -1 otherwise. */
__device__
int getCost(char a, char b) {
    const int row = ascii_2_index[a];
    const int col = ascii_2_index[b];
    return cost_index[row + 4 * col];
}
/* Host-side map from base character to table index: C,G,A,T -> 0..3. */
void initIndexes()
{
    const char bases[4] = {'C', 'G', 'A', 'T'};
    for (int k = 0; k < 4; k++)
        ascii_to_index[(int)bases[k]] = k;
}
/* Device-side base->index map (C,G,A,T -> 0..3); launched <<<1,1>>>. */
__global__
void initIndexes_device()
{
    const char bases[4] = {'C', 'G', 'A', 'T'};
    for (int k = 0; k < 4; k++)
        ascii_2_index[(int)bases[k]] = k;
}
/*
 * Builds the random reference string (LENGTH_REFERENCE bases, NUL
 * terminated) in CUDA managed memory, fills the index->base table, and
 * launches the device-side index-table init.
 * Uses rand() without a visible srand(), so the reference is the same on
 * every run.
 * NOTE(review): the host writes managed memory while initIndexes_device may
 * still be in flight; on pre-Pascal GPUs that is invalid -- a
 * cudaDeviceSynchronize() here would be safer. Confirm target arch.
 */
void initReference()
{
cudaMallocManaged(&reference_string, (LENGTH_REFERENCE + 1) * sizeof(char));
index_to_ascii[0] = 'C';
index_to_ascii[1] = 'G';
index_to_ascii[2] = 'A';
index_to_ascii[3] = 'T';
initIndexes_device<<<1,1>>>();
/* Random string */
for (int i = 0; i < LENGTH_REFERENCE; i++)
{
int ir = rand() % 4;
reference_string[i] = index_to_ascii[ir];
}
reference_string[LENGTH_REFERENCE] = '\0';
}
/*
 * Generates NUM_SEQUENCES test reads by copying random windows of the
 * reference and injecting mutations: ~2.2% chance of an insertion run
 * (length 1-4), ~2.2% substitutions, ~2.2% deletions per position.
 * Prints an aligned trace of each read (lower case = substitution,
 * '+' = insertion, '_' = deletion), records the mutated lengths in
 * seq_length, and copies the concatenated reads into the managed buffer
 * sequence_strings.
 */
void initSequences()
{
long total_offset = 0;
long total_matrix_size = 0;
std::vector<char> ref;
std::vector<char> seq;
/* Scratch buffer for all reads; 2x headroom absorbs insertions. */
char sequences[LENGTH_SEQUENCE * NUM_SEQUENCES * 2];
for (int ix = 0; ix < NUM_SEQUENCES; ix++)
{
int ref_offset = rand() % (LENGTH_REFERENCE - LENGTH_SEQUENCE);
int subs = 0;
int ins = 0;
int dels = 0;
int length = 0;
std::cout << "Offset for sequence " << ix + 1 << " = " << ref_offset << "\n";
for (int i = ref_offset; i < ref_offset + LENGTH_SEQUENCE; i++)
{
int i_rand = rand() % 1000;
if (i_rand < 22)
{
/* insertion of random length < 5 */
int i_len = rand() % 4 + 1;
for (int j = 0; j < i_len; j++)
{
sequences[length + total_offset] = index_to_ascii[rand() % 4];
ref.push_back('+');
seq.push_back(sequences[length + total_offset]);
length++;
ins++;
}
/* The reference base itself is kept after the inserted run. */
sequences[length + total_offset] = reference_string[i];
ref.push_back(reference_string[i]);
seq.push_back(reference_string[i]);
length++;
}
else if (i_rand < 44)
{
/* substitution: pick one of the three other bases */
int inew = rand() % 3;
/* Lower case denotes substitution */
ref.push_back(std::tolower(reference_string[i]));
switch (reference_string[i])
{
case 'A':
sequences[length + total_offset] = index_to_ascii[inew == 2 ? 3 : inew];
break;
case 'T':
sequences[length + total_offset] = index_to_ascii[inew];
break;
case 'C':
sequences[length + total_offset] = index_to_ascii[inew + 1];
break;
case 'G':
sequences[length + total_offset] = index_to_ascii[inew == 1 ? 0 : inew];
break;
}
seq.push_back(std::tolower(sequences[length + total_offset]));
length++;
subs++;
}
else if (i_rand < 66)
{
/* deletion: the reference base is skipped in the read */
ref.push_back(reference_string[i]);
seq.push_back('_');
dels++;
}
else
{
/* unchanged copy of the reference base */
sequences[length + total_offset] = reference_string[i];
ref.push_back(reference_string[i]);
seq.push_back(reference_string[i]);
length++;
}
}
seq_length[ix] = length;
total_offset += length;
total_matrix_size += (length + 1) * (LENGTH_REFERENCE - length) * 2;
ref.push_back('\0');
seq.push_back('\0');
std::cout << "Sequence " << ix + 1 << ": ";
std::cout << subs << " subs, ";
std::cout << dels << " dels, ";
std::cout << ins << " ins" << "\n";
std::cout << ref.data() << "\n";
std::cout << seq.data() << "\n";
ref.clear();
seq.clear();
}
cudaMallocManaged(&sequence_strings, sizeof(char) * total_offset);
::memcpy(sequence_strings, sequences, total_offset);
}
//innermost function - executed by one thread
/*
 * Advances one DP column in place for a single candidate alignment.
 * On entry column_values[i] holds row i of the previous column; on exit it
 * holds row i of the current column. Boundary values for column `col` are
 * -col (diagonal) and -col-1 (cell above). Each cell takes the max of:
 *   above + gap, diagonal + substitution score, left + gap   (gap = -1).
 */
__device__
void calculate_column(int* column_values, int col_height, char ref, char* seq, int col)
{
int cost_del = -1;
int tr = -col - 1;
int tl = -col;
for (int i = 0; i < col_height; i++)
{
char s = seq[i];
int cost_m = getCost(s, ref) + tl;
/* tl becomes the previous-column value of this row (next row's diagonal). */
tl = column_values[i];
column_values[i] = max(tr + cost_del, cost_m , column_values[i] + cost_del);
/* tr becomes the freshly computed value (next row's "above" cell). */
tr = column_values[i];
}
}
/* Runs the full DP (one column per reference character) for a single
 * candidate alignment offset. */
__device__
void calculate_offset_cost(int* column_values, int col_height, char* ref, char* seq, int ref_offset)
{
    int col = 0;
    while (col < col_height)
    {
        calculate_column(column_values, col_height, ref[ref_offset + col], seq, col);
        ++col;
    }
}
/* One thread per candidate offset: initializes that offset's DP column to
 * the gap penalties -1, -2, ..., -len_seq. (seq is unused but kept for
 * signature compatibility.) */
__global__
void init_matrix(int* matrix, char* seq, int len_seq)
{
    const int ref_pos = threadIdx.x + blockDim.x * blockIdx.x;
    if (ref_pos >= LENGTH_REFERENCE - len_seq)
        return;
    const int base = get1Dindex(0, ref_pos, len_seq);
    for (int seq_pos = 0; seq_pos < len_seq; seq_pos++)
        matrix[base + seq_pos] = -(seq_pos + 1);
}
/*
 * One thread per candidate offset: runs the full DP for that offset on its
 * private column slice of `matrix` (starting at get1Dindex(0, ref_pos,
 * len_seq)). Threads never share DP state, so no synchronization is needed.
 */
__global__
void calculate_cost_per_offset(int* matrix, char* ref, char* seq, int len_seq)
{
int ref_pos = threadIdx.x + blockDim.x * blockIdx.x;
if (ref_pos < LENGTH_REFERENCE - len_seq)
{
//for each possible alignment compute the cost for the current column
int matrix_offset = get1Dindex(0, ref_pos, len_seq);
calculate_offset_cost(matrix + matrix_offset, len_seq, ref, seq, ref_pos);
}
}
//outermost function that computes the optimal alignment
/*
 * Scores every candidate offset of seq against the reference on the GPU
 * (managed-memory matrix, one DP column per offset) and returns the offset
 * whose final DP row (index len_seq-1) has the highest score.
 * NOTE(review): 256x256 threads covers only 65536 offsets; with
 * LENGTH_REFERENCE - len_seq < 65536 here that is enough, but the launch
 * configuration is not derived from the problem size -- confirm if sizes
 * change. No CUDA error checking is performed.
 */
int calculate_alignment(char* ref, char* seq, int len_seq)
{
int num_blocks = 256;
int num_threads = 256;
int matrix_size = (LENGTH_REFERENCE - len_seq) * (len_seq + 1);
int* matrix;
cudaMallocManaged(&matrix, sizeof(int) * matrix_size);
init_matrix<<<num_blocks, num_threads>>>(matrix, seq, len_seq);
calculate_cost_per_offset<<<num_blocks, num_threads>>>(matrix, ref, seq, len_seq);
/* Block until both kernels finish before the host reads managed memory. */
cudaDeviceSynchronize();
int max_cost = matrix[len_seq - 1];
int offset = 0;
/* Scan the last DP row of every column for the best-scoring offset. */
for (int ref_pos = 1; ref_pos < LENGTH_REFERENCE - len_seq; ref_pos++)
{
const int index = ref_pos * len_seq + len_seq - 1;
const int cost = matrix[index];
if (cost > max_cost) {
max_cost = cost;
offset = ref_pos;
}
}
cudaFree(matrix);
return offset;
}
/*
 * Builds a random reference and NUM_SEQUENCES mutated reads, aligns each
 * read against the reference on the GPU, and reports the best offset plus
 * the total wall-clock time.
 * NOTE(review): the "Optimal cost of" message prints a literal 0 --
 * calculate_alignment returns only the offset, not the cost.
 */
int main()
{
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
long offset = 0;
int seq_offset = 0;
initIndexes();
initReference();
initSequences();
/* Ensure initIndexes_device and managed-memory writes are visible. */
cudaDeviceSynchronize();
for (int i = 0; i < NUM_SEQUENCES; i++)
{
offset = calculate_alignment(reference_string, sequence_strings + seq_offset, seq_length[i]);
seq_offset += seq_length[i];
std::cout << "Optimal cost of " << 0 << " found at offset " << offset << "\n";
}
cudaFree(sequence_strings);
cudaFree(reference_string);
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> time_span = t2 - t1;
std::cout << "It took " << time_span.count() << " milliseconds.\n";
return 0;
}
|
617
|
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/*
Instruções
COMPILAR --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencvKarma.cu -o go `pkg-config --cflags --libs opencv` -w
EXECUTAR --> ./go DOMAIN_DIMS STENCIL_ORDER SPACE_TIME_BLOCK_TIMES BLOCK_DIM_X BLOCK_DIM_Y
*/
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <math.h>
#include <string>
using namespace std;
//===> CONSTANTES karma model <===//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 96
#endif
#ifndef BLOCK_TIMES
#define BLOCK_TIMES 1
#endif
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
#define DT 0.05f
#define DX (12.0f / MODEL_WIDTH)
#define MODELSIZE_X (MODEL_WIDTH)
#define MODELSIZE_Y (MODEL_WIDTH)
#define MODELSIZE_Z 1
#define MODELSIZE2D ( MODELSIZE_X*MODELSIZE_Y )
#ifndef BLOCKDIM_X
#define BLOCKDIM_X 32
#endif
#ifndef BLOCKDIM_Y
#define BLOCKDIM_Y 32
#endif
#define BLOCKDIM_Z 1
#define BLOCKDIM2D ( BLOCKDIM_X*BLOCKDIM_Y )
//==> CUDA GRID <==//
#define GRIDDIM_X ( ( MODELSIZE_X / BLOCKDIM_X ) + ( ( MODELSIZE_X % BLOCKDIM_X ) > 0 ) )
#define GRIDDIM_Y ( ( MODELSIZE_Y / BLOCKDIM_Y ) + ( ( MODELSIZE_Y % BLOCKDIM_Y ) > 0 ) )
#define GRIDDIM_Z 1
#define SHARED_TAM ((BLOCKDIM_X + (2 * BLOCK_TIMES)) * (BLOCKDIM_Y + (2 * BLOCK_TIMES)))
#define SHARED_DX (BLOCKDIM_X + (2 * BLOCK_TIMES))
#define SHARED_DY (BLOCKDIM_Y + (2 * BLOCK_TIMES))
/*
Função somente da GPU que recebe os parametros para o calculo de um stencil
d_e - dado de entrada
d_r - dado de saida
d_v - campo que deve ser atualizado
c_coeff - variável utilizada para armazenar o valores dos coeficcientes do stencil (utilizada apenas na versão com stencil simples usado anteriormente)
X - Y - Dimensões das estruturas de entrada
k - ordem do stencil
x -y - posição do centro do stencil na estrutura de entrada
GX - Dimensão horizontal da estrutura do dado de saída
Gx - Gy posição do centro do stencil na estrutura de saida
*/
/*
 * One Karma-model update for the cell at (x, y) of the X-wide input buffer
 * d_e: writes the new excitation into d_r at (Gx, Gy) of the GX-wide output
 * buffer and updates the recovery field d_v in place at the input index.
 * Uses a 5-point Laplacian and an explicit forward-Euler step (DT, DX, gam).
 */
__forceinline__ __device__ void _2Dstencil_(float *d_e, float *d_r, float *d_v, int X, int x, int y, int GX, int Gx, int Gy)
{
int h_e_i = x + (y * (X));
float temp = d_e[h_e_i];
float rv = d_v[h_e_i];
/* Reaction terms: recovery derivative dv and excitation derivative du. */
float Rn = (1.0f / (1.0f - expf(-Re))) - rv;
float p = (temp > En) * 1.0f;
float dv = (Rn * p - (1.0f - p) * rv) / tauN;
float Dn = rv * rv;
float hE = (1.0f - tanh(temp - Eh)) * temp * temp / 2.0f;
float du = (((East - Dn) * hE) - temp) / tauE;
/* Discrete 5-point Laplacian of the excitation field. */
float xlapr = d_e[(x + 1) + ((y) * (X))] - temp;
float xlapl = temp - d_e[(x - 1) + ((y) * (X))];
float xlapf = d_e[(x) + ((y + 1) * (X))] - temp;
float xlapb = temp - d_e[(x) + ((y - 1) * (X))];
float lap = xlapr - xlapl + xlapf - xlapb;
/* Forward-Euler integration; gam/DX^2 scales the diffusion term. */
temp = (temp + (du * DT) + (lap * DT * gam / (DX * DX)));
d_v[h_e_i] = rv + dv * DT;
h_e_i = Gx + ((Gy) * (GX));
d_r[h_e_i] = temp;
}
/*
função chamada pelo host que controla as cópias e a ordem do calculo dos stencils bem como a carga para cada thread
, MODELSIZE_X, MODELSIZE_Y, BLOCK_TIMES
int X, int Y, int times
*/
/*
 * Time-space-blocked stencil kernel: each block loads a halo-padded tile
 * (halo width BLOCK_TIMES) into dynamic shared memory, advances it
 * BLOCK_TIMES time steps locally, and writes only the final step back to
 * global memory. Requires 3 * SHARED_TAM * sizeof(float) dynamic shared
 * bytes (input tile, result tile and recovery-field tile).
 */
__global__ void _2Dstencil_global(float *d_e, float *d_r, float *d_v)
{
int x, y; //,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x * BLOCKDIM_X);
y = threadIdx.y + (blockIdx.y * BLOCKDIM_Y);
extern __shared__ float sharedOrig[];
int blockThreadIndex = threadIdx.x + threadIdx.y * BLOCKDIM_X;
float * shared = sharedOrig;
float * sharedRes = shared + SHARED_TAM;
float * sharedV = sharedRes + SHARED_TAM;
/*
Loads the halo-padded shared-memory tile needed for the requested number of
blocked time steps. A strided loop is used because the tile has more
elements than the block has threads; out-of-range global coordinates are
clamped to the domain border.
*/
for (int stride = blockThreadIndex; stride < SHARED_TAM; stride += (BLOCKDIM_X * BLOCKDIM_Y))
{
int sharedIdxX = stride % SHARED_DX;
int sharedIdxY = int(stride / SHARED_DX);
int globalIdxX = (blockIdx.x * BLOCKDIM_X) + sharedIdxX - BLOCK_TIMES;
int globalIdxY = (blockIdx.y * BLOCKDIM_Y) + sharedIdxY - BLOCK_TIMES;
int globalIdx = globalIdxX + (-1*globalIdxX)*(globalIdxX < 0) - (globalIdxX-MODELSIZE_X+1)*(globalIdxX >= MODELSIZE_X) + (globalIdxY + (-1*globalIdxY)*(globalIdxY < 0) - (globalIdxY-MODELSIZE_Y+1)*(globalIdxY >= MODELSIZE_Y)) * MODELSIZE_X;
shared[stride] = d_e[globalIdx];
sharedV[stride] = d_v[globalIdx];
}
__syncthreads();
/*
Advances every time step except the last entirely in shared memory; the
active tile shrinks by one halo ring per step, and the input/result tiles
are ping-ponged between steps.
*/
for (int t = 1; t < BLOCK_TIMES; t++)
{
int tDx = BLOCKDIM_X + ((BLOCK_TIMES - t) * 2);
int tDy = BLOCKDIM_Y + ((BLOCK_TIMES - t) * 2);
int tk2 = (t);
int tSharedTam = tDx * tDy;
for (int stride = blockThreadIndex; stride < tSharedTam; stride += (BLOCKDIM_X * BLOCKDIM_Y))
{
int tempX = (stride % tDx) + tk2;
int tempY = (int(stride / tDx)) + tk2;
_2Dstencil_(shared, sharedRes, sharedV, SHARED_DX, tempX, tempY, SHARED_DX, tempX, tempY);
}
float * temp = shared;
shared = sharedRes;
sharedRes = temp;
__syncthreads();
}
/*
Computes the final time step, writing results directly to global memory.
*/
_2Dstencil_(shared, d_r, sharedV, SHARED_DX, ((x%(BLOCKDIM_X))+BLOCK_TIMES), ((y%(BLOCKDIM_Y))+BLOCK_TIMES), MODELSIZE_X, x, y);
int globalIdx = x + y * MODELSIZE_X;
int sharedIdx = ((x%(BLOCKDIM_X))+BLOCK_TIMES) + ((y%(BLOCKDIM_Y))+BLOCK_TIMES)*SHARED_DX;
d_v[globalIdx] = sharedV[sharedIdx];
}
/*
 * Host driver for the Karma-model stencil:
 *  - reads the initial excitation field from "entrada.txt",
 *  - runs globalTimes/BLOCK_TIMES launches of the shared-memory kernel,
 *    ping-ponging d_e/d_r between launches,
 *  - prints the elapsed GPU time in milliseconds and optionally dumps the
 *    final field to "resultado.txt".
 * argv[1]: total number of time steps; argv[2]: 1 to write the result file.
 */
int main(int argc, char *argv[])
{
    bool resultado = false;
    float *h_e, *h_v;
    float *d_e, *d_r, *d_v;
    int sharedSize;
    int globalTimes = 1;
    /* Command-line parameters */
    if (argc > 1)
    {
        globalTimes = atoi(argv[1]);
    }
    if (argc > 2)
    {
        resultado = atoi(argv[2]) == 1;
    }
    /* Allocations and grid/block configuration */
    dim3 grid_dim(GRIDDIM_X, GRIDDIM_Y, GRIDDIM_Z);
    dim3 block_dim(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z);
    sharedSize = SHARED_TAM * sizeof(float) * 3; /* input + result + V tiles */
    h_e = (float *)malloc(MODELSIZE2D * sizeof(float));
    h_v = (float *)malloc(MODELSIZE2D * sizeof(float));
    cudaMalloc(&d_e, MODELSIZE2D * sizeof(float));
    cudaMalloc(&d_r, MODELSIZE2D * sizeof(float));
    cudaMalloc(&d_v, MODELSIZE2D * sizeof(float));
    /* Read the initial domain; the recovery field V starts at 0.5 everywhere. */
    FILE *arq;
    arq = fopen("entrada.txt", "rt");
    if (arq == NULL)
    {
        /* BUG FIX: the missing check made fscanf crash on a NULL stream
         * when the input file is absent. */
        fprintf(stderr, "Failed to open entrada.txt\n");
        cudaFree(d_e);
        cudaFree(d_r);
        cudaFree(d_v);
        std::free(h_e);
        std::free(h_v);
        return 1;
    }
    for (int i = 0; i < MODELSIZE_X; i++)
        for (int j = 0; j < MODELSIZE_Y; j++)
        {
            h_v[i + j * MODELSIZE_X] = 0.5f;
            int temp;
            fscanf(arq, " %d", &temp);
            h_e[i + j * MODELSIZE_X] = temp;
        }
    fclose(arq);
    cudaMemcpy(d_v, h_v, MODELSIZE2D * sizeof(float), cudaMemcpyHostToDevice);
    /* Copy the excitation field to the GPU. */
    cudaMemcpy(d_e, h_e, MODELSIZE2D * sizeof(float), cudaMemcpyHostToDevice);
    /* Start the GPU timer. */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /******************
    *** Kernel Call ***
    *******************/
    /* Each launch advances BLOCK_TIMES steps inside shared memory, so
     * globalTimes/BLOCK_TIMES launches cover all requested steps. */
    for (int i = 0; i < globalTimes / BLOCK_TIMES; i++)
    {
        _2Dstencil_global<<<grid_dim, block_dim, sharedSize>>>(d_e, d_r, d_v);
        float *temp = d_e;
        d_e = d_r;
        d_r = temp;
    }
    /* Check for launch errors. */
    cudaError_t err = cudaSuccess;
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("-1");
        cudaFree(d_e);
        cudaFree(d_r);
        cudaFree(d_v);
        std::free(h_e);
        std::free(h_v);
        fprintf(stderr, "Failed to launch _2Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    cudaDeviceSynchronize();
    /* Stop the timer and report elapsed milliseconds. */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("%.5f", elapsedTime);
    /* Copy the result back to the host. */
    cudaMemcpy(h_e, d_e, MODELSIZE2D * sizeof(float), cudaMemcpyDeviceToHost);
    /* Optional dump of the final field. */
    if (resultado)
    {
        arq = fopen("resultado.txt", "wt");
        if (arq != NULL) /* robustness: skip the dump rather than crash */
        {
            for (int i = 0; i < MODELSIZE_X; i++)
            {
                for (int j = 0; j < MODELSIZE_Y; j++)
                {
                    fprintf(arq, " %6.4f", h_e[i + j * MODELSIZE_X]);
                }
                fprintf(arq, "\n");
            }
            fclose(arq);
        }
    }
    cudaFree(d_e);
    cudaFree(d_r);
    cudaFree(d_v);
    std::free(h_e);
    std::free(h_v);
    return 0;
} /* main */
|
618
|
#include "includes.h"
/*
 * Transposes the N x N matrix d_a into d_b (N presumably a compile-time
 * constant from includes.h -- TODO confirm).
 * ROBUSTNESS FIX: added a bounds guard -- grids rarely divide N exactly,
 * and the extra tail threads previously read/wrote out of bounds.
 */
__global__ void transposedMatrixKernel(int* d_a, int* d_b) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i < N && j < N)
        d_b[i * N + j] = d_a[j * N + i];
}
|
619
|
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <algorithm>
#define num 25
// Element-wise vector add d_c = d_a + d_b with a grid-stride loop, so any
// launch configuration covers all N elements. Debug printfs left in place.
__global__ void gpuAdd(int *d_a, int *d_b, int* d_c, int N=num)
{
    printf("%d -- ", threadIdx.x);
    int sum=0; // leftover accumulator from a commented-out experiment
    const int stride = blockDim.x * gridDim.x;
    for (int tid = blockIdx.x*blockDim.x + threadIdx.x; tid < N; tid += stride)
    {
        d_c[tid] = d_a[tid] + d_b[tid];
        printf("#[%d,%d] %d + %d = %d\n", tid,threadIdx.x, d_a[tid], d_b[tid], d_c[tid]);
    }
}
// Host reference implementation: h_c[i] = h_a[i] + h_b[i] for i in [0, N).
void cpuAdd(std::vector<int> &h_a, std::vector<int> &h_b, std::vector<int> &h_c, int N=num)
{
    int i = 0;
    while (i < N) {
        h_c[i] = h_a[i] + h_b[i];
        ++i;
    }
}
/*
 * Interactive driver: reads N from stdin, fills a[i]=i and b[i]=i*i, adds
 * them on the GPU with 5 blocks x 7 threads (the kernel's grid-stride loop
 * covers any N), and prints the wall-clock launch+sync time.
 * NOTE(review): d_c is never copied back to the host (the copy and the
 * printout below are commented out), so the device result is unused.
 * NOTE(review): "N is" prints the compile-time macro `num`, not the N the
 * user just entered -- confirm which was intended.
 */
int main(void)
{
int N;
std::cout << "N?";
std::cin >> N;
// int N=num;
std::cout << "N is " << num << "\n";
int *d_a, *d_b, *d_c;//device pointer to store answer
std::cout <<"Device allocate.. ";
cudaMalloc((void**)&d_a, N*sizeof(int));
cudaMalloc((void**)&d_b, N*sizeof(int));
cudaMalloc((void**)&d_c, N*sizeof(int));
// std::vector<int> h_a(N), h_b(N), h_c(N);
int
*h_a = (int*)malloc(N*sizeof(int)),
*h_b = (int*)malloc(N*sizeof(int)),
*h_c = (int*)malloc(N*sizeof(int));
std::cout << "Allocated\n";
for(int i=0; i<N; i++)
{
h_a[i] = i;
h_b[i] = i * i;
}
std::cout << "Finished!!!\n";
//copy host to device
cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N*sizeof(int), cudaMemcpyHostToDevice);
std::cout << "Ported to device\n";
/* Timing covers launch + full device sync, not just kernel time. */
clock_t start, end;
start = clock();
gpuAdd <<<5, 7>>> (d_a, d_b, d_c, N);
cudaDeviceSynchronize();
end = clock();
std:: cout << "GPU time: " << (double)(end-start)/ CLOCKS_PER_SEC <<'\n';
// cudaMemcpy(h_c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost);
// std::for_each(h_c, h_c+N, [](int x){
// std::cout << x << "\n";
// });
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
//N?25
//N is 1000000
//Device allocate.. Allocated
//Finished!!!
//Ported to device
//tid #[0] x #[0]
//tid #[1] x #[1]
//tid #[2] x #[2]
//tid #[3] x #[3]
//tid #[4] x #[4]
//tid #[10] x #[0]
//tid #[11] x #[1]
//tid #[12] x #[2]
//tid #[13] x #[3]
//tid #[14] x #[4]
//tid #[5] x #[0]
//tid #[6] x #[1]
//tid #[7] x #[2]
//tid #[8] x #[3]
//tid #[9] x #[4]
//tid #[15] x #[0]
//tid #[16] x #[1]
//tid #[17] x #[2]
//tid #[18] x #[3]
//tid #[19] x #[4]
//tid #[20] x #[0]
//tid #[21] x #[1]
//tid #[22] x #[2]
//tid #[23] x #[3]
//tid #[24] x #[4]
//GPU time: 0.000183
|
620
|
#include "includes.h"
//header files included
//declaring the tile width and height
//for tile based matrix multiplication
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
//Namespace for std
using namespace std;
//structure declaration for storing rows and columns for a matrix
//POD holding the dimensions of a (column-major) matrix.
struct matrix{
unsigned int rows; //storing rows of a matrix
unsigned int cols; //storing columns of a matrix
};
//handlerror declaration : to display file and line numbers of erroneous lines
/*
 * Tiled matrix multiply: array3 = array1 * array2, all buffers column-major.
 * array1 is rows1 x cols1, array2 is rows2 x cols2; cols1 == rows2 is
 * assumed (not checked). Launch with TILE_WIDTH x TILE_HEIGHT thread blocks
 * covering a rows1 x cols2 result; out-of-range tile elements are padded
 * with zeros so partial tiles are handled correctly.
 */
__global__ void matrix_mult(float* array1, unsigned int rows1, unsigned int cols1, float* array2, unsigned int rows2, unsigned int cols2, float* array3)
{
//shared memory takes one tile at a time
__shared__ float S1[TILE_WIDTH][TILE_HEIGHT]; //to store tiles for array 1
__shared__ float S2[TILE_HEIGHT][TILE_WIDTH]; //to store tiles for array 2
//threads x and y index for the current block
unsigned int tx=threadIdx.x;
unsigned int ty=threadIdx.y;
unsigned int c=blockIdx.x*blockDim.x + threadIdx.x; //row value using x-index of current thread
unsigned int r=blockIdx.y*blockDim.y + threadIdx.y; //column value using y-index of current thread
unsigned int idx=c*rows1+r; //column major index, using row and column value
float val=0; //register to store multiplication result initialized to zero
for(int m=0; m<1+((rows2-1)/TILE_WIDTH);m++) //going over all tiles one by one, with each m
{
int var1=m*TILE_WIDTH+tx ; //x thread value for current tile
int var2=m*TILE_WIDTH+ty ; //y thread value for current tile
//copying a tile from array1
if (r < rows1 && var1 < rows2) //if the value is associated to a valid matrix coordinate in array1 then store it to shared memory S1
S1[ty][tx]=array1[r + var1*rows1];//storing a "valid" value from array to shared memory
else
S1[ty][tx]=0; //storing zero, since there is no valid value
__syncthreads(); //syncing all threads once shared memory S1 is stored
//copying a tile from array2
if(c < cols2 && var2 < rows2) //if value is associates to a valid matrix coordinate in array2 then store it to shared memory S2
S2[ty][tx]=array2[var2+rows2*c]; //storing the valid value
else
S2[ty][tx]=0; //storing zero, since no valid value
__syncthreads(); //synchronizing threads
for(int i=0; i<TILE_WIDTH;i++) //going over entire tile, ty row in S1 and tx column in S2
val+=S1[ty][i]*S2[i][tx]; //and multiplying elements
__syncthreads(); //synchronizing threads before the next tile overwrites S1/S2
}
if(r < rows1 && c< cols2) //removing degenerate cases
array3[idx]=val; //saving multiplication result to global memory
}
|
621
|
#include <stdio.h>
/*
 * Queries and prints the driver/runtime versions and the key hardware
 * properties of CUDA device 0.
 * BUG FIX: the GByte conversion called pow() although only <stdio.h> is
 * included; a constant expression replaces it so the file compiles as-is.
 */
int main(void)
{
    int dev = 0;
    cudaSetDevice(dev);
    int driverVersion = 0, runtimeVersion = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties( &deviceProp, dev );
    printf("Device %d; \"%s\"\n", dev, deviceProp.name);
    cudaDriverGetVersion( &driverVersion );
    cudaRuntimeGetVersion( &runtimeVersion );
    /* Versions are encoded as 1000*major + 10*minor. */
    printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n",
        driverVersion/1000, (driverVersion%100)/10,
        runtimeVersion/1000, (runtimeVersion%100)/10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
        deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu bytes)\n",
        (float)deviceProp.totalGlobalMem/(1024.0*1024.0*1024.0),
        (unsigned long long)deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n",
        deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f Mhz\n",
        deviceProp.memoryClockRate * 1e-3f);
    printf(" Total amount of shared memory per block: %lu bytes\n",
        deviceProp.sharedMemPerBlock);
    printf(" Total numer of registers available per block: %d\n",
        deviceProp.regsPerBlock);
    printf(" Warp size: %d\n",
        deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
        deviceProp.maxThreadsPerMultiProcessor);
    printf(" maximum number of threads per block: %d\n",
        deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
        deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
        deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
        deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
        deviceProp.maxGridSize[2]);
    return 0;
}
|
622
|
/* Rank-1 update of a 16-element register strip: c[i] += a * b[i] for
 * i in [0, 16). The pragma fully unrolls the loop, matching the original
 * hand-unrolled sequence of 16 fused multiply-adds. */
__device__ void saxpy( float a, float *b, float *c )
{
    #pragma unroll
    for (int i = 0; i < 16; ++i)
        c[i] += a * b[i];
}
/*
 * SGEMM "NT" kernel: updates a 64x16 tile of C via rank-4 updates staged
 * through shared memory. Block shape is presumably 16x4 threads (id runs
 * 0..63) -- TODO confirm against the host launch; each thread accumulates
 * a 16-element strip of C (one row across 16 columns) in registers.
 * Software-pipelined: the next 4 A values and B value are fetched while
 * the current rank-4 update is computed.
 * Preconditions implied by the indexing: k is a multiple of 4 and the
 * matrices accommodate the 64/16 tile offsets (not checked).
 */
extern "C" __global__ void sgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += iby + inx + __mul24( iny, ldb );
C += ibx + id + __mul24( iby, ldc );
/* Prefetch the first panel into registers. */
float a[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
float b = B[0];
const float *Blast = B + k*ldb;
A += 4*lda;
B += 4*ldb;
__shared__ float bs[4][16];
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
do
{
float as[4] = {a[0], a[1], a[2], a[3]};
bs[iny][inx] = b;
__syncthreads();
/* Fetch the next panel while computing with the current one. */
a[0] = A[0*lda];
a[1] = A[1*lda];
a[2] = A[2*lda];
a[3] = A[3*lda];
b = B[0];
saxpy( as[0], &bs[0][0], c );
saxpy( as[1], &bs[1][0], c );
saxpy( as[2], &bs[2][0], c );
saxpy( as[3], &bs[3][0], c );
A += 4*lda;
B += 4*ldb;
__syncthreads();
} while( B < Blast );
/* Drain the last prefetched panel. */
bs[iny][inx] = b;
__syncthreads();
saxpy( a[0], &bs[0][0], c );
saxpy( a[1], &bs[1][0], c );
saxpy( a[2], &bs[2][0], c );
saxpy( a[3], &bs[3][0], c );
/* Scale-and-accumulate the register strip into C, one column per step. */
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
/*
 * SGEMM "NN" kernel: updates a 64x16 tile of C. Each iteration stages a
 * 16x16 panel of B in shared memory (padded to [16][17] to avoid bank
 * conflicts) and performs 16 rank-1 updates, prefetching A values four at
 * a time. Block shape presumably 16x4 threads (id runs 0..63) -- TODO
 * confirm against the host launch; each thread keeps a 16-element strip of
 * C in registers.
 * Preconditions implied by the indexing: k is a multiple of 16 (B advances
 * by 16 per iteration; not checked).
 */
extern "C" __global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += inx + __mul24( iby + iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
do
{
float a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] };
/* 17-wide rows: padding keeps column accesses conflict-free. */
__shared__ float bs[16][17];
bs[inx][iny] = B[0*ldb];
bs[inx][iny+4] = B[4*ldb];
bs[inx][iny+8] = B[8*ldb];
bs[inx][iny+12] = B[12*ldb];
__syncthreads();
A += 4*lda;
/* 16 rank-1 updates; each saxpy's A operand is refreshed just in time. */
saxpy( a[0], &bs[0][0], c ); a[0] = A[0*lda];
saxpy( a[1], &bs[1][0], c ); a[1] = A[1*lda];
saxpy( a[2], &bs[2][0], c ); a[2] = A[2*lda];
saxpy( a[3], &bs[3][0], c ); a[3] = A[3*lda];
A += 4*lda;
saxpy( a[0], &bs[4][0], c ); a[0] = A[0*lda];
saxpy( a[1], &bs[5][0], c ); a[1] = A[1*lda];
saxpy( a[2], &bs[6][0], c ); a[2] = A[2*lda];
saxpy( a[3], &bs[7][0], c ); a[3] = A[3*lda];
A += 4*lda;
saxpy( a[0], &bs[8][0], c ); a[0] = A[0*lda];
saxpy( a[1], &bs[9][0], c ); a[1] = A[1*lda];
saxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda];
saxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda];
A += 4*lda;
saxpy( a[0], &bs[12][0], c );
saxpy( a[1], &bs[13][0], c );
saxpy( a[2], &bs[14][0], c );
saxpy( a[3], &bs[15][0], c );
B += 16;
__syncthreads();
} while( B < Blast );
/* Scale-and-accumulate the register strip into C, one column per step. */
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
|
623
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
/*
 * Box-average downsampler (host code): shrinks `data` (a source image of
 * width w*wScale and height h*hScale) into `out` (w x h) by averaging each
 * wScale x hScale tile.  Alpha is left at 0, as in the original.
 * NOTE(review): the source row stride is taken as w*wScale; if the real
 * image width is not an exact multiple of w this is an approximation.
 */
void f(uchar4* data, uchar4 *out, int w, int h, int wScale, int hScale) {
    int n = wScale * hScale;        // pixels per source tile
    int x, y, i, j;
    uchar4 p;
    uint4 s;
    int srcWidth = w * wScale;      // row stride of the source image
    for(y = 0; y < h; y += 1) {
        for(x = 0; x < w; x += 1) {
            s = {0,0,0,0};
            for (i = 0; i < wScale; ++i) {
                for (j = 0; j < hScale; ++j){
                    // BUG FIX: the original indexed
                    // data[(x*wScale+i) + y*(y*hScale+j)], multiplying the
                    // source row coordinate by y instead of the row stride.
                    p = data[(x * wScale + i) + (y * hScale + j) * srcWidth];
                    s.x += p.x;
                    s.y += p.y;
                    s.z += p.z;
                }
            }
            s.x /= n;
            s.y /= n;
            s.z /= n;
            out[y * w + x] = make_uchar4(s.x, s.y, s.z, s.w);
        }
    }
}
/*
 * Reads "<input> <output> <wn> <hn>" from stdin, loads the raw image
 * (int width, int height, then uchar4 pixels), downsamples it to wn x hn
 * with f(), reports the elapsed time on stderr and writes the result.
 */
int main() {
    int w, h, wn, hn, wScale, hScale;
    char inputFile[256], outputFile[256];
    scanf("%s %s %d %d", inputFile, outputFile, &wn, &hn);
    FILE *fp = fopen(inputFile, "rb");
    if (fp == NULL) {                       // BUG FIX: original dereferenced a failed fopen
        fprintf(stderr, "cannot open %s\n", inputFile);
        return 1;
    }
    fread(&w, sizeof(int), 1, fp);
    fread(&h, sizeof(int), 1, fp);
    wScale = w / wn, hScale = h / hn;
    uchar4 *data = (uchar4 *)malloc(sizeof(uchar4) * w * h);
    uchar4 *out = (uchar4 *)malloc(sizeof(uchar4) * w * h);
    fread(data, sizeof(uchar4), w * h, fp);
    fclose(fp);
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    f(data, out, wn, hn, wScale, hScale);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    fprintf(stderr, "%.2f\n", time);
    cudaEventDestroy(stop);
    cudaEventDestroy(start);
    // BUG FIX: the original read outputFile from stdin but never wrote the
    // result anywhere.  Mirror the input format: dimensions then pixels.
    fp = fopen(outputFile, "wb");
    if (fp != NULL) {
        fwrite(&wn, sizeof(int), 1, fp);
        fwrite(&hn, sizeof(int), 1, fp);
        fwrite(out, sizeof(uchar4), wn * hn, fp);
        fclose(fp);
    }
    // BUG FIX: both host buffers were leaked.
    free(data);
    free(out);
    return 0;
}
|
624
|
#include<iostream>
#include<fstream>
#include<time.h>
#include<vector>
#include<iterator>
#include<cuda.h>
#include<stdio.h>
#define SIZE 120000000
#define max_threads 80
#define normalizeNum 1000
/* Define num elements of each bucket */
#define range 100000
#define bucketLength (SIZE/range * 2)
/* Each block sorts one bucket */
#define NumOfThreads 1024
#define NumOfBlocks range
using namespace std;
ofstream fs("datos_sort.txt");
const char * NAMEFILE = "data_generated_by_script.txt";
vector<double> buckets[normalizeNum];
template<class RandomAccessIterator>
long quickPartition(RandomAccessIterator first, long low, long high){
double x = first[low];
int left = low+1;
int right = high;
while(left < right){
while((left < right) && (first[left] <= x)) left++;
while((left < right) && (first[right] > x)) right--;
if(left == right) break;
double tmp = first[left];
first[left] = first[right]; first[right] = tmp;
}
if(first[left] > x) left--;
first[low] = first[left]; first[left] = x;
return left;
}
/* Recursive quicksort over the inclusive index range [low, high]. */
template<class RandomAccessIterator>
void quickSort(RandomAccessIterator first, long low, long high){
	if (high <= low)
		return;                 // zero or one element: nothing to do
	long pivotIdx = quickPartition(first, low, high);
	quickSort(first, low, pivotIdx - 1);
	quickSort(first, pivotIdx + 1, high);
}
/* Convenience wrapper: quicksort the whole range [first, last). */
template<class RandomAccessIterator>
void quick_sort(RandomAccessIterator first, RandomAccessIterator last){
	long count = last - first;
	quickSort(first, 0L, count - 1);
}
void clearBuckets(){
for(int i=0;i<normalizeNum;i++){
buckets[i].clear();
}
}
/* Print all SIZE elements of `a`, space separated, on one line. */
void printArray(double* a){
	int idx = 0;
	while (idx < SIZE) {
		cout << a[idx] << " ";
		++idx;
	}
	cout << endl;
}
double* readFile(){
double* arr = (double *)malloc(sizeof(double) * SIZE);
size_t linesz = 0;
FILE * myfile = fopen(NAMEFILE, "r");
char * line = nullptr;
int i=0;
if (myfile){
while(getline(&line, &linesz, myfile) > 0){
arr[i] = strtod(line,nullptr);
i++;
}
fclose(myfile);
}
cout <<"Numero de datos: "<<i<<endl;
return arr;
}
/* Return a heap-allocated copy of the first n elements of a. */
double* copyVector( double* a, int n){
	double* dup = (double *)malloc(sizeof(double) * n);
	int idx = 0;
	while (idx < n) {
		dup[idx] = a[idx];
		++idx;
	}
	return dup;
}
/* Check that the first n elements of arr are in non-decreasing order.
 * On the first out-of-order pair, prints its index and the two values.
 * Generalized: n defaults to SIZE, so existing isSorted(arr) callers are
 * unchanged while smaller arrays can now be checked too. */
bool isSorted(double* arr, int n = SIZE){
	bool isOrdered = true;
	for(int i=0; i<n-1; i++)
		if(arr[i] > arr[i+1]){
			isOrdered = false;
			cout<<i<<" "<<arr[i]<<" "<<arr[i+1]<<endl;
			break;
		}
	return isOrdered;
}
/* Serial bucket sort into arr_ordered: scatter values into the global
 * buckets (bucket index = value * normalizeNum, so inputs are expected in
 * [0, 1)), quicksort each bucket, then concatenate the buckets in order. */
void bucketSort(double* arr, double* arr_ordered){
	// scatter phase
	for (int i = 0; i < SIZE; i++) {
		int bi = normalizeNum * arr[i];
		buckets[bi].push_back(arr[i]);
	}
	// sort each bucket independently
	for (int b = 0; b < normalizeNum; b++)
		quick_sort(buckets[b].begin(), buckets[b].end());
	// gather phase: buckets already cover ascending value ranges
	int pos = 0;
	for (int b = 0; b < normalizeNum; b++)
		for (int j = 0; j < buckets[b].size(); j++)
			arr_ordered[pos++] = buckets[b][j];
}
/* GPU bucket sort: block b scans the whole input, gathers every value whose
 * bucket index (value * normalizeNum) equals b into a shared-memory bucket,
 * sorts that bucket with odd-even transposition, and writes bucketLength
 * entries to its slice of outData.
 * NOTE(review): assumes inputs lie in [0, 1) and that no bucket ever holds
 * more than bucketLength values -- localBucket overflows otherwise.
 * NOTE(review): slots past localCount are never initialized, so the sorted
 * slice of a partially-filled bucket includes indeterminate values; confirm
 * this is handled downstream. */
__global__ void bucketSortCUDA(double *inData, double *outData, long size){
__shared__ double localBucket[bucketLength];
__shared__ int localCount;
int threadId = threadIdx.x;
int blockId = blockIdx.x;
int offset = blockDim.x;
int bucket, index, phase;
double temp;
if(threadId == 0){
localCount = 0;
}
__syncthreads();
// gather: stride over the full input, claiming slots with an atomic counter
while(threadId < size) {
bucket = inData[threadId] * normalizeNum;
if(bucket == blockId) {
index = atomicAdd(&localCount, 1);
localBucket[index] = inData[threadId];
}
threadId += offset;
}
__syncthreads();
threadId = threadIdx.x;
// odd-even transposition sort over the shared bucket; blockDim.x is even,
// so threadId += offset preserves each thread's parity
for(phase = 0; phase < bucketLength; phase ++) {
if(phase % 2 == 0) {
while((threadId < bucketLength) && (threadId % 2 == 0)) {
if(localBucket[threadId] > localBucket[threadId +1]) {
temp = localBucket[threadId];
localBucket[threadId] = localBucket[threadId + 1];
localBucket[threadId + 1] = temp;
}
threadId += offset;
}
}
else {
while((threadId < bucketLength - 1) && (threadId %2 != 0)) {
if(localBucket[threadId] > localBucket[threadId + 1]) {
temp = localBucket[threadId];
localBucket[threadId] = localBucket[threadId + 1];
localBucket[threadId + 1] = temp;
}
threadId += offset;
}
}
}
threadId = threadIdx.x;
// scatter the sorted bucket to this block's slice of the output
while(threadId < bucketLength) {
outData[(blockIdx.x * bucketLength) + threadId] = localBucket[threadId];
threadId += offset;
}
}
/*
 * Benchmark driver: reads SIZE doubles from disk, bucket-sorts them both
 * serially on the CPU and in parallel on the GPU, verifies both results
 * and logs size/time/speedup/efficiency to datos_sort.txt.
 */
int main(int argc, char *argv[]){
    double *arr, * arr_ordered, * arr_aux;
    double * cpu_arr, * cpu_arr_ordered;
    double *gpu_arr, *gpu_arr_ordered;
    double cpu_tStart, readTime, serialTime;
    float parallelTime;
    cudaEvent_t tStart, tStop;
    cudaEventCreate(&tStart,0);
    cudaEventCreate(&tStop,0);
    /* -------- read the input file (timed with the CPU clock) -------- */
    fs << "#numdatos serialTime parallelTime speedup efficiencia #Hilos" << endl;
    cout <<"Leyendo archivo ... "<<endl;
    cpu_tStart = clock();
    arr = readFile();
    readTime = (double)(clock() - cpu_tStart)/CLOCKS_PER_SEC;
    cout <<"Demoro en leer el archivo: "<<readTime<<"(s)"<<endl;
    arr_aux = copyVector(arr, SIZE);
    /* -------- serial bucket sort -------- */
    cpu_arr = copyVector(arr_aux, SIZE);
    cpu_arr_ordered = (double *)malloc(sizeof(double) * SIZE);
    clearBuckets();
    cpu_tStart = clock();
    bucketSort(cpu_arr, cpu_arr_ordered);
    serialTime = (double)(clock() - cpu_tStart)/CLOCKS_PER_SEC;
    cout << "Tiempo secuencial fue : "<<serialTime << "(s)"<< endl;
    if (!isSorted(cpu_arr_ordered) ){
        cout << "Array No esta ordenado"<<endl;
    } else {
        cout << "Array Sort Ordenado"<<endl;
    }
    /* -------- parallel bucket sort (one GPU block per bucket) -------- */
    arr_ordered = (double *)malloc(sizeof(double) * SIZE);
    cudaEventRecord(tStart, 0);
    dim3 numOfThreads(NumOfThreads,1,1);
    dim3 numOfBlocks(NumOfBlocks,1,1);
    cudaMalloc((void**)&gpu_arr, sizeof(double) * SIZE);
    cudaMalloc((void **)&gpu_arr_ordered, sizeof(double) * SIZE);
    cudaMemset(gpu_arr_ordered, 0, sizeof(double) * SIZE);
    cudaMemcpy(gpu_arr, arr_aux, sizeof(double) * SIZE, cudaMemcpyHostToDevice);
    bucketSortCUDA<<<numOfBlocks, numOfThreads>>>(gpu_arr, gpu_arr_ordered, SIZE);
    cudaMemcpy(arr_ordered, gpu_arr_ordered, sizeof(double) * SIZE, cudaMemcpyDeviceToHost);
    cudaEventRecord(tStop, 0);
    cudaEventSynchronize(tStop);
    // BUG FIX: the original contained the mojibake "¶llelTime"
    // (an HTML entity swallowed the '&' of &parallelTime).
    cudaEventElapsedTime(&parallelTime, tStart, tStop);
    cudaEventDestroy(tStart);
    cudaEventDestroy(tStop);
    srand(time(NULL));
    // NOTE(review): this adds a random 0.2-0.7 offset to the measured GPU
    // time before it is reported, corrupting the logged speedup numbers --
    // verify whether this is intentional before trusting the benchmark.
    parallelTime = parallelTime +((double)rand()) / ((double)RAND_MAX) / 2.0 + 0.2;
    cout << "Tiempo paralelo con "<< NumOfThreads <<" hilos y "<< NumOfBlocks <<" bloques que demoro con " << SIZE <<" elementos fue : " << parallelTime << "(s)"<<endl;
    cout << "Speed UP: "<< serialTime/(parallelTime) << endl;
    cout << "Eficiencia: "<< serialTime/(parallelTime*NumOfThreads) << endl;
    if (!isSorted(arr_ordered)) {
        cout << "Array No esta ordenado"<<endl;
    } else {
        cout << "Array Ordenado"<<endl;
    }
    fs << SIZE <<" "<< serialTime << " " << parallelTime << " " << serialTime/parallelTime << " " << serialTime/parallelTime/NumOfThreads<< " " << NumOfThreads <<endl;
    cudaFree(gpu_arr);
    cudaFree(gpu_arr_ordered);
    free(cpu_arr);
    free(cpu_arr_ordered);
    free(arr);
    free(arr_aux);      // BUG FIX: arr_aux was leaked in the original
    free(arr_ordered);
    return 0;
}
|
625
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <string>
#include <locale>
using namespace std;
/*****************************************************************************/
/* Computes C[i] for each thread: summing, over this block's slice j, either
 * a + B[j] (ceil(a) even) or a - B[j] (ceil(a) odd), where a = A[j]*i.
 * One thread per output element; threads past `size` do nothing. */
__global__ void calcularC (const float * A, const float * B, float * C, const int size) {
	uint first = blockIdx.x * blockDim.x;   // first index owned by this block
	uint last = first + blockDim.x;         // one past the last owned index
	uint idx = first + threadIdx.x;
	if (idx >= size)
		return;
	float acc = 0;
	for (int j = first; j < last; j++) {
		float a = A[j] * idx;
		// even ceil(a): add B[j]; odd: subtract it
		acc += ((int) ceil(a) % 2 == 0) ? (a + B[j]) : (a - B[j]);
	}
	C[idx] = acc;
}
/*****************************************************************************/
/* Shared-memory variant of calcularC: each block stages its slice of A and
 * B (2*blockDim.x floats of dynamic shared memory, passed at launch) and
 * accumulates C[i] from the staged copies.
 * NOTE(review): C[i] is accumulated directly in global memory inside the
 * loop; a register accumulator would avoid the repeated global traffic. */
__global__ void calcularC_shared (const float * A, const float * B, float * C, const int size) {
extern __shared__ float sdata[];
uint block_start = blockIdx.x * blockDim.x;
uint i = block_start + threadIdx.x;
uint tid = threadIdx.x;
if (i < size) {
C[i]=0;
// stage A in sdata[0..blockDim.x) and B in sdata[blockDim.x..2*blockDim.x)
sdata[tid] = A[i];
sdata[tid+blockDim.x] = B[i];
__syncthreads();
for(int j=0; j<blockDim.x; j++) {
float a = sdata[j]*i;
if((int) ceil(a) % 2 == 0)
C[i] += a + sdata[j+blockDim.x];
else
C[i] += a - sdata[j+blockDim.x];
// __syncthreads();
}
}
}
/*****************************************************************************/
// https://github.com/mark-poscablo/gpu-sum-reduction/blob/master/sum_reduction/reduce.cu
__global__ void calcularD(const float *C, float *D, const uint size) {
extern __shared__ float sdata[];
uint tid = threadIdx.x;
uint i = blockIdx.x * (blockDim.x*2) + threadIdx.x;
// Cada hebra carga un elemento desde memoria global a memoria compartida
sdata[tid] = 0;
if (i < size)
sdata[tid] = C[i] + C[i+blockDim.x];
__syncthreads();
// Reducir en memoria compartida
for(uint s = blockDim.x>>1; s > 0; s >>= 1) {
if(tid < s)
sdata[tid] += sdata[tid+s];
// Esperar al resto de hebras para comenzar la nueva etapa
__syncthreads();
}
// Escribir resultado de este bloque en memoria global
if (tid == 0)
D[blockIdx.x] = sdata[0];
}
//**************************************************************************//
/*****************************************************************************/
/* Per-block max reduction: block b writes the maximum of its 2*blockDim.x
 * slice of g_idata to g_odata[b].  Same layout requirements as calcularD:
 * blockDim.x*sizeof(float) dynamic shared memory, power-of-two blockDim.x.
 * NOTE(review): when i >= size, sdata[tid] is never written, so
 * uninitialized shared memory participates in the reduction; the paired
 * read g_idata[i+blockDim.x] is also unbounded -- confirm the launch
 * always covers exactly 2*blockDim.x*gridDim.x elements. */
__global__ void calcularMax (const float *g_idata, float *g_odata, const int size) {
extern __shared__ float sdata[];
uint tid = threadIdx.x;
uint i = blockIdx.x * blockDim.x*2 + threadIdx.x;
// Each thread loads the max of one element pair into shared memory
if (i < size)
sdata[tid] = (g_idata[i] > g_idata[i+blockDim.x]) ? g_idata[i] : g_idata[i+blockDim.x];
__syncthreads();
// Tree reduction in shared memory
// s = blockDim.x >> 1 and s >>= 1 both halve the active span each stage
for(uint s = blockDim.x>>1; s > 0; s >>= 1) {
if(tid < s)
if(sdata[tid] < sdata[tid+s])
sdata[tid] = sdata[tid+s];
// Wait for the rest of the threads before the next stage
__syncthreads();
}
// Thread 0 writes this block's maximum to global memory
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
/*****************************************************************************/
/* Element-wise integrity check: if floor(vec1[i]) exceeds floor(vec2[i]) by
 * more than 0.9 at any index, report the offending pair; otherwise "OK". */
string comprobarIntegridad(float *vec1, float *vec2, int len) {
	int i = 0;
	while (i < len) {
		if ((floor(vec1[i])-floor(vec2[i]))>0.9)
			return "fallo " + to_string(vec1[i]) + " : " + to_string(vec2[i]);
		++i;
	}
	return "OK";
}
/*****************************************************************************/
/* Debug dump: print a dotted rule, then len entries as "X[i]=value". */
void imprimir(float *v, int len, char letra) {
	cout << "................................." << endl;
	int i = 0;
	while (i < len) {
		cout << letra << "[" << i << "]=" << v[i] << endl;
		++i;
	}
}
/*****************************************************************************/
/*
 * Driver: computes C, the per-block sums D, and the maximum of C, both on
 * the GPU (kernels above) and on the CPU, times both phases and
 * cross-checks the results.  Usage: transformacion Num_bloques Tam_bloque
 */
int main(int argc, char *argv[]) {
    int Bsize, NBlocks;
    locale mylocale("");
    cout.imbue(mylocale);
    if (argc != 3) {
        cout << "Uso: transformacion Num_bloques Tam_bloque " << endl;
        return(0);
    }
    else {
        NBlocks = atoi(argv[1]);
        Bsize = atoi(argv[2]);
    }
    const int N = Bsize*NBlocks;
    cout << endl << "N=" << N << "= " << Bsize << "*" << NBlocks << endl << endl;
    /* pointers to host memory */
    float *A, *B, *C, *D;
    A = new float[N];
    B = new float[N];
    C = new float[N];
    D = new float[NBlocks];
    float mx; // maximum of C
    /* Initialize arrays */
    for (int i=0; i<N; i++) {
        A[i] = (float) (1 -(i%100)*0.001);
        B[i] = (float) (0.5+(i%10) *0.1 );
        C[i] = 0;
    }
    /**************************************************************************/
    /* GPU PHASE                                                              */
    /**************************************************************************/
    // Pointers to device memory
    float *d_A = NULL, *d_B = NULL, *d_C = NULL, *d_D = NULL, *d_max = NULL;
    // Allocate device memory
    cudaMalloc ((void **) &d_A, sizeof(float)*N);
    cudaMalloc ((void **) &d_B, sizeof(float)*N);
    cudaMalloc ((void **) &d_C, sizeof(float)*N);
    cudaMalloc ((void **) &d_D, sizeof(float)*NBlocks);
    cudaMalloc ((void **) &d_max, sizeof(float));
    // Copy data from host to device
    cudaMemcpy(d_A, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, sizeof(float)*N, cudaMemcpyHostToDevice);
    // Host-side copies of the device results
    float *gpu_C = new float[N];
    float *gpu_D = new float[NBlocks];
    float *gpu_max = new float;
    // Time measurement
    double t1=clock();
    // Compute C: NBlocks blocks of Bsize threads each
    calcularC <<<NBlocks, Bsize>>> (d_A, d_B, d_C, N);
    // calcularC_shared <<<NBlocks, Bsize, 2*Bsize*sizeof(float)>>> (d_A, d_B, d_C, N);
    cudaMemcpy(gpu_C, d_C, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Compute D: half-size blocks (first add happens during the load)
    calcularD <<<NBlocks, ceil(Bsize>>1), Bsize*sizeof(float)>>> (d_C, d_D, N);
    cudaMemcpy(gpu_D, d_D, NBlocks*sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Compute Max: per-block maxima into d_D (gpu_D was already copied out),
    // then a single-block pass reduces those to the global maximum
    calcularMax <<<NBlocks, ceil(Bsize>>1), Bsize*sizeof(float)>>> (d_C, d_D, N);
    // cudaMemcpy(gpu_D, d_D, NBlocks*sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    calcularMax<<<1, ceil((int)NBlocks>>1), NBlocks*sizeof(float)>>> (d_D, d_max, NBlocks);
    cudaMemcpy(gpu_max, d_max, sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    double t2=clock();
    t2=(t2-t1)/CLOCKS_PER_SEC;
    cout << "[GPU] tiempo -> " << t2 << endl << endl;
    // Free device memory
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(d_D); cudaFree(d_max);
    /**************************************************************************/
    /* CPU PHASE                                                              */
    /**************************************************************************/
    // Reference computation of C[i], D[k] and mx
    for (int k=0; k<NBlocks; k++) {
        int istart=k*Bsize;
        int iend = istart+Bsize;
        D[k]=0.0;
        for (int i=istart; i<iend; i++) {
            C[i]=0.0;
            for (int j=istart; j<iend; j++) {
                float a = A[j]*i;
                if ((int)ceil(a) % 2 ==0)
                    C[i] += a + B[j];
                else
                    C[i] += a - B[j];
            }
            D[k] += C[i];
            // BUG FIX: the original seeded mx with "(i==1) ? C[0] : ...",
            // which reads mx uninitialized at i==0 and never compares C[1];
            // seed at i==0 instead.
            mx = (i==0) ? C[0] : max(C[i],mx);
        }
    }
    t2=clock();
    t2=(t2-t1)/CLOCKS_PER_SEC;
    cout << "[CPU] tiempo -> " << t2 << endl << endl;
    /**************************************************************************/
    /* CHECK PHASE                                                            */
    /**************************************************************************/
    cout << "................................." << endl;
    cout << "Comprobando integridad de cálculos...\n";
    cout << "\t[CHECK] Comprobando C... " << comprobarIntegridad(C, gpu_C, N) << endl;
    cout << "\t[CHECK] Comprobando D... " << comprobarIntegridad(D, gpu_D, NBlocks) << endl;
    cout << "\t[CHECK] Comprobando Max... ";
    (mx==*gpu_max) ? cout << "OK\n" : cout << "fallo " << to_string(*gpu_max) << endl;
    // imprimir(A, N, 'A');
    // imprimir(B, N, 'B');
    // imprimir(C, N, 'C');
    // imprimir(D, NBlocks, 'D');
    // imprimir(gpu_C, N, 'C');
    // imprimir(gpu_D, NBlocks, 'D');
    // imprimir(gpu_max, 1, 'M');
    cout << "................................." << endl;
    cout << "El valor máximo en C es: " << to_string(mx) << endl;
    /* Free host memory */
    // BUG FIX: these were allocated with new[] but released with scalar
    // delete (undefined behavior); gpu_max was also leaked.
    delete[] A; delete[] B; delete[] C; delete[] D;
    delete[] gpu_C; delete[] gpu_D;
    delete gpu_max;
    return 0;
}
|
626
|
#include <cuda.h>
#include <stdio.h>
#define iMin(a, b) (a<b?a:b)
const int N = 17*1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = iMin(16, (N+threadsPerBlock-1)/threadsPerBlock);
// kernel code for adding two vector elements
/* Partial dot product of the N-element vectors a and b: each block reduces
 * its threads' products into c[blockIdx.x]; the host finishes the sum.
 * Uses a grid-stride loop over the inputs, then a shared-memory tree
 * reduction that requires blockDim.x to be a power of two
 * (threadsPerBlock = 256 satisfies this). */
__global__ void vecDot(float* a, float* b, float* c)
{
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp=0;
// grid-stride accumulation of this thread's products
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// write partial sum of products into cache
cache[cacheIndex] = temp;
// synchronize threads in block
__syncthreads();
// reduction of vector across threads in block
int i=blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex+i];
__syncthreads();
i /= 2;
}
// thread 0 publishes this block's partial result
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
/* Driver: builds two N-element vectors (all 1s and all 2s), computes their
 * dot product with vecDot plus a host-side final reduction, and prints it. */
int main(void)
{
	const int bytesVec = N*sizeof(float);
	const int bytesPartial = blocksPerGrid*sizeof(float);
	// host allocations
	float *h_a = (float*) malloc(bytesVec);
	float *h_b = (float*) malloc(bytesVec);
	float *h_partial = (float*) malloc(bytesPartial);
	// fill the inputs: a = all 1s, b = all 2s
	for (int i = 0; i < N; i++) {
		h_a[i] = 1;
		h_b[i] = 2;
	}
	// device allocations
	float *d_a, *d_b, *d_partial;
	cudaMalloc((void**)&d_a, bytesVec);
	cudaMalloc((void**)&d_b, bytesVec);
	cudaMalloc((void**)&d_partial, bytesPartial);
	// push the inputs to the device
	cudaMemcpy(d_a, h_a, bytesVec, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, bytesVec, cudaMemcpyHostToDevice);
	// one partial sum per block
	vecDot<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_partial);
	// pull the partial sums back (blocking memcpy syncs with the kernel)
	cudaMemcpy(h_partial, d_partial, bytesPartial, cudaMemcpyDeviceToHost);
	// finish the reduction on the host
	float dot = 0;
	for (int i = 0; i < blocksPerGrid; i++)
		dot += h_partial[i];
	printf("a*b = %f\n", dot);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_partial);
	free (h_a);
	free (h_b);
	free (h_partial);
	return 0;
}
|
627
|
#include <stdio.h>
/* Pseudo-random float drawn uniformly from [low, high] via rand(). */
float uniform(float low, float high) {
	float t = static_cast<float>(rand()) / RAND_MAX;   // t in [0, 1]
	return low + t * (high - low);
}
/* Round-trips a 2^25-element random array host->device->host and prints
 * one element to show the copies preserved the data. */
int main() {
    int size = static_cast<int>(1 << 25);
    float* host_arr = (float*) malloc(size*sizeof(float));
    for(size_t i = 0; i < size; ++i) {
        host_arr[i] = uniform(0.0, 1.0);
    }
    float* dev_arr;
    cudaMalloc(&dev_arr, size*sizeof(float));
    cudaMemcpy(dev_arr, host_arr, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(host_arr, dev_arr, size*sizeof(float), cudaMemcpyDeviceToHost);
    int ix = 4000000;
    printf("%f\n", host_arr[ix]);
    // BUG FIX: both allocations were leaked in the original.
    cudaFree(dev_arr);
    free(host_arr);
    return 0;
}
|
628
|
#include <stdio.h>
/* Demo kernel: "f" lives in per-thread local storage (registers).  It is
 * written and never read, so the kernel has no observable effect -- it only
 * illustrates local-memory scoping. */
__global__
void use_local_memory_GPU(float in)
{
float f;
f = in;
}
/* Demo kernel: each thread writes 2*threadIdx.x into its slot of `array`,
 * which points at device global memory. */
__global__
void use_global_memory_GPU(float *array)
{
	int tid = threadIdx.x;
	array[tid] = (float) tid * 2.0f;
}
/* Demo kernel: stages 128 floats from global into shared memory, then for
 * each index clamps array[index] to the running average of the elements
 * before it.  The fixed-size sh_arr and one-element-per-thread copy assume
 * a single block of exactly 128 threads. */
__global__
void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread block
// and have the same lifecycle as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared memory
// here, each thread is responsible for copying a single element
sh_arr[index] = array[index];
__syncthreads(); // ensure all writes to shared memory have completed
// now sh_arr is fully populated, let's find the average of all previous elements
// e.g. index = 2, then (sh_arr[0] + sh_arr[1]) / (2 + 1.0f)
for (i = 0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of arrya[0...index-1], replace with average.
// since array[] is in global memory, this change will be seen by the host (and potentially other thread blocks if any)
if (array[index] > average) { array[index] = average; }
// the following code has no effect. it modifies shared memory, but the resulting modified data is never copied back
// to global memory and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
/*
 * Demo driver for the three memory-space kernels above: launches the
 * local-memory kernel, round-trips data through the global-memory kernel,
 * then the shared-memory kernel, printing the array after each round trip.
 */
int main(int argc, char **argv)
{
    use_local_memory_GPU<<<1, 128>>>(2.0f);
    /*
    kernel that uses global memory
    */
    float h_arr[128];
    // BUG FIX (hygiene): h_arr was copied to the device uninitialized.
    // The values are fully overwritten by use_global_memory_GPU before any
    // read, but copying indeterminate bytes is sloppy; give it contents.
    for (int i = 0; i < 128; i++)
    {
        h_arr[i] = (float) i;
    }
    float *d_arr;
    // allocate global memory on the device, place result in d_arr
    cudaMalloc((void **) &d_arr, sizeof(float) * 128);
    // copy data from host memory h_arr to device memory d_arr
    cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128, cudaMemcpyHostToDevice);
    // launch kernel
    use_global_memory_GPU<<<1, 128>>>(d_arr);
    // copy the modified data back to host, overwriting content of h_arr
    cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 128; i++)
    {
        printf("h_arr[%d] = %f\n", i, h_arr[i]);
    }
    /*
    kernel that uses shared memory
    */
    // launch kernel, pass in a pointer to data in global memory
    use_shared_memory_GPU<<<1, 128>>>(d_arr);
    // copy the modified data back to host, overwriting content of h_arr
    cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 128; i++)
    {
        printf("h_arr[%d] = %f\n", i, h_arr[i]);
    }
    // BUG FIX: the device allocation was leaked.
    cudaFree(d_arr);
    return 0;
}
|
629
|
extern "C"
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
/*
 * 3x3 median filter over an Image_Width x Image_Height int image.
 * Each BLOCK_WIDTH x BLOCK_HEIGHT block stages its tile plus a one-pixel
 * halo in shared memory (halo cells outside the image are zero-filled),
 * then each thread sorts its 3x3 neighborhood and writes the median.
 * Launch with blockDim = (BLOCK_WIDTH, BLOCK_HEIGHT); assumes the grid
 * exactly covers the image (no bounds check on tx_g/ty_g).
 */
__global__ void filter(int *Input_Image, int *Output_Image, int Image_Width, int Image_Height)
{
    const int tx_l = threadIdx.x;                         // --- Local thread x index
    const int ty_l = threadIdx.y;                         // --- Local thread y index
    const int tx_g = blockIdx.x * blockDim.x + tx_l;      // --- Global thread x index
    const int ty_g = blockIdx.y * blockDim.y + ty_l;      // --- Global thread y index
    __shared__ int smem[BLOCK_WIDTH+2][BLOCK_HEIGHT+2];
    // --- Fill the shared memory border with zeros
    if (tx_l == 0)                      smem[tx_l]  [ty_l+1] = 0;   // --- left border
    else if (tx_l == BLOCK_WIDTH-1)     smem[tx_l+2][ty_l+1] = 0;   // --- right border
    if (ty_l == 0) {                    smem[tx_l+1][ty_l]   = 0;   // --- upper border
        if (tx_l == 0)                  smem[tx_l]  [ty_l]   = 0;   // --- top-left corner
        else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l]   = 0;   // --- top-right corner
    } else if (ty_l == BLOCK_HEIGHT-1) {smem[tx_l+1][ty_l+2] = 0;   // --- bottom border
        if (tx_l == 0)                  smem[tx_l]  [ty_l+2] = 0;   // --- bottom-left corner
        else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l+2] = 0;   // --- bottom-right corner
    }
    // --- Fill shared memory with the tile and the in-image halo pixels
    smem[tx_l+1][ty_l+1] = Input_Image[ty_g*Image_Width + tx_g];    // --- center
    if ((tx_l == 0)&&((tx_g > 0)))                              smem[tx_l]  [ty_l+1] = Input_Image[ty_g*Image_Width + tx_g-1];      // --- left border
    else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l+1] = Input_Image[ty_g*Image_Width + tx_g+1];      // --- right border
    if ((ty_l == 0)&&(ty_g > 0)) {                              smem[tx_l+1][ty_l]   = Input_Image[(ty_g-1)*Image_Width + tx_g];    // --- upper border
        if ((tx_l == 0)&&((tx_g > 0)))                              smem[tx_l]  [ty_l]   = Input_Image[(ty_g-1)*Image_Width + tx_g-1];  // --- top-left corner
        else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l]   = Input_Image[(ty_g-1)*Image_Width + tx_g+1];  // --- top-right corner
    } else if ((ty_l == BLOCK_HEIGHT-1)&&(ty_g < Image_Height - 1)) { smem[tx_l+1][ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g];  // --- bottom border
        // BUG FIX: the bottom-left corner read row (ty_g-1) in the
        // original; the bottom halo lives on row (ty_g+1).
        if ((tx_l == 0)&&((tx_g > 0)))                              smem[tx_l]  [ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g-1];  // --- bottom-left corner
        else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g+1];  // --- bottom-right corner
    }
    __syncthreads();
    // --- Pull the 3x3 window into a local array
    long v[9] = { smem[tx_l][ty_l],   smem[tx_l+1][ty_l],   smem[tx_l+2][ty_l],
                  smem[tx_l][ty_l+1], smem[tx_l+1][ty_l+1], smem[tx_l+2][ty_l+1],
                  smem[tx_l][ty_l+2], smem[tx_l+1][ty_l+2], smem[tx_l+2][ty_l+2] };
    // --- Partial selection sort: five passes suffice to place the median
    for (int i = 0; i < 5; i++) {
        for (int j = i + 1; j < 9; j++) {
            if (v[i] > v[j]) { // swap?
                long tmp = v[i];   // widened from int to match v's type
                v[i] = v[j];
                v[j] = tmp;
            }
        }
    }
    // --- Pick the middle one
    Output_Image[ty_g*Image_Width + tx_g] = v[4];
}
|
630
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#include <math.h>
/*
   Returns the current wall-clock time in SECONDS, not milliseconds as the
   name suggests: sec*1e6 + usec is microseconds, and dividing by 1e6
   yields seconds.  main() below also prints this value as "sec".
*/
double getMilitime(){
struct timeval ret;
gettimeofday(&ret, NULL);
return ((ret.tv_sec ) * 1000000u + ret.tv_usec) / 1.e6;
}
#define TYPE double
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
#ifdef __CUDACC__
// Abort with a diagnostic if a CUDA runtime call returned an error.
inline void checkCuda(cudaError_t e) {
if (e != cudaSuccess) {
// cudaGetErrorString() isn't always very helpful. Look up the error
// number in the cudaError enum in driver_types.h in the CUDA includes
// directory for a better explanation.
err("CUDA Error %d: %s\n", e, cudaGetErrorString(e));
}
}
// Surface the most recent launch/async error; call after kernel launches.
inline void checkLastCudaError() {
checkCuda(cudaGetLastError());
}
#endif
/* One round of a tree sum reduction: each block sums its slice of
 * ArrDevice (up to blockDim.x elements) into ArrTempDevice[blockIdx.x].
 * Requires blockDim.x * sizeof(TYPE) bytes of dynamic shared memory and a
 * power-of-two blockDim.x. */
__global__ void ReductionKernel(TYPE* ArrDevice, TYPE* ArrTempDevice, long ArrSize){
    extern __shared__ TYPE sdata[];
    unsigned long objectId = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned long tid = threadIdx.x;
    // BUG FIX: the original placed __syncthreads() and the reduction loop
    // inside "if (objectId < ArrSize)", so out-of-range threads skipped the
    // barriers -- undefined behavior.  Load conditionally instead, and let
    // every thread reach every barrier.
    sdata[tid] = (objectId < ArrSize) ? ArrDevice[objectId] : (TYPE)0;
    __syncthreads();
    // interleaved-addressing tree reduction in shared memory
    for(unsigned long s=1; s < blockDim.x; s *= 2) {
        if (tid % (2*s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // thread 0 publishes this block's partial sum
    if (tid == 0) ArrTempDevice[blockIdx.x] = sdata[0];
}
/* Debug helper: print ArrDevice[0] from the device (launch with <<<1,1>>>
 * and synchronize afterwards to flush the output). */
__global__ void PrintKernel(TYPE* ArrDevice){
printf("ArrSumDevice: %f \n",ArrDevice[0]);
}
/*
 * Iterative GPU sum-reduction driver: fills an ArrSize-long array of 1s,
 * repeatedly launches ReductionKernel -- ping-ponging between ArrDevice and
 * ArrTempDevice -- until one value remains, then prints it from the device.
 * Usage: <prog> <array-size>
 */
int main(int argc, char* argv[]){
    if(argc<2){
        printf("Input Error\n");
        return 1;
    }
    // BUG FIX: atoi() truncates through int; use atol() for a long size.
    long ArrSize = atol(argv[1]);
    TYPE *ArrDevice, *ArrTempDevice,*ArrHost=NULL;
    // Allocate and fill the host input; all 1s => expected sum is ArrSize
    ArrHost = (TYPE*) malloc(ArrSize*sizeof(TYPE));
    assert(ArrHost != NULL) ;
    long count;
    for(count=0;count<ArrSize;count++){
        ArrHost[count] = 1;
    }
    // Device buffers (the temp buffer is over-allocated for simplicity)
    checkCuda(cudaMalloc(&ArrDevice, ArrSize*sizeof(TYPE)));
    checkCuda(cudaMalloc(&ArrTempDevice, ArrSize*sizeof(TYPE)));
    cudaMemcpy(ArrDevice, ArrHost, ArrSize*sizeof(TYPE), cudaMemcpyHostToDevice);
    PrintKernel<<< 1, 1 >>>(ArrDevice);
    cudaDeviceSynchronize(); checkLastCudaError();
    // Reduction rounds: each pass shrinks the array to one value per block
    long ArrTempFlag=0;
    unsigned long numThreadsPerClusterBlock = 512;
    unsigned long numClusterBlocks =
        (ArrSize + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;
    long CurrArrSize = ArrSize;
    printf("start timing\n");
    double start_time = getMilitime();
    while(CurrArrSize>1){
        // shrink the block to the next power of two >= the remaining size
        if(numThreadsPerClusterBlock>CurrArrSize) numThreadsPerClusterBlock = pow(2.0,ceil(log2((double)CurrArrSize)));
        numClusterBlocks =
            (CurrArrSize + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;
        // ping-pong source/destination between the two device buffers
        if((ArrTempFlag) %2 == 0)
            ReductionKernel<<< numClusterBlocks, numThreadsPerClusterBlock, numThreadsPerClusterBlock * sizeof(TYPE)>>>
                (ArrDevice, ArrTempDevice,CurrArrSize);
        else
            ReductionKernel<<< numClusterBlocks, numThreadsPerClusterBlock, numThreadsPerClusterBlock * sizeof(TYPE)>>>
                (ArrTempDevice, ArrDevice,CurrArrSize);
        cudaDeviceSynchronize(); checkLastCudaError();
        CurrArrSize = numClusterBlocks;
        ArrTempFlag++;
    }
    printf("elapsed time: %f sec\n", getMilitime()-start_time);
    // The final sum lives in whichever buffer was written last
    if((ArrTempFlag) %2 == 0){
        PrintKernel<<< 1, 1 >>>(ArrDevice);
        cudaDeviceSynchronize(); checkLastCudaError();
    }else{
        PrintKernel<<< 1, 1 >>>(ArrTempDevice);
        cudaDeviceSynchronize(); checkLastCudaError();
    }
    // free device and host data
    checkCuda(cudaFree(ArrDevice));
    checkCuda(cudaFree(ArrTempDevice));
    free(ArrHost);
    return(0);
}
|
631
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#define LIST_SIZE 100000
extern "C" __device__ long long instCountList[LIST_SIZE];
// Bamboo instrumentation hook: disable record logging (no-op in this build).
void bambooLogRecordOff(){
}
// Bamboo instrumentation hook called before a kernel launch; the kernel
// id `i` is unused in this no-op build.
void bambooLogKernelBegin(int i) {
}
// Bamboo instrumentation hook called after a kernel launch; when built
// with KERNELTRACE it waits for the device so kernel errors surface here.
void bambooLogKernelEnd() {
#ifdef KERNELTRACE
cudaDeviceSynchronize();
#endif
}
|
632
|
#include "includes.h"
/* Zero `len` uint4 elements of ptr (16 bytes per thread); launch with
 * enough threads to cover len -- excess threads exit immediately. */
__global__ void swan_fast_fill( uint4 *ptr, int len ) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if( i >= len )
		return;
	ptr[i] = make_uint4( 0, 0, 0, 0 );
}
|
633
|
/***************************************************************************//**
* \file LHS2.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the left hand side for the poission solve
*/
#include "LHS2.h"
namespace kernels
{
/*
* calculates the boundary terms for the left hand side matrix for the poisson solve
* param row array storing the row indices for the sparse LHS matrix
* param col array storing the column indices for the sparse LHS matrix
* param val array storing the values for the sparse LHS matrix
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param nx number of cells in x direction
* param ny number of cells in y direction
* param dt change in time
*/
__global__
void LHS2_BC(int *row, int *col, double *val, double *dx, double *dy, int nx, int ny, double dt)
{
// one thread per pressure node, linear index over the nx*ny grid
int ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= nx*ny)
return;
// 2D node coordinates
int I = ip % nx,
J = ip / nx;
// only boundary rows are assembled here; interior nodes are handled by
// LHS2_mid_nobody
if (I != 0 && I != nx-1 && J != 0 && J != ny-1)
return;
// numE: this row's starting slot in the COO arrays.  South/north wall
// rows carry 4 entries (one neighbor missing), interior rows 5, and the
// west column (I == 0) loses one more entry.
int numE = 0;
if (J == 0)
{
numE = I*4;
if (I!=0)
numE-=1;
}
else if (J == ny-1)
{
numE = nx*4-2 + (J-1)*(nx*5-2) + I*4;
if (I!=0)
numE-=1;
}
else
{
numE = nx*4-2 + (J-1)*(nx*5 - 2) + I*5;
if (I != 0)
numE -= 1;
}
// temp accumulates the diagonal as the negative sum of the off-diagonals
double temp = 0;
//EAST
//if not on the east wall and east is outside the body, add east term
if (I != nx-1)//not at east boundry
{
row[numE] = ip;
col[numE] = ip + 1;
val[numE] = -dt/(dx[I]*(dx[I]+dx[I+1])*0.5);
numE++;
temp += dt/(dx[I]*(dx[I]+dx[I+1])*0.5);
}
//WEST
//if not on west wall and west is outside the body, add west term
if (I != 0)//not at west boundary
{
row[numE] = ip;
col[numE] = ip - 1;
val[numE] = -dt/(dx[I]*(dx[I]+dx[I-1])*0.5);
temp += dt/(dx[I]*(dx[I]+dx[I-1])*0.5);
numE++;
}
//NORTH
//if not on north wall and north is outside the body, add north term
if (J != ny-1)//not at north boundry
{
row[numE] = ip;
col[numE] = ip + nx;
val[numE] = -dt/(dy[J]*(dy[J]+dy[J+1])*0.5);
temp += dt/(dy[J]*(dy[J]+dy[J+1])*0.5);
numE++;
}
//SOUTH
//if not on south wall and south is outside the body, add south term
if (J != 0)//not at south boundry
{
row[numE] = ip;
col[numE] = ip - nx;
val[numE] = -dt/(dy[J]*(dy[J]+dy[J-1])*0.5);
temp += dt/(dy[J]*(dy[J]+dy[J-1])*0.5);
numE++;
}
//MID
row[numE] = ip;
col[numE] = ip;
val[numE] = temp;
//do some jank so the solver works, although this modifies the matricies it doesn't really change the results
// NOTE(review): doubling the diagonal at node 0 presumably pins the
// pressure level so the otherwise-singular Poisson system is solvable --
// confirm against the solver's requirements.
if (ip == 0)
val[numE] += val[numE];
//val[numE] = 0;
//val[numE] *= val[numE];
}
/*
* calculates the middle terms for the left hand side matrix for the poisson solve
* param row array storing the row indices for the sparse LHS matrix
* param col array storing the column indices for the sparse LHS matrix
* param val array storing the values for the sparse LHS matrix
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param nx number of cells in x direction
* param ny number of cells in y direction
* param dt change in time
*/
__global__
void LHS2_mid_nobody(int *row, int *col, double *val, double *dx, double *dy, int nx, int ny, double dt)
{
    // One thread per pressure cell; ip is the flat cell index.
    int ip = threadIdx.x + blockDim.x * blockIdx.x;
    if (ip >= nx*ny)
        return;
    // 2-D cell coordinates: I = column, J = row.
    int I = ip % nx,
        J = ip / nx;
    // Interior cells only; boundary cells are written by LHS2_BC.
    if (I == 0 || I == nx-1 || J == 0 || J == ny-1)
        return;
    // First COO slot for this cell (same layout as in LHS2_BC): 5 entries per
    // interior cell, minus one for the first column of the row.
    int numE= nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1;
    // Diagonal accumulator: the negated sum of the off-diagonal coefficients.
    double temp = 0;
    //EAST
    row[numE] = ip;
    col[numE] = ip + 1;
    val[numE] = -dt/(dx[I]*(dx[I]+dx[I+1])*0.5);
    numE++;
    temp += dt/(dx[I]*(dx[I]+dx[I+1])*0.5);
    //WEST
    row[numE] = ip;
    col[numE] = ip - 1;
    val[numE] = -dt/(dx[I]*(dx[I]+dx[I-1])*0.5);
    temp += dt/(dx[I]*(dx[I]+dx[I-1])*0.5);
    numE++;
    //NORTH
    row[numE] = ip;
    col[numE] = ip + nx;
    val[numE] = -dt/(dy[J]*(dy[J]+dy[J+1])*0.5);
    temp += dt/(dy[J]*(dy[J]+dy[J+1])*0.5);
    numE++;
    //SOUTH
    row[numE] = ip;
    col[numE] = ip - nx;
    val[numE] = -dt/(dy[J]*(dy[J]+dy[J-1])*0.5);
    temp += dt/(dy[J]*(dy[J]+dy[J-1])*0.5);
    numE++;
    //MID
    row[numE] = ip;
    col[numE] = ip;
    val[numE] = temp;
    //do some jank so the solver works, although this modifies the matricies it doesn't really change the results
    // NOTE(review): doubling the diagonal of the centre cell presumably pins an
    // otherwise singular system — confirm intent.
    if(row[numE]==col[numE] && col[numE]==(ny/2)*nx+nx/2)
    {
        val[numE] += val[numE];
    }
}
}
|
634
|
//Author: Xinrea
//Date: 2018/7/5
//Basic Sample of using CUDA
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
cudaError_t addData(int size,int a[],int b[],int c[]);
__global__ void addKernel(int dev[],int size){
    // dev holds three consecutive vectors of length `size`:
    // [0,size) = a, [size,2*size) = b, [2*size,3*size) = result a+b.
    const int idx = threadIdx.x;
    int *a = dev;
    int *b = dev + size;
    int *c = dev + 2 * size;
    c[idx] = a[idx] + b[idx];
}
int main(int argc, char const *argv[])
{
    // Fixed 3-element demo vectors; c receives a + b from the GPU.
    const int size = 3;
    int a[3] = {1,2,3};
    int b[3] = {4,5,6};
    int c[3] = {0};
    if (addData(size, a, b, c) != cudaSuccess) {
        fprintf(stderr,"addData failed\n");
        return -1;
    }
    printf("%d %d %d\n",c[0],c[1],c[2]);
    // Tear the device down cleanly (flushes profiling data, etc.).
    if (cudaDeviceReset() != cudaSuccess) {
        fprintf(stderr,"cudaDeviceReset failed\n");
        return -1;
    }
    return 0;
}
/*
 * Adds a[] and b[] element-wise on the GPU, writing the result into c[].
 * Device layout: one buffer of 3*size ints, [a | b | c].
 * Returns the first CUDA error encountered (cudaSuccess on success).
 */
cudaError_t addData(int size,int a[],int b[],int c[]){
    cudaError_t res;
    // BUGFIX: initialise to NULL so the unconditional cudaFree() below is a
    // safe no-op when an early step fails before cudaMalloc has run
    // (previously an uninitialised pointer was freed).
    int *dev = NULL;
    do{
        res = cudaSetDevice(0);
        if (res != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed\n");
            break;
        }
        res = cudaMalloc((void**)&dev,3*size*sizeof(int));
        if(res != cudaSuccess){
            fprintf(stderr,"cudaMalloc failed\n");
            break;
        }
        res = cudaMemcpy(dev,a,size*sizeof(int),cudaMemcpyHostToDevice);
        if(res != cudaSuccess){
            fprintf(stderr,"cudaMemcpy a failed\n");
            break;
        }
        res = cudaMemcpy(dev+size,b,size*sizeof(int),cudaMemcpyHostToDevice);
        if(res != cudaSuccess){
            fprintf(stderr,"cudaMemcpy b failed\n");
            break;
        }
        // One thread per element; launch errors surface via cudaGetLastError.
        addKernel<<<1,size>>>(dev,size);
        res = cudaGetLastError();
        if(res != cudaSuccess){
            fprintf(stderr,"addData failed\n");
            break;
        }
        cudaDeviceSynchronize();
        res = cudaMemcpy(c,dev+2*size,size*sizeof(int),cudaMemcpyDeviceToHost);
        if(res != cudaSuccess){
            fprintf(stderr,"cudaMemcpy c failed\n");
            break;
        }
    } while(0);
    cudaFree(dev);
    return res;
}
|
635
|
// addNext.cu
// Second program from Dr Dobbs tutorial. http://drdobbs.com/parallel/207402986
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
// Host kernel = increment each element by 1
// Host reference: in-place running sum, a[i] becomes a[0] + ... + a[i].
void incOnHost(float *a, int N) {
    for (int k = 1; k < N; ++k)
        a[k] += a[k - 1];
}
// Device Kernel = increment each element by 1
__global__ void incOnDevice(float *a, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( (idx<N) && (idx>1) )
a[idx] = a[idx] + a[idx-1];
}
// Main thread
int main(void) {
float *a_h, *b_h;
float *a_d;
int i, N=48;
size_t size = N*sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
cudaMalloc((void **) &a_d, size);
for (i=0; i<N; i++)
a_h[i] = (float)i;
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
// do calculation on host
incOnHost(a_h, N);
// do calculation on device
int blockSize = 4;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
incOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve results and compare
cudaMemcpy(b_h,a_d,size,cudaMemcpyDeviceToHost);
for (i=0; i<N; i++) {
// printf("value from host %d = %1.1f\n",i,a_h[i]);
printf("value from device %d = %1.1f\n",i,b_h[i]);
// assert(a_h[i] == b_h[i]);
}
free(a_h); free(b_h); cudaFree(a_d);
}
|
636
|
// One block per element: block i zeroes ptr[i] (thread index is unused).
extern "C" __global__ void kernel_dummy(float *ptr)
{
    const unsigned int slot = blockIdx.x;
    ptr[slot] = 0.0f;
}
|
637
|
/*
* Compile and run: nvcc -arch=sm_20 kGrid.cu -run
*/
#include <stdio.h>
/*__global__
void kGrid(int n, int *k) {
int l = blockIdx.x * blockDim.x + threadIdx.x;
if(l < n) {
for(int* i = 0; i < k; i++) {
for(int* j = 0; j < k; j++) {
printf("%i", j)
}
printf("\n");
}
}
}
int main(void) {
int N = 1<<20;
int *k, *d_k;
k = (int*)malloc(N*sizeof(int));
cudaMalloc(&d_k, N*sizeof(int));
for(int i = 0; i < N; i++) {
k[i] = 5;
}
cudaMemcpy(d_k, k, N*sizeof(int), cudaMemcpyHostToDevice);
kGrid<<<(N+255)/256, 256>>>(N, d_k);
cudaMemcpy(k, d_k, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_k);
free(k);
return 0;
}*/
// Each thread prints one line identifying its block/thread coordinates.
__global__ void print_kernel() {
    const int b = blockIdx.x;
    const int t = threadIdx.x;
    printf("Block %d, thread %d\n", b, t);
}
int main() {
    // Launch 10 blocks of 10 threads, then wait so the device printf buffer
    // is flushed before the process exits.
    const int numBlocks = 10;
    const int threadsPerBlock = 10;
    print_kernel<<<numBlocks, threadsPerBlock>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
638
|
#include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define N 32
// Distance between two scalars: sqrt of the squared difference.
__device__ float distance(float x1, float x2)
{
    const float d = x2 - x1;
    return sqrt (d * d);
}
// Computes |d_in[i] - ref| for every element and logs launch geometry per
// thread (debug printf).
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
    // Flat global index over the 1-D grid.
    const int i=blockIdx.x*blockDim.x+threadIdx.x;
    // Kept only for the diagnostic printf below.
    const int j=blockIdx.x;
    const int k=threadIdx.x;
    const int l=blockDim.x;
    const float x=d_in[i];
    d_out[i]=distance(x, ref);
    printf("blockDIM=%d, blockID=%d, threadID=%d, i=%d: the distance between %f to %f is %f. \n", l, j, k, i, ref, x, d_out[i]); ////
}
// Host wrapper: copies `in` to the device, runs distanceKernel, copies the
// result back into `out`. len is assumed to be a multiple of TPB (the grid is
// len/TPB blocks, so a remainder would be skipped).
void distanceArray(float *out, float *in, float ref, int len)
{
    float *d_in = 0;
    float *d_out = 0;
    const size_t bytes = len * sizeof(float);
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_in, in, bytes, cudaMemcpyHostToDevice);
    distanceKernel<<<len / TPB, TPB>>>(d_out, d_in, ref);
    // Blocking copy also synchronises with the kernel.
    cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
// Map index i in [0, n-1] linearly onto [0, 1].
float scale(int i, int n)
{
    const float num = (float) i;
    return num / (n - 1);
}
int main()
{
    // Reference point the distances are measured from.
    const float ref=0.5f;
    // calloc keeps the buffers zeroed until filled below.
    float *in = (float*) calloc(N, sizeof(float));
    float *out = (float*) calloc(N, sizeof(float));
    for (int i = 0; i < N; ++i)
        in[i] = scale(i, N);
    distanceArray(out, in, ref, N);
    printf("______________________________ \n");
    for (int i = 0; i < N; ++i)
        printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[i], out[i]);
    free(in);
    free(out);
    return 0;
}
|
639
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated single-thread stress kernel: evaluates a fixed tree of float
// operations over the 30 scalar inputs, mutating `comp` along the way, and
// prints the final value. Do not hand-edit the expressions — the constants
// and operation order are the test.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29) {
float tmp_1 = (+1.5410E-43f * var_1 * +1.0973E35f + var_2 - (+0.0f - +1.9327E35f));
comp = tmp_1 - (var_3 * +1.7163E36f / var_4 + var_5 + -1.8967E-42f + var_6);
float tmp_2 = (-1.7459E35f - (var_7 * +1.1576E-37f));
comp = tmp_2 / expf(+1.5630E-37f / cosf(+1.3297E-36f - (var_8 - -1.7565E-2f + var_9 - ceilf(var_10 * (var_11 / var_12 + atanf(-1.5981E-37f + +1.3857E-1f - var_13 / atanf(-1.3361E-44f / -1.2044E-44f)))))));
if (comp <= (var_14 - (var_15 * +1.7918E-24f / var_16 - (-0.0f * var_17)))) {
  comp = atanf(atan2f((var_18 - (+1.9056E1f + var_19)), +1.0569E-37f - atan2f(+1.3944E-37f, (-1.0124E35f / var_20 + var_21))));
}
if (comp < (var_22 / fmodf((var_23 / var_24), +1.0852E-35f))) {
  comp += +1.0219E-35f * +0.0f;
  comp += sqrtf(+1.7431E-36f);
  comp = (var_25 - (+1.6513E35f / +1.1353E15f * var_26 + +1.6437E35f));
  comp += var_27 * (var_28 / (var_29 * +1.2062E-42f));
}
// %.17g: full precision so tiny run-to-run differences are visible.
printf("%.17g\n", comp);
}
// Allocate a 10-element float array filled with v. Caller owns/frees it.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int k = 0;
    while (k < 10) {
        buf[k] = v;
        ++k;
    }
    return buf;
}
int main(int argc, char** argv) {
  /* Program variables */
  // BUGFIX: the generated kernel needs exactly 30 arguments; previously
  // argv[1..30] were dereferenced unconditionally, which is undefined
  // behaviour when fewer arguments are supplied.
  if (argc < 31) {
    fprintf(stderr, "usage: %s v1 ... v30\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  float tmp_30 = atof(argv[30]);
  // Single-thread launch; the kernel prints its own result.
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
  cudaDeviceSynchronize();
  return 0;
}
|
640
|
#include "includes.h"
// Row-major linearisation of a (row, col) pair in a matrix of the given width.
__device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) {
    const unsigned int rowBase = row * width;
    return rowBase + col;
}
// Converts accumulated per-class feature counts (feature_probs, shape
// n_classes_ x n_features_, row-major) and class counts (class_priors) into
// log-probabilities, in place. One thread per feature column.
__global__ void MultinomialNBLearnKernel(float *feature_probs, float *class_priors, const float *d_row_sums, unsigned int n_samples_, unsigned int n_classes_, unsigned int n_features_) {
/* Each thread will take one term */
unsigned int tidx = threadIdx.x;
unsigned int feat_col = tidx + (blockIdx.x * blockDim.x);
unsigned int i = 0;
if (feat_col < n_features_) { /* End condition check */
/* For each label */
for (i = 0; i < n_classes_; ++i) {
// Alpha is default set to 1.0 for laplacian smoothing
// NOTE(review): standard Laplace smoothing uses d_row_sums[i] + n_features_
// in the denominator; this code SUBTRACTS n_features_ — confirm intended.
feature_probs[RM_Index(i, feat_col, n_features_)] =
log((feature_probs[RM_Index(i, feat_col, n_features_)] + 1.0) /
(d_row_sums[i] - (n_features_ * 1.0)));
// Only one thread per block row updates the priors (avoids duplicate writes).
if (feat_col == 0) {
class_priors[i] = log(class_priors[i] / (float)n_samples_);
}
}
}
}
|
641
|
#include "includes.h"
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// Copies, for each template (one block per template bid), the waveform columns
// of its NchanNear nearest channels from dWU into dWUblank; all other channels
// of dWUblank are left untouched. threadIdx.x indexes the nt0 time samples,
// threadIdx.y strides over the near-channel list.
__global__ void blankdWU(const double *Params, const double *dWU, const int *iC, const int *iW, double *dWUblank){
  int nt0, tidx, tidy, bid, Nchan, NchanNear, iChan;
  // Sizes are packed into Params by the host (indices fixed by convention).
  nt0       = (int) Params[4];
  Nchan     = (int) Params[9];
  NchanNear = (int) Params[10];
  tidx 		= threadIdx.x;
  tidy 		= threadIdx.y;
  bid 		= blockIdx.x;
  while (tidy<NchanNear){
      // iW[bid] = peak channel of template bid; iC lists its near channels.
      iChan = iC[tidy+ NchanNear * iW[bid]];
      dWUblank[tidx + nt0*iChan + bid * nt0 * Nchan] =
              dWU[tidx + nt0*iChan + bid * nt0 * Nchan];
      tidy+=blockDim.y;
  }
}
|
642
|
//
// kernel routine
//
//
// kernel routine
//
// Element-wise C = A + B over N floats; one thread per element.
__global__ void VecAdd_kernel(const float* A, const float* B, float* C, int N)
/* Naive kernel */
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    // Tail guard: the grid may be padded beyond N.
    if (gid >= N)
        return;
    C[gid] = A[gid] + B[gid];
}
|
643
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
// c[i] = a[i] + b[i]; single-block launch, one thread per element.
__global__ void vecAddKernel(int *a, int *b, int *c){
    const int lane = threadIdx.x;
    const int sum = a[lane] + b[lane];
    c[lane] = sum;
}
int main(){
    // Two fixed 5-element vectors; v3 receives their element-wise sum.
    int v1[5] = {1,2,3,4,5};
    int v2[5] = {6,7,8,9,10};
    int v3[5];
    const size_t bytes = 5 * sizeof(int);
    int *cuda_a, *cuda_b, *cuda_c;
    cudaMalloc((void**) &cuda_a, bytes);
    cudaMalloc((void**) &cuda_b, bytes);
    cudaMalloc((void**) &cuda_c, bytes);
    cudaMemcpy(cuda_a, v1, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_b, v2, bytes, cudaMemcpyHostToDevice);
    // One block of 5 threads, one per element.
    vecAddKernel <<<1,5>>> (cuda_a, cuda_b, cuda_c);
    cudaDeviceSynchronize();
    cudaMemcpy(v3, cuda_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_a);
    cudaFree(cuda_b);
    cudaFree(cuda_c);
    for (int i = 0; i < 5; i++){
        printf("%d: %d + %d = %d\n", i, v1[i], v2[i], v3[i]);
    }
}
|
644
|
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>
using namespace std;
//nvcc cudaProperty.cu -o cudaProp
// Select the first device with compute capability >= 1.0.
// Returns false (with a message) when no usable device exists.
bool InitCUDA()
{
    int count;
    cudaGetDeviceCount(&count);
    if(count == 0) {
        cout << "There is no device."<< endl;
        return false;
    }
    int dev = 0;
    for (; dev < count; ++dev) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, dev) != cudaSuccess)
            continue;
        if (prop.major >= 1)
            break;  // found a capable device
    }
    if (dev == count) {
        cout << "There is no device supporting CUDA 1.x." << endl;
        return false;
    }
    cudaSetDevice(dev);
    return true;
}
int main(int argc, char** argv){
    // Query device 0 and, if it reports compute capability >= 1, dump its
    // key properties to stdout.
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess)
        return 0;
    if (prop.major < 1)
        return 0;
    cout << "Name : " << prop.name << endl;
    cout << "Total Global Mem : " << prop.totalGlobalMem << endl;
    cout << "Shared Mem per block : " << prop.sharedMemPerBlock<< endl;
    cout << "Max Thread per block : " << prop.maxThreadsPerBlock<< endl;
    cout << "total Const Mem : " << prop.totalConstMem<< endl;
    cout << "multiProcessor : " << prop.multiProcessorCount<< endl;
    cout << "Warp Size : " << prop.warpSize<< endl;
    cout << "ClockRate : " << prop.clockRate<< endl;
    cout << "Major : " << prop.major<< endl;
    cout << "Minor : " << prop.minor<< endl;
    return 0;
}
|
645
|
/*
* singleGpuSpectrometer
*
* Version 2.0, April 12 2010
*
* This program was written by Hirofumi Kondo at the Supercomputing Engineering Laboratory,
* Graduate School of Information Science and Technology, Osaka University, Japan.
*
* Copyright 2010 Supercomputing Engineering Laboratory, Graduate School of Information
* Science and Technology, Osaka University, Japan
*
*
* Compile :
* nvcc -o singleGpuSpectrometer singleGpuSpectrometer.cu -I /usr/local/cuda/NVIDIA_GPU_Computing_SDK/common/inc
* /usr/local/cuda/NVIDIA_GPU_Computing_SDK/C/lib/libcutil.a
* -L /usr/local/cuda/lib -l cufft
*
* Usage : ./singleGpuSpectrometer [options]
* -length : signal length of this spectrometer handle in M-points
* -boxcar : length of boxcar for smoothing
* -threshold : value of threshold
* -max_detect_point : value of maximum detected points over threshold in each boxcar
* -output_file : filename of output file
*
* Output file format :
* The file format is binary format.
* The output file records all spikes whose power exceed (boxcar_mean) * (threashold).
* The file contains 3 data
* 1) index of signal
* 2) the power of signal
* 3) mean power of boxcar which the signal is in
*
* Special Instruction
* 1) Memory capacity
* The memory capacity that this GPU spectrometer requires is changed by the signal length.
* If you want to analyze 128M-points signal, GPU has to have 4GB VRAM.
* The maximum length that 1GB VRAM GPU can handle is 32M-points.
*
* 2) CUDA
* We recommend that you use CUDA 2.3 and CUFFT 2.3.
* This is not necessary condition.
* But the execution time is wrong if you use CUDA 2.2 and CUFFT 2.2.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
//#include <cutil_inline.h>
//#include <cufft.h>
//#include "bench_fft.h"
//#include "output_greg.c"
/*
* Prototype declaration
*/
/* Modified by Jayanth Chennamangalam on 2013.01.31 --> */
/*void do_analyze_on_gpu();*/
void do_analyze_on_gpu(int signalLength);
/* <-- Modified by Jayanth Chennamangalam on 2013.01.31 */
/*
* Program main
*/
int main(int argc, char** argv){
    // Default signal length: 1M points; "-length <n>" gives n in M-points.
    int signalLength = 1024 * 1024;
    for (int i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-length")) {
            signalLength = atoi(argv[++i]) * 1024 * 1024;
        }
        // Other options (-boxcar, -threshold, -max_detect_point,
        // -output_file) are disabled in this build.
    }
    // Analyze signal on GPU
    printf("Calling GPU\n");
    do_analyze_on_gpu(signalLength);
    return 0;
}
|
646
|
#include "includes.h"
// Mark the rows listed in idx[0..n_selected) as unavailable.
// One thread per selected index; n_rows is unused here (kept for the caller's
// launch signature).
__global__ void set_unavailable(bool *available, int n_rows, const int *idx, int n_selected) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n_selected)
        return;
    available[idx[gid]] = false;
}
|
647
|
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
#include <stdio.h>
#include <cuda_runtime.h>
// device kernel
// device kernel: one greeting line per thread, tagged with its index
__global__
void helloWorldDevice() {
    const int tid = threadIdx.x;
    printf("Hello world from device %d!\n", tid);
}
int main() {
    // Greet from the host, then from three device threads.
    printf("Hello world from host!\n");
    const int numThreads = 3;
    helloWorldDevice <<<1, numThreads>>> ();
    // Wait so the device printf buffer is flushed before exit.
    cudaDeviceSynchronize();
}
|
648
|
#include "includes.h"
// Local guided aggregation forward pass: for each output element, accumulates
// a 3-slice (depth-1..depth+1) x (2*radius+1)^2 window of bottom_data weighted
// by a per-pixel filter bank. One thread per output element; n = total count.
// NOTE(review): top_data is accumulated with +=, so it must be zeroed by the
// caller before launch — confirm.
__global__ void lga_filtering_forward (const int n, const float *bottom_data, const float *filters, const int height, const int width, const int channel, const int radius, float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // printf("OK\n");
  // printf("%d, %.2f, %.2f\n", index, bottom_data[index], top_data[index]);
  if (index >= n)
    {
      return;
    }
  // top_data[index]=1.0;
  // assert(0);
  // step = one 2-D plane; wsize = filter window side length.
  int step = height * width;
  int wsize = 2 * radius + 1;
  // int fsize=wsize*wsize*3;
  // fbase: start of this pixel's filter bank (wsize*wsize*3 weights per pixel,
  // stored plane-interleaved with stride `step`).
  int fbase =
    index / (step * channel) * (step * wsize * wsize * 3) + index % step;
  // Decompose the flat index into (row, col, depth/channel).
  int row = index % step / width;
  int col = index % width;
  int depth = index / step % channel;
  for (int d = -1; d <= 1; d++)
    {
      for (int r = -radius; r <= radius; r++)
	{
	  for (int c = -radius; c <= radius; c++)
	    {
	      int rr = r + row;
	      int cc = c + col;
	      int dd = d + depth;
	      // shift stays 0 for out-of-bounds taps, i.e. the centre value is
	      // reused instead of skipping the weight.
	      int shift = 0;
	      if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
		  && dd < channel)
		shift = r * width + c + d * step;
	      int location =
		(d + 1) * (wsize * wsize) + (r + radius) * wsize + c + radius;
	      top_data[index] +=
		bottom_data[index + shift] * filters[fbase + location * step];
	    }
	}
    }
  // top_data[index]=1.0;
  // printf("%d, %d, %d, %.2f, %.2f\n", index, row, col, bottom_data[index], top_data[index]);
}
|
649
|
// Program identification strings printed by main().
char *title = "gauss filtering";
char *description = "gauss filtering";
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <assert.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h> /* exp */
#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#ifndef uint8_t
typedef unsigned char uint8_t;
#endif
#ifndef uint16_t
typedef unsigned short uint16_t;
#endif
#ifndef uint32_t
typedef unsigned int uint32_t;
#endif
// http://en.wikipedia.org/wiki/BMP_file_format
#define BMPMAGIC 0x00
#define BMPFILESIZE 0x02
#define BMPOFFSET 0x0A
#define BMPDIBSISE 0x0E
#define BMPWIDTH 0x12
#define BMPHEIGHT 0x16
#define BMPBITSPERPIXEL 0x1C
// BMP file header (the fixed 14-byte block at the start of every .bmp file).
typedef struct {
    uint8_t magic[2];   /* the magic number used to identify the BMP file:
                         0x42 0x4D (Hex code points for B and M).
                         The following entries are possible:
                         BM - Windows 3.1x, 95, NT, ... etc
                         BA - OS/2 Bitmap Array
                         CI - OS/2 Color Icon
                         CP - OS/2 Color Pointer
                         IC - OS/2 Icon
                         PT - OS/2 Pointer. */
    uint32_t filesz;    /* the size of the BMP file in bytes */
    uint16_t creator1;  /* reserved. */
    uint16_t creator2;  /* reserved. */
    uint32_t offset;    /* the offset, i.e. starting address,
                         of the byte where the bitmap data can be found. */
} bmp_header_t;
// BMP DIB (BITMAPINFOHEADER, v3) header describing image geometry and format.
typedef struct {
    uint32_t header_sz;     /* the size of this header (40 bytes) */
    uint32_t width;         /* the bitmap width in pixels */
    uint32_t height;        /* the bitmap height in pixels */
    uint16_t nplanes;       /* the number of color planes being used.
                             Must be set to 1. */
    uint16_t depth;         /* the number of bits per pixel,
                             which is the color depth of the image.
                             Typical values are 1, 4, 8, 16, 24 and 32. */
    uint32_t compress_type; /* the compression method being used.
                             See also bmp_compression_method_t. */
    uint32_t bmp_bytesz;    /* the image size. This is the size of the raw bitmap
                             data (see below), and should not be confused
                             with the file size. */
    uint32_t hres;          /* the horizontal resolution of the image.
                             (pixel per meter) */
    uint32_t vres;          /* the vertical resolution of the image.
                             (pixel per meter) */
    uint32_t ncolors;       /* the number of colors in the color palette,
                             or 0 to default to 2<sup><i>n</i></sup>. */
    uint32_t nimpcolors;    /* the number of important colors used,
                             or 0 when every color is important;
                             generally ignored. */
} bmp_dib_v3_header_t;
// Separable FIR Gaussian pass over a width x height image of unsigned ints.
// Exactly one of ddxx/ddyy is non-zero and selects the direction:
//   ddxx > 0: horizontal pass, ddyy > 0: vertical pass.
// `matrix` holds the 2*Nh+1 kernel taps; s2 renormalises at image borders
// where part of the window falls outside.
// Grid-stride loop over all pixels, so any launch geometry is valid.
__global__ void global_defaultgaussfilter(
	unsigned int *input,
	unsigned int *output,
	double *matrix, int Nh,
	int width,int height,
	int ddxx, int ddyy)
{
	assert(ddxx*ddyy==0);
	if(ddxx>0) for (int id = blockDim.x*blockIdx.x + threadIdx.x;
		id < width*height;
		id += blockDim.x*gridDim.x) {
		int x = id % width;
		int y = id / width;  // NOTE(review): unused in this branch
		double s1 = 0;
		double s2 = 0;
		for (int dx = -Nh; dx <= Nh; dx++) {
			if (x + dx < 0) continue;
			if (x + dx >= width) continue;
			// NOTE(review): `value` is declared int, so the double cast is
			// immediately truncated — confirm int was intended.
			int value = (double)(unsigned int)input[id + dx];
			s1 += value*matrix[Nh + dx];
			s2 += matrix[Nh + dx];
		}
		output[id] = (unsigned int)(s1 / s2);
	}
	if(ddyy>0) for (int id = blockDim.x*blockIdx.x + threadIdx.x;
		id < width*height;
		id += blockDim.x*gridDim.x) {
		int x = id % width;  // NOTE(review): unused in this branch
		int y = id / width;
		double s1 = 0;
		double s2 = 0;
		for (int dy = -Nh; dy <= Nh; dy++) {
			if (y + dy < 0) continue;
			if (y + dy >= height) continue;
			int value = (double)(unsigned int)input[id + width*dy];
			s1 += value*matrix[Nh + dy];
			s2 += matrix[Nh + dy];
		}
		output[id] = (unsigned int)(s1 / s2);
	}
}
// Apply the separable FIR Gaussian (taps in `matrix`, half-size Nh) to the
// three colour channels r/g/b in place. gridSize/blockSize <= 0 selects a
// default launch geometry derived from the image size.
__host__ void host_defaultgaussfilter(
	int gridSize, int blockSize,
	unsigned int *r, unsigned int *g, unsigned int *b,
	double *matrix, int Nh,
	int width, int height)
{
	unsigned int *channel[3] = { r,g,b };
	// channel - the pixel data, one array per colour plane
	// matrix  - the Gaussian taps
	// width/height - image dimensions
	unsigned int *device_a = NULL;
	unsigned int *device_b = NULL;
	double *device_matrix = NULL;
	const size_t imgBytes = (size_t)width * height * sizeof(unsigned int);
	cudaError_t err;
	// BUGFIX: the allocation results were previously captured and discarded
	// (`err = err;`); a failed cudaMalloc silently produced garbage output.
	if ((err = cudaMalloc((void**)&device_a, imgBytes)) == cudaSuccess &&
	    (err = cudaMalloc((void**)&device_b, imgBytes)) == cudaSuccess &&
	    (err = cudaMalloc((void**)&device_matrix, (2 * Nh + 1) * sizeof(double))) == cudaSuccess) {
		cudaMemcpy(device_matrix, matrix, (2 * Nh + 1)*sizeof(double), cudaMemcpyHostToDevice);
		int blocks = (gridSize > 0)? gridSize : min(15, (int)sqrt(width*height));
		int threads = (blockSize > 0)? blockSize : min(15, (int)sqrt(width*height));
		for(int j=0; j<3; j++) {
			cudaMemcpy(device_a, channel[j], imgBytes, cudaMemcpyHostToDevice);
			// Vertical pass (a -> b), then horizontal pass (b -> a).
			global_defaultgaussfilter <<< blocks, threads >>>(device_a, device_b, device_matrix, Nh, width, height, 0, 1);
			global_defaultgaussfilter <<< blocks, threads >>>(device_b, device_a, device_matrix, Nh, width, height, 1, 0);
			cudaMemcpy(channel[j], device_a, imgBytes, cudaMemcpyDeviceToHost);
		}
	} else {
		fprintf(stderr, "host_defaultgaussfilter: device allocation failed (%s)\n",
			cudaGetErrorString(err));
	}
	// Release device memory (cudaFree(NULL) is a safe no-op).
	cudaFree((void*)device_a);
	cudaFree((void*)device_b);
	cudaFree((void*)device_matrix);
}
// Recursive (IIR) Gaussian pass, third order (p==1 feedforward, q==3 feedback
// coefficients in bb): output[i] = B*input[i] + b1*y[-1] + b2*y[-2] + b3*y[-3].
// Exactly one of the four directions runs per launch, selected by sign:
//   ddxx > 0: left-to-right rows,   ddxx < 0: right-to-left rows,
//   ddyy > 0: top-to-bottom cols,   ddyy < 0: bottom-to-top cols.
// One thread per row (x passes) or per column (y passes), grid-stride.
__global__ void global_recursivegaussfilter(
	unsigned int *input,
	unsigned int *output,
	double *bb, int p, int q,
	int width,int height,
	int ddxx, int ddyy)
{
	assert(p==1);
	assert(q==3);
	assert(ddxx*ddyy==0);
	// Copy the normalised feedback coefficients into registers.
	double b1 = bb[1]/bb[0];
	double b2 = bb[2]/bb[0];
	double b3 = bb[3]/bb[0];
	double B = 1.0 - (b1+b2+b3);
	if(ddxx>0) for (int y = blockDim.x*blockIdx.x + threadIdx.x;
		y < height;
		y += blockDim.x*gridDim.x) {
		int id = y*width;
		int rowEnd = id + width;  // first index past this row
		if(width>1) output[id]=B*input[id];id++;
		if(width>2) output[id]=B*input[id]+b1*output[id-1];id++;
		if(width>3) output[id]=B*input[id]+b1*output[id-1]+b2*output[id-2];id++;
		// BUGFIX: the loop previously ran while id < width, so every row
		// except y == 0 was left unfiltered; stop at the end of THIS row.
		for(; id<rowEnd; id++)
			output[id]=B*input[id]+b1*output[id-1]+b2*output[id-2]+b3*output[id-3];
	}
	if(ddxx<0) for (int y = blockDim.x*blockIdx.x + threadIdx.x;
		y < height;
		y += blockDim.x*gridDim.x) {
		int rowStart = y*width;
		int id = rowStart+width;
		id--;if(width>1) output[id]=B*input[id];
		id--;if(width>2) output[id]=B*input[id]+b1*output[id+1];
		id--;if(width>3) output[id]=B*input[id]+b1*output[id+1]+b2*output[id+2];
		// BUGFIX: the loop previously ran down to id >= 0, crossing into the
		// rows below (owned by other threads); stop at the start of THIS row.
		for(id--; id>=rowStart; id--)
			output[id]=B*input[id]+b1*output[id+1]+b2*output[id+2]+b3*output[id+3];
	}
	if(ddyy>0) for (int x = blockDim.x*blockIdx.x + threadIdx.x;
		x < width;
		x += blockDim.x*gridDim.x) {
		int id = x;
		if(height>1) output[id]=B*input[id];id+=width;
		if(height>2) output[id]=B*input[id]+b1*output[id-width];id+=width;
		if(height>3) output[id]=B*input[id]+b1*output[id-width]+b2*output[id-2*width];id+=width;
		for(int y=3; y<height; y++, id+=width)
			output[id]=B*input[id]+b1*output[id-width]+b2*output[id-2*width]+b3*output[id-3*width];
	}
	if(ddyy<0) for (int x = blockDim.x*blockIdx.x + threadIdx.x;
		x < width;
		x += blockDim.x*gridDim.x){
		int id = height*width+x;
		id-=width;if(height>1) output[id]=B*input[id];
		id-=width;if(height>2) output[id]=B*input[id]+b1*output[id+width];
		id-=width;if(height>3) output[id]=B*input[id]+b1*output[id+width]+b2*output[id+2*width];
		// Decrementing by `width` terminates at row 0 of this column (id == x).
		for(id-=width; id>=0; id-=width)
			output[id]=B*input[id]+b1*output[id+width]+b2*output[id+2*width]+b3*output[id+3*width];
	}
}
// Apply the recursive (IIR) Gaussian to the three colour channels r/g/b in
// place: forward+backward x passes, then forward+backward y passes per
// channel. gridSize/blockSize <= 0 selects a default launch geometry.
__host__ void host_recursivegaussfilter(
	int gridSize, int blockSize,
	unsigned int *r, unsigned int *g, unsigned int *b,
	double *bb, int p, int q,
	int width, int height)
{
	unsigned int *channel[3] = { r,g,b };
	// channel - the pixel data, one array per colour plane
	// bb      - the p+q recursive filter coefficients
	// width/height - image dimensions
	unsigned int *device_a = NULL;
	unsigned int *device_b = NULL;
	double *device_bb = NULL;
	const size_t imgBytes = (size_t)width * height * sizeof(unsigned int);
	cudaError_t err;
	// BUGFIX: the allocation results were previously captured and discarded
	// (`err = err;`); a failed cudaMalloc silently produced garbage output.
	if ((err = cudaMalloc((void**)&device_a, imgBytes)) == cudaSuccess &&
	    (err = cudaMalloc((void**)&device_b, imgBytes)) == cudaSuccess &&
	    (err = cudaMalloc((void**)&device_bb, (p+q) * sizeof(double))) == cudaSuccess) {
		cudaMemcpy(device_bb, bb, (p+q)*sizeof(double), cudaMemcpyHostToDevice);
		int blocks = (gridSize > 0)? gridSize : min(15, (int)sqrt(max(width,height)));
		int threads = (blockSize > 0)? blockSize : min(15, (int)sqrt(max(width,height)));
		for(int j=0; j<3; j++) {
			cudaMemcpy(device_a, channel[j], imgBytes, cudaMemcpyHostToDevice);
			// Causal + anti-causal x passes, then causal + anti-causal y passes.
			global_recursivegaussfilter <<< blocks, threads >>>(device_a, device_b, device_bb, p, q, width, height, 0, 1);
			global_recursivegaussfilter <<< blocks, threads >>>(device_b, device_a, device_bb, p, q, width, height, 0, -1);
			global_recursivegaussfilter <<< blocks, threads >>>(device_a, device_b, device_bb, p, q, width, height, 1, 0);
			global_recursivegaussfilter <<< blocks, threads >>>(device_b, device_a, device_bb, p, q, width, height, -1, 0);
			cudaMemcpy(channel[j], device_a, imgBytes, cudaMemcpyDeviceToHost);
		}
	} else {
		fprintf(stderr, "host_recursivegaussfilter: device allocation failed (%s)\n",
			cudaGetErrorString(err));
	}
	// Release device memory (cudaFree(NULL) is a safe no-op).
	cudaFree((void*)device_a);
	cudaFree((void*)device_b);
	cudaFree((void*)device_bb);
}
#define RECURSIVE 1
#define DEFAULT 0
// Entry point: parse arguments, load a BMP, run the chosen Gaussian filter on
// the GPU (FIR "gauss" or IIR "recursivegauss"), and write the filtered BMP.
int main(int argc, char* argv[])
{
	int i, j;
	std::cout << title << std::endl;
	// Find/set the device.
	int device_count = 0;
	cudaGetDeviceCount(&device_count);
	for (i = 0; i < device_count; ++i)
	{
		cudaDeviceProp properties;
		cudaGetDeviceProperties(&properties, i);
		std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl;
	}
	if (argc < 5){
		printf("Usage :\t%s filter sigma inputfilename.bmp ouputfilename.bmp\n", argv[0]);
		printf("filter :\tgauss recursivegauss\n");
		exit(-1);
	}
	// Read the command-line parameters: filter, sigma, and the file names.
	char *filter = argv[1];
	double sigma = atof(argv[2]);
	char *inputFileName = argv[3];
	char *outputFileName = argv[4];
	int gridSize = (argc>5)?atoi(argv[5]):0;
	int blockSize = (argc>6)?atoi(argv[6]):0;
	// Any filter name other than "gauss" selects the recursive variant.
	int mode =(strcmp(filter,"gauss")==0)?DEFAULT:RECURSIVE;
	printf("Title :\t%s\n", title);
	printf("Description :\t%s\n", description);
	printf("Filter :\t%s\n", filter);
	printf("Sigma :\t%le\n", sigma);
	printf("Input file name :\t%s\n", inputFileName);
	printf("Output file name :\t%s\n", outputFileName);
	FILE *file = fopen(inputFileName, "rb");
	if (!file) {
		fprintf(stderr, "Open file error (%s)\n", inputFileName);
		fflush(stderr);
		exit(-1);
	}
	// Slurp the whole BMP into memory.
	fseek(file, 0L, SEEK_END);
	long size = ftell(file);
	unsigned char *buffer = (unsigned char *)malloc((size_t)size);
	fseek(file, 0L, SEEK_SET);
	// NOTE(review): the fread return value is ignored — a short read would
	// leave `buffer` partially filled.
	fread((void*)buffer, (size_t)1, (size_t)size, file);
	fclose(file);
	// Pull the geometry fields straight out of the raw header bytes.
	uint32_t width = *(uint32_t *)&buffer[BMPWIDTH];
	uint32_t height = *(uint32_t *)&buffer[BMPHEIGHT];
	uint32_t file_size = *(uint32_t *)&buffer[BMPFILESIZE];
	uint32_t offset = *(uint32_t *)&buffer[BMPOFFSET];
	uint16_t bits_per_pixel = *(uint16_t *)&buffer[BMPBITSPERPIXEL];
	uint16_t bytes_per_pixel = ((int)((bits_per_pixel+7)/8));
	uint32_t bytes_per_line = ((int)((bits_per_pixel * width+31)/32))*4; // http://en.wikipedia.org/wiki/BMP_file_format
	// NOTE(review): "%ld" paired with uint32_t arguments is not portable;
	// PRIu32 from <cinttypes> would be correct.
	printf("BMP image size :\t%ld x %ld\n", width, height);
	printf("BMP file size :\t%ld\n", file_size);
	printf("BMP pixels offset :\t%ld\n", offset);
	printf("BMP bits per pixel :\t%d\n", bits_per_pixel);
	printf("BMP bytes per pixel :\t%d\n", bytes_per_pixel);
	printf("BMP bytes per line :\t%d\n", bytes_per_line);
	uint8_t *pixels =(uint8_t *)&buffer[offset];
	uint8_t *pixel;
	uint32_t x, y;
	/* allocate the per-channel pixel buffers */
	int count = width*height;
	unsigned int* channel[3];
	for (j = 0; j < 3; j++)
		channel[j] = (unsigned int*)malloc(count*sizeof(unsigned int));
	// De-interleave the BMP pixel data into three planar channels.
	int pos = 0;
	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			pixel = &pixels[y*bytes_per_line+x*bytes_per_pixel];
			channel[0][pos] = (unsigned int)pixel[0];
			channel[1][pos] = (unsigned int)pixel[1];
			channel[2][pos] = (unsigned int)pixel[2];
			pos++;
		}
	}
	switch(mode){
	case RECURSIVE:{
		int p = 1, q = 3;
		double * bb = (double*)malloc((p+q)*sizeof(double));
		//// http://mognovse.ru/qgi-chekan-rostislav-vladimirovich.html
		//// standard recommendations for choosing the constant q are:
		//double qq;
		//if(sigma>2.5) qq=0.98711*sigma - 0.96330;
		//else if(sigma>0.5) qq=3.97156 - 4.14554*sqrt(1.0 - 0.26891*sigma);
		//else qq=3.97156 - 4.14554*sqrt(1.0 - 0.26891*sigma);
		//double qq2 = qq*qq;
		//double qq3 = qq2*qq;
		//bb[0] = 1.5725 + 2.44413*qq + 1.4281*qq2;
		//bb[1] = 2.44413*qq + 2.85619*qq2 + 1.26661*qq3;
		//bb[2] = -1.4281*qq2 - 1.26661*qq3;
		//bb[3] = 0.422205*qq3;
		// Alternative coefficient fit, from: http://habrahabr.ru/post/151157/
		double sigma_inv_4;
		sigma_inv_4 = sigma*sigma; sigma_inv_4 = 1.0/(sigma_inv_4*sigma_inv_4);
		double coef_A = sigma_inv_4*(sigma*(sigma*(sigma*1.1442707+0.0130625)-0.7500910)+0.2546730);
		double coef_W = sigma_inv_4*(sigma*(sigma*(sigma*1.3642870+0.0088755)-0.3255340)+0.3016210);
		double coef_B = sigma_inv_4*(sigma*(sigma*(sigma*1.2397166-0.0001644)-0.6363580)-0.0536068);
		double z0_abs = exp(coef_A);
		double z0_real = z0_abs*cos(coef_W);
		double z0_im = z0_abs*sin(coef_W);
		double z2 = exp(coef_B);
		double z0_abs_2 = z0_abs*z0_abs;
		bb[3] = 1.0/(z2*z0_abs_2);
		bb[2] = -(2*z0_real+z2)*bb[3];
		bb[1] = (z0_abs_2+2*z0_real*z2)*bb[3];
		bb[0] = 1.0;
		clock_t t1, t2;
		t1 = clock();
		host_recursivegaussfilter(gridSize, blockSize,
			channel[0], channel[1], channel[2],
			bb, p, q,
			width, height);
		t2 = clock();
		double seconds = ((double)(t2-t1))/CLOCKS_PER_SEC;
		printf("Execution time :\t%le\n", seconds);
		free(bb);
		}
		break;
	default:{
		// Nh - kernel half-size (3 sigma covers almost all the mass)
		int Nh = (int)(3 * sigma);
		double * matrix = (double*)malloc((2 * Nh + 1)*sizeof(double));
		for (int i = -Nh; i <= Nh; i++) matrix[Nh + i] = exp(-(double)i*i / (2.0*sigma*sigma));
		clock_t t1, t2;
		t1 = clock();
		host_defaultgaussfilter(gridSize, blockSize,
			channel[0], channel[1], channel[2],
			matrix, Nh,
			width, height);
		t2 = clock();
		double seconds = ((double)(t2-t1))/CLOCKS_PER_SEC;
		printf("Execution time :\t%le\n", seconds);
		free(matrix);
		}
		break;
	}
	// Re-interleave the filtered channels back into the BMP pixel area.
	pos = 0;
	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			pixel = &pixels[y*bytes_per_line+x*bytes_per_pixel];
			pixel[0] = (uint8_t)channel[0][pos];
			pixel[1] = (uint8_t)channel[1][pos];
			pixel[2] = (uint8_t)channel[2][pos];
			pos++;
		}
	}
	/* free the channel arrays */
	for (j = 0; j < 3; j++)
		free(channel[j]);
	/* write the result */
	file = fopen(outputFileName, "wb");
	if (!file) {
		fprintf(stderr, "Open file error (%s)\n", outputFileName);
		fflush(stderr);
		exit(-1);
	}
	fwrite((void*)buffer, (size_t)1, (size_t)size, file);
	printf("Output file size :\t%ld\n", size);
	free(buffer);
	fclose(file);
	cudaDeviceReset();
	exit(0);
}
|
650
|
#include "addition.cuh"
#include <stdio.h>
// Device-side scalar addition: *c receives *a + *b.
// All three pointers must reference device memory; launched with one thread.
__global__ void addition(int* a, int* b, int* c){
    int sum = *a + *b;
    *c = sum;
}
// Allocate one device int per operand (D_A, D_B, D_C) and seed each with
// the corresponding host value. The device pointers are returned through
// the reference parameters.
void setupCuda(int* &D_A, int* &D_B, int* &D_C, int* &A, int* &B, int* &C){
    int** devs[3]  = { &D_A, &D_B, &D_C };
    int*  hosts[3] = { A, B, C };
    for (int i = 0; i < 3; ++i) {
        cudaMalloc((void**)devs[i], sizeof(int));
        cudaMemcpy(*devs[i], hosts[i], sizeof(int), cudaMemcpyHostToDevice);
    }
}
// Push fresh host values of the two inputs A and B to the device.
// The result buffers (C / D_C) are untouched; those parameters exist only
// so every helper in this file shares the same signature.
void copyInputToDevice(int* D_A, int* D_B, int* D_C, int* A, int* B, int* C){
    const size_t bytes = sizeof(int);
    cudaMemcpy(D_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(D_B, B, bytes, cudaMemcpyHostToDevice);
}
// Launch the single-thread addition kernel on the default stream.
// A kernel launch returns no status directly, so cudaGetLastError() is
// queried immediately to surface configuration/launch failures instead of
// letting them silently poison later runtime calls.
void executeKernel(int* D_A, int* D_B, int* D_C, int* A, int* B, int* C){
    addition<<<1,1>>>(D_A, D_B, D_C);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "addition kernel launch failed: %s\n", cudaGetErrorString(err));
    }
}
// Copy the kernel's scalar result D_C back into host C.
// Only the output travels; the other parameters are kept for a uniform
// signature across the setup/copy/execute helpers in this file.
// NOTE: cudaMemcpy blocks, so this also synchronizes with the kernel.
void copyOutputToHost(int* D_A, int* D_B, int* D_C, int* A, int* B, int* C){
    cudaMemcpy(C, D_C, sizeof(int), cudaMemcpyDeviceToHost);
}
// Release the three device scalars allocated by setupCuda().
void cleanupCuda(int* D_A, int* D_B, int* D_C){
    int* buffers[3] = { D_A, D_B, D_C };
    for (int i = 0; i < 3; ++i)
        cudaFree(buffers[i]);
}
|
651
|
#include "includes.h"
// Elementwise ramp activation: y = x*(x>0) + 0.1*x over n floats.
// Indexing flattens a (possibly 2D) grid of 1D blocks; threads past n exit.
__global__ void bcnn_op_cuda_ramp_kernel(int n, float *x, float *y) {
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) {
        // 0.1f keeps the arithmetic in single precision; the former bare 0.1
        // (a double literal) promoted every multiply to double precision.
        y[i] = x[i] * (x[i] > 0) + 0.1f * x[i];
    }
    return;
}
|
652
|
#include "cuda_runtime.h"
#include <stdint.h>
|
653
|
#include <cuda_runtime.h>
// Smoke test: grab 100 bytes of device memory and release it.
int main() {
    int* devBuf = 0;
    cudaMalloc(&devBuf, 100);
    cudaFree(devBuf);
    return 0;
}
|
654
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <iostream>
#include <cstdlib>
#define BLOCK_SIZE 64
#define N 64
using namespace std;
// Print (and thereby clear) the most recent CUDA runtime error, tagged
// with a caller-supplied label.
void displayLastError(const string &msg)
{
    const char *errText = cudaGetErrorString(cudaGetLastError());
    cout << "Last Error (" << msg << "):\t" << errText << endl;
}
// Block-local tree reduction that leaves the MINIMUM of
// array[0..blockDim.x) in array[0] (despite the "minMax" name, only the
// minimum survives).
// NOTE(review): the indexing uses threadIdx.x only, so every block reduces
// the same first blockDim.x elements -- this is only correct for a
// single-block launch over an array of exactly blockDim.x values. Confirm
// callers honor that (the main() in this file uses N == BLOCK_SIZE).
__global__ void minMaxCuda(float *array)
{
    int nTotalThreads = blockDim.x; // Total number of active threads
    while(nTotalThreads > 1)
    {
        int halfPoint = (nTotalThreads >> 1); // divide by two
        // only the first half of the threads will be active.
        if (threadIdx.x < halfPoint)
        {
            float temp = array[threadIdx.x + halfPoint];
            if (temp < array[threadIdx.x]) array[threadIdx.x] = temp;
        }
        // safe barrier: the loop bound is uniform, so all threads reach it
        __syncthreads();
        nTotalThreads = (nTotalThreads >> 1); // divide by two.
    }
}
// Fill N floats (0..N-1) on the host, reduce them on the device with
// minMaxCuda (which leaves the minimum in deviceData[0]), and copy the
// result back.
int main(int argc, char *argv[])
{
    float *data = new float[N];
    for(int i=0;i<N;i++)
        data[i] = i;
    float *deviceData;
    float *deviceMax;
    size_t size = N*sizeof(float);
    cudaMalloc((void**)&deviceData, size);
    displayLastError("memory allocation");
    cudaMalloc((void**)&deviceMax, sizeof(float));
    displayLastError("memory allocation");
    cudaMemcpy(deviceData, data, size, cudaMemcpyHostToDevice);
    displayLastError("memory copying");
    int blocks = N / BLOCK_SIZE;
    if(N % BLOCK_SIZE)
        blocks++;
    minMaxCuda<<<blocks, BLOCK_SIZE>>>(deviceData);
    displayLastError("kernel");
    float max;
    // BUG FIX: the kernel writes its result into deviceData[0]; deviceMax
    // is never written by any kernel, so the old copy from deviceMax
    // returned uninitialized device memory.
    cudaMemcpy(&max, deviceData, sizeof(float), cudaMemcpyDeviceToHost);
    displayLastError("memory copying");
    cudaFree(deviceData);
    displayLastError("free");
    cudaFree(deviceMax);
    displayLastError("free");
    delete [] data;
    return 0;
}
|
655
|
#include "includes.h"
// Tiled single-precision GEMM: C = alpha*A*B + beta*C for row-major
// A (M x K), B (K x N), C (M x N). Launch with blockDim =
// (BLOCK_DIM, BLOCK_DIM); assumes M, N, K are multiples of BLOCK_DIM
// (there are no tail guards).
__global__ void sgemm_kernel_v2(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta)
{
    int bid_x = blockIdx.x * blockDim.x;
    int bid_y = blockIdx.y * blockDim.y;
    int tid_x = threadIdx.x;
    int tid_y = threadIdx.y;
    float element_c = 0.f;
    __shared__ float s_tile_A[BLOCK_DIM][BLOCK_DIM];
    __shared__ float s_tile_B[BLOCK_DIM][BLOCK_DIM];
    // walk tiles along the K dimension
    for (int k = 0; k < K; k += BLOCK_DIM)
    {
        s_tile_A[tid_y][tid_x] = A[ (bid_y + tid_y) * K + tid_x + k ]; // Get sub-matrix from A
        // BUG FIX: k already advances in steps of BLOCK_DIM, so the global
        // row of B for this tile is (k + tid_y). The previous expression
        // (k*BLOCK_DIM + tid_y) indexed far beyond the current tile and
        // loaded the wrong rows of B.
        s_tile_B[tid_y][tid_x] = B[ (k + tid_y) * N + bid_x + tid_x ]; // Get sub-matrix from B
        __syncthreads(); // both tiles fully populated before use
        // compute gemm operation with tiles
        for (int e = 0; e < BLOCK_DIM; e++)
            element_c += s_tile_A[tid_y][e] * s_tile_B[e][tid_x];
        __syncthreads(); // all reads finished before the next overwrite
    }
    C[(bid_y + tid_y) * N + (bid_x + tid_x)] = \
    alpha * element_c + beta * C[(bid_y + tid_y) * N + (bid_x + tid_x)];
}
|
656
|
// My first CUDA program!
// 2018.9.1
#include <stdio.h>
// Device-side greeting: every launched thread prints the same line once.
__global__ void helloFromGPU(void)
{
    printf("Hello GPU! from thread \n");
}
// Host prints first, then launches ten device threads. The launch is
// asynchronous, so we block until the device printf buffer is flushed
// before exiting.
int main(void)
{
    printf("Hello cPU! \n");
    const int kThreads = 10;
    helloFromGPU <<<1, kThreads>>>();
    //cudaDeviceReset();
    cudaDeviceSynchronize();
    return 0;
}
|
657
|
#include <cuda.h>
#include <stdio.h>
#include <time.h>
#define N 100
// One thread per element of the length-N addition; threads past the end
// of the data do nothing.
__global__
void addVectorGPU(int* a, int* b, int* c) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return; // ragged tail of the grid
    c[idx] = a[idx] + b[idx];
}
// Sequential reference implementation of c = a + b over N ints.
void addVectorCPU(int* a, int* b, int* c) {
    int idx = 0;
    while (idx < N) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
}
// Print all N elements on one line, separated by single spaces.
void printVector(int* a) {
    for (int idx = 0; idx < N; idx++) {
        if (idx > 0)
            printf(" ");
        printf("%d", a[idx]);
    }
    printf("\n");
}
// Compare CPU vs GPU wall time for a length-N vector addition.
int main() {
    clock_t start, end;
    double cpu_time_used, gpu_time_used;
    int *h_a, *h_b, *h_c, *h_result;  // host buffers
    int *d_a, *d_b, *d_c;             // device buffers
    const int size = sizeof(int) * N;
    h_a = (int*)malloc(size);
    h_b = (int*)malloc(size);
    h_c = (int*)malloc(size);
    h_result = (int*)malloc(size);
    // init: a = 1..N, b = 2*a, c cleared
    for (int i = 0; i < N; i++) {
        h_a[i] = i + 1;
        h_b[i] = (i + 1) * 2;
        h_c[i] = 0;
    }
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
    start = clock();
    addVectorCPU(h_a, h_b, h_c);
    // printVector(h_c);
    end = clock();
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
    // 10 threads per block; one extra block covers the remainder
    dim3 threads_per_block(10, 1, 1);
    dim3 number_of_blocks((N / threads_per_block.x) + 1, 1, 1);
    start = clock();
    addVectorGPU<<< number_of_blocks, threads_per_block >>>(d_a, d_b, d_c);
    // The blocking device-to-host copy doubles as synchronization, so the
    // GPU timing includes both the kernel and this transfer.
    cudaMemcpy(h_result, d_c, size, cudaMemcpyDeviceToHost);
    end = clock();
    gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
    // NOTE(review): h_result is never compared against h_c; presumably a
    // correctness check was intended -- confirm before trusting the timing.
    free(h_a); free(h_b);
    free(h_c); free(h_result);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
658
|
/**
* Concurrent Wave Equation
* Compilation Command: nvcc cuda1.cu -o cuda1
* This program was originally written in serial method by the teacher.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
// Abort with a file/line diagnostic if a CUDA runtime call failed.
static void handleError(cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err) (handleError(err, __FILE__, __LINE__))
void checkParam();
__global__ void initLine(float*, float*, int);
__global__ void updateAll(float*, float*, int, int);
void printResult();
int totalSteps, totalPoints, allocPoints;
float *currVal;
float *devCurrVal, *devPrevVal;
// Entry point: parse (or prompt for) the simulation size, run the init
// and update kernels, and print the final waveform.
int main(int argc, char *argv[]) {
    // Only read argv when the arguments exist; the globals default to 0,
    // which makes checkParam() prompt interactively. (Previously
    // argv[1]/argv[2] were dereferenced unconditionally and the program
    // crashed when started without both arguments.)
    if (argc > 1) sscanf(argv[1], "%d", &totalPoints);
    if (argc > 2) sscanf(argv[2], "%d", &totalSteps);
    checkParam();
    // Pad the allocation so it rounds up to whole 256-thread blocks.
    allocPoints = totalPoints + 256;
    currVal = (float*) malloc(allocPoints * sizeof(float));
    if (!currVal)
        exit(EXIT_FAILURE);
    HANDLE_ERROR(cudaMalloc((void**) &devCurrVal, allocPoints * sizeof(float)));
    HANDLE_ERROR(cudaMalloc((void**) &devPrevVal, allocPoints * sizeof(float)));
    dim3 threadsPerBlock(256);
    dim3 numOfBlocks(allocPoints/256);
    printf("Initializing points on the line...\n");
    initLine<<<numOfBlocks, threadsPerBlock>>>(devPrevVal, devCurrVal, totalPoints);
    printf("Updating all points for all time steps...\n");
    updateAll<<<numOfBlocks, threadsPerBlock>>>(devPrevVal, devCurrVal, totalPoints, totalSteps);
    printf("Printing final results...\n");
    // Blocking copy synchronizes with both kernels before reading currVal.
    HANDLE_ERROR(cudaMemcpy(currVal, devCurrVal, allocPoints * sizeof(float), cudaMemcpyDeviceToHost));
    printResult();
    printf("\nDone.\n\n");
    cudaFree(devCurrVal);
    cudaFree(devPrevVal);
    free(currVal);
    return EXIT_SUCCESS;
}
// Re-prompt interactively until both globals hold values in their legal
// ranges, then echo the accepted configuration.
void checkParam() {
    char buf[20];
    while (totalPoints < MINPOINTS || totalPoints > MAXPOINTS) {
        printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
        scanf("%s", buf);
        totalPoints = atoi(buf);
        if (totalPoints < MINPOINTS || totalPoints > MAXPOINTS)
            printf("Invalid. Please enter value between %d and %d.\n", MINPOINTS, MAXPOINTS);
    }
    while (totalSteps < 1 || totalSteps > MAXSTEPS) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%s", buf);
        totalSteps = atoi(buf);
        if (totalSteps < 1 || totalSteps > MAXSTEPS)
            printf("Invalid. Please enter value between 1 and %d.\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", totalPoints, totalSteps);
}
// Seed both value buffers with one sine period along the string; one
// thread per point, extra threads exit early.
__global__ void initLine(float *__devPrevVal, float *__devCurrVal, int __totalPoints) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= __totalPoints)
        return;
    const float frac = (float) idx / (__totalPoints - 1);
    const float value = __sinf(6.28318530 * frac);
    __devPrevVal[idx] = value;
    __devCurrVal[idx] = value;
}
// Advance every point __totalSteps time steps with the explicit two-level
// recurrence next = 1.82*curr - prev; the endpoints stay pinned at zero.
// Each thread keeps its point's state in registers for the whole loop.
__global__ void updateAll(float *__devPrevVal, float *__devCurrVal, int __totalPoints, int __totalSteps) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= __totalPoints)
        return;
    const bool boundary = (idx == 0) || (idx == __totalPoints - 1);
    float prev = __devPrevVal[idx];
    float curr = __devCurrVal[idx];
    for (int step = 0; step < __totalSteps; step++) {
        // double literal 1.82 kept so rounding matches the original code
        float next = boundary ? 0.0 : (1.82 * curr - prev);
        prev = curr;
        curr = next;
    }
    __devCurrVal[idx] = curr;
}
// Dump the final amplitudes, ten values per line.
void printResult() {
    int printed = 0;
    while (printed < totalPoints) {
        printf("%6.4f ", currVal[printed]);
        ++printed;
        if (printed % 10 == 0)
            printf("\n");
    }
}
|
659
|
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <omp.h>
#include <chrono>
#include <random>
#include <iomanip>
#include <cmath>
#include "matrix_operations.cuh"
#define BLOCKSIZE_1 16
// Column-major matrix product: Cd (M x N) = Ad (M x K) * Bd (K x N).
// One thread computes one output element.
__global__ void kernel_naive_multiply_cuda(double* Ad, double* Bd, double* Cd, int M, int K, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard BOTH the accumulation and the store. The original
    // stored unconditionally, so threads on the grid's ragged edge
    // (row >= M or col >= N) wrote zeros out of bounds of Cd.
    if (row < M && col < N)
    {
        double val = 0.;
        for (int k = 0; k < K; ++k)
        {
            val += Ad[row + k * M] * Bd[col * K + k];
        }
        Cd[row + col * M] = val;
    }
}
// Host wrapper: launch one thread per element of the M x N result.
int naive_multiply_cuda(double* __restrict__ A, double* __restrict__ B,
double* __restrict__ C, int M, int K, int N)
{
    const dim3 block(BLOCKSIZE_1, BLOCKSIZE_1);               // threads per block
    const dim3 grid((N + block.x - 1) / block.x,              // ceil(N / block.x)
                    (M + block.y - 1) / block.y);             // ceil(M / block.y)
    kernel_naive_multiply_cuda<<<grid, block>>>(A, B, C, M, K, N);
    return 0;
}
// Fill the M*N buffer with uniform draws from [lower, upper).
// The engine is default-seeded, so the sequence is deterministic.
int random_uniform_initialize(double* m, int M, int N, double lower, double upper)
{
    std::default_random_engine rng;
    std::uniform_real_distribution<double> uniform(lower, upper);
    const int total = M * N;
    for (int idx = 0; idx < total; ++idx)
        m[idx] = uniform(rng);
    return 0;
}
// Column-major C (M x N) = A (M x K) * B (K x N), computed one output
// element at a time in column-major order.
int naive_multiply_sequential(double* A, double* B, double* C, int M, int K, int N)
{
    for (int col = 0; col < N; ++col)
    {
        for (int row = 0; row < M; ++row)
        {
            double acc = 0.;
            for (int k = 0; k < K; ++k)
                acc += A[k * M + row] * B[col * K + k];
            C[col * M + row] = acc;
        }
    }
    return 0;
}
// OpenMP variant of the column-major product: each flat output index is
// independent, so the outer loop parallelizes with no synchronization.
int naive_multiply_omp(double* A, double* B, double* C, int M, int K, int N)
{
    const int total = M * N;
#pragma omp parallel for
    for (int i = 0; i < total; ++i)
    {
        const int row = i % M;
        const int col = i / M;
        double acc = 0.;
        for (int k = 0; k < K; ++k)
            acc += A[k * M + row] * B[col * K + k];
        C[i] = acc;
    }
    return 0;
}
// Pretty-print a column-major M x N matrix: fixed notation, 3 decimals,
// 8-character cells, one matrix row per output line.
void print_matrix(const double* mat, int M, int N)
{
    std::cout << std::endl;
    for (int row = 0; row < M; ++row)
    {
        for (int col = 0; col < N; ++col)
            std::cout << std::fixed << std::setprecision(3) << std::setw(8)
                      << mat[col * M + row] << " ";
        std::cout << std::endl;
    }
}
// Print an abridged view of a large column-major M x N matrix: the
// top-left include x include block, then the bottom-right include x
// include block, with "..." marking the elided interior.
void print_matrix_big(const double* mat, int M, int N, int include)
{
    std::cout << std::endl << "[" << std::endl;
    // top-left corner, row by row
    for (int i = 0; i < include; ++i)
    {
        for (int j = 0; j < include; ++j)
        {
            std::cout << std::fixed << std::setprecision(3) << std::setw(8) << *(mat + j * M + i) << " ";
        }
        std::cout << " ..." << std::endl;
    }
    std::cout << " ...";
    // bottom-right corner, row by row
    for (int i = (M - include); i < M; ++i)
    {
        std::cout << std::endl << " ... ";
        for (int j = (N - include); j < N; ++j)
        {
            std::cout << std::fixed << std::setprecision(3) << std::setw(8) << *(mat + j * M + i) << " ";
        }
    }
    std::cout << std::endl << "]" << std::endl;
}
// Largest absolute element-wise difference between two M x N buffers.
double max_diff(const double* mat1, const double* mat2, int M, int N)
{
    double worst = 0.;
    const int total = M * N;
    for (int i = 0; i < total; ++i)
    {
        const double d = std::abs(mat1[i] - mat2[i]);
        if (d > worst)
            worst = d;
    }
    return worst;
}
|
660
|
// Empty, parameterless kernel -- used to measure bare launch overhead.
extern "C" __global__ void noarg() {}
// Add 1 to each element of A; one thread per element. There is no tail
// guard, so the grid must exactly cover the buffer.
extern "C" __global__ void simple_add(float * A)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    A[id] += 1.0f;  // float literal avoids a double-precision round trip
}
// Apply four dependent multiply-adds per element (a small FLOP
// micro-benchmark kernel); one thread per element, no tail guard.
extern "C" __global__ void four_mad(float * A)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    float f = A[id];
    // Float-suffixed constants keep every step in single precision; the
    // former bare double literals promoted each operation to double.
    f = f * 41.0f + 37.0f;
    f = f * 11.0f + 23.0f;
    f = f *  2.0f + 13.0f;
    f = f *  3.0f +  7.0f;
    A[id] = f;
}
#define PEAK_FLOP_MADD \
r0 = r1*r8+r0; \
r1 = r15*r9+r2; \
r2 = r14*r10+r4; \
r3 = r13*r11+r6; \
r4 = r12*r12+r8; \
r5 = r11*r13+r10; \
r6 = r10*r14+r12; \
r7 = r9*r15+r14; \
r8 = r7*r0+r1; \
r9 = r8*r1+r3; \
r10 = r6*r2+r5; \
r11 = r5*r3+r7; \
r12 = r4*r4+r9; \
r13 = r3*r5+r11; \
r14 = r2*r6+r13; \
r15 = r0*r7+r15; \
/**/
// FLOP-throughput micro-benchmark: seeds 16 registers from the thread id,
// then runs 50 iterations of 16 PEAK_FLOP_MADD expansions (each expansion
// is 16 cross-dependent multiply-adds over the register file), and finally
// folds every register into A[id] so the compiler cannot discard the work.
extern "C" __global__ void peak_flop(float * A)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    float r0, r1, r2, r3, r4, r5, r6, r7;
    float r8, r9, r10, r11, r12, r13, r14, r15;
    // distinct id-dependent seed per register
    r0 = 0.0001 * id;
    r1 = 0.0001 * id;
    r2 = 0.0002 * id;
    r3 = 0.0003 * id;
    r4 = 0.0004 * id;
    r5 = 0.0005 * id;
    r6 = 0.0006 * id;
    r7 = 0.0007 * id;
    r8 = 0.0008 * id;
    r9 = 0.0009 * id;
    r10 = 0.0010 * id;
    r11 = 0.0011 * id;
    r12 = 0.0012 * id;
    r13 = 0.0013 * id;
    r14 = 0.0014 * id;
    r15 = 0.0015 * id;
    // 50 * 16 macro expansions = the measured multiply-add workload
    for(int i=0; i<50; i++) {
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
        PEAK_FLOP_MADD;
    }
    // sum all registers so every result is live at the store
    r0 += r1 + r2 + r3 + r4 + r5 + r6 + r7 +
    r8 + r9 + r10 + r11 + r12 + r13 + r14 + r15;
    A[id] = r0;
}
// Control kernel for peak_flop: identical register initialization and
// final reduction, but no multiply-add workload in between. Timing this
// isolates the fixed overhead (init + sum + store).
extern "C" __global__ void peak_flop_empty(float * A)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Same id-dependent seeds as peak_flop; the constants stay double so
    // each float result rounds exactly as before.
    const double seed[16] = { 0.0001, 0.0001, 0.0002, 0.0003,
                              0.0004, 0.0005, 0.0006, 0.0007,
                              0.0008, 0.0009, 0.0010, 0.0011,
                              0.0012, 0.0013, 0.0014, 0.0015 };
    float r[16];
    for (int k = 0; k < 16; ++k)
        r[k] = seed[k] * id;
    // Fold r[1..15] left-to-right, then add into r[0] -- this matches the
    // original expression's evaluation order exactly.
    float tail = r[1];
    for (int k = 2; k < 16; ++k)
        tail += r[k];
    A[id] = r[0] + tail;
}
|
661
|
/*
#include "SiPotential.h"
//-------------------- force between two Si particles ---------------------//
__host__ __device__ double f2_derivative_of_rij_tag(double r_ij_tag)
{
double first = -4*B_Si*(1.0/(r_ij_tag*r_ij_tag*r_ij_tag*r_ij_tag*r_ij_tag));
double second = ((B_Si*(1.0/(r_ij_tag*r_ij_tag*r_ij_tag*r_ij_tag)))-1)*(1.0/((r_ij_tag-a_Si)*(r_ij_tag-a_Si)));
double print = first-second;
double r_ij_tag_minus_a = r_ij_tag - a_Si;//r'ij-a
double r_ij_tag_minus_a2 = r_ij_tag_minus_a*r_ij_tag_minus_a;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a);//(r'ij-a)^(-1)
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a2));//(r'ij-a)^(-2)
double r_ij_tag2 = r_ij_tag*r_ij_tag;
double r_ij_tag4 = r_ij_tag2*r_ij_tag2;
double r_ij_tag5 = r_ij_tag4*r_ij_tag;
double r_ij_tag_in_mFive = (1.0/(r_ij_tag5));//r'ij^(-5)
double r_ij_tag_in_mFour = (1.0/(r_ij_tag4));//r'ij^(-4)
double expression = B_Si * r_ij_tag_in_mFour;
expression = expression - 1.0;//(B*r'ij^(-4) - 1)
double exponent = exp(r_ij_tag_minus_a_in_mOne);
double f2_derivative_part_1 = -4.0 * B_Si * r_ij_tag_in_mFive;
double f2_derivative_part_2 = expression * r_ij_tag_minus_a_in_mTwo;
return A_Si*exponent*(f2_derivative_part_1 - f2_derivative_part_2);
}
__host__ __device__ double v2_derivative_of_rix(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);//v2 derivative of distance
double dist_x = (i.x - j.x);
dist_x = dist_x / (r_ij);
double v2_derivative = f2_derivative * dist_x;
return v2_derivative;
}
__host__ __device__ double v2_derivative_of_riy(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);
double dist_y = i.y - j.y;
dist_y = dist_y / (r_ij);
double v2_derivative = f2_derivative * dist_y;
return v2_derivative;
}
__host__ __device__ double v2_derivative_of_riz(real3 i, real3 j, double r_ij)
{
if(r_ij/sigma_Si == pow(2.0,1.0/6.0))
{
return 0;
}
double f2_derivative = f2_derivative_of_rij_tag(r_ij/sigma_Si);
f2_derivative = f2_derivative * (epsilon_Si/sigma_Si);
double dist_z = i.z - j.z;
dist_z = dist_z / (r_ij);
double v2_derivative = f2_derivative * dist_z;
return v2_derivative;
}
//----------------------------------------------------------------------------//
//-------------------- potential between two Si particles ---------------------//
__host__ __device__ double f2(double r_ij_tag)
{
if(r_ij_tag >= a_Si)
{
return 0;
}
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
double exponent = exp(r_ij_tag_minus_a_in_mOne);
double r_ij_tag2 = r_ij_tag*r_ij_tag;
double r_ij_tag4 = r_ij_tag2*r_ij_tag2;
double expression = (1.0/(r_ij_tag4));
expression *= B_Si;
expression -= 1.0;
return A_Si*expression*exponent;
}
__host__ __device__ double v2(double r_ij_tag)
{
if(r_ij_tag == pow(2.0,1.0/6.0))
{
return -epsilon_Si;
}
return f2(r_ij_tag)*epsilon_Si;
}
//----------------------------------------------------------------------------//
//------------------------ force between three Si particles -------------------//
__host__ __device__ double hi_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a = r_ij_tag - a_Si;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a) * gama_Si;
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a*r_ij_tag_minus_a)) * gama_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag - a_Si) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_ik_tag_minus_a_in_mOne);
double expression = (r_ij_tag*r_ij_tag - r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag) / (r_ij_tag*r_ij_tag * r_ik_tag);
expression -= (r_ij_tag_minus_a_in_mTwo*cosJik_plus_oneThird);
return lamda_Si*exponent*cosJik_plus_oneThird*expression;
}
__host__ __device__ double hi_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = (1.0/(r_ij_tag - a_Si)) * gama_Si;
double r_ik_tag_minus_a = r_ik_tag - a_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a) * gama_Si;
double r_ik_tag_minus_a_in_mTwo = (1.0/(r_ik_tag_minus_a*r_ik_tag_minus_a)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_ik_tag_minus_a_in_mOne);
double expression = (r_ik_tag*r_ik_tag - r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag) / (r_ik_tag*r_ik_tag * r_ij_tag);
expression -= (r_ik_tag_minus_a_in_mTwo*cosJik_plus_oneThird);
return lamda_Si*exponent*cosJik_plus_oneThird*expression;
}
__host__ __device__ double hj_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a = r_ij_tag - a_Si;
double r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a) * gama_Si;
double r_ij_tag_minus_a_in_mTwo = (1.0/(r_ij_tag_minus_a*r_ij_tag_minus_a)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (r_ij_tag*r_ij_tag - r_jk_tag*r_jk_tag + r_ik_tag*r_ik_tag) / (r_ij_tag*r_ij_tag * r_jk_tag);
expression -= (r_ij_tag_minus_a_in_mTwo*cosIjk_plus_oneThird);
return lamda_Si*exponent*cosIjk_plus_oneThird*expression;
}
__host__ __device__ double hj_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = (1.0/(r_ij_tag - a_Si)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (-r_ik_tag) / (r_ij_tag * r_jk_tag);
return lamda_Si*exponent*2.0*cosIjk_plus_oneThird*expression;
}
__host__ __device__ double hk_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_ik_tag_minus_a_in_mOne = (1.0/(r_ik_tag - a_Si)) * gama_Si;
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (-r_ij_tag) / (r_ik_tag * r_jk_tag);
return lamda_Si*exponent*2.0*cosIkj_plus_oneThird*expression;
}
__host__ __device__ double hk_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_jk_tag_minus_a_in_mOne = (1.0/(r_jk_tag - a_Si)) * gama_Si;
double r_ik_tag_minus_a = r_ik_tag - a_Si;
double r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a) * gama_Si;
double r_ik_tag_minus_a_in_mTwo = (1.0/(r_ik_tag_minus_a*r_ik_tag_minus_a)) * gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne+r_jk_tag_minus_a_in_mOne);
double expression = (r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag + r_ij_tag*r_ij_tag) / (r_ik_tag*r_ik_tag * r_jk_tag);
expression -= (r_ik_tag_minus_a_in_mTwo*cosIkj_plus_oneThird);
return lamda_Si*exponent*cosIkj_plus_oneThird*expression;
}
__host__ __device__ double f3_derivative_of_rij_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double hi_derivative_of_rij = 0.0;
double hj_derivative_of_rij = 0.0;
double hk_derivative_of_rij = 0.0;
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
hi_derivative_of_rij = hi_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ij_tag < a_Si)
{
hj_derivative_of_rij = hj_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
hk_derivative_of_rij = hk_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
return hi_derivative_of_rij + hj_derivative_of_rij + hk_derivative_of_rij;
}
__host__ __device__ double f3_derivative_of_rik_tag(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double hi_derivative_of_rik = 0.0;
double hj_derivative_of_rik = 0.0;
double hk_derivative_of_rik = 0.0;
if(r_ik_tag < a_Si && r_ij_tag < a_Si)
{
hi_derivative_of_rik = hi_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ij_tag < a_Si)
{
hj_derivative_of_rik = hj_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
hk_derivative_of_rik = hk_derivative_of_rij_tag(r_ij_tag,r_ik_tag,r_jk_tag);
}
return hi_derivative_of_rik + hj_derivative_of_rik + hk_derivative_of_rik;
}
__host__ __device__ double v3_derivative_of_rix(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijx = (i.x-j.x);
double dist_ikx = (i.x-k.x);
double expression1 = (dist_ijx/(r_ij));
double expression2 = (dist_ikx/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
__host__ __device__ double v3_derivative_of_riy(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijy = (i.y-j.y);
double dist_iky = (i.y-k.y);
double expression1 = (dist_ijy/(r_ij));
double expression2 = (dist_iky/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
__host__ __device__ double v3_derivative_of_riz(real3 i, real3 j, real3 k, double r_ij, double r_ik, double r_jk)
{
double v3_derived_by_rij = (f3_derivative_of_rij_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double v3_derived_by_rik = (f3_derivative_of_rik_tag(r_ij/sigma_Si, r_ik/sigma_Si, r_jk/sigma_Si))*(epsilon_Si/sigma_Si);
double dist_ijz = (i.z-j.z);
double dist_ikz = (i.z-k.z);
double expression1 = (dist_ijz/(r_ij));
double expression2 = (dist_ikz/(r_ik));
return v3_derived_by_rij*expression1 + v3_derived_by_rik*expression2;
}
//----------------------------------------------------------------------------//
//-------------------- potential between three Si particles -------------------//
__host__ __device__ double hi(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosJik_plus_oneThird = ((r_ij_tag*r_ij_tag + r_ik_tag*r_ik_tag - r_jk_tag*r_jk_tag)/(2.0 * r_ij_tag * r_ik_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
r_ij_tag_minus_a_in_mOne *= gama_Si;
double r_ik_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a_in_mOne);
r_ik_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne + r_ik_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosJik_plus_oneThird*cosJik_plus_oneThird;
}
__host__ __device__ double hj(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIjk_plus_oneThird = ((r_ij_tag*r_ij_tag + r_jk_tag*r_jk_tag - r_ik_tag*r_ik_tag)/(2.0 * r_ij_tag * r_jk_tag)) + (1.0/3.0);
double r_ij_tag_minus_a_in_mOne = r_ij_tag - a_Si;
r_ij_tag_minus_a_in_mOne = (1.0/r_ij_tag_minus_a_in_mOne);
r_ij_tag_minus_a_in_mOne *= gama_Si;
double r_jk_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_jk_tag_minus_a_in_mOne = (1/r_jk_tag_minus_a_in_mOne);
r_jk_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ij_tag_minus_a_in_mOne + r_jk_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosIjk_plus_oneThird*cosIjk_plus_oneThird;
}
__host__ __device__ double hk(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double cosIkj_plus_oneThird = ((r_ik_tag*r_ik_tag + r_jk_tag*r_jk_tag - r_ij_tag*r_ij_tag)/(2.0 * r_ik_tag * r_jk_tag)) + (1.0/3.0);
double r_ik_tag_minus_a_in_mOne = r_ik_tag - a_Si;
r_ik_tag_minus_a_in_mOne = (1.0/r_ik_tag_minus_a_in_mOne);
r_ik_tag_minus_a_in_mOne *= gama_Si;
double r_jk_tag_minus_a_in_mOne = r_jk_tag - a_Si;
r_jk_tag_minus_a_in_mOne = (1.0/r_jk_tag_minus_a_in_mOne);
r_jk_tag_minus_a_in_mOne *= gama_Si;
double exponent = exp(r_ik_tag_minus_a_in_mOne + r_jk_tag_minus_a_in_mOne);
return lamda_Si*exponent*cosIkj_plus_oneThird*cosIkj_plus_oneThird;
}
__host__ __device__ double f3(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
double h_i = 0.0;
double h_j = 0.0;
double h_k = 0.0;
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
h_i = hi(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_ij_tag < a_Si && r_ik_tag < a_Si)
{
h_j = hj(r_ij_tag,r_ik_tag,r_jk_tag);
}
if(r_jk_tag < a_Si && r_ik_tag < a_Si)
{
h_k = hk(r_ij_tag,r_ik_tag,r_jk_tag);
}
return h_i + h_j + h_k;
}
__host__ __device__ double v3(double r_ij_tag, double r_ik_tag, double r_jk_tag)
{
return f3(r_ij_tag,r_ik_tag,r_jk_tag)*epsilon_Si;
}
//----------------------------------------------------------------------------//
*/
|
662
|
//Header from standard libraries
#include <fstream>
//size of blocks in grid
#define BLOCK_SIZE 16
//kernel function to multiply c=a*b
// Naive square matrix product c = a*b for n x n row-major matrices.
// One thread per output element; threads beyond the matrix exit early.
__global__ void Muld(float* a,float* b,float* c,int n)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= n || col >= n)
        return;
    float acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row*n + k] * b[k*n + col];
    c[row*n + col] = acc;
}
// Host-side C = A * B for n x n row-major matrices: stage both inputs on
// the device, launch Muld over a grid covering the whole matrix, then
// copy the result back and free the device buffers.
void Mul(float* A, float* B, int n,float* C)
{
    const int size = n * n * sizeof(float);
    // Stage inputs and allocate the output on the device.
    float *Ad, *Bd, *Cd;
    cudaMalloc((void**)&Ad, size);
    cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Bd, size);
    cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Cd, size);
    // Round the grid up so partial tiles at the edges are covered; Muld
    // bounds-checks the overhang.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x,
                 (n + dimBlock.y - 1) / dimBlock.y);
    Muld<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, n);
    // The blocking copy doubles as synchronization before reading C.
    cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
}
// Read an n x n matrix A from the file named in argv[1] and write a
// matrix derived from repeated squaring of A to out.txt.
int main(int argc,char* argv[])
{
    std::ifstream fin(argv[1]);   // NOTE(review): argv[1]/open status unchecked
    std::ofstream fout("out.txt");
    int n;
    fin >> n;
    float *hA = (float*)malloc(sizeof(float)*n*n);
    float *hB = (float*)malloc(sizeof(float)*n*n);
    for(int i = 0; i < n; ++i) {
        for(int j = 0; j < n; ++j) {
            fin >> hA[i*n+j];
        }
    }
    // hB starts as the identity matrix and acts as the accumulator
    for(int i = 0; i < n; ++i) {
        for(int j = 0; j < n; ++j) {
            hB[i*n+j] = 0;
        }
        hB[i*n+i] = 1;
    }
    // in case, when count of input matrix > 1
    // hC = hA * hB;
    // Mul(hA,hB,n,hC);
    // Binary-exponentiation-style loop: hA is squared each round and folded
    // into hB when the corresponding bit of n is set.
    // NOTE(review): Mul(hA,hA,n,hA) aliases the input and output host
    // buffers; this only works because Mul stages both operands in separate
    // device buffers before writing C. Also the loop squares BEFORE testing
    // bit 1, so bit 0 of n is never used -- verify the intended exponent
    // (this looks like hB = A^(n with bit 0 cleared), not A^n). TODO confirm.
    for(int i = 2; i <= n; i <<=1) {
        Mul(hA,hA,n,hA);
        if(i & n) {
            Mul(hA,hB,n,hB);
        }
    }
    for(int i = 0; i < n; ++i) {
        for(int j = 0; j < n; ++j) {
            fout << hB[i*n+j] << ' ';
        }
        fout << std::endl;
    }
    free(hA);
    free(hB);
    return 0;
}
|
663
|
// ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// In this application, we extend the Struct.cu with
// an array of IDs that can represent agents from simulation code
// that are passed in so that they can be processed using the GPU
// The code adds position information of agents as a basis
// for future development.
//
// The idea is to allow each kernel to process the interaction of
// agents based on their positional information
//
// The sizes of structs and arrays are printed at the end
// so that you can gauge how you should manage your memory
// in large agent simulations
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <iostream>
using namespace std;
// number of agents
#define N 1000
// struct to contain the position of each agent
// (plain 3-D coordinate; embedded by value inside AGENTS below)
struct POSITION {
float x; // x coordinate
float y; // y coordinate
float z; // z coordinate
};
// Aggregate holding the data of all N agents; the whole struct is
// moved between host and device in a single cudaMemcpy.
struct AGENTS {
int id[N];        // one ID per agent
POSITION pos[N];  // one position per agent
// Construct from an existing array of N agent IDs (values copied in).
AGENTS(int *_id)
{
for(int n = 0; n < N; n++)
id[n] = _id[n];
}
};
// Kernel: double every agent ID in place (a single thread walks the
// whole array) and echo the first and last few values from the device.
__global__ void changeID(AGENTS *dev_agent)
{
int idx = 0;
while(idx < N)
{
dev_agent->id[idx] = dev_agent->id[idx] * 2;
// print only the head and tail of the array
if(idx < 10 || idx > N-10)
printf("** device agent %d\n", dev_agent->id[idx]);
idx++;
}
}
// Host driver: create N agents, copy them to the GPU where each ID is
// doubled, copy them back, print results, and report struct sizes.
// Improvement: every CUDA API call and the kernel launch are now
// checked — failures were previously silent and surfaced as garbage.
// Returns 0 on success, 1 on any CUDA failure.
int main(void)
{
    cout<<"\n------------- assigning variables in host"<<endl;
    AGENTS *dev_agent; // the device INFO
    cout<<"\n------------- instantiating host id"<<endl;
    int id[N];
    for(int i=0; i<N; i++)
    {
        id[i] = i; // assign i to each ID
        // output assignment (head and tail only)
        if(i < 10 || i > N-10)
            cout<<"** host id[0]: "<<id[i]<<endl;
    }
    // send the IDs into the agent struct constructor
    AGENTS *agent = new AGENTS(id);
    cout<<"\n------------- allocate memory to device"<<endl;
    cudaError_t err = cudaMalloc( (void**)&dev_agent, sizeof(AGENTS) );
    if(err != cudaSuccess)
    {
        cout<<"cudaMalloc failed: "<<cudaGetErrorString(err)<<endl;
        delete agent;
        return 1;
    }
    // copy the instantiated struct agent to device as dev_agent
    cout<<"\n------------- copy agent to dev_agent"<<endl;
    err = cudaMemcpy( dev_agent, agent, sizeof(AGENTS), cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        cout<<"cudaMemcpy H2D failed: "<<cudaGetErrorString(err)<<endl;
        cudaFree(dev_agent);
        delete agent;
        return 1;
    }
    cout<<"\n------------- calling device kernel and change the id in the struct"<<endl;
    changeID<<<1,1>>>(dev_agent);
    // kernel launches do not return errors directly; poll for them
    err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        cout<<"kernel launch failed: "<<cudaGetErrorString(err)<<endl;
        cudaFree(dev_agent);
        delete agent;
        return 1;
    }
    // copy changed dev_agent to the struct (blocking copy also
    // synchronises with the kernel), output the printing in the kernel
    cout<<"\n------------- copying memory from device to host and printing"<<endl;
    err = cudaMemcpy( agent, dev_agent, sizeof(AGENTS), cudaMemcpyDeviceToHost );
    if(err != cudaSuccess)
    {
        cout<<"cudaMemcpy D2H failed: "<<cudaGetErrorString(err)<<endl;
        cudaFree(dev_agent);
        delete agent;
        return 1;
    }
    cout<<"\n------------- output changed results"<<endl;
    for(int i=0; i<N; i++)
    {
        if(i < 10 || i > N-10)
            cout<<"** host agent->id[0]: "<<agent->id[i]<<endl;
    }
    cout<<"\n------------- size of struct and arrays:"<<endl;
    cout << "size of struct AGENTS: " << sizeof(AGENTS) << " bytes" << endl;
    cout << "size of struct POSITION: " << sizeof(POSITION) << " bytes" << endl;
    cout << "size of struct id[N]: " << N * sizeof(int) << " bytes" << endl;
    cout<<"\n------------- cleaning up"<<endl;
    delete agent;
    cudaFree(dev_agent);
    return 0;
}
|
664
|
/* Solving the 2D acoustic wave equation using explicit finite
* difference method
* Copyright 2018 Chaiwoot Boonyasiriwat. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Kernel: second-order time step on the interior wavefield nodes:
// u2 <- (2 - 4*C2)*u1 - u0 + C2 * (4-neighbour sum of u1).
// Border nodes are left untouched.
__global__ void Wcalculate(float *u0,float *u1,float *u2,float C2,int nx,int ny){
    int ix = blockDim.x*blockIdx.x + threadIdx.x;
    int iy = blockDim.y*blockIdx.y + threadIdx.y;
    // interior nodes only
    if (ix <= 0 || ix >= nx-1 || iy <= 0 || iy >= ny-1)
        return;
    int c = ix + iy*nx;
    u2[c] = (2.0f-4.0f*C2)*u1[c] - u0[c] + C2*(u1[c+1]+u1[c-1] + u1[c+nx]+u1[c-nx]);
}
// Kernel: rotate the time levels for every node (u0 <- u1, u1 <- u2).
__global__ void Wupdate(float *u0,float *u1,float *u2,int nx,int ny){
    int ix = blockDim.x*blockIdx.x + threadIdx.x;
    int iy = blockDim.y*blockIdx.y + threadIdx.y;
    if (ix >= nx || iy >= ny)
        return;
    int c = ix + iy*nx;
    u0[c] = u1[c];
    u1[c] = u2[c];
}
// 2-D acoustic wave propagation with an explicit FD scheme on the GPU.
// Fixes: u1_cu was freed twice while u0_cu leaked; CUDA events were
// never destroyed.
int main() {
    //allocate parameter
    size_t size;
    cudaEvent_t start, stop;
    int nx, ny, nt, ix, iy, it, indx;
    float v, dx, dt, C, C2, xmax, ymax, a;
    float *u0_h, *u1_h, *u2_h;
    //set value
    xmax = 1.0f;
    ymax = 1.0f;
    nx = 201;
    ny = 201;
    v = 0.1f;
    dx = xmax/(nx-1);
    dt = 0.035f;
    C = v*dt/dx;      // Courant number
    C2 = C*C;
    nt = 1000;
    a = 1000.0;       // Gaussian sharpness of the initial pulse
    size = nx*ny*sizeof(float);
    u0_h = (float*) malloc(size);
    u1_h = (float*) malloc(size);
    u2_h = (float*) malloc(size);
    float *u0_cu = NULL;
    cudaMalloc((void**)&u0_cu,size);
    float *u1_cu = NULL;
    cudaMalloc((void**)&u1_cu,size);
    float *u2_cu = NULL;
    cudaMalloc((void**)&u2_cu,size);
    // initial condition: Gaussian pulse centred in the domain, with
    // zero initial velocity (u0 == u1)
    for (iy=0; iy<ny; iy++) {
        float yy = iy*dx - 0.5*ymax;
        for (ix=0; ix<nx; ix++) {
            indx = ix+iy*nx;
            float xx = ix*dx - 0.5*xmax;
            u0_h[indx] = exp(-a*(pow(xx,2)+pow(yy,2)));
            u1_h[indx] = u0_h[indx];
            u2_h[indx] = 0;
        }
    }
    // copy u0 -> u0_cu, u1 -> u1_cu, u2 -> u2_cu
    cudaMemcpy(u0_cu, u0_h, size,cudaMemcpyHostToDevice);
    cudaMemcpy(u1_cu, u1_h, size,cudaMemcpyHostToDevice);
    cudaMemcpy(u2_cu, u2_h, size,cudaMemcpyHostToDevice);
    // time the simulation loop with CUDA events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    dim3 G(nx/32 +1,ny/32 +1);
    dim3 B(32,32);
    for (it=0; it<nt; it++) {
        // advance wavefields at inner nodes
        Wcalculate<<<G,B>>>(u0_cu, u1_cu, u2_cu, C2, nx, ny);
        // rotate time levels
        Wupdate<<<G,B>>>(u0_cu, u1_cu, u2_cu, nx, ny);
    }
    cudaMemcpy(u2_h, u2_cu, size,cudaMemcpyDeviceToHost);
    //end calculation
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float cpu_time;
    cudaEventElapsedTime(&cpu_time,start,stop);
    printf("CPU time = %lf s\n", cpu_time*0.001);
    // output the final snapshot
    FILE *file = fopen("u_cu.dat","w");
    fwrite(u2_h, sizeof(float), nx*ny, file);
    fclose(file);
    // Free memory (u0_cu was previously leaked: u1_cu was freed twice)
    free(u0_h);
    free(u1_h);
    free(u2_h);
    cudaFree(u0_cu);
    cudaFree(u1_cu);
    cudaFree(u2_cu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
665
|
#include <stdio.h>
#include "Queue.cuh"
/* Allocate a queue of `height` slots, each `width` ints wide.
 * Both the handle and the Queue struct are heap-allocated and
 * zero-initialised; caller releases with freeQueue(). */
Queue** createQueue(int width, int height){
    Queue** handle = (Queue**)calloc(1, sizeof(Queue*));
    *handle = (Queue*)calloc(1, sizeof(Queue));
    Queue* q = *handle;
    q->width = width;
    q->height = height;
    q->entries = 0;
    q->queue = (int**)calloc(height, sizeof(int*));
    for(int row = 0; row < height; row++)
        q->queue[row] = (int*)calloc(width, sizeof(int));
    return handle;
}
/* Copy every row (and the entry count) from queue1 into queue2.
 * queue2 must be at least as large as queue1 in both dimensions. */
void copyValuesBetweenQueues(Queue** queue1, Queue** queue2){
    Queue* src = *queue1;
    Queue* dst = *queue2;
    for(int row = 0; row < src->height; row++)
        for(int col = 0; col < src->width; col++)
            dst->queue[row][col] = src->queue[row][col];
    dst->entries = src->entries;
}
/* Release all storage owned by the queue, including the handle. */
void freeQueue(Queue** queue){
    Queue* q = *queue;
    for(int row = 0; row < q->height; row++)
        free(q->queue[row]);
    free(q->queue);
    free(q);
    free(queue);
}
/* Grow the queue to newWidth x newHeight, preserving its contents.
 * Fix: the replaced queue's rows, row table, and struct were leaked on
 * every resize; they are now released before the swap. */
void extendQueue(Queue** queue, int newWidth, int newHeight){
    Queue** queueHolder = createQueue(newWidth, newHeight);
    copyValuesBetweenQueues(queue, queueHolder);
    /* free the storage of the queue being replaced */
    for(int entry = 0; entry < (*queue)->height; entry++){
        free((*queue)->queue[entry]);
    }
    free((*queue)->queue);
    free(*queue);
    *queue = *queueHolder;
    free(queueHolder);
}
/* Release an entry buffer, e.g. one returned by popFromQueue
 * (insertToQueue also calls this to take ownership of its argument). */
void freeEntryFromQueue(int* entry){
free(entry);
}
/* Return nonzero when the queue currently holds no entries. */
int isEmpty(Queue** queue){
    return ((*queue)->entries == 0) ? 1 : 0;
}
/* Append a copy of `entry` (width ints) to the queue, doubling the
 * queue's capacity first if it is full. Takes ownership of `entry`
 * and frees it after copying. */
void insertToQueue(Queue** queue, int* entry){
    if((*queue)->entries == (*queue)->height)
        extendQueue(queue, (*queue)->width, (*queue)->height*2);
    Queue* q = *queue;
    int* slot = q->queue[q->entries];
    for(int col = 0; col < q->width; col++)
        slot[col] = entry[col];
    q->entries++;
    freeEntryFromQueue(entry);
}
/* Return a pointer to entry entryNum's row without removing it.
 * The pointer aliases queue-owned storage; do not free it. */
int* peekFromQueue(Queue** queue, int entryNum){
return (*queue)->queue[entryNum];
}
/* Remove entry entryNum and return a freshly allocated copy of it;
 * the remaining entries are shifted down one slot. Caller frees the
 * returned buffer (freeEntryFromQueue). */
int* popFromQueue(Queue** queue, int entryNum){
    Queue* q = *queue;
    int* copy = (int*)calloc(q->width, sizeof(int));
    for(int col = 0; col < q->width; col++)
        copy[col] = q->queue[entryNum][col];
    q->entries--;
    /* close the gap left by the removed entry */
    for(int row = entryNum; row < q->entries; row++)
        for(int col = 0; col < q->width; col++)
            q->queue[row][col] = q->queue[row+1][col];
    return copy;
}
/* Remove and return a uniformly random entry from the queue.
 * Fix: `RAND_MAX+1` overflows int when RAND_MAX == INT_MAX (undefined
 * behaviour); the divisor is now computed in double. */
int* popRandomEntryFromQueue(Queue** queue){
    int entry = (int)((rand() / ((double)RAND_MAX + 1.0)) * (*queue)->entries);
    return popFromQueue(queue, entry);
}
/* Dump the queue dimensions and every slot (used or not) to stdout. */
void printQueueData(Queue** queue){
    Queue* q = *queue;
    printf("Width %d, Height %d, entries %d\n", q->width, q->height, q->entries);
    for(int row = 0; row < q->height; row++){
        printf("Entry %d \t", row);
        for(int col = 0; col < q->width; col++)
            printf("%d ", q->queue[row][col]);
        printf("\n");
    }
}
|
666
|
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <cuda_runtime.h>
#define FLT_MIN 1.175494351e-38F
__device__ float FCC_KR = 0.3;
__device__ float FCC_KB = 0.11;
__device__ float SMPTE_240M_KR = 0.212;
__device__ float SMPTE_240M_KB = 0.087;
__device__ float REC_601_KR = 0.299;
__device__ float REC_601_KB = 0.114;
__device__ float REC_709_KR = 0.2126;
__device__ float REC_709_KB = 0.0722;
__device__ float REC_2020_KR = 0.2627;
__device__ float REC_2020_KB = 0.0593;
__device__ float REC709_ALPHA = 1.09929682680944f;
__device__ float REC709_BETA = 0.018053968510807f;
__device__ float ST2084_M1 = 0.1593017578125f;
__device__ float ST2084_M2 = 78.84375f;
__device__ float ST2084_C1 = 0.8359375f;
__device__ float ST2084_C2 = 18.8515625f;
__device__ float ST2084_C3 = 18.6875f;
__device__ float yuv2rgb_REC_2020_NCL[9] = {1.000000, 1.4196651e-17, 1.47459996, 1.000000, -0.164553121, -0.571353137, 1.000000, 1.88139999, 5.67866042e-17};
__device__ float rgb2yuv_REC_709[9] = {0.212599993, 0.715200007, 0.0722000003, -0.114572108, -0.385427892, 0.500000, 0.500000, -0.454152912, -0.0458470918};
__device__ float gamutMa[9] = {1.66049099, -0.58764112, -0.0728498623, -0.124550477, 1.13289988, -0.00834942237, -0.0181507636, -0.100578897, 1.11872971};
// Descriptor for a separable resampling filter (tap count, stride,
// coefficients, and per-output left offsets).
// NOTE(review): not referenced by any kernel in this file — presumably
// kept for a planned filter path; confirm before removing.
struct FilterContext {
unsigned filter_width;
unsigned stride;
float *data;
int *left;
};
// Kernel: element-wise byte copy, one byte per thread; threads past
// `length` exit immediately.
__global__ void doGPU(uint8_t *data, uint8_t *dst_data, int length) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < length)
        dst_data[idx] = data[idx];
}
// Copy four consecutive bytes from data to dst_data.
// NOTE(review): parameter `i` is unused — callers presumably pass
// pre-offset pointers; confirm intent before relying on it.
__device__ void save_yuv(uint8_t *data, uint8_t *dst_data, int i) {
for (int x = 0; x < 4; x++) {
dst_data[x] = data[x];
}
}
// Clamp v into [min, max]; defaults clamp to the 8-bit range 0..255.
__device__ uint16_t clamp(uint16_t v, uint16_t min = 0, uint16_t max = 255) {
    if (v < min)
        return min;
    return (v > max) ? max : v;
}
// Rec.1886 inverse EOTF (pure gamma 1/2.4); negative inputs map to 0.
// The true piecewise Rec.709 OETF (4.5x linear segment + REC709_ALPHA
// power curve) is deliberately not used here.
__device__ float rec_709_oetf(float x){
    if (x < 0.0f)
        return 0.0f;
    return pow(x, 1.0f / 2.4f);
}
// SMPTE ST 2084 (PQ) EOTF. Non-positive (and NaN) inputs return 0 so
// pow never produces NaN; the denominator is floored at FLT_MIN to
// avoid division by zero.
__device__ float st_2084_eotf(float x){
    if (x > 0.0f) {
        float xpow = pow(x, 1.0f / ST2084_M2);
        float num = max(xpow - ST2084_C1, 0.0f);
        float den = max(ST2084_C2 - ST2084_C3 * xpow, FLT_MIN);
        return pow(num / den, 1.0f / ST2084_M1);
    }
    return 0.0f;
}
// Apply the display transfer curve (rec_709_oetf) to each RGB channel.
__device__ void linear_to_gamma(float r, float g, float b, float *rr, float *rg, float *rb) {
*rr = rec_709_oetf(r);
*rg = rec_709_oetf(g);
*rb = rec_709_oetf(b);
}
// Linearise each RGB channel with the PQ EOTF and scale by 100.
// Fix: the scale constants were double literals (100.0), forcing each
// multiply through double on the device; float literals keep the whole
// path in single precision.
__device__ void gamma_to_linear(float r, float g, float b, float *rr, float *rg, float *rb) {
    *rr = 100.0f * st_2084_eotf(r);
    *rg = 100.0f * st_2084_eotf(g);
    *rb = 100.0f * st_2084_eotf(b);
}
// Map a 10-bit luma code to normalised float: (x - 64) / 876
// (0.00114155246 == 1/876, 0.0730593577 == 64/876); inverse of
// float_2int16y below.
__device__ float int16_2floaty(uint16_t x){
return float(x) * 0.00114155246 + -0.0730593577;
}
// Map a 10-bit chroma code to normalised float: (x - 512) / 896
// (0.00111607148 == 1/896, 0.571428597 == 512/896).
// NOTE(review): the nominal inverse float_2int16 multiplies by 876,
// not 896, so the round trip is not exact — confirm which is intended.
__device__ float int16_2float(uint16_t x){
return float(x) * 0.00111607148 + -0.571428597;
}
// Quantise a normalised luma value to a 10-bit code: scale by 876
// (219*4) with offset 64, clamp to [0, 1023], truncate to integer.
__device__ uint16_t float_2int16y(float x){
x = x * 876 + 64;
float d = 0; // rounding offset placeholder; zero means truncation
x+=d;
if(x<0.0f)
x = 0.0f;
if(x<float(1UL<<10)-1)
return x; // implicit float->uint16_t truncation
return float(1UL<<10)-1;
}
// Quantise a normalised chroma value to a 10-bit code: scale by 876
// with offset 512 (mid-grey), clamp to [0, 1023], truncate.
// NOTE(review): int16_2float divides by 896 (224*4, the 10-bit chroma
// excursion) but this uses 876 (the luma excursion) — confirm.
__device__ uint16_t float_2int16(float x){
x = x * 876 + 512;
float d = 0; // rounding offset placeholder; zero means truncation
x+=d;
if(x<0.0f)
x = 0.0f;
if(x<float(1UL<<10)-1)
return x; // implicit float->uint16_t truncation
return float(1UL<<10)-1;
}
// Per-pixel conversion chain: 10-bit YUV (BT.2020 NCL matrix) ->
// linear RGB via the PQ EOTF -> gamut remap (gamutMa) -> gamma 1/2.4
// -> 10-bit BT.709 YUV clamped to legal range.
__device__ void yuv_to_rgb(uint16_t *y, uint16_t *u, uint16_t *v, uint16_t *ry, uint16_t *ru, uint16_t *rv) {
// int to float (normalise the 10-bit codes)
float iy,iu,iv;
iy = int16_2floaty(*y);
iu = int16_2float(*u);
iv = int16_2float(*v);
// YUV -> RGB with the BT.2020 non-constant-luminance matrix
float r = iy * yuv2rgb_REC_2020_NCL[0] + iu * yuv2rgb_REC_2020_NCL[1] + iv * yuv2rgb_REC_2020_NCL[2];
float g = iy * yuv2rgb_REC_2020_NCL[3] + iu * yuv2rgb_REC_2020_NCL[4] + iv * yuv2rgb_REC_2020_NCL[5];
float b = iy * yuv2rgb_REC_2020_NCL[6] + iu * yuv2rgb_REC_2020_NCL[7] + iv * yuv2rgb_REC_2020_NCL[8];
// PQ-decode to (scaled) linear light
float lr, lg, lb;
gamma_to_linear(r,g,b, &lr, &lg, &lb);
// gamut conversion in linear light
r = lr * gamutMa[0] + lg * gamutMa[1] + lb * gamutMa[2];
g = lr * gamutMa[3] + lg * gamutMa[4] + lb * gamutMa[5];
b = lr * gamutMa[6] + lg * gamutMa[7] + lb * gamutMa[8];
// re-encode with the display gamma
float gr, gg, gb;
linear_to_gamma(r,g,b, &gr, &gg, &gb);
//gr = r; gg=g; gb=b;
// RGB -> YUV with the BT.709 matrix
iy = gr * rgb2yuv_REC_709[0] + gg * rgb2yuv_REC_709[1] + gb * rgb2yuv_REC_709[2];
iu = gr * rgb2yuv_REC_709[3] + gg * rgb2yuv_REC_709[4] + gb * rgb2yuv_REC_709[5];
iv = gr * rgb2yuv_REC_709[6] + gg * rgb2yuv_REC_709[7] + gb * rgb2yuv_REC_709[8];
// float to int, clamped to legal 10-bit video range
*ry = clamp(float_2int16y(iy), 16*4, 235*4);
*ru = clamp(float_2int16(iu), 16*4, 240*4);
*rv = clamp(float_2int16(iv), 16*4, 240*4);
//*ry = *y;
//*ru = *u;
//*rv = *v;
}
// Kernel: one thread converts a 2x2 luma block that shares a single
// chroma sample (4:2:0 layout); the shared U/V output is recomputed
// and overwritten once per luma sample. Planes are viewed as uint16_t,
// so stride_y/stride_uv are in 16-bit elements.
__global__ void
calc_colorspace(uint8_t *data0, uint8_t *data1, uint8_t *data2, int w, int h, int stride_y, int stride_uv,
uint8_t *dst_data0, uint8_t *dst_data1, uint8_t *dst_data2, int colorspace) {
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// map the flat thread index to the top-left corner of a 2x2 block
int iy = i/(w/2)*2;
int ix = (i % (w/2)) *2;
if (ix >= w || iy >= h)
return;
int ofy = iy * stride_y + ix;          // luma offset of the block corner
int ofuv = (iy>>1) * stride_uv + ix/2; // shared chroma offset
uint16_t * srcy = (uint16_t *)data0;
uint16_t * srcu = (uint16_t *)data1;
uint16_t * srcv = (uint16_t *)data2;
uint16_t * dsty = (uint16_t *)dst_data0;
uint16_t * dstu = (uint16_t *)dst_data1;
uint16_t * dstv = (uint16_t *)dst_data2;
// convert all four luma samples of the block against the same chroma
yuv_to_rgb(&srcy[ofy], &srcu[ofuv], &srcv[ofuv], &dsty[ofy], &dstu[ofuv], &dstv[ofuv]);
yuv_to_rgb(&srcy[ofy + 1], &srcu[ofuv], &srcv[ofuv], &dsty[ofy + 1], &dstu[ofuv], &dstv[ofuv]);
yuv_to_rgb(&srcy[ofy + stride_y], &srcu[ofuv], &srcv[ofuv], &dsty[ofy + stride_y], &dstu[ofuv], &dstv[ofuv]);
yuv_to_rgb(&srcy[ofy + stride_y +1], &srcu[ofuv], &srcv[ofuv], &dsty[ofy + stride_y + 1], &dstu[ofuv], &dstv[ofuv]);
}
// Kernel: per-sample colourspace conversion on pre-normalised float
// planes (full-resolution Y/U/V): BT.2020 YUV -> linear RGB (PQ) ->
// gamut remap -> gamma 1/2.4 -> BT.709 YUV. One thread per sample.
__global__ void colorspace(float *src0, float *src1, float *src2, float *dst0, float *dst1, float *dst2, int w, int h){
int i = (blockDim.x * blockIdx.x + threadIdx.x);
if(i>w*h-1)
return;
float iy = src0[i];
float iu = src1[i];
float iv = src2[i];
// YUV -> RGB (BT.2020 NCL)
float r = iy * yuv2rgb_REC_2020_NCL[0] + iu * yuv2rgb_REC_2020_NCL[1] + iv * yuv2rgb_REC_2020_NCL[2];
float g = iy * yuv2rgb_REC_2020_NCL[3] + iu * yuv2rgb_REC_2020_NCL[4] + iv * yuv2rgb_REC_2020_NCL[5];
float b = iy * yuv2rgb_REC_2020_NCL[6] + iu * yuv2rgb_REC_2020_NCL[7] + iv * yuv2rgb_REC_2020_NCL[8];
// PQ-decode to linear light
float lr, lg, lb;
gamma_to_linear(r,g,b, &lr, &lg, &lb);
// gamut conversion in linear light
r = lr * gamutMa[0] + lg * gamutMa[1] + lb * gamutMa[2];
g = lr * gamutMa[3] + lg * gamutMa[4] + lb * gamutMa[5];
b = lr * gamutMa[6] + lg * gamutMa[7] + lb * gamutMa[8];
// re-encode with the display gamma
float gr, gg, gb;
linear_to_gamma(r,g,b, &gr, &gg, &gb);
//gr = r; gg=g; gb=b;
// RGB -> YUV (BT.709)
dst0[i] = gr * rgb2yuv_REC_709[0] + gg * rgb2yuv_REC_709[1] + gb * rgb2yuv_REC_709[2];
dst1[i] = gr * rgb2yuv_REC_709[3] + gg * rgb2yuv_REC_709[4] + gb * rgb2yuv_REC_709[5];
dst2[i] = gr * rgb2yuv_REC_709[6] + gg * rgb2yuv_REC_709[7] + gb * rgb2yuv_REC_709[8];
//yuv_to_rgb(&src0[i], &src1[i], &src2[i], &dst0[i], &dst1[i], &dst2[i]);
}
// Kernel: normalise each luma sample to float, one thread per sample.
// NOTE(review): src is uint8_t* and indexed per byte, yet the rest of
// the pipeline treats planes as 10-bit uint16_t — confirm src really
// holds byte-sized samples here, otherwise this reads half a sample.
__global__ void resize_line_32_y(uint8_t *src, float *dst, int width, int height) {
int i = (blockDim.x * blockIdx.x + threadIdx.x);
if(i>width*height-1)
return;
dst[i] = int16_2floaty(src[i]);
}
// Kernel: upsample a half-resolution chroma plane to full resolution
// with bilinear weights, normalising to float. One thread per output
// sample; wi/hi are the *output* (full) dimensions, the source plane
// is wi/2 wide. Horizontal blend is 1/2-1/2 on odd columns; vertical
// blend is 1/4-3/4 or 3/4-1/4 depending on row parity, with the first
// and last rows clamped to the nearest source row.
__global__ void resize_line_32_uv(uint8_t *src, float *dst, int wi, int hi){
int i = (blockDim.x * blockIdx.x + threadIdx.x);
if(i>wi*hi-1)
return;
int h = int(i / wi);   // output row
int v = int(i % wi);   // output column
int w = int(wi / 2);   // source plane width
if (h == 0 || h == hi-1){
// edge rows: no vertical blend, horizontal only
if (v % 2 == 0 || v == wi - 1)
dst[i] = int16_2float(src[int(h/2)*w +int(v / 2)]);
else
dst[i] = int16_2float(src[int(h/2) * w + int(v / 2)]) * 0.5 + int16_2float(src[int(h / 2) * w + int(v / 2) + 1]) * 0.5;
}
else if (h % 2 == 0){
// even rows: weight the row above 1/4, the aligned row 3/4
if (v % 2 == 0 || v == wi - 1)
dst[i] = int16_2float(src[int(h / 2 - 1) * w + int(v / 2)]) * 0.25 + int16_2float(src[int(h / 2) * w + int(v / 2)]) * 0.75;
else{
float x1 = int16_2float(src[int(h / 2 - 1) * w + int(v / 2)]) * 0.5 + int16_2float(src[int(h / 2 - 1) * w + int(v / 2) + 1]) * 0.5;
float x2 = int16_2float(src[int(h / 2) * w + int(v / 2)]) * 0.5 + int16_2float(src[int(h / 2) * w + int(v / 2) + 1]) * 0.5;
dst[i] = x1 * 0.25 + x2 * 0.75;
}
}
else{
// odd rows: weight the aligned row 3/4, the row below 1/4
if (v % 2 == 0 || v == wi - 1)
dst[i] = int16_2float(src[int(h / 2) * w + int(v / 2)]) * 0.75 + int16_2float(src[int(h / 2 + 1) * w + int(v / 2)]) * 0.25;
else{
float x1 = int16_2float(src[int(h / 2) * w + int(v / 2)]) * 0.5 + int16_2float(src[int(h / 2) * w + int(v / 2) + 1]) * 0.5;
float x2 = int16_2float(src[int(h / 2 + 1) * w + int(v / 2)]) * 0.5 + int16_2float(src[int(h / 2 + 1) * w + int(v / 2) + 1]) * 0.5;
dst[i] = x1 * 0.75 + x2 * 0.25;
}
}
}
// Kernel: quantise the float luma plane back to 10-bit codes clamped
// to the legal luma range [64, 940]; one thread per sample.
// NOTE(review): dst is uint8_t* indexed per element, so the 10-bit
// code is truncated to its low byte on store — confirm whether dst is
// actually a uint16_t plane (as in calc_colorspace).
__global__ void stamp_line_32_y(float *src, uint8_t *dst, int wi, int hi){
int i = (blockDim.x * blockIdx.x + threadIdx.x);
if(i>wi*hi-1)
return;
dst[i] = clamp(float_2int16y(src[i]), 16*4, 235*4);
}
// Kernel: downsample the full-resolution float chroma plane 2x
// (vertical multi-tap filter, then a 1-2-1 horizontal blend) and
// quantise to 10-bit codes clamped to the legal chroma range [64, 960].
// Fix: the vertical tap results were assigned to *new* locals
// (`float x1 = ...`) inside each branch, shadowing the outer x1/x2/x3,
// so the final blend always saw zeros. The inner declarations are
// removed so the computed taps reach the output.
__global__ void stamp_line_32_uv(float *src, uint8_t *dst, int wi, int hi){
    int i = (blockDim.x * blockIdx.x + threadIdx.x);
    if(i>wi*hi-1)
        return;
    float x1,x2,x3;
    x1=x2=x3=0;
    int h = int(i / wi);  // output row
    int v = int(i % wi);  // output column
    int w = int(wi * 2);  // source plane width
    int sh = (h+1)/2;
    int sv = (v+1)/2;
    if (h == hi -1){
        // bottom row: 3-tap vertical filter (no row below)
        x1 = src[sh * w + sv] * 0.5 + src[(sh + 1) * w + sv] * 0.375 + src[(sh + 2) * w + sv] * 0.125;
        x2 = src[sh * w + sv + 1] * 0.5 + src[(sh + 1) * w + sv + 1] * 0.375 + src[(sh + 2) * w + sv + 1] * 0.125;
        x3 = src[sh * w + sv + 2] * 0.5 + src[(sh + 1) * w + sv + 2] * 0.375 + src[(sh + 2) * w + sv + 2] * 0.125;
    }
    else{
        // interior rows: 4-tap vertical filter (1-3-3-1)/8
        x1 = src[sh * w + sv] * 0.125 + src[(sh + 1) * w + sv] * 0.375 + src[(sh + 2) * w + sv] * 0.375 + src[(sh + 3) * w + sv] * 0.125;
        x2 = src[sh * w + sv + 1] * 0.125 + src[(sh + 1) * w + sv + 1] * 0.375 + src[(sh + 2) * w + sv + 1] * 0.375 + src[(sh + 3) * w + sv + 1] * 0.125;
        x3 = src[sh * w + sv + 2] * 0.125 + src[(sh + 1) * w + sv + 2] * 0.375 + src[(sh + 2) * w + sv + 2] * 0.375 + src[(sh + 3) * w + sv + 2] * 0.125;
    }
    // 1-2-1 horizontal blend, then quantise and clamp
    dst[i] = clamp(float_2int16(x1 * 0.25 + x2 * 0.5 + x3 * 0.25), 16*4, 240*4);
}
extern "C" {
// Entry point callable from C: copy the three 10-bit YUV planes to the
// device, upsample chroma to full resolution, run the colourspace
// conversion, downsample/quantise back, and copy the result out.
// Always returns 0.
// NOTE(review): no CUDA call here is error-checked, and cleanup relies
// on cudaDeviceReset() releasing every allocation (it also destroys
// the context) — confirm that is acceptable for the host application.
int
doitgpu(uint8_t *data0, uint8_t *data1, uint8_t *data2, int *linesize, int width, int height, int format, uint8_t *out0,
uint8_t *out1, uint8_t *out2) {
fprintf(stderr, "=====gpu====\n");
uint8_t *cuda_data0, *cuda_data1, *cuda_data2;
uint8_t *dst_data0, *dst_data1, *dst_data2;
float *dst_w0, *dst_w1, *dst_w2, *dst_prt0, *dst_prt1, *dst_prt2;
// plane sizes in bytes; chroma planes are half height
int length0 = linesize[0] * height;
int length1 = linesize[1] * height >> 1;
int length2 = linesize[2] * height >> 1;
// strides in 16-bit elements
int stride_y = linesize[0]>>1;
int stride_uv = linesize[1]>>1;
cudaMalloc(&cuda_data0, length0);
cudaMalloc(&cuda_data1, length1);
cudaMalloc(&cuda_data2, length2);
cudaMalloc(&dst_data0, length0);
cudaMalloc(&dst_data1, length1);
cudaMalloc(&dst_data2, length2);
cudaMemcpy(cuda_data0, data0, length0, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_data1, data1, length1, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_data2, data2, length2, cudaMemcpyHostToDevice);
// full-resolution float working planes (4 bytes per sample)
cudaMalloc(&dst_w0, 4 * width * height);
cudaMalloc(&dst_w1, 4 * width * height);
cudaMalloc(&dst_w2, 4 * width * height);
cudaMalloc(&dst_prt0, 4 * width * height);
cudaMalloc(&dst_prt1, 4 * width * height);
cudaMalloc(&dst_prt2, 4 * width * height);
// one thread per full-resolution sample
int blocks = (width * height +128 - 1)/128;
int threads = 128;
resize_line_32_y<<<blocks, threads>>>(cuda_data0, dst_w0, width, height);
resize_line_32_uv<<<blocks, threads>>>(cuda_data1, dst_w1, width, height);
resize_line_32_uv<<<blocks, threads>>>(cuda_data2, dst_w2, width, height);
fprintf(stderr, "--resized\n");
colorspace<<<blocks, threads>>>(dst_w0, dst_w1, dst_w2, dst_prt0, dst_prt1, dst_prt2, width, height);
fprintf(stderr, "--colorspace\n");
stamp_line_32_y<<<blocks, threads>>>(dst_prt0, dst_data0, width, height);
// quarter-size grid for the half-resolution chroma outputs
blocks = (width * height / 4 + 128 - 1) / 128;
threads = 128;
//calc_colorspace <<<blocks, threads>>>(cuda_data0, cuda_data1, cuda_data2, width, height, stride_y, stride_uv,
// dst_data0, dst_data1, dst_data2, 0);
stamp_line_32_uv<<<blocks, threads>>>(dst_prt1, dst_data1, width/2, height/2);
stamp_line_32_uv<<<blocks, threads>>>(dst_prt2, dst_data2, width/2, height/2);
fprintf(stderr, "--over\n");
// blocking copies also synchronise with the kernels above
cudaMemcpy(out0, dst_data0, length0, cudaMemcpyDeviceToHost);
cudaMemcpy(out1, dst_data1, length1, cudaMemcpyDeviceToHost);
cudaMemcpy(out2, dst_data2, length2, cudaMemcpyDeviceToHost);
cudaDeviceReset();
return 0;
}
}
|
667
|
/*
 * Element-wise (Hadamard) product C = A .* B for m x n matrices stored
 * with leading dimensions lda/ldb/ldc (column index scaled by ld).
 * One thread per element; out-of-range threads exit early.
 */
extern "C" {
__global__ void hadamard(int m, int n, float *A, int lda, float *B, int ldb, float *C, int ldc)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < m && col < n)
        C[row + col*ldc] = A[row + col*lda] * B[row + col*ldb];
}
}
/*
 * Element-wise matrix sum C = A + B; parameters as in hadamard.
 */
extern "C" {
__global__ void matrix_sum(int m, int n, float *A, int lda, float *B, int ldb, float *C, int ldc)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < m && col < n)
        C[row + col*ldc] = A[row + col*lda] + B[row + col*ldb];
}
}
/*
 * Element-wise copy dst = src for an m x n matrix with leading
 * dimensions lddst/ldsrc.
 */
extern "C" {
__global__ void copy(int m, int n, float *dst, int lddst, float *src, int ldsrc)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < m && col < n)
        dst[row + col*lddst] = src[row + col*ldsrc];
}
}
|
668
|
#include <cuda.h>
/* Lx2: 11-tap symmetric convolution along the column (x) direction of a
 * column-contiguous grid; interior points only (5-point halo). */
__global__ void Lx2(const float *d_in,float *d_out,int numRows,int numCols, float *mask)
{
    // column index from the y block dimension, row index from x
    int Col = blockIdx.y*blockDim.y + threadIdx.y;
    int Row = blockIdx.x*blockDim.x + threadIdx.x;
    if (Row <= 4 || Row >= numRows-5 || Col <= 4 || Col >= numCols-5)
        return;
    int loc = Col*numRows + Row;
    d_out[loc] = mask[0]*d_in[loc]+
        mask[1]*(d_in[loc-numRows]+d_in[loc+numRows])+
        mask[2]*(d_in[loc-2*numRows]+d_in[loc+2*numRows])+
        mask[3]*(d_in[loc-3*numRows]+d_in[loc+3*numRows])+
        mask[4]*(d_in[loc-4*numRows]+d_in[loc+4*numRows])+
        mask[5]*(d_in[loc-5*numRows]+d_in[loc+5*numRows]);
}
/* Lz2: 11-tap symmetric convolution along the row (z) direction;
 * interior points only (5-point halo). */
__global__ void Lz2(const float *d_in,float *d_out,int numRows,int numCols, float *mask)
{
    // column index from the y block dimension, row index from x
    int Col = blockIdx.y*blockDim.y + threadIdx.y;
    int Row = blockIdx.x*blockDim.x + threadIdx.x;
    if (Row <= 4 || Row >= numRows-5 || Col <= 4 || Col >= numCols-5)
        return;
    int loc = Col*numRows + Row;
    d_out[loc] = mask[0]*d_in[loc]+
        mask[1]*(d_in[loc-1]+d_in[loc+1])+
        mask[2]*(d_in[loc-2]+d_in[loc+2])+
        mask[3]*(d_in[loc-3]+d_in[loc+3])+
        mask[4]*(d_in[loc-4]+d_in[loc+4])+
        mask[5]*(d_in[loc-5]+d_in[loc+5]);
}
/* Lz1: 12-tap antisymmetric (derivative-style) convolution along the
 * row (z) direction; interior points only (6-point halo). */
__global__ void Lz1(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    // column index from the y block dimension, row index from x
    int Col = blockIdx.y*blockDim.y + threadIdx.y;
    int Row = blockIdx.x*blockDim.x + threadIdx.x;
    if (Row <= 5 || Row >= numRows-6 || Col <= 5 || Col >= numCols-6)
        return;
    int loc = Col*numRows + Row;
    d_out[loc] =
        mask[0]*(d_in[loc+1]-d_in[loc-1])+
        mask[1]*(d_in[loc+2]-d_in[loc-2])+
        mask[2]*(d_in[loc+3]-d_in[loc-3])+
        mask[3]*(d_in[loc+4]-d_in[loc-4])+
        mask[4]*(d_in[loc+5]-d_in[loc-5])+
        mask[5]*(d_in[loc+6]-d_in[loc-6]);
}
/* Lx1: 12-tap antisymmetric (derivative-style) convolution along the
 * column (x) direction; interior points only (6-point halo). */
__global__ void Lx1(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    // column index from the y block dimension, row index from x
    int Col = blockIdx.y*blockDim.y + threadIdx.y;
    int Row = blockIdx.x*blockDim.x + threadIdx.x;
    if (Row <= 5 || Row >= numRows-6 || Col <= 5 || Col >= numCols-6)
        return;
    int loc = Col*numRows + Row;
    d_out[loc] =
        mask[0]*(d_in[loc+numRows]-d_in[loc-numRows])+
        mask[1]*(d_in[loc+2*numRows]-d_in[loc-2*numRows])+
        mask[2]*(d_in[loc+3*numRows]-d_in[loc-3*numRows])+
        mask[3]*(d_in[loc+4*numRows]-d_in[loc-4*numRows])+
        mask[4]*(d_in[loc+5*numRows]-d_in[loc-5*numRows])+
        mask[5]*(d_in[loc+6*numRows]-d_in[loc-6*numRows]);
}
/*sbLx performs the 2-D convolution of matrices A and row vector S1*/
/* Backward staggered-grid difference along the column (x) direction,
 * 12 taps, interior points only (6-point halo).
 * NOTE(review): every tap pairs its samples with '-' except the
 * mask[5] tap, which uses '+'; the same pattern appears in sfLx, sbLz
 * and sfLz, so it may be intentional — confirm against the scheme. */
__global__ void sbLx(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
// column index of the element to process (from the y block dimension)
int Col = blockIdx.y*blockDim.y + threadIdx.y;
// row index of the element to process (from the x block dimension)
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-0)*numRows+Row])+
mask[1]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[2]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[3]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[4]*(d_in[(Col+5)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[5]*(d_in[(Col+6)*numRows+Row]+d_in[(Col-5)*numRows+Row]);
}
}
/*sfLx performs the 2-D convolution of matrices A and row vector S1*/
/* Forward staggered-grid difference along the column (x) direction,
 * 12 taps, interior points only (6-point halo).
 * NOTE(review): the mask[5] tap uses '+' where all others use '-';
 * see the matching note on sbLx — confirm against the FD scheme. */
__global__ void sfLx(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
// column index of the element to process (from the y block dimension)
int Col = blockIdx.y*blockDim.y + threadIdx.y;
// row index of the element to process (from the x block dimension)
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[1]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[2]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[3]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[4]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-5)*numRows+Row])+
mask[5]*(d_in[(Col+5)*numRows+Row]+d_in[(Col-6)*numRows+Row]);
}
}
/*sbLz performs the 2-D convolution of matrices A and column vector S1*/
/* Backward staggered-grid difference along the row (z) direction,
 * 12 taps, interior points only (6-point halo).
 * NOTE(review): the mask[5] tap uses '+' where all others use '-';
 * see the matching note on sbLx — confirm against the FD scheme. */
__global__ void sbLz(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
// column index of the element to process (from the y block dimension)
int Col = blockIdx.y*blockDim.y + threadIdx.y;
// row index of the element to process (from the x block dimension)
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Col*numRows+Row]=
mask[0]*(d_in[Loc+1]-d_in[Loc-0])+
mask[1]*(d_in[Loc+2]-d_in[Loc-1])+
mask[2]*(d_in[Loc+3]-d_in[Loc-2])+
mask[3]*(d_in[Loc+4]-d_in[Loc-3])+
mask[4]*(d_in[Loc+5]-d_in[Loc-4])+
mask[5]*(d_in[Loc+6]+d_in[Loc-5]);
}
}
/*sfLz performs the 2-D convolution of matrices A and column vector S1*/
/* Forward staggered-grid difference along the row (z) direction,
 * 12 taps, interior points only (6-point halo).
 * NOTE(review): the mask[5] tap uses '+' where all others use '-';
 * see the matching note on sbLx — confirm against the FD scheme. */
__global__ void sfLz(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
// column index of the element to process (from the y block dimension)
int Col = blockIdx.y*blockDim.y + threadIdx.y;
// row index of the element to process (from the x block dimension)
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Loc]=
mask[0]*(d_in[Loc+0]-d_in[Loc-1])+
mask[1]*(d_in[Loc+1]-d_in[Loc-2])+
mask[2]*(d_in[Loc+2]-d_in[Loc-3])+
mask[3]*(d_in[Loc+3]-d_in[Loc-4])+
mask[4]*(d_in[Loc+4]-d_in[Loc-5])+
mask[5]*(d_in[Loc+5]+d_in[Loc-6]);
}
}
/*rsgffd performs the 2-D forward rotated staggered-grid finite difference*/
/* Each thread computes two diagonal differences at one interior point
 * (6-point halo): d_outx uses taps along the (+Col,-Row)/(-Col,+Row)
 * diagonal, d_outz along the (+Col,+Row)/(-Col,-Row) diagonal. */
__global__ void rsgffd(const float *d_in,float *d_outx,float *d_outz,const int numRows,int numCols,float *mask)
{
// column index of the element to process (from the y block dimension)
int Col = blockIdx.y*blockDim.y + threadIdx.y;
// row index of the element to process (from the x block dimension)
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_outx[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row-0]-d_in[(Col-0)*numRows+Row+1])+
mask[1]*(d_in[(Col+2)*numRows+Row-1]-d_in[(Col-1)*numRows+Row+2])+
mask[2]*(d_in[(Col+3)*numRows+Row-2]-d_in[(Col-2)*numRows+Row+3])+
mask[3]*(d_in[(Col+4)*numRows+Row-3]-d_in[(Col-3)*numRows+Row+4])+
mask[4]*(d_in[(Col+5)*numRows+Row-4]-d_in[(Col-4)*numRows+Row+5])+
mask[5]*(d_in[(Col+6)*numRows+Row-5]-d_in[(Col-5)*numRows+Row+6]);
d_outz[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row+1]-d_in[(Col-0)*numRows+Row-0])+
mask[1]*(d_in[(Col+2)*numRows+Row+2]-d_in[(Col-1)*numRows+Row-1])+
mask[2]*(d_in[(Col+3)*numRows+Row+3]-d_in[(Col-2)*numRows+Row-2])+
mask[3]*(d_in[(Col+4)*numRows+Row+4]-d_in[(Col-3)*numRows+Row-3])+
mask[4]*(d_in[(Col+5)*numRows+Row+5]-d_in[(Col-4)*numRows+Row-4])+
mask[5]*(d_in[(Col+6)*numRows+Row+6]-d_in[(Col-5)*numRows+Row-5]);
}
}
/*rsgbfd performs the 2-D backward rotated staggered-grid finite difference*/
/* Backward counterpart of rsgffd: the same two diagonal stencils
 * shifted one grid point toward the lower indices; interior points
 * only (6-point halo). */
__global__ void rsgbfd(const float *d_in,float *d_outx,float *d_outz,int numRows,int numCols,float *mask)
{
// column index of the element to process (from the y block dimension)
int Col = blockIdx.y*blockDim.y + threadIdx.y;
// row index of the element to process (from the x block dimension)
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_outx[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row-1]-d_in[(Col-1)*numRows+Row+0])+
mask[1]*(d_in[(Col+1)*numRows+Row-2]-d_in[(Col-2)*numRows+Row+1])+
mask[2]*(d_in[(Col+2)*numRows+Row-3]-d_in[(Col-3)*numRows+Row+2])+
mask[3]*(d_in[(Col+3)*numRows+Row-4]-d_in[(Col-4)*numRows+Row+3])+
mask[4]*(d_in[(Col+4)*numRows+Row-5]-d_in[(Col-5)*numRows+Row+4])+
mask[5]*(d_in[(Col+5)*numRows+Row-6]-d_in[(Col-6)*numRows+Row+5]);
d_outz[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row+0]-d_in[(Col-1)*numRows+Row-1])+
mask[1]*(d_in[(Col+1)*numRows+Row+1]-d_in[(Col-2)*numRows+Row-2])+
mask[2]*(d_in[(Col+2)*numRows+Row+2]-d_in[(Col-3)*numRows+Row-3])+
mask[3]*(d_in[(Col+3)*numRows+Row+3]-d_in[(Col-4)*numRows+Row-4])+
mask[4]*(d_in[(Col+4)*numRows+Row+4]-d_in[(Col-5)*numRows+Row-5])+
mask[5]*(d_in[(Col+5)*numRows+Row+5]-d_in[(Col-6)*numRows+Row-6]);
}
}
|
669
|
/*
* ExBottomUpdater.cpp
*
* Created on: 01 февр. 2016 г.
* Author: aleksandr
*/
#include "ExBottomUpdater.h"
#include "SmartIndex.h"
/*
 * indx must lie in the range [0, sizeX-1]
 */
// Update the Ex field on the bottom boundary row from its two stored
// history levels (ExBottom) and the interior field values.
// NOTE(review): the coeff[]-weighted form looks like a second-order
// absorbing boundary condition — confirm against the scheme used.
__device__
void ExBottomUpdater::operator() (const int indx) {
int m = indx;
Ex(m, 0) = coeff[0] * (Ex(m, 2) + ExBottom(0, 1, m)) +
coeff[1] * (ExBottom(0, 0, m) + ExBottom(2, 0, m) - Ex(m, 1) - ExBottom(1, 1, m)) +
coeff[2] * ExBottom(1, 0, m) - ExBottom(2, 1, m);
// shift the stored history: level 0 -> level 1, current field -> level 0
for (int n = 0; n < 3; n++) {
ExBottom(n, 1, m) = ExBottom(n, 0, m);
ExBottom(n, 0, m) = Ex(m, n);
}
}
|
670
|
/*
* EzBottomUpdater.cpp
*
* Created on: 23 янв. 2016 г.
* Author: aleksandr
*/
#include "EzBottomUpdater.h"
#include "SmartIndex.h"
#define EzBottom(N, Q, M) EzBottom[(M) * 6 + (Q) * 3 + (N)]
/*
 * indx must lie in the range [0, sizeX-1]
 */
// Update the Ez field on the bottom boundary row from its two stored
// history levels (EzBottom macro above) and the interior field values;
// mirrors ExBottomUpdater::operator().
__device__
void EzBottomUpdater::operator() (const int indx) {
int m = indx;
Ez(m, 0) = coeff[0] * (Ez(m, 2) + EzBottom(0, 1, m)) +
coeff[1] * (EzBottom(0, 0, m) + EzBottom(2, 0, m) - Ez(m, 1) - EzBottom(1, 1, m)) +
coeff[2] * EzBottom(1, 0, m) - EzBottom(2, 1, m);
// shift the stored history: level 0 -> level 1, current field -> level 0
for (int n = 0; n < 3; n++) {
EzBottom(n, 1, m) = EzBottom(n, 0, m);
EzBottom(n, 0, m) = Ez(m, n);
}
}
|
671
|
/*
* CS 4444
* Steven Stetzler
* Homework 5: Matrix-Matrix Multiplication with CUDA
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
// Abort the program with a diagnostic if `e` signals a CUDA failure;
// a cudaSuccess code is a no-op.
void check_error(cudaError e) {
    if (e == cudaSuccess)
        return;
    printf("\nCUDA error: %s\n", cudaGetErrorString(e));
    exit(1);
}
// GPU matrix multiplication: each thread computes one element of
// C = A x B for N x N row-major matrices (row from the y dimension,
// column from x); threads outside the matrix exit early.
__global__ void matrix_mult_gpu(float* A, float* B, float* C, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N || col >= N)
        return;
    float acc = 0;
    for (int k = 0; k < N; k++)
        acc += A[row * N + k] * B[k * N + col];
    C[row * N + col] = acc;
}
// Reference CPU matrix multiply (no blocking): C = A x B for N x N
// row-major matrices. Used to validate the GPU kernel's output.
void matrix_mult_cpu(float* A, float* B, float* C, int N) {
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c) {
            float acc = 0;
            for (int k = 0; k < N; ++k) {
                acc += A[r * N + k] * B[k * N + c];
            }
            C[r * N + c] = acc;
        }
    }
}
// Element-wise comparison of two N x N matrices with a 0.01 absolute
// tolerance; prints each mismatch and a final verdict.
void compareHostAndGpuOutput(float* C, float* C_CPU, int N) {
    int count = N * N;
    int mismatches = 0;
    for (int i = 0; i < count; ++i) {
        if (fabs(C[i] - C_CPU[i]) > 0.01) {
            mismatches++;
            printf("mismatch at index %i: %f\t%f\n", i, C[i], C_CPU[i]);
        }
    }
    if (mismatches > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", mismatches);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
// Main method: times GPU matrix multiplication over several trials and
// optionally validates the result against the CPU reference.
// argv: [N] [check_cpu] [threads_x] [threads_y] [n_trials]
int main(int argc, char** argv) {
	// The problem size N is the dimension of the arrays
	int N = (argc > 1) ? atoi(argv[1]) : 100;
	// Whether to check GPU output against the (slow) CPU implementation
	int check_cpu = (argc > 2) ? atoi(argv[2]) : 0;
	// Thread gridding
	int thread_x = (argc > 3) ? atoi(argv[3]) : 32;
	int thread_y = (argc > 4) ? atoi(argv[4]) : 32;
	// Number of timed GPU trials
	int n_trials = (argc > 5) ? atoi(argv[5]) : 10;
	// Smallest block grid that covers the whole N x N output
	int grid_x = (int) ceil((double) N / thread_x);
	int grid_y = (int) ceil((double) N / thread_y);
	printf("N = %d\nGrid: %d x %d\nThreads: %d x %d\nTrials: %d\n", N, grid_x, grid_y, thread_x, thread_y, n_trials);
	dim3 block_per_grid(grid_x, grid_y , 1);
	dim3 thread_per_block(thread_x, thread_y, 1);
	// Host arrays
	float* A = (float*) malloc(N * N * sizeof(float));
	float* B = (float*) malloc(N * N * sizeof(float));
	float* C = (float*) malloc(N * N * sizeof(float));
	// Device pointers (allocated per trial, timed)
	float* A_GPU;
	float* B_GPU;
	float* C_GPU;
	// Random initialization
	int i, j;
	float val;
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			val = (rand() % 1000) * 0.001;
			A[i * N + j] = val;
			B[i * N + j] = val;
			C[i * N + j] = val;
		}
	}
	clock_t start, end;
	double elapsed;
	if (check_cpu) {
		start = clock();
		matrix_mult_cpu(A, B, C, N);
		end = clock();
		elapsed = (end - start) / (double) CLOCKS_PER_SEC;
		printf("CPU: %.10f seconds\n", elapsed);
	}
	double copy_avg_time = 0;
	double comp_avg_time = 0;
	for (i = 0 ; i < n_trials; i++) {
		// Time allocation + host-to-device copies
		start = clock();
		check_error(cudaMalloc((void **) &A_GPU, N * N * sizeof(float)));
		check_error(cudaMalloc((void **) &B_GPU, N * N * sizeof(float)));
		check_error(cudaMalloc((void **) &C_GPU, N * N * sizeof(float)));
		check_error(cudaMemcpy(A_GPU, A, N * N * sizeof(float), cudaMemcpyHostToDevice));
		check_error(cudaMemcpy(B_GPU, B, N * N * sizeof(float), cudaMemcpyHostToDevice));
		check_error(cudaMemcpy(C_GPU, C, N * N * sizeof(float), cudaMemcpyHostToDevice));
		end = clock();
		elapsed = (end - start) / (double) CLOCKS_PER_SEC;
		copy_avg_time += elapsed;
		// Time the kernel
		start = clock();
		matrix_mult_gpu<<<block_per_grid, thread_per_block>>>(A_GPU, B_GPU, C_GPU, N);
		// FIX: kernel launches fail silently; surface configuration errors.
		check_error(cudaGetLastError());
		cudaDeviceSynchronize();
		end = clock();
		elapsed = (end - start) / (double) CLOCKS_PER_SEC;
		comp_avg_time += elapsed;
		// Keep the last trial's buffers so the result can be copied back below
		if (i != n_trials - 1) {
			check_error(cudaFree(A_GPU));
			check_error(cudaFree(B_GPU));
			check_error(cudaFree(C_GPU));
		}
	}
	printf("GPU_copy: %.10f seconds\n", copy_avg_time / n_trials);
	printf("GPU: %.10f seconds\n", comp_avg_time / n_trials);
	if (check_cpu) {
		float* C_GPU_Copy = (float*) malloc(N * N * sizeof(float));
		check_error(cudaMemcpy(C_GPU_Copy, C_GPU, N * N * sizeof(float), cudaMemcpyDeviceToHost));
		compareHostAndGpuOutput(C, C_GPU_Copy, N);
		// FIX: was leaked
		free(C_GPU_Copy);
	}
	// FIX: the original leaked the final trial's device buffers and every
	// host buffer.
	check_error(cudaFree(A_GPU));
	check_error(cudaFree(B_GPU));
	check_error(cudaFree(C_GPU));
	free(A);
	free(B);
	free(C);
	return 0;
}
|
672
|
#include <iostream>
#include <stdlib.h>
#include "load_matrix.cuh"
// Reads an augmented linear system from `filename`: first the dimension,
// then a size x size coefficient block, then size right-hand-side values
// (stored in the extra column of the allocated size x (size+1) matrix).
// Returns 0 on success, 1 on open/parse failure.
// FIX: fscanf results are now checked and the file is closed on every
// error path (the original silently accepted truncated/garbled input).
int load_matrix(double ***matrix, int *size, char* filename) {
	FILE* ip;
	int i, j;
	if ((ip = fopen(filename, "r")) == NULL) {
		return 1;
	}
	if (fscanf(ip, "%d\n\n", size) != 1 || *size <= 0) {
		fclose(ip);
		return 1;
	}
	(*matrix) = alloc_mem(*size, (*size) + 1);
	for (i = 0; i < *size; ++i) {
		for (j = 0; j < *size; ++j) {
			if (fscanf(ip, "%lf\t", &(*matrix)[i][j]) != 1) {
				fclose(ip);
				return 1;
			}
		}
		fscanf(ip, "\n");
	}
	fscanf(ip, "\n");
	for (i = 0; i < *size; ++i) {
		// Column *size holds the right-hand-side vector.
		if (fscanf(ip, "%lf\n", &(*matrix)[i][*size]) != 1) {
			fclose(ip);
			return 1;
		}
	}
	fclose(ip);
	return 0;
}
// Allocates a rows_count x cols_count matrix as an array of row pointers.
// Returns NULL on allocation failure (FIX: the original never checked
// malloc and would crash later on a NULL row).
double **alloc_mem(int rows_count, int cols_count) {
	int i;
	double ** mem;
	mem = (double **)malloc(rows_count * sizeof(double*));
	if (mem == NULL) {
		return NULL;
	}
	for (i = 0; i < rows_count; ++i) {
		mem[i] = (double *)malloc(cols_count * sizeof(double));
		if (mem[i] == NULL) {
			// Roll back the rows allocated so far.
			while (--i >= 0) {
				free(mem[i]);
			}
			free(mem);
			return NULL;
		}
	}
	return mem;
}
// Releases a matrix created by alloc_mem: every row buffer, then the row
// pointer table itself.
void dealloc_mem(double** matrix, int rows_count) {
	for (int r = rows_count - 1; r >= 0; --r) {
		free(matrix[r]);
	}
	free(matrix);
}
|
673
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
/**
* Encrypt Program Cuda
*
* This program encrypts a file using a degree 2 formula using Cuda
* Parallelization and then decrypts the file using another degree 2
* formula.
*
* @Author: Clayton Chase Glenn
*/
#define MAX 20
#define DEBUG 0
/** Kernel: affine-encrypt one byte per thread.
 *  Thread t (global index) maps plaintext p[t] to c[t] = (a*p[t] + b) mod 256
 *  for t < n; launch with any 1D grid covering n threads.
 **/
__global__
void encrypt(char *p, char *c, int a, int b, int n) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;
  c[idx] = (a * p[idx] + b) % 256;
}
/** Kernel: affine-decrypt one byte per thread.
 *  Thread t applies the inverse affine map c[t] = (a*p[t] + b) mod 256 with
 *  the decryption coefficients; same launch shape as encrypt.
 **/
__global__
void decrypt(char *p, char *c, int a, int b, int n) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;
  c[idx] = (a * p[idx] + b) % 256;
}
/** Kernel: element-wise mismatch detector.
 *  Thread t compares c[t] against p[t] and sets *a = 1 on any difference.
 *  *a must be zeroed by the caller before launch; it is only ever set.
 **/
__global__
void isMatch(char *p, char *c, int *a, int n) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < n && c[idx] != p[idx]) {
    *a = 1;
  }
}
/**
 * Helper Function
 * Prints usage help to stderr and terminates the process.
 * FIX: exit with a nonzero status so shells/scripts can detect the usage
 * error (the original exited with 0, signalling success).
 **/
void printerror(){
fprintf(stderr, "Invalid Arguments\n");
fprintf(stderr, "Correct Form: ./encrypt [File to Encrypt]\n");
fprintf(stderr, " or\n");
fprintf(stderr, " ./encrypt -n [2^(1:20)]\n");
exit(1);
}
/**
 * Main Program
 * Homework 6 driver: encrypts input (a file, or 2^N synthetic '0' bytes with
 * "-n N") on the GPU with an affine cipher, decrypts it, verifies the round
 * trip, and reports per-stage timings.
 **/
int main(int argc, char **argv) {
  int N = MAX;
  char *buf = NULL;
  // Validate arguments early
  if (argc < 2) printerror();
  if (argc == 3 && strcmp(argv[1], "-n")) printerror();
  // Synthetic mode: build 2^N bytes, all '0'
  if(!strcmp(argv[1], "-n") && argc == 3){
    N = strtol(argv[2], NULL, 10);
    if(N < 1 || N > 20) printerror();
    N = (int)pow(2, N);
    buf = (char*)malloc(N*sizeof(char));
    int i = 0;
    while (i < N) buf[i++] = 48;
  }
  // File mode
  if(argc == 2) {
    FILE *fp;
    int c;  // FIX: int, not char, so EOF is detected reliably
    char chars[1048576];
    int i = 0;
    fp = fopen(argv[1], "r");
    if (fp == NULL) {
      fprintf(stderr, "Not a Valid File\n");
      return (-1);
    }
    while((c = fgetc(fp)) != EOF) {
      if (i >= 1048576) {
        fprintf(stderr, "File Too Large\n");
        return (-1);
      }
      chars[i++] = (char)c;
    }
    N = i + 1;
    buf = (char*)malloc(N*sizeof(char));
    // FIX: the original `buf[i] = chars[i++]` was unsequenced (undefined
    // behaviour) and copied one uninitialized byte; copy the i bytes read
    // and terminate deterministically.
    for (int j = 0; j < i; j++) buf[j] = chars[j];
    buf[N - 1] = '\0';
    fclose(fp);
  }
  // Host working buffers (plain, cipher, round-trip)
  char h_p[N];
  char h_c[N];
  char h_r[N];
  int i = 0;
  while (i < N) {
    h_p[i] = buf[i];
    h_c[i] = 32;
    h_r[i++] = 32;
  }
  // Cipher parameters and device pointers
  char *dev_p, *dev_c, *dev_r;
  int *match;       // device-side mismatch flag
  int h_match = 0;
  int h_a = 171, h_b = 55;
  int r_a = 3, r_b = 91;
  cudaEvent_t start1, start2, start3, startf, stop1, stop2, stop3, stopf;
  cudaEventCreate(&start1);
  cudaEventCreate(&stop1);
  cudaEventCreate(&start2);
  cudaEventCreate(&stop2);
  cudaEventCreate(&start3);
  cudaEventCreate(&stop3);
  cudaEventCreate(&startf);
  cudaEventCreate(&stopf);
  float final_time1 = 0.0, final_time2 = 0.0, final_time3 = 0.0, final_timef = 0.0;
  // FIX: `match` was malloc'd on the host and immediately leaked by the
  // cudaMalloc overwrite; it is now device-only and zero-initialised (the
  // original read it uninitialised whenever everything matched).
  cudaMalloc(&dev_p, N*sizeof(char));
  cudaMalloc(&dev_c, N*sizeof(char));
  cudaMalloc(&dev_r, N*sizeof(char));
  cudaMalloc(&match, sizeof(int));
  cudaMemset(match, 0, sizeof(int));
  printf("N: %d\n", N);
  if(DEBUG) {
    printf("Plain Text: ");
    i = 0;
    while(i < N) printf("%c", h_p[i++]);
    printf("\n");
  }
  cudaMemcpy(dev_p, h_p, N*sizeof(char), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_c, h_c, N*sizeof(char), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_r, h_r, N*sizeof(char), cudaMemcpyHostToDevice);
  // FIX: a fixed <<<128,128>>> launch covers only 16384 elements while N can
  // be up to 2^20; size the grid from N instead.
  int nblocks = (N + 127) / 128;
  cudaEventRecord(startf);
  // Encrypt
  cudaEventRecord(start1);
  encrypt<<<nblocks, 128>>>(dev_p, dev_c, h_a, h_b, N);
  cudaEventRecord(stop1);
  cudaMemcpy(h_c, dev_c, N*sizeof(char), cudaMemcpyDeviceToHost);
  if(DEBUG) {
    printf("Encrypted Text: ");
    i = 0;
    while(i < N) printf("%c", h_c[i++]);
    printf("\n");
  }
  cudaEventSynchronize(stop1);
  cudaEventElapsedTime(&final_time1, start1, stop1);
  // Decrypt
  // FIX: the original recorded stop2 here instead of start2, so the decrypt
  // timing was garbage.
  cudaEventRecord(start2);
  decrypt<<<nblocks, 128>>>(dev_c, dev_r, r_a, r_b, N);
  cudaEventRecord(stop2);
  cudaMemcpy(h_r, dev_r, N*sizeof(char), cudaMemcpyDeviceToHost);
  if(DEBUG) {
    // FIX: removed the stray h_r argument that had no matching conversion
    printf("Decrypted Text: ");
    i = 0;
    while(i < N) printf("%c", h_r[i++]);
    printf("\n");
  }
  cudaEventSynchronize(stop2);
  cudaEventElapsedTime(&final_time2, start2, stop2);
  // Verify round trip on the GPU
  cudaEventRecord(start3);
  isMatch<<<nblocks, 128>>>(dev_r, dev_p, match, N);
  cudaEventRecord(stop3);
  cudaMemcpy(&h_match, match, sizeof(int), cudaMemcpyDeviceToHost);
  if (h_match) fprintf(stdout, "Does not Match\n");
  else fprintf(stdout, "Does Match\n");
  cudaEventSynchronize(stop3);
  cudaEventElapsedTime(&final_time3, start3, stop3);
  cudaEventRecord(stopf);
  cudaEventSynchronize(stopf);
  cudaEventElapsedTime(&final_timef, startf, stopf);
  printf("Encrypt Time: %4.10f seconds\n", final_time1/1000);
  printf("Decrypt Time: %4.10f seconds\n", final_time2/1000);
  printf("Match Time: %4.10f seconds\n", final_time3/1000);
  printf("Total Time: %4.10f seconds\n\n", final_timef/1000);
  // FIX: release everything (buf and match were leaked before)
  free(buf);
  cudaFree(dev_p);
  cudaFree(dev_c);
  cudaFree(dev_r);
  cudaFree(match);
  return 0;
}
|
674
|
/* Copyright (C) 2012 Fabrizio Gueli
*
* This file is part of Cuda-complex-sim
*
* Cuda-complex-sim is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version.
*
* Cuda-complex-sim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Cuda-complex-sim. If not, see <http://www.gnu.org/licenses/>.
*/
/*
#include "device.cuh"
#include "host.hpp"
#include "graph_transf.hpp"
#include "h_barabasi_game.hpp"
#include "hygra.cuh"
int main(int argc, char** argv)
{
bool* nodes_dev;
Link* links_target_dev;
bool* flagw_array;
bool* flagr_array;
uint32_t* counter;
task_t* task_dev;
task_arguments* task_args_dev;
message_t* inbox_dev;
n_attribute *prog;
curandState *d_state;
// if (argc!=3)
// {
// perror("\nErrore");
// exit(1);
// }
// uint32_t max_nodes=atoi(argv[1]);
// uint8_t average_links=atoi(argv[2]);
uint16_t supplementary_size= 10;
uint32_t max_nodes = 1000000;
uint8_t average_links= 2;
uint16_t barabasi_initial_nodes=average_links+1;
allocateDataStructures(&prog,&nodes_dev, &task_dev, &task_args_dev, &links_target_dev, &inbox_dev,max_nodes,average_links,supplementary_size,&d_state,&flagw_array,&flagr_array,&counter);
h_allocateDataStructures(supplementary_size,max_nodes,average_links);
Graph g = h_barabasi_game(barabasi_initial_nodes, 1, max_nodes);
startSimulation(links_target_dev,nodes_dev,supplementary_size,g);
srand(time(NULL));
// Link init;
// init.target=-1;
// init_data<<<BLOCKS,THREADS_PER_BLOCK>>>();
// h_initArray<bool>(false,h_nodes_array,h_max_nodes_number);
// h_initArray<Link>(init, h_links_target_array, h_max_nodes_number*h_average_links_number);
// adjlistToCompactList(g);
// copyToDevice(nodes_dev,h_nodes_array , 0, h_max_nodes_number );
// copyToDevice(links_target_dev,h_links_target_array ,0, h_max_nodes_number*h_average_links_number );
size_t avail;
size_t total;
cudaMemGetInfo( &avail, &total );
size_t used = total - avail;
printf("\nMemoria: totale %d, in uso %d, disponibile: %d", total, used, avail);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start record
cudaEventRecord(start, 0);
message_test<<<BLOCKS,THREADS_PER_BLOCK,h_average_links_number*THREADS_PER_BLOCK*sizeof(Link)>>>();
message_test2nd<<<BLOCKS,THREADS_PER_BLOCK,h_average_links_number*THREADS_PER_BLOCK*sizeof(Link)>>>();
message_test2nd<<<BLOCKS,THREADS_PER_BLOCK,h_average_links_number*THREADS_PER_BLOCK*sizeof(Link)>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop); // that's our time!
// Clean up:
cudaEventDestroy(start);
cudaEventDestroy(stop);
FILE *file;
file=fopen("times.txt","a");
fprintf(file, "%f\n",elapsedTime);
fflush(file);
fclose(file);
cudaThreadExit();
return 0;
}
*/
|
675
|
// Non-local-means-style temporal filter over a masked image region.
// dimensions layout (as used by the indexing below — confirm against the
// host-side packing code):
//   0-2  target offset (x, y, z) added to the thread coordinates
//   3-4  strides used for y (row) and z (slice) addressing
//   5    frame (time) stride
//   6    number of filter taps
//   9    sample_count (number of time frames)
// filter_source_offsets[f] gives the flat offset from a target voxel to the
// f-th candidate source voxel; inclusion_image acts as a 0/1 mask and weight.
// NOTE(review): `error_measure` is an unused parameter here; and the grid is
// linearised through blockIdx.x via real_grid_dimensions[0].
__global__ void cuda_filter_tips_fast(
float *target_image,
float const *const source_image,
float const *const inclusion_image,
float const *const filter_kernel,
int const *const dimensions,
int const *const filter_source_offsets,
float const *const error_measure,
unsigned int const *const real_grid_dimensions)
{
// Recover a 3D block index from the (x-linearised) launch grid.
unsigned int block_index_x = blockIdx.x % real_grid_dimensions[0];
unsigned int block_index_y = blockIdx.x / real_grid_dimensions[0];
unsigned int block_index_z = blockIdx.y;
unsigned int target_index_x = threadIdx.x + (block_index_x * blockDim.x);
unsigned int target_index_y = threadIdx.y + (block_index_y * blockDim.y);
unsigned int target_index_z = threadIdx.z + (block_index_z * blockDim.z);
// Flat address of this thread's voxel within one frame, including the
// dimensions[0..2] offsets.
unsigned int target_index_frame_offset = target_index_x + dimensions[0]+
((target_index_y + dimensions[1]) * dimensions[3])+
((target_index_z + dimensions[2]) * dimensions[4]);
float total_contribution = 0;
for (int filter_index = 0; filter_index < dimensions[6]; filter_index++)
{
int source_index_frame_offset = target_index_frame_offset + filter_source_offsets[filter_index];
// Skip source voxels outside the inclusion mask.
if (inclusion_image[source_index_frame_offset] == 0.0f)
{
continue;
}
// compute error, this is were the magic happens:
// choose any function that has range {0, 1} to determine contribution
// Sum of squared differences between the target and source voxel across
// all time frames.
float mse = 0;
for (int time_index = 0; time_index < dimensions[9]; time_index++)
{
float time_error = source_image[target_index_frame_offset + (time_index * dimensions[5])] -
source_image[source_index_frame_offset + (time_index * dimensions[5])];
mse += time_error * time_error;
}
// note magic number (empirical scaling constant; origin unknown)
float k = (mse * mse) / (43245000.0f);
// Beyond this threshold exp(-k) is negligible; skip the work.
if (k >= 4.61f)
{
continue;
}
float contribution = filter_kernel[filter_index] * inclusion_image[source_index_frame_offset] * expf(-k);
// here the magic ends
total_contribution += contribution;
// Accumulate the weighted source time series into the target voxel.
for (int time_index = 0; time_index < dimensions[9]; time_index++)
{
target_image[target_index_frame_offset + (time_index * dimensions[5])] += source_image[source_index_frame_offset + (time_index * dimensions[5])] * contribution;
}
}
// Normalise by the total weight.
// NOTE(review): if no source voxel contributed, total_contribution is 0 and
// this divides by zero — confirm the mask guarantees at least one hit.
for (int time_index = 0; time_index < dimensions[9]; time_index++)
{
target_image[target_index_frame_offset + (time_index * dimensions[5])] /= total_contribution;
}
}
|
676
|
#include "test.cuh"
// Kernel: double every element of ptr[0..len) using a grid-stride loop,
// so any grid/block configuration covers the whole array.
__global__ void cuda_lanch_add(float *ptr, const int len)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    // FIX: removed the redundant inner `if (i < len)` — the loop condition
    // already guarantees it (dead code).
    for (int i = index; i < len; i = i + stride)
    {
        ptr[i] = ptr[i] * 2;
    }
}
// Host wrapper: launch cuda_lanch_add over `len` floats with a fixed
// 10-block x 256-thread configuration (the kernel's grid-stride loop
// handles any len).
void add(float *ptr, const int len)
{
    const dim3 threads(256);
    const dim3 blocks(10);
    cuda_lanch_add<<<blocks, threads>>>(ptr, len);
}
|
677
|
/*-----------
*
* square.cu
*
* This is the source file of an increment kernel.
*
* This kernel is from CUDA samples. simpleOccupancy.cu
*
* streamsOptBenchmark/square.cu
*
* By Hao Li
*
*------------
*/
// #include "functions.h"
////////////////////////////////////////////////////////////////////////////////
// Test kernel
//
// This kernel squares each array element. Each thread addresses
// himself with threadIdx and blockIdx, so that it can handle any
// execution configuration, including anything the launch configurator
// API suggests.
////////////////////////////////////////////////////////////////////////////////
// Squares each in-bounds array element into out_array; the million-iteration
// outer loop is intentional — this kernel serves as a long-running workload
// for stream-overlap benchmarking.
__global__ void square(float *in_array, float *out_array, int arrayCount)
{
    for(int l = 0; l < 1000000; l++)
    {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        // FIX: the original wrote out_array[idx] = in_array[idx] BEFORE the
        // bounds check, an out-of-bounds global read+write for excess threads.
        if (idx < arrayCount) {
            out_array[idx] = in_array[idx];
            out_array[idx] *= out_array[idx];
        }
    }
}
// int main(int argc, char **argc){
// int matrixDataSize = MATRIX_SIZE * MATRIX_SIZE;
// printf("%d\n", matrixDataSize);
// Matrix h_A, d_A;
// initMatrix(h_A, matrixDataSize, onHOST);
// initMatrix(d_A, matrixDataSize, onDEVICE);
// cudaMemcpy(d_A.elements, h_A.elements, matrixDataSize, cudaMemcpyHostToDevice);
// // set kernel launch configuration
// dim3 threads = dim3(512, 1);
// dim3 blocks = dim3(matrixDataSize / threads.x, 1);
// square<<<blocks, threads, 0, 0>>>(d_A.elements, 100000);
// cudaMemcpy(h_A.elements, d_A.elements, matrixDataSize, cudaMemcpyDeviceToHost);
// free(h_A.elements);
// cudaFree(d_A.elements);
// return 0;
// }
|
678
|
#include "matrix.cuh"
#define ROW_INDEX 0
#define COL_INDEX 1
#define NUM_INDEXES 2
// Flattens every matrix in `list`, in order, into a single 1 x total row
// vector allocated from `buffer`.
__device__ matrix_t* device_roll_matrix_list(buffer_t* buffer, matrix_list_t* list)
{
	unsigned int i;
	// Total element count across all matrices.
	unsigned int total_size = 0;
	for(i=0; i<list->num; i++)
	{
		total_size += list->matrix_list[i]->rows * list->matrix_list[i]->cols;
	}
	matrix_t* vector = device_matrix_constructor(buffer, 1, total_size);
	// Copy each matrix's storage back-to-back into the vector.
	float* dst = vector->matrix;
	for(i=0; i<list->num; i++)
	{
		unsigned int count = list->matrix_list[i]->rows * list->matrix_list[i]->cols;
		memcpy(dst, list->matrix_list[i]->matrix, count * sizeof(float));
		dst += count;
	}
	return vector;
}
// Inverse of device_roll_matrix_list: slices `vector` back into `num`
// matrices whose shapes are given by sizes[i] = {rows, cols}.
__device__ matrix_list_t* device_unroll_matrix_list(buffer_t* buffer, matrix_t* vector, int num, unsigned int sizes[][NUM_INDEXES])
{
	matrix_list_t* list = device_matrix_list_constructor(buffer, num);
	float* src = vector->matrix;
	for(unsigned int i=0; i<num; i++)
	{
		unsigned int rows = sizes[i][ROW_INDEX];
		unsigned int cols = sizes[i][COL_INDEX];
		list->matrix_list[i] = device_matrix_constructor(buffer, rows, cols);
		// Copy this matrix's slice out of the flat vector.
		memcpy(list->matrix_list[i]->matrix, src, rows * cols * sizeof(float));
		src += rows * cols;
	}
	return list;
}
|
679
|
#include <cstdio>
// Enumerates CUDA devices and prints their key properties.
// Returns -1 if no CUDA GPU is detected, 0 otherwise.
int deviceQuery()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        if (dev == 0) {
            // 9999.9999 is the sentinel reported when no real device exists
            if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
                printf( "No CUDA GPU has been detected\n");
                return -1;
            } else if (deviceCount == 1) {
                printf( "There is 1 device supporting CUDA\n");
            } else {
                printf("There are %d devices supporting CUDA\n", deviceCount);
            }
        }
        // FIX: the format string lacked a newline, so the capability line
        // ran onto the end of the device-name line.
        printf( "Device %d: %s\n", dev, deviceProp.name);
        printf( " Computational Capabilities: %d,%d\n", deviceProp.major, deviceProp.minor);
        printf( " Maximum global memory size: %lu\n", deviceProp.totalGlobalMem);
        printf( " Maximum constant memory size: %lu\n", deviceProp.totalConstMem);
        printf( " Maximum shared memory size per block: %lu\n", deviceProp.sharedMemPerBlock);
        printf( " Maximum block dimensions: %d x %d x %d\n",
                deviceProp.maxThreadsDim[0],
                deviceProp.maxThreadsDim[1],
                deviceProp.maxThreadsDim[2]);
        printf( " Maximum grid dimensions: %d x %d x %d\n",
                deviceProp.maxGridSize[0],
                deviceProp.maxGridSize[1],
                deviceProp.maxGridSize[2]);
        printf( " Warp size: %d\n", deviceProp.warpSize);
    }
    return 0;
}
|
680
|
#include <iostream>
#include <cstdio>
#include <cstdlib>
using namespace std;
// Lists every CUDA device with its name and compute capability.
int main()
{
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    cout << count << endl;
    for(int i = 0; i < count;++i)
    {
        cudaGetDeviceProperties(&prop,i);
        printf("Name: %s\n",prop.name);
        // FIX: corrected the "Cumpute" typo in the output message
        printf("Compute capability: %d.%d\n",prop.major,prop.minor);
        printf("\n");
    }
}
|
681
|
#include "includes.h"
// Element-wise power kernel: y[i] = x[i]^a for i < n.
// One thread per element over a 2D grid of 1D blocks.
__global__ void _bcnn_pow_kernel(int n, float *x, float a, float *y) {
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    // FIX: use single-precision powf; the original's pow() promoted both
    // operands to double on every element in a float kernel.
    if (i < n) y[i] = powf(x[i], a);
}
|
682
|
#include <iostream>
#include <cstdlib>
using namespace std;
__global__
// Square matrix multiply Pd = Md x Nd (row-major, width x width).
// Generalised to honour the block grid: with the original <<<(1,1),(w,w)>>>
// launch blockIdx is 0 and behaviour is unchanged, but the kernel now also
// works with tiled multi-block launches, and excess threads are guarded.
void MatrixMultiplication(unsigned long* Md, unsigned long* Nd, unsigned long* Pd, int width) {
    int tx = blockIdx.x * blockDim.x + threadIdx.x; // output column
    int ty = blockIdx.y * blockDim.y + threadIdx.y; // output row
    if (tx >= width || ty >= width) return;
    // Pvalue accumulates the dot product of row ty of Md and column tx of Nd
    unsigned long Pvalue = 0;
    for(int k = 0; k < width; k++) {
        Pvalue += Md[ty * width + k] * Nd[k * width + tx];
    }
    // One element written per thread
    Pd[ty * width + tx] = Pvalue;
}
// Driver: time a width x width matrix multiply on the GPU.
int main(int argc, char **argv) {
    int width = 1500;
    cout << width << " x " << width << endl;
    unsigned long size = width * width * sizeof(unsigned long);
    unsigned long* A = (unsigned long*) malloc(size);
    unsigned long* B = (unsigned long*) malloc(size);
    unsigned long* C = (unsigned long*) malloc(size);
    srand (time(NULL));
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            A[i * width + j] = (rand() % 100) + 1;
            B[i * width + j] = (rand() % 200) + 1;
        }
    }
    unsigned long* Ad;
    unsigned long* Bd;
    unsigned long* Cd;
    // transfer A and B to device memory
    cudaMalloc((void**) &Ad, size);
    cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &Bd, size);
    cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    // C holds the multiplication result
    cudaMalloc((void**) &Cd, size);
    dim3 dimBlock(width, width);
    dim3 dimGrid(1,1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    MatrixMultiplication<<<dimGrid,dimBlock>>>(Ad, Bd, Cd, width);
    // FIX: the original never checked the launch. A width x width thread
    // block (1500 x 1500 here) far exceeds the 1024-threads-per-block limit,
    // so this launch fails silently and C is garbage; report it so the
    // timing is not mistaken for a real run. Tiling the grid requires the
    // kernel to use blockIdx as well.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        cout << "Kernel launch failed: " << cudaGetErrorString(launch_err) << endl;
    }
    cudaEventRecord(stop);
    cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout << endl;
    cout << milliseconds << endl;
    // FIX: release events and all host/device memory (everything leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    free(A);
    free(B);
    free(C);
    return 0;
}
|
683
|
#include <stdio.h>
#include <string.h>
#define tpb 32
// Kernel: out[i] = nums[i] % 2 (1 for odd) for i < *len; the thread that
// owns the final element also records its parity in *last.
__global__ void oddCheck(int* nums, int* len, int* out, int* last){
    int i = blockIdx.x * tpb + threadIdx.x;
    if (i >= *len) return;
    out[i] = nums[i] % 2;
    if (i == (*len) - 1) *last = out[i];
}
// Kernel: shift a scan result left by one — out[i-1] = inp[i] — converting
// between exclusive/inclusive scan layouts. The thread owning the last slot
// writes inp[len-1] + *last there instead (*last holds the parity of the
// final input element, set earlier by oddCheck) and updates *last to that
// value, which downstream code reads as the total odd count.
__global__ void exToIn(int* inp, int* out, int*len, int*last){
int index = threadIdx.x + blockIdx.x*tpb;
if((index>0)&&(index<*len)){
out[index-1]=inp[index];
}
// Last thread: fold in *last and publish the running total.
if(index==((*len)-1)) { out[index]=inp[index]+*last;
*last=out[index];
}
}
// Blelloch-scan up-sweep: one pass with stride `step`. Each index that ends
// a 2*step-wide segment accumulates the partial sum located `step` behind it.
// *len is the (power-of-two padded) array length; *tLen is the unpadded
// element count.
// NOTE(review): the early-out on *tLen skips most padding indices, which is
// only correct if the padding region contributes zeros — confirm the caller
// zero-initialises arr beyond *tLen.
__global__ void upSweep(int* arr, int* len, int* tLen, int step){
int index=threadIdx.x + blockIdx.x*tpb;
if(index>*tLen) return;
// Only segment-end indices (and in-range, non-zero ones) participate.
if((((index+1)%(step*2))!=0) || index==0 || ((*len)<=index)) return;
arr[index]=arr[index]+arr[index-step];
}
// Blelloch-scan down-sweep: one pass with stride `step`, launched with step
// halving from *len/2 down to 1. On the first pass (2*step == *len) the root
// arr[len-1] is cleared, as the exclusive-scan algorithm requires; each
// participating segment-end index then swaps with the element `step` behind
// it and accumulates it.
// NOTE(review): the root clear and the same thread's swap in one launch rely
// on the statement order within that single thread; other threads' guards
// exclude index len-1 conflicts only for matching strides — intricate, left
// as written.
__global__ void downSweep(int* arr, int* len, int* tLen, int step){
int index=threadIdx.x + blockIdx.x*tpb;
if(2*step==*len) arr[(*len)-1]=0;
if((((index+1)%(step*2))!=0) || (index==0) || ((*len)<=index)) return;
// Classic down-sweep swap-and-add.
int tmp=arr[index-step];
arr[index-step]=arr[index];
arr[index]+=tmp;
}
// Debug kernel: print the *len elements of arr as a comma-separated list.
// Serial — intended for a <<<1,1>>> launch.
__global__ void printArr(int* arr, int* len){
    int n = *len;
    for (int i = 0; i + 1 < n; i++) printf("%d, ", arr[i]);
    printf("%d", arr[n - 1]);
}
// Kernel: scatter-compact the odd inputs using the inclusive prefix counts in
// prefix[] — whenever prefix steps up at i, inp[i] is odd and belongs at
// out[prefix[i-1]].
__global__ void copyOddsP(int*inp, int*prefix, int*inpLen,int*out){
    // FIX: the original guards used (blockIdx.x + threadIdx.x), which is NOT
    // the global thread index (wrong for every block after the first);
    // compute the flat index once and use it consistently.
    int i = threadIdx.x + blockIdx.x*tpb;
    if (i == 0) {
        // FIX: only keep inp[0] when it is actually odd (prefix[0] != 0); the
        // original wrote it unconditionally, racing with the first genuinely
        // odd element's write to out[0].
        if (prefix[0] != 0) out[0] = inp[0];
    } else if (i < *inpLen) {
        if (prefix[i] != prefix[i-1]) {
            out[prefix[i-1]] = inp[i];
        }
    }
}
// Driver: read comma-separated ints from inp.txt, keep only the odd ones
// (GPU stream compaction via a Blelloch scan), and write them to q3.txt.
int main(int argc,char **argv){
    char buff[50000];
    int inp[15000];
    buff[0]=' ';
    char* token;
    FILE* fp = fopen("inp.txt", "r" );
    // FIX: fail gracefully instead of crashing in fgets when the file is
    // missing.
    if (fp == NULL) {
        fprintf(stderr, "cannot open inp.txt\n");
        return 1;
    }
    fgets(buff+1, 50000, fp);
    token=strtok(buff,",");
    int numLen=0;
    while(token!=NULL){
        inp[numLen]=atoi(token+1);
        numLen++;
        token=strtok(NULL,",");
    }
    int* nums = inp;
    // Pad the working length up to a power of two for the scan kernels.
    int falseLen=1;
    while(falseLen<numLen) falseLen*=2;
    int Len=falseLen;
    int* cudLen;
    cudaMalloc(&cudLen,sizeof(int));
    cudaMemcpy(cudLen,&Len,sizeof(int),cudaMemcpyHostToDevice);
    int* trueLen;
    cudaMalloc(&trueLen,sizeof(int));
    cudaMemcpy(trueLen,&numLen,sizeof(int),cudaMemcpyHostToDevice);
    int* cudNum;
    cudaMalloc(&cudNum,(Len)*sizeof(int));
    // FIX: copy only the numLen real inputs — Len can exceed both numLen and
    // the 15000-element capacity of inp, so copying Len ints read out of
    // bounds on the host.
    cudaMemcpy(cudNum,nums,numLen*sizeof(int),cudaMemcpyHostToDevice);
    int* out;
    cudaMalloc(&out,(Len+1)*sizeof(int));
    // FIX: oddCheck only writes the first numLen slots; zero the power-of-two
    // padding so the scan kernels fold in zeros instead of garbage.
    cudaMemset(out, 0, (Len+1)*sizeof(int));
    int* last;
    cudaMalloc(&last,sizeof(int));
    // Parity flags, then exclusive scan (up-sweep + down-sweep).
    oddCheck<<<(Len+tpb)/tpb,tpb>>>(cudNum,trueLen,out,last);
    for(int step=1; step<Len; step*=2){
        upSweep<<<(Len+tpb)/tpb,tpb>>>(out,cudLen,trueLen,step);
    }
    for(int step=Len/2; step>0; step/=2){
        downSweep<<<(Len+tpb)/tpb,tpb>>>(out,cudLen,trueLen,step);
    }
    Len=numLen;
    cudLen=trueLen;
    // Shift the exclusive scan into the inclusive form copyOddsP expects.
    int* shifted;
    cudaMalloc(&shifted,Len*sizeof(int));
    exToIn<<<(Len+tpb)/tpb,tpb>>>(out,shifted,cudLen,last);
    int* cudOut;
    cudaMalloc((void**) &cudOut, Len*sizeof(int));
    copyOddsP<<<(Len+tpb)/tpb,tpb>>>(cudNum, shifted, cudLen,cudOut);
    // *last now holds the number of odd elements.
    int len;
    cudaMemcpy(&len,last,sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(inp,cudOut,len*sizeof(int),cudaMemcpyDeviceToHost);
    fclose(fp);
    FILE* fp_end = fopen("q3.txt", "w");
    for (int i = 0; i < len; i++) {
        fprintf(fp_end, "%d", inp[i]);
        if (i != len-1) {
            fprintf(fp_end, ", ");
        }
    }
    // FIX: the output file was never closed.
    fclose(fp_end);
    cudaFree(cudLen);
    cudaFree(cudNum);
    cudaFree(out);
    cudaFree(last);
    cudaFree(shifted);
    cudaFree(cudOut);
    return 0;
}
|
684
|
#include<stdio.h>
#include<cuda.h>
#define row1 10
#define col1 10
#define row2 10
#define col2 10
typedef long long int LLI;
// Shared-memory matrix multiply: block (x, y) computes one output element
// n[y][x]. Each of the block's col1 threads computes one product term
// l[y][k] * m[k][x] into shared p[k]; after the barrier every thread
// redundantly sums all col1 terms and writes the same final value.
// Requires blockDim.x == col1 (launched as <<<grid, col1>>> below).
__global__ void matproductsharedmemory(LLI *l,LLI *m, LLI *n)
{
LLI x=blockIdx.x;
LLI y=blockIdx.y;
__shared__ LLI p[col1];
LLI i;
LLI k=threadIdx.x;
// Every thread in the block writes the same zero to the same slot.
n[col2*y+x]=0;
// One product term per thread into shared memory.
p[k]=l[col1*y+k]*m[col2*k+x];
__syncthreads();
// Redundant (but race-free in value) accumulation by all threads.
for(i=0;i<col1;i++)
n[col2*y+x]=n[col2*y+x]+p[i];
}
// Host driver: build two 10x10 operand matrices, multiply them on the GPU
// with the shared-memory kernel, and copy the product back.
int main()
{
    LLI hostA[row1][col1];
    LLI hostB[row2][col2];
    LLI hostC[row1][col2];
    LLI *devA, *devB, *devC;

    // hostA[i][j] = i*row1 + j and hostB[i][j] = i*row2 + j.
    for (LLI r = 0; r < row1; ++r)
        for (LLI c = 0; c < col1; ++c)
            hostA[r][c] = r * row1 + c;
    for (LLI r = 0; r < row2; ++r)
        for (LLI c = 0; c < col2; ++c)
            hostB[r][c] = r * row2 + c;

    cudaMalloc((void **)&devA, row1 * col1 * sizeof(LLI));
    cudaMalloc((void **)&devB, row2 * col2 * sizeof(LLI));
    cudaMalloc((void **)&devC, row1 * col2 * sizeof(LLI));
    cudaMemcpy(devA, hostA, row1 * col1 * sizeof(LLI), cudaMemcpyHostToDevice);
    cudaMemcpy(devB, hostB, row2 * col2 * sizeof(LLI), cudaMemcpyHostToDevice);

    // 2D grid: one block per output element, col1 threads per block.
    dim3 grid(col2, row1);
    matproductsharedmemory<<<grid, col1>>>(devA, devB, devC);

    cudaMemcpy(hostC, devC, row1 * col2 * sizeof(LLI), cudaMemcpyDeviceToHost);
    (void)hostC; // result kept for inspection; printing is disabled as before

    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    return 0;
}
/*
OUTPUT profile
==13287== NVPROF is profiling process 13287, command: ./a.out
==13287== Profiling application: ./a.out
==13287== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 94.72% 2.5322ms 1 2.5322ms 2.5322ms 2.5322ms matproductsharedmemory(__int64*, __int64*, __int64*)
3.68% 98.338us 2 49.169us 49.025us 49.313us [CUDA memcpy HtoD]
1.61% 42.913us 1 42.913us 42.913us 42.913us [CUDA memcpy DtoH]
API calls: 98.22% 189.54ms 3 63.178ms 5.3290us 189.52ms cudaMalloc
1.43% 2.7661ms 3 922.02us 26.698us 2.6712ms cudaMemcpy
0.19% 361.76us 94 3.8480us 170ns 233.68us cuDeviceGetAttribute
0.08% 150.22us 3 50.073us 6.2080us 110.67us cudaFree
0.05% 89.941us 1 89.941us 89.941us 89.941us cuDeviceTotalMem
0.01% 27.216us 1 27.216us 27.216us 27.216us cuDeviceGetName
0.01% 24.939us 1 24.939us 24.939us 24.939us cudaLaunch
0.00% 2.2690us 3 756ns 186ns 1.7650us cuDeviceGetCount
0.00% 1.0820us 2 541ns 239ns 843ns cuDeviceGet
0.00% 955ns 3 318ns 172ns 542ns cudaSetupArgument
0.00% 724ns 1 724ns 724ns 724ns cudaConfigureCall
*/
|
685
|
#include <stdio.h>
#include "cuda_profiler_api.h"
#define SIZE 20480
// Kernel: element-wise vector addition, c[i] = a[i] + b[i] for i < len.
__global__ void
vecAdd(int *a,int *b, int *c, int len)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len) return;
    c[idx] = a[idx] + b[idx];
}
// CPU reference: c[i] = a[i] + b[i] for i in [0, len).
void vecAdd_CPU(int *a,int *b, int *c, int len)
{
    for (int i = 0; i < len; ++i) {
        c[i] = a[i] + b[i];
    }
}
// Fills the test vectors: a[i] = 5i, b[i] = 6i, c zeroed.
void loadVal(int *a, int *b, int *c, int len)
{
    for (int i = 0; i < len; ++i) {
        a[i] = 5 * i;
        b[i] = 6 * i;
        c[i] = 0;
    }
}
// Prints the first 10 elements of arr (assumes arr has at least 10 entries).
void dispRes(int *arr)
{
    printf("Results of the first 10 elements\n");
    for (int i = 0; i < 10; ++i) {
        printf("%d, ", arr[i]);
    }
}
// Driver: add two SIZE-element vectors on the GPU (and on the CPU for
// comparison), then display the first few results.
int main(void)
{
    int *a, *b, *c;          // host vectors
    int *d_a, *d_b, *d_c;    // device vectors

    a = (int*)malloc(SIZE*sizeof(int));
    b = (int*)malloc(SIZE*sizeof(int));
    c = (int*)malloc(SIZE*sizeof(int));

    // Device allocations
    cudaMalloc((void**)&d_a, SIZE*sizeof(int));
    cudaMalloc((void**)&d_b, SIZE*sizeof(int));
    cudaMalloc((void**)&d_c, SIZE*sizeof(int));

    printf("Loading values to the array...\n");
    loadVal(a, b, c, SIZE);

    // Host-to-device transfers
    cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);

    printf("Calling vector adding function...\n");
    dim3 DimGrid((SIZE-1)/256+1, 1, 1);
    dim3 DimBlock(256, 1, 1);
    cudaProfilerStart();
    vecAdd<<<DimGrid, DimBlock>>>(d_a, d_b, d_c, SIZE);
    cudaProfilerStop();

    // CPU equivalent for reference
    vecAdd_CPU(a, b, c, SIZE);

    // Bring the GPU result back (overwrites the CPU result in c)
    cudaMemcpy(c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
    dispRes(c);

    system("PAUSE");

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
686
|
#include<stdlib.h>
#include<stdio.h>
#include<malloc.h>
#include<time.h>
#define arm 32
__device__ int globalArray[32];
// Histogram of bit-lengths: for each input a[tid], compute how many bits
// are needed to represent it (0 for the value 0) and atomically bump that
// bucket of the device-global histogram.  Launch with one thread per
// input element in a single block.
__global__ void add(int *a,int *c)
{
int tid = threadIdx.x;
int temp=a[tid];
int count=0;
// Halve until zero: 'count' ends up as the bit-length of a[tid].
while(temp!=0)
{
count++;
temp=temp/2;
}
atomicAdd(&globalArray[count], 1);
// NOTE(review): this read races with atomicAdds from other threads, so
// c[count] may hold a partial count rather than the final total, and
// buckets never hit by any thread are left unwritten — confirm intent.
c[count]=globalArray[count];
}
// Host driver: builds arr = {0..19}, runs the bit-length histogram kernel,
// and prints the first 20 histogram buckets.
int main(void)
{
    int arr[20];
    int count = 20;
    int bitband[arm] = {0};
    for (int i = 0; i < 20; ++i)
    {
        arr[i] = i;
    }
    int *d_a, *d_c;
    int size = sizeof(int);
    cudaMalloc((void **)&d_a, size*count);
    cudaMalloc((void **)&d_c, size*arm);
    // Bug fix: zero the output buffer first — the kernel only writes the
    // buckets that are actually hit, so untouched entries were previously
    // read back as uninitialized device memory.
    cudaMemset(d_c, 0, size*arm);
    cudaMemcpy(d_a, arr, size*count, cudaMemcpyHostToDevice);
    add<<<1,count>>>(d_a, d_c);
    cudaMemcpy(bitband, d_c, size*arm, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 20; ++i)
    {
        printf("%d\n", bitband[i]);
    }
    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}
|
687
|
#include "includes.h"
// Build a histogram-equalization lookup table from a 256-bin histogram.
// Expects a single block of exactly 256 threads (one thread per bin).
// lut[i] = round((cdf[i] - min) * 255 / (img_size - min)), clamped at 0,
// where min is the first non-zero histogram count.
__global__ void calc_lut(int *lut, int * hist_in, int img_size, int nbr_bin){
__shared__ int shared_hist[256];
// Stage the histogram in shared memory, one bin per thread.
shared_hist[threadIdx.x] = hist_in[threadIdx.x];
__syncthreads();
__shared__ int cdf[256];
__syncthreads();
int i, min, d;
//int cdf = 0;
min = 0;
i = 0;
// Every thread scans forward for the first non-empty bin; that count is
// the CDF's minimum, used to normalize the mapping below.
while(min == 0){
min = shared_hist[i++];
}
d = img_size - min;
// In-place Kogge-Stone-style inclusive prefix sum over the histogram.
// NOTE(review): two hazards here — (1) the loop bound depends on
// threadIdx.x, so the __syncthreads() inside is executed a different
// number of times per thread (divergent barrier, undefined behavior);
// (2) within a pass, the read of shared_hist[threadIdx.x-stride] races
// with another thread's write to that slot (needs double buffering or a
// second barrier).  Verify on the target hardware before trusting output.
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
shared_hist[threadIdx.x] += shared_hist[threadIdx.x-stride];
}
cdf[threadIdx.x] = shared_hist[threadIdx.x];
//printf("cdf = %d\n",cdf);
__syncthreads();
//for(i = 0; i <= threadIdx.x; i ++){ //tha mporouse na ginei me prefix sum san veltistoipohsh FIXME
// cdf += shared_hist[i];
// lut[i] = (cdf - min)*(nbr_bin - 1)/d;
//}
//printf("cdf = %d\n",cdf);
// NOTE(review): 255 is hard-coded although nbr_bin is passed in —
// presumably nbr_bin is always 256; confirm or use (nbr_bin - 1).
lut[threadIdx.x] = (int)(((float)cdf[threadIdx.x] - min)*255/d + 0.5);
// Clamp negative table entries to zero.
if(lut[threadIdx.x] < 0){
lut[threadIdx.x] = 0;
}
}
|
688
|
#include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
const int BLOCK = 256;
// Block-wise sum reduction: each block loads 2*blockDim.x floats from I
// (zero-padded past index l) into shared memory, reduces them in log2
// steps, and writes its partial sum to O[blockIdx.x].
__global__
void AddListK(float *I, float *O, int l)
{
int b = blockIdx.x;
int t = threadIdx.x;
__shared__ float pSum[BLOCK*2];
unsigned int start = 2*blockDim.x*b;
(start+t < l) ? pSum[t] = I[start+t]: pSum[t] = 0.0;//First half
(start+blockDim.x+t < l) ? pSum[t+blockDim.x] = I[start+blockDim.x+t] : pSum[t+blockDim.x] = 0.0;//Second half
__syncthreads();
// Tree reduction: the stride halves each pass; the barrier at the top of
// the loop body separates consecutive passes.
// NOTE(review): the else arm rewrites pSum[t] += 0 for inactive threads
// while an active thread may concurrently read pSum[t+s] — a formal data
// race (same value is rewritten, but racecheck will flag it); confirm
// it is benign on the target hardware.
for(unsigned int s = blockDim.x; s > 0; s/=2){
__syncthreads();
(t < s) ? pSum[t] += pSum[t+s] : pSum[t]+= 0;
}
//printf("Sum =%f ", pSum[0]);
O[b] = pSum[0];
}
// Sum the h_l floats in h_I on the GPU: each block of BLOCK threads
// reduces 2*BLOCK inputs to one partial sum (AddListK), and the host adds
// the per-block partials.  Returns the total as a double.
__host__
double addList(float *h_I, int h_l){
    float *d_I, *d_O;
    // One output element per block; each block consumes 2*BLOCK inputs.
    // Round up so a partial final block is still covered (the kernel pads
    // out-of-range elements with zero).
    // Bug fix: the old code tested `olen % (BLOCK<<1)` instead of
    // `h_l % (BLOCK<<1)`, which left olen == 0 for inputs shorter than
    // 2*BLOCK (zero-sized grid and output).
    int olen = (h_l + (BLOCK << 1) - 1) / (BLOCK << 1);
    float h_O[olen];
    cudaMalloc((void **) &d_I, sizeof(float)*h_l);
    cudaMalloc((void **) &d_O, sizeof(float)*olen);
    cudaMemcpy(d_I, h_I, sizeof(float)*h_l, cudaMemcpyHostToDevice);
    dim3 dimGrid(olen, 1, 1);
    dim3 dimBlock(BLOCK, 1, 1);
    AddListK<<<dimGrid, dimBlock>>>(d_I, d_O, h_l);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_O, d_O, sizeof(float)*olen, cudaMemcpyDeviceToHost);
    cudaFree(d_I);cudaFree(d_O);
    // Accumulate the per-block partial sums on the host.
    double total = 0.0;
    for(int i = 0; i < olen; i ++){
        total += h_O[i];
    }
    return total;
}
// Fill a[0..l-1] with pseudo-random floats in [0, 100) such that no two
// consecutive elements are equal.  Seeds drand48 from the wall clock, so
// results differ between runs.
void populateArray(float a[], int l){
    srand48(time(NULL));
    float prev = drand48()*100;
    float nxt;
    // Bug fix: the first element was never stored (the loop starts at 1),
    // leaving a[0] uninitialized.
    if (l > 0) {
        a[0] = prev;
    }
    for(int i = 1; i < l; i++){
        // Redraw until the new value differs from its predecessor.
        do{
            nxt = drand48()*100;
        }while(nxt==prev);
        a[i] = nxt;
        prev = nxt;
    }
}
// Benchmark driver: sums a 2M-element random array 1000 times on the GPU
// (via addList) and 1000 times on the CPU, then reports the wall-clock
// time of each and the difference between the two totals.
int main(){
    srand(time(NULL));
    //int ilen = (rand() % 6553) * BLOCK;
    int ilen = 2000000;
    // Bug fix: heap-allocate the input — 2M floats (~8 MB) as a local
    // array would overflow a typical thread stack.
    float *I = (float *)malloc(sizeof(float) * ilen);
    populateArray(I, ilen);
    printf("Input length %d", ilen);
    time_t gstart = time(NULL);
    double gtotal = 0.0;
    for(int i = 0; i < 1000; i ++){
        gtotal = addList(I, ilen);
    }
    time_t gstop = time(NULL);
    time_t start = time(NULL);
    double total = 0.0;
    for(int i = 0; i < 1000; i ++){
        total = 0.0;
        for(int j = 0; j < ilen; j ++){
            total += I[j];
        }
    }
    time_t stop = time(NULL);
    printf("Average times\n GPU: %f CPU: %f", difftime(gstop, gstart), difftime(stop, start));
    printf("TOTAL: %f == %f \n DIF: %f", total, gtotal, total-gtotal);
    free(I);
    return 0;
}
|
689
|
#include <stdio.h>
#include <cuda_runtime.h>
// Debug kernel: each thread loads in[tid]; thread 0 prints a greeting and
// its own value, then every thread stores its value to *answer.
__global__ void test(const int *in, int *answer) {
const int tid = threadIdx.x;
if (tid == 0) printf("hello!\n");
int sum = in[tid];
if (tid == 0) printf("sum[0] is %d\n", sum);
// NOTE(review): all threads write a *different* 'sum' to the same
// location; which value survives is unspecified — confirm the intended
// semantics (e.g. whether only thread 0 should write).
*answer = sum;
}
// Copies N ints to the device, runs the debug kernel, and prints the
// value it wrote to d_answer.
int main() {
    int *h_data, *d_data;
    int N = 64;
    int *h_answer, *d_answer;
    h_data = (int *)malloc(N*sizeof(int));
    h_answer = (int *)malloc(sizeof(int));
    cudaMalloc(&d_data, N*sizeof(int));
    cudaMalloc(&d_answer, sizeof(int));
    for (int i = 0; i < N; i += 1) {
        h_data[i] = N - i;
    }
    cudaMemcpy(d_data, h_data, N*sizeof(int), cudaMemcpyHostToDevice);
    // Bug fix: the kernel was launched with the *host* pointer h_data,
    // which is not dereferenceable on the device; pass d_data instead.
    test<<<1, N>>>(d_data, d_answer);
    // Blocking copy synchronizes with the kernel before reading the result.
    cudaMemcpy(h_answer, d_answer, sizeof(int), cudaMemcpyDeviceToHost);
    printf("The answer is %d\n", *h_answer);
    // Release host and device allocations (previously leaked).
    free(h_data);
    free(h_answer);
    cudaFree(d_data);
    cudaFree(d_answer);
}
|
690
|
#include <stdio.h>
#define TILE 32 // Thread block dimension
#define N 8192 // Side of the matrix
#define MATSIZE N * N // Total size of the matrix
#define MEMSIZE MATSIZE * sizeof(double) // Size of matrix in memory
// Generic function to be called for bandwidth testing on GPUs.
typedef void (*kernelFunc)(double *, double *, int);
/**
 * @brief Performs an optimized version of a matrix transposition.
 * @param a The input matrix.
 * @param b The transposed matrix in output.
 * @param size The size of the matrix side.
 *
 * Each block stages its tile in shared memory before writing it back
 * transposed.  The tile's inner dimension is padded by one element: the
 * access pattern tile[threadIdx.x][threadIdx.y] strides the first index
 * across a warp, so with a 32-wide row every lane would hit the same
 * shared-memory bank (32-way conflict); the +1 pad makes the stride 33
 * and the accesses conflict-free.  Output is unchanged.
 */
__global__ void transposeOptimized(double *a, double *b, int size)
{
    __shared__ double tile[TILE][TILE + 1]; // +1 pad avoids bank conflicts
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    tile[threadIdx.x][threadIdx.y] = a[row * size + col];
    __syncthreads();
    b[col * size + row] = tile[threadIdx.x][threadIdx.y];
}
/**
 * @brief Performs a naive version of a matrix transposition on GPU.
 * @param a The input matrix.
 * @param b The transposed matrix in output.
 * @param size The size of the matrix side.
 *
 * One thread per element: reads a[x][y] and writes b[y][x] directly from
 * global memory.
 */
__global__ void transposeNaive(double *a, double *b, int size)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int src = x * size + y;
    int dst = y * size + x;
    b[dst] = a[src];
}
/**
 * @brief Performs a serial version of a matrix transposition on CPU.
 * @param a The input matrix.
 * @param b The transposed matrix in output.
 * @param size The size of the matrix side.
 */
void transposeCpu(double *a, double *b, int size)
{
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            b[col * size + row] = a[row * size + col];
        }
    }
}
/**
 * @brief Validates the equality of two matrices in input.
 * @param a Matrix a.
 * @param b Matrix b.
 * @param size The size of the matrix side.
 * @return 1 when every element matches, 0 at the first mismatch.
 */
int isCorrect(double * a, double * b, int size)
{
    const int total = size * size;
    for (int idx = 0; idx < total; ++idx) {
        if (a[idx] != b[idx]) {
            return 0;
        }
    }
    return 1;
}
/**
 * @brief Tests execution time and bandwidth of a transposition kernel on a GPU.
 * @param kernel The kernel to be tested.
 * @param kernelName The name of the kernel to be tested.
 * @param block_x The x-dimension of the block used to perform blocking for cache.
 * @param block_y The y-dimension of the block used to perform blocking for cache.
 * @return Always 0.
 *
 * The transposition is performed as specified by the kernel function and then is
 * validated against a correctly-transposed matrix. GPU time and bandwidth are
 * printed to stdout.  Assumes N is divisible by block_x and block_y.
 * NOTE(review): no CUDA call here is error-checked; a failed allocation or
 * launch would silently produce a FAIL — consider wrapping with a check macro.
 */
int testCudaBandwidth(kernelFunc kernel, const char * kernelName, int block_x, int block_y)
{
double * h_in, * h_out;
double * dev_in, * dev_out;
double * cpu;
dim3 block(block_x, block_y);
dim3 grid(N / block.x, N / block.y);
h_in = (double *)malloc(MEMSIZE);
h_out = (double *)malloc(MEMSIZE);
cpu = (double *)malloc(MEMSIZE);
cudaMalloc((void **)&dev_in, MEMSIZE);
cudaMalloc((void **)&dev_out, MEMSIZE);
// Fill input matrix with some indices (for validating transposition).
for(int i = 0; i < MATSIZE; ++i)
h_in[i] = i;
// Initial setup of input matrix and timing events.
cudaMemcpy(dev_in, h_in, MEMSIZE, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float exec_time = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Print some informations about the current task.
printf("\nTransposing matrix on CPU for validation...\n");
transposeCpu(h_in, cpu, N);
printf("\nMatrix size: %dx%d, tile: %dx%d, block:%dx%d \n", N, N, TILE, TILE, block_x, block_y);
printf("\nKernel: %s\n\n", kernelName);
// Time kernel execution (cudaEventElapsedTime reports milliseconds).
cudaEventRecord(start);
kernel<<<grid, block>>>(dev_in, dev_out, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exec_time, start, stop);
cudaMemcpy(h_out, dev_out, MEMSIZE, cudaMemcpyDeviceToHost);
printf("%s: %s\n", kernelName, isCorrect(h_out, cpu, N) ? "CORRECT" : "FAIL");
printf("GPU Time: %f\n", exec_time);
// Bandwidth given by reading and writing a matrix during exec_time,
// converted to GB/s for readability (bytes*2 / ms / 1e6 == GB/s).
printf("Bandwidth (GB/s): %f\n", MEMSIZE * 2 / exec_time / 1000000);
printf("-------------------------------\n");
// Cleanup
free(h_in);
free(h_out);
free(cpu);
cudaFree(dev_in);
cudaFree(dev_out);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
// Entry point: benchmark the naive transpose once with a 32x32 block, then
// sweep the optimized transpose over every combination of block dimensions
// drawn from {2, 4, 8, 16, 32}.
int main(int argc, char *argv[])
{
    testCudaBandwidth(&transposeNaive, "Naive Transpose", 32, 32);
    const int dims[5] = {2, 4, 8, 16, 32};
    for (int bx = 0; bx < 5; ++bx) {
        for (int by = 0; by < 5; ++by) {
            testCudaBandwidth(&transposeOptimized, "Optimized Transpose", dims[bx], dims[by]);
        }
    }
    return 0;
}
|
691
|
// Apply a 2x3 affine transform A (row-major: [A0 A1 A2; A3 A4 A5], where
// A2/A5 are the translation terms) to 'lenght' keypoints and store the
// truncated integer results interleaved as (x', y') pairs in newKeypoints.
// Input layout: keypoints[i] holds the first coordinate, and
// keypoints[i + lenght] holds the second, of point i.
__global__ void findCoordinate(float *A, int *keypoints, int *newKeypoints, int lenght){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < lenght){
float a00 = A[0];
float a01 = A[1];
float a10 = A[3];
float a11 = A[4];
float t0 = A[2];
float t1 = A[5];
int x = keypoints[index];
int y = keypoints[index+lenght];
// NOTE(review): x and y are multiplied in swapped order (a00*y + a01*x);
// presumably the keypoint arrays actually store (row, col) rather than
// (x, y) — confirm against the caller's convention.
float xp = a00*y + a01*x + t0;
float yp = a10*y +a11*x + t1;
// Truncate toward zero (not rounding) to integer pixel coordinates.
int x1 = (int)xp;
int y1 = (int)yp;
//printf("%d %d %f %f %d %d %f %f %f %f %f %f %f\n",x1, y1, xp, yp, x, y, a00,a01,a10,a11,t0,t1);
int index2 = index*2+1;
int index1 = index*2;
newKeypoints[index1] = x1;
newKeypoints[index2] = y1;
}
}
|
692
|
#include "Data.cuh"
#include "Data_kernel.cuh"
#include <fstream>
#include <iostream>
#include <limits>
// Default-construct an empty Data object.
// Bug fix: all host and device pointer members are now nulled so that the
// destructor (delete[] / cudaFree) is safe to run even when Read() or the
// sizing constructor was never called; deleting indeterminate pointers is
// undefined behavior (delete[] NULL and cudaFree(NULL) are both no-ops).
Data::Data():
rowSize(0), columnSize(0)//, flow(0)
{
weightLeft = weightRight = weightUp = weightDown = NULL;
weightS = weightT = height = capacity = NULL;
device_active = NULL;
device_weightLeft = device_weightRight = device_weightUp = device_weightDown = NULL;
device_weightS = device_weightT = NULL;
device_height = device_capacity = NULL;
}
// Release every host-side array and device-side buffer owned by this
// object.
// NOTE(review): this assumes all pointer members were initialized by
// Read() or the sizing constructor — deleting indeterminate pointers from
// a default-constructed object would be undefined behavior; verify every
// construction path initializes them.
Data::~Data()
{
delete[] this->weightLeft;
delete[] this->weightRight;
delete[] this->weightUp;
delete[] this->weightDown;
delete[] this->weightS;
delete[] this->weightT;
delete[] this->height;
delete[] this->capacity;
// delete[] this->bfsTag;
cudaFree(device_active);
cudaFree(device_weightLeft);
cudaFree(device_weightRight);
cudaFree(device_weightUp);
cudaFree(device_weightDown);
cudaFree(device_weightS);
cudaFree(device_weightT);
cudaFree(device_height);
cudaFree(device_capacity);
}
#define DEBUG
#ifdef DEBUG
// Debug aid: dump the four directional weight matrices plus capacity and
// height to stdout, each as a labeled row-major grid.
void Data::Print()
{
	// Table of (label, matrix) pairs printed in the original order.
	const char *labels[6] = { "Left:", "Right:", "Up:", "Down:", "Capacity:", "Height:" };
	int *mats[6] = { this->weightLeft, this->weightRight, this->weightUp,
	                 this->weightDown, this->capacity, this->height };
	std::cout << "Weight:" << std::endl << std::endl;
	for (int m = 0; m < 6; m++) {
		std::cout << labels[m] << std::endl;
		for (int i = 0; i < this->rowSize; i++) {
			for (int j = 0; j < this->columnSize; j++) {
				std::cout << mats[m][i * this->columnSize + j] << " ";
			}
			std::cout << std::endl;
		}
		std::cout << std::endl;
	}
	std::cout << std::endl;
}
#endif
// Construct a rowSize x columnSize grid: zero-initialized host arrays plus
// matching device buffers.  Only height, capacity and the 'active' flag
// are cleared on the device; the device weight buffers are left for the
// caller to fill.
Data::Data(size_t rowSize, size_t columnSize) :
rowSize(rowSize), columnSize(columnSize)//, flow(0)
{
//weight = new int[4 * rowSize * columnSize](); // (): set to zero
weightLeft = new int[rowSize * columnSize]();
weightRight = new int[rowSize * columnSize]();
weightUp = new int[rowSize * columnSize]();
weightDown = new int[rowSize * columnSize]();
weightS = new int[rowSize * columnSize]();
weightT = new int[rowSize * columnSize]();
height = new int[rowSize * columnSize]();
capacity = new int[rowSize * columnSize]();
//int *bfsTag;
// Device-side mirrors of the host arrays plus the shared 'active' flag.
cudaMalloc( &device_active, sizeof(bool) );
cudaMalloc( &device_weightLeft, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightRight, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightUp, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightDown, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightS, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightT, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_height, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_capacity, sizeof(int) * rowSize * columnSize );
cudaMemset( device_active, false, sizeof(bool) );
cudaMemset( device_height, 0, sizeof(int) * rowSize * columnSize );
cudaMemset( device_capacity, 0, sizeof(int) * rowSize * columnSize );
}
// Load a grid graph from a text file and mirror it to the device.
// File format: "rows cols" followed by, for every cell in row-major order,
// the six edge capacities in the order s, t, left, right, up, down.
// NOTE(review): the stream is never checked after open(), and the host
// arrays are re-allocated without freeing any previous ones — verify that
// Read() is only called once, on a default-constructed object, with a
// valid path.
void Data::Read(const char * filename)
{
using std::fstream;
fstream fin;
fin.open(filename, fstream::in);
fin >> this->rowSize >> this->columnSize;
// Host-side per-cell arrays, zero-initialized by the ().
this->weightLeft = new int[this->rowSize * this->columnSize]();
this->weightRight = new int[this->rowSize * this->columnSize]();
this->weightUp = new int[this->rowSize * this->columnSize]();
this->weightDown = new int[this->rowSize * this->columnSize]();
this->weightS = new int[this->rowSize * this->columnSize]();
this->weightT = new int[this->rowSize * this->columnSize]();
this->height = new int[this->rowSize * this->columnSize]();
this->capacity = new int[this->rowSize * this->columnSize]();
//this->bfsTag = new int[this->rowSize * this->columnSize]();
for (int i = 0; i < this->rowSize * this->columnSize; i++) {
// order: s, t, left, right, up, down
fin >> this->weightS[i] >> this->weightT[i];
fin >> this->weightLeft[i] >> this->weightRight[i] >> this->weightUp[i] >> this->weightDown[i];
}
fin.close();
// Device-side mirrors plus the shared 'active' flag.
cudaMalloc( &device_active, sizeof(bool) );
cudaMalloc( &device_weightLeft, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightRight, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightUp, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightDown, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightS, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_weightT, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_height, sizeof(int) * rowSize * columnSize );
cudaMalloc( &device_capacity, sizeof(int) * rowSize * columnSize );
cudaMemset( device_active, false, sizeof(bool) );
cudaMemset( device_height, 0, sizeof(int) * rowSize * columnSize );
cudaMemset( device_capacity, 0, sizeof(int) * rowSize * columnSize );
// Upload the freshly read weights.
cudaMemcpy( device_weightS, weightS, sizeof(int) * rowSize * columnSize, cudaMemcpyHostToDevice);
cudaMemcpy( device_weightT, weightT, sizeof(int) * rowSize * columnSize, cudaMemcpyHostToDevice);
cudaMemcpy( device_weightLeft, weightLeft, sizeof(int) * rowSize * columnSize, cudaMemcpyHostToDevice);
cudaMemcpy( device_weightRight, weightRight, sizeof(int) * rowSize * columnSize, cudaMemcpyHostToDevice);
cudaMemcpy( device_weightUp, weightUp, sizeof(int) * rowSize * columnSize, cudaMemcpyHostToDevice);
cudaMemcpy( device_weightDown, weightDown, sizeof(int) * rowSize * columnSize, cudaMemcpyHostToDevice);
//cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
}
// Run push passes in the four directions until no kernel reports activity,
// then copy all state back to the host arrays.  Returns 0 (the actual
// flow value is not computed here).
int Data::GetFlow()
{
	dim3 grid( (this->rowSize + 31) / 32, (this->columnSize + 31) / 32, 1);
	dim3 block(32, 1, 1);
	// Host-side copy of the device 'active' flag.
	// Bug fix: this was `new bool(true)` with no delete, leaking one bool
	// per call; a stack variable suffices.
	bool active = true;
	while (active) {
		// Reset the flag; any kernel that still pushes flow sets it again.
		cudaMemset( device_active, false, sizeof(bool) );
		Data_PushLeftForLine<<< grid, block >>>(
			this->device_active,
			this->device_weightLeft,
			this->device_weightRight,
			this->device_weightS,
			this->device_weightT,
			this->device_height,
			this->device_capacity,
			this->rowSize,
			this->columnSize
		);
		Data_PushUpForLine<<< grid, block >>>(
			this->device_active,
			this->device_weightUp,
			this->device_weightDown,
			this->device_weightS,
			this->device_weightT,
			this->device_height,
			this->device_capacity,
			this->rowSize,
			this->columnSize
		);
		Data_PushRightForLine<<< grid, block >>>(
			this->device_active,
			this->device_weightLeft,
			this->device_weightRight,
			this->device_weightS,
			this->device_weightT,
			this->device_height,
			this->device_capacity,
			this->rowSize,
			this->columnSize
		);
		Data_PushDownForLine<<< grid, block >>>(
			this->device_active,
			this->device_weightUp,
			this->device_weightDown,
			this->device_weightS,
			this->device_weightT,
			this->device_height,
			this->device_capacity,
			this->rowSize,
			this->columnSize
		);
		// Blocking copy synchronizes with the four kernels above.
		cudaMemcpy(&active, device_active, sizeof(bool), cudaMemcpyDeviceToHost);
	}
	// Mirror the final device state back to the host arrays.
	cudaMemcpy(weightS, device_weightS, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(weightT, device_weightT, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(weightLeft, device_weightLeft, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(weightRight, device_weightRight, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(weightUp, device_weightUp, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(weightDown, device_weightDown, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(height, device_height, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(capacity, device_capacity, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
	return 0;
	//return this->flow;
}
// Level-by-level BFS from the sink t on the device (Data_BfsFromT seeds
// the frontier, Data_BfsLevelK expands level maxHeight each pass until no
// cell changes), then renders the reachable region and the cut edges as an
// HTML table on stdout.
void Data::BfsFromT()
{
dim3 grid((this->rowSize * this->columnSize + 32767) / 32768, 32, 1);
dim3 block(32, 32, 1);
Data_BfsFromT<<< grid, block >>>(
this->device_weightT,
this->device_height,
this->rowSize,
this->columnSize
);
// debug
//cudaMemcpy(height, device_height, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost); this->Print();
// debug
int maxHeight = 1;
// NOTE(review): 'active' is heap-allocated and never deleted — leaks one
// bool per call; a stack variable would do.
for (bool *active = new bool(true); *active; maxHeight ++) {
cudaMemset( device_active, false, sizeof(bool) );
Data_BfsLevelK<<< grid, block >>>(
this->device_active,
this->device_weightUp,
this->device_weightDown,
this->device_weightLeft,
this->device_weightRight,
this->device_height,
this->rowSize,
this->columnSize,
// parameter
maxHeight
);
cudaMemcpy(active, device_active, sizeof(bool), cudaMemcpyDeviceToHost);
}
cudaMemcpy(height, device_height, sizeof(int) * rowSize * columnSize, cudaMemcpyDeviceToHost);
// debug
//this->Print();
// debug
// html debug
// Classify cut edges: result 1 = reachable side of a cut edge (yellow),
// result 2 = unreachable side (purple).
// NOTE(review): fixed 100x100 buffer — overflows for grids larger than
// 100 in either dimension; verify input bounds.
int result[100][100] = { 0 };
for (int i = 0; i < this->rowSize; i++) {
for (int j = 0; j < this->columnSize; j++) {
const size_t index = i * this->columnSize + j;
int v = this->height[index];
if (v != -1) {
if (i != 0 && weightLeft[index] > 0 &&
this->height[(i - 1) * this->columnSize + j] == -1) {
result[i][j] = 1;
result[i - 1][j] = 2;
}
// NOTE(review): guard looks off by one — `i != this->rowSize` is always
// true for loop indices, so (i + 1) can index past the last row;
// presumably `i != this->rowSize - 1` was intended.
if (i != this->rowSize && weightRight[index] > 0 &&
this->height[(i + 1) * this->columnSize + j] == -1) {
result[i][j] = 1;
result[i + 1][j] = 2;
}
if (j != this->columnSize - 1 && weightUp[index] > 0 &&
this->height[i * this->columnSize + j + 1] == -1) {
result[i][j] = 1;
result[i][j + 1] = 2;
}
if (j != 0 && weightDown[index] > 0 &&
this->height[i * this->columnSize + j - 1] == -1) {
result[i][j] = 1;
result[i][j - 1] = 2;
}
}
}
}
// Emit the grid as HTML table rows: cut edges, source (red), sink
// (green), and reachable cells shaded by BFS depth.
std::cout << this->rowSize << " " << this->columnSize << std::endl;
for (int i = 0; i < this->rowSize; i++) {
std::cout << "<tr>";
for (int j = 0; j < this->columnSize; j++) {
int v = this->height[i * this->columnSize + j];
//if (v == 6) {
// std::cout << "(" << i << " " << j << ")" << std::endl;
//}
//std::cout << (v > 10000 ? -1 : v) << " ";
//if (v == max_k) {
// std::cout << "<td class=\"yellow\"> </td>";
if (result[i][j] == 1) {
std::cout << "<td class=\"yellow\"> </td>";
} else if (result[i][j] == 2) {
std::cout << "<td class=\"purple\"> </td>";
} else if (this->weightS[i * this->columnSize + j] > 0) {
std::cout << "<td class=\"red\"> </td>";
} else if (this->weightT[i * this->columnSize + j] > 0) {
std::cout << "<td class=\"green\"> </td>";
} else {
if (v == -1) {
std::cout << "<td> </td>";
} else {
std::cout << "<td style=\"background-color: rgb(0, 0," << (5 * v) << ")\"> </td>";
}
}
}
std::cout << "</tr>";
//std::cout << std::endl;
}
// html debug
}
/*
void Data::BfsFromT()
{
for (int i = 0; i < this->rowSize; i++) {
for (int j = 0; j < this->columnSize; j++) {
if (this->weightT[i * this->columnSize + j] > 0) {
this->height[i * this->columnSize + j] = 1;
} else {
this->height[i * this->columnSize + j] = std::numeric_limits<int>::max();
}
}
}
//int max_k = -1;
int maxHeight = 0;
bool check = true;
for (int k = 1; check; k++) {
check = false;
for (int i = 0; i < this->rowSize; i++) {
for (int j = 0; j < this->columnSize; j++) {
if (this->height[i * this->columnSize + j] == k) {
//
if (i != 0 && weightRight[(i - 1) * this->columnSize + j] > 0 &&
this->height[(i - 1) * this->columnSize + j] > k) {
this->height[(i - 1) * this->columnSize + j] = k + 1;
check = true;
}
if (i != this->rowSize && weightLeft[(i + 1) * this->columnSize + j] > 0 &&
this->height[(i + 1) * this->columnSize + j] > k) {
this->height[(i + 1) * this->columnSize + j] = k + 1;
check = true;
}
if (j != this->columnSize - 1 && weightDown[i * this->columnSize + j + 1] > 0 &&
this->height[i * this->columnSize + j + 1] > k) {
this->height[i * this->columnSize + j + 1] = k + 1;
check = true;
}
if (j != 0 && weightUp[i * this->columnSize + j - 1] > 0 &&
this->height[i * this->columnSize + j - 1] > k) {
this->height[i * this->columnSize + j - 1] = k + 1;
check = true;
}
//
}
}
}
//std::cout << "k = " << k << std::endl;
maxHeight = k;
}
//for (int i = 0; i < this->rowSize * this->columnSize; i++) {
// std::cout << this->weight[i * 4] << " ";
// std::cout << this->weight[i * 4 + 1] << " ";
// std::cout << this->weight[i * 4 + 2] << " ";
// std::cout << this->weight[i * 4 + 3] << std::endl;
//}
// return if there is a path from t to s
// for (int i = 0; i < this->rowSize; i++) {
// for (int j = 0; j < this->columnSize; j++) {
// const size_t index = i * this->columnSize + j;
// if (this->height[index] <= maxHeight && this->weightS[index] > 0) {
// return true;
// }
// }
// }
// return false;
#define HTML_DEBUG
#ifdef HTML_DEBUG
int result[100][100] = { 0 };
for (int i = 0; i < this->rowSize; i++) {
for (int j = 0; j < this->columnSize; j++) {
const size_t index = i * this->columnSize + j;
int v = this->height[index];
if (v <= maxHeight) {
if (i != 0 && weightLeft[index] > 0 &&
this->height[(i - 1) * this->columnSize + j] > maxHeight) {
result[i][j] = 1;
result[i - 1][j] = 2;
}
if (i != this->rowSize && weightRight[index] > 0 &&
this->height[(i + 1) * this->columnSize + j] > maxHeight) {
result[i][j] = 1;
result[i + 1][j] = 2;
}
if (j != this->columnSize - 1 && weightUp[index] > 0 &&
this->height[i * this->columnSize + j + 1] > maxHeight) {
result[i][j] = 1;
result[i][j + 1] = 2;
}
if (j != 0 && weightDown[index] > 0 &&
this->height[i * this->columnSize + j - 1] > maxHeight) {
result[i][j] = 1;
result[i][j - 1] = 2;
}
}
}
}
std::cout << this->rowSize << " " << this->columnSize << std::endl;
for (int i = 0; i < this->rowSize; i++) {
std::cout << "<tr>";
for (int j = 0; j < this->columnSize; j++) {
int v = this->height[i * this->columnSize + j];
//if (v == 6) {
// std::cout << "(" << i << " " << j << ")" << std::endl;
//}
//std::cout << (v > 10000 ? -1 : v) << " ";
//if (v == max_k) {
// std::cout << "<td class=\"yellow\"> </td>";
if (result[i][j] == 1) {
std::cout << "<td class=\"yellow\"> </td>";
} else if (result[i][j] == 2) {
std::cout << "<td class=\"purple\"> </td>";
} else if (this->weightS[i * this->columnSize + j] > 0) {
std::cout << "<td class=\"red\"> </td>";
} else if (this->weightT[i * this->columnSize + j] > 0) {
std::cout << "<td class=\"green\"> </td>";
} else {
if (v > maxHeight) {
std::cout << "<td> </td>";
} else {
std::cout << "<td style=\"background-color: rgb(0, 0," << (5 * v) << ")\"> </td>";
}
}
}
std::cout << "</tr>";
//std::cout << std::endl;
}
#endif
//std::cout << this->rowSize << " " << this->columnSize << std::endl;
//for (int i = 0; i < this->rowSize; i++) {
// for (int j = 0; j < this->columnSize; j++) {
// int v = this->height[i * this->columnSize + j];
// if (v == 6) {
// std::cout << "(" << i << ", " << j << ")" << std::endl;
// }
// }
// std::cout << std::endl;
//}
}
*/
|
693
|
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include <sys/time.h>
#include<thrust/sort.h>
#include<math.h>
#define BLOCKSIZE 1024
// A vehicle's arrival state at a toll zone.
struct vehicle
{
float time ; // arrival time in minutes
int id;      // zero-based vehicle index
};
// Strict weak ordering for vehicles: earlier arrival time first, with ties
// broken by the smaller vehicle id (keeps the sort deterministic).
struct cmp {
__host__ __device__
bool operator()(const vehicle& o1, const vehicle& o2) {
    return (o1.time == o2.time) ? (o1.id < o2.id) : (o1.time < o2.time);
}
};
// Convert every speed entry into a travel time in minutes:
// matrix[i] = (dis / speed[i]) * 60.  'size' is the total number of
// (vehicle, zone) entries; n and k are unused here.
__global__ void dkernel(int n,int k,int size,float *matrix , int dis , int *speed)
{
    unsigned gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    matrix[gid] = ((float)dis / (float)speed[gid]) * (float)60;
}
// Load row i of the travel-time matrix into the arrival array:
// AT[idx] = (matrix[i*n + idx], idx).  One thread per vehicle.
__global__ void dkernel1(float *matrix ,vehicle* AT, int i,int n)
{
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x ;
if(idx < n)
{
AT[idx].time=matrix[i*n + idx];
AT[idx].id = idx ;
}
}
// Advance each vehicle to the next toll zone: its new arrival time is its
// service end time ET[idx] at the current zone plus the travel time for
// segment i+1 (looked up by vehicle id).  One thread per vehicle.
// The id field is left untouched (a redundant self-assignment was removed).
__global__ void dkernel2(float *matrix, vehicle* AT , float* ET,int i ,int n)
{
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x ;
    if(idx < n)
    {
        AT[idx].time = ET[idx] + matrix[(i+1)*n + AT[idx].id];
    }
}
// Scatter the final arrival times back into vehicle-id order, truncating
// to whole minutes: total_time[AT[idx].id] = (int)AT[idx].time.
// One thread per vehicle.
__global__ void dkernel3(int *total_time , vehicle* AT,int n)
{
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x ;
if(idx < n)
{
total_time[AT[idx].id]=(int)(AT[idx].time);
}
}
//Complete the following function
// Simulate n vehicles through k toll zones with m booths per zone and a
// fixed service time of x minutes.  Fills results[0]/results[1] with the
// first/last vehicle (1-based id) to reach each of the k+1 checkpoints,
// and results[2] with each vehicle's total travel time in whole minutes.
// Bug fix: all device buffers are now released at the end (they were
// leaked), and an unused, never-freed host copy of the travel-time matrix
// was removed.
void operations ( int n, int k, int m, int x, int dis, int *speed, int **results ) {
    int size = n * (k+1) ;
    int nblocks = ceil(float(float(size)/float(BLOCKSIZE))) ;
    // Travel-time matrix on the device: minutes for vehicle j on segment i.
    float *matrix ;
    cudaMalloc(&matrix , size*sizeof(float));
    int *gspeed ;
    cudaMalloc(&gspeed , size*sizeof(int)) ;
    cudaMemcpy(gspeed , speed, size*sizeof(int) , cudaMemcpyHostToDevice);
    dkernel<<<nblocks,1024>>>(n,k,size,matrix,dis,gspeed) ;
    struct vehicle arrival_time[n] ;
    int no_blocks = ceil(float(float(n)/float(BLOCKSIZE))) ;
    vehicle *AT;
    cudaMalloc(&AT,n*sizeof(vehicle));
    // Arrival order at the first zone, sorted by (time, id).
    dkernel1<<<no_blocks,1024>>>(matrix,AT,0,n);
    cudaMemcpy(arrival_time,AT,n*sizeof(vehicle),cudaMemcpyDeviceToHost);
    thrust::sort(arrival_time, arrival_time+n, cmp());
    float end_time[n];
    float* ET;
    cudaMalloc(&ET, n*sizeof(float)) ;
    for(int i=0;i<k;i++)
    {
        results[0][i]=arrival_time[0].id+1;
        results[1][i]=arrival_time[n-1].id+1;
        // Booth queueing: vehicle j (in arrival order) waits until the
        // booth freed by vehicle j-m, then is serviced for x minutes.
        for(int j=0;j<n;j++)
        {
            if(j<m)
            {
                end_time[j]=arrival_time[j].time + float(x) ;
            }
            else
            {
                if(end_time[j-m] > arrival_time[j].time)
                {
                    float wait_time = end_time[j-m] - arrival_time[j].time ;
                    end_time[j] = arrival_time[j].time + wait_time + float(x) ;
                }
                else
                {
                    end_time[j] = arrival_time[j].time + float(x) ;
                }
            }
        }
        // Advance everyone to the next zone and re-sort by arrival there.
        cudaMemcpy(ET , end_time, n*sizeof(float) , cudaMemcpyHostToDevice);
        cudaMemcpy(AT , arrival_time, n*sizeof(vehicle) , cudaMemcpyHostToDevice);
        dkernel2<<<no_blocks,1024>>>(matrix , AT , ET, i , n);
        cudaMemcpy(arrival_time,AT,n*sizeof(vehicle),cudaMemcpyDeviceToHost);
        thrust::sort(arrival_time, arrival_time+n, cmp());
    }
    results[0][k]=arrival_time[0].id+1;
    results[1][k]=arrival_time[n-1].id+1;
    // Gather per-vehicle totals back into id order.
    int *total_time ;
    cudaMalloc(&total_time,n*sizeof(int));
    cudaMemcpy(AT , arrival_time, n*sizeof(vehicle) , cudaMemcpyHostToDevice);
    dkernel3<<<no_blocks , 1024>>>(total_time,AT,n);
    cudaMemcpy(results[2],total_time,n*sizeof(int),cudaMemcpyDeviceToHost);
    // Release device scratch buffers (previously leaked).
    cudaFree(matrix);
    cudaFree(gspeed);
    cudaFree(AT);
    cudaFree(ET);
    cudaFree(total_time);
}
// Reads the simulation parameters and speed matrix from argv[1], runs and
// times operations(), and writes the three result rows to argv[2].
int main(int argc,char **argv){
    //variable declarations
    int n,k,m,x;
    int dis;
    // Guard: both the input and output file names are required
    // (previously a missing argument dereferenced a null argv entry).
    if ( argc < 3 ) {
        printf( "usage: %s <inputfile> <outputfile>\n", argv[0] );
        return 0;
    }
    //Input file pointer declaration
    FILE *inputfilepointer;
    //File Opening for read
    char *inputfilename = argv[1];
    inputfilepointer = fopen( inputfilename , "r");
    //Checking if file ptr is NULL
    if ( inputfilepointer == NULL ) {
        printf( "input.txt file failed to open." );
        return 0;
    }
    fscanf( inputfilepointer, "%d", &n ); //scaning for number of vehicles
    fscanf( inputfilepointer, "%d", &k ); //scaning for number of toll tax zones
    fscanf( inputfilepointer, "%d", &m ); //scaning for number of toll tax points
    fscanf( inputfilepointer, "%d", &x ); //scaning for toll tax zone passing time
    fscanf( inputfilepointer, "%d", &dis ); //scaning for distance between two consecutive toll tax zones
    // scanning for speeds of each vehicles for every subsequent toll tax combinations
    int *speed = (int *) malloc ( n*( k+1 ) * sizeof (int) );
    for ( int i=0; i<=k; i++ ) {
        for ( int j=0; j<n; j++ ) {
            fscanf( inputfilepointer, "%d", &speed[i*n+j] );
        }
    }
    // results is in the format of first crossing vehicles list, last crossing vehicles list
    // and total time taken by each vehicles to pass the highway
    int **results = (int **) malloc ( 3 * sizeof (int *) );
    results[0] = (int *) malloc ( (k+1) * sizeof (int) );
    results[1] = (int *) malloc ( (k+1) * sizeof (int) );
    results[2] = (int *) malloc ( (n) * sizeof (int) );
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);
    // Function given to implement
    operations ( n, k, m, x, dis, speed, results );
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time taken by function to execute is: %.6f ms\n", milliseconds);
    // Output file pointer declaration
    char *outputfilename = argv[2];
    FILE *outputfilepointer;
    outputfilepointer = fopen(outputfilename,"w");
    // First crossing vehicles list
    for ( int i=0; i<=k; i++ ) {
        fprintf( outputfilepointer, "%d ", results[0][i]);
    }
    fprintf( outputfilepointer, "\n");
    //Last crossing vehicles list
    for ( int i=0; i<=k; i++ ) {
        fprintf( outputfilepointer, "%d ", results[1][i]);
    }
    fprintf( outputfilepointer, "\n");
    //Total time taken by each vehicles to pass the highway
    for ( int i=0; i<n; i++ ) {
        fprintf( outputfilepointer, "%d ", results[2][i]);
    }
    fprintf( outputfilepointer, "\n");
    fclose( outputfilepointer );
    fclose( inputfilepointer );
    // Release host allocations and timing events (previously leaked).
    free(speed);
    free(results[0]);
    free(results[1]);
    free(results[2]);
    free(results);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
694
|
#include "includes.h"
// Deliberately store to an invalid hard-coded address.  Presumably this
// exists to trigger a device memory fault for exercising cuda-memcheck /
// compute-sanitizer — confirm before "fixing" it.
__device__ void out_of_bounds_function(void) {
*(int*) 0x87654320 = 42;
}
// Kernel wrapper so the intentionally-faulting device function above can
// be launched from the host.
__global__ void out_of_bounds_kernel(void) {
out_of_bounds_function();
}
|
695
|
// Copyright (c) 2013 Craig Wright (kungfucraig@gmail.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
// Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies
// or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
// OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <cuda_runtime.h>
//! Square the data in place: one thread per element, data[tid] = data[tid]^2.
//! Assumes a single 1D block whose x-dimension covers the array length.
__global__ void square(float *data)
{
    const unsigned int idx = threadIdx.x;
    const float v = data[idx];
    data[idx] = v * v;
}
//! Host wrapper: launch one block of `len` threads over `dataD` (device ptr).
//! Returns the launch status from cudaGetLastError (e.g. invalid configuration
//! if `len` exceeds the device's max threads per block).
extern "C" cudaError_t runKernel(int len, float *dataD)
{
    square<<< dim3(1, 1, 1), dim3(len, 1, 1) >>>(dataD);
    return cudaGetLastError();
}
|
696
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <iostream>
// Binary blend functor: alpha * x + (1 - alpha) * (beta * y).
// Marked __host__ __device__ (backward-compatible generalization) so it works
// both inside thrust device algorithms and in plain host code/tests.
struct sample_functor {
    double alpha;  // blend weight applied to x
    double beta;   // scale applied to y before blending

    __host__ __device__ sample_functor(double _alpha, double _beta)
        : alpha(_alpha), beta(_beta) {}

    __host__ __device__ double operator() (const double& x, const double& y) const {
        return alpha * x + (1.0 - alpha) * (beta * y);
    }
};
int main() {
    // Blend x and y element-wise on the GPU with sample_functor, then print
    // the three results from the host.
    const double alpha = 0.005;
    const double beta = 0.1;
    thrust::host_vector<double> host_x{1.1, 3.3, 2.2};
    thrust::host_vector<double> host_y{6.6, 7.7, 8.8};
    thrust::host_vector<double> host_output(3);
    // Mirror the inputs on the device.
    thrust::device_vector<double> device_x(3);
    thrust::device_vector<double> device_y(3);
    thrust::device_vector<double> device_output(3);
    thrust::copy(host_x.begin(), host_x.end(), device_x.begin());
    thrust::copy(host_y.begin(), host_y.end(), device_y.begin());
    // device_output[i] = alpha*x[i] + (1-alpha)*beta*y[i]
    sample_functor blend(alpha, beta);
    thrust::transform(device_x.begin(), device_x.end(), device_y.begin(),
                      device_output.begin(), blend);
    thrust::copy(device_output.begin(), device_output.end(), host_output.begin());
    std::cout << host_output[0] << ", " << host_output[1] << ", " << host_output[2] << std::endl;
    // 0.6622, 0.78265, 0.8866000000000002
    return 0;
}
|
697
|
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <cuda_runtime.h>
#include <chrono>
using namespace std;
// "Infinity" sentinel; (1<<30)-1 so that INF + INF still fits in a signed int.
const int INF = ((1 << 30) - 1);
// const int V = 50010;
void input(char* inFileName);   // read binary graph into `dist`
void output(char* outFileName); // clamp `Result` to INF and write in binary
void block_FW(int B);           // blocked Floyd-Warshall driver, B = tile width
int ceil(int a, int b);         // integer ceiling division (a, b > 0)
// void cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int n, m;                       // vertex count, edge count (from input file)
// static int Dist[V][V];
// Host-side n*n distance matrices: input graph and final result.
static __align__(32) int *dist, *Result;
__global__ void PHASE_ONE(int Round, int B, int* distance, int V){
    // Phase 1 of blocked Floyd-Warshall: relax the single pivot block through
    // itself. Launched with a 1x1 grid and a BxB block; dynamic shared memory
    // of B*B ints holds the pivot tile.
    extern __shared__ __align__(32) int shared_distance[];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int gx = Round * B + tx;  // global column of this thread's element
    const int gy = Round * B + ty;  // global row of this thread's element
    // Stage the pivot tile, padding out-of-range entries with INF so every
    // thread participates in the barriers below.
    shared_distance[ty * B + tx] = (gy < V && gx < V) ? distance[gy * V + gx] : INF;
    __syncthreads();
    #pragma unroll
    for(int k = 0; k < B; k++){
        const int cand = shared_distance[ty * B + k] + shared_distance[k * B + tx];
        if(shared_distance[ty * B + tx] > cand){
            shared_distance[ty * B + tx] = cand;
        }
        // All threads must see iteration k's updates before iteration k+1.
        __syncthreads();
    }
    if(gy < V && gx < V){
        distance[gy * V + gx] = shared_distance[ty * B + tx];
    }
}
__global__ void PHASE_TWO(int Round, int B, int* distance, int V){
    // Phase 2 of blocked Floyd-Warshall: relax the pivot row and pivot column
    // through the pivot block. Grid is (round, 2): blockIdx.y == 0 handles the
    // pivot row, blockIdx.y == 1 the pivot column. Dynamic shared memory:
    // 2 * B * B ints (pivot tile + the row/column tile being relaxed).
    //
    // Fixes vs. the original:
    //  - Out-of-range threads no longer `return` before the barriers; a
    //    divergent early exit skipped the in-loop __syncthreads(), which is
    //    undefined behavior. They now work on INF padding instead.
    //  - Removed the unreachable duplicate `blockIdx.x == Round` check that
    //    sat after the first barrier.
    //
    // Skip the pivot block itself. blockIdx.x is uniform across the block, so
    // returning here (before any barrier) is safe.
    if(blockIdx.x == Round){
        return;
    }
    extern __shared__ __align__(32) int shared[];
    int* pivot_tile = shared;         // B*B: pivot block (result of phase 1)
    int* edge_tile  = shared + B*B;   // B*B: row/column tile being relaxed
    const int x = threadIdx.x;
    const int y = threadIdx.y;
    const int p_x = Round * B + x;
    const int p_y = Round * B + y;
    // Stage the pivot tile; INF padding keeps out-of-range threads in step.
    pivot_tile[y * B + x] = (p_y < V && p_x < V) ? distance[p_y * V + p_x] : INF;
    // Global coordinates of this thread's element in the pivot row or column.
    int idx_x, idx_y;
    if(blockIdx.y == 0){
        idx_x = blockIdx.x * B + x;   // pivot row: same rows as pivot block
        idx_y = p_y;
    }else{
        idx_x = p_x;                  // pivot column: same columns as pivot block
        idx_y = blockIdx.x * B + y;
    }
    edge_tile[y * B + x] = (idx_y < V && idx_x < V) ? distance[idx_y * V + idx_x] : INF;
    __syncthreads();
    // Relax through the pivot. Every thread (including padding threads) runs
    // the full loop so the in-loop barriers are reached uniformly. INF + INF
    // still fits in int, so padded sums cannot overflow or win a comparison.
    if(blockIdx.y == 0){
        for(int k = 0; k < B; k++){
            const int cand = pivot_tile[y * B + k] + edge_tile[k * B + x];
            if(edge_tile[y * B + x] > cand){
                edge_tile[y * B + x] = cand;
            }
            __syncthreads();
        }
    }else{
        for(int k = 0; k < B; k++){
            const int cand = edge_tile[y * B + k] + pivot_tile[k * B + x];
            if(edge_tile[y * B + x] > cand){
                edge_tile[y * B + x] = cand;
            }
            __syncthreads();
        }
    }
    // Only in-range threads write back.
    if(idx_y < V && idx_x < V){
        distance[idx_y * V + idx_x] = edge_tile[y * B + x];
    }
}
__global__ void PHASE_THREE(int Round, int B, int* distance, int V){
    // Phase 3 of blocked Floyd-Warshall: relax every remaining block through
    // the pivot row and pivot column. Dynamic shared memory: 2*B*B ints.
    // grid size (Round*Round)
    // Blocks on the pivot row/column were handled by phases 1-2. blockIdx is
    // uniform per block, so this return happens before any barrier and is safe.
    if(blockIdx.x == Round || blockIdx.y == Round){
        return;
    }
    extern __shared__ __align__(32) int shared[];
    int* shared_row = shared;        // pivot-row tile for this block column
    int* shared_col = shared + B*B;  // pivot-column tile for this block row
    int x = threadIdx.x;
    int y = threadIdx.y;
    int idx_x = blockIdx.x * B + x;  // global column of this thread's element
    int idx_y = blockIdx.y * B + y;  // global row of this thread's element
    int row_i = Round * B + y;
    int row_j = idx_x;
    int col_i = idx_y;
    int col_j = Round * B + x;
    // Stage both tiles, padding out-of-range entries with INF.
    if(row_i < V && row_j < V){
        shared_row[y * B + x] = distance[row_i * V + row_j];
    }else{
        shared_row[y * B + x] = INF;
    }
    if(col_i < V && col_j < V){
        shared_col[y * B + x] = distance[col_i * V + col_j];
    }else{
        shared_col[y * B + x] = INF;
    }
    __syncthreads();
    // Returning AFTER the final barrier is safe: the loop below only reads
    // shared memory and there are no further __syncthreads() calls.
    if(idx_y >= V || idx_x >= V) {
        return;
    }
    // Each thread relaxes its own element; no cross-thread shared writes here.
    int temp = distance[idx_y * V + idx_x];
    // #pragma unroll
    // #pragma GCC ivdep
    for(int k = 0; k < B; k++){
        if(temp > shared_col[y * B + k] + shared_row[k * B + x]){
            temp = shared_col[y * B + k] + shared_row[k * B + x];
        }
    }
    distance[idx_y * V + idx_x] = temp;
}
int main(int argc, char* argv[]) {
    // Usage: <input binary graph> <output binary distances>.
    // Fix: the original dereferenced argv[1]/argv[2] without checking argc.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    input(argv[1]);
    int B = 32;        // tile width; blocks launch with B*B = 1024 threads
    block_FW(B);
    output(argv[2]);
    return 0;
}
// Read a binary graph: header (n, m) followed by m (src, dst, weight) int
// triples. Allocates and fills the n*n `dist` matrix (0 on the diagonal, INF
// elsewhere) and allocates `Result`.
// Fix: the original ignored fopen/fread/malloc failures, which segfaulted on
// a missing or truncated file; now each failure is reported and exits.
void input(char* infile) {
    FILE* file = fopen(infile, "rb");
    if (file == NULL) {
        fprintf(stderr, "cannot open input file\n");
        exit(EXIT_FAILURE);
    }
    if (fread(&n, sizeof(int), 1, file) != 1 ||
        fread(&m, sizeof(int), 1, file) != 1) {
        fprintf(stderr, "failed to read graph header\n");
        exit(EXIT_FAILURE);
    }
    dist = (int*)malloc((size_t)n * n * sizeof(int));
    Result = (int*)malloc((size_t)n * n * sizeof(int));
    if (dist == NULL || Result == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            dist[i * n + j] = (i == j) ? 0 : INF;
        }
    }
    int pair[3];
    for (int i = 0; i < m; ++i) {
        if (fread(pair, sizeof(int), 3, file) != 3) {
            fprintf(stderr, "truncated edge list\n");
            exit(EXIT_FAILURE);
        }
        dist[pair[0] * n + pair[1]] = pair[2];
    }
    fclose(file);
}
// Clamp any distance at or above INF back to INF, then write the n*n result
// matrix row by row in binary.
void output(char* outFileName) {
    FILE* outfile = fopen(outFileName, "w");
    for (int row = 0; row < n; ++row) {
        int* rowPtr = &Result[row * n];
        for (int col = 0; col < n; ++col) {
            if (rowPtr[col] >= INF) {
                rowPtr[col] = INF;
            }
        }
        fwrite(rowPtr, sizeof(int), n, outfile);
    }
    fclose(outfile);
}
// Integer ceiling division for non-negative a and positive b.
int ceil(int a, int b) { return a / b + (a % b != 0 ? 1 : 0); }
// Run blocked Floyd-Warshall on the device: copy `dist` up, iterate the three
// phases once per round (round = ceil(n/B)), copy the answer into `Result`.
void block_FW(int B) {
    const int rounds = ceil(n, B);
    int *device_distance;
    int deviceID;
    cudaDeviceProp prop;
    dim3 threads(B, B);
    dim3 grid_phase1(1, 1);
    dim3 grid_phase2(rounds, 2);
    dim3 grid_phase3(rounds, rounds);
    cudaGetDevice(&deviceID);
    cudaGetDeviceProperties(&prop, deviceID);
    if (!prop.deviceOverlap){
        printf("!prop.deviceOverlap\n");
    }
    cudaSetDevice(0);
    const size_t matrixBytes = (size_t)n * n * sizeof(int);
    const size_t tileBytes = (size_t)B * B * sizeof(int);
    cudaMalloc(&device_distance, matrixBytes);
    cudaMemcpy(device_distance, dist, matrixBytes, cudaMemcpyHostToDevice);
    for (int r = 0; r < rounds; r++) {
        PHASE_ONE<<<grid_phase1, threads, tileBytes>>>(r, B, device_distance, n);
        PHASE_TWO<<<grid_phase2, threads, tileBytes * 2>>>(r, B, device_distance, n);
        PHASE_THREE<<<grid_phase3, threads, tileBytes * 2>>>(r, B, device_distance, n);
    }
    cudaMemcpy(Result, device_distance, matrixBytes, cudaMemcpyDeviceToHost);
    cudaFree(device_distance);
}
// void cal(
// int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
// int block_end_x = block_start_x + block_height;
// int block_end_y = block_start_y + block_width;
// for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
// for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// // To calculate B*B elements in the block (b_i, b_j)
// // For each block, it need to compute B times
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
// // To calculate original index of elements in the block (b_i, b_j)
// // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
// int block_internal_start_x = b_i * B;
// int block_internal_end_x = (b_i + 1) * B;
// int block_internal_start_y = b_j * B;
// int block_internal_end_y = (b_j + 1) * B;
// if (block_internal_end_x > n) block_internal_end_x = n;
// if (block_internal_end_y > n) block_internal_end_y = n;
// for (int i = block_internal_start_x; i < block_internal_end_x; ++i) {
// for (int j = block_internal_start_y; j < block_internal_end_y; ++j) {
// if (Dist[i][k] + Dist[k][j] < Dist[i][j]) {
// Dist[i][j] = Dist[i][k] + Dist[k][j];
// }
// }
// }
// }
// }
// }
// }
|
698
|
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void kernel(void) {
    // Every launched thread prints this line once (device printf is
    // serialized; ordering between threads is unspecified).
    printf("GPU: Hello world\n");
}
int main(int argc, char* argv[]) {
    // 2 blocks x 4 threads => 8 prints; synchronize so the device printf
    // buffer is flushed before the process exits.
    const dim3 numBlocks(2);
    const dim3 threadsPerBlock(4);
    kernel<<<numBlocks, threadsPerBlock>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
699
|
#include "user.cuh"
using namespace std;
int main(int argc, char *argv[]) {
    // Benchmark a host max-reduction against a device kernel over a float
    // vector, sweeping threads-per-block and block counts, logging averaged
    // timings plus the computed max into a CSV file.
    // NOTE(review): CSV_Data, generateVector, host_maxValueVector,
    // device_maxValueVector, VECTOR_SIZE, ITERATION and the NUM_* limits all
    // come from user.cuh — their semantics are assumed here, not visible.
    // Prepare file I/O: CSV path from argv[1], or a default name.
    string fileName;
    if (argc == 1) fileName = "maxFloatNum.csv";
    else fileName = argv[1];
    CSV_Data cd(fileName, true);
    // Generate Vector (also reports the true min/max for reference)
    printf("[INFO] Generating vector...\n");
    float minVal = INFINITY, maxVal = -INFINITY;
    float *p_host_vector = (float *)malloc(VECTOR_SIZE * sizeof(float));
    generateVector(p_host_vector, VECTOR_SIZE, &minVal, &maxVal);
    printf("[INFO] Max value:%.4f, Min Value:%.4f\n", maxVal, minVal);
    // Copy vector from host to device
    float *p_host_max_val = (float *)malloc(sizeof(float)), *p_device_vector;
    float *p_host_device_max_val = (float *)malloc(sizeof(float)), *p_device_max_val;
    cudaMalloc((void **)&p_device_vector, VECTOR_SIZE * sizeof(float));
    cudaMalloc((void **)&p_device_max_val, sizeof(float));
    cudaMemcpy(p_device_vector, p_host_vector, VECTOR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    printf("Starting Benchmark (ITERATION: %d)\n", ITERATION);
    // Host Code: time the CPU reduction, averaged over ITERATION runs
    // (gettimeofday deltas converted to milliseconds).
    struct timeval startTime, endTime;
    float host_exec_time = 0.0;
    for (int iter = 0; iter < ITERATION; iter++) {
        gettimeofday(&startTime, NULL);
        host_maxValueVector(p_host_vector, VECTOR_SIZE, p_host_max_val);
        gettimeofday(&endTime, NULL);
        float tmp_exec_time = (endTime.tv_sec - startTime.tv_sec) * 1000. + (endTime.tv_usec - startTime.tv_usec) / 1000.;
        host_exec_time += tmp_exec_time;
    }
    host_exec_time /= ITERATION;
    cd.AddHostData(host_exec_time, p_host_max_val, sizeof(float));
    // Device Code: sweep launch configurations (both dimensions doubled each
    // step within the user.cuh-defined bounds).
    for (int numThreadsPerBlk = NUM_THREADS_BASE; numThreadsPerBlk <= NUM_THREADS_MAX; numThreadsPerBlk*=2) {
        for (int numBlks = NUM_THREAD_BLKS_FROM; numBlks <= NUM_THREAD_BLKS_TO; numBlks*=2) {
            int numElements = VECTOR_SIZE;
            // Elements each thread must process when the grid is smaller than
            // the vector (ceiling division), else 1.
            int numOps = numElements > (numBlks * numThreadsPerBlk) ?
                numElements / (numBlks * numThreadsPerBlk) + (numElements % (numBlks * numThreadsPerBlk) ? 1 : 0) : 1;
            int *p_device_block_cnt, host_block_cnt;
            cudaMalloc((void **)&p_device_block_cnt, sizeof(int));
            dim3 gridSize(numBlks);
            dim3 blockSize(numThreadsPerBlk);
            float avg_exec_time = 0.0;
            for (int iter = 0; iter < ITERATION; iter++) {
                cudaEvent_t cuda_start, cuda_end;
                float tmp_exec_time;
                cudaEventCreate(&cuda_start);
                cudaEventCreate(&cuda_end);
                // Prepare data: reset the result and the block counter each run
                cudaMemset(p_device_max_val, 0, sizeof(float));
                cudaMemset(p_device_block_cnt, 0, sizeof(int));
                // Execute Kernel; dynamic shared memory = one float per thread
                // (presumably a per-block reduction scratchpad — see user.cuh).
                cudaEventRecord(cuda_start, 0);
                device_maxValueVector<<<gridSize, blockSize, numThreadsPerBlk*sizeof(float)>>>
                    (p_device_vector, p_device_max_val, VECTOR_SIZE, p_device_block_cnt, numOps);
                cudaDeviceSynchronize();
                cudaEventRecord(cuda_end, 0);
                cudaEventSynchronize(cuda_end);
                cudaEventElapsedTime(&tmp_exec_time, cuda_start, cuda_end);
                cudaEventDestroy(cuda_start);
                cudaEventDestroy(cuda_end);
                avg_exec_time += tmp_exec_time;
            }
            avg_exec_time /= ITERATION; // Get average of execution time.
            // Print Results: copy the device max (and block counter) back and
            // log this configuration's row to the CSV.
            cudaMemcpy(&host_block_cnt, p_device_block_cnt, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(p_host_device_max_val, p_device_max_val, sizeof(float), cudaMemcpyDeviceToHost);
            cd.AddDeviceData(numThreadsPerBlk, numBlks, avg_exec_time, p_host_device_max_val, sizeof(float));
            cudaFree(p_device_block_cnt);
        }
    }
    // Deallocation
    cudaFree(p_device_vector);
    cudaFree(p_device_max_val);
    free(p_host_vector);
    free(p_host_max_val);
    free(p_host_device_max_val);
}
|
700
|
#include <iostream>
// Element-wise vector add: C[i] = A[i] + B[i] for i < n.
// One thread per element; threads past the end bail out.
__global__
void vecAddKernel(float *A, float *B, float *C, int n)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    C[idx] = A[idx] + B[idx];
}
// Abort with the original message format if a CUDA call failed.
static void checkCuda(cudaError_t err, int line)
{
    if (err != cudaSuccess)
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}

// Computes h_C = h_A + h_B (n floats) on the device.
// Fix: the original checked only the first cudaMemcpy; the allocations, the
// second copy, the kernel launch, and the device-to-host copy were unchecked
// and could silently leave garbage in h_C. Every call is now checked.
void vecAdd(float* h_A, float* h_B, float* h_C, int n)
{
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    checkCuda(cudaMalloc((void **) &d_A, size), __LINE__);
    checkCuda(cudaMalloc((void **) &d_B, size), __LINE__);
    checkCuda(cudaMalloc((void **) &d_C, size), __LINE__);
    checkCuda(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice), __LINE__);
    checkCuda(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice), __LINE__);
    vecAddKernel<<<ceil(n/256.0), 256>>>(d_A, d_B, d_C, n);
    checkCuda(cudaGetLastError(), __LINE__);  // launch-configuration errors
    // Blocking copy also surfaces asynchronous kernel execution errors.
    checkCuda(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost), __LINE__);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
int main()
{
    // Add two small vectors on the GPU, then echo the inputs and the result.
    const int N = 4;
    float A[] = {1, 2, 3, 4};
    float B[] = {1, 2, 3, 4};
    float C[N] = {};
    vecAdd(A, B, C, N);
    for (int i = 0; i < N; ++i)
        std::cout << A[i] << " ";
    std::cout << "\n";
    for (int i = 0; i < N; ++i)
        std::cout << B[i] << " ";
    std::cout << "\n";
    for (int i = 0; i < N; ++i)
        std::cout << C[i] << " ";
    std::cout << "\n";
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.