serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
4,901 | #include <stdio.h>
__device__ unsigned A(unsigned a, unsigned b)
{
    // Carry-free addition: XOR produces the partial sum, AND<<1 the carry
    // bits. Iterate until no carries remain; result is a + b (mod 2^32).
    while (b)
    {
        unsigned carry = (a & b) << 1;
        a ^= b;
        b = carry;
    }
    return a;
}
__device__ unsigned G(unsigned a, unsigned b)
{
    // Returns 1 when gcd(a, b) == 1 (a and b are coprime), else 0.
    // Rewritten from `for(;b;b^=a^=b^=a%=b);` — that chained-XOR swap
    // modifies `a` and `b` several times in one expression, which is
    // undefined behavior before C++17 and needlessly cryptic after it.
    while (b)
    {
        unsigned r = a % b;
        a = b;
        b = r;
    }
    // Original tail was `return !--a;`, i.e. true exactly when gcd == 1
    // (and false for the a == 0 case, which `a == 1u` preserves).
    return a == 1u;
}
__device__ unsigned F(unsigned a, unsigned b)
{
    // Count how many k in [1, b] satisfy G(a, k), i.e. gcd(a, k) == 1.
    // When called as F(x, x) this is Euler's totient of x.
    unsigned count = 0;
    for (unsigned k = b; k != 0; --k)
        count += G(a, k);
    return count;
}
__device__ unsigned S(unsigned a, unsigned b)
{
// Perfect-square test by subtracting successive odd numbers (called with
// b == 1): repeatedly compute a -= b, then advance b by 2 (1, 3, 5, ...).
// a is a perfect square iff the subtraction lands exactly on 0.
for( ; ; )
{
if ( a )
{
if ( a >= b )
{
unsigned x = A(a, 1+~b);   // 1+~b is two's-complement -b, so x = a - b
unsigned y = b + 2;        // next odd number
a = x;
b = y;
}
else return 0;   // nonzero remainder smaller than next odd: not a square
}
else return 1;   // consumed exactly: perfect square
}
}
__global__ void kernel(char* result)
{
// Classify each x in [1, 200000] (one thread per x; x is 1-based thread id):
//   0 -> x not divisible by its decimal digit sum
//   1 -> divisible, and S(F(x,x),1) holds
//   2 -> divisible, but S(F(x,x),1) fails
int t, sum, x = blockIdx.x*blockDim.x + threadIdx.x + 1;
int X = x;
// sum = decimal digit sum of x
for( sum=0; X; X/=10 ) sum += X % 10;
if ( x % sum ) t = 0;
else if ( S(F(x,x),1) ) t = 1;
else t = 2;
result[x] = t;   // result is indexed by x itself (slot 0 unused)
}
int main()
{
// Row r of str is the label for classification r written by the kernel:
// 0 -> ":Ugly", 1 -> ":Good", 2 -> ":Bad".
char str[10][10] = { ":Ugly", ":Good", ":Bad"};
char *devR;
char result[200005];
cudaMalloc( (void**)&devR, 200005 );
// 200 blocks x 1000 threads = 200000 threads, one per x in [1, 200000].
kernel<<<200, 1000>>>(devR);
// The blocking cudaMemcpy synchronizes with the kernel before copying.
cudaMemcpy( result, devR, 200005, cudaMemcpyDeviceToHost );
for( unsigned x=1; x<=200000; x++ )
{
printf("%d", x);
puts( str[result[x]] );
}
printf("Who's 25?\n");
cudaFree( devR );
return 0;
}
|
4,902 | /************************************************************
This program uses Cuda and an Nvidia GPU for matrix multiplication.
A serial version and a parallel version are both implemented. The
serial version uses a single thread on the GPU to do all the
calculations. However, the parallel version uses one thread per
element to calculate each individual element of the answer matrix.
Known issues:
This program only works on matrices smaller than or equal to
256x256. 1024x1024 will cause segmentation faults and 512x512
simply causes the program to almost crash and return times of
0 for each kernel call.
The matrices must be square and all matrices must be the same
size.
The total number of threads needed (size of the matrix squared)
must be evenly divisible by the number of threads used per block.
If a grid is declared that contains a number of threads less than
the declared number of threads per block, the kernel will return
a value of 0 for the elements of the answer array corresponding
to all threads in that grid. For example, if the matrices being
used are 20x20 and the declared number of threads per block is
16x16, then the grids not containing 16x16 threads will return 0
for all elements. The kernel call will return a 20x20 matrix with
a 16x16 matrix inside it that is correct, and all the other
elements will be 0.
*/
#include <stdio.h>
#include <stdlib.h>
#define MATSIZE 256
#define THREADS_PER_BLOCK 32
//serial matrix multiplication kernel
__global__ void smultiply(int* g_a, int* g_b, int* g_c)
{
    // Serial baseline: g_c += g_a * g_b for MATSIZE x MATSIZE row-major
    // matrices. Launched as <<<1,1>>>, so a single thread walks every
    // output element; g_c is expected to be zero-initialized by the caller.
    for (int row = 0; row < MATSIZE; ++row)
        for (int col = 0; col < MATSIZE; ++col)
            for (int k = 0; k < MATSIZE; ++k)
                g_c[row * MATSIZE + col] +=
                    g_a[row * MATSIZE + k] * g_b[k * MATSIZE + col];
}
//parallel matrix multiplication kernel
//parallel matrix multiplication kernel
__global__ void pmultiply(int* g_a, int* g_b, int* g_d)
{
    // One thread per output element of g_d = g_a * g_b (row-major,
    // MATSIZE x MATSIZE). Expects a 2D launch: x indexes rows, y columns.
    int z, sum = 0;
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // output row
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // output column
    // Guard the grid tail: without this, any launch whose grid overshoots
    // MATSIZE (MATSIZE not a multiple of the block dimension) writes out of
    // bounds in g_d and reads out of bounds in g_a/g_b.
    if (x >= MATSIZE || y >= MATSIZE)
        return;
    for (z = 0; z < MATSIZE; ++ z)
    {
        sum += g_a[x * MATSIZE + z] * g_b[y + z * MATSIZE];
    }
    g_d[(x * MATSIZE) + y] = sum;
}
int main()
{
// Host matrices. NOTE(review): four MATSIZE*MATSIZE int arrays (~1 MB at
// 256x256) live on the stack; larger MATSIZE values overflow it, which
// matches the 512/1024 failures described in the file header — consider
// heap allocation.
int a[MATSIZE * MATSIZE] = {};
int b[MATSIZE * MATSIZE] = {};
int c[MATSIZE * MATSIZE] = {};   // serial-kernel result
int d[MATSIZE * MATSIZE] = {};   // parallel-kernel result
int i, *g_a, *g_b, *g_c, *g_d;
int g_size = MATSIZE * MATSIZE * sizeof(int);
cudaEvent_t start, stop;
float time;
for (i = 0; i < MATSIZE * MATSIZE; ++ i)
{
a[i] = rand() % 1000;
b[i] = rand() % 1000;
//fill the arrays to be multiplied with
//random numbers between 0 and 999
c[i] = 0;
d[i] = 0;
//make sure both answer arrays are
//filled with 0s
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
//used for timing the Cuda run
// NOTE(review): no CUDA call in this function is error-checked, and the
// events are never destroyed with cudaEventDestroy.
cudaMalloc(&g_a, g_size); //allocate memory on Cuda device
cudaMemcpy(g_a, a, g_size, cudaMemcpyHostToDevice);
//copy matrix A onto the Cuda device
cudaMalloc(&g_b, g_size);
cudaMemcpy(g_b, b, g_size, cudaMemcpyHostToDevice);
cudaMalloc(&g_c, g_size);
cudaMemcpy(g_c, c, g_size, cudaMemcpyHostToDevice);
dim3 dimGrid((MATSIZE / THREADS_PER_BLOCK), (MATSIZE / THREADS_PER_BLOCK));
//create the needed number of grids
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
//create the needed number of threads in each grid
//serial Cuda kernel call
cudaEventRecord(start, 0);
smultiply<<<1,1>>>(g_a, g_b, g_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//get run time
cudaMemcpy(c, g_c, g_size, cudaMemcpyDeviceToHost);
//copy results back to host device
cudaFree(g_c);
//free up unused user allocated memory on Cuda device
printf("Time = %f milliseconds\n", time);
//create a second answer matrix to use
//This is not done until now so that memory on the
//Cuda device is not wasted.
cudaMalloc(&g_d, g_size);
cudaMemcpy(g_d, d, g_size, cudaMemcpyHostToDevice);
//parallel Cuda kernel call
cudaEventRecord(start, 0);
pmultiply<<<dimGrid,threads>>>(g_a, g_b, g_d);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Time = %f milliseconds\n", time);
cudaMemcpy(d, g_d, g_size, cudaMemcpyDeviceToHost);
cudaFree(g_a);
cudaFree(g_b);
cudaFree(g_d);
//free up all unused user allocated memory on Cuda device
printf("\n");
/*The next 2 for loops print out the values of both
answer matrices. This can be used to ensure that both
kernel calls are producing the same results, and that
the results are correct. This section can be commented
out when the user only wants the timing of a run.*/
for (i = 1; i <= MATSIZE * MATSIZE; ++ i)
{
if (i % MATSIZE == 0)
{
printf("%d ", c[i-1]);
printf("\n");
}
else
printf("%d ", c[i-1]);
}
printf("\n");
for (i = 1; i <= MATSIZE * MATSIZE; ++ i)
{
if ( i % MATSIZE == 0)
{
printf("%d ", d[i-1]);
printf("\n");
}
else
printf("%d ", d[i-1]);
}
}
4,903 | #include "includes.h"
__global__ void kernel0(int n, float a, float *x, float *y){
    // SAXPY: y[i] = a * x[i] + y[i] for the first n elements,
    // one element per thread, guarded against the grid tail.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        y[idx] = a * x[idx] + y[idx];
}
4,904 | // test the size of shared memory for each block
#include <iostream>
#include <cstdio>
using namespace std;
#define N 100
// Device-constant value that main() overwrites via cudaMemcpyToSymbol.
// FIX: `__const__` is not a CUDA qualifier (it expands to plain `const` in
// glibc headers), so NN was never a device symbol and the symbol copy in
// main() could not work; `__constant__` is what cudaMemcpyToSymbol needs.
__constant__ int NN = 1;
__global__
void fun(double *py)
{
    // Print the device-side value of NN, then accumulate pi one million
    // times into *py as a simple busy-work payload.
    // (The original's unused `double a[NN];` local is removed: a
    // __constant__ value is not a compile-time array bound anyway.)
    printf("NN = %d\n", NN);
    *py = 0.;
    for (int i=0; i<1000000; ++i)
        *py += 3.1415927;
}
int main()
{
double *py, y;
int N0 = 13;
// Overwrite the device-side NN symbol before launching.
// NOTE(review): cudaMemcpyToSymbol only succeeds if NN is a real device
// symbol (declared __constant__ or __device__) — verify the declaration
// above; the return code is not checked here.
cudaMemcpyToSymbol(NN, &N0, sizeof(int));
cudaMalloc(&py, sizeof(double));
fun<<<1,1>>>(py);
// The blocking cudaMemcpy already synchronizes with the kernel; the
// trailing cudaDeviceSynchronize mainly flushes device-side printf output.
cudaMemcpy(&y, py, sizeof(double), cudaMemcpyDeviceToHost);
cout << "y = " << y << endl;
cudaDeviceSynchronize();
}
|
4,905 | // mpi authors
#include <algorithm> // swap
#include <cstdio>
#include <fstream> // file io
#include <iomanip>
#include <iostream> // io
#include <cmath>
#include <sstream> // string stream
#include <string> // strings
#include <time.h>
using namespace std;
const int VERT = 317080; // from http://snap.stanford.edu/data/com-DBLP.html
const int EDGES = 1049866; // number of edges. from above
const int TOP = 10; // top number of authors to determine
void readFileCSR(string fileName, int *rowIdx);
__global__ void countAuthors(int *d_rowIdx, int *d_counts, int n);
__global__ void topAuthors(int *d_counts, int *d_max, int top, int n);
__global__ void coauthorVolume(int *d_dist, int *d_counts, int max, int n);
void determineTop(int *authors, int *topAuth, int *topCounts);
void sort(int *id, int *val, int len);
void generateData(int *dist, int distSize);
double CLOCK();
int main(int argc, char *argv[])
{
// Pipeline: read the DBLP co-author edge list into CSR row offsets, count
// co-authors per author on the GPU, pick the TOP authors on the host, and
// build a count distribution on the GPU for plotting.
string fileName = "dblp-co-authors.txt"; // file name with data
int *h_rowIdx, *h_counts, *h_max, *h_tCnts, *h_dist; // host authors
int *d_rowIdx, *d_counts, *d_max, *d_dist; // device authors
size_t rowBytes, cntBytes, maxBytes; // size (in bytes) of each array
double start, finish, total = 0; // timing variables
int max; // maximum number of co-authors
// determine size of device arrays
rowBytes = (VERT + 1) * sizeof(int);
cntBytes = VERT * sizeof(int);
maxBytes = TOP * sizeof(int);
// allocate memory on host
h_rowIdx = new int[VERT + 1]; // host row counts (CSR format)
h_counts = new int[VERT];
h_max = new int[TOP];
h_tCnts = new int[TOP];
// NOTE(review): h_max and h_tCnts are never initialized (the loop below is
// commented out), yet determineTop reads topCounts[j] and both arrays are
// copied to the device — this consumes indeterminate values; verify.
cudaMalloc(&d_rowIdx, rowBytes); // allocate memory device
cudaMalloc(&d_counts, cntBytes);
cudaMalloc(&d_max, maxBytes);
// initialize row offset array
for (int i = 0; i < VERT + 1; i++)
{
h_rowIdx[i] = 0;
}
// get device properties
cudaDeviceProp props; // devices properties
cudaGetDeviceProperties(&props, 0); // get the device properties
cout << "GPU: " << props.name << ": " << props.major << "." << props.minor << endl;
start = CLOCK();
readFileCSR(fileName, h_rowIdx);
finish = CLOCK() - start;
total += finish;
cout << "File read time: " << finish / 1000 << " sec" << endl;
for (int i = 0; i < VERT; i++) // initalize host arrays
h_counts[i] = 0;
//for (int i = 0; i < TOP; i++)
//h_max[i] = i;
cudaMemcpy(d_rowIdx, h_rowIdx, rowBytes, cudaMemcpyHostToDevice); // copy array to device
cudaMemcpy(d_counts, h_counts, cntBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_max, h_max, maxBytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024; // number of threads in each thread block
gridSize = (int)ceil((float)(VERT + 1) / blockSize); // number of thread blocks in a grid
// COUNT AUTHORS' CO-AUTHORS
start = CLOCK();
// execute the kernel
// NOTE(review): kernel launches are asynchronous and nothing synchronizes
// before CLOCK() below, so these "GPU" timings measure launch overhead,
// not execution time — the later cudaMemcpy absorbs the real kernel time.
countAuthors<<<gridSize, blockSize>>>(d_rowIdx, d_counts, VERT);
finish = CLOCK() - start;
total += finish;
cout << "GPU author count time: " << finish / 1000 << " sec" << endl;
cudaMemcpy(h_counts, d_counts, cntBytes, cudaMemcpyDeviceToHost); // copy data back to host
// DETERMINE TOP X AUTHORS WITH MOST CO-AUTHORS
start = CLOCK();
determineTop(h_counts, h_max, h_tCnts);
sort(h_max, h_tCnts, TOP);
finish = CLOCK() - start;
total += finish;
cout << "Max co-auth. search time: " << finish / 1000 << " sec" << endl;
// GENERATE GRAPHING DATA
max = h_tCnts[0]; // get the maximum no. of co-authors
h_dist = new int[max]; // allocate memory on host
cudaMalloc(&d_dist, max * sizeof(int)); // allocate memory on device
// initialize array
for (int i = 0; i < max; i++)
{
h_dist[i] = 0;
}
cudaMemcpy(d_dist, h_dist, max * sizeof(int), cudaMemcpyHostToDevice); // copy array to device
start = CLOCK();
gridSize = (int)ceil((float)VERT / blockSize); // update gridSize
coauthorVolume<<<gridSize, blockSize>>>(d_dist, d_counts, max, VERT); // execute the kernel
finish = CLOCK() - start;
total += finish;
cout << "GPU data generation time: " << finish / 1000 << " sec" << endl;
cudaMemcpy(h_dist, d_dist, max * sizeof(int), cudaMemcpyDeviceToHost); // copy array to host
// DISPLAY RUNNING INFO
cout << "***************************" << endl;
cout << "FINAL RESULTS:" << endl;
for (int i = 0; i < TOP; i++)
{
cout << "auth" << setw(6) << h_max[i] + 1 << ", count " << h_tCnts[i] << endl;
}
cout << "First 10 distribution values:" << endl;
for (int i = 0; i < TOP; i++)
{
cout << h_dist[i] << endl;
}
// NOTE(review): 342 is a hardcoded index into an array of `max` elements;
// this reads out of bounds whenever max <= 342 — verify against the data.
cout << "Last: " << h_dist[342] << endl; // display the last value
generateData(h_dist, max); // generate the graphing data
cout << "Cumulative running time: " << total / 1000 << " sec" << endl;
cudaFree(d_rowIdx); // free device memory
cudaFree(d_counts);
cudaFree(d_max);
cudaFree(d_dist);
delete[] h_rowIdx; // free host memory
delete[] h_counts;
delete[] h_max;
delete[] h_dist;
}
/**
 * @brief Read an edge-list file, accumulating per-author edge counts and
 *        converting them in place to CSR row offsets (cumulative sums)
 *
 * @param fileName File name to read from
 * @param rowIdx Row offset array (CSR format), sized VERT + 1
 */
void readFileCSR(string fileName, int *rowIdx)
{
    // Read a space-delimited edge list ("a b" per line, 1-based author ids;
    // lines whose first non-blank character is '%' are comments) and build
    // CSR row offsets in place: first count edges per author, then
    // prefix-sum. rowIdx must have VERT + 1 zero-initialized entries.
    ifstream fin(fileName.c_str()); // open the input file
    size_t found;                   // position of first non-blank character
    string line, tempAuth;
    char delim = ' ';               // delimiter for the data
    int author;                     // author value
    int adjIdx[2];                  // adjacency value indices (edge endpoints)
    int count = 0;                  // number of data lines processed
    while (fin)
    {
        getline(fin, line);
        found = line.find_first_not_of(" \t");
        // check for comments in the file. comments appear to start with '%'
        if (found != string::npos)
        {
            // if the line is a comment, move onto the next one.
            if (line[found] == '%')
                continue;
        }
        stringstream s(line);
        int i = 0;
        while (getline(s, tempAuth, delim))
        {
            author = stoi(tempAuth);
            adjIdx[i] = author;
            i++;
        }
        int idx0 = adjIdx[0];
        int idx1 = adjIdx[1];
        // count the edge for both endpoints (undirected graph)
        rowIdx[idx0]++;
        rowIdx[idx1]++;
        if (fin.eof())
            break;
        count++;
    }
    fin.close();
    // Cumulatively sum the per-author counts into CSR row offsets.
    // BUG FIX: the original loop condition was `i < VERT + 2`, which wrote
    // rowIdx[VERT + 1] — one element past the end of the (VERT + 1)-sized
    // array. Valid offsets occupy indices 0..VERT only.
    for (int i = 2; i <= VERT; i++)
    {
        rowIdx[i] = rowIdx[i] + rowIdx[i - 1];
    }
    cout << "Lines processed: " << count << endl;
}
/**
 * @brief CUDA kernel to compute each author's co-author count from CSR row offsets
 *
 * @param d_rowIdx CSR row offsets (VERT + 1 entries)
 * @param d_counts per-author co-author counts (output)
 * @param n number of authors
 */
__global__ void countAuthors(int *d_rowIdx, int *d_counts, int n)
{
    // Each author's co-author count is the difference between adjacent CSR
    // row offsets. One thread per author; tail threads do nothing.
    int author = blockIdx.x * blockDim.x + threadIdx.x;
    if (author >= n)
        return;
    d_counts[author] = d_rowIdx[author + 1] - d_rowIdx[author];
}
__global__ void topAuthors(int *d_counts, int *d_max, int top, int n)
{
// Attempted GPU top-N selection.
// NOTE(review): this kernel appears unused in this file (main computes the
// top list on the host via determineTop/sort). As written it has data
// races: every thread in the block initializes and updates the shared
// t_top/t_cnt arrays with no __syncthreads() or atomics, and the arrays
// and the print loop are hardcoded to 20 entries regardless of `top`.
// Treat results as unreliable — rework before re-enabling.
int id = blockIdx.x * blockDim.x + threadIdx.x; // get global thread id
int stride = blockDim.x * gridDim.x; // stride length
int smallest = top - 1; // index of smallest member of d_max
int curr = 0;
__shared__ int t_top[20];
__shared__ int t_cnt[20];
for (int i = 0; i < top; i++)
{
t_top[i] = i;
t_cnt[i] = 0;
}
if (id < n)
{
for (int i = id; i < n; i += stride)
{
curr = d_counts[i];
if (curr > d_counts[t_top[smallest]])
{
t_top[smallest] = i;
t_cnt[smallest] = d_counts[i];
for (int j = threadIdx.x; j < 20; j += 2) // find new smallest
{
int comp1 = t_top[j];
int comp2 = t_top[smallest];
if (d_counts[comp1] < d_counts[comp2])
{
smallest = j;
}
}
}
}
}
if (threadIdx.x == 0)
{
printf("Thread: %d\n", threadIdx.x);
for (int i = 0; i < 20; i++)
printf("auth %d, count %d\n", (t_top[i] + 1), t_cnt[i]);
}
}
/**
 * @brief Build a histogram of co-author counts
 *
 * @param d_dist distribution histogram (output)
 * @param d_counts per-author co-author counts
 * @param max largest co-author count (size of d_dist)
 * @param n number of authors
 */
__global__ void coauthorVolume(int *d_dist, int *d_counts, int max, int n)
{
    // Build a histogram of co-author counts: d_dist[c - 1] accumulates the
    // number of authors whose count is c. One thread per author.
    int id = blockIdx.x * blockDim.x + threadIdx.x; // get global thread id
    if (id < n)
    {
        int memLoc = d_counts[id] - 1; // get the index that needs to be incremented
        // BUG FIX: an author with a zero count yields memLoc == -1, and a
        // count above `max` overruns d_dist; both were unguarded
        // out-of-bounds atomics in the original.
        if (memLoc >= 0 && memLoc < max)
            atomicAdd((d_dist + memLoc), 1); // add 1 to the value at d_dist[memLoc]
    }
}
/**
 * @brief Determine the TOP authors with the highest counts
 *
 * @param authors list of author counts in order of id (index)
 * @param topAuth list of top author ids (output)
 * @param topCounts counts associated with the author at each index (output)
 */
void determineTop(int *authors, int *topAuth, int *topCounts)
{
// Maintain the TOP largest counts (with their author ids) over all VERT
// authors. Relies on `sort` keeping topCounts in descending order, so a
// candidate that beats any kept entry replaces the smallest (last) slot.
// NOTE(review): assumes topCounts was initialized by the caller; main
// allocates it with new[] and never zeroes it — verify before trusting
// the first comparisons.
int currCount;
for (int i = 0; i < VERT; i++)
{
currCount = authors[i];
for (int j = 0; j < TOP; j++)
{
if (currCount > topCounts[j])
{
// replace smallest author at the end.
topAuth[TOP - 1] = i;
topCounts[TOP - 1] = currCount;
sort(topAuth, topCounts, TOP);
break;
}
}
}
}
/**
* @brief Bubble sort
*
* @param id ids assoc with vals
* @param val values to sort
* @param len list length
*/
void sort(int *id, int *val, int len)
{
    // Descending bubble sort on `val`, mirroring every swap in `id` so the
    // two arrays stay paired. Stops early once a pass makes no swaps.
    for (int pass = 0; pass < len - 1; ++pass)
    {
        bool anySwap = false;
        for (int k = 0; k + 1 < len - pass; ++k)
        {
            if (val[k] < val[k + 1])
            {
                std::swap(val[k], val[k + 1]);
                std::swap(id[k], id[k + 1]);
                anySwap = true;
            }
        }
        if (!anySwap)
            return;
    }
}
/**
* @brief Generate a data file from passed values
*
* @param dist Distribution
* @param distSize Size of the distribution
*/
void generateData(int *dist, int distSize)
{
// Write "index value" pairs (1-based index) to author_data-cuda.txt for
// plotting; the final pair is written without a trailing newline.
ofstream data("author_data-cuda.txt");
for (int i = 0; i < distSize-1; i++)
{
data << i+1 << " " << dist[i] << "\n";
}
data << distSize << " " << dist[distSize-1];
cout << "Data saved as \"author_data-cuda.txt\"" << endl;
}
/**
* @brief Get a time point
*
* @return double The time point generated
*/
double CLOCK()
{
    // Monotonic wall-clock reading, expressed in milliseconds.
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000.0 + ts.tv_nsec / 1.0e6;
}
4,906 | #include <stdio.h>
__global__ void helloFromGPU() {
    // Each launched thread prints a greeting carrying its own thread index.
    int tid = threadIdx.x;
    printf("Hello World from GPU %d!\n", tid);
}
int main() {
    // Print from the host, then launch 10 GPU threads that each print.
    printf("Hello World from CPU!\n");
    helloFromGPU <<<1,10>>>();
    // BUG FIX: kernel launches are asynchronous. Without a synchronization
    // point the host can return before the kernel runs, and the device-side
    // printf buffer is never flushed, so the GPU greetings were silently
    // dropped (the cudaDeviceReset that would have done this was commented
    // out).
    cudaDeviceSynchronize();
    return 0;
}
4,907 | #include <cstdio>
#define N 64
#define B 1
#define T 64
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable CUDA error with its source location and optionally
// terminate. FIX: `file` is now const-qualified — callers (the gpuErrchk
// macro) pass the __FILE__ string literal, and binding a string literal to
// a non-const char* is ill-formed in ISO C++11 and later. The change is
// backward-compatible for all callers.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
__global__ void dl(int* in)
{
// Demonstration kernel: deliberately places __syncthreads() inside
// divergent control flow. With T=64 threads, warp 0 (tid 0..31) takes the
// else-branch and warp 1 (tid 32..63) the if-branch, so the two barriers
// are reached by different subsets of the block — undefined behavior that
// typically hangs or desynchronizes the block. Do not "fix" without
// understanding this is the point of the demo.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// The warps in this block take different paths; the __syncthreads calls
// will cause a deadlock.
if(tid > 31)
{
if(in[tid] % 2 == 0)
in[tid]++;   // upper warp: make its elements odd
__syncthreads();
}
else {
if(in[tid] % 2 == 1)
in[tid]--;   // lower warp: make its elements even
__syncthreads();
}
/* int sum = in[tid];
if(tid > 0)
sum += in[tid-1];
if(tid < N - 1)
sum += in[tid+1];
in[tid] = sum / 3; */
}
int main()
{
// Driver for the deadlock demo: upload 0..N-1, launch `dl` (which is
// expected to misbehave — see the kernel comment), then copy back and
// print. Every CUDA call is wrapped in gpuErrchk.
int* in = (int*) malloc(N*sizeof(int));
for(int i = 0; i < N; i++)
in[i] = i;
int* din;
gpuErrchk(cudaMalloc((void**)&din, N*sizeof(int)));
gpuErrchk(cudaMemcpy(din, in, N*sizeof(int), cudaMemcpyHostToDevice));
dl<<<B,T>>>(din);
gpuErrchk(cudaPeekAtLastError());   // catches launch-configuration errors
gpuErrchk(cudaDeviceSynchronize()); // surfaces execution errors (or hangs, per the demo)
gpuErrchk(cudaMemcpy(in, din, N*sizeof(int), cudaMemcpyDeviceToHost));
for(int i = 0; i < N; i++)
printf("%d ", in[i]);
printf("\n");
free(in); cudaFree(din);
}
4,908 |
/*
// Cython function from 'thinc' library
class NumpyOps(Ops):
def backprop_max_pool(self, float[:, ::1] d_maxes,
int[:, ::1] which, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = d_maxes.shape[1]
cdef int T = 0
for length in lengths[:B]:
T += length
cdef Pool mem = Pool()
dX = <float*>mem.alloc(T * O, sizeof(float))
cpu_backprop_max_pool(dX,
&d_maxes[0,0], &which[0, 0], &lengths[0], B, T, O)
return cpu_floats_ptr2array(dX, (T, O))
cdef void cpu_backprop_max_pool(float* dX__to,
const float* d_maxes__bo, const int* which__bo, const int* lengths__b,
int B, int T, int O) nogil:
cdef int length, i, j
for length in lengths__b[:B]:
for i in range(length):
for j in range(O):
if which__bo[j] == i:
dX__to[j] += d_maxes__bo[j]
dX__to += O
d_maxes__bo += O
which__bo += O
*/
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
// Backprop through a per-document max pool: for each document and each
// feature dimension, write the pooled gradient back only at the word that
// won the forward max (recorded in `which`), and zero all other words.
// Assumes blockDim.x == dims and dims <= 256 (the shared arrays are fixed
// at 256) — TODO(review): confirm the launch configuration.
int bid = blockIdx.x;
__shared__ float local_maxes[256];
__shared__ short local_which[256];
// Blocks stride over documents: block bid handles docs bid, bid+gridDim.x, ...
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
int blockStarts = prevLengths[step]*dims;   // flat offset of this doc's first word
local_maxes[threadIdx.x] = maxes[step*dims+threadIdx.x];
local_which[threadIdx.x] = which[step*dims+threadIdx.x];
// NOTE(review): no __syncthreads() after the shared writes, but each
// thread only reads back its own slot, so no cross-thread exchange occurs.
short j=0; // the word index in a doc
for (int i = blockStarts+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
{
if(local_which[threadIdx.x]==j)
{
words[i] = local_maxes[threadIdx.x];
}
else
words[i]=0;
j++;
}
}
}
|
4,909 | #include "cuda_runtime.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#define M 5
#define N 3
// cuComplex or cuDoubleComplex
#define CPLX cuDoubleComplex
void init_crand(int *data,int size){
    // Fill `size` complex values (2*size ints, interleaved re/im) with
    // pseudo-random integers in [-50, 49].
    int total = size * 2;
    for (int k = 0; k < total; ++k)
        data[k] = rand() % 100 - 50;
}
void print_cplx(int *data,int m,int n){
    // Print an m x n matrix of interleaved complex ints, one "re+imi" cell
    // per element, one row per line, with a trailing blank line.
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < n; ++col) {
            int base = row * n * 2 + col * 2;
            printf("%3d%+3di ", data[base], data[base + 1]);
        }
        printf("\n");
    }
    printf("\n");
}
__global__ void ctranspose(int*X,int*X_T,int m,int n);
int main(void)
{
    // Round-trip test: transpose a random M x N complex matrix on the GPU,
    // print it, transpose the result back, and print again.
    // FIX: the original also allocated h_c/d_c buffers that were never used
    // anywhere; they are removed here.
    int *d_a, *d_b;
    int *h_a, *h_b;
    time_t t;
    srand(time(&t));
    int memsize = sizeof(int)*M*N*2;   // 2 ints (re, im) per complex element
    h_a = (int*)malloc(memsize);
    h_b = (int*)malloc(memsize);
    init_crand(h_a,M*N);
    print_cplx(h_a,M,N);
    cudaMalloc((void **)&d_a, memsize);
    cudaMalloc((void **)&d_b, memsize);
    cudaMemcpy(d_a, h_a, memsize, cudaMemcpyHostToDevice);
    // One block per row, one thread per column of the input.
    ctranspose<<<M,N>>>(d_a,d_b,M,N);
    cudaMemcpy(h_b, d_b, memsize, cudaMemcpyDeviceToHost);
    print_cplx(h_b,N,M);
    /************************************************************/
    cudaMemcpy(d_a, h_b, memsize, cudaMemcpyHostToDevice);
    //Roll Back: transpose the N x M result to recover the original layout
    ctranspose<<<N,M>>>(d_a,d_b,N,M);
    cudaMemcpy(h_a, d_b, memsize, cudaMemcpyDeviceToHost);
    print_cplx(h_a,M,N);
    free(h_a);free(h_b);
    cudaFree(d_a); cudaFree(d_b);
    return 0;
}
__global__ void ctranspose(int*X,int*X_T,int m,int n){
    // Transpose one complex (2-int) element per thread. Launch with m
    // blocks of n threads: blockIdx.x selects the row, threadIdx.x the
    // column of the m x n input.
    int col2 = threadIdx.x * 2;
    int row2 = blockIdx.x * 2;
    int src = col2 + n * row2;   // (row, col) in the m x n input
    int dst = row2 + m * col2;   // (col, row) in the n x m output
    X_T[dst]     = X[src];       // real part
    X_T[dst + 1] = X[src + 1];   // imaginary part
}
|
4,910 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <ctime>
#include <algorithm>
/*
TODOs
Wrong result when size != 2^n
Cannot handle (wrong result) when size is huge
*/
using namespace std;
static void HandleError(cudaError_t err,
    const char *file,
    int line) {
    // Abort with a readable message (and a Windows-style pause) when a
    // CUDA call fails; no-op on success.
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err),
        file, line);
    system("pause");
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//#define CHECK_RESULTS_OUTPUT
__global__ void GenerateHistogramAndPredicate(int *input, int *currentBit, int *numBits, int *bitHistogram, int *predicate, int *size, int numBitsPow2)
{
    // Per-block histogram of the current radix digit plus a one-hot
    // predicate matrix (numBitsPow2 planes of *size entries) consumed by the
    // later scans. Requires numBitsPow2 * sizeof(int) dynamic shared memory.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int bid = blockIdx.x;
    extern __shared__ int localBin[];
    // BUG FIX: the original returned early for id >= *size, which made the
    // block-wide __syncthreads() calls below divergent — undefined behavior
    // in the last, partially-filled block. Keep every thread alive and
    // predicate only the per-element work instead.
    bool inRange = id < (*size);
    if (threadIdx.x == 0)
    {
        for (int i = 0; i < numBitsPow2; i++)
            localBin[i] = 0;
    }
    __syncthreads();
    if (inRange)
    {
        // Extract the numBits-wide digit starting at *currentBit.
        int bit = (input[id] >> (*currentBit)) & ((1 << *numBits) - 1);
        atomicAdd(&localBin[bit], 1);
        predicate[bit * (*size) + id] = 1;
    }
    __syncthreads();
    // Thread 0 publishes the block's histogram, laid out digit-major.
    if (threadIdx.x == 0)
    {
        for (int i = 0; i < numBitsPow2; i++)
            bitHistogram[i * gridDim.x + bid] = localBin[i];
    }
}
__global__ void PrefixSum(int *input, int *output, int *size, int *totalBits)
{
// Hillis-Steele-style inclusive scan over each digit plane of `input`
// (done in place), then converted to an exclusive result in `output` by
// subtracting each element's original value.
// NOTE(review): the early return below keeps tail threads out of the
// __syncthreads() calls (divergent barrier) whenever *size is not a
// multiple of blockDim.x, and the `id - interval` reads cross block
// boundaries without synchronization — so this is only sound as a
// single-block launch. main() launches it as <<<1, n>>>; confirm no other
// caller uses more blocks.
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= (*size))
{
return;
}
int bit = 0;
for (bit = 0; bit < (*totalBits); bit++)
{
int current_value = input[bit * (*size) + id];
int current_cdf = input[bit * (*size) + id];
for (unsigned int interval = 1; interval < blockDim.x; interval <<= 1)
{
if (threadIdx.x >= interval)
{
current_cdf += input[bit * (*size) + id - interval];
}
__syncthreads();
input[bit * (*size) + id] = current_cdf;
__syncthreads();
}
// Exclusive scan = inclusive scan minus the element's own value.
output[bit * (*size) + id] = input[bit * (*size) + id] - current_value;
__syncthreads();
}
}
__global__ void PrefixSum_GPUGems(int *g_odata, int *g_idata, int totalSize, int n, int numBitPow2)
{
// Blelloch work-efficient exclusive scan (GPU Gems 3, ch. 39): two
// elements per thread, repeated once per digit plane (numBitPow2 planes of
// `totalSize` ints). `n` is the per-block tile size and must be a power of
// two; dynamic shared memory must hold n ints.
// NOTE(review): the early return below keeps tail threads out of the
// __syncthreads() calls — divergent-barrier UB in the last, partially
// filled block. Also each block scans its own tile independently, so the
// result is block-local, not a global scan; confirm callers account for
// both (see the "wrong result when size != 2^n" TODO at the top of the
// file).
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
bool isEnd = ((2 * id + 1) >= totalSize);   // second element is past the data
if (2 * id >= totalSize || 2 * thid >= n)
{
return;
}
for (int startOffset = 0; startOffset < numBitPow2; startOffset++)
{
int offset = 1;
temp[2 * thid] = g_idata[startOffset * totalSize + 2 * id]; // load input into shared memory
temp[2 * thid + 1] = isEnd ? 0 : g_idata[startOffset * totalSize + 2 * id + 1];
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[startOffset * totalSize + 2 * id] = temp[2 * thid]; // write results to device memory
if (!isEnd)
g_odata[startOffset * totalSize + 2 * id + 1] = temp[2 * thid + 1];
}
}
__global__ void ReOrder(int *input, int *output, int *bitScan, int *relativePos, int *currentBit, int *numBits, int *size)
{
    // Scatter phase of one radix pass: each element's destination is the
    // scanned base offset for (its digit, its block) plus its rank among
    // equal digits from the scanned predicate.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < (*size))
    {
        int digit = (input[id] >> (*currentBit)) & ((1 << *numBits) - 1);
        int base = bitScan[digit * gridDim.x + blockIdx.x];
        int rank = relativePos[digit * (*size) + id];
        output[rank + base] = input[id];
    }
}
int pow(int a, int b)
{
    // Integer exponentiation by repeated multiplication: a^b for b >= 0
    // (b == 0 yields 1). Overload shadows the floating-point ::pow for
    // integer call sites.
    int acc = 1;
    while (b-- > 0)
        acc *= a;
    return acc;
}
// NOTE(review): despite the name, gridSize here is the number of THREADS
// per block; gridCount is the number of blocks.
const int arraySize = 500000, gridSize = 1024;
const int gridCount = ceil((float)arraySize / (float)gridSize);
// Global (not stack) so the 500k-element arrays don't overflow the stack.
int input[arraySize] = { 0 };
int output[arraySize] = { 0 };
int main()
{
// GPU LSD radix sort over `totalBits` bits, numBits per pass, validated
// against std::sort on the host.
const int totalBits = 20, numBits = 1;
const int numBitsPow2 = pow(2, numBits);
int sizeBitScan = numBitsPow2 * gridCount;
int one = 1;
// NOTE(review): tmp_bitHistogram has only 32 ints, but the
// CHECK_RESULTS_OUTPUT debug blocks copy numBitsPow2 * gridCount (and even
// numBitsPow2 * arraySize) ints into it — enabling that flag overflows
// this buffer; verify before using the debug path.
int tmp_bitHistogram[32] = { 0 };
printf("Data generation...\n===============\n| Range: 0 ~ %d\n| Size: %d\n| GSize: %d\n===============\n\n", pow(2, totalBits), arraySize, gridSize);
//init data
srand(time(0));
for (int i = 0; i < arraySize; i++)
{
input[i] = rand() % (pow(2, totalBits) - 1);
}
printf("Sending data to GPU...\n");
//Input: arraySize the input array
//Output: arraySize result
//currentBit: 1 current bit pos
//bitLenth: 1 current bit lenth (numBits)
//bitHistogram: 2^numBits count of items with value i at current bit
//bitScan: 2^numBits prefix sum of bitHistogram
//predicate: arraySize * 2^numBits T/F if item value equals to i at current bit
//relativePos: arraySize * 2^numBits prefix sum of predicate
//size: 1 arraySize
int *d_Input = 0, *d_Output = 0, *d_bitHistogram = 0, *d_bitScan = 0,
*d_predicate = 0, *d_relativePos = 0, *d_currentBit = 0, *d_bitLenth = 0, *d_size = 0,
*d_sizeBitScan = 0, *d_one = 0, *d_bitLenthPow2 = 0;
// Choose which GPU to run on, change this on a multi-GPU system.
HANDLE_ERROR(cudaSetDevice(0));
// Allocate GPU buffers
HANDLE_ERROR(cudaMalloc((void**)&d_Output, arraySize * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_Input, arraySize * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_bitHistogram, gridCount * numBitsPow2 * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_bitScan, gridCount * numBitsPow2 * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_predicate, arraySize * numBitsPow2 * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_relativePos, arraySize * numBitsPow2 * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_currentBit, sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_bitLenth, sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_size, sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_sizeBitScan, sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_one, sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_bitLenthPow2, sizeof(int)));
// Copy input vectors from host memory to GPU buffers.
HANDLE_ERROR(cudaMemcpy(d_Input, input, arraySize * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_bitLenth, &numBits, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_size, &arraySize, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_sizeBitScan, &sizeBitScan, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_bitLenthPow2, &numBitsPow2, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_one, &one, sizeof(int), cudaMemcpyHostToDevice));
printf("GPU Sort Started!\n");
std::clock_t start;
start = std::clock();
//Do the sort: one histogram/scan/scatter pass per digit
for (int i = 0; i < totalBits; i += numBits)
{
//update current bit
HANDLE_ERROR(cudaMemcpy(d_currentBit, &i, sizeof(int), cudaMemcpyHostToDevice));
//clear buffers
HANDLE_ERROR(cudaMemset(d_bitHistogram, 0, gridCount * numBitsPow2 * sizeof(int)));
HANDLE_ERROR(cudaMemset(d_bitScan, 0, gridCount * numBitsPow2 * sizeof(int)));
HANDLE_ERROR(cudaMemset(d_predicate, 0, numBitsPow2 * arraySize * sizeof(int)));
HANDLE_ERROR(cudaMemset(d_relativePos, 0, numBitsPow2 * arraySize * sizeof(int)));
#ifdef CHECK_RESULTS_OUTPUT
//check results
HANDLE_ERROR(cudaMemcpy(output, d_Input, arraySize * sizeof(int), cudaMemcpyDeviceToHost));
printf("Input:\t");
for (int i = 0; i < arraySize; i++)
{
printf("%d ", output[i]);
}
printf("\n");
#endif
/////////////////
GenerateHistogramAndPredicate <<< gridCount, gridSize, numBitsPow2 * sizeof(unsigned int) >>> (d_Input, d_currentBit, d_bitLenth, d_bitHistogram, d_predicate, d_size, numBitsPow2);
#ifdef CHECK_RESULTS_OUTPUT
//check results
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_bitHistogram, numBitsPow2 * gridCount * sizeof(int), cudaMemcpyDeviceToHost));
printf("Bit %d:\t", i);
for (int j = 0; j < gridCount; j++)
{
for (int k = 0; k < numBitsPow2; k++)
{
printf("%d ", tmp_bitHistogram[j * numBitsPow2 + k]);
}
printf("| ");
}
printf("\n");
#endif
/////////////////
// Scan the per-block histograms in a single block to get global bases.
PrefixSum <<< 1, numBitsPow2 * gridCount >>> (d_bitHistogram, d_bitScan, d_sizeBitScan, d_one);
#ifdef CHECK_RESULTS_OUTPUT
//check results
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_bitScan, numBitsPow2 * gridCount * sizeof(int), cudaMemcpyDeviceToHost));
printf("Scan %d:\t", i);
for (int j = 0; j < gridCount; j++)
{
for (int k = 0; k < numBitsPow2; k++)
{
printf("%d ", tmp_bitHistogram[j * numBitsPow2 + k]);
}
printf("| ");
}
printf("\n");
#endif
/////////////////
#ifdef CHECK_RESULTS_OUTPUT
//check results
HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_predicate, numBitsPow2 * arraySize * sizeof(int), cudaMemcpyDeviceToHost));
printf("Pred %d:\t", i);
for (int j = 0; j < numBitsPow2; j++)
{
for (int k = 0; k < arraySize; k++)
{
printf("%d ", tmp_bitHistogram[j * arraySize + k]);
}
printf("| ");
}
printf("\n");
#endif
/////////////////
//PrefixSum <<< gridCount, gridSize >>> (d_relativePos, d_predicate, d_size, d_bitLenthPow2);
// Scan the predicate planes (two elements per thread) for per-digit ranks.
PrefixSum_GPUGems <<< gridCount, gridSize / 2, gridSize * sizeof(int) >>> (d_relativePos, d_predicate, arraySize, gridSize, numBitsPow2);
#ifdef CHECK_RESULTS_OUTPUT
//check results
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_relativePos, numBitsPow2 * arraySize * sizeof(int), cudaMemcpyDeviceToHost));
printf("RPos %d:\t", i);
for (int j = 0; j < numBitsPow2; j++)
{
for (int k = 0; k < arraySize; k++)
{
printf("%d ", tmp_bitHistogram[j * arraySize + k]);
}
printf("| ");
}
printf("\n");
#endif
/////////////////
ReOrder <<< gridCount, gridSize >>> (d_Input, d_Output, d_bitScan, d_relativePos, d_currentBit, d_bitLenth, d_size);
#ifdef CHECK_RESULTS_OUTPUT
//check results
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(output, d_Output, arraySize * sizeof(int), cudaMemcpyDeviceToHost));
printf("Output:\t");
for (int i = 0; i < arraySize; i++)
{
printf("%d ", output[i]);
}
printf("\n*--*--*--*--*--*\n");
#endif
HANDLE_ERROR(cudaDeviceSynchronize());
/////////////////
//Swap input and output for next iter
int* tmp = d_Input;
d_Input = d_Output;
d_Output = tmp;
//printf("\n*-*-*-*-*-*-*\n");
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
HANDLE_ERROR(cudaDeviceSynchronize());
double duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
printf("\nGPU Sort Finished! time cost (ms): %.3lf\n\n", duration * 1000.0);
printf("Collecting results...\n");
HANDLE_ERROR(cudaMemcpy(output, d_Input, arraySize * sizeof(int), cudaMemcpyDeviceToHost));
printf("Checking results...\n\n");
bool validate = true, iszero = true;
for (int i = 1; i < arraySize; i++)
{
if (output[i - 1] > output[i])
{
validate = false;
}
if (output[i] != 0)
{
iszero = false;
}
}
if (iszero)
{
validate = false;
printf("* Result is full of zero!\n* CHECK the GPU part.\n\n");
}
if (validate)
{
printf("Correct!\n");
}
else
{
printf("Wrong...!\n");
}
printf("\n==*==*==*==\nCPU Sort Started!\n");
start = std::clock();
std::sort(input, input + arraySize);
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
printf("\nCPU Sort Finished! time cost (ms): %.3lf\n\n", duration * 1000.0);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
// NOTE(review): cudaDeviceReset destroys the context, so the cudaFree
// calls below operate on an already-torn-down context (their device memory
// is gone); they are effectively no-ops here. d_sizeBitScan, d_one and
// d_bitLenthPow2 are never freed at all.
HANDLE_ERROR(cudaDeviceReset());
cudaFree(d_Input);
cudaFree(d_Output);
cudaFree(d_bitHistogram);
cudaFree(d_bitScan);
cudaFree(d_predicate);
cudaFree(d_relativePos);
cudaFree(d_currentBit);
cudaFree(d_bitLenth);
cudaFree(d_size);
system("pause");
return 0;
}
|
4,911 | #include "includes.h"
// One explicit (Euler-style) integration step of a sigmoid unit update.
// Thread i owns slot `length + i` of B; the unit's previous output lives
// `dim` entries below the slot being written (read as old_output below).
__global__ void updateState(float *B, float *external, int dim, float timestep, int length, float L, float M) {
    const int slot = (blockIdx.x * blockDim.x) + threadIdx.x + length;
    if (slot >= length + dim) return;                  // guard the grid tail
    const float drive = B[slot] + external[slot];      // net input to the unit
    const float prev = B[slot - dim];                  // previous output value
    // rate = -prev + sigmoid(L * (input - M)), then advance by one timestep
    const float rate = (-1 * prev) + 1 / (1 + expf(-1 * L * (drive - M)));
    B[slot] = prev + rate * timestep;
} |
4,912 | #include "includes.h"
// Device code for ICP computation
// Currently working only on performing rotation and translation using cuda
#ifndef _ICP_KERNEL_H_
#define _ICP_KERNEL_H_
#define TILE_WIDTH 256
#endif // #ifndef _ICP_KERNEL_H_
// Block-wide sum reduction: each block sums 2*blockDim.x consecutive entries
// of distance_d and writes the partial sum back IN PLACE to
// distance_d[blockIdx.x] (input is destroyed).
// Assumes blockDim.x == TILE_WIDTH so the shared buffer fits — TODO confirm
// against the launch configuration (not visible here).
__global__ void CalculateTotalError(double * distance_d, int size_data)
{
__shared__ double error_s[2*TILE_WIDTH];
unsigned int t = threadIdx.x;
// first element of this block's 2*blockDim.x-wide segment
unsigned int start = 2*blockDim.x*blockIdx.x;
// Each thread loads two elements; out-of-range slots are zero-filled so the
// reduction below never reads uninitialized shared memory.
if(start + t < size_data)
error_s[t] = distance_d[start + t];
else
error_s[t] = 0.0f;
if(start + blockDim.x + t < size_data)
error_s[blockDim.x + t] = distance_d[start + blockDim.x + t];
else
error_s[blockDim.x + t] = 0.0f;
// Tree reduction. The barrier sits at the TOP of the loop body and is
// executed by all threads (the stride condition is uniform), so it also
// covers the initial shared-memory loads above.
for(unsigned int stride = blockDim.x; stride >= 1; stride >>= 1)
{
__syncthreads();
if(t < stride)
error_s[t] += error_s[t + stride];
}
// lane 0 publishes this block's partial sum
if(t == 0)
distance_d[blockIdx.x] = error_s[t];
} |
4,913 | #include <stdio.h>
#include <inttypes.h>
#ifndef tile_size_x
#define tile_size_x 1
#endif
#ifndef block_size_x
#define block_size_x 512
#endif
#ifndef block_size_y
#define block_size_y 1
#endif
#ifndef window_width
#define window_width 1500
#endif
#define USE_READ_ONLY_CACHE read_only
#if USE_READ_ONLY_CACHE == 1
#define LDG(x, y) __ldg(x+y)
#elif USE_READ_ONLY_CACHE == 0
#define LDG(x, y) x[y]
#endif
#ifndef write_sums
#define write_sums 0
#endif
/*
* This kernel computes the correlated hits of hits no more than 1500 apart.
* It does this using a 1-dimensional mapping of threads and thread blocks.
*
* This kernel supports the usual set of optimizations, including tiling, partial loop unrolling, read-only cache.
* Tuning parameters supported are 'read_only' [0,1], 'tile_size_x' divisor of 1500, and 'block_size_x' multiple of 32.
*
* 'write_sums' can be set to [0,1] to enable the code that
* produces another output, namely the number of correlated
 * hits per row. This number is later used to create the sparse
 * representation of the correlations table. If not using the
 * sparse representation, set write_sums to 0.
*/
// 1D kernel: each thread owns tile_size_x hits and tests each against the
// next `window_width` hits, marking correlations[(j-1)*N + hit] = 1 when the
// spatial separation exceeds the time separation. Hit data for the block
// plus a window_width halo is staged in shared memory.
__global__ void quadratic_difference_linear(char *__restrict__ correlations, int *sums, int N, int sliding_window_width,
const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ z, const float *__restrict__ ct) {
int tx = threadIdx.x;
// first hit index owned by this block
int bx = blockIdx.x * block_size_x * tile_size_x;
// block's hits plus a window_width halo from the following block's range
__shared__ float sh_ct[block_size_x * tile_size_x + window_width];
__shared__ float sh_x[block_size_x * tile_size_x + window_width];
__shared__ float sh_y[block_size_x * tile_size_x + window_width];
__shared__ float sh_z[block_size_x * tile_size_x + window_width];
if (bx+tx < N) {
//the loading phase: strided cooperative load of block + halo
for (int k=tx; k < block_size_x*tile_size_x+window_width; k+=block_size_x) {
if (bx+k < N) {
sh_ct[k] = LDG(ct,bx+k);
sh_x[k] = LDG(x,bx+k);
sh_y[k] = LDG(y,bx+k);
sh_z[k] = LDG(z,bx+k);
}
}
// NOTE(review): threads with bx+tx >= N skip this barrier entirely; that is
// only safe if every thread of a block takes the same branch — confirm N is
// never strictly inside a block's [bx, bx+block_size_x) range, or hoist the
// barrier out of the if.
__syncthreads();
//start of the the computations phase
int i = tx;
float l_ct[tile_size_x];
float l_x[tile_size_x];
float l_y[tile_size_x];
float l_z[tile_size_x];
#if write_sums == 1
int sum[tile_size_x];
#endif
//keep the most often used values in registers
for (int ti=0; ti<tile_size_x; ti++) {
l_ct[ti] = sh_ct[i+ti*block_size_x];
l_x[ti] = sh_x[i+ti*block_size_x];
l_y[ti] = sh_y[i+ti*block_size_x];
l_z[ti] = sh_z[i+ti*block_size_x];
#if write_sums == 1
sum[ti] = 0;
#endif
}
//small optimization to eliminate bounds checks for most blocks
if (bx+block_size_x*tile_size_x+window_width < N) {
//unfortunately there's no better way to do this right now
//[1, 2, 3, 4, 5, 6, 10, 12, 15]
#if f_unroll == 2
#pragma unroll 2
#elif f_unroll == 3
#pragma unroll 3
#elif f_unroll == 4
#pragma unroll 4
#elif f_unroll == 5
#pragma unroll 5
#elif f_unroll == 6
#pragma unroll 6
#elif f_unroll == 10
#pragma unroll 10
#elif f_unroll == 12
#pragma unroll 12
#elif f_unroll == 15
#pragma unroll 15
#endif
// j is the forward offset to the partner hit, 1..window_width
for (int j=1; j < window_width+1; j++) {
#pragma unroll
for (int ti=0; ti<tile_size_x; ti++) {
float diffct = l_ct[ti] - sh_ct[i+ti*block_size_x+j];
float diffx = l_x[ti] - sh_x[i+ti*block_size_x+j];
float diffy = l_y[ti] - sh_y[i+ti*block_size_x+j];
float diffz = l_z[ti] - sh_z[i+ti*block_size_x+j];
if (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz) {
// row (j-1) of an N-wide dense table; 64-bit to avoid overflow
uint64_t pos = (j-1) * ((uint64_t)N) + (bx+i+ti*block_size_x);
correlations[pos] = 1;
#if write_sums == 1
sum[ti] += 1;
#endif
}
}
}
}
//same as above but with bounds checks for last few blocks
else {
//unfortunately there's no better way to do this right now
//[1, 2, 3, 4, 5, 6, 10, 12, 15]
#if f_unroll == 2
#pragma unroll 2
#elif f_unroll == 3
#pragma unroll 3
#elif f_unroll == 4
#pragma unroll 4
#elif f_unroll == 5
#pragma unroll 5
#elif f_unroll == 6
#pragma unroll 6
#elif f_unroll == 10
#pragma unroll 10
#elif f_unroll == 12
#pragma unroll 12
#elif f_unroll == 15
#pragma unroll 15
#endif
for (int j=1; j < window_width+1; j++) {
for (int ti=0; ti<tile_size_x; ti++) {
// bounds check also keeps us off halo slots that were never loaded
if (bx+i+ti*block_size_x+j < N) {
float diffct = l_ct[ti] - sh_ct[i+ti*block_size_x+j];
float diffx = l_x[ti] - sh_x[i+ti*block_size_x+j];
float diffy = l_y[ti] - sh_y[i+ti*block_size_x+j];
float diffz = l_z[ti] - sh_z[i+ti*block_size_x+j];
if (diffct * diffct < diffx * diffx + diffy * diffy + diffz * diffz) {
uint64_t pos = (j-1) * ((uint64_t)N) + (bx+i+ti*block_size_x);
correlations[pos] = 1;
#if write_sums == 1
sum[ti] += 1;
#endif
}
}
}
}
}
#if write_sums == 1
// per-hit correlated-neighbour counts, used for the sparse representation
for (int ti=0; ti<tile_size_x; ti++) {
sums[bx+i+ti*block_size_x] = sum[ti];
}
#endif
}
}
/*
* This is the old kernel that uses a 2D thread block layout, mainly kept here to verify the correctness of the linear kernel
*/
// Reference 2D kernel: thread (hit, offset) tests hit `hit` against hit
// `hit + offset`, where offset is the position inside the sliding window.
// Writes a 1 into row `offset` of the N-wide dense correlations table.
__global__ void quadratic_difference(int8_t *correlations, int N, int sliding_window_width, float *x, float *y, float *z, float *ct)
{
    const int hit = blockIdx.x * block_size_x + threadIdx.x;
    const int offset = blockIdx.y * block_size_y + threadIdx.y;
    if (hit >= N || offset >= sliding_window_width) {
        return;
    }
    const int other = hit + offset;
    if (other >= N) {
        return;                       // partner falls off the end of the data
    }
    const float dct = ct[hit] - ct[other];
    const float dx = x[hit] - x[other];
    const float dy = y[hit] - y[other];
    const float dz = z[hit] - z[other];
    // correlated when spatial separation squared exceeds time separation squared
    if (dct * dct < dx * dx + dy * dy + dz * dz) {
        correlations[offset * (uint64_t)N + (uint64_t)hit] = 1;
    }
}
|
4,914 | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define VALUESMAX 100
#define BMARK -1
#define SIZE 100
#define PRINTMATRIX 1
#define PRINTPERM 0
#define SINGLETONS 0 //1 if singletons 0 if inversions
#define RAND 5
__device__ void nextPermutationBlock(double *matrix, double *permutations, bool *usedValues, int n, double value, int parametr, int *fractionNumber, int level);
__global__ void permutations(double *matrix,double *permutationValues);
__global__ void addPermutations(double *determinant, double *permutations, int *n);
// Computes the determinant of a random SIZE x SIZE matrix by summing signed
// permutation products on the GPU.
// Fixes vs. original: error paths returned cudaSuccess (masking failures) —
// they now return the actual status; d_permutationValues was allocated
// (n-1)*n doubles although the permutations kernel writes index
// blockIdx.x*n + threadIdx.x (up to n*n-2) — allocation grown to n*n;
// kernel launches are now checked and all resources are released.
int main(){
    double *matrix, *d_matrix, *d_permutationValues, *d_determinant, determinant;
    int n=SIZE, *d_n;
    cudaError_t cudaStatus;
    matrix=(double*)malloc(sizeof(double)*n*n);//alocating matrix
    if(matrix==NULL){
        fprintf(stderr,"Error in allocating memory\n");
        return 1;
    }
    for(int i=0; i< n*n; i++){
        matrix[i]=rand()%RAND;
        if(PRINTMATRIX==1){
            printf("%f ",matrix[i]);
            if(!((i+1)%n)){
                printf("\n");
            }//if
        }//PRINTMATRIX
    }//for
    cudaStatus=cudaMalloc((void**)&d_matrix, n*n*sizeof(double)); /*allocating matrix memory on gpu*/
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in allocating memory\n");
        free(matrix);
        return cudaStatus;
    }
    cudaStatus=cudaMalloc((void**)&d_n,sizeof(int));
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in allocating memory\n");
        cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    // n*n slots: kernel indexing leaves column n-1 of each row unused but
    // reaches index n*n-2, so (n-1)*n is too small.
    cudaStatus=cudaMalloc((void**)&d_permutationValues, n*n*sizeof(double));
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in allocating memory\n");
        cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    cudaStatus=cudaMalloc((void**)&d_determinant, sizeof(double));
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in allocating memory\n");
        cudaFree(d_permutationValues); cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    cudaStatus=cudaMemcpy(d_matrix,matrix,n*n*sizeof(double),cudaMemcpyHostToDevice);
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in copying matrix memory, %d\n", cudaStatus);
        cudaFree(d_determinant); cudaFree(d_permutationValues); cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    cudaStatus=cudaMemcpy(d_n,&n,sizeof(int),cudaMemcpyHostToDevice);
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in copying matrix memory\n");
        cudaFree(d_determinant); cudaFree(d_permutationValues); cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    // one block per first-row column, one thread per second-row column
    permutations<<<n,n-1>>>(d_matrix,d_permutationValues);
    cudaStatus=cudaGetLastError();   // catch launch-configuration errors
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error launching permutations kernel, %d\n", cudaStatus);
        cudaFree(d_determinant); cudaFree(d_permutationValues); cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    // serial accumulation of all partial products
    addPermutations<<<1,1>>>(d_determinant,d_permutationValues,d_n);
    cudaStatus=cudaGetLastError();
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error launching addPermutations kernel, %d\n", cudaStatus);
        cudaFree(d_determinant); cudaFree(d_permutationValues); cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    // cudaMemcpy synchronizes with the preceding kernels
    cudaStatus=cudaMemcpy(&determinant,d_determinant,sizeof(double),cudaMemcpyDeviceToHost);
    if(cudaStatus!=cudaSuccess){
        fprintf(stderr,"Error in copying matrix memory, %d\n", cudaStatus);
        cudaFree(d_determinant); cudaFree(d_permutationValues); cudaFree(d_n); cudaFree(d_matrix); free(matrix);
        return cudaStatus;
    }
    printf("Determinant equals: %f \n", determinant);
    cudaFree(d_determinant);
    cudaFree(d_permutationValues);
    cudaFree(d_n);
    cudaFree(d_matrix);
    free(matrix);
    return 0;
}
// Recursively extends a partial permutation: `level` is the next matrix row
// to assign a column to, `value` is the product of entries chosen so far,
// `usedValues[i]` is true while column i is still free, and `parametr`
// accumulates the sign statistic (inversions, or singleton count when
// SINGLETONS==1). At full depth the signed product pow(BMARK=-1, ...) * value
// is added to *permutations.
// NOTE(review): `fractionNumber` is threaded through but never used here, and
// the inversion bookkeeping (addValue starts at -1 and is incremented per
// still-free column) is assumed to count inversions relative to the current
// prefix — TODO confirm against a reference determinant.
__device__ void nextPermutationBlock(double *matrix, double *permutations, bool *usedValues, int n, double value, int parametr, int *fractionNumber, int level){
if(level==n){
// full permutation: apply the sign and accumulate
if(SINGLETONS==1){
value*=pow((float)BMARK,n-parametr);
}
else{
value*=pow((float)BMARK,parametr);
}
*permutations+=value;
}
else{
int addValue=-1;
// try every still-unused column for row `level`
for(int i=0; i<n; i++){
if(usedValues[i]==true){
usedValues[i]=false;
if(SINGLETONS==1){
// singleton: column index equals row index
if(i==level){
addValue=1;
}
else{
addValue=0;
}
}
else{//inversions
addValue++;
}
nextPermutationBlock(matrix,permutations,usedValues,n,value*matrix[level*n+i],parametr+addValue,fractionNumber,level+1);
usedValues[i]=true;  // backtrack: column i is free again
}
}
}
}
// Single-thread accumulation of the per-permutation partial sums into the
// determinant. The permutations kernel is launched as <<<n, n-1>>> and
// thread (b, t) writes slot b*n + t with t < n-1, so column n-1 of each
// n-wide row is NEVER written. The original summed n*n-1 consecutive slots,
// which both read past the (n-1)*n allocation and folded uninitialized
// padding slots into the result; this version sums only the written slots
// (its highest index, n*n-2, does not exceed what the original touched).
__global__ void addPermutations(double *determinant, double *permutations, int *n){
    const int dim = *n;
    double acc = 0.0;
    for(int row = 0; row < dim; ++row){
        for(int col = 0; col < dim - 1; ++col){
            acc += permutations[row * dim + col];
        }
    }
    *determinant = acc;
}
// Seeds one permutation-enumeration per (block, thread) pair: blockIdx.x
// picks row 0's column, threadIdx.x picks row 1's column, then the remaining
// rows are enumerated recursively. Launched as <<<n, n-1>>> with n == matrix
// order (so n <= VALUESMAX must hold for usedValues to fit).
// NOTE(review): when threadIdx.x == blockIdx.x both rows mark the SAME column
// (usedValues toggled twice on one slot), which would enumerate invalid
// permutations — verify the intended thread-to-column mapping.
// NOTE(review): fractionNumber[0] is a null pointer passed down and never
// dereferenced in the visible code.
__global__ void permutations(double *matrix,double *permutationValues){
int *fractionNumber[1]={0};
int n=gridDim.x;   // matrix order == number of blocks
int inversions;
bool usedValues[VALUESMAX];
double result=1;
// mark all n (= blockDim.x + 1) columns as free
for(int i=0; i<blockDim.x+1;i++){
usedValues[i]=true;
}
// consume the two columns fixed by this block/thread pair
usedValues[blockIdx.x]=false;
usedValues[threadIdx.x]=false;
result*=matrix[blockIdx.x];        // row 0, column blockIdx.x
result*=matrix[n+threadIdx.x];     // row 1, column threadIdx.x
// inversions contributed by the fixed prefix
inversions=blockIdx.x+threadIdx.x;
if(blockIdx.x<threadIdx.x){
inversions--;
}
nextPermutationBlock(matrix, &permutationValues[blockIdx.x*n+threadIdx.x],usedValues,n,result,inversions,fractionNumber[0], 2);
}
|
4,915 | #include "includes.h"
// Animated ripple: writes one RGBA pixel per thread. Intensity is a cosine
// of the radial distance from the image centre, phase-shifted by `ticks` and
// damped with distance. DIM comes from includes.h — assumed to be the image
// side length.
__global__ void kernel(unsigned char *ptr, int ticks){
    // pixel coordinates from the 2D thread/block layout
    const int px = threadIdx.x + blockIdx.x * blockDim.x;
    const int py = threadIdx.y + blockIdx.y * blockDim.y;
    const int pixel = px + py * blockDim.x * gridDim.x;
    // offset from the image centre and its radial distance
    const float fx = px - DIM/2;
    const float fy = py - DIM/2;
    const float d = sqrtf( fx * fx + fy * fy );
    // grey value in [1, 255]: cosine ripple, amplitude decaying with d
    const unsigned char grey = (unsigned char) (128.0f + 127.0f * cos(d/10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
    ptr[pixel*4 + 0] = grey;
    ptr[pixel*4 + 1] = grey;
    ptr[pixel*4 + 2] = grey;
    ptr[pixel*4 + 3] = 255;   // fully opaque
} |
4,916 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define INF 1073741824
#define BLOCK_SZ 16
#define BUFFER_SZ 32
int m; // nodes
int n; // dimensions
int k; // k-nearest
// input sample file
// Loads a sample file: a "m n k" header (stored into the file-scope globals)
// followed by m*n whitespace-separated integers. Returns a malloc'd array the
// caller owns. Exits with a diagnostic on open/parse/allocation failure —
// the original ignored fscanf results and would silently run on garbage.
int* load(const char *input)
{
    FILE *file = fopen(input, "r");
    if (!file) {
        fprintf(stderr, "Error: no such input file \"%s\"\n", input);
        exit(1);
    }
    // load m, n, k
    if (fscanf(file, "%d%d%d", &m, &n, &k) != 3 || m <= 0 || n <= 0 || k <= 0) {
        fprintf(stderr, "Error: malformed header in \"%s\"\n", input);
        fclose(file);
        exit(1);
    }
    // allocate memory
    int *data = (int*)malloc(sizeof(int) * m * n);
    if (!data) {
        fprintf(stderr, "Error: out of memory loading \"%s\"\n", input);
        fclose(file);
        exit(1);
    }
    // load data
    int i;
    for (i = 0; i < m * n; i++) {
        if (fscanf(file, "%d", data + i) != 1) {
            fprintf(stderr, "Error: truncated data in \"%s\"\n", input);
            fclose(file);
            exit(1);
        }
    }
    fclose(file);
    return data;
}
// Pairwise squared Euclidean distances between all m points (n dims) using
// BLOCK_SZ x BLOCK_SZ shared-memory tiles; only i < j is computed and
// mirrored, the diagonal is set to INF.
// Fixes vs. original: out-of-range threads no longer `return` before
// __syncthreads() (a barrier inside divergent flow is undefined behaviour),
// and tile slots belonging to out-of-range threads are zero-filled instead
// of being left uninitialized and then read by in-range threads.
__global__ void distances2(int *data, int *dis, int m, int n)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int i = BLOCK_SZ * blockIdx.x + tx;
    int j = BLOCK_SZ * blockIdx.y + ty;
    __shared__ int matA[BLOCK_SZ][BLOCK_SZ];
    __shared__ int matB[BLOCK_SZ][BLOCK_SZ];
    int tmp1;
    int tmp2 = 0;
    for (int k = 0; k < n; k += BLOCK_SZ) {
        // load sub matrices (zero-padded outside the data) to shared memory
        matA[tx][ty] = (i < m && k + ty < n) ? data[i * n + (k + ty)] : 0;
        matB[ty][tx] = (j < m && k + tx < n) ? data[j * n + (k + tx)] : 0;
        __syncthreads();                  // uniform: every thread reaches it
        if (i < j && j < m) {             // i < j implies i < m
            for (int w = 0; w < BLOCK_SZ; w++) {
                tmp1 = matA[tx][w] - matB[ty][w];
                tmp2 += tmp1 * tmp1;
            }
        }
        __syncthreads();                  // before the tiles are overwritten
    }
    if (j < m) {
        if (i < j) {
            dis[i * m + j] = dis[j * m + i] = tmp2;
        } else if (i == j) {
            dis[i * m + j] = INF;         // a point is not its own neighbour
        }
    }
}
// Variant of distances2 where blockIdx.z in [0,4) selects one quadrant of
// the tile grid (block = tiles per half-dimension).
// Fixes vs. original: no `return` before __syncthreads() (undefined
// behaviour under divergence) and zero-filled tile slots for out-of-range
// threads instead of uninitialized shared memory.
__global__ void distances3(int *data, int *dis, int m, int n, int block)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int i = BLOCK_SZ * (blockIdx.x + (blockIdx.z / 2) * block) + tx;
    int j = BLOCK_SZ * (blockIdx.y + (blockIdx.z % 2) * block) + ty;
    __shared__ int matA[BLOCK_SZ][BLOCK_SZ];
    __shared__ int matB[BLOCK_SZ][BLOCK_SZ];
    int tmp1;
    int tmp2 = 0;
    for (int k = 0; k < n; k += BLOCK_SZ) {
        // load sub matrices (zero-padded outside the data) to shared memory
        matA[tx][ty] = (i < m && k + ty < n) ? data[i * n + (k + ty)] : 0;
        matB[tx][ty] = (j < m && k + tx < n) ? data[j * n + (k + tx)] : 0;
        __syncthreads();                  // uniform: every thread reaches it
        if (i < j && j < m) {             // i < j implies i < m
            for (int w = 0; w < BLOCK_SZ; w++) {
                tmp1 = matA[tx][w] - matB[w][ty];
                tmp2 += tmp1 * tmp1;
            }
        }
        __syncthreads();                  // before the tiles are overwritten
    }
    if (j < m) {
        if (i < j) {
            dis[i * m + j] = dis[j * m + i] = tmp2;
        } else if (i == j) {
            dis[i * m + j] = INF;
        }
    }
}
// Variant of distances3 launched with (BLOCK_SZ/2, BLOCK_SZ/2, 4) threads:
// threadIdx.z picks the quadrant of the logical BLOCK_SZ x BLOCK_SZ tile.
// Fixes vs. original: no `return` before __syncthreads() (undefined
// behaviour under divergence) and zero-filled tile slots for out-of-range
// threads instead of uninitialized shared memory.
__global__ void distances33(int *data, int *dis, int m, int n, int block)
{
    int tx = threadIdx.x + (threadIdx.z / 2) * BLOCK_SZ / 2;
    int ty = threadIdx.y + (threadIdx.z % 2) * BLOCK_SZ / 2;
    int i = BLOCK_SZ * (blockIdx.x + (blockIdx.z / 2) * block) + tx;
    int j = BLOCK_SZ * (blockIdx.y + (blockIdx.z % 2) * block) + ty;
    __shared__ int matA[BLOCK_SZ][BLOCK_SZ];
    __shared__ int matB[BLOCK_SZ][BLOCK_SZ];
    int tmp1;
    int tmp2 = 0;
    for (int k = 0; k < n; k += BLOCK_SZ) {
        // load sub matrices (zero-padded outside the data) to shared memory
        matA[tx][ty] = (i < m && k + ty < n) ? data[i * n + (k + ty)] : 0;
        matB[tx][ty] = (j < m && k + tx < n) ? data[j * n + (k + tx)] : 0;
        __syncthreads();                  // uniform: every thread reaches it
        if (i < j && j < m) {             // i < j implies i < m
            for (int w = 0; w < BLOCK_SZ; w++) {
                tmp1 = matA[tx][w] - matB[w][ty];
                tmp2 += tmp1 * tmp1;
            }
        }
        __syncthreads();                  // before the tiles are overwritten
    }
    if (j < m) {
        if (i < j) {
            dis[i * m + j] = dis[j * m + i] = tmp2;
        } else if (i == j) {
            dis[i * m + j] = INF;
        }
    }
}
// One thread per point: selection of the k smallest distances of row i of
// `dis`, writing the neighbour indices to result[i*k .. i*k+k-1]. Row i of
// dis is destroyed (selected entries are overwritten with INF).
// Fix vs. original: `idx` was only assigned inside `if (dis[l] < tmp)`, so a
// row with no entry below INF left it indeterminate and the trailing
// `dis[idx] = INF` wrote through an uninitialized index.
__global__ void sort(int *dis, int *result, int m, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= m) return;
    int tmp, idx;
    for (int j = 0; j < k; j++) { // find j-th nearest neighbor
        tmp = INF;
        idx = i * m;              // safe default when every entry is INF
        for (int l = i * m; l < (i + 1) * m; l++) {
            if (dis[l] < tmp) {
                tmp = dis[l];
                idx = l;
            }
        }
        result[i * k + j] = idx % m;  // column index == neighbour id
        dis[idx] = INF;               // exclude from later rounds
    }
}
// One thread per point: keeps a running set of the k smallest distances of
// row i (tracking their column indices in `buffer`), then selection-sorts
// that set into result[i*k ..]. Row i of dis is destroyed.
// Requires k <= BUFFER_SZ (buffer is a fixed 32-entry register/local array).
// NOTE(review): `idx` is only assigned when a distance exceeds the running
// max (initially 0); if the first k distances were all <= 0 it would be used
// uninitialized — distances here are sums of squares, so presumably >= 0,
// but a row of zeros would still trigger this. TODO confirm inputs.
__global__ void ssort(int *dis, int *result, int m, int k)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= m) return;
const int start = i * m;
int buffer[BUFFER_SZ];
// find the max value in first k elements
int max = 0;
int idx;
for (int j = 0; j < k; j++) {
buffer[j] = j;
if (dis[start + j] > max) {
max = dis[start + j];
idx = j;
}
}
// traverse the remaining elements to select the k minimal
for (int j = k; j < m; j++) {
if (dis[start + j] < max) {
// evict the current max of the kept set, then re-find the max
dis[start + idx] = dis[start + j];
buffer[idx] = j;
max = 0;
for (int l = 0; l < k; l++) {
if (dis[start + l] > max) {
max = dis[start + l];
idx = l;
}
}
}
}
// sort the k elements
for (int j = 0; j < k; j++) { // find j-th nearest neighbor
max = INF; // use max as "min" here to save register resource
for (int l = 0; l < k; l++) {
if (dis[start + l] < max ||
(dis[start + l] == max && buffer[l] < buffer[idx])) { // when two distances are the same, index first
max = dis[start + l];
idx = l;
}
}
result[i * k + j] = buffer[idx];
dis[start + idx] = INF;
}
}
// GPU pipeline: upload the m x n point data, compute all pairwise squared
// distances, select each point's k nearest neighbours, download the result.
// Uses the file-scope globals m, n, k set by load(). timer1/timer2 hold the
// kernel timings (printed only via the commented fprintf lines below).
// Fix vs. original: a host buffer `dis` of m*m ints was malloc'd, never
// used, and never freed — a pure memory leak; it has been removed.
void knn(int *data, int *result)
{
    int *d_data, *d_result, *d_dis;
    int block = ceil(m / (double)BLOCK_SZ);   // tiles per dimension
    int block1 = ceil(block / 2.0);           // half-grid for the z=4 variants
    float timer1, timer2;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void**)&d_data, sizeof(int) * m * n);
    cudaMalloc((void**)&d_result, sizeof(int) * m * k);
    cudaMalloc((void**)&d_dis, sizeof(int) * m * m);
    cudaMemcpy(d_data, data, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    // distances2<<<dim3(block, block, 1), dim3(BLOCK_SZ, BLOCK_SZ, 1)>>>(d_data, d_dis, m, n);
    // distances3<<<dim3(block1, block1, 4), dim3(BLOCK_SZ, BLOCK_SZ, 1)>>>(d_data, d_dis, m, n, block1);
    distances33<<<dim3(block1, block1, 4), dim3(BLOCK_SZ / 2, BLOCK_SZ / 2, 4)>>>(d_data, d_dis, m, n, block1);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer1, start, stop);
    cudaEventRecord(start);
    sort<<<block, BLOCK_SZ>>>(d_dis, d_result, m, k);
    // ssort<<<block, BLOCK_SZ>>>(d_dis, d_result, m, k);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer2, start, stop);
    // blocking copy also synchronizes with the sort kernel
    cudaMemcpy(result, d_result, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
    // fprintf(stderr, "distance: %.4lf ms\n", timer1);
    // fprintf(stderr, "sort: %.4lf ms\n", timer2);
    cudaFree(d_data);
    cudaFree(d_result);
    cudaFree(d_dis);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Entry point: load the sample file named on the command line, run the GPU
// k-nearest-neighbour search, and print one line per point containing the
// indices of its k nearest neighbours.
int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "Usage: %s input_file\n", argv[0]);
        exit(1);
    }
    // input (load() also fills the globals m, n, k from the file header)
    int *data = load(argv[1]);
    int *result = (int*)malloc(sizeof(int) * m * k);
    // compute
    knn(data, result);
    // output: row per point, k neighbour indices per row
    for (int row = 0; row < m; row++) {
        const int *neighbours = result + row * k;
        for (int col = 0; col < k; col++) {
            printf("%d ", neighbours[col]);
        }
        printf("\n");
    }
    free(data);
    free(result);
    return 0;
}
|
4,917 | //pass
//--gridDim=[32768,1,1] --blockDim=[512,1,1]
// Adds *factor to one array element per thread, num_iterations times.
// The per-iteration dereference of `factor` is deliberate (see the original
// comment: non-coalesced on purpose, to burn time).
__global__ void init_array(int *g_data, int *factor, int num_iterations)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    int remaining = num_iterations;
    while (remaining-- > 0) {
        g_data[gid] += *factor; // non-coalesced on purpose, to burn time
    }
}
|
4,918 | #include "includes.h"
// 2D windowed-sinc interpolation of a batch of images. One thread per output
// pixel inside an i_int_size x i_int_size patch whose top-left corner is
// (i_startX, i_startY); blockIdx.z selects the image. The output pixel maps
// back to fractional input coordinates (outx/i_covs_, outy/i_covs_) and is a
// weighted sum over the whole input image, with filter taps looked up in
// r_filter_ (i_decfactor_ fractional steps per tap, i_intplength_ taps),
// normalized by the accumulated weight.
__global__ void cuSincInterpolation_kernel(const int nImages, const float * imagesIn, const int inNX, const int inNY, float * imagesOut, const int outNX, const int outNY, const float * r_filter_, const int i_covs_, const int i_decfactor_, const int i_intplength_, const int i_startX, const int i_startY, const int i_int_size)
{
int idxImage = blockIdx.z;
int idxX = threadIdx.x + blockDim.x*blockIdx.x;
int idxY = threadIdx.y + blockDim.y*blockIdx.y;
if(idxImage >=nImages || idxX >= i_int_size || idxY >= i_int_size) return;
// absolute output pixel and its flat index in the output batch
int outx = idxX + i_startX;
int outy = idxY + i_startY;
int idxOut = idxImage*outNX*outNY + outx*outNY + outy;
// fractional input position: integer part selects the tap centre, the
// fractional part (quantized to i_decfactor_ steps) selects the sub-tap
float r_xout = (float)outx/i_covs_;
int i_xout = int(r_xout);
float r_xfrac = r_xout - i_xout;
int i_xfrac = int(r_xfrac*i_decfactor_);
float r_yout = (float)outy/i_covs_;
int i_yout = int(r_yout);
float r_yfrac = r_yout - i_yout;
int i_yfrac = int(r_yfrac*i_decfactor_);
float intpData = 0.0f;   // weighted pixel accumulator
float r_sincwgt = 0.0f;  // weight accumulator for normalization
float r_sinc_coef;
for(int i=0; i < inNX; i++) {
// tap index for row i, wrapped into [0, i_intplength_)
int i_xindex = i_xout - i + i_intplength_/2;
if(i_xindex < 0) i_xindex+= i_intplength_;
if(i_xindex >= i_intplength_) i_xindex-=i_intplength_;
float r_xsinc_coef = r_filter_[i_xindex*i_decfactor_+i_xfrac];
for(int j=0; j< inNY; j++) {
int i_yindex = i_yout - j + i_intplength_/2;
if(i_yindex < 0) i_yindex+= i_intplength_;
if(i_yindex >= i_intplength_) i_yindex-=i_intplength_;
float r_ysinc_coef = r_filter_[i_yindex*i_decfactor_+i_yfrac];
// separable 2D weight = x-tap * y-tap
r_sinc_coef = r_xsinc_coef*r_ysinc_coef;
r_sincwgt += r_sinc_coef;
intpData += imagesIn[idxImage*inNX*inNY+i*inNY+j]*r_sinc_coef;
/*
if(outx == 0 && outy == 1) {
printf("intp kernel %d %d %d %d %d %d %d %f\n", i, j, i_xindex, i_yindex, i_xindex*i_decfactor_+i_xfrac,
i_yindex*i_decfactor_+i_yfrac, idxImage*inNX*inNY+i*inNY+j, r_sinc_coef);
}*/
}
}
// normalize by the total weight of the taps actually used
imagesOut[idxOut] = intpData/r_sincwgt;
//printf("test int kernel %d %d %f %f %f\n", outx, outy, intpData, r_sincwgt, imagesOut[idxOut]);
} |
4,919 | //
// Created by bluet on 06/04/2021.
//
/**
*\file normal_force_hertz.c
*\brief body of the function normal_force_hertz
*/
#include "normal_force_hertz.cuh"
// Normal contact force for the Hertz model with viscoelastic dissipation
// (T. Pöschel and T. Schwager, "Computational Granular Dynamics", eq. 2.14,
// p. 20): an elastic term ~ deltan^(3/2) plus a dissipative term
// ~ Aij * deltandot * sqrt(deltan).
// A positive (attractive) total force at separation is clamped to zero to
// avoid an unphysical attractive effect (ibid., p. 22).
__device__ double normal_force_hertz(double deltan,double deltandot, double rij, double Eij, double Aij)
{
    const double elastic = pow(deltan,3.0/2.0);
    const double dissipative = Aij*deltandot*sqrt(deltan);
    double fcnij = (-4.0/3.0)*sqrt(rij)*Eij*(elastic + dissipative);
    if (fcnij > 0.0) {
        fcnij = 0;   // suppress attractive contribution
    }
    return fcnij;
}
|
4,920 | #include <cstdlib>
#include <iostream>
#include <algorithm>
#include <random>
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
__host__
__device__
// Element-wise product: out[i] = a[i] * b[i] for every i in [0, n).
// Callable from both host and device; a zero-length n is a no-op.
void vector_mul(float *out, float *a,
float *b, size_t n) {
    const float *pa = a;
    const float *pb = b;
    for (float *po = out; po != out + n; ++po, ++pa, ++pb) {
        *po = *pa * *pb;
    }
}
__global__
// Kernel wrapper around vector_mul. NOTE: every launched thread runs the
// full [0, n) loop, so all threads redundantly compute and write every
// element (same values, so the result is correct but the work is not
// partitioned across threads).
void vector_mul_gpu(float* d_out, float* d_a,
float* d_b, size_t n) {
vector_mul(d_out, d_a, d_b, n);
}
// Multiplies two random float vectors on the GPU, then times the same
// computation on the CPU. Element count defaults to 1e9 (≈4 GB per array,
// three host + three device arrays) and can be overridden via argv[1].
// Fixes vs. original: malloc/cudaMalloc and the kernel launch were entirely
// unchecked — at the default size the device allocations routinely fail and
// the kernel then ran on invalid pointers; failures now abort with a message.
int main(int argc, char** argv){
    size_t N = 1000'000'000;
    if (argc > 1) {
        N = strtoul(argv[1], 0, 0);
    }
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    // Allocate host memory (checked)
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    if (!a || !b || !out) {
        std::cerr << "host allocation failed" << std::endl;
        return 1;
    }
    // Initialize host arrays with uniform random values
    std::mt19937 eng{std::random_device()()};
    std::generate(a, a+N, [&eng]() {
        return std::uniform_real_distribution<float>{0.f, 1.f}(eng);
    });
    std::generate(b, b+N, [&eng]() {
        return std::uniform_real_distribution<float>{1.f, 2.f}(eng);
    });
    // Allocate device memory (checked — the original ignored failures)
    if (cudaMalloc((void**)&d_a, sizeof(float) * N) != cudaSuccess ||
        cudaMalloc((void**)&d_b, sizeof(float) * N) != cudaSuccess ||
        cudaMalloc((void**)&d_out, sizeof(float) * N) != cudaSuccess) {
        std::cerr << "device allocation failed" << std::endl;
        return 1;
    }
    // Transfer data from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // Executing in kernel. NOTE: vector_mul_gpu runs the whole [0, N) loop
    // in every thread, so all 2500 threads redundantly compute each element.
    int blockCount = 5;
    int threadCount = 500;
    vector_mul_gpu<<<blockCount, threadCount>>>(d_out, d_a, d_b, N);
    cudaError_t err = cudaGetLastError();   // launch-configuration errors
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    cudaDeviceSynchronize();
    // Transfer data back to host memory
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    // profiling for CPU (note: this overwrites the GPU result held in `out`)
    auto s = std::chrono::high_resolution_clock::now();
    vector_mul(out, a, b, N);
    auto d = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - s).count();
    std::cout << d << " ms CPU" << std::endl;
    // Deallocate host memory
    free(a);
    free(b);
    free(out);
    return 0;
}
|
4,921 | // FILE: ising3d_q.c
//
// 1) H = -J \sum_{\langle i,j \rangle} \sigma_i \sigma_j , J > 0 for FM
//
//
// 2. Lattice labelings :
//
//
// j3
// . j4 (+z)
// | . +z
// | / /
// | / /
// | / /
// |/ /
// j1 . ---------- . ---------- . j0 (+x) .---------- +x
// /| |
// / | |
// / | |
// / | |
// . | |
// j5 . +y
// j2
// (+y)
//
#include <string>
#include <stdio.h>
#include <math.h>
#include <stdlib.h> // Provides rand(), RAND_MAX
#include <assert.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
using std::string;
#define D 3
#define BLOCKLx 4
#define BLOCKLy 4
#define BLOCKLz 4
#define MyBit 1ULL
#define N64bit 64
typedef unsigned long long int bit64;
typedef bit64 spin_t;
typedef double v_type;
int L, N;
int init, istp, mstp, nbins;
int tau, qpnt, tot_pnt, interval;
v_type T, Ti, Tf, vel, r;
__device__ __constant__ v_type Boltz[4*D+1];
// Seeds one curand state per lattice site. The launch grid is assumed to
// cover the L x L x L lattice exactly (no bounds check is performed).
__global__ void init_rand(int L, unsigned long long int seed, curandState_t *states)
{
    const int gx = blockIdx.x * blockDim.x + threadIdx.x;
    const int gy = blockIdx.y * blockDim.y + threadIdx.y;
    const int gz = blockIdx.z * blockDim.z + threadIdx.z;
    const int site = (gz * L + gy) * L + gx;   // flattened z-major site index
    // sequence and offset are both keyed to the site index, giving each
    // site an independent stream
    curand_init(seed, site, site, &states[site]);
    __syncthreads();   // barrier kept from the original; no shared state here
} // init_rand
// Draws one uniform variate from site global_n's generator, persisting the
// advanced state back to global memory (local copy avoids repeated
// global-memory round trips inside curand).
__device__ v_type ran(curandState* global_state, int global_n)
{
    curandState_t state = global_state[global_n];
    const v_type u = curand_uniform(&state);
    global_state[global_n] = state;
    return u;
} // ran
// Prints the x grid/block dimensions from exactly one thread.
// Fix vs. original: the inner guard tested threadIdx.y twice and never
// threadIdx.z, so thread (0,0,z) of block (0,0,0) printed once per z-slice,
// duplicating the output blockDim.z times.
__global__ void display_dims()
{
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
        if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
            printf("gDim.x : %i \n", gridDim.x);
            printf("bDim.x : %i \n", blockDim.x);
        }
    }
    __syncthreads();
} // display_dims
// One half-sweep of checkerboard Metropolis updates on the bit-packed 3D
// Ising lattice: each spin_t word holds N64bit (64) independent replicas,
// one per bit. `off` (0/1) selects the sub-lattice parity updated this call;
// because a site's six neighbours all have the opposite parity, concurrent
// blocks never update neighbouring sites in the same call, so reading halo
// spins from d_spins is race-free. Each block stages its BLOCKL^3 spins plus
// a one-site halo in shared memory (periodic boundaries via the modular
// block arithmetic below). Flip probabilities come from the constant-memory
// table Boltz[dE+6], dE in [-6, 6].
__global__ void mc_updates(int L, curandState* global_state, spin_t *d_spins, int off)
{
__shared__ spin_t local_spins[(BLOCKLx+2)*(BLOCKLy+2)*(BLOCKLz+2)];
int global_x, global_y, global_z, global_n;
int nn_global_x, nn_global_y, nn_global_z, nn_global_n;
int local_x, local_y, local_z, local_n;
spin_t sj, ss0, ss1, ss2, ss3, ss4, ss5, mask;
int xm, xp, ym, yp, zm, zp, L2 = L*L, b, dE;
global_x = blockIdx.x * blockDim.x + threadIdx.x;
global_y = blockIdx.y * blockDim.y + threadIdx.y;
global_z = blockIdx.z * blockDim.z + threadIdx.z;
global_n = global_z * L2 + global_y * L + global_x;
// interior of the haloed shared tile is offset by +1 in each dimension
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[global_n];
// Face threads additionally fetch the halo site from the periodic
// neighbouring block (one face per branch below).
if (threadIdx.x == 0) {
nn_global_x = ((blockIdx.x-1+gridDim.x)%gridDim.x)*BLOCKLx + BLOCKLx-1;
nn_global_y = global_y;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = 0;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.x == BLOCKLx-1) {
nn_global_x = ((blockIdx.x+1)%gridDim.x)*BLOCKLx;
nn_global_y = global_y;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = BLOCKLx+1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.y == 0) {
nn_global_x = global_x;
nn_global_y = ((blockIdx.y-1+gridDim.y)%gridDim.y)*BLOCKLy + BLOCKLy-1;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = 0;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.y == BLOCKLy-1) {
nn_global_x = global_x;
nn_global_y = ((blockIdx.y+1)%gridDim.y)*BLOCKLy;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = BLOCKLy+1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.z == 0) {
nn_global_x = global_x;
nn_global_y = global_y;
nn_global_z = ((blockIdx.z-1+gridDim.z)%gridDim.z)*BLOCKLz + BLOCKLz-1;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = 0;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.z == BLOCKLz-1) {
nn_global_x = global_x;
nn_global_y = global_y;
nn_global_z = ((blockIdx.z+1)%gridDim.z)*BLOCKLz;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = BLOCKLz+1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
//__syncthreads();
// sj is this thread's own word — written by this same thread above, so no
// barrier is needed to read it; the barrier below covers neighbour reads.
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
sj = local_spins[local_n];
__syncthreads();
// update only the sub-lattice whose parity matches `off`
if ( (threadIdx.x + threadIdx.y + threadIdx.z + off)%2 == 0 ) {
xm = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x-1;
xp = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x+1;
ym = local_z * (BLOCKLx+2)*(BLOCKLy+2) + (local_y-1) * (BLOCKLx+2) + local_x;
yp = local_z * (BLOCKLx+2)*(BLOCKLy+2) + (local_y+1) * (BLOCKLx+2) + local_x;
zm = (local_z-1) * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
zp = (local_z+1) * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
// XOR with each neighbour word: bit b is 1 iff the pair is anti-aligned
// in replica b
ss0 = sj ^ local_spins[xm];
ss1 = sj ^ local_spins[xp];
ss2 = sj ^ local_spins[ym];
ss3 = sj ^ local_spins[yp];
ss4 = sj ^ local_spins[zm];
ss5 = sj ^ local_spins[zp];
// Metropolis step for each of the 64 packed replicas
for (b = 0; b < N64bit; ++b) {
dE = 0;
// dE <--> 2 \sigma^B_i XOR \sigma^B_j - 1
mask = (MyBit << b);
dE += (ss0 & mask) ? 1 : -1;
dE += (ss1 & mask) ? 1 : -1;
dE += (ss2 & mask) ? 1 : -1;
dE += (ss3 & mask) ? 1 : -1;
dE += (ss4 & mask) ? 1 : -1;
dE += (ss5 & mask) ? 1 : -1;
if ( ran(global_state, global_n) < Boltz[dE+6] ) {
sj ^= mask;   // flip replica b's spin
}
} // b
local_spins[local_n] = sj;
d_spins[global_n] = local_spins[local_n];
} // end of "if (Idx.x + Idx.y + Idx.z + off)%2 == 0"
__syncthreads();
} // mc_updates
// ========================================================================== //
void initialize();
void read_file();
void set_parameters();
void lattice(int *nnbors);
void configuration(spin_t *spins);
void random_conf(spin_t *spins);
void read_conf(spin_t *spins);
void write_conf(spin_t *spins);
void checkout_configuration(spin_t *spins, spin_t *saved_spins);
void save_configuration(spin_t *spins, spin_t *saved_spins);
void initial_temperature();
void quench_temperature(int t);
void tT_tables(int *t_table, v_type *T_table);
void probability(v_type *h_prob);
void clean(double *enrg, double *maga, double *mag2, double *mag4);
void measure(spin_t *spins,int *nnbors,int n,double *enrg,double *maga,double *mag2,double *mag4);
void write_data(int *t_table,v_type *T_table,double *enrg,double *maga,double *mag2,double *mag4);
void check_stop();
// ========================================================================== //
int main(int argc, char* argv[]) {
curandState_t *devStates;                  // per-site RNG states on the device
spin_t *spins, *saved_spins, *dev_spins;   // host spins, host backup copy, device spins
v_type *host_prob;                         // host staging buffer for the Boltzmann table
v_type *T_table;                           // temperature at each sampled quench step
int *t_table;                              // sampled quench step numbers
int *nnbors;                               // neighbour table (used only by host-side measure())
double *enrg;                              // per-sample-point accumulators, filled over mstp sweeps
double *maga;
double *mag2;
double *mag4;
int i, j, k, t, n, steps = 10;             // steps: sweeps at Ti before each quench run
srand(time(NULL));
initialize();                              // read input.in and derive run parameters
dim3 block(BLOCKLx, BLOCKLy, BLOCKLz);
assert(L > 0);
// One thread per lattice site; assumes L is divisible by each BLOCKL*
// (no remainder blocks are launched).
dim3 grid(L/BLOCKLx, L/BLOCKLy, L/BLOCKLz);
nnbors = (int *) malloc(N*2*D * sizeof(int));
lattice(nnbors);
t_table = (int *) malloc(tot_pnt * sizeof(int));
T_table = (v_type *) malloc(tot_pnt * sizeof(v_type));
tT_tables(t_table, T_table);
spins = (spin_t *) malloc( N * sizeof(spin_t) ); assert(spins != NULL);
saved_spins = (spin_t *) malloc( N * sizeof(spin_t) ); assert(saved_spins != NULL);
host_prob = (v_type *) malloc((4*D+1) * sizeof(v_type));
configuration(spins);                      // random start or read spins.dat, per `init`
cudaMalloc((void **)&dev_spins, N*sizeof(spin_t)); assert(dev_spins != NULL);
cudaMalloc((void **)&devStates, N*sizeof(curandState_t));
init_rand<<<grid, block>>>(L, rand(), devStates);
cudaDeviceSynchronize();
enrg = (double *) malloc(tot_pnt * sizeof(double));
maga = (double *) malloc(tot_pnt * sizeof(double));
mag2 = (double *) malloc(tot_pnt * sizeof(double));
mag4 = (double *) malloc(tot_pnt * sizeof(double));
initial_temperature(); probability(host_prob);
cudaMemcpyToSymbol(Boltz,host_prob,(4*D+1)*sizeof(v_type),0,cudaMemcpyHostToDevice);
cudaMemcpy(dev_spins, spins, N*sizeof(spin_t), cudaMemcpyHostToDevice);
// Equilibration at Ti: istp full sweeps, each run as two checkerboard
// half-sweeps (sublattice offsets 0 and 1).
for (i = 0; i < istp; ++i) {
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 0);
cudaDeviceSynchronize();
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 1);
cudaDeviceSynchronize();
} // i
cudaMemcpy(spins, dev_spins, N*sizeof(spin_t), cudaMemcpyDeviceToHost);
save_configuration(spins, saved_spins);
write_conf(spins);
for (k = 0; k < nbins; ++k) {
clean(enrg, maga, mag2, mag4);             // zero this bin's accumulators
for (i = 0; i < mstp; ++i) {
// Restart from the saved equilibrated configuration at Ti for every sweep.
initial_temperature(); probability(host_prob);
cudaMemcpyToSymbol(Boltz,host_prob,(4*D+1)*sizeof(v_type),0,cudaMemcpyHostToDevice);
checkout_configuration(spins, saved_spins);
cudaMemcpy(dev_spins, spins, N*sizeof(spin_t), cudaMemcpyHostToDevice);
for(j = 0; j < steps; ++j) {
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 0);
cudaDeviceSynchronize();
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 1);
cudaDeviceSynchronize();
} // i
cudaMemcpy(spins, dev_spins, N*sizeof(spin_t), cudaMemcpyDeviceToHost);
save_configuration(spins, saved_spins);
n = 0;
// Quench from Ti towards Tf over tau steps (one sweep per step),
// measuring every `interval` steps.
for (t = 0; t <= tau; ++t) {
quench_temperature(t); probability(host_prob);
cudaMemcpyToSymbol(Boltz,host_prob,(4*D+1)*sizeof(v_type),0,cudaMemcpyHostToDevice);
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 0);
cudaDeviceSynchronize();
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 1);
cudaDeviceSynchronize();
if (t % interval == 0) {
cudaMemcpy(spins, dev_spins, N*sizeof(spin_t), cudaMemcpyDeviceToHost);
measure(spins, nnbors, n++, enrg, maga, mag2, mag4);
} // if
} // t
} // i-mstp
write_data(t_table, T_table, enrg, maga, mag2, mag4);  // averages accumulators over mstp
write_conf(spins);
check_stop();                              // allow external early termination via stop.txt
} // k-bin
if (devStates != NULL) { cudaFree(devStates); devStates = NULL; }
if (dev_spins != NULL) { cudaFree(dev_spins); dev_spins = NULL; }
if (nnbors != NULL) { free(nnbors); nnbors = NULL; }
if (t_table != NULL) { free(t_table); t_table = NULL; }
if (T_table != NULL) { free(T_table); T_table = NULL; }
if (spins != NULL) { free(spins); spins = NULL; }
if (saved_spins != NULL) { free(saved_spins); saved_spins = NULL; }
if (host_prob != NULL) { free(host_prob); host_prob = NULL; }
if (enrg != NULL) { free(enrg); enrg = NULL; }
if (maga != NULL) { free(maga); maga = NULL; }
if (mag2 != NULL) { free(mag2); mag2 = NULL; }
if (mag4 != NULL) { free(mag4); mag4 = NULL; }
return 0;
} // main
/*
 * Average the accumulated observables over the mstp measurement sweeps and
 * append one line per quench point to "data.dat".
 * Columns: t, T(t), <E>, <|m|>, <m^2>, <m^4>.
 * NOTE: the accumulators are divided in place, so this must be called
 * exactly once per bin, after all measure() calls of that bin.
 */
void write_data(int *t_table,v_type *T_table,double *enrg,double *maga,double *mag2,double *mag4)
{
int n;
FILE *ofptr;
double dmstp = (double) mstp;
ofptr = fopen("data.dat","a");
// Fix: previously an fopen failure led to fprintf/fclose on NULL;
// follow the error convention used elsewhere in this file.
if (ofptr == NULL) { printf("can not open data.dat"); exit(0); }
for(n = 0; n < tot_pnt; ++n) {
enrg[n] /= dmstp;
maga[n] /= dmstp;
mag2[n] /= dmstp;
mag4[n] /= dmstp;
fprintf(ofptr,"%8i %12.8f %14.8f %12.8f %12.8f %12.8f\n",
t_table[n], T_table[n], enrg[n], maga[n], mag2[n], mag4[n]);
} // n
fclose(ofptr);
} // write_data
/* Reset all per-quench-point accumulators to zero before a new bin. */
void clean(double *enrg, double *maga, double *mag2, double *mag4)
{
for (int idx = 0; idx < tot_pnt; ++idx) {
enrg[idx] = 0.0;
maga[idx] = 0.0;
mag2[idx] = 0.0;
mag4[idx] = 0.0;
}
} // clean
/*
 * Host-side measurement at quench sample n: accumulates into
 * enrg/maga/mag2/mag4 the bond energy and |m|, m^2, m^4, each averaged
 * over the N64bit bit-planes packed into every spin word.
 * Only the +x, +y, +z neighbours (nnbors slots 0, 2, 4) are visited so
 * each lattice bond is counted exactly once.
 */
void measure(spin_t *spins,int *nnbors,int n,double *enrg,double *maga,double *mag2,double *mag4)
{
int E = 0, j, b;
int m[N64bit];                      // per-bit-plane magnetization counters
bit64 mask, ss0, ss2, ss4;
double dN = (double) N, dm , local_ma, local_m2, local_m4, d64 = (double) N64bit;
for (b = 0; b < N64bit; ++b) m[b] = 0;
for (j = 0; j < N; ++j) {
// XOR of a spin word with a neighbour's word: a set bit marks an
// anti-aligned pair in that bit-plane.
ss0 = spins[j] ^ spins[ nnbors[j*2*D+0] ];
ss2 = spins[j] ^ spins[ nnbors[j*2*D+2] ];
ss4 = spins[j] ^ spins[ nnbors[j*2*D+4] ];
for (b = 0; b < N64bit; ++b) {
mask = (MyBit << b);
m[b] += ( (spins[j] & mask) ? 1 : -1 );
// dE <--> 2 \sigma^B_i XOR \sigma^B_j - 1
E += ( (ss0 & mask) ? 1 : -1 );
E += ( (ss2 & mask) ? 1 : -1 );
E += ( (ss4 & mask) ? 1 : -1 );
} // b
} // j
enrg[n] += ((double) E)/(dN * d64);
local_ma = local_m2 = local_m4 = 0.0;
for (b = 0; b < N64bit; ++b) {
dm = (double) m[b]/dN;              // magnetization per site of bit-plane b
local_ma += fabs(dm);
local_m2 += pow(dm,2.0);
local_m4 += pow(dm,4.0);
} //b
maga[n] += local_ma/d64;
mag2[n] += local_m2/d64;
mag4[n] += local_m4/d64;
} // measure
void initial_temperature() { T = Ti; } // reset the global temperature to the starting value Ti
// Temperature at quench step t: T(t) = Tf + vel*(tau-t)^r.  With vel as
// set in set_parameters() this gives T(0) = Ti and T(tau) = Tf.
void quench_temperature(int t)
{
T = Tf + vel * pow((v_type)(tau-t), r);
} //
/* Set up the simulation: parse input.in, then derive run parameters. */
void initialize()
{
read_file();        // 1) read-in input parameters
set_parameters();   // 2) set simulation parameters
} // initialize
/*
 * Fill h_prob with the Boltzmann acceptance factors exp(2*e/T) for every
 * possible local field e in [-6, 6]; h_prob[e+6] holds the value for e
 * (2*D = 6 neighbours on the 3D lattice).
 */
void probability(v_type *h_prob)
{
const v_type beta = 1.0e0/T;
// e <--> 2 \sigma^B_i XOR \sigma^B_j - 1
int e = -6;
while (e <= 6) {
h_prob[e+6] = exp(2.0 * beta * (v_type) e);
++e;
}
} // probability
/* Restore the working spins from the previously saved configuration. */
void checkout_configuration(spin_t *spins, spin_t *saved_spins)
{
int site = 0;
while (site < N) {
spins[site] = saved_spins[site];
++site;
}
} // checkout_configuration
/* Copy the working spins into the backup buffer for later restarts. */
void save_configuration(spin_t *spins, spin_t *saved_spins)
{
int site = 0;
while (site < N) {
saved_spins[site] = spins[site];
++site;
}
} // save_configuration
/* Choose the starting configuration: random when init == 0, otherwise
 * read a previously written spins.dat. */
void configuration(spin_t *spins)
{
if (init != 0) {
read_conf(spins);
return;
}
random_conf(spins);
} // configuration
/* Initialize every site word at random: each of the N64bit bits (one per
 * packed replica) is set with probability 1/2. */
void random_conf(spin_t *spins)
{
for (int site = 0; site < N; ++site) {
spin_t word = 0;
for (int bit = 0; bit < N64bit; ++bit) {
const double u = ((double)rand())/((double)RAND_MAX);
if (u > 0.5) word ^= (MyBit << bit);
} // bit
spins[site] = word;
} // site
} // random_conf
/* Load a spin configuration (one 64-bit word per line) from spins.dat. */
void read_conf(spin_t *spins)
{
FILE *fptr = fopen("spins.dat", "rt");
if (fptr == NULL) { printf("can not open spins.dat"); exit(0); }
int site = 0;
while (site < N) {
fscanf(fptr, "%llu", &spins[site]);
++site;
}
fclose(fptr);
} // read_conf
/* Write the spin configuration to spins.dat, one 64-bit word per line. */
void write_conf(spin_t *spins)
{
FILE *ofptr;
int i;
ofptr = fopen("spins.dat","w");
// Fix: fopen result was unchecked, so a failure crashed in fprintf/fclose;
// mirror the error handling used by read_conf().
if (ofptr == NULL) { printf("can not open spins.dat"); exit(0); }
for (i = 0; i < N; ++i) {
fprintf(ofptr,"%llu\n",spins[i]);
}
fclose(ofptr);
} // write_conf
/*
 * Build the nearest-neighbour table of an L x L x L periodic cubic
 * lattice.  For site j, nnbors[j*2*D + k] holds the linear index of
 * neighbour k in the order +x, -x, +y, -y, +z, -z.
 */
void lattice(int *nnbors)
{
const int LL = L * L;
for (int z = 0; z < L; ++z)
for (int y = 0; y < L; ++y)
for (int x = 0; x < L; ++x) {
const int site = z * LL + y * L + x;
int *nb = &nnbors[site * 2 * D];
nb[0] = z * LL + y * L + (x + 1) % L;          // +x
nb[1] = z * LL + y * L + (x - 1 + L) % L;      // -x
nb[2] = z * LL + ((y + 1) % L) * L + x;        // +y
nb[3] = z * LL + ((y - 1 + L) % L) * L + x;    // -y
nb[4] = ((z + 1) % L) * LL + y * L + x;        // +z
nb[5] = ((z - 1 + L) % L) * LL + y * L + x;    // -z
}
} // lattice
/*
 * Precompute the (step, temperature) pairs at which measurements are
 * taken: every `interval` steps of the tau-step quench.  The assert
 * guarantees the tables hold exactly tot_pnt entries.
 */
void tT_tables(int *t_table, v_type *T_table)
{
int n = 0;
initial_temperature();
for (int t = 0; t <= tau; ++t) {
quench_temperature(t);
if (t % interval != 0) continue;
t_table[n] = t;
T_table[n] = T;
++n;
} // t
assert(n == tot_pnt);
} // tT_tables
/*
 * Derive run-time constants after read_file(): system size, quench
 * temperature window and velocity, and the measurement grid.
 */
void set_parameters()
{
v_type Tc = 1.0/0.22169;  // NOTE(review): 0.22169 looks like the 3D Ising beta_c -- confirm
N = L * L * L;
Ti = 1.5 * Tc;            // quench starts above Tc
Tf = 0.9 * Tc; // 1.0
r = 1.0;                  // quench exponent used by quench_temperature()
vel = (Ti-Tf)/pow((v_type) tau, r);  // chosen so T(0) = Ti and T(tau) = Tf
tot_pnt = qpnt + 1;
interval = tau/qpnt;      // integer division; read_file() asserts tau % qpnt == 0
} // set_parameters
/*
 * Parse input.in into the global parameters:
 *   line 1: L                      (linear lattice size)
 *   line 2: tau qpnt               (quench length, number of sampled points)
 *   line 3: init istp mstp nbins   (start-config flag, equilibration sweeps,
 *                                   measurement sweeps, number of bins)
 * Exits on a missing file; requires tau to be a multiple of qpnt.
 */
void read_file()
{
FILE *fptr;
fptr = fopen("input.in", "rt");
if (fptr == NULL) {
printf("can not open input.in");
exit(0);
}
fscanf(fptr,"%i", &L);
fscanf(fptr,"%i %i", &tau, &qpnt);
fscanf(fptr,"%i %i %i %i", &init, &istp, &mstp, &nbins);
fclose(fptr);
assert( tau%qpnt == 0 );   // interval = tau/qpnt must divide evenly
} // read_file
/*
 * Poll stop.txt: when it holds a nonzero integer, append the word
 * "stopped" to it and terminate the program; otherwise (or when the
 * file is missing) just return.
 */
void check_stop()
{
int flag = 0;
FILE *fp = fopen("stop.txt", "rt");
if (fp == NULL) {
printf("can not open stop.txt");
return;
}
fscanf(fp,"%i", &flag);
fclose(fp);
if (flag == 0) return;
fp = fopen("stop.txt","a");
fprintf(fp,"%s \n", "stopped");
fclose(fp);
exit(0);
} // check_stop
|
4,922 |
#include <iostream>
using namespace std;
// Writes the constant 3 into the single device int pointed to by n.
__global__ void kernel( int* n) { n[0] = 3; }
int main()
{
int n;       // host copy of the kernel's result
int* d_n;    // device-side int the kernel writes into
// store in d_n the address of a memory
// location on the device
cudaMalloc( (void**)&d_n, sizeof(int));
kernel<<<1,1>>>(d_n);
// Blocking copy: also synchronizes with the asynchronous launch above.
cudaMemcpy( &n, d_n, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_n);
cout << "Hello, CUDA! " << n << endl;
return 0;
}
|
4,923 | // The cuda device properties
#include <stdio.h>
// Prints the key hardware properties of CUDA device 0.
int main() {
cudaDeviceProp dev_prop;
// Fix: the query result was ignored; report failure instead of printing
// an uninitialized struct when no CUDA device is available.
cudaError_t err = cudaGetDeviceProperties(&dev_prop, 0);
if (err != cudaSuccess) {
printf("cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
return 1;
}
printf("Compute capability %i and %i\n", dev_prop.major, dev_prop.minor);  // fix: typo'd message
printf("SP count %i\n", dev_prop.multiProcessorCount);
printf("The maximum amount of threads per Block %i\n",
dev_prop.maxThreadsPerBlock);
printf("The maximum amount of threads per SM %i\n",
dev_prop.maxThreadsPerMultiProcessor);
printf("The maximum global memory is %zu\n", dev_prop.totalGlobalMem);
printf("The total number of SM is %i\n", dev_prop.multiProcessorCount);
printf("The number of register per block is %i\n", dev_prop.regsPerBlock);
printf("The amount of shared memory per block %zu\n",
dev_prop.sharedMemPerBlock);
printf("The amount of shared memory per SM %zu\n",
dev_prop.sharedMemPerMultiprocessor);
// Fix: DDR transfers twice per clock, so peak bandwidth needs the factor 2
// (clock rate is in kHz, bus width in bits).
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6);
return 0;
}
|
4,924 | #include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"
#define ROWSIZE 8192 // Number of Columns
#define COLSIZE 8192 // Number of Rows
#define SIZE (ROWSIZE * COLSIZE) // total Size
#define BLOCKWORK 2
#define num_threads 32 // number of threads per block
int num_blocks = SIZE/(num_threads*BLOCKWORK) + (SIZE%(num_threads*BLOCKWORK) == 0 ? 0:1); // total number of blocks
//#define NUMBLOCK 1
//#define BLOCKWIDTH 16
//#define NUMTHREAD 4
//#define ASIZE 4
/* Print the COLSIZE x ROWSIZE image row by row, tab-separated. */
void printArray(int * image){
for (int row = 0; row < COLSIZE; ++row)
{
for (int col = 0; col < ROWSIZE; ++col)
printf("%d\t", image[row * ROWSIZE + col]);
printf("\n");
}
printf("\n\n");
}
// NOTE(review): despite the name, this does NOT compute a full row prefix
// sum: each element only adds its immediate left neighbour from the *input*
// (img[index-1]); the commented-out variant below that reads
// integral[index-1] would have a cross-thread ordering race instead.
// Confirm the intended semantics before relying on the output.
__global__ void prefixSum(int * img, int * integral)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index;
for (int i=0; i<ROWSIZE; i++)
{
index = idx + (i*num_threads*BLOCKWORK);
if (index >= SIZE)break;
if (index % ROWSIZE == 0)
{
// first element of a row: copy through unchanged
integral[index] = img[index];
}
else
{
integral[index] = img[index] + img[index-1];
}
// NOTE(review): overwriting the loop counter with a (much larger) element
// index effectively ends the loop after one pass per thread -- looks like
// a bug.  Also, threads that hit the break above skip the barrier below,
// making it a divergent __syncthreads().  Both need confirmation.
i = index;
__syncthreads();
}
/*
if (idx < SIZE)
{
if (idx % ROWSIZE == 0)
{
integral[idx] = img[idx];
}
else
{
integral[idx] = img[idx] + integral[idx-1];
}
}
*/
//__syncthreads();
}
// Adds, for every element, the value one row above taken from the *input*
// image (img[index - ROWSIZE]); the first row is copied through unchanged.
// Fix: the original guard was `index == 0`, so elements 1..ROWSIZE-1 of the
// first row read img[index - ROWSIZE] out of bounds; the guard now covers
// the whole first row (matching the integral[i][j] = img + integral[i-1][j]
// recurrence sketched in the comments).
// NOTE(review): as in prefixSum, `i = index` clobbers the loop counter and
// the __syncthreads() is divergent after the break -- left untouched,
// confirm against the author's intent.
__global__ void columnSum(int * img, int * integral)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index;
for (int i=0; i<COLSIZE; i++)
{
index = idx + (i*num_threads*BLOCKWORK);
if (index >= SIZE)break;
if (index < ROWSIZE)
{
integral[index] = img[index];
}
else
{
// current pixel (col) = original pixel (col) + current col - 1
// integral[i][j] = img[i][j] + integral[i-1][j]
integral[index] = img[index] + img[index - ROWSIZE];
}
i = index;
__syncthreads();
}
/*
if (idx < SIZE)
{
if (idx == 0)
{
integral[idx] = img[idx];
}
else
{
// current pixel (col) = original pixel (col) + current col - 1
// integral[i][j] = img[i][j] + integral[idx - ROWSIZE]
integral[idx] = img[idx] + integral[idx - ROWSIZE];
}
}
*/
//__syncthreads();
}
/*
 * Driver: fills a ROWSIZE x COLSIZE gradient image, times the row pass and
 * the column pass on the GPU with CUDA events, and prints both times.
 */
int main()
{
int *IMG_HOST, *INTG_HOST;
int *IMG_DEV, *INTG_DEV;
float timePassed;                     // elapsed kernel time in ms
size_t size = SIZE*sizeof(int);
cudaSetDevice(1);
IMG_HOST = (int *)malloc(size);
INTG_HOST = (int *)malloc(size);
cudaMalloc((void **) &IMG_DEV, size);
cudaMalloc((void **) &INTG_DEV, size);
// Deterministic test pattern.
int i,j;
for (i = 0; i < COLSIZE; ++i)
{
for (j = 0; j < ROWSIZE; ++j)
{
IMG_HOST[i*ROWSIZE + j] = i*2 + j*4;
}
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(IMG_DEV, IMG_HOST, size, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
prefixSum <<< num_blocks, num_threads*BLOCKWORK >>> (IMG_DEV, INTG_DEV);
cudaDeviceSynchronize();   // fix: cudaThreadSynchronize() is deprecated
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timePassed, start,stop);
printf("Time Spent Row: %0.5f ms\n", timePassed);
//#################################################################//
cudaMemcpy(INTG_HOST, INTG_DEV, size, cudaMemcpyDeviceToHost);
cudaEventRecord(start, 0);
columnSum <<< num_blocks, num_threads*BLOCKWORK >>> (IMG_DEV, INTG_DEV);
cudaDeviceSynchronize();   // fix: deprecated API replaced
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timePassed, start,stop);
printf("Time Spent Column: %0.5f ms\n", timePassed);
cudaMemcpy(INTG_HOST, INTG_DEV, size, cudaMemcpyDeviceToHost);
//Free up the resources
free(IMG_HOST);
free(INTG_HOST);
cudaFree(IMG_DEV);
cudaFree(INTG_DEV);
// Fix: the timing events were never destroyed.
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
4,925 | #include <stdio.h>
// Heap-allocates one int, does trivial arithmetic, prints the result (5).
void test1() {
int* a = new int;
*a = 3;
*a = *a + 2;
printf("%d\n", *a);
delete a;   // fix: allocation was leaked
}
// Allocates two ints with malloc and assigns to them; exits on OOM.
void test2() {
int* a = (int*)malloc(sizeof(int));
int* b = (int*)malloc(sizeof(int));
if (!(a && b)) {
printf("Out of memory\n");
exit(-1);
}
*a = 2;
*b = 3;
// fix: both allocations were leaked
free(a);
free(b);
}
// Allocates a 1000-int array and writes 5 at offset 5 via pointer arithmetic.
void test3() {
int i = 5;
int* a = (int*)malloc(1000*sizeof(int));
if (!a) {
printf("Out of memory\n");
exit(-1);
}
*(i + a) = i;   // equivalent to a[5] = 5
free(a);        // fix: array was leaked
}
// Builds a 3 x 100 jagged int array and writes one element.
// Fixes: the row allocations used sizeof(int*) instead of sizeof(int)
// (over-allocation on 64-bit), no OOM checks, and everything leaked.
void test4() {
int** a = (int**)malloc(3*sizeof(int*));
if (!a) {
printf("Out of memory\n");
exit(-1);
}
for (int r = 0; r < 3; ++r) {
a[r] = (int*)malloc(100*sizeof(int));
if (!a[r]) {
printf("Out of memory\n");
exit(-1);
}
}
a[1][1] = 5;
for (int r = 0; r < 3; ++r) free(a[r]);
free(a);
}
// Reads an int from stdin and reports whether it is zero.
// Fixes: the scanf result was ignored (reading *a after a failed scan is
// undefined behaviour), there was no OOM check, and the int leaked.
void test5() {
int* a = (int*)malloc(sizeof(int));
if (!a) {
printf("Out of memory\n");
exit(-1);
}
if (scanf("%d", a) == 1 && !*a) {
printf("Value is 0\n");
}
free(a);
}
void question1() {
/*
y_1[n] = x[n - 1] + x[n] + x[n + 1]
y_2[n] = y_2[n - 2] + y_2[n - 1] + x[n]
The second implementation would be harder because we would need to synchronize the GPU
code due to the sequential nature of the statement. The first implementation is better
because it does not require synchronization of threads and can all be done in parallel.
*/
}
void question2() {
/*
y[n] = c * x[n] + (1 - c) * y[n - 1]
The code is not capable of running in parallel because it requires the previous iteration
of the function. If c is close to 1, we could ignore the "y[n - 1]" part of the code
because its weight (1 - c) will be close to 0. In this case, if we drop the last part of
the calculation, we could easily implement the code in a parallel manner.
*/
}
// Runs every memory exercise in order.
// Note: test5 blocks waiting for an integer on stdin.
int main(void) {
test1();
test2();
test3();
test4();
test5();
return 0;
}
4,926 | #include "includes.h"
extern "C"
{
}
// Element-wise optimizer step on an N x M parameter matrix: 2D launch,
// one thread per element, flattened as index = j*N + i.
// m holds an exponential moving average of the gradient; v keeps a decayed
// running max of |grad| floored at 1e-16, and the update subtracts the
// bias-corrected m/v scaled by alpha -- an Adamax-style rule (TODO confirm
// against the host-side optimizer this pairs with).
// Uses round-to-nearest float intrinsics (__fmaf_rn, __fsub_rn, ...) and
// the fast __powf/__fdividef approximations throughout.
__global__ void updateParams(int N, int M, float alpha, float beta1, float beta2, float t, float *PARAMS, float *GRADS, float *m, float *v)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = j*N + i;
float beta1r = __fsub_rn(1.0, beta1);   // 1 - beta1
// -alpha / (1 - beta1^t): bias-corrected negative step size
float alphar = __fmul_rn(-alpha, __frcp_rn(__fsub_rn(1.0, __powf(beta1, t))));
if (i < N && j < M)
{
// m <- beta1*m + (1 - beta1)*grad
m[index] = __fmaf_rn(beta1, m[index], __fmul_rn(beta1r, GRADS[index]));
// v <- max(beta2*v, |grad|, 1e-16)
v[index] = fmaxf(fmaxf(__fmul_rn(beta2, v[index]), fabsf(GRADS[index])), 1.0e-16);
// params <- params - alpha/(1 - beta1^t) * m/v
PARAMS[index] = __fmaf_rn(alphar,__fdividef(m[index], v[index]), PARAMS[index]);
//m[index] = beta1*m[index] + (1 - beta1)*GRADS[index];
//float a = beta2*v[index];
// float b = ((GRADS[index])>(0))?(GRADS[index]):(-GRADS[index]);
//float c = fmaxf(a, fabsf(GRADS[index])); // ((a)>(fabsf(GRADS[index]))?(a):(b);
//v[index] = fmaxf(c, 1.0e-16); // ((c)>(1.0e-16))?(c):(1.0e-16);
//float tmp = alpha/(1.0-powf(beta1, t));
//PARAMS[index] = PARAMS[index] - (alpha/(1.0-__powf(beta1, t)))*m[index]/v[index];
//PARAMS[index] = tmp*m[index]/v[index];
}
}
4,927 | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <math.h>
#define N 2000
using namespace std;
/*
 * Fill an N x N matrix according to selector c: 's' -> (int)sin(i),
 * 'c' -> (int)cos(i), 'z' -> zeros; any other selector leaves the matrix
 * untouched.  NOTE(review): m is an int matrix, so sin(i)/cos(i) values in
 * [-1, 1] truncate toward zero (almost every entry becomes 0) -- confirm
 * this is intentional rather than a float matrix being meant.
 */
void fill_matrix(int *m,char c){
cout<<"Llenamos matriz "<<endl;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
switch(c){
case 's':
m[i*N+j] = sin(i);break;   // truncated to int
case 'c':
m[i*N+j] = cos(i);break;   // truncated to int
case 'z':
m[i*N+j] = 0;break;
default: return;
}
}
}
return;
}
//Multiplicacion en CPU
/*
 * Sequential matrix product on the host, accumulated into m3.  Note the
 * index pattern: m3[i*N+j] += m1[j*N+k]*m2[k*N+i], i.e. the result is
 * stored transposed relative to the usual C[i][j] layout -- the GPU kernel
 * below uses the same convention.  Always returns 0.
 */
int multiply_seq(int *m1,int *m2,int *m3){
cout<<"Multiplicamos con el algoritmo secuencial: \n"<<endl;
for(int row=0; row<N; ++row){
for(int col=0; col<N; ++col){
int acc = m3[row*N+col];
for(int k=0; k<N; ++k)
acc += m1[col*N+k] * m2[k*N+row];
m3[row*N+col] = acc;
}
}
return 0;
}
//Multiplicacion en GPU
// GPU matrix product, one output element per thread (2D launch):
// c[row*N+col] = sum_k a[row*N+k]*b[k*N+col], with row taken from the
// y index and col from the x index; out-of-range threads return early.
__global__ void multiply_par(int *a, int *b, int *c) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col >= N || row >= N) return;
int acc = 0;
for (int k = 0; k < N; ++k)
acc += a[row*N + k] * b[k*N + col];
c[row*N + col] = acc;
}
//Imprimir matrices
// Print an N x N matrix, one bracketed element at a time, row per line.
void print_matrix(int *m){
for(int row=0; row<N; ++row){
for(int col=0; col<N; ++col)
cout<<"["<< m[row*N+col] <<"]";
cout<<endl;
}
cout<<endl;
return;
}
/*
 * Driver: builds N x N int matrices, multiplies them sequentially on the
 * CPU and in parallel on the GPU, and reports the elapsed time of each.
 */
int main(){
// Host matrices (fix: element size is sizeof(int), not sizeof(int *)).
int *h_A = (int *)malloc(N*N*sizeof(int));
int *h_B = (int *)malloc(N*N*sizeof(int));
int *h_C = (int *)malloc(N*N*sizeof(int));
clock_t t_i,t_f;
float tiempo;
fill_matrix(h_A,'s');
fill_matrix(h_B,'c');
fill_matrix(h_C,'z');
t_i=clock();
multiply_seq(h_A,h_B,h_C);
t_f=clock();
tiempo= ((double)t_f - t_i) / CLOCKS_PER_SEC;
cout<<"El tiempo de trabajo con el algoritmo secuencial es de ";
printf("%f\n\n",tiempo);
cout<<"Multiplicamos con el algoritmo paralelo: \n"<<endl;
// Device buffers.
int *d_A,*d_B,*d_C;
int size = N * N * sizeof(int);
cudaMalloc((void **) &d_A, size);
cudaMalloc((void **) &d_B, size);
cudaMalloc((void **) &d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Fix: the previous launch used a single block of dim3(N*N, 2) threads,
// which exceeds the 1024-threads-per-block hardware limit for any useful N
// and made the kernel launch fail.  Use a 2D grid of 16x16 blocks covering
// the full N x N output instead.
dim3 dimBlock(16, 16);
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
(N + dimBlock.y - 1) / dimBlock.y);
t_i=clock();
multiply_par<<<dimGrid, dimBlock>>>(d_A,d_B,d_C);
cudaDeviceSynchronize();   // fix: launches are asynchronous; wait before stopping the clock
t_f=clock();
tiempo= ((double)t_f - t_i) / CLOCKS_PER_SEC;
cout<<"El tiempo de trabajo con el algoritmo en paralelo es de ";
printf("%f\n\n",tiempo);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
4,928 | #include<cuda_runtime.h>
#include<cufft.h>
#include<cufftXt.h>
#include<stdio.h>
#include<string>
#include<math.h>
/*
 * Reads *size bytes (plus a 1000-byte lead-in for the chunk) from file_path,
 * FFTs both the full buffer and a 1e6-sample chunk starting 1000 samples in,
 * prints both half-spectra, and returns the host spectrum of the full
 * buffer.  The caller owns (and must free) the returned buffer.
 * NOTE(review): `shrink` is currently ignored (the size-rounding code is
 * commented out), and the in-place R2C transforms reinterpret the
 * interleaved complex buffer as reals, so the transform consumes the
 * zeroed imaginary slots as input samples -- confirm this is intended.
 */
cufftComplex* read_file(std::string file_path, size_t * size, bool shrink){
size_t size2 = 1000000;
FILE* file;
file = fopen(file_path.c_str(), "r");
if(file == NULL){
printf("Error: Couldn't open file %s\n", file_path.c_str());
exit(EXIT_FAILURE);
}
cufftComplex* data_cufft = (cufftComplex*)malloc(*size*sizeof(cufftComplex));
cufftComplex* chunk_cufft = (cufftComplex*)malloc(size2*sizeof(cufftComplex));
int offset = 1000;
unsigned char* data = (unsigned char*)malloc((*size +offset)*sizeof(char));
// Fix: verify the file actually holds the requested number of bytes.
size_t got = fread(data, 1,(( *size)+offset),file);
fclose(file);
if(got < (*size)+offset){
printf("Error: %s holds only %zu bytes\n", file_path.c_str(), got);
exit(EXIT_FAILURE);
}
for(size_t i =0; i < *size; i ++){
data_cufft[i].x = (float) data[i];
//we're dealing with real numbers so set phase to 0
data_cufft[i].y = 0;
if(i<size2){
chunk_cufft[i].x = (float)data[i+offset];
chunk_cufft[i].y = 0;
}
}
free(data);                        // fix: raw byte buffer was leaked
cufftHandle plan1;
cufftHandle plan2;
cufftPlan1d(&plan1, (int)*size, CUFFT_R2C, 1);
cufftPlan1d(&plan2,(int)size2, CUFFT_R2C, 1);
cufftComplex* d_data_cufft;
cufftComplex* d_chunk_cufft;
cudaMalloc((void**)&d_data_cufft, *size*sizeof(cufftComplex));
cudaMalloc((void**)&d_chunk_cufft, size2*sizeof(cufftComplex));
cudaMemcpy(d_data_cufft, data_cufft, *size*sizeof(cufftComplex), cudaMemcpyHostToDevice);
cudaMemcpy(d_chunk_cufft, chunk_cufft, size2*sizeof(cufftComplex), cudaMemcpyHostToDevice);
cufftExecR2C(plan1,(cufftReal*)d_data_cufft, d_data_cufft);
cufftExecR2C(plan2,(cufftReal*)d_chunk_cufft, d_chunk_cufft);
cudaMemcpy(data_cufft, d_data_cufft, (*size/2+1)*sizeof(cufftComplex), cudaMemcpyDeviceToHost);
cudaMemcpy(chunk_cufft, d_chunk_cufft, (size2/2+1)*sizeof(cufftComplex), cudaMemcpyDeviceToHost);
for(size_t i =0; i <(size2/2+1); i ++){
printf("x1:%f , y1:%f, abs: %f\n", data_cufft[i].x, data_cufft[i].y, sqrt(data_cufft[i].x*data_cufft[i].x + data_cufft[i].y*data_cufft[i].y));
printf("x2:%f , y2:%f, abs: %f\n", chunk_cufft[i].x, chunk_cufft[i].y, sqrt(chunk_cufft[i].x*chunk_cufft[i].x + chunk_cufft[i].y*chunk_cufft[i].y));
printf("\n");
}
// Fix: FFT plans, device buffers and the chunk spectrum were all leaked.
cufftDestroy(plan1);
cufftDestroy(plan2);
cudaFree(d_data_cufft);
cudaFree(d_chunk_cufft);
free(chunk_cufft);
return data_cufft;
}
// Entry point: FFTs the file named on the command line.
int main(int argc, char* argv[]){
// Fix: argv[1] was dereferenced without checking it exists.
if (argc < 2) {
printf("usage: %s <data file>\n", argv[0]);
return 1;
}
size_t s = 1000000;
cufftComplex* spectrum = read_file(argv[1], &s, false);
free(spectrum);   // fix: read_file returns a malloc'd buffer the caller owns
return 0;
}
|
// Device helper: C[i] = A[i] + scalar at the calling thread's global 1D
// index; threads past `length` do nothing.  Presumably invoked from a
// wrapper kernel with a matching launch configuration.
template<typename T>
__device__ void vectorAddScalar(const T* A, const T scalar, T* C, const int length) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= length) return;
C[idx] = A[idx] + scalar;
}
// Device helper: C[i] = A[i] - scalar at the calling thread's global 1D
// index; threads past `length` do nothing.
template<typename T>
__device__ void vectorSubScalar(const T* A, const T scalar, T* C, const int length) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= length) return;
C[idx] = A[idx] - scalar;
}
// Device helper: C[i] = scalar - A[i] at the calling thread's global 1D
// index; threads past `length` do nothing.
template<typename T>
__device__ void scalarSubVector(const T* A, const T scalar, T* C, const int length) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= length) return;
C[idx] = scalar - A[idx];
}
// Device helper: C[i] = A[i] * scalar at the calling thread's global 1D
// index; threads past `length` do nothing.
template<typename T>
__device__ void vectorTimesScalar(const T* A, const T scalar, T* C, const int length) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= length) return;
C[idx] = A[idx] * scalar;
}
// Device helper: C[i] = A[i] / scalar at the calling thread's global 1D
// index; threads past `length` do nothing.  No zero-divisor check.
template<typename T>
__device__ void vectorDivScalar(const T* A, const T scalar, T* C, const int length) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= length) return;
C[idx] = A[idx] / scalar;
}
// Device helper: C[i] = scalar / A[i] at the calling thread's global 1D
// index; threads past `length` do nothing.  No zero-element check.
template<typename T>
__device__ void scalarDivVector(const T* A, const T scalar, T* C, const int length) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= length) return;
C[idx] = scalar / A[idx];
}
4,930 | #include "includes.h"
// First solution with global memory
// Shared memory residual calculation
// Reduction code from CUDA Slides - Mark Harris
// One Jacobi relaxation step on an N x N grid, one thread per point:
// interior points become the average of their four neighbours, and the
// squared per-point change is written to `residual`.  Border points (and
// out-of-range threads) are left untouched.
__global__ void gpu_Heat (float *u, float *utmp, float *residual,int N) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row <= 0 || row >= N-1 || col <= 0 || col >= N-1) return;
int idx = row*N + col;
float next = 0.25f * (u[idx - 1] +    // left
u[idx + 1] +                          // right
u[idx - N] +                          // top
u[idx + N]);                          // bottom
utmp[idx] = next;
float diff = next - u[idx];
residual[idx] = diff * diff;
}
4,931 | #include <stdio.h>
#include <math.h>
#define THRDS_P_BLK 256
__global__
void saxpy(int n, float a, float *x, float *y)
{
// y <- a*x + y, one element per thread; guard the grid tail.
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n) return;
y[idx] = a*x[idx] + y[idx];
}
// Normalizes inputArr into x0 via (x - maxx)/range and accumulates the sum
// of the normalized values into *mean (the caller divides by size).
// atomicAdd on double requires compute capability 6.0+.
__global__
void normalization_and_sum(int size, double maxx, double range, double *inputArr, double *x0, double *mean)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < size)
{
x0[i] = (inputArr[i] - maxx) / range;
atomicAdd(mean,x0[i]);
}
}
// Accumulates the sum of squared deviations of x0[] from *mean into
// *myStd (caller divides by size-1 and takes the square root).
// atomicAdd on double requires compute capability 6.0+.
__global__
void compute_std(int size, double *x0, double *mean, double *myStd)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < size)
{
double dev = x0[idx] - *mean;
atomicAdd(myStd, dev * dev);
}
}
// Per-element weight w[i]*exp(x[i]/sigma); atomically accumulates the
// weight sum into *sumw and the weighted-x sum into *sumxw.
// atomicAdd on double requires compute capability 6.0+.
__global__
void scale_likelihood(int size, double sigma, double *sumxw, double *sumw, double *x, double *w)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= size) return;
double wi = w[idx] * exp(x[idx]/sigma);
atomicAdd(sumw, wi);
atomicAdd(sumxw, wi * x[idx]);
}
// Accumulates each observation's log-likelihood contribution into *nlogL
// (negated afterwards by the host wrapper) and the three distinct terms of
// the 2x2 Hessian into nH11/nH12/nH22, weighted by per-observation
// frequency.  atomicAdd on double requires compute capability 6.0+.
// NOTE(review): `size` is declared double while the guard compares the int
// thread index against it -- works via implicit conversion, but confirm it
// was not meant to be int.
__global__
void neg_log_likelihood(double size, double mu, double sigma, double logSigma, double *data, double *censoring, double *frequency, double *nH11, double *nH12, double *nH22, double *nlogL)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < size)
{
double z, expz, L, unc;
z = (data[i]-mu)/sigma;      // standardized residual
expz = exp(z);
L = (z-logSigma)*(1-censoring[i]-expz);
unc = (1-censoring[i]);      // 1 for uncensored observations, 0 otherwise
atomicAdd(nlogL,frequency[i]*L);
atomicAdd(nH11,frequency[i]*expz);
atomicAdd(nH12, frequency[i] * ((z + 1) * expz - unc));
atomicAdd(nH22, frequency[i] * (z *(z + 2) * expz - ((2 * z + 1) *unc)));
}
}
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Host wrapper: computes the negative log-likelihood (into *nlogL) and the
 * 2x2 asymptotic covariance matrix (into acov[0..3], row-major) of a fit
 * with location mu = weibulparms[0] and scale sigma = weibulparms[1] over
 * `size` observations, each with a censoring flag and a frequency weight.
 * NOTE(review): no CUDA error checking anywhere -- a failed alloc or
 * launch silently yields garbage results.
 */
void runKernels_NegLogLikelihood(double* nlogL, double* acov, double* weibulparms, double* data, double* censoring, double* frequency, int size)
{
double mu = weibulparms[0]; // scale
double sigma = weibulparms[1]; // shape
double logSigma;
double nH11 = 0.0;
double nH12 = 0.0;
double nH22 = 0.0;
logSigma = log(sigma);
double *dev_data, *dev_censoring, *dev_frequency, *dev_nH11, *dev_nH12, *dev_nH22, *dev_nlogL;
cudaMalloc(&dev_data, size*sizeof(double));
cudaMalloc(&dev_censoring, size*sizeof(double));
cudaMalloc(&dev_frequency, size*sizeof(double));
cudaMalloc(&dev_nH11, sizeof(double));
cudaMalloc(&dev_nH12, sizeof(double));
cudaMalloc(&dev_nH22, sizeof(double));
cudaMalloc(&dev_nlogL, sizeof(double));
// Upload the observations and zero-initialized accumulators.
cudaMemcpy(dev_data, data, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_censoring, censoring, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_frequency, frequency, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_nH11, &nH11, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_nH12, &nH12, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_nH22, &nH22, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_nlogL, nlogL, sizeof(double), cudaMemcpyHostToDevice);
neg_log_likelihood<<<(size+THRDS_P_BLK-1)/THRDS_P_BLK, THRDS_P_BLK>>>(size, mu, sigma, logSigma, dev_data, dev_censoring, dev_frequency, dev_nH11, dev_nH12, dev_nH22, dev_nlogL);
// copy to host (blocking memcpy also synchronizes with the kernel)
cudaMemcpy(nlogL, dev_nlogL, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&nH11, dev_nH11, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&nH12, dev_nH12, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&nH22, dev_nH22, sizeof(double), cudaMemcpyDeviceToHost);
*nlogL = *nlogL * -1;   // kernel accumulates +logL; flip sign for the NLL
double sigmaSq = sigma * sigma;
// Invert the 2x2 Hessian (times sigma^2) to get the covariance matrix.
double avarDenom = (nH11*nH22 - nH12*nH12);
printf("avarDenom gpu %f\n", avarDenom);
acov[0]=sigmaSq*(nH22/avarDenom);
acov[1]=sigmaSq*((-1*nH12)/avarDenom);
acov[2]=sigmaSq*((-1*nH12)/avarDenom);
acov[3]=sigmaSq*(nH11/avarDenom);
cudaFree(dev_nlogL);
cudaFree(dev_nH22);
cudaFree(dev_nH12);
cudaFree(dev_nH11);
cudaFree(dev_frequency);
cudaFree(dev_censoring);
cudaFree(dev_data);
}
/*
 * Host wrapper for scale_likelihood: returns
 *   sigma + xbar - (sum_i x[i]*w[i]*e^{x[i]/sigma}) / (sum_i w[i]*e^{x[i]/sigma})
 * with both sums accumulated on the device via atomicAdd.
 * NOTE(review): CUDA return codes are not checked.
 */
double runKernels_ScaleLikelihood(double sigma, double *x, double *w, double xbar, int size)
{
double sumxw = 0.0;
double sumw = 0.0;
double *device_x, *device_w, *device_sumxw, *device_sumw;
cudaMalloc(&device_x, size*sizeof(double));
cudaMalloc(&device_w, size*sizeof(double));
cudaMalloc(&device_sumxw, sizeof(double));
cudaMalloc(&device_sumw, sizeof(double));
// Upload inputs and zeroed accumulators.
cudaMemcpy(device_x, x, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_w, w, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_sumxw, &sumxw, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_sumw, &sumw, sizeof(double), cudaMemcpyHostToDevice);
scale_likelihood<<<(size+THRDS_P_BLK-1)/THRDS_P_BLK, THRDS_P_BLK>>>(size, sigma, device_sumxw, device_sumw, device_x, device_w);
// copy the sums to host (blocking; also synchronizes with the kernel)
cudaMemcpy(&sumxw, device_sumxw, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&sumw, device_sumw, sizeof(double), cudaMemcpyDeviceToHost);
double v;
v = (sigma + xbar - sumxw / sumw);
cudaFree(device_sumw);
cudaFree(device_sumxw);
cudaFree(device_w);
cudaFree(device_x);
return v;
}
/*
 * Host wrapper: normalizes inputData into x0 via (x - maxx)/range on the
 * device, then computes the mean and (sample) standard deviation of the
 * normalized values into *mean and *myStd.  x0 is copied back to the host.
 * NOTE(review): CUDA return codes are not checked.
 */
void runKernels_ComputeMeanAndStd(double *inputData, double *x0, double *mean, double *myStd, double maxx, double range, int size)
{
double *device_inputData, *device_x0, *device_mean, *device_myStd;
cudaMalloc(&device_inputData, size*sizeof(double));
cudaMalloc(&device_x0, size*sizeof(double));
cudaMalloc(&device_mean, sizeof(double));
cudaMalloc(&device_myStd, sizeof(double));
cudaMemcpy(device_inputData, inputData, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x0, x0, size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_mean, mean, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_myStd, myStd, sizeof(double), cudaMemcpyHostToDevice);
normalization_and_sum<<<(size+THRDS_P_BLK-1)/THRDS_P_BLK, THRDS_P_BLK>>>(size, maxx, range, device_inputData, device_x0, device_mean);
// copy the sum to host
cudaMemcpy(mean, device_mean, sizeof(double), cudaMemcpyDeviceToHost);
// compute the mean
*(mean) = *(mean) / size;
// copy mean to the device
cudaMemcpy(device_mean, mean, sizeof(double), cudaMemcpyHostToDevice);
// Fix: this launch hard-coded (size+255)/256 blocks of 256 threads while
// every other launch in this file uses THRDS_P_BLK; made consistent.
compute_std<<<(size+THRDS_P_BLK-1)/THRDS_P_BLK, THRDS_P_BLK>>>(size, device_x0, device_mean, device_myStd);
// copy the sum of squared deviations to host and finish on the CPU
cudaMemcpy(myStd, device_myStd, sizeof(double), cudaMemcpyDeviceToHost);
*(myStd) = *(myStd) / (size-1);   // sample variance (Bessel's correction)
*(myStd) = sqrt(*(myStd));
// copy the normalized data back to the host
cudaMemcpy(x0, device_x0, size*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(device_myStd);
cudaFree(device_mean);
cudaFree(device_x0);
cudaFree(device_inputData);
}
#ifdef __cplusplus
}
#endif
|
4,932 | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
// For every pixel id1 of an h x w image (one thread per pixel), looks up
// its correspondence (x2, y2) = corrAB[2*id1 + {0,1}] and copies the
// (2*ring+1)^2 patch of BP centred on that point into M, channel by
// channel; patch pixels that fall outside the image leave their M entries
// untouched.  NOTE: parameter A is unused in this kernel.
// NOTE(review): M is indexed as (dc*size + y1*w)*ringPatch + pIdx*w + x1
// (not the commented-out (dc*size + y1*w + x1)*ringPatch + pIdx) -- the
// caller's layout must match; confirm which one is intended.
__global__ void Ring_kernel( float *A, float *BP, int *corrAB, float *M, int ring, int c, int h, int w )
{
int id1 = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
int ringSize = 2*ring + 1;
int ringPatch = ringSize * ringSize;
if (id1 < size) {
int y1 = id1 / w, x1 = id1 % w;
int y2 = corrAB[2 * id1 + 1], x2 = corrAB[2 * id1 + 0];
// int id2 = y2 * w + x2;
for (int dx = -ring; dx <= ring; dx++)
for (int dy = -ring; dy <= ring; dy++)
{
int pIdx = (dy + ring) * ringSize + (dx + ring);
int _x2 = x2 + dx, _y2 = y2 + dy;
if (_x2 >= 0 && _x2 < w && _y2 >= 0 && _y2 < h)
{
for (int dc = 0; dc < c; dc++) {
// M[(dc * size + y1 * w + x1) * ringPatch + pIdx] =
M[(dc * size + y1 * w) * ringPatch + pIdx * w + x1] =
BP[dc * size + _y2 * w + _x2];
}
}
}
}
return ;
}
4,933 | #include "includes.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 1024
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
// Scan post-pass: adds this block's offset (one uint per block from
// d_Buffer, fetched once by thread 0 into shared memory) to every
// component of the block's uint4 elements in d_Data.
__global__ void uniformUpdate( uint4 *d_Data, uint *d_Buffer )
{
__shared__ uint blockOffset;
const uint gid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
blockOffset = d_Buffer[blockIdx.x];
__syncthreads();   // make the offset visible to the whole block
uint4 v = d_Data[gid];
v.x += blockOffset;
v.y += blockOffset;
v.z += blockOffset;
v.w += blockOffset;
d_Data[gid] = v;
}
4,934 | #include <bits/stdc++.h>
#include <cuda.h>
#define BLOCK_SIZE 1024
using namespace std;
// Element-wise C = A + B over an n x n matrix, one element per thread.
__global__ void sum(int *d_A, int *d_B, int *d_C, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Fix: the bounds guard was commented out, so threads in the grid tail
// wrote past the end of the arrays.
if(i < n*n)
d_C[i] = d_A[i] + d_B[i];
}
// Row-wise matrix sum: thread i adds row i of d_A and d_B into d_C.
__global__ void sumR(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard added: the launch uses ceil(n / BLOCK_SIZE) * BLOCK_SIZE threads,
    // so threads with i >= n would write whole rows out of bounds.
    if (i < n)
        for (int j = 0; j < n; j++)
            d_C[i*n + j] = d_A[i*n + j] + d_B[i*n + j];
}
// Column-wise matrix sum: thread i adds column i of d_A and d_B into d_C.
// (Strided access pattern — adjacent threads touch adjacent columns, which
// coalesces well since consecutive i map to consecutive addresses per j.)
__global__ void sumC(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard added, mirroring sumR: excess tail threads must not write.
    if (i < n)
        for (int j = 0; j < n; j++)
            d_C[j*n + i] = d_A[j*n + i] + d_B[j*n + i];
}
// Driver: reads n, builds two n*n matrices of constants (3 and 4), adds them
// on the GPU with one of three kernels, and prints the elapsed wall time.
int main(){
int *h_A, *h_B, *h_C;
int n; cin>>n;
// NOTE(review): n*n*sizeof(int) in a plain int overflows for n >~ 23170 —
// consider size_t. Also no check that cin succeeded.
int size = sizeof(int) * n*n;
h_A = (int *)malloc(size);
h_B = (int *)malloc(size);
h_C = (int *)malloc(size);
// Fill with constants so the expected result is 7 everywhere.
for(int i = 0; i < n*n; i++) {
h_A[i] = 3;
h_B[i] = 4;
}
int *d_A, *d_B, *d_C;
// Timing starts here, so it includes cudaMalloc and both H2D copies,
// not just the kernel.
clock_t t = clock();
cudaMalloc(&d_A, size);
cudaMalloc(&d_B, size);
cudaMalloc(&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Three variants: per-element, per-row, per-column; only sumR is active.
//sum<<< ceil( (n*n) / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);
sumR<<< ceil( n / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);
//sumC<<< ceil( n / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);
// The blocking D2H copy also serves as the kernel synchronization point.
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
double a = ((double)(clock()-t))/CLOCKS_PER_SEC;
cout<< a <<endl;
//for(int i = 0; i < n; i++) {
//for(int j = 0; j < n; j++)
//cout<<h_C[j]<<" ";
//cout<<endl;
//}
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
4,935 | /*
Kam Pui So (Anthony)
CS510 GPU
Project Group A
Application:
Matrix Multiplication based on CUDA TOOLKIT Documentation
This version of matrix multiplication does not use share memory.
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#define SCALE 3.14159
#define MAX 9
#define REPEAT 10
//global
const int TESTSIZE[] = {1, 5, 7, 11, 13, 16, 23, 32, 64};
// Row major matrix struct
typedef struct {
int width;
int height;
float* elements;
} matrix;
// block size struct
typedef struct {
int x;
int y;
} blocksize;
// forward declaration
// matrix multiplication kernel
__global__ void matrixMultiplyKernel (const matrix, const matrix, matrix);
// print error code
// Print "<message>: <description>" when `error` indicates a real CUDA error.
// FIX: the original copied the error string and tested `strcmp(...) == 1`.
// strcmp only guarantees a nonzero value for unequal strings (not 1), so
// errors could be silently dropped. Comparing the error code against
// cudaSuccess is both correct and avoids the fragile string round-trip.
void printError(char * message, cudaError_t error) {
	if (error != cudaSuccess)
		printf("%s: %s\n", message, cudaGetErrorString(error));
}
// Host code - matrix multiplication
// AxB = C
// block size is determine at runtime
// Host-side driver for C = A x B on the GPU.
// Copies A and B to the device, launches matrixMultiplyKernel with a grid
// sized by `dimension` (block shape chosen at runtime), then copies C back.
// Every CUDA call's status is routed through printError.
void matrixMultiplyHost (const matrix A, const matrix B, matrix C, const blocksize dimension) {
	matrix A_device, B_device, C_device;
	size_t size;
	cudaError_t err;
	// --- load A to device memory ---
	A_device.width = A.width;
	A_device.height = A.height;
	size = A.width * A.height * sizeof(float);
	err = cudaMalloc(&A_device.elements, size);
	printError("CUDA malloc A", err);
	err = cudaMemcpy(A_device.elements, A.elements, size, cudaMemcpyHostToDevice);
	printError("Copy A to device", err);
	// --- load B to device memory ---
	B_device.width = B.width;
	B_device.height = B.height;
	size = B.width * B.height * sizeof(float);
	err = cudaMalloc(&B_device.elements, size);
	printError("CUDA malloc B", err);
	err = cudaMemcpy(B_device.elements, B.elements, size, cudaMemcpyHostToDevice);
	printError("Copy B to device", err);
	// --- allocate C in device memory ---
	C_device.width = C.width;
	C_device.height = C.height;
	size = C.width * C.height * sizeof(float);
	err = cudaMalloc(&C_device.elements, size);
	printError("CUDA malloc C", err);
	// --- invoke kernel: ceil-div grid so partial tiles are covered ---
	dim3 dimBlock(dimension.x, dimension.y);
	dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y -1) / dimBlock.y);
	matrixMultiplyKernel<<<dimGrid, dimBlock>>>(A_device, B_device, C_device);
	// FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	err = cudaDeviceSynchronize();
	printError("Run kernel", err);
	// --- read C back from device memory ---
	err = cudaMemcpy(C.elements, C_device.elements, size, cudaMemcpyDeviceToHost);
	printError("Copy C off of device", err);
	// --- free device memory ---
	cudaFree(A_device.elements);
	cudaFree(B_device.elements);
	cudaFree(C_device.elements);
}
// Kernel code - matrix multiplication
// AxB = C
// Kernel: one thread computes one element of C = A x B (row-major matrices).
// Expects a 2D launch; grid may over-cover, the guard handles the tail.
__global__ void matrixMultiplyKernel (const matrix A, const matrix B, matrix C) {
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	// FIX: the original used `>`, which let row == A.height / col == B.width
	// threads run and write one element past the matrices.
	if ((row >= A.height) || (col >= B.width)) return;
	float value = 0.0;
	for (int k = 0; k < A.width; ++k) {
		// FIX: original indexed A as `row + A.width + k` (typo `+` for `*`);
		// row-major element (row, k) is row * A.width + k.
		value += A.elements[row * A.width + k] * B.elements[k * B.width + col];
	}
	C.elements[row * C.width + col] = value;
}
// create random matrix
void createRandomMatrix(matrix randomMatrix) {
int height = randomMatrix.height;
int width = randomMatrix.width;
for (int i = 0; i < height; ++i)
for (int j = 0; j < width; ++j)
randomMatrix.elements[i * width + j] = 1.123;
// randomMatrix.elements[i * width + j] = ((float) rand()) / 1.123;
}
// print matrix
void printMatrix(const matrix sourceMatrix) {
int height = sourceMatrix.height;
int width = sourceMatrix.width;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
printf("%.2f ", sourceMatrix.elements[i * width + j]);
}
printf("\n");
}
printf("------------------------\n");
}
// print result
void printResult(const timeval start, const timeval end, const blocksize testSize) {
printf("Result (x y micro-second), %d, %d, %ld\n", testSize.x, testSize.y, ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec )));
}
// main function
int main (int argc, char* argv[]) {
matrix A, B, C;
blocksize currentSize;
int i = 0;
int x, y;
struct timeval start, end;
// initialize random seed
srand(time(NULL));
// setup the matrices
A.height = atoi(argv[1]);
A.width = atoi(argv[2]);
A.elements = (float*) malloc(A.width * A.height *sizeof(float));
B.height = A.width;
B.width = atoi(argv[3]);
B.elements = (float*) malloc(B.width * B.height *sizeof(float));
C.height = A.height;
C.width = B.width;
C.elements = (float*) malloc(C.width * C.height *sizeof(float));
// create random matrix for calculation
createRandomMatrix(A);
createRandomMatrix(B);
printMatrix(A);
printMatrix(B);
// main loop for testingg (randomly picking x & y)
while (i < REPEAT) {
x = rand() % MAX;
y = rand() % MAX;
currentSize.x = TESTSIZE[x];
currentSize.y = TESTSIZE[y];
// call host code
gettimeofday(&start, NULL);
matrixMultiplyHost(A, B, C, currentSize);
gettimeofday(&end, NULL);
printResult(start, end, currentSize);
printMatrix(C);
++i;
}
// free memory
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
|
4,936 | __global__ void fillTwoFloatsArraysKernel(
int numberRows,
int numberEntries,
float* firstArray,
float firstConstant,
float* secondArray,
float secondConstant) {
// Write each kernel's constant into the same slot of its array.
// NOTE(review): index = blockIdx.x*numberEntries + blockIdx.y*numberRows
// + threadIdx.x suggests grid.x strides whole instances (numberEntries)
// and grid.y strides column groups (numberRows) — confirm against the
// launcher. There is no bounds guard, so the launch must exactly tile
// both arrays.
int index = blockIdx.x * numberEntries + blockIdx.y * numberRows + threadIdx.x;
firstArray[index] = firstConstant;
secondArray[index] = secondConstant;
}
4,937 | //
// Created by root on 2020/11/20.
//
#include "stdio.h"
#include "cuda_runtime.h"
#define BDIM 32
#define RADIUS 4
#define a0 0.00000f
#define a1 0.80000f
#define a2 -0.20000f
#define a3 0.03809f
#define a4 -0.00357f
__constant__ float coef[RADIUS + 1];
// constant memory is 64KB for each processor, which is good at uniform read
// 1D finite-difference stencil of radius RADIUS using __constant__ coef[].
// Each block stages BDIM elements plus a 2*RADIUS halo into shared memory,
// then computes the antisymmetric stencil sum coef[i]*(right_i - left_i).
// Precondition: `in`/`out` point RADIUS elements into padded buffers, so the
// halo reads below (idx - RADIUS) stay in bounds (see the pointer offset at
// the launch site in main()).
__global__ void stencil_ld(float *in, float *out) {
__shared__ float smem[BDIM + 2 * RADIUS];
int idx = threadIdx.x + blockIdx.x * blockDim.x; // index in global memory
int sidx = threadIdx.x + RADIUS; // index in shared memory (past left halo)
smem[sidx] = in[idx]; // interior element for this thread
if (threadIdx.x < RADIUS) {
// First RADIUS threads also load the left halo (idx - RADIUS) and the
// right halo (idx + BDIM) of the block's tile.
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
// Barrier: all halo/interior loads must land before any thread reads smem.
__syncthreads();
// calculate stencil: i = 0 term is zero by symmetry (a0 * (x - x)).
float tmp = 0.0f;
#pragma unroll
for (int i = 0; i <= RADIUS; i++) {
tmp += coef[i] * (smem[sidx + i] - smem[sidx - i]);
}
out[idx] = tmp;
}
// restrict memory is 48KB for each processor, which is only suitable for scatter read
// Same stencil as stencil_ld, but the coefficients come from a global-memory
// array routed through the read-only data cache instead of __constant__.
// FIX: the coefficient pointer is now `const float *__restrict__` — without
// `const` the compiler cannot prove the data is read-only, defeating the
// read-only-cache intent this variant exists to demonstrate. Callers passing
// a non-const float* are unaffected (implicit qualification conversion).
__global__ void stencil_ld_readonly(float *in, float *out, const float *__restrict__ dcoef) {
__shared__ float smem[BDIM + 2 * RADIUS];
int idx = threadIdx.x + blockIdx.x * blockDim.x; // index in global memory
int sidx = threadIdx.x + RADIUS; // index in shared memory (past left halo)
smem[sidx] = in[idx]; // interior element for this thread
if (threadIdx.x < RADIUS) {
// First RADIUS threads also load the block's left and right halos.
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
__syncthreads();
// calculate stencil
float tmp = 0.0f;
#pragma unroll
for (int i = 0; i <= RADIUS; i++) {
tmp += dcoef[i] * (smem[sidx + i] - smem[sidx - i]);
}
out[idx] = tmp;
}
// Copy the five stencil coefficients (a0..a4) into the device __constant__
// array `coef` used by stencil_ld.
void setup_coef() {
const float h_coef[] = {a0, a1, a2, a3, a4};
cudaMemcpyToSymbol(coef, h_coef, (RADIUS + 1) * sizeof(float));
}
// Demo driver: runs the constant-memory stencil and the read-only-cache
// stencil on a small padded array and prints both results.
int main() {
	int isize = 16;
	// Buffers carry RADIUS padding on each side so halo reads stay in bounds.
	size_t nBytes = (isize + 2 * RADIUS) * sizeof(float);
	float *h_in = (float *) malloc(nBytes);
	float *hostRef = (float *) malloc(nBytes);   // reserved for a CPU reference
	float *gpuRef = (float *) malloc(nBytes);
	float *d_in, *d_out, *d_coef;
	cudaMalloc((float **) &d_in, nBytes);
	cudaMalloc((float **) &d_out, nBytes);
	cudaMalloc((float **) &d_coef, (RADIUS + 1) * sizeof(float ));
	for (int i = 0; i < isize + 2 * RADIUS; i++) {
		h_in[i] = (float) i;
	}
	cudaMemcpy(d_in, h_in, nBytes, cudaMemcpyHostToDevice);
	setup_coef();
	dim3 block(BDIM, 1);
	dim3 grid((isize + block.x - 1) / block.x, 1);
	// Offset by RADIUS so index 0 inside the kernel is the first interior cell.
	stencil_ld<<<grid, block>>>(d_in + RADIUS, d_out + RADIUS);
	cudaDeviceSynchronize();
	cudaMemcpy(gpuRef, d_out, nBytes, cudaMemcpyDeviceToHost);
	for (int i = 0; i < isize + 2 * RADIUS; i++) {
		printf("%f->", gpuRef[i]);
	}
	printf("\n========\n");
	// Reset the output, then run the read-only-cache variant.
	cudaMemset(d_out, 0, nBytes);
	memset(gpuRef, 0, nBytes);
	const float h_coef[] = {a0, a1, a2, a3, a4};
	cudaMemcpy(d_coef, h_coef, (RADIUS + 1) * sizeof(float ), cudaMemcpyHostToDevice);
	stencil_ld_readonly<<<grid, block>>>(d_in + RADIUS, d_out + RADIUS, d_coef);
	cudaDeviceSynchronize();
	cudaMemcpy(gpuRef, d_out, nBytes, cudaMemcpyDeviceToHost);
	for (int i = 0; i < isize + 2 * RADIUS; i++) {
		printf("%f->", gpuRef[i]);
	}
	// FIX: release everything — the original leaked all three host buffers
	// and all three device allocations.
	cudaFree(d_in);
	cudaFree(d_out);
	cudaFree(d_coef);
	free(h_in);
	free(hostRef);
	free(gpuRef);
	return 0;
}
4,938 | #include "cuda_runtime.h"
#include<iostream>
#include <chrono>
#include <cstdlib>
#include "device_launch_parameters.h"
// Select the size of the matix of dim [ SIZE X SIZE ]
#define SIZE 1024
using namespace std;
//Ensure to add the __global__ block when performing GPU operations
// One thread computes C[j][i] of the SIZE x SIZE product C = A * B.
// Row index j comes from the y dimension, column index i from x; the 2D
// matrices are stored as flat SIZE*SIZE arrays.
__global__ void gpu_multer(double *A, double *B, double *C)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;
	if (i < SIZE && j < SIZE)
	{
		// FIX: accumulate in a register and store once. The original did
		// `C[...] += ...` directly into device memory that was never zeroed,
		// so results depended on whatever the allocation happened to hold
		// (and each partial product was a global-memory round trip).
		double acc = 0.0;
		for (int k = 0; k < SIZE; k++) {
			acc += A[j * SIZE + k] * B[k * SIZE + i];
		}
		C[j * SIZE + i] = acc;
	}
}
// Functions for CPU operation
void cpu_multer(double* A,double* B, double* C_cpu)
{
for (int i = 0; i < SIZE; i++)
{
for (int j = 0; j < SIZE; j++)
{
for (int k = 0; k < SIZE; k++)
{
//The same as GPU is considered for CPU Matrix Multiplication
C_cpu[i * SIZE + j] += (A[i * SIZE + k] * B[k * SIZE + j]);
}
//cout << C_cpu[i * SIZE + j] << " ";
}
//cout << "\n";
}
}
// Function to print the matrix
void printMat(double *A)
{
for (int i = 0; i < SIZE; i++)
{
for (int j = 0; j < SIZE; j++)
{
cout << A[i * SIZE + j] << " ";
}
cout << "\n";
}
}
// Compare GPU vs CPU dense matrix multiplication timings on SIZE x SIZE
// random double matrices held in unified (managed) memory.
int main()
{
	using milli = std::chrono::milliseconds;
	using micro = std::chrono::microseconds;
	// Host+device visible buffers (cudaMallocManaged).
	double* A, * B, * C;
	cudaMallocManaged(&A, SIZE * SIZE * sizeof(double));
	cudaMallocManaged(&B, SIZE * SIZE * sizeof(double));
	cudaMallocManaged(&C, SIZE * SIZE * sizeof(double));
	double* C_cpu = (double*)malloc(SIZE * SIZE * sizeof(double));
	// FIX: zero BOTH accumulators. The original zeroed C_cpu with a redundant
	// triple loop and never zeroed C at all, although both the kernel and
	// cpu_multer accumulate with +=.
	for (int i = 0; i < SIZE * SIZE; i++)
	{
		C[i] = 0.0;
		C_cpu[i] = 0.0;
	}
	// Random double precision data is populated into Array A
	for (int i = 0; i < SIZE; i++)
	{
		for (int j = 0; j < SIZE; j++)
		{
			A[i * SIZE + j] = (double) (rand() % 10000000) / 1000000;
		}
	}
	// Random double precision data is populated into Array B
	for (int i = 0; i < SIZE; i++)
	{
		for (int j = 0; j < SIZE; j++)
		{
			B[i * SIZE + j] = (double)(rand() % 10000000) / 1000000;
		}
	}
	// GPU multiplication operation along with time
	cout << "GPU Computation start\n";
	auto start = std::chrono::high_resolution_clock::now();
	// FIX: the original launched <<<dim3(SIZE,SIZE), dim3(SIZE,SIZE)>>>, i.e.
	// SIZE*SIZE = 1,048,576 threads per block — far above the 1024-thread
	// hardware limit — so the kernel launch failed silently and never ran.
	// Tile the matrix with 32x32 thread blocks instead.
	dim3 threadsPerBlock(32, 32);
	dim3 numBlocks((SIZE + 31) / 32, (SIZE + 31) / 32);
	gpu_multer <<< numBlocks, threadsPerBlock >>> (A, B, C);
	cudaDeviceSynchronize();
	auto finish = std::chrono::high_resolution_clock::now();
	cout << "Multiplication of matrices in GPU\n";
	cout << "Matrix mulitplication took "
		<< std::chrono::duration_cast<micro>(finish - start).count()
		<< " microseconds\n\n";
	// CPU Multiplication operation
	cout << "CPU Computation start\n";
	auto start1 = std::chrono::high_resolution_clock::now();
	cpu_multer(A, B, C_cpu);
	auto finish1 = std::chrono::high_resolution_clock::now();
	cout << "Multiplication of matrices in CPU\n";
	cout << "Matrix mulitplication took "
		<< std::chrono::duration_cast<milli>(finish1 - start1).count()
		<< " milliseconds\n\n";
	// Debug aid: printMat(A/B/C_cpu/C) can be re-enabled here to inspect data.
	// Release managed and host memory.
	cudaFree(A);
	cudaFree(B);
	cudaFree(C);
	free(C_cpu);
	return 0;
}
4,939 | #include "includes.h"
//Library Definition
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define Repetitions 8192
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
void Create_New_Matrix(double * M,double * New,int * vec, int p0, int pp,int nn);
/*
DEVICE FUNCTIONS
*/
//Matrix transposition (Rows and Cols of M)
// Matrix product C = a x b, one thread per output element.
// Dimensions implied by the indexing: a is rows x cols2, b is cols2 x cols,
// C is rows x cols (all row-major). blockIdx.x/threadIdx.x select the row,
// blockIdx.y/threadIdx.y the column.
__global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols)
{
// Accumulate directly into C; the element is zeroed first, so stale
// device memory cannot leak into the result.
C[row*cols+col] =0;
for (int k = 0; k < cols2; k++)
{
C[row*cols+col]+=b[k*cols+col]*a[row*cols2+k];
}
}
}
4,940 | #include "includes.h"
// Polar-grid (nrad x nsec) hydrodynamics update step on a disk:
// computes new radial (VradNew) and azimuthal (VthetaNew) velocities from
// pressure-like gradients, and (if Adiabatic) an updated energy field.
// Thread mapping: j (x dimension) = azimuthal sector, i (y) = radial ring.
// PI is expected to come from includes.h — confirm.
__global__ void kernel(double *Dens, double *VradInt, double *VthetaInt, double *TemperInt, int nrad, int nsec, double *invdiffRmed, double *invdiffRsup, double *DensInt, int Adiabatic, double *Rmed, double dt, double *VradNew, double *VthetaNew, double *Energy, double *EnergyInt)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dens, densint, dxtheta, invdxtheta, dens2, tempint;
// Radial update skips ring 0 because it differences against ring i-1.
if (i>0 && i<nrad && j<nsec){
dens = Dens[i*nsec + j] + Dens[(i-1)*nsec + j];
densint = DensInt[i*nsec+j] - DensInt[(i-1)*nsec + j];
VradNew[i*nsec+j] = VradInt[i*nsec+j] - dt*2.0/dens*densint*invdiffRmed[i];
}
// Azimuthal update: ((j-1)+nsec)%nsec wraps around the periodic sector index.
if (i<nrad && j<nsec){
dxtheta = 2.0*PI/(double)nsec*Rmed[i];
invdxtheta = 1.0/dxtheta;
dens2 = Dens[i*nsec + j] + Dens[i*nsec + ((j-1)+nsec)%nsec];
tempint = (TemperInt[i*nsec+j] - TemperInt[i*nsec + ((j-1)+nsec)%nsec]);
VthetaNew[i*nsec + j] = VthetaInt[i*nsec + j] - dt*2.0/dens2*tempint*invdxtheta;
}
/* If gas disk is adiabatic, we add artificial viscosity as a source */
/* term for advection of thermal energy polargrid */
// NOTE(review): the radial difference reads VradInt[(i+1)*nsec + j], so for
// i == nrad-1 this reaches ring nrad — presumably the grids carry an extra
// ghost ring; confirm against the allocation site.
if (Adiabatic){
if (i<nrad && j<nsec){
dxtheta = 2.0*PI/(double)nsec*Rmed[i];
invdxtheta = 1.0/dxtheta;
EnergyInt[i*nsec + j] = Energy[i*nsec + j] - dt*DensInt[i*nsec + j]* \
(VradInt[(i+1)*nsec + j] - VradInt[i*nsec + j])*invdiffRsup[i] - \
dt*TemperInt[i*nsec + j]*(VthetaInt[i*nsec + (j+1)%nsec] - VthetaInt[i*nsec + j])* invdxtheta;
}
}
}
4,941 | #define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_sxx(z,x) d_sxx[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)]
#define d_sxz(z,x) d_sxz[(x)*(nz)+(z)]
#define d_vz_adj(z,x) d_vz_adj[(x)*(nz)+(z)]
#define d_vx_adj(z,x) d_vx_adj[(x)*(nz)+(z)]
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxz_dx(z,x) d_mem_dsxz_dx[(x)*(nz)+(z)]
#define d_mem_dsxz_dz(z,x) d_mem_dsxz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Mu(z,x) d_Mu[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]
#define d_DenGrad(z,x) d_DenGrad[(x)*(nz)+(z)]
#include<stdio.h>
// Staggered-grid elastic velocity update (4th-order FD, CPML boundaries).
// Forward mode (isFor): update vz/vx from stress derivatives, applying CPML
// memory variables inside the absorbing layers. Backward mode: reverse-time
// update plus accumulation of the density gradient via the adjoint fields.
// Thread mapping: gidz (x dim) = depth index, gidx (y dim) = lateral index.
__global__ void el_velocity(float *d_vz, float *d_vx, float *d_szz, \
	float *d_sxx, float *d_sxz, float *d_mem_dszz_dz, float *d_mem_dsxz_dx, \
	float *d_mem_dsxz_dz, float *d_mem_dsxx_dx, float *d_Lambda, float *d_Mu, \
	float *d_ave_Byc_a, float *d_ave_Byc_b, float *d_K_z, float *d_a_z, float *d_b_z, \
	float *d_K_z_half, float *d_a_z_half, float *d_b_z_half, float *d_K_x, float *d_a_x, \
	float *d_b_x, float *d_K_x_half, float *d_a_x_half, float *d_b_x_half, \
	int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, bool isFor, \
	float *d_vz_adj, float *d_vx_adj, float *d_DenGrad){

	int gidz = blockIdx.x*blockDim.x + threadIdx.x;
	int gidx = blockIdx.y*blockDim.y + threadIdx.y;

	float dszz_dz = 0.0;
	float dsxz_dx = 0.0;
	float dsxz_dz = 0.0;
	float dsxx_dx = 0.0;
	// 4th-order staggered-grid FD coefficients.
	float c1 = 9.0/8.0;
	float c2 = 1.0/24.0;

	if (isFor) {
		// Interior points only: 2-cell margin for the 4-point stencils.
		if(gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {
			// update vz
			dszz_dz = (c1*(d_szz(gidz+1,gidx)-d_szz(gidz,gidx)) - c2*(d_szz(gidz+2,gidx)-d_szz(gidz-1,gidx)))/dz;
			dsxz_dx = (c1*(d_sxz(gidz,gidx)-d_sxz(gidz,gidx-1)) - c2*(d_sxz(gidz,gidx+1)-d_sxz(gidz,gidx-2)))/dx;
			// CPML memory-variable updates inside the top/bottom absorbing layers.
			if(gidz<nPml || (gidz>nz-nPml-nPad-1)){
				d_mem_dszz_dz(gidz,gidx) = d_b_z_half[gidz]*d_mem_dszz_dz(gidz,gidx) + d_a_z_half[gidz]*dszz_dz;
				dszz_dz = dszz_dz / d_K_z_half[gidz] + d_mem_dszz_dz(gidz,gidx);
			}
			// ... and inside the left/right absorbing layers.
			if(gidx<nPml || gidx>nx-nPml){
				d_mem_dsxz_dx(gidz,gidx) = d_b_x[gidx]*d_mem_dsxz_dx(gidz,gidx) + d_a_x[gidx]*dsxz_dx;
				dsxz_dx = dsxz_dx / d_K_x[gidx] + d_mem_dsxz_dx(gidz,gidx);
			}
			d_vz(gidz,gidx) += (dszz_dz + dsxz_dx) * d_ave_Byc_a(gidz, gidx) * dt;
			// update vx
			dsxz_dz = (c1*(d_sxz(gidz,gidx)-d_sxz(gidz-1,gidx)) - c2*(d_sxz(gidz+1,gidx)-d_sxz(gidz-2,gidx)))/dz;
			dsxx_dx = (c1*(d_sxx(gidz,gidx+1)-d_sxx(gidz,gidx)) - c2*(d_sxx(gidz,gidx+2)-d_sxx(gidz,gidx-1)))/dx;
			if(gidz<nPml || (gidz>nz-nPml-nPad-1)){
				d_mem_dsxz_dz(gidz,gidx) = d_b_z[gidz]*d_mem_dsxz_dz(gidz,gidx) + d_a_z[gidz]*dsxz_dz;
				dsxz_dz = dsxz_dz / d_K_z[gidz] + d_mem_dsxz_dz(gidz,gidx);
			}
			if(gidx<nPml || gidx>nx-nPml){
				d_mem_dsxx_dx(gidz,gidx) = d_b_x_half[gidx]*d_mem_dsxx_dx(gidz,gidx) + d_a_x_half[gidx]*dsxx_dx;
				dsxx_dx = dsxx_dx / d_K_x_half[gidx] + d_mem_dsxx_dx(gidz,gidx);
			}
			d_vx(gidz,gidx) += (dsxz_dz + dsxx_dx) * d_ave_Byc_b(gidz, gidx) * dt;
		}
		else{
			return;
		}
	}
	else {
		// ==================== BACKWARD PROPAGATION ====================
		if(gidz>=nPml && gidz<=nz-nPad-1-nPml && gidx>=nPml && gidx<=nx-1-nPml) {
			// reverse-time update of vz
			dszz_dz = (c1*(d_szz(gidz+1,gidx)-d_szz(gidz,gidx)) - c2*(d_szz(gidz+2,gidx)-d_szz(gidz-1,gidx)))/dz;
			dsxz_dx = (c1*(d_sxz(gidz,gidx)-d_sxz(gidz,gidx-1)) - c2*(d_sxz(gidz,gidx+1)-d_sxz(gidz,gidx-2)))/dx;
			d_vz(gidz,gidx) -= (dszz_dz + dsxz_dx) * d_ave_Byc_a(gidz, gidx) * dt;
			// reverse-time update of vx
			dsxz_dz = (c1*(d_sxz(gidz,gidx)-d_sxz(gidz-1,gidx)) - c2*(d_sxz(gidz+1,gidx)-d_sxz(gidz-2,gidx)))/dz;
			dsxx_dx = (c1*(d_sxx(gidz,gidx+1)-d_sxx(gidz,gidx)) - c2*(d_sxx(gidz,gidx+2)-d_sxx(gidz,gidx-1)))/dx;
			d_vx(gidz,gidx) -= (dsxz_dz + dsxx_dx) * d_ave_Byc_b(gidz, gidx) * dt;
			// compute the density kernel (sprayed onto the two cells that the
			// staggered buoyancy averages d_ave_Byc_a/b were built from)
			float grad_ave_Byc_a = -d_vz_adj(gidz,gidx)*(dszz_dz + dsxz_dx)*dt \
					* (-pow(d_ave_Byc_a(gidz,gidx),2)/2.0);
			float grad_ave_Byc_b = -d_vx_adj(gidz,gidx)*(dsxz_dz + dsxx_dx)*dt \
					* (-pow(d_ave_Byc_b(gidz,gidx),2)/2.0);
			atomicAdd(&d_DenGrad[gidz+nz*gidx], grad_ave_Byc_a);
			atomicAdd(&d_DenGrad[gidz+nz*gidx], grad_ave_Byc_b);
			if (gidz+1<=nz-nPad-1-nPml)
				atomicAdd(&d_DenGrad[gidz+1+nz*gidx], grad_ave_Byc_a);
			// FIX: the original condition was `gidx+1<=gidx<=nx-1-nPml`, a
			// chained comparison that parses as (gidx+1<=gidx) <= (nx-1-nPml)
			// and is effectively always true, so the lateral spray could
			// write one column out of the intended interior region.
			if (gidx+1<=nx-1-nPml)
				atomicAdd(&d_DenGrad[gidz+nz*(gidx+1)], grad_ave_Byc_b);
		}
		else{
			return;
		}
	}
}
4,942 | #include <iostream>
#include <cuda.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
/*
written by George Strauch on 4/21/2020
c++ program to sort an array with bubblesort on gpu
Execution syntax:
$ ./exec {int num of elements}
Example run:
$ nvcc gpu_bubble.cu -arch='sm_35' -rdc=true -lineinfo -lcudadevrt -o gpu_bs
$ time ./gpu_bs 10
$ time ./gpu_bs 20000
*/
__host__ // used for debug
// Print the array space-separated; place a " > " marker just before index
// tag_index+1 (the successor of the first out-of-order position).
void print_array (int *array, int n, int tag_index)
{
	for (int pos = 0; pos < n; pos++) {
		if (pos == tag_index + 1) {
			std::cout << " > ";
		}
		std::cout << array[pos] << ' ';
	}
	std::cout << '\n';
}
__host__
// Allocate n_elements ints in unified (managed) memory, visible to
// both host and device.
int* allocate_shared_array(int n_elements)
{
	int *buffer;
	cudaMallocManaged(&buffer, sizeof(int) * n_elements);
	return buffer;
}
__host__
// Build a managed array of n_elements random ints in [0, 2*n_elements).
int* make_unsorted_array(int n_elements)
{
	int *arr = allocate_shared_array(n_elements);
	for (int j = 0; j < n_elements; j++) {
		arr[j] = rand() % (2 * n_elements);
	}
	return arr;
}
__host__
// True while at least one adjacent pair is still out of order.
// FIX: the original loop compared `size_t i < n-1`; for n == 0 the int
// result -1 converts to a huge size_t, causing an out-of-bounds scan.
// `i + 1 < n` with int i handles n <= 1 safely.
bool go_again(int* array, int n)
{
	for (int i = 0; i + 1 < n; i++) {
		if(array[i] > array[i+1])
		{
			return true;
		}
	}
	return false;
}
__global__
// One odd-even transposition step: each thread compare-swaps the pair
// (id, id+1). `offset` (0/1) selects the even or odd phase; `k` shifts the
// window when n exceeds the total thread count. Pairs are disjoint within a
// phase, so no synchronization between threads is needed.
void sort(int* array, int n, int offset, int k)
{
	int id = 2*(blockIdx.x*blockDim.x + threadIdx.x) + offset + k;
	// FIX: guard id+1 as well — the original `id >= n` check still let
	// id == n-1 read and write array[n], one past the end.
	if (id + 1 >= n) {
		return;
	}
	if (array[id] > array[id+1]) {
		int tmp = array[id+1];
		array[id+1] = array[id];
		array[id] = tmp;
	}
	// FIX: removed the trailing __syncthreads() — threads that exited early
	// never reached the barrier (undefined behavior under divergence), and
	// no shared memory is used here anyway.
}
__host__
// Zero out the first n slots of a.
void fill_array(int* a, int n) {
	int idx = 0;
	while (idx < n) {
		a[idx] = 0;
		++idx;
	}
}
__host__ // returns first index i with array[i] > array[i+1], else -1
// FIX: the original loop bound `size_t i < n-1` underflowed for n == 0
// (int -1 converts to a huge size_t). `i + 1 < n` makes n <= 1 trivially
// in-order and keeps the same answer for every valid input.
int verify_in_order(int* array, int n)
{
	for (int i = 0; i + 1 < n; i++) {
		if (array[i+1] < array[i]) {
			return i;
		}
	}
	return -1;
}
__host__
// Odd-even transposition sort driver. Repeats even+odd compare-swap phases
// (window shifted by k when n exceeds t*b*2 pairs) until the managed array
// is sorted, with a 1.5*n pass cap as a safety escape.
void entry_point(int* array, int n)
{
int t = 512;
int b = 512;
int count = 0;
int total = t*b;
dim3 threads(t);
dim3 blocks(b);
// fill_array(array, n);
// go_again reads the managed array on the host, so each check is preceded
// by a device synchronization.
while (go_again(array, n)) {
cudaDeviceSynchronize();
// (n/(2*total)) + 1 windows cover all pairs when n > 2*total threads.
for (size_t i = 0; i < (n/(2*total)) +1; i++) {
sort<<<blocks, threads>>>(array, n, 0, i*total);
cudaDeviceSynchronize();
sort<<<blocks, threads>>>(array, n, 1, i*total);
}
cudaDeviceSynchronize();
count++;
// Safety cap: odd-even sort needs at most n passes; 1.5*n means
// something is wrong (e.g. a failed launch), so bail out.
if (count > 1.5*n) {
break;
}
}
}
// Usage: ./gpu_bs <num-elements>. Builds a random managed array, sorts it on
// the GPU, and verifies the result on the host.
int main(int argc, char const *argv[])
{
	// FIX: guard the required argument — atoi(argv[1]) without it is
	// undefined behavior (null dereference).
	if (argc < 2) {
		std::cout << "usage: " << argv[0] << " <num-elements>" << '\n';
		return 1;
	}
	int N = atoi(argv[1]);
	std::cout << "N = " << N << '\n';
	int* a = make_unsorted_array(N);
	cudaProfilerStart();
	entry_point(a, N);
	cudaProfilerStop();
	// Host-side verification; on failure print the array with the first
	// out-of-order position marked.
	int order = verify_in_order(a, N);
	if (order == -1) {
		std::cout << "array is in order" << '\n';
	}
	else {
		std::cout << "not in order" << '\n';
		print_array(a, N, order);
	}
	cudaFree(a);
	return 0;
}
//
|
4,943 | #define INF 2e10f
// A colored sphere for ray casting along the -z axis.
struct Sphere{
// surface color components
float r,b,g;
float radius;
// center position
float x,y,z;
// Cast a ray at pixel (ox, oy): if it hits the sphere, return the z of the
// hit point and write the surface-normal z component (0..1, used for
// shading) through *n; otherwise return -INF.
__device__ float hit (float ox,float oy,float *n){
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy < radius*radius){
// Height of the sphere surface above its center plane at (dx, dy).
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
#define rnd(x) (x*rand() / RAND_MAX)
#define SPHERES 20
Sphere *s;
|
4,944 | #include <stdio.h>
#define SRC_SIZE 65536
#define DST_SIZE 65536
#define CPY_SIZE 8192
// Minimal copy benchmark: zero a host buffer and a device buffer, then
// round-trip CPY_SIZE ints host -> device -> host.
int main() {
int *h_mem = (int*)malloc(SRC_SIZE*sizeof(int));
// NOTE(review): memset is declared in <string.h>, which this file does not
// include (only <stdio.h>) — confirm it compiles on the target toolchain.
memset(h_mem, 0, SRC_SIZE*sizeof(int));
int *d_mem;
cudaMalloc((void**)&d_mem, DST_SIZE*sizeof(int));
cudaMemset(d_mem, 0, DST_SIZE*sizeof(int));
// Only CPY_SIZE of the SRC/DST_SIZE elements are actually transferred.
cudaMemcpy(d_mem, h_mem, CPY_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(h_mem, d_mem, CPY_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_mem);
free(h_mem);
cudaDeviceReset();
return 0;
}
|
4,945 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
// Check a CUDA runtime status: print a description and abort (via assert)
// on failure; pass the status through so calls can be wrapped inline.
inline cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
// Despite the name, this writes sqrt(array[i]) into x[i] — one thread per
// element. __ldg routes the read through the read-only data cache.
__global__ void square(float* x, float* array, int n) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;  // tail guard for the ceil-div launch
	x[i] = sqrt( __ldg(&array[i]) );
}
// CPU reference: replace each element with its square root, in place.
void square_cpu(float *array, int n) {
	for (float *p = array, *end = array + n; p != end; ++p) {
		*p = sqrt(*p);
	}
}
// Fill a with a deterministic ramp: a[i] = i * 0.01.
void initArray(float *a, int size){
	int i = 0;
	while (i < size) {
		a[i] = i*0.01;
		++i;
	}
}
// Usage: ./a.out N threadsPerBlock. Times sqrt over N floats on CPU and GPU
// and cross-checks the results.
int main(int argc, char**argv) {
	// FIX: guard the two required arguments — atoi(NULL) is undefined behavior.
	if (argc < 3) {
		printf("usage: %s N threadsPerBlock\n", argv[0]);
		exit(1);
	}
	int N = atoi(argv[1]);
	int threadsPerBlock = atoi(argv[2]);
	if ((threadsPerBlock % 32 != 0) || (threadsPerBlock > 1024)) {
		printf("threadsPerBlock must be multiple of 32 and less than 1024");
		exit(1);
	}
	int size = N * sizeof(float);
	float *x = (float*) malloc(size);
	float *array = (float*) malloc(size);
	float *y = (float*) malloc(size);
	initArray(array, N);
	memcpy(y, array, size);
	clock_t tStart = clock();
	square_cpu(y, N);
	printf("Time taken by Host: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
	float *xd;
	float *arrayd;
	checkCuda( cudaMalloc(&xd, size) );
	checkCuda( cudaMalloc(&arrayd, size) );
	checkCuda( cudaMemcpy(arrayd, array, size, cudaMemcpyHostToDevice) );
	// Call square kernel (ceil-div grid)
	int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
	tStart = clock();
	square<<<blocksPerGrid, threadsPerBlock>>>(xd, arrayd, N);
	cudaDeviceSynchronize();
	printf("Time taken by GPU: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
	checkCuda( cudaMemcpy(x, xd, size, cudaMemcpyDeviceToHost) );
	// FIX: compare with a small relative tolerance instead of exact float
	// equality — host and device sqrt may legitimately differ in the last
	// ulp, so `x[i] != y[i]` could report false mismatches.
	for (int i = 0; i < N; i++) {
		float diff = x[i] - y[i];
		if (diff < 0) diff = -diff;
		float ref = y[i] < 0 ? -y[i] : y[i];
		if (diff > 1e-5f * (1.0f + ref)) {
			printf("%d %f %f INVALID RESULTS \n", i, x[i], y[i]);
			goto finalize;
		}
	}
	printf("Successfull Sum\n");
finalize:
	checkCuda(cudaFree(xd));
	checkCuda(cudaFree(arrayd));
	free(x);
	free(y);
	free(array);
	return 0;
}
|
4,946 | #include <stdio.h>
#include <math.h>
#include <float.h>
typedef struct {
int x, y;
} Point;
typedef struct {
float4 avg;
double inverse_cov[3][3];
double log_det;
} Class;
__constant__ Class dev_class[32];
// Mean RGB (as float4; the .w component stays 0) over the sampled pixel
// coordinates of one class. `h` is unused but kept for signature symmetry
// with CalculateCovariance.
float4 Average(uchar4 *data, int w, int h, Point *class_points, int point_n) {
	float4 mean = make_float4(0, 0, 0, 0);
	for (int k = 0; k < point_n; ++k) {
		Point p = class_points[k];
		uchar4 px = data[p.y * w + p.x];
		mean.x += px.x;
		mean.y += px.y;
		mean.z += px.z;
	}
	mean.x /= point_n;
	mean.y /= point_n;
	mean.z /= point_n;
	return mean;
}
// Sample covariance (divisor point_n - 1) of the RGB deltas from `avg`
// over one class's sampled pixels, written into cov[3][3].
// (The original shadowed the pixel-loop index with the inner 3x3 indices;
// here they are named r/c for clarity.)
void CalculateCovariance(double cov[3][3], uchar4 *data, int w, int h,
						Point *class_points, int point_n, float4 avg) {
	for (int r = 0; r < 3; ++r)
		for (int c = 0; c < 3; ++c)
			cov[r][c] = 0;
	for (int k = 0; k < point_n; ++k) {
		Point p = class_points[k];
		uchar4 pixel = data[p.y * w + p.x];
		double delta[3] = {pixel.x - avg.x, pixel.y - avg.y, pixel.z - avg.z};
		for (int r = 0; r < 3; ++r)
			for (int c = 0; c < 3; ++c)
				cov[r][c] += delta[r] * delta[c];
	}
	for (int r = 0; r < 3; ++r)
		for (int c = 0; c < 3; ++c)
			cov[r][c] /= point_n - 1;
}
// 3x3 determinant via the rule of Sarrus: forward diagonals minus
// backward diagonals, cycled with modular indices.
double Determinant(double cov[3][3]) {
	double result = 0;
	for (int k = 0; k < 3; ++k) {
		result += cov[0][k] * cov[1][(k + 1) % 3] * cov[2][(k + 2) % 3];
		result -= cov[0][(k + 2) % 3] * cov[1][(k + 1) % 3] * cov[2][k];
	}
	return result;
}
// 3x3 inverse: adjugate over determinant. The (i, j) output is the (j, i)
// cofactor, expressed with cyclic index pairs. Assumes det(in) != 0.
void Inverse(double in[3][3], double out[3][3]) {
	const double det = Determinant(in);
	for (int i = 0; i < 3; ++i) {
		for (int j = 0; j < 3; ++j) {
			const int i1 = (i + 1) % 3, i2 = (i + 2) % 3;
			const int j1 = (j + 1) % 3, j2 = (j + 2) % 3;
			out[i][j] = in[j1][i1] * in[j2][i2] - in[j1][i2] * in[j2][i1];
			out[i][j] /= det;
		}
	}
}
// Log-likelihood score of pixel p under the Gaussian model of class
// `class_idx` (constant memory): -log|cov| - (p-mu)^T cov^-1 (p-mu).
// Constant terms common to all classes are omitted since only the argmax
// across classes is used.
__device__ double MaxLikehoodEstimation(uchar4 p, int class_idx) {
Class c = dev_class[class_idx];
// RGB deviation from the class mean.
double delta[3] = {p.x - c.avg.x, p.y - c.avg.y, p.z - c.avg.z};
double temp[3] = {0,};
// temp = delta^T * inverse_cov (row vector times matrix).
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
temp[i] += delta[j] * c.inverse_cov[j][i];
}
}
double result = -c.log_det;
// Subtract the Mahalanobis quadratic form temp . delta.
for (int i = 0; i < 3; ++i) {
result -= temp[i] * delta[i];
}
return result;
}
// Classify each pixel by maximum log-likelihood across the classes in
// constant memory; the winning class index is stored in the alpha channel.
// 2D grid-stride loops, so any launch geometry covers the whole image.
__global__ void kernel(uchar4 *image, int w, int h, int class_count) {
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	int idy = threadIdx.y + blockDim.y * blockIdx.y;
	int offsetx = blockDim.x * gridDim.x;
	int offsety = blockDim.y * gridDim.y;
	for (int i = idx; i < w; i += offsetx) {
		for (int j = idy; j < h; j += offsety) {
			uchar4 pixel = image[j * w + i];
			// FIX: sentinel changed from INT_MIN to -DBL_MAX. INT_MIN both
			// required <limits.h> (not included in this file) and could be
			// exceeded downward by a valid, very negative log-likelihood,
			// which would misclassify the pixel. float.h IS included.
			double max_value = -DBL_MAX;
			int max_idx = 0;
			for (int class_idx = 0; class_idx < class_count; ++class_idx) {
				double value = MaxLikehoodEstimation(pixel, class_idx);
				if (value > max_value) {
					max_idx = class_idx;
					max_value = value;
				}
			}
			image[j * w + i] = make_uchar4(pixel.x, pixel.y, pixel.z, max_idx);
		}
	}
}
// Pipeline: read an RGBA raster and per-class training pixel coordinates
// from stdin/file, fit one Gaussian (mean + inverse covariance + log-det)
// per class on the host, then classify every pixel on the GPU and write the
// labeled raster back out.
int main() {
char input_file[256], output_file[256];
int class_count;
// NOTE(review): none of the scanf/fread/fopen calls are checked — malformed
// input leads straight to undefined behavior.
scanf("%s", input_file);
scanf("%s", output_file);
scanf("%d", &class_count);
Point *class_points[class_count];
int w, h;
FILE *in = fopen(input_file, "rb");
// Header: width and height, each stored in a 4-byte slot (sizeof(uchar4)
// == sizeof(int) == 4, so reading into an int is deliberate here).
fread(&w, sizeof(uchar4), 1 , in);
fread(&h, sizeof(uchar4), 1 , in);
uchar4 *data = (uchar4*) malloc(sizeof(uchar4) * h * w);
fread(data, sizeof(uchar4), h * w, in);
fclose(in);
// Per-class training coordinates from stdin.
int point_n[class_count];
for (int i = 0; i < class_count; ++i) {
scanf("%d", &point_n[i]);
class_points[i] = (Point *) malloc(sizeof(Point) * point_n[i]);
for (int j = 0; j < point_n[i]; ++j) {
scanf("%d%d", &class_points[i][j].x, &class_points[i][j].y);
}
}
// Fit the per-class Gaussian parameters used by the device classifier.
Class class_arr[class_count];
double cov[3][3];
for (int i = 0; i < class_count; ++i) {
class_arr[i].avg = Average(data, w, h, class_points[i], point_n[i]);
CalculateCovariance(cov, data, w, h, class_points[i], point_n[i], class_arr[i].avg);
Inverse(cov, class_arr[i].inverse_cov);
class_arr[i].log_det = log(Determinant(cov));
}
// Upload image + model (dev_class holds up to 32 classes) and classify.
uchar4 *dev_data;
cudaMalloc(&dev_data, sizeof(uchar4) * h * w);
cudaMemcpy(dev_data, data, sizeof(uchar4) * h * w, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(dev_class, class_arr, sizeof(Class) * class_count);
kernel<<<dim3(32,32), dim3(32, 32)>>>(dev_data, w, h, class_count);
// Blocking copy doubles as the kernel synchronization point.
cudaMemcpy(data, dev_data, sizeof(uchar4) * h * w, cudaMemcpyDeviceToHost);
FILE *out = fopen(output_file, "wb");
fwrite(&w, sizeof(uchar4), 1, out);
fwrite(&h, sizeof(uchar4), 1, out);
fwrite(data, sizeof(uchar4), h * w, out);
fclose(out);
cudaFree(dev_data);
for (int i = 0; i < class_count; ++i) {
free(class_points[i]);
}
free(data);
return 0;
}
|
4,947 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include "curand_kernel.h"
#include <assert.h>
// L should be (multiple of (THR_NUMBER - 2) ) + 2
#define L 114
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
// #define T 6.
// #define T 0.1
// #define T 2.26918531421
#define T_CYCLE_START 1.5
#define T_CYCLE_END 3.0
#define T_CYCLE_STEP 0.04
#define SINGLETEMP 3.5
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
// Simulation schedule for the Ising measurement run.
struct measure_plan {
int steps_repeat;        // independent repetitions of the whole run
int t_max_sim;           // total sweeps per repetition
int t_measure_wait;      // thermalization sweeps before measuring
int t_measure_interval; }  // sweeps between successive measurements
static PLAN = {
.steps_repeat = 1,
.t_max_sim = 251,
.t_measure_wait = 50,
.t_measure_interval = 20 };
// print history true/false
#define HISTORY 1
const int THR_NUMBER = 30;
const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 );
const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER );
const dim3 THREADS( THR_NUMBER, THR_NUMBER );
// average tracker struct
struct avg_tr {
float sum;
float sum_squares;
int n;
};
struct avg_tr new_avg_tr(int locn) {
struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
return a;
}
void update_avg(struct avg_tr * tr_p, float newval) {
tr_p->sum += newval;
tr_p->sum_squares += (newval*newval);
}
// __device__ static inline void dev_update_avg(struct avg_tr * tr_p, float newval) {
// tr_p->sum += newval;
// tr_p->sum_squares += (newval*newval);
// }
float average( struct avg_tr tr) {
return (tr.sum)/((float) tr.n) ;
}
float stdev( struct avg_tr tr) {
return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
}
// Population variance: E[x^2] - (E[x])^2.
float variance( struct avg_tr tr) {
    float mean = (tr.sum)/((float) tr.n);
    return ( (tr.sum_squares)/((float) tr.n) - pow(mean, 2) );
}
// RNG init kernel
// Gives every thread its own curand state: a common seed with the thread's
// flat global id as the curand sequence number, so streams are independent.
// Launch with the same grid/block shape as the simulation kernels, since the
// same tid formula is used there to pick a state.
__global__ void initRNG(curandState * const rngStates, const unsigned int seed) {
// Determine thread ID
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// Initialise the RNG
curand_init(seed, tid, 0, &rngStates[tid]);
}
// static inline float unitrand(){
// return (float)rand() / (float)RAND_MAX;
// }
// Draw one uniform float from this thread's RNG stream (curand_uniform
// returns values in (0, 1]). The state is staged through a register copy
// and written back so the stream advances.
__device__ static inline float dev_unitrand( curandState * const rngStates, unsigned int tid ){
curandState localState = rngStates[tid];
float val = curand_uniform(&localState);
rngStates[tid] = localState;
return val;
}
// Fill the whole L x L grid with random 0/1 spins from the host RNG.
// Traversal order (column-major over x, then y) matches the original so the
// same srand seed reproduces the same grid.
void init_random(char grid[L*L]) {
    for (int col = 0; col < L; col++) {
        for (int row = 0; row < L; row++) {
            grid[col + row*L] = rand() & 1;
        }
    }
}
// Zero every cell of the L x L grid (all spins down / "T=0" start).
void init_t0(char grid[L*L]) {
    for (int idx = 0; idx < L*L; idx++) {
        grid[idx] = 0;
    }
}
// Print the grid as ASCII art: blank for spin 0, a solid block for spin 1,
// one printed row per x index (note: rows are x, columns are y here, matching
// the original traversal), followed by a trailing blank line.
void dump(char grid[L*L]) {
    for (int i = 0; i < L; i++) {
        for (int j = 0; j < L; j++) {
            printf(grid[i + j*L] == 0 ? " " : "█");
        }
        printf("\n");
    }
    printf("\n");
}
// Device-side twin of dump(): prints the grid from GPU code (device printf),
// blank for spin 0 and a solid block for spin 1, trailing blank line.
__device__ void dev_dump(char grid[L*L]) {
    for (int i = 0; i < L; i++) {
        for (int j = 0; j < L; j++) {
            printf(grid[i + j*L] == 0 ? " " : "█");
        }
        printf("\n");
    }
    printf("\n");
}
struct coords {
int x;
int y;
};
// Map this thread to its global grid cell. Blocks advance by THR_NUMBER-2
// (not THR_NUMBER), so neighboring blocks overlap by two cells: each block's
// edge threads cover a halo of cells owned by its neighbors.
__device__ static inline coords dev_get_thread_coords() {
struct coords thread_coords;
thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ;
thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ;
return thread_coords;
}
// Read the neighbor at (x+xstep, y+ystep) from the block-local shared tile.
// No bounds checking is performed ("can segfault"): the caller must keep
// x+xstep and y+ystep inside [0, THR_NUMBER).
__device__ static inline char dev_shared_grid_step(char shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) {
return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER];
}
// segfault if applied to an edge spin, call only on the inner THR_NUMBER-1 grid
// Metropolis-style update of one spin on the shared tile at (x, y).
// Spins are stored as 0/1; each jK is 1 where s0 differs from that neighbor,
// so (j1+j2+j3+j4)*2 - 4 counts disagreements minus agreements, and dh is
// the energy cost of flipping s0 (coupling J). The flip is accepted when a
// uniform draw falls below exp(-dh/T) (always, when dh <= 0).
__device__ void dev_update_spin_shared(char dev_shared_grid[ THR_NUMBER*THR_NUMBER ], int x, int y , curandState * const rngStates, unsigned int tid, double temperature ) {
char s0 = dev_shared_grid[x+y*THR_NUMBER];
// XOR = 1 iff the two 0/1 spins are anti-aligned
char j1 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 1, 0);
char j2 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, -1, 0);
char j3 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 0, 1);
char j4 = s0 ^ dev_shared_grid_step(dev_shared_grid, x, y, 0, -1);
float dh = (float) ( -((j1 + j2 + j3 + j4) *2 -4)*2*J );
float p = exp( -dh / temperature);
float ur = dev_unitrand(rngStates, tid);
if(ur < p ) {
// flip the 0/1 spin in place
dev_shared_grid[x+y*THR_NUMBER] = !dev_shared_grid[x+y*THR_NUMBER];
}
}
// One full checkerboard sweep of the global grid, staged through shared
// memory. Each block copies its (overlapping) THR_NUMBER x THR_NUMBER tile
// into shared memory, updates the tile's interior spins in two sub-lattice
// passes (white then black, so no two simultaneously-updated spins are
// neighbors), and writes the interior back. Blocks themselves are split
// into a macro-checkerboard (macro-white / macro-black) so neighboring
// blocks, whose tiles overlap, never update in the same pass.
// Note: the macro-white/macro-black branches are uniform per block, so the
// __syncthreads() calls inside them are reached by every thread of the block.
__device__ void dev_update_grid_shared(char grid[L*L], curandState * const rngStates, double temperature ) {
// the first argument here is the GLOBAL grid
// thread coords relative to the GLOBAL grid
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
// Determine thread ID (for RNG)
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// stage this block's tile (including its 1-cell halo) into shared memory
__shared__ char shared_grid[ THR_NUMBER*THR_NUMBER ];
shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = grid[(glob_x )+ (glob_y )*L ]; // check formulas
__syncthreads();
// thread coords relative to the shared grid
int shared_x = threadIdx.x;
int shared_y = threadIdx.y;
// macro-checkboards
// macro-white
if( (blockIdx.x + (blockIdx.y)%2)%2 == 0 ) {
/////////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature );
}
}
__syncthreads();
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature );
}
}
__syncthreads();
// write the updated interior back to the global grid (halo stays read-only)
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
}
__syncthreads();
// macro-black: same two-pass update for the other half of the blocks
if( (blockIdx.x + (blockIdx.y)%2)%2 == 1 ) {
//////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature );
}
}
__syncthreads();
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_spin_shared( shared_grid, shared_x, shared_y, rngStates, tid, temperature );
}
}
__syncthreads();
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
}
}
// Take one magnetization measurement: every interior thread atomically adds
// its 0/1 spin into *dev_partial_res; then the single thread (block (0,0),
// thread (0,0)) converts the count of up-spins to a magnetization in
// [-1, 1] via (2*ones - NTOT)/NTOT, accumulates it into *dev_single_run_avg,
// and resets the counter for the next measurement.
// NOTE(review): __syncthreads() is block-local only; the reading thread may
// run before other blocks have finished their atomicAdds, making the total
// inexact -- a grid-wide sync (or a separate kernel) would be needed to
// guarantee correctness. Verify whether this drift is tolerated here.
__device__ void dev_update_magnetization_tracker(char dev_grid[L*L], float * dev_single_run_avg, int * dev_partial_res ) {
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
int spin = (int) dev_grid[glob_x+glob_y*L];
atomicAdd(dev_partial_res, spin );
}
__syncthreads();
if ( blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0) {
// map count of 1-spins onto magnetization per active spin
float val = ( ((float) (*dev_partial_res) *2 ) - NTOT ) / (float) NTOT;
/*fl__*/ *dev_single_run_avg += val;
*dev_partial_res = 0;
}
}
// Kernel: run one complete simulation at `temperature` entirely on the GPU.
// First pl.t_measure_wait thermalization sweeps, then sweeps until
// pl.t_max_sim, sampling the magnetization every pl.t_measure_interval
// sweeps; samples are summed into *dev_single_run_avg (the host divides by
// the number of measurements afterwards).
__global__ /*fl__*/ void dev_measure_cycle_kernel(struct measure_plan pl, char * dev_grid, curandState * const rngStates, float * dev_single_run_avg, int * dev_partial_res , double temperature ) {
// INNER SIM LOOPS
int ksim=0;
// thermalization: sweep without measuring
for( ; ksim<pl.t_measure_wait; ksim++) {
dev_update_grid_shared(dev_grid, rngStates, temperature);
}
// end thermalization
for( ; ksim<pl.t_max_sim; ksim++) {
dev_update_grid_shared(dev_grid, rngStates, temperature);
////////////measures
if( ksim % pl.t_measure_interval == 0) {
dev_update_magnetization_tracker(dev_grid, dev_single_run_avg, dev_partial_res );
}
}
// END INNER SIM LOOPS
}
// Run pl.steps_repeat independent simulations at `temperature`, starting
// each from `startgrid`, average the per-run magnetization, and append
// "temperature  mean  stdev" to resf.
// Fixes: (1) the per-run device accumulator is now allocated once instead of
// cudaMalloc/cudaFree inside the repetition loop (allocation is expensive);
// (2) *dev_partial_res is zeroed after allocation -- the measurement kernel
// accumulates into it before resetting it, so it must not start from
// uninitialized memory.
void parall_measure_cycle(char startgrid[L*L], struct measure_plan pl, char * dev_grid, curandState * const rngStates, FILE *resf, double temperature ) {
//OUTER REP LOOP
////////////measures
float n_measures_per_sim = (float) ((pl.t_max_sim - pl.t_measure_wait)/pl.t_measure_interval);
struct avg_tr outer_avg_tr = new_avg_tr(pl.steps_repeat);
// extra space needed by dev_update_magnetization_tracker
int * dev_partial_res;
cudaMalloc(&dev_partial_res, sizeof(int));
cudaMemset(dev_partial_res, 0, sizeof(int)); // counter must start at zero
// per-run accumulator, allocated once and reset by cudaMemcpy each run
float * dev_single_run_avg;
cudaMalloc(&dev_single_run_avg, sizeof(float));
for( int krep=0; krep< pl.steps_repeat; krep++) {
float single_run_avg = 0.;
cudaMemcpy(dev_single_run_avg, &single_run_avg, sizeof(float), cudaMemcpyHostToDevice);
// printf("seeding with %i\n", SEED+krep);
// initialize starting grid on the device for this sim
cudaMemcpy(dev_grid, startgrid, L*L*sizeof(char), cudaMemcpyHostToDevice);
dev_measure_cycle_kernel<<<BLOCKS, THREADS>>>(pl, dev_grid, rngStates, dev_single_run_avg, dev_partial_res, temperature );
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("ERROR: %s\n", cudaGetErrorString(err));
}
// bring back results to CPU (cudaMemcpy also synchronizes with the kernel)
cudaMemcpy(&single_run_avg, dev_single_run_avg, sizeof(float), cudaMemcpyDeviceToHost);
// the kernel sums the measurements; divide to get this run's average
float lres = single_run_avg/(n_measures_per_sim);
if (HISTORY) printf(" temperature: %f\n", temperature);
if (HISTORY) printf("# average of simulation %i:\n %f\n", krep+1, lres);
update_avg(&outer_avg_tr, lres);
char endgrid[L*L];
cudaMemcpy(endgrid, dev_grid, L*L*sizeof(char), cudaMemcpyDeviceToHost);
if (HISTORY) dump(endgrid);
}
// END OUTER REP LOOP
////////////measures
fprintf(resf, "%f ", temperature);
fprintf(resf, "%f ", average(outer_avg_tr));
fprintf(resf, "%f\n", stdev(outer_avg_tr));
cudaFree(dev_single_run_avg);
cudaFree(dev_partial_res);
}
// Entry point: writes run metadata to results.txt, initializes per-thread
// curand states and the device grid, runs one measurement cycle at
// SINGLETEMP (the temperature sweep is commented out), and records total
// wall time and a flips-per-millisecond figure in time.txt.
int main() {
// L should be (multiple of THR_NUMBER -2) + 2
assert( ((L-2)% (THR_NUMBER-2) )== 0 );
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
FILE *resf = fopen("results.txt", "w");
fprintf(resf, "# gpu1\n");
fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, PLAN.steps_repeat);
fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", PLAN.t_max_sim, PLAN.t_measure_wait, PLAN.t_measure_interval, SEED);
fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(resf, "\n");
fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");
srand(SEED);
// curand init
// Allocate memory for RNG states (one state per thread in the full grid)
curandState *d_rngStates = 0;
cudaMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
// Initialise RNG
initRNG<<<BLOCKS, THREADS>>>(d_rngStates, SEED);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("ERROR: %s\n", cudaGetErrorString(err));
}
// device grid
char * dev_grid;
cudaMalloc(&dev_grid, L*L*sizeof(char));
// start from the all-zero (cold) configuration
char startgrid[L*L];
init_t0(startgrid);
// if (HISTORY) printf("starting grid:\n");
// if (HISTORY) dump(startgrid);
// // // temp cycle:
// for( double kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
// parall_measure_cycle(startgrid, PLAN, dev_grid, d_rngStates, resf, kt);
// }
// only 1:
parall_measure_cycle(startgrid, PLAN, dev_grid, d_rngStates, resf, SINGLETEMP);
cudaFree(d_rngStates);
cudaFree(dev_grid);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float total_time = 0;
cudaEventElapsedTime(&total_time, start, stop);
FILE *timef = fopen("time.txt", "w");
// NOTE(review): total_flips scales with n_temps (the temperature-sweep
// count) even though only the single-temperature run above is active --
// verify this figure if the sweep stays commented out.
long int total_flips = ((long int)(n_temps))* ((long int)((PLAN.steps_repeat))) * ((long int)(PLAN.t_max_sim)) * ((long int)(NTOT));
fprintf(timef, "# gpu1\n");
fprintf(timef, "# total execution time (milliseconds):\n");
fprintf(timef, "%f\n", total_time);
fprintf(timef, "# total spin flips performed:\n");
fprintf(timef, "%li\n", total_flips);
fprintf(timef, "# average spin flips per millisecond:\n");
fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
fclose(timef);
fclose(resf);
return 0;
}
|
4,948 | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the rc machines
==================================================================
*/
/*
Daniel Burkholder
June 6 2016
USF Summer 2016
CIS 4930 - Programming Massively Parallel Systems
Project 1
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket *histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom *atom_list; /* list of all data points */
// CUDA Error Check
// Aborts the program with a descriptive message when a CUDA call failed.
// `in` names the call site. It is taken as const char* so that the string
// literals the callers pass are well-formed in modern C++ (literal-to-
// non-const-char* conversion is ill-formed since C++11); the message now
// goes to stderr, where error output belongs.
void checkCudaError(cudaError_t e, const char in[]) {
if (e != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
exit(EXIT_FAILURE);
}
}
// Euclidean distance between the atoms at indices ind1 and ind2.
__device__ double
p2p_distance(atom *a, int ind1, int ind2) {
    double dx = a[ind1].x_pos - a[ind2].x_pos;
    double dy = a[ind1].y_pos - a[ind2].y_pos;
    double dz = a[ind1].z_pos - a[ind2].z_pos;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
__global__ void
PDH_baseline(bucket *histo, atom *atomList, double w, int size) {
/* One thread per "left" atom i: thread i accumulates every unordered pair
 * (i, j) with j > i into the histogram. `w` is the bucket width, so a
 * distance d lands in bucket floor(d / w).
 * Fix: added a bounds guard for the grid tail -- the launch rounds the
 * thread count up to a multiple of the block size, so trailing threads
 * must not index atomList. */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) {
return;
}
for (int j = i + 1; j < size; j++) {
double dist = p2p_distance(atomList, i, j);
int pos = (int) (dist / w);
/* several threads can hit the same bucket concurrently: must be atomic */
atomicAdd( &histo[pos].d_cnt, (unsigned long long) 1);
}
}
// 2D-grid variant: one thread per (i, j) pair, each unordered pair counted
// once via the i < j guard. Currently unused -- its launch in main() is
// commented out.
// NOTE(review): histo[pos].d_cnt++ is a plain read-modify-write, so
// concurrent threads landing in the same bucket will lose counts; switch to
// atomicAdd (as PDH_baseline does) before enabling this kernel. The device
// printf per pair is debug output only.
__global__ void
PDH2D_baseline(bucket *histo, atom *atomList, double w) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i < j) {
double dist = p2p_distance(atomList, i, j);
int pos = (int) (dist / w);
histo[pos].d_cnt++;
printf("%d, %d : %d, %f \n", i, j, pos, dist);
}
__syncthreads();
}
// Print the histogram five buckets per row, then the grand total count
// (which should equal n*(n-1)/2 for n atoms) after the last bucket.
void output_histogram(bucket *histo){
    long long running_total = 0;
    for (int idx = 0; idx < num_buckets; idx++) {
        if (idx % 5 == 0) /* we print 5 buckets in a row */
            printf("\n%02d: ", idx);
        printf("%15lld ", histo[idx].d_cnt);
        running_total += histo[idx].d_cnt;
        /* we also want to make sure the total distance count is correct */
        if (idx == num_buckets - 1)
            printf("\n T:%lld \n", running_total);
        else
            printf("| ");
    }
}
int main(int argc, char const *argv[])
{
/* Entry point: builds a box of uniformly random atoms, computes the
 * pairwise spatial distance histogram on the GPU, and prints it.
 * Usage: SDH {#of_atoms} {bucket_width}
 * Fixes: argument-count check (previously dereferenced argv blindly);
 * histogram allocated with calloc (the zeroed host buffer is copied to the
 * device and accumulated onto -- malloc left it full of garbage); and the
 * block count now rounds UP (ceil(PDH_acnt/32) truncated first because of
 * integer division, silently skipping up to 31 atoms). */
if (argc < 3) {
fprintf(stderr, "Usage: %s {#of_atoms} {bucket_width}\n", argv[0]);
return EXIT_FAILURE;
}
PDH_acnt = atoi(argv[1]); // Number of atoms
PDH_res = atof(argv[2]); // Input Distance: W
/* longest possible in-box distance is BOX_SIZE*sqrt(3) ~ BOX_SIZE*1.732 */
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
size_t histogramSize = sizeof(bucket)*num_buckets;
size_t atomSize = sizeof(atom)*PDH_acnt;
histogram = (bucket *)calloc(num_buckets, sizeof(bucket)); /* counts start at zero */
atom_list = (atom *)malloc(atomSize);
srand(1); /* fixed seed: reproducible input data */
/* generate data following a uniform distribution */
for(int i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
// Malloc space on device, copy to device
bucket *d_histogram = NULL;
atom *d_atom_list = NULL;
checkCudaError( cudaMalloc((void**) &d_histogram, histogramSize),
"Malloc Histogram");
checkCudaError( cudaMalloc((void**) &d_atom_list, atomSize),
"Malloc Atom List");
checkCudaError( cudaMemcpy(d_histogram, histogram, histogramSize, cudaMemcpyHostToDevice),
"Copy histogram to Device");
checkCudaError( cudaMemcpy(d_atom_list, atom_list, atomSize, cudaMemcpyHostToDevice),
"Copy atom_list to Device");
/* one thread per atom; round the block count up so every atom is covered */
int n_blocks = (int)((PDH_acnt + 31) / 32);
PDH_baseline <<<n_blocks, 32>>> (d_histogram, d_atom_list, PDH_res, PDH_acnt);
checkCudaError(cudaGetLastError(), "Checking Last Error, Kernel Launch");
checkCudaError( cudaMemcpy(histogram, d_histogram, histogramSize, cudaMemcpyDeviceToHost),
"Copy device histogram to host");
output_histogram(histogram);
checkCudaError(cudaFree(d_histogram), "Free device histogram");
checkCudaError(cudaFree(d_atom_list), "Free device atom_list");
free(histogram);
free(atom_list);
checkCudaError(cudaDeviceReset(), "Device reset");
return 0;
}
|
4,949 |
// Predictor step: Ustar = U + dt*R, with the two momentum-like components
// divided by (1 + dt*ShearSource) for the implicit shear-source treatment.
// One thread per cell on an m x n grid of 3-component cells; the two-cell
// ghost border is left untouched.
__global__ void buildUstar(float *Ustar, float *U, float *R, float *ShearSource, float dt, int m, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip threads in (or beyond) the two-cell-deep ghost frame.
    if (row <= 1 || col <= 1 || row >= m-2 || col >= n-2)
        return;
    int cell = row*n*3 + col*3;      // first of the cell's 3 components
    int shearIndex = row*n + col;    // scalar shear-source index
    float damp = 1.0f + dt * ShearSource[shearIndex];
    Ustar[cell]     = U[cell] + dt * R[cell];
    Ustar[cell + 1] = (U[cell + 1] + dt * R[cell + 1]) / damp;
    Ustar[cell + 2] = (U[cell + 2] + dt * R[cell + 2]) / damp;
}
// Corrector step: Unext = (U + Ustar + dt*Rstar)/2, with the two
// momentum-like components divided by (1 + dt/2 * ShearSourceStar).
// Same thread layout and ghost-cell handling as buildUstar.
__global__ void buildUnext(float *Unext, float *U, float *Ustar, float *Rstar, float *ShearSourceStar, float dt, int m, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip threads in (or beyond) the two-cell-deep ghost frame.
    if (row <= 1 || col <= 1 || row >= m-2 || col >= n-2)
        return;
    int cell = row*n*3 + col*3;      // first of the cell's 3 components
    int shearIndex = row*n + col;    // scalar shear-source index
    float damp = 1.0f + 0.5f * dt * ShearSourceStar[shearIndex];
    Unext[cell]     = 0.5f * U[cell] + 0.5f * (Ustar[cell] + dt * Rstar[cell]);
    Unext[cell + 1] = (0.5f * U[cell + 1] + 0.5f * (Ustar[cell + 1] + dt * Rstar[cell + 1])) / damp;
    Unext[cell + 2] = (0.5f * U[cell + 2] + 0.5f * (Ustar[cell + 2] + dt * Rstar[cell + 2])) / damp;
}
|
4,950 | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round 2: n-body simulation
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>
using namespace std;
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
std::string team="Slim_Shaders";
std::string author_1="TEST";
std::string author_2="DO NOT SUBMIT";
std::string author_3="Name_3"; ////optional
};
//////////////////////////////////////////////////////////////////////////
////Here is a sample function implemented on CPU for n-body simulation.
// Reference CPU n-body step: O(n^2) all-pairs gravity (G = 1) followed by
// semi-implicit Euler integration (velocity updated first, then position).
// epsilon_squared softens the denominator so coincident particles do not
// divide by zero. All nine state arrays are updated in place.
__host__ void N_Body_Simulation_CPU_Poorman(double* pos_x,double* pos_y,double* pos_z, ////position array
double* vel_x,double* vel_y,double* vel_z, ////velocity array
double* acl_x,double* acl_y,double* acl_z, ////acceleration array
const double* mass, ////mass array
const int n, ////number of particles
const double dt, ////timestep
const double epsilon_squared) ////epsilon to avoid 0-denominator
{
    // Zero the acceleration accumulators.
    memset(acl_x,0x00,sizeof(double)*n);
    memset(acl_y,0x00,sizeof(double)*n);
    memset(acl_z,0x00,sizeof(double)*n);
    // Accumulate pairwise gravitational accelerations.
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (i == j) continue; // no self-interaction
            // r_ij = x_j - x_i
            const double rx = pos_x[j] - pos_x[i];
            const double ry = pos_y[j] - pos_y[i];
            const double rz = pos_z[j] - pos_z[i];
            // a_ij = m_j * r_ij / (sqrt(r^2 + eps^2))^3, gravitational constant taken as 1
            const double dis_squared = rx*rx + ry*ry + rz*rz;
            const double one_over_dis_cube = 1.0/pow(sqrt(dis_squared+epsilon_squared),3);
            acl_x[i] += mass[j]*rx*one_over_dis_cube;
            acl_y[i] += mass[j]*ry*one_over_dis_cube;
            acl_z[i] += mass[j]*rz*one_over_dis_cube;
        }
    }
    // Explicit time integration: v first, then x using the new v.
    for (int i = 0; i < n; i++) {
        vel_x[i] += acl_x[i]*dt;
        vel_y[i] += acl_y[i]*dt;
        vel_z[i] += acl_z[i]*dt;
        pos_x[i] += vel_x[i]*dt;
        pos_y[i] += vel_y[i]*dt;
        pos_z[i] += vel_z[i]*dt;
    }
}
//////////////////////////////////////////////////////////////////////////
////TODO 1: your GPU variables and functions start here
////Your implementations end here
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////Test function for n-body simulator
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Please do not change the values below
const double dt=0.001; ////time step
const int time_step_num=10; ////number of time steps
const double epsilon=1e-2; ////epsilon added in the denominator to avoid 0-division when calculating the gravitational force
const double epsilon_squared=epsilon*epsilon; ////epsilon squared
////We use grid_size=4 to help you debug your code, change it to a bigger number (e.g., 16, 32, etc.) to test the performance of your GPU code
const unsigned int grid_size=32; ////assuming particles are initialized on a background grid
const unsigned int particle_n=pow(grid_size,3); ////assuming each grid cell has one particle at the beginning
////Benchmark driver: lay particles out on a grid_size^3 lattice, run and
////time the CPU reference simulation, time the (to-be-implemented) GPU
////version between CUDA events, and record the tracked particle's final
////position plus the GPU time in `out`.
////Fix: the ten new[] arrays were never released; they are now delete[]'d
////before returning (and the mass-init loop uses an unsigned counter to
////match particle_n).
__host__ void Test_N_Body_Simulation()
{
////initialize position, velocity, acceleration, and mass
double* pos_x=new double[particle_n];
double* pos_y=new double[particle_n];
double* pos_z=new double[particle_n];
////initialize particle positions as the cell centers on a background grid
double dx=1.0/(double)grid_size;
for(unsigned int k=0;k<grid_size;k++){
for(unsigned int j=0;j<grid_size;j++){
for(unsigned int i=0;i<grid_size;i++){
unsigned int index=k*grid_size*grid_size+j*grid_size+i;
pos_x[index]=dx*(double)i;
pos_y[index]=dx*(double)j;
pos_z[index]=dx*(double)k;
}
}
}
double* vel_x=new double[particle_n];
memset(vel_x,0x00,particle_n*sizeof(double));
double* vel_y=new double[particle_n];
memset(vel_y,0x00,particle_n*sizeof(double));
double* vel_z=new double[particle_n];
memset(vel_z,0x00,particle_n*sizeof(double));
double* acl_x=new double[particle_n];
memset(acl_x,0x00,particle_n*sizeof(double));
double* acl_y=new double[particle_n];
memset(acl_y,0x00,particle_n*sizeof(double));
double* acl_z=new double[particle_n];
memset(acl_z,0x00,particle_n*sizeof(double));
double* mass=new double[particle_n];
for(unsigned int i=0;i<particle_n;i++){
mass[i]=100.0;
}
//////////////////////////////////////////////////////////////////////////
////Default implementation: n-body simulation on CPU
////Comment the CPU implementation out when you test large-scale examples
auto cpu_start=chrono::system_clock::now();
cout<<"Total number of particles: "<<particle_n<<endl;
cout<<"Tracking the motion of particle "<<particle_n/2<<endl;
for(int i=0;i<time_step_num;i++){
N_Body_Simulation_CPU_Poorman(pos_x,pos_y,pos_z,vel_x,vel_y,vel_z,acl_x,acl_y,acl_z,mass,particle_n,dt,epsilon_squared);
cout<<"pos on timestep "<<i<<": "<<pos_x[particle_n/2]<<", "<<pos_y[particle_n/2]<<", "<<pos_z[particle_n/2]<<endl;
}
auto cpu_end=chrono::system_clock::now();
chrono::duration<double> cpu_time=cpu_end-cpu_start;
cout<<"CPU runtime: "<<cpu_time.count()*1000.<<" ms."<<endl;
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////Your implementation: n-body simulator on GPU
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
float gpu_time=0.0f;
cudaDeviceSynchronize();
cudaEventRecord(start);
//////////////////////////////////////////////////////////////////////////
////TODO 2: Your GPU functions are called here
////Requirement: You need to copy data from the CPU arrays, conduct computations on the GPU, and copy the values back from GPU to CPU
////The final positions should be stored in the same place as the CPU n-body function, i.e., pos_x, pos_y, pos_z
////The correctness of your simulation will be evaluated by comparing the results (positions) with the results calculated by the default CPU implementations
//////////////////////////////////////////////////////////////////////////
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&gpu_time,start,end);
printf("\nGPU runtime: %.4f ms\n",gpu_time);
cudaEventDestroy(start);
cudaEventDestroy(end);
//////////////////////////////////////////////////////////////////////////
out<<"R0: "<<pos_x[particle_n/2]<<" " <<pos_y[particle_n/2]<<" " <<pos_z[particle_n/2]<<endl;
out<<"T1: "<<gpu_time<<endl;
////release the heap arrays (previously leaked)
delete[] pos_x; delete[] pos_y; delete[] pos_z;
delete[] vel_x; delete[] vel_y; delete[] vel_z;
delete[] acl_x; delete[] acl_y; delete[] acl_z;
delete[] mass;
}
int main()
{
    // Refuse to run while the placeholder team name is still set.
    if (name::team == "Team_X") {
        printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
        return 0;
    }
    // Open the per-team result file before running the benchmark.
    const std::string file_name = name::team + "_competition_2_nbody.dat";
    out.open(file_name.c_str());
    if (out.fail()) {
        printf("\ncannot open file %s to record results\n", file_name.c_str());
        return 0;
    }
    Test_N_Body_Simulation();
    return 0;
}
|
4,951 | #include "includes.h"
// Assign each pixel to its nearest centroid by squared Euclidean distance.
// Hard-coded layout: 34 feature dimensions and 64 centroids stored as
// centroids[dimension * 64 + centroid]; responses are column-major,
// responses[dimension * nPixels + pixel]. Centroids are staged into shared
// memory in two batches of 32. The per-block count of relabeled pixels is
// folded into *changes so the host can detect k-means convergence.
// NOTE(review): filterCount and clusterCount are unused -- the 34/64 layout
// is assumed; confirm against the caller.
// Fix: `localChanges` lives in shared memory, which starts uninitialized;
// it is now zeroed before any thread atomicInc's it (previously each block
// added garbage to *changes).
__global__ void findLabels(int nPixels, int filterCount, int clusterCount, float* responses, float* centroids, int* clusters, int* changes) {
__shared__ float sharedCentroids[34 * 32];
__shared__ unsigned int localChanges; // per-block relabel counter
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (threadIdx.x == 0) {
localChanges = 0; // must not start from uninitialized shared memory
}
// stage centroids 0..31
if (threadIdx.x < 32) {
for(int i = 0; i < 34; i++) {
float element = centroids[i * 64 + threadIdx.x];
sharedCentroids[i * 32 + threadIdx.x] = element;
}
}
__syncthreads();
int bestLabel = -1;
float bestDistance = 1000000;
if (x < nPixels) {
for(int label = 0; label < 32; label++) {
float accumulant = 0.0f;
int index = x;
for(int dimension = 0; dimension < 34; dimension++) {
float diff = sharedCentroids[dimension * 32 + label] - responses[index];
accumulant += diff * diff;
index += nPixels;
}
if (accumulant < bestDistance) {
bestLabel = label;
bestDistance = accumulant;
}
}
}
__syncthreads();
// stage centroids 32..63, overwriting the first batch
if (threadIdx.x < 32) {
for(int i = 0; i < 34; i++) {
sharedCentroids[i * 32 + threadIdx.x] = centroids[i * 64 + threadIdx.x + 32];
}
}
__syncthreads();
if (x < nPixels) {
for(int label = 0; label < 32; label++) {
float accumulant = 0.0f;
int index = x;
for(int dimension = 0; dimension < 34; dimension++) {
float diff = sharedCentroids[dimension * 32 + label] - responses[index];
accumulant += diff * diff;
index += nPixels;
}
if (accumulant < bestDistance) {
bestLabel = label + 32; // second batch holds centroids 32..63
bestDistance = accumulant;
}
}
int formerCluster = clusters[x];
if (bestLabel != formerCluster) {
atomicInc(&localChanges, 10000000);
}
clusters[x] = bestLabel;
}
__syncthreads();
// one global atomic per block instead of one per pixel
if (threadIdx.x == 0) {
atomicAdd(changes, (int)localChanges);
}
}
4,952 |
/*************************************
* Matrix-Vector product CUDA kernel *
* V2: With Shared memory *
*************************************/
#include <stdio.h>
#define CUDA_SAFE_CALL( call ) { \
cudaError_t err = call; \
if( cudaSuccess != err ) { \
fprintf(stderr,"CUDA: error occurred in cuda routine. Exiting...\n"); \
exit(err); \
} }
#define A(i,j) A[ (j) + ((i)*(n)) ]
#define x(i) x[ (i) ]
#define y(i) y[ (i) ]
#define y_gpu(i) y_gpu[ (i) ]
#define y_cpu(i) y_cpu[ (i) ]
#define d_A(i,j) d_A[ (j) + ((i)*(n)) ]
#define d_x(i) d_x[ (i) ]
#define d_y(i) d_y[ (i) ]
#define min(i,j) ( (i)<(j) ? (i) : (j) )
#define BLOCKSIZE 32
__global__ void compute_kernel( unsigned int m, unsigned int n, float *d_A, float *d_x, float *d_y ) {
/* y = A*x. Each BLOCKSIZE x BLOCKSIZE thread block owns BLOCKSIZE
 * consecutive rows (row = blockIdx.x*BLOCKSIZE + threadIdx.y). The
 * BLOCKSIZE threads along x cooperatively accumulate one row's dot
 * product with a strided column loop, park their partial sums in shared
 * memory, and thread x==0 reduces them and writes d_y(row).
 *
 * Fixes vs. the previous version:
 *  - the row index now includes threadIdx.y (it ignored the thread's
 *    position inside the block, so all rows but one were wrong);
 *  - the column loop runs to k < n (it stopped at n-1, dropping a column);
 *  - partial sums use block-local indices (global indices overran the
 *    BLOCKSIZE*BLOCKSIZE shared array);
 *  - __syncthreads() is no longer inside the divergent i<m branch;
 *  - d_y is assigned rather than accumulated into uninitialized memory;
 *  - the shared copy of d_x is gone: each thread reads the d_x entries it
 *    needs directly, which keeps the kernel simple and correct. */
__shared__ float partial[BLOCKSIZE * BLOCKSIZE]; /* partial[ty*BLOCKSIZE + tx] */
unsigned int row = blockIdx.x * BLOCKSIZE + threadIdx.y;
float sum = 0.0f;
if (row < m) { /* prevent work on rows beyond m */
/* thread tx handles columns tx, tx+BLOCKSIZE, tx+2*BLOCKSIZE, ... */
for (unsigned int k = threadIdx.x; k < n; k += BLOCKSIZE) {
sum += d_A(row, k) * d_x(k);
}
}
partial[threadIdx.y * BLOCKSIZE + threadIdx.x] = sum;
__syncthreads(); /* all partials must land before the row reduction */
if (threadIdx.x == 0 && row < m) {
float acc = 0.0f;
for (int l = 0; l < BLOCKSIZE; l++) {
acc += partial[threadIdx.y * BLOCKSIZE + l];
}
d_y(row) = acc;
}
}
/* Host wrapper: computes y = A*x on the GPU. Allocates device buffers,
 * copies A and x over, launches compute_kernel with one
 * BLOCKSIZE x BLOCKSIZE block per BLOCKSIZE rows, and copies y back.
 * Fix: d_y is zeroed after allocation -- the kernel accumulates into it
 * ("d_y(i) += ..."), and cudaMalloc does not initialize memory, so the
 * result previously started from garbage. */
int cu_matrix_vector( unsigned int m, unsigned int n, float *h_A, float *h_x, float *h_y ) {
// Allocate device memory
float *d_A, *d_x, *d_y;
CUDA_SAFE_CALL( cudaMalloc((void **) &d_A, m*n*sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc((void **) &d_x, n*sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc((void **) &d_y, m *sizeof(float) ) );
CUDA_SAFE_CALL( cudaMemset( d_y, 0, m*sizeof(float) ) ); /* output must start at zero */
// Copy host memory to device
CUDA_SAFE_CALL( cudaMemcpy( d_A, h_A, m*n*sizeof(float), cudaMemcpyHostToDevice ) );
CUDA_SAFE_CALL( cudaMemcpy( d_x, h_x, n*sizeof(float), cudaMemcpyHostToDevice ) );
/* one block of BLOCKSIZE x BLOCKSIZE threads per BLOCKSIZE rows, rounded up */
int n_blocks = (int) ceil( (float) m / (float) BLOCKSIZE );
// Execute the kernel
dim3 dimGrid( n_blocks );
dim3 dimBlock( BLOCKSIZE, BLOCKSIZE );
compute_kernel<<< dimGrid, dimBlock >>>( m, n, d_A, d_x, d_y );
// Copy device memory to host (also synchronizes with the kernel)
CUDA_SAFE_CALL( cudaMemcpy( h_y, d_y, m *sizeof(float), cudaMemcpyDeviceToHost ) );
// Deallocate device memory
CUDA_SAFE_CALL( cudaFree(d_A) );
CUDA_SAFE_CALL( cudaFree(d_x) );
CUDA_SAFE_CALL( cudaFree(d_y) );
return EXIT_SUCCESS;
}
/* Reference CPU implementation of y = A*x, with A stored row-major
 * (row i starts at A + i*n). Always returns EXIT_SUCCESS. */
int matrix_vector( unsigned int m, unsigned int n, float *A, float *x, float *y ) {
    for (unsigned int i = 0; i < m; i++) {
        const float *row = A + (size_t)i * n;
        float acc = 0.0f;
        for (unsigned int j = 0; j < n; j++) {
            acc += row[j] * x[j];
        }
        y[i] = acc;
    }
    return EXIT_SUCCESS;
}
// Entry point: generates a random m x n matrix and n-vector, computes y=A*x
// on CPU and GPU (both timed with CUDA events), prints the accumulated
// CPU/GPU discrepancy scaled by the largest CPU result, and reports Gflops.
int main( int argc, char *argv[] ) {
unsigned int m, n;
unsigned int i, j;
/* Generating input data */
if( argc<3 ) {
printf("Usage: %s n_rows n_cols \n",argv[0]);
exit(-1);
}
sscanf(argv[1],"%d",&m);
sscanf(argv[2],"%d",&n);
float *A = (float *) malloc( m*n*sizeof(float) );
float *x = (float *) malloc( n*sizeof(float) );
printf("%s: Generating a random matrix of size %dx%d and a vector of size %d...\n",argv[0],m,n,n);
// entries drawn uniformly from [-1, 1]
for( i=0; i<m; i++ ) {
for( j=0; j<n; j++ ) {
A( i, j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
}
}
for( j=0; j<n; j++ ) {
x( j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
}
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
CUDA_SAFE_CALL( cudaEventCreate(&start) );
CUDA_SAFE_CALL( cudaEventCreate(&stop) );
printf("%s: y=A*x in CPU...\n",argv[0]);
float *y_cpu = (float *) malloc( m*sizeof(float) );
CUDA_SAFE_CALL( cudaEventRecord(start, NULL) ); // Record the start event
matrix_vector( m, n, A, x, y_cpu );
CUDA_SAFE_CALL( cudaEventRecord(stop, NULL) ); // Record the stop event
CUDA_SAFE_CALL( cudaEventSynchronize(stop) ); // Wait for the stop event to complete
float msecCPU = 0.0f;
CUDA_SAFE_CALL( cudaEventElapsedTime(&msecCPU, start, stop) );
printf("%s: y=A*x in GPU...\n",argv[0]);
float *y_gpu = (float *) malloc( m*sizeof(float) );
CUDA_SAFE_CALL( cudaEventRecord(start, NULL) ); // Record the start event
cu_matrix_vector( m, n, A, x, y_gpu );
CUDA_SAFE_CALL( cudaEventRecord(stop, NULL) ); // Record the stop event
CUDA_SAFE_CALL( cudaEventSynchronize(stop) ); // Wait for the stop event to complete
float msecGPU = 0.0f;
CUDA_SAFE_CALL( cudaEventElapsedTime(&msecGPU, start, stop) );
/* Check for correctness: sum of absolute CPU/GPU differences,
   scaled by the largest CPU magnitude */
float max = fabs( y_cpu( 0 ) );
for( i=1; i<m; i++ ) {
max = fabs( y_cpu( i ) > max ? y_cpu( i ) : max );
}
float error = 0.0f;
for( i=0; i<m; i++ ) {
error += fabs( y_gpu( i ) - y_cpu( i ) );
}
printf("Error CPU/GPU = %.3e\n",error/max);
// 2*m*n flops for a matrix-vector product (one multiply + one add per entry)
double flops = 2.0 * (double) m * (double) n;
double gigaFlopsCPU = (flops * 1.0e-9f) / (msecCPU / 1000.0f);
double gigaFlopsGPU = (flops * 1.0e-9f) / (msecGPU / 1000.0f);
printf("CPU time = %.2f msec.\n",msecCPU);
printf("GPU time = %.2f msec.\n",msecGPU);
printf("Gflops CPU = %.2f \n",gigaFlopsCPU);
printf("Gflops GPU = %.2f \n",gigaFlopsGPU);
// NOTE(review): the CUDA events are never destroyed and main falls off the
// end without an explicit return (implicit return 0 in C++).
free(A);
free(x);
free(y_cpu);
free(y_gpu);
}
|
4,953 | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
#include <bits/stdc++.h>
using namespace std;
using namespace std::chrono;
// In-place max reduction; the result ends up in input[0].
//
// Assumes a single-block launch with n/2 threads for an n-element array
// (both call sites use <<<1, n/2>>>), so the array length is 2*blockDim.x.
//
// Fixes vs. original: the loop had no __syncthreads() (race between the
// write of one round and the read of the next) and its ceil-halving index
// scheme read past the end of the array for non-power-of-2 sizes
// (e.g. input[128] for n == 80).
__global__ void maximum(int *input) {
    const int tid = threadIdx.x;
    int count = 2 * blockDim.x;        // number of live elements
    while (count > 1) {
        int half = (count + 1) / 2;    // ceil(count/2): handles odd counts
        if (tid < count / 2) {
            int other = input[tid + half];
            if (other > input[tid])
                input[tid] = other;
        }
        __syncthreads();               // publish this round before the next
        count = half;
    }
}
// In-place min reduction over input[0..n-1]; the result ends up in input[0].
//
// Assumes a single-block launch with at least n/2 threads (the call site
// uses <<<1, n/2>>>).
//
// Fixes vs. original: the loop had no __syncthreads() (data race between
// rounds), and the ceil-halving index scheme could skip or misplace
// elements for odd intermediate counts.
__global__ void minimum(int *input, int n) {
    const int tid = threadIdx.x;
    int count = n;                     // number of live elements
    while (count > 1) {
        int half = (count + 1) / 2;    // ceil(count/2): handles odd counts
        if (tid < count / 2) {
            int other = input[tid + half];
            if (other < input[tid])
                input[tid] = other;
        }
        __syncthreads();               // publish this round before the next
        count = half;
    }
}
// In-place sum reduction; the total ends up in input[0].
//
// Assumes a single-block launch with n/2 threads for an n-element array
// (the call site uses <<<1, n/2>>>), so the array length is 2*blockDim.x.
//
// Fixes vs. original: no __syncthreads() between rounds (race), and the
// extra tid==0 step after the loop added input[step_size], which points
// past the valid data once step_size has grown beyond n.
__global__ void gpu_sum(int *input) {
    const int tid = threadIdx.x;
    int count = 2 * blockDim.x;        // number of live partial sums
    while (count > 1) {
        int half = (count + 1) / 2;    // ceil(count/2): handles odd counts
        if (tid < count / 2)
            input[tid] += input[tid + half];
        __syncthreads();               // publish this round before the next
        count = half;
    }
}
// Replace each element with its squared deviation from the mean:
// input[i] = (input[i] - mean)^2. One thread per element, single block.
__global__ void mean_diff_sq(float *input, float mean) {
    const int i = threadIdx.x;
    const float dev = input[i] - mean;
    input[i] = dev * dev;
}
// Element-wise copy of an int array into a float array (widening cast).
void copy_int_to_float(float *dest, int *src, int size){
    for (int idx = 0; idx != size; ++idx) {
        dest[idx] = static_cast<float>(src[idx]);
    }
}
// In-place sum reduction over floats (used on the squared deviations
// produced by mean_diff_sq); the total ends up in input[0].
//
// Assumes a single-block launch with n/2 threads for an n-element array
// (the call site uses <<<1, n/2>>>), so the array length is 2*blockDim.x.
//
// Fixes vs. original (same defects as gpu_sum): no __syncthreads()
// between rounds, and a trailing tid==0 step that read past the end of
// the valid data.
__global__ void gpu_sd(float *input) {
    const int tid = threadIdx.x;
    int count = 2 * blockDim.x;        // number of live partial sums
    while (count > 1) {
        int half = (count + 1) / 2;    // ceil(count/2): handles odd counts
        if (tid < count / 2)
            input[tid] += input[tid + half];
        __syncthreads();               // publish this round before the next
        count = half;
    }
}
// CPU reference: sum of input[0..n-1], accumulated in a long.
long cpu_sum(int *input, int n) {
    long total = 0;
    for (int *p = input; p != input + n; ++p)
        total += *p;
    return total;
}
// CPU reference: smallest element of arr[0..n-1]. Requires n >= 1.
long cpu_min(int *arr, int n) {
    int best = arr[0];
    for (int i = n - 1; i >= 1; --i)
        best = (arr[i] < best) ? arr[i] : best;
    return best;
}
// CPU reference: largest element of arr[0..n-1]. Requires n >= 1.
long cpu_max(int *arr, int n) {
    int best = arr[0];
    for (int i = n - 1; i >= 1; --i)
        best = (arr[i] > best) ? arr[i] : best;
    return best;
}
// CPU reference: population standard deviation of arr[0..n-1] about the
// supplied mean, i.e. sqrt( sum((arr[i]-mean)^2) / n ).
//
// Fix vs. original: the temporary float[n] allocated with new[] was never
// deleted (memory leak); the squared deviations are now accumulated
// directly without any allocation.
double cpu_sd(int *arr, int n, float mean) {
    double total = 0;
    for (int i = 0 ; i < n ; i++) {
        float d = (float)arr[i] - mean;
        total += (double)(d * d);
    }
    total = total / n;
    return sqrt(total);
}
// Fill arr[0..n-1] with pseudo-random ints in [0, 1000) using rand().
void random_init(int *arr, int n) {
    int *end = arr + n;
    while (arr != end)
        *arr++ = rand() % 1000;
}
// Demo driver: computes sum/mean/standard deviation/min/max of a random
// 80-element int array both on the GPU (single-block, n/2-thread
// reduction kernels) and on the CPU, printing and timing each.
int main() {
int *d;
int n = 80;
int *arr = new int[n];
int result;
int size = n * sizeof(int);
random_init(arr,n);
cout<<"Input Array: [";
for(int i = 0 ; i < n ; i++) {
cout<<arr[i]<<", ";
}
cout<<"]"<<endl;
cout<<"======================================="<<endl;
cudaMalloc((void **)&d,size);
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
// Time the GPU sum with CUDA events.
float gpu_elapsed_time;
cudaEvent_t gpu_start,gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start,0);
gpu_sum<<<1,n/2>>>(d);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
// The in-place reduction leaves its result in element 0.
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Sum is: "<<result<<"\n";
float mean = (double)result/n;
cout<<"GPU Mean is: "<<mean<<endl;
// Std dev: square deviations on the device, reduce, sqrt(sum/n) on host.
// NOTE(review): the variable named `std` collides with namespace std
// (legal, but easy to misread).
float *arr_float = new float[n];
float *arr_std, std;
cudaMalloc((void **)&arr_std,n*sizeof(float));
copy_int_to_float(arr_float, arr, n);
cudaMemcpy(arr_std,arr_float,n*sizeof(float),cudaMemcpyHostToDevice);
mean_diff_sq <<<1,n>>>(arr_std, mean);
gpu_sd <<<1,n/2>>>(arr_std);
cudaMemcpy(&std,arr_std,sizeof(float),cudaMemcpyDeviceToHost);
cout<<"GPU Standard Deviation: "<<sqrt(std/n)<<endl;
cout<<"======================================="<<endl;
// CPU reference path (timed with chrono; `time_taken` is computed but
// never printed).
auto start = high_resolution_clock::now();
ios_base::sync_with_stdio(false);
result = cpu_sum(arr,n);
cout<<"CPU Sum is: "<<result<<"\n";
auto stop = high_resolution_clock::now();
double time_taken = chrono::duration_cast<chrono::milliseconds>(stop - start).count();
time_taken *= 1e-9;
mean = (float)result/n;
cout<<"CPU Mean is: "<<mean<<endl;
std = cpu_sd(arr, n, mean);
cout<<"CPU Standard Deviation: "<<std<<endl;
cout<<"======================================="<<endl;
// Min: re-upload the original data (the reductions clobbered the buffer).
result = 0;
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
minimum<<<1,n/2>>>(d,n);
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Min is: "<<result<<endl;
result = cpu_min(arr,n);
cout<<"CPU Min is: "<<result<<"\n";
cout<<"======================================="<<endl;
// Max: same pattern. NOTE(review): `gMax` is declared but never used, and
// the allocations (d, arr_std, arr, arr_float) are never freed.
cudaMemcpy(d,arr,size,cudaMemcpyHostToDevice);
maximum<<<1,n/2>>>(d);
int gMax;
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"GPU Max is: "<<result<<endl;
result = cpu_max(arr,n);
cout<<"CPU Max is: "<<result<<"\n";
cout<<"======================================="<<endl;
return 0;
}
4,954 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ void partition_by_bit(unsigned int *values, unsigned int bit);
// Single-block LSD radix sort: stable-partitions the array on each of the
// 32 bits in turn, least significant first. One thread per element; the
// algorithm relies on block-wide barriers, so it only works when launched
// with a single block.
__global__ void radix_sort(unsigned int *values)
{
int bit;
for( bit = 0; bit < 32; ++bit )
{
partition_by_bit(values, bit);
__syncthreads(); // all threads finish this bit before the next pass
}
}
// Block-wide inclusive prefix sum (Hillis-Steele) over x[0..blockDim.x-1],
// performed in place. Returns this thread's inclusive prefix sum x[i].
__device__ int plus_scan(unsigned int *x)
{
unsigned int i = threadIdx.x; // id of thread executing this instance
unsigned int n = blockDim.x; // total number of threads in this block
unsigned int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
unsigned int t;
if ( i >= offset )
t = x[i-offset];
// Barrier between read and write so no thread overwrites a value
// another thread still needs in this round.
__syncthreads();
if ( i >= offset )
x[i] = t + x[i]; // i.e., x[i] = x[i] + x[i-offset]
__syncthreads();
}
return x[i];
}
// Stable partition of values[] on the given bit: elements whose bit is 0
// move (order-preserving) to the front, elements whose bit is 1 follow.
// One thread per element; single-block only.
__device__ void partition_by_bit(unsigned int *values, unsigned int bit)
{
unsigned int i = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[i]; // value of integer at position i
unsigned int p_i = (x_i >> bit) & 1; // value of bit at position bit
// Temporarily replace each element by its bit, then scan the bits to
// learn how many 1-bits precede each position.
values[i] = p_i;
__syncthreads();
unsigned int T_before = plus_scan(values); // inclusive count of 1s up to i
unsigned int T_total = values[size-1]; // total 1-bits in the block
unsigned int F_total = size - T_total; // total 0-bits
__syncthreads();
// Scatter: 0-bits keep their rank among 0s; 1-bits go after all 0s.
if ( p_i )
values[T_before-1 + F_total] = x_i;
else
values[i - T_before] = x_i;
}
// Sort 1000 random unsigned ints on the GPU with the single-block radix
// sort, then print the sorted result.
//
// Fix vs. original: dev_a was never cudaFree'd (device memory leak) and
// main had no explicit return.
int main ()
{
    const int size = 1000;            // element count (was duplicated as a magic 1000)
    unsigned int a[size];
    srand(time(NULL));
    for (int i = 0; i < size; i++)
    {
        a[i] = rand ()%1000;
    }
    unsigned int *dev_a;
    cudaMalloc(&dev_a, size * sizeof(unsigned int));
    cudaMemcpy( dev_a, a, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
    // One thread per element; radix_sort requires a single block.
    radix_sort<<<1,size>>>(dev_a);
    cudaMemcpy( a, dev_a, size * sizeof(unsigned int), cudaMemcpyDeviceToHost );
    cudaFree(dev_a);                  // was leaked in the original
    for (int i = 0; i < size; i++)
    {
        printf("%u ", a[i]);
    }
    printf ("\n");
    return 0;
}
|
4,955 | #include<bits/stdc++.h>
#include<cuda.h>
#define PI 3.14159265
#define BlockSize 1024
using namespace std;
// Explicit finite differences for the 1-D heat equation:
//   U[t][i] = r*U[t-1][i-1] + (1-2r)*U[t-1][i] + r*U[t-1][i+1],
// with zero Dirichlet boundaries. Each thread owns one spatial node and
// the whole time loop runs inside the kernel.
// NOTE(review): __syncthreads() only synchronizes within one block; if
// N+2 > BlockSize the launch uses several blocks and consecutive time
// levels are not ordered across blocks — confirm single-block usage.
__global__ void FD(float *U_d, int T, int N,float r){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
for(int t=1; t<T; ++t){
// Boundary nodes pinned to zero (every thread writes the same values).
U_d[t*N]=0;
U_d[t*N+(N-1)]=0;
if(idx>0 && idx<N-1){
U_d[t*N+idx] = r*U_d[(t-1)*N+(idx-1)]+(1-2*r)*U_d[(t-1)*N+idx]+r*U_d[(t-1)*N+(idx+1)];
}
__syncthreads();
}
}
// Initial temperature profile: u(x, 0) = 2*sin(2*pi*x), with x = i*deltax.
float f(float i, float deltax){
    const float x = deltax*i;      // physical coordinate of node i
    return 2*sin(2*PI*x);          // initial condition
}
// Dump the solution: for every time level, print "x u(x)" pairs for each
// node, with a blank line between time levels.
void print( float *U, float deltaX,int T,int N){
    for (int t = 0; t < T; ++t) {
        for (int n = 0; n < N; ++n) {
            cout << n*deltaX << " " << U[t*N+n] << endl;
        }
        cout << endl;
    }
}
// Reads domain [xa,xb], time interval [ta,tb], interior node counts N and
// T, and diffusivity gamma from stdin; checks the explicit-scheme
// stability bound, runs the FD kernel, and prints the space-time solution.
int main(){
// NOTE(review): T and N are read as floats but used as counts throughout.
float *U_d,*U_h, xa,xb,ta,tb,T,N,gamma,r;
float deltaX,deltaT;
int nodes,sizeU;
cin>>xa>>xb>>ta>>tb>>N>>T>>gamma;
nodes = (N+2)*(T+2);
sizeU = sizeof(float)*nodes;
U_h = (float*)malloc(sizeU);
deltaX = (xb-xa) / float(N+1);
deltaT = (tb-ta) / float(T+1);
// Stability parameter of the explicit scheme; requires 0 < r < 1/2.
r = (gamma*deltaT)/(deltaX*deltaX);
if(r<=0 || r>=0.5 || deltaT>(deltaX*deltaX)/2.0){
cout<<"r:"<<r<<" k"<<deltaT<<endl;
cout<<"error"<<endl;
return 0;
}
// Initial condition fills the first time level.
for(int i=0; i<N+2; i++){
U_h[i] = f(i,deltaX);
}
cudaMalloc((void**)&U_d,sizeU);
cudaMemcpy(U_d,U_h,sizeU,cudaMemcpyHostToDevice);
dim3 dimBlock(BlockSize);
dim3 dimGrid(ceil((N+2)/float(BlockSize)));
FD<<<dimGrid,dimBlock>>>(U_d,T+2,N+2,r);
cudaMemcpy(U_h,U_d,sizeU,cudaMemcpyDeviceToHost);
print(U_h,deltaX,T+2,N+2);
cudaFree(U_d);
// NOTE(review): U_h (malloc'd above) is never freed.
}
|
4,956 | //#include "thand.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <pthread.h>
#define CYCLE 1024 * 1024 * 1024
#define THREAD 1
int * count;
// Device spin loop: every thread atomically increments *count until it
// reaches CYCLE. The host polls the same counter concurrently.
// NOTE(review): the while-condition reads *count with a plain load, and
// the __syncthreads() sits inside a data-dependent loop — threads of one
// block can disagree on the loop condition, which makes the barrier
// placement unsafe; also the counter may overshoot CYCLE since the check
// and the increment are separate operations.
__global__ void Check_gpu(int * count)
{
#if 1
__syncthreads();
while(*count < CYCLE)
{
//printf("\n\n\n\n\n\n");
//printf("____________GPU function is called______________\n");
__syncthreads();
atomicAdd(count, 1);
//printf("\n\n\n\n\n\n");
}
printf("_______________Out of GPU fct loop________________\n");
#else
printf("____________GPU function is called______________\n");
#endif
}
// Host wrapper (C linkage so non-CUDA translation units can call it):
// launches the spin kernel on the global device counter and blocks until
// the kernel finishes.
extern "C"
void Check(void)
{
printf("Check!!\n");
Check_gpu<<<1,512>>>(count);
cudaDeviceSynchronize();
printf("End Check!!!!!!!!!\n");
}
// C-linkage helper so plain-C callers can force a device-wide sync.
extern "C"
void cudasynch(void)
{
cudaDeviceSynchronize();
}
#if THREAD
// pthread entry point: poll the device counter forever, printing each
// time its value changes. Never returns; the thread dies when main exits.
// `count` (host global) holds a device pointer, so the cudaMemcpy below
// pulls the current counter value back to the host each iteration.
void* get_cnt(void * data)
{
int prev = 0;
printf("In get_cnt!!!!!!!!!!!!!!!\n");
while(1)
{
int ret = 0;
cudaMemcpy(&ret, count, sizeof(int), cudaMemcpyDeviceToHost);
if(prev != ret)
{
printf("In CPU : count = %d\n", ret);
prev = ret;
}
}
}
#else
// Non-threaded variant: sample and print the counter 100 times, then stop.
void get_cnt(void)
{
int j = 0;
printf("In get_cnt!!!!!!!!!!!!!!!\n");
while(j < 100)
{
int ret = 0, tmp; // NOTE(review): tmp is unused
cudaMemcpy(&ret, count, sizeof(int), cudaMemcpyDeviceToHost);
printf("In CPU : count = %d\n", ret);
j++;
}
}
#endif
// Allocates and zeroes the device counter, then (THREAD build) spawns a
// host polling thread and runs the device spin loop to completion.
int main(void)
{
pthread_t thread;
cudaMalloc((void**)&count, sizeof(int));
cudaMemset(count, 0, sizeof(int));
printf("___1____\n");
#if THREAD
// The poller runs concurrently with Check(); it is never joined or
// cancelled, so it simply dies when main returns.
pthread_create(&thread, NULL, get_cnt, NULL);
Check();
printf("___2____\n");
#else
Check();
get_cnt();
printf("___2____\n");
#endif
cudaDeviceSynchronize();
// NOTE(review): `count` is never cudaFree'd.
printf("___3____\n");
return 0;
}
|
4,957 | #include "includes.h"
// Fill the first N entries of vals with the constant mu.
// Flattens an arbitrary 3-D block / 2-D grid launch into one linear index
// (scheme from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd).
__global__ void dset_kernel(double *vals, int N, double mu)
{
    // Linear block id within the 2-D grid.
    const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    // Threads per block for an arbitrary 3-D block shape.
    const int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    // This thread's linear rank inside its block.
    const int local = threadIdx.x
                    + threadIdx.y * blockDim.x
                    + threadIdx.z * (blockDim.x * blockDim.y);
    const int idx = blockId * threadsPerBlock + local;
    if (idx < N)
        vals[idx] = mu;
}
4,958 | #include <iostream>
#include <iomanip>
#include <cstdlib>
#include <stdlib.h>
#include <cstdio>
// Fourth order interpolation function.
// Interpolates to the midpoint of the 4-point stencil (m2, m1 | p1, p2):
// weights -1/16 on the outer pair and 9/16 on the inner pair.
__host__ __device__ inline double interp(const double m2, const double m1, const double p1, const double p2)
{
return (-1./16)*(m2+p2) + (9./16)*(m1+p1);
}
// Fourth order gradient function.
// 4th-order finite difference at the midpoint of the 4-point stencil
// (m2, m1 | p1, p2), per unit grid spacing.
__host__ __device__ inline double grad(const double m2, const double m1, const double p1, const double p2)
{
return (1./24.)*(m2-p2) + (27./24.)*(p1-m1);
}
/*
4th order advection on cpu
*/
// CPU reference: 4th-order advection tendency for u. Adds to ut[] the
// divergence of the momentum fluxes u*u, v*u, w*u over the interior
// [istart,iend) x [jstart,jend) x [kstart,kend). Fields are flattened 3-D
// arrays with row stride icells and slab stride ijcells; three ghost
// cells are assumed on every side (offsets up to +-3 are dereferenced).
void advec_cpu(double * const __restrict__ ut,
const double * const __restrict__ u, const double * const __restrict__ v, const double * const __restrict__ w,
const int istart, const int iend,
const int jstart, const int jend,
const int kstart, const int kend,
const int icells, const int ijcells)
{
// Precomputed index offsets: ii* along x, jj* along y, kk* along z.
const int ii1 = 1;
const int ii2 = 2;
const int ii3 = 3;
const int jj1 = 1*icells;
const int jj2 = 2*icells;
const int jj3 = 3*icells;
const int kk1 = 1*ijcells;
const int kk2 = 2*ijcells;
const int kk3 = 3*ijcells;
for (int k=kstart; k<kend; ++k)
for (int j=jstart; j<jend; ++j)
#pragma ivdep
for (int i=istart; i<iend; ++i)
{
const int ijk = i + j*icells + k*ijcells;
// Three flux-divergence terms: d(uu)/dx, d(vu)/dy, d(wu)/dz, each a
// 4th-order gradient of products of 4th-order interpolants.
ut[ijk] += grad( interp( u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ] ) * interp( u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ] ),
interp( u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1] ) * interp( u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1] ),
interp( u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2] ) * interp( u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2] ),
interp( u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3] ) * interp( u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3] ))
+ grad( interp( v[ijk-ii2-jj1], v[ijk-ii1-jj1], v[ijk-jj1], v[ijk+ii1-jj1] ) * interp( u[ijk-jj3], u[ijk-jj2], u[ijk-jj1], u[ijk ] ),
interp( v[ijk-ii2 ], v[ijk-ii1 ], v[ijk ], v[ijk+ii1 ] ) * interp( u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1] ),
interp( v[ijk-ii2+jj1], v[ijk-ii1+jj1], v[ijk+jj1], v[ijk+ii1+jj1] ) * interp( u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2] ),
interp( v[ijk-ii2+jj2], v[ijk-ii1+jj2], v[ijk+jj2], v[ijk+ii1+jj2] ) * interp( u[ijk ], u[ijk+jj1], u[ijk+jj2], u[ijk+jj3] ))
+ grad( interp( w[ijk-ii2-kk1], w[ijk-ii1-kk1], w[ijk-kk1], w[ijk+ii1-kk1] ) * interp( u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ] ),
interp( w[ijk-ii2 ], w[ijk-ii1 ], w[ijk ], w[ijk+ii1 ] ) * interp( u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1] ),
interp( w[ijk-ii2+kk1], w[ijk-ii1+kk1], w[ijk+kk1], w[ijk+ii1+kk1] ) * interp( u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2] ),
interp( w[ijk-ii2+kk2], w[ijk-ii1+kk2], w[ijk+kk2], w[ijk+ii1+kk2] ) * interp( u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3] ));
}
}
/*
4th order advection (3D), no shared memory use
*/
// GPU version of advec_cpu: one thread per interior grid point, all loads
// straight from global memory (no shared-memory staging). Launch with a
// 2-D thread block covering (i,j) and one z-slab per blockIdx.z.
__global__ void advec_gpu(double * const __restrict__ ut,
const double * const __restrict__ u, const double * const __restrict__ v, const double * const __restrict__ w,
const int istart, const int iend,
const int jstart, const int jend,
const int kstart, const int kend,
const int icells, const int ijcells)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x + istart;
const int j = blockIdx.y*blockDim.y + threadIdx.y + jstart;
const int k = blockIdx.z + kstart;
if(i < iend && j < jend && k < kend)
{
const int ijk = i + j*icells + k*ijcells;
// Index offsets along x (ii*), y (jj*), z (kk*).
const int ii1 = 1;
const int ii2 = 2;
const int ii3 = 3;
const int jj1 = 1*icells;
const int jj2 = 2*icells;
const int jj3 = 3*icells;
const int kk1 = 1*ijcells;
const int kk2 = 2*ijcells;
const int kk3 = 3*ijcells;
// Same three flux-divergence terms as the CPU reference.
ut[ijk] += grad( interp( u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ] ) * interp( u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ] ),
interp( u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1] ) * interp( u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1] ),
interp( u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2] ) * interp( u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2] ),
interp( u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3] ) * interp( u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3] ))
+ grad( interp( v[ijk-ii2-jj1], v[ijk-ii1-jj1], v[ijk-jj1], v[ijk+ii1-jj1] ) * interp( u[ijk-jj3], u[ijk-jj2], u[ijk-jj1], u[ijk ] ),
interp( v[ijk-ii2 ], v[ijk-ii1 ], v[ijk ], v[ijk+ii1 ] ) * interp( u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1] ),
interp( v[ijk-ii2+jj1], v[ijk-ii1+jj1], v[ijk+jj1], v[ijk+ii1+jj1] ) * interp( u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2] ),
interp( v[ijk-ii2+jj2], v[ijk-ii1+jj2], v[ijk+jj2], v[ijk+ii1+jj2] ) * interp( u[ijk ], u[ijk+jj1], u[ijk+jj2], u[ijk+jj3] ))
+ grad( interp( w[ijk-ii2-kk1], w[ijk-ii1-kk1], w[ijk-kk1], w[ijk+ii1-kk1] ) * interp( u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ] ),
interp( w[ijk-ii2 ], w[ijk-ii1 ], w[ijk ], w[ijk+ii1 ] ) * interp( u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1] ),
interp( w[ijk-ii2+kk1], w[ijk-ii1+kk1], w[ijk+kk1], w[ijk+ii1+kk1] ) * interp( u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2] ),
interp( w[ijk-ii2+kk2], w[ijk-ii1+kk2], w[ijk+kk2], w[ijk+ii1+kk2] ) * interp( u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3] ));
}
}
/*
4th order advection, smem
*/
// Shared-memory variant: a 2-D thread block marches up the z axis,
// staging the u and v fields (with an ngc-wide halo on each side of the
// x/y tile) in dynamic shared memory for each k level. w and the
// z-direction u stencil are still read from global memory. The launch
// must supply 2*(blockDim.x+2*ngc)*(blockDim.y+2*ngc)*sizeof(double)
// bytes of dynamic shared memory.
// NOTE(review): only threads with i<iend && j<jend reach the
// __syncthreads() below; this assumes the grid tiles the interior exactly
// (as in main) — confirm before launching with partial edge blocks.
__global__ void advec_gpu_smem(double * const __restrict__ ut,
const double * const __restrict__ u, const double * const __restrict__ v, const double * const __restrict__ w,
const int istart, const int iend,
const int jstart, const int jend,
const int kstart, const int kend,
const int icells, const int ijcells, const int ngc)
{
// Two tiles (u and v) carved out of one dynamic shared allocation.
extern __shared__ double shared[];
const int smem_block = (blockDim.x + 2*ngc) * (blockDim.y + 2*ngc);
double *us = &shared[0];
double *vs = &shared[smem_block];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int i = blockIdx.x*blockDim.x + threadIdx.x + istart;
const int j = blockIdx.y*blockDim.y + threadIdx.y + jstart;
const int blockxpad = blockDim.x+2*ngc; // shared-tile row stride
if(i < iend && j < jend)
{
// Global-memory offsets (x, y, z) and shared-tile y offsets (jjs*).
const int ii1 = 1;
const int ii2 = 2;
const int ii3 = 3;
const int jj1 = 1*icells;
const int jj2 = 2*icells;
const int jj3 = 3*icells;
const int kk1 = 1*ijcells;
const int kk2 = 2*ijcells;
const int kk3 = 3*ijcells;
const int kk4 = 4*ijcells;
const int jjs1 = 1*blockxpad;
const int jjs2 = 2*blockxpad;
const int jjs3 = 3*blockxpad;
int ijk;
const int ijks = (tx+ngc) + (ty+ngc)*blockxpad; // this thread's tile cell
for(int k=kstart; k<kend; ++k)
{
ijk = i + j*icells + k*ijcells; // index in global memory
// Each thread loads its own cell; edge threads also load the halos
// (y halo, x halo, and the four corner patches).
us[ijks] = u[ijk];
vs[ijks] = v[ijk];
if(ty < ngc)
{
us[ijks-jjs3] = u[ijk-jj3];
vs[ijks-jjs3] = v[ijk-jj3];
}
if(ty >= blockDim.y-ngc)
{
us[ijks+jjs3] = u[ijk+jj3];
vs[ijks+jjs3] = v[ijk+jj3];
}
if(tx < ngc)
{
us[ijks-ngc] = u[ijk-ngc];
vs[ijks-ngc] = v[ijk-ngc];
if(ty < ngc)
{
us[ijks-jjs3-ngc] = u[ijk-jj3-ngc];
vs[ijks-jjs3-ngc] = v[ijk-jj3-ngc];
}
if(ty >= blockDim.y-ngc)
{
us[ijks+jjs3-ngc] = u[ijk+jj3-ngc];
vs[ijks+jjs3-ngc] = v[ijk+jj3-ngc];
}
}
if(tx >= blockDim.x-ngc)
{
us[ijks+ngc] = u[ijk+ngc];
vs[ijks+ngc] = v[ijk+ngc];
if(ty < ngc)
{
us[ijks-jjs3+ngc] = u[ijk-jj3+ngc];
vs[ijks-jjs3+ngc] = v[ijk-jj3+ngc];
}
if(ty >= blockDim.y-ngc)
{
us[ijks+jjs3+ngc] = u[ijk+jj3+ngc];
vs[ijks+jjs3+ngc] = v[ijk+jj3+ngc];
}
}
// Tile (including halos) must be fully loaded before any reads.
__syncthreads();
// x/y terms read the shared tiles; z term reads global memory.
ut[ijk] += grad( interp( us[ijks-ii3], us[ijks-ii2], us[ijks-ii1], us[ijks ] ) * interp( us[ijks-ii3], us[ijks-ii2], us[ijks-ii1], us[ijks ] ),
interp( us[ijks-ii2], us[ijks-ii1], us[ijks ], us[ijks+ii1] ) * interp( us[ijks-ii2], us[ijks-ii1], us[ijks ], us[ijks+ii1] ),
interp( us[ijks-ii1], us[ijks ], us[ijks+ii1], us[ijks+ii2] ) * interp( us[ijks-ii1], us[ijks ], us[ijks+ii1], us[ijks+ii2] ),
interp( us[ijks ], us[ijks+ii1], us[ijks+ii2], us[ijks+ii3] ) * interp( us[ijks ], us[ijks+ii1], us[ijks+ii2], us[ijks+ii3] ))
+ grad( interp( vs[ijks-ii2-jjs1], vs[ijks-ii1-jjs1], vs[ijks-jjs1], vs[ijks+ii1-jjs1] ) * interp( us[ijks-jjs3], us[ijks-jjs2], us[ijks-jjs1], us[ijks ] ),
interp( vs[ijks-ii2 ], vs[ijks-ii1 ], vs[ijks ], vs[ijks+ii1 ] ) * interp( us[ijks-jjs2], us[ijks-jjs1], us[ijks ], us[ijks+jjs1] ),
interp( vs[ijks-ii2+jjs1], vs[ijks-ii1+jjs1], vs[ijks+jjs1], vs[ijks+ii1+jjs1] ) * interp( us[ijks-jjs1], us[ijks ], us[ijks+jjs1], us[ijks+jjs2] ),
interp( vs[ijks-ii2+jjs2], vs[ijks-ii1+jjs2], vs[ijks+jjs2], vs[ijks+ii1+jjs2] ) * interp( us[ijks ], us[ijks+jjs1], us[ijks+jjs2], us[ijks+jjs3] ))
+ grad( interp( w[ijk-ii2-kk1], w[ijk-ii1-kk1], w[ijk-kk1], w[ijk+ii1-kk1] ) * interp( u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], us[ijks ] ),
interp( w[ijk-ii2 ], w[ijk-ii1 ], w[ijk ], w[ijk+ii1 ] ) * interp( u[ijk-kk2], u[ijk-kk1], us[ijks ], u[ijk+kk1] ),
interp( w[ijk-ii2+kk1], w[ijk-ii1+kk1], w[ijk+kk1], w[ijk+ii1+kk1] ) * interp( u[ijk-kk1], us[ijks ], u[ijk+kk1], u[ijk+kk2] ),
interp( w[ijk-ii2+kk2], w[ijk-ii1+kk2], w[ijk+kk2], w[ijk+ii1+kk2] ) * interp( us[ijks ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3] ));
}
}
}
/*
Get max difference between two fields
*/
// Largest absolute element-wise difference between a[0..n-1] and b[0..n-1].
double maxdiff(const double * const __restrict__ a, const double * const __restrict__ b, const int n)
{
    double worst = 0;
    for (int i = 0; i < n; ++i)
    {
        const double d = std::abs(a[i] - b[i]);
        worst = (d > worst) ? d : worst;
    }
    return worst;
}
// Benchmark driver: runs the 4th-order advection kernel on a 256^3 grid
// with 3 ghost cells, on CPU and on GPU (shared-memory variant), timing
// both with CUDA events and reporting speedup and max CPU/GPU difference.
int main()
{
//
// Grid
//
const int itot = 256;
const int jtot = 256;
const int ktot = 256;
const int gc = 3;
const int iter = 10;
//
// Calculate the required variables.
//
const int ncells = (itot+2*gc)*(jtot+2*gc)*(ktot+2*gc);
const int istart = gc;
const int jstart = gc;
const int kstart = gc;
const int iend = itot+gc;
const int jend = jtot+gc;
const int kend = ktot+gc;
const int icells = itot+2*gc;
const int jcells = jtot+2*gc;
const int kcells = ktot+2*gc;
const int ijcells = (itot+2*gc)*(jtot+2*gc);
// Padded settings, interior aligned to 128 byte blocks
const int mo = 16 - gc; // Padding at start of array
const int pl = 16-(int)itot%16; // Elements left in last 128 byte block
const int icellsp = itot + pl + (pl < 2*gc)*16;
const int ijcellsp = icellsp * jcells;
const int ncellsp = ijcellsp * kcells + mo;
//
// Prepare fields on HOST: random velocities in [-0.5, 0.5), zero tendencies.
//
double *u = new double[ncells];
double *v = new double[ncells];
double *w = new double[ncells];
double *ut = new double[ncells];
double *tmp1 = new double[ncells];
for (int n=0; n<ncells; ++n)
{
u [n] = 0.001 * (std::rand() % 1000) - 0.5;
v [n] = 0.001 * (std::rand() % 1000) - 0.5;
w [n] = 0.001 * (std::rand() % 1000) - 0.5;
ut[n] = 0.;
tmp1[n] = 0.;
}
//
// Prepare fields on DEVICE (2-D copies so device rows get the padded pitch).
//
double *ud, *vd, *wd, *utd;
cudaMalloc((void **)&ud, ncellsp*sizeof(double));
cudaMalloc((void **)&vd, ncellsp*sizeof(double));
cudaMalloc((void **)&wd, ncellsp*sizeof(double));
cudaMalloc((void **)&utd, ncellsp*sizeof(double));
cudaMemcpy2D(&ud[mo], icellsp*sizeof(double), u, icells*sizeof(double), icells*sizeof(double), jcells*kcells, cudaMemcpyHostToDevice);
cudaMemcpy2D(&vd[mo], icellsp*sizeof(double), v, icells*sizeof(double), icells*sizeof(double), jcells*kcells, cudaMemcpyHostToDevice);
cudaMemcpy2D(&wd[mo], icellsp*sizeof(double), w, icells*sizeof(double), icells*sizeof(double), jcells*kcells, cudaMemcpyHostToDevice);
cudaMemcpy2D(&utd[mo], icellsp*sizeof(double), ut, icells*sizeof(double), icells*sizeof(double), jcells*kcells, cudaMemcpyHostToDevice);
//
// CUDA thread blocks
//
const int blocki = 32;
const int blockj = 8;
const int gridi = itot/blocki + (itot%blocki > 0);
const int gridj = jtot/blockj + (jtot%blockj > 0);
dim3 gridGPU (gridi, gridj, ktot);
dim3 gridGPU2d(gridi, gridj, 1);
dim3 blockGPU(blocki, blockj, 1);
//
// Timer stuff
//
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
float dt1, dt2;
//
// Execute kernels
//
//////////////////// CPU //////////////////////////
cudaEventRecord(startEvent, 0);
for(int n=0; n<iter; ++n) // iter+1 since GPU version is warmed up with one call
{
advec_cpu(ut, u, v, w,istart, iend, jstart, jend, kstart, kend, icells, ijcells);
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&dt1, startEvent, stopEvent);
printf("CPU; elapsed=%f [ms]\n",dt1);
////////////////////// GPU //////////////////////////
// Eight-byte banks suit the double-precision shared-memory tiles.
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
cudaEventRecord(startEvent, 0);
for(int n=0; n<iter; ++n)
{
//advec_gpu<<<gridGPU, blockGPU>>>
// (&utd[mo], &ud[mo], &vd[mo], &wd[mo], istart, iend, jstart, jend, kstart, kend, icellsp, ijcellsp);
advec_gpu_smem<<<gridGPU2d, blockGPU, 2*(blocki+2*gc)*(blockj+2*gc)*sizeof(double)>>>
(&utd[mo], &ud[mo], &vd[mo], &wd[mo], istart, iend, jstart, jend, kstart, kend, icellsp, ijcellsp, gc);
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&dt2, startEvent, stopEvent);
//
// Copy device field to tmp1
//
cudaMemcpy2D(tmp1, icells*sizeof(double), &utd[mo], icellsp*sizeof(double), icells*sizeof(double), jcells*kcells, cudaMemcpyDeviceToHost);
// NOTE(review): host new[] buffers and device allocations are not freed
// before exit.
printf("GPU; elapsed=%f [ms], speedup=%f, maxdiff=%e \n",dt2,dt1/dt2,maxdiff(ut,tmp1,ncells));
return 0;
}
|
#include "includes.h"
// For each (spatial position, batch) pair, average the activations across
// all channels: dst[i] = mean over c of src[xy + size*(c + channels*b)].
// One thread per output element; threads past size*batches do nothing.
__global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size*batches)
        return;
    const int xy = i % size;   // position within one channel plane
    const int b = i / size;    // batch index
    float acc = 0;
    for (int c = 0; c < channels; ++c)
        acc += src[xy + size*(c + channels*b)];
    dst[i] = acc / channels;
}
4,960 | // REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm \
// RUN: -fopenmp -fopenmp-version=50 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm \
// RUN: -fopenmp -fopenmp-version=50 -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device \
// RUN: -emit-llvm -o - %s | FileCheck -check-prefixes=DEV %s
// Functions marked device_type(nohost) should not be defined in the host
// IR (only declared) and should not appear at all in the device IR.
// CHECK: declare{{.*}}@_Z7nohost1v()
// DEV-NOT: _Z7nohost1v
void nohost1() {}
#pragma omp declare target to(nohost1) device_type(nohost)
// CHECK: declare{{.*}}@_Z7nohost2v()
// DEV-NOT: _Z7nohost2v
void nohost2() {nohost1();}
#pragma omp declare target to(nohost2) device_type(nohost)
|
4,961 | #define REORDER 0
#define GOOD_WEATHER 0
#define BAD_WEATHER 1
#define TAG_Car 0
#define TAG_Pedestrian 1
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <random>
//#include <array>
#include <algorithm>
#define NUM_CARS 4096
#define NUM_PEDS 16384
#define NUM_STREETS 500
#define MAX_CONNECTIONS 10
#define MAX_LEN 25
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with file/line context if a CUDA API call returned an error.
// Invoked through the CudaSafeCall() macro; compiles to a no-op unless
// CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Check for a pending error from the most recent kernel launch, then
// synchronize to surface asynchronous execution errors. Invoked through
// the CudaCheckError() macro; no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// One simulated actor (car or pedestrian).
typedef struct
{
float progress; // distance travelled along the current street
int street; // index into the street array
float max_velocity; // used by the car movement rule
int tag; // TAG_Car or TAG_Pedestrian
int a1;
int a2;
int a3;
int a4;
int a5;
int a6;
int a7;
int a8;
int a9;
int a10;
// TODO: add more fields here to avoid cache locality
// (a1..a10 are unused padding fields that spread actors out in memory)
} struct_Actor;
// One street segment of the road network.
typedef struct
{
float length; // actors move on once progress exceeds this
float max_velocity; // street speed limit (caps car speed)
int neighbor_array_index; // index into the neighbor offset/size arrays
// s1..s5 appear unused — presumably padding, like struct_Actor.a*.
int s1;
int s2;
int s3;
int s4;
int s5;
} struct_Street;
// Tag/id pair (declared but not referenced in the visible code).
typedef struct
{
int tag;
int id;
} tag_id_pair;
// Device-global views of the simulation arrays. They are assigned from
// the kernel parameters by every thread at kernel entry (same values),
// so the __device__ move/block helpers can reach them without parameters.
__device__ struct_Actor *d_Actors;
__device__ struct_Street *d_Streets;
__device__ int *d_Array_Street_arrays; // flattened neighbor lists
__device__ int *d_Array_Street_offset; // per-street offset into the arrays
__device__ int *d_Array_Street_size; // per-street neighbor count
__device__ int *d_input_actor_id;
__device__ int *d_jobs; // thread->actor mapping (REORDER builds)
__device__ int *d_randomn; // precomputed pseudo-random numbers
// Advance one car by one simulated minute: it moves along its street at
// the lesser of the car's and the street's speed limits, scaled by the
// weather, and hops to a pseudo-random neighboring street when it reaches
// the end.
//
// Fix vs. original: weather_multiplier was left uninitialized for any
// weather code other than GOOD_WEATHER/BAD_WEATHER; it now defaults to
// 1.0 (no penalty).
__device__ void method_Car_move(int car_id, int weather)
{
    float weather_multiplier = 1.0;   // GOOD_WEATHER and unknown codes
    if (weather == BAD_WEATHER)
    {
        weather_multiplier = 0.75;
    }
    float speed = min(d_Actors[car_id].max_velocity, d_Streets[d_Actors[car_id].street].max_velocity) * weather_multiplier;
    d_Actors[car_id].progress = d_Actors[car_id].progress + (speed / 60.0); /* 1 tick = 1 minute */
    if (d_Actors[car_id].progress >= d_Streets[d_Actors[car_id].street].length)
    {
        // move to a pseudo-random neighboring street and restart progress
        int array_id = d_Streets[d_Actors[car_id].street].neighbor_array_index;
        int neighbor_index = d_randomn[d_Actors[car_id].street] % d_Array_Street_size[array_id];
        d_Actors[car_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
        d_Actors[car_id].progress = 0.0f;
    }
}
// Advance one pedestrian by one tick with a pseudo-random speed derived
// from the precomputed d_randomn table (speed lands in -2..4, so progress
// can also decrease). Moves to a pseudo-random neighboring street when
// the end of the current street is reached. `weather` is accepted for
// symmetry with method_Car_move but unused here.
__device__ void method_Pedestrian_move(int ped_id, int weather)
{
float speed = d_randomn[((int) (d_Actors[ped_id].progress*d_Actors[ped_id].progress)) % NUM_STREETS] % 7 - 2;
d_Actors[ped_id].progress = d_Actors[ped_id].progress + (speed / 60.0);
if (d_Actors[ped_id].progress >= d_Streets[d_Actors[ped_id].street].length)
{
// move to a pseudo-random neighboring street and restart progress
int array_id = d_Streets[d_Actors[ped_id].street].neighbor_array_index;
int neighbor_index = d_randomn[d_Actors[ped_id].street] % d_Array_Street_size[array_id];
d_Actors[ped_id].street = d_Array_Street_arrays[d_Array_Street_offset[array_id] + neighbor_index];
d_Actors[ped_id].progress = 0.0f;
}
}
// Advance a single actor for `ticks` simulation steps, dispatching on its
// tag each step (cars and pedestrians use different movement rules).
__device__ void block(int actor_id, int weather, int ticks)
{
    for (int tick = 0; tick < ticks; ++tick)
    {
        const int tag = d_Actors[actor_id].tag;
        if (tag == TAG_Car)
        {
            method_Car_move(actor_id, weather);
        }
        else if (tag == TAG_Pedestrian)
        {
            method_Pedestrian_move(actor_id, weather);
        }
    }
}
// Simulation kernel: publishes the array pointers to the device globals,
// then advances the actor assigned to this thread for `ticks` steps.
// NOTE(review): every thread stores the same values into the d_* globals
// (redundant but consistent); with REORDER, the d_jobs table remaps
// threads to actors.
__global__ void kernel(int weather, int ticks,
struct_Actor *v_d_Actors, struct_Street *v_d_Streets,
int *v_d_Array_Street_size, int *v_d_Array_Street_offset, int *v_d_Array_Street_arrays,
int *v_d_input_actor_id, int *v_d_jobs, int *v_d_randomn)
{
d_Actors = v_d_Actors;
d_Streets = v_d_Streets;
d_Array_Street_size = v_d_Array_Street_size;
d_Array_Street_offset = v_d_Array_Street_offset;
d_Array_Street_arrays = v_d_Array_Street_arrays;
d_input_actor_id = v_d_input_actor_id;
d_jobs = v_d_jobs;
d_randomn = v_d_randomn;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure the globals are visible block-wide before any thread uses them.
__syncthreads();
#if (REORDER)
block(d_input_actor_id[d_jobs[tid]], weather, ticks);
#else
block(d_input_actor_id[tid], weather, ticks);
#endif
}
int main()
{
printf("Setting up scenario...\n");
srand(42);
// streets
float *Street_length = new float[NUM_STREETS];
float *Street_max_velocity = new float[NUM_STREETS];
int *Street_neighbors = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
Street_length[i] = rand() % MAX_LEN + 1;
Street_max_velocity[i] = rand() % 40 + 45; /* speed between 45 and 105 */
Street_neighbors[i] = i;
}
// neighbors
int *Array_Street_offset = new int[NUM_STREETS];
int *Array_Street_size = new int[NUM_STREETS];
int num_connections = 0;
for (int i = 0; i < NUM_STREETS; i++)
{
Array_Street_offset[i] = num_connections;
int connections = rand() % MAX_CONNECTIONS + 1;
Array_Street_size[i] = connections;
num_connections += connections;
}
int *Array_Street_arrays = new int[num_connections];
for (int i = 0; i < num_connections; i++)
{
Array_Street_arrays[i] = rand() % NUM_STREETS;
}
// actors
int *Actor_street = new int[NUM_PEDS + NUM_CARS];
float *Actor_progress = new float[NUM_PEDS + NUM_CARS];
float *Car_max_velocity = new float[NUM_CARS + NUM_PEDS];
int *Actor_id = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_street[i] = rand() % NUM_STREETS;
Actor_progress[i] = rand() % 10;
Car_max_velocity[i] = rand() % 20 + 65;
}
for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
{
Actor_id[i] = i;
}
// jobs (dummy)
int *jobs = new int[NUM_PEDS + NUM_CARS];
for (int i = 0; i < NUM_CARS + NUM_PEDS; i++)
{
jobs[i] = i;
}
// random numbers
int *randomn = new int[NUM_STREETS];
for (int i = 0; i < NUM_STREETS; i++)
{
// TODO: real random
randomn[i] = rand() % NUM_STREETS;
}
printf("Scenario set up.\n");
printf("Converting data to row format...\n");
struct_Actor *actors = new struct_Actor[NUM_CARS + NUM_PEDS];
struct_Street *streets = new struct_Street[NUM_STREETS];
for (int i = 0; i < NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].tag = TAG_Pedestrian;
}
for (int i = NUM_PEDS; i < NUM_CARS + NUM_PEDS; i++)
{
actors[i].progress = Actor_progress[i];
actors[i].street = Actor_street[i];
actors[i].max_velocity = Car_max_velocity[i];
actors[i].tag = TAG_Car;
}
for (int i = 0; i < NUM_STREETS; i++)
{
streets[i].length = Street_length[i];
streets[i].max_velocity = Street_max_velocity[i];
streets[i].neighbor_array_index = Street_neighbors[i];
}
std::srand(42);
#if !(REORDER)
random_shuffle(actors, actors + NUM_CARS + NUM_PEDS);
#endif
printf("Done converting data.\n");
printf("Copying data to GPU...\n");
struct_Actor *v_d_Actors;
struct_Street *v_d_Streets;
int *v_d_Array_Street_size;
int *v_d_Array_Street_offset;
int *v_d_Array_Street_arrays;
int *v_d_input_actor_tag;
int *v_d_input_actor_id;
int *v_d_jobs;
int *v_d_randomn;
CudaSafeCall(cudaMalloc((void**) &v_d_Actors, sizeof(struct_Actor) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_Streets, sizeof(struct_Street) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_size, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_offset, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMalloc((void**) &v_d_Array_Street_arrays, sizeof(int) * num_connections));
CudaSafeCall(cudaMalloc((void**) &v_d_input_actor_tag, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_input_actor_id, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_jobs, sizeof(int) * (NUM_PEDS + NUM_CARS)));
CudaSafeCall(cudaMalloc((void**) &v_d_randomn, sizeof(int) * NUM_STREETS));
CudaSafeCall(cudaMemcpy(v_d_Actors, &actors[0], sizeof(struct_Actor) * (NUM_CARS + NUM_PEDS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Streets, &streets[0], sizeof(struct_Street) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_size, &Array_Street_size[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_offset, &Array_Street_offset[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_Array_Street_arrays, &Array_Street_arrays[0], sizeof(int) * num_connections, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_input_actor_id, &Actor_id[0], sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_jobs, &jobs[0], sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(v_d_randomn, &randomn[0], sizeof(int) * NUM_STREETS, cudaMemcpyHostToDevice));
printf("Finished copying data.\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Launching kernel...\n");
cudaEventRecord(start);
kernel<<<dim3(32), dim3((NUM_PEDS + NUM_CARS) / 32)>>>(GOOD_WEATHER, 1000000,
v_d_Actors, v_d_Streets,
v_d_Array_Street_size, v_d_Array_Street_offset, v_d_Array_Street_arrays,
v_d_input_actor_id, v_d_jobs, v_d_randomn);
CudaCheckError();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
CudaCheckError();
printf("Kernel finished.\n");
// cudaMemcpy(Actor_progress, v_d_Actor_progress, sizeof(float) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %f ", Actor_progress[i]);
// }
// cudaMemcpy(Actor_street, v_d_Actor_street, sizeof(int) * (NUM_PEDS + NUM_CARS), cudaMemcpyDeviceToHost);
// for (int i = 0; i < NUM_PEDS + NUM_CARS; i++)
// {
// printf(" %i ", Actor_street[i]);
// }
printf("\n\n\nElapsed time millis: %f\n", milliseconds);
}
|
4,962 | #include "includes.h"
// Matrix transpose, unrolled 4x along the row (x) dimension: each thread
// copies up to four input elements spaced blockDim.x apart into the
// transposed output.
// Expected launch: grid.x covers ceil(nx / (4*blockDim.x)) tiles,
// grid.y covers ceil(ny / blockDim.y) rows.
// Fix: the original wrote only when ALL four elements were in range
// (ix + 3*blockDim.x < nx), silently dropping the tail columns whenever nx
// is not a multiple of 4*blockDim.x. Each element is now guarded on its own.
__global__ void transpose_unroll4_row(int * mat, int * transpose, int nx, int ny)
{
    int ix = blockIdx.x * blockDim.x * 4 + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int ti = iy * nx + ix;   // input index (row-major, nx columns)
    int to = ix * ny + iy;   // output index (transposed, ny columns)
    if (iy < ny)
    {
        if (ix < nx)
            transpose[to] = mat[ti];
        if (ix + blockDim.x < nx)
            transpose[to + ny*blockDim.x] = mat[ti + blockDim.x];
        if (ix + 2 * blockDim.x < nx)
            transpose[to + ny * 2 * blockDim.x] = mat[ti + 2 * blockDim.x];
        if (ix + 3 * blockDim.x < nx)
            transpose[to + ny * 3 * blockDim.x] = mat[ti + 3 * blockDim.x];
    }
}
4,963 | #define CUDA_SAFE_CALL(func) \
do { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while (0)
// Demosaic kernel: expands a raw Bayer-pattern image (layout sketched in the
// inline diagram below) into an interleaved 3-byte-per-pixel image, with each
// thread handling one 2x2 quad of source pixels via bilinear interpolation of
// the missing colour samples.
// Edge handling: `flag` is true only when the whole neighbourhood is in
// bounds; when false, every source index collapses to 0, and each store index
// is range-checked against the output size, so edge quads read/write pixel 0
// instead of faulting.
// NOTE(review): the exact CFA ordering (which diagram cell is R/G/B) is
// inferred from the variable names — confirm against the sensor's pattern.
__global__ void
cudaProcessUnsignedChar0(unsigned char *dst, unsigned char *src, int imgW, int imgH)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
// Each thread owns the 2x2 quad whose top-left corner is (x, y).
int x = blockIdx.x*bw + tx * 2;
int y = blockIdx.y*bh + ty * 2;
int px = y * imgW + x;
// In-bounds test for the 4x4 neighbourhood sampled below.
bool flag = 0 < y && y < (imgH - 2) && 0 < x && x < (imgW - 2);
int sx1 = flag ? px - imgW : 0;
int sx2 = flag ? px - imgW + 1 : 0;
int sx3 = flag ? px - imgW + 2 : 0;
int sx4 = flag ? px - 1 : 0;
int sx5 = flag ? px : 0;
int sx6 = flag ? px + 1 : 0;
int sx7 = flag ? px + 2 : 0;
int sx8 = flag ? px + imgW - 1 : 0;
int sx9 = flag ? px + imgW : 0;
int sxa = flag ? px + imgW + 1 : 0;
int sxb = flag ? px + imgW + 2 : 0;
int sxc = flag ? px + imgW * 2 - 1 : 0;
int sxd = flag ? px + imgW * 2 : 0;
int sxe = flag ? px + imgW * 2 + 1 : 0;
// G0 R0 G1 R1 x0 x1 x2 x3
// B0 G2 B1 G3 x4 x5 x6 x7
// G4 R2 G5 R3 x8 x9 xA xB
// B2 G6 B3 G7 xC xD xE xF
int g1 = (int)src[sx2];
int g2 = (int)src[sx5];
int g3 = (int)src[sx7];
int g4 = (int)src[sx8];
int g5 = (int)src[sxa];
int g6 = (int)src[sxd];
int b0 = (int)src[sx4];
int b1 = (int)src[sx6];
int b2 = (int)src[sxc];
int b3 = (int)src[sxe];
int r0 = (int)src[sx1];
int r1 = (int)src[sx3];
int r2 = (int)src[sx9];
int r3 = (int)src[sxb];
// Interpolate the two missing channels for each of the four output pixels
// (averages of 2 or 4 nearest same-colour samples; shifts divide).
int db0 = (b0 + b1) >> 1;
int dg0 = g2;
int dr0 = (r0 + r1) >> 1;
int db1 = b1;
int dg1 = (g1 + g2 + g3 + g5) >> 2;
int dr1 = (r0 + r1 + r2 + r3) >> 2;
int db2 = (b0 + b1 + b2 + b3) >> 2;
int dg2 = (g2 + g4 + g5 + g6) >> 2;
int dr2 = r2;
int db3 = (b1 + b3) >> 1;
int dg3 = g5;
int dr3 = (r2 + r3) >> 1;
// Interleaved 3-byte output; 4 destination pixels per quad.
int dx = px * 3;
int dst0 = dx;
int dst1 = dx + 3;
int dst2 = dx + imgW * 3;
int dst3 = dx + (imgW + 1) * 3;
dst[dst0 + 0 < imgW * imgH * 3 ? dst0 + 0 : 0] = (unsigned char)db0;
dst[dst0 + 1 < imgW * imgH * 3 ? dst0 + 1 : 0] = (unsigned char)dg0;
dst[dst0 + 2 < imgW * imgH * 3 ? dst0 + 2 : 0] = (unsigned char)dr0;
dst[dst1 + 0 < imgW * imgH * 3 ? dst1 + 0 : 0] = (unsigned char)db1;
dst[dst1 + 1 < imgW * imgH * 3 ? dst1 + 1 : 0] = (unsigned char)dg1;
dst[dst1 + 2 < imgW * imgH * 3 ? dst1 + 2 : 0] = (unsigned char)dr1;
dst[dst2 + 0 < imgW * imgH * 3 ? dst2 + 0 : 0] = (unsigned char)db2;
dst[dst2 + 1 < imgW * imgH * 3 ? dst2 + 1 : 0] = (unsigned char)dg2;
dst[dst2 + 2 < imgW * imgH * 3 ? dst2 + 2 : 0] = (unsigned char)dr2;
dst[dst3 + 0 < imgW * imgH * 3 ? dst3 + 0 : 0] = (unsigned char)db3;
dst[dst3 + 1 < imgW * imgH * 3 ? dst3 + 1 : 0] = (unsigned char)dg3;
dst[dst3 + 2 < imgW * imgH * 3 ? dst3 + 2 : 0] = (unsigned char)dr3;
}
// Host-side C-linkage wrapper that launches the demosaic kernel with the
// caller-supplied launch configuration. No error checking or synchronization
// is done here; the caller owns both device buffers and any required sync.
extern "C" void
launchCudaProcessUnsignedChar(dim3 grid, dim3 block, unsigned char* srcImage, unsigned char* dstImage, int imgW, int imgH)
{
cudaProcessUnsignedChar0 <<< grid, block>>>(dstImage, srcImage, imgW, imgH);
}
|
4,964 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
typedef unsigned char BYTE;
#define IMAGE_SIZE 6*1000*1000
#define MAXITER 1000
#define X_RES 1000
#define Y_RES 1000
// Write the Mandelbrot image to `fileName` as a binary PPM ("P6") file.
// Note: P6 is the *color* PPM magic number (the original comment said PGM).
// Because the declared maxval (MAXITER) exceeds 255, each sample is 2 bytes,
// i.e. 6 bytes per RGB pixel — matching the kernel's output layout.
// Fixes: fopen() is now checked, and the width/height parameters are honoured
// (previously ignored in favour of the X_RES/Y_RES macros; identical output
// for the existing caller, which passes X_RES/Y_RES).
void writeOutput(const char *fileName, BYTE *image, int width, int height) {
    double xmin = -2;
    double xmax = 1;
    double ymin = -1.5;
    double ymax = 1.5;
    FILE *fp = fopen(fileName, "wb");
    if (fp == NULL) {
        fprintf(stderr, "writeOutput: cannot open %s for writing\n", fileName);
        return;
    }
    fprintf(fp,"P6\n# Mandelbrot, xmin=%lf, xmax=%lf, ymin=%lf, ymax=%lf, maxiter=%d\n%d\n%d\n%d\n", xmin, xmax, ymin, ymax, MAXITER, width, height, (MAXITER < 256 ? 256 : MAXITER));
    fwrite(image, 1, (size_t)width * height * 6, fp);
    fclose(fp);
}
// One thread per pixel: iterate z <- z^2 + c for the pixel's complex
// coordinate and write the escape iteration count as three identical 16-bit
// big-endian channels (6 bytes per pixel); points that never escape are
// written as black. The pixel row comes from the flattened *block* index and
// the column from the flattened *thread* index, so the launch must provide
// exactly Y_RES blocks and X_RES threads-per-block (see main: 100x10 each).
__global__ void mandelbrot(BYTE* image,uint16_t maxiter){
double xmin = -2;
double xmax = 1;
double ymin = -1.5;
double ymax = 1.5;
double dx = (xmax-xmin)/X_RES;
double dy = (ymax-ymin)/Y_RES;
int j = blockIdx.y * gridDim.x + blockIdx.x;   // pixel row (0..Y_RES-1)
int i = threadIdx.y * blockDim.x + threadIdx.x; // pixel column (0..X_RES-1)
double x = xmin + i * dx;
double y = ymax - j * dy;
int k;
double u = 0.0;
double v = 0.0;
double u2 = u*u;
double v2 = v*v;
// Standard escape-time iteration; bails out when |z|^2 >= 4.
for (k = 1; k < maxiter && (u2 + v2 < 4.0); k++) {
v = 2 * u * v + y;
u = u2 - v2 + x;
u2 = u * u;
v2 = v * v;
}
int pxlStartLoc = 6*((j*X_RES)+i);
if (k >= maxiter) {
image[pxlStartLoc+0] = 0;
image[pxlStartLoc+1] = 0;
image[pxlStartLoc+2] = 0;
image[pxlStartLoc+3] = 0;
image[pxlStartLoc+4] = 0;
image[pxlStartLoc+5] = 0;
} else {
image[pxlStartLoc+0] = k >> 8;   // high byte of 16-bit sample
image[pxlStartLoc+1] = k & 255;  // low byte
image[pxlStartLoc+2] = k >> 8;
image[pxlStartLoc+3] = k & 255;
image[pxlStartLoc+4] = k >> 8;
image[pxlStartLoc+5] = k & 255;
}
}
// Render the Mandelbrot set into unified memory on the GPU and dump it to
// the file named by argv[1].
int main(int argc, char* argv[]) {
    // Fix: argv[1] was dereferenced unchecked — running without an argument
    // crashed inside writeOutput.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <output-file>\n", argv[0]);
        return 1;
    }
    BYTE* image = NULL;
    // 100x10 blocks of 100x10 threads -> one thread per pixel of 1000x1000.
    dim3 grid_dim(100,10,1);
    dim3 block_dim(100,10,1);
    // Fix: allocation failure was previously ignored.
    if (cudaMallocManaged(&image, IMAGE_SIZE) != cudaSuccess) {
        fprintf(stderr, "cudaMallocManaged failed\n");
        return 1;
    }
    mandelbrot<<<grid_dim,block_dim>>>(image,MAXITER);
    cudaDeviceSynchronize();
    writeOutput(argv[1],image,X_RES,Y_RES);
    cudaFree(image);
    return 0;
}
|
4,965 | #include <iostream>
// Everything done by Rolf Andreassen!
using namespace std;
// Per-block "done" flags for the software grid barrier below; the buffer is
// expected to be allocated and installed from the host before launch.
__device__ bool* syncArray = 0;
// Recursively sum toBeReduced[0..workingLength) so the grand total ends up in
// toBeReduced[0]. Each block tree-reduces its slice in shared memory, writes
// a per-block partial sum, busy-waits for every other block, then recurses on
// the partial sums.
// NOTE(review): this routine has several defects and is left byte-identical
// pending a redesign:
//  - syncArray is indexed with blockDim.x (the SAME slot for every block);
//    blockIdx.x was almost certainly intended, and the wait loop should scan
//    up to gridDim.x, not blockDim.x.
//  - the "done" flag is raised BEFORE this block's partial sum is stored to
//    toBeReduced[blockIdx.x] below, so other blocks can read stale data.
//  - the initial load of toBeReduced[localIndex] is not guarded by
//    localIndex < workingLength (out-of-bounds read in the last block), and
//    there is no __syncthreads() between that store and the first reduction
//    pass that reads neighbouring entries (data race).
//  - the spin-wait reads non-volatile global memory with no __threadfence(),
//    and inter-block busy-waiting deadlocks whenever all blocks cannot be
//    resident simultaneously; a cooperative launch with grid.sync() is the
//    supported mechanism for this pattern.
__device__ void device_vector_reduce_blocks_recursive (double* toBeReduced, int workingLength) {
syncArray[blockDim.x] = false;
// First reduce this block
// Copy from global to shared memory for local reduction
__shared__ double localResults[1024];
int localIndex = blockDim.x * blockIdx.x + threadIdx.x;
localResults[threadIdx.x] = toBeReduced[localIndex];
// Reduce local block of (at most) 1024 entries
int len = blockDim.x;
if (len > workingLength) len = workingLength;
while (len > 1) {
if ((localIndex < workingLength) && (threadIdx.x < (len - (len % 2)) / 2)) {
localResults[threadIdx.x] += localResults[threadIdx.x + (len + (len % 2)) / 2];
}
len = (len + (len % 2)) / 2;
__syncthreads();
}
// Need to synchronise over blocks! Otherwise we may overwrite
// data another block is still working on.
syncArray[blockDim.x] = true;
bool everyoneDone = false;
while (!everyoneDone) {
everyoneDone = true;
for (int i = 0; i < blockDim.x; ++i) {
if (syncArray[i]) continue;
everyoneDone = false;
break;
}
}
if ((0 == threadIdx.x) && (localIndex < workingLength)) toBeReduced[blockIdx.x] = localResults[threadIdx.x];
// First blockDim.x entries of toBeReduced are now sums for individual blocks
// Now repeat reduction just on the first part of the vector.
int newWorkingLength = (workingLength + blockDim.x - 1) / blockDim.x;
if (newWorkingLength > 1) device_vector_reduce_blocks_recursive(toBeReduced, newWorkingLength);
}
// Kernel entry point for the recursive block reduction. (Written when
// __global__ functions could not recurse, so the algorithm lives in the
// __device__ helper above.) On return, toBeReduced[0] is intended to hold
// the sum of the first workingLength entries — see the helper's caveats.
__global__ void device_vector_reduce_blocks (double* toBeReduced, int workingLength) {
// Global function cannot recurse, outsource actual algorithm to device function
device_vector_reduce_blocks_recursive(toBeReduced, workingLength);
}
// Host driver: fill a vector with random digits, reduce it on the GPU and
// compare against the CPU checksum.
int main (int argc, char** argv) {
    int sizeOfVector = 100;
    if (argc > 1) sizeOfVector = atoi(argv[1]);
    double* host_numbers = new double[sizeOfVector];
    double checkSum = 0;
    srand(42);
    for (int i = 0; i < sizeOfVector; ++i) {
        host_numbers[i] = rand() % 10;
        checkSum += host_numbers[i];
    }
    std::cout << "CPU result: " << checkSum << std::endl;
    double* dev_numbers;
    cudaMalloc((void**) &dev_numbers, sizeOfVector*sizeof(double));
    cudaMemcpy(dev_numbers, host_numbers, sizeOfVector*sizeof(double), cudaMemcpyHostToDevice);
    int numThreads = min(1024, sizeOfVector);
    int numBlocks = (1023 + sizeOfVector) / 1024;
    std::cout << "Blocks and threads: " << numBlocks << " " << numThreads << std::endl;
    // Fix: the original called cudaMalloc((void**) syncArray, ...), passing the
    // VALUE of the (null) __device__ pointer instead of an address to fill, so
    // nothing was ever allocated and the kernel dereferenced null. A __device__
    // symbol must be set via cudaMemcpyToSymbol with a normally-allocated
    // device pointer.
    bool* dev_syncArray = 0;
    cudaMalloc((void**) &dev_syncArray, numBlocks * sizeof(bool));
    cudaMemset(dev_syncArray, 0, numBlocks * sizeof(bool));
    cudaMemcpyToSymbol(syncArray, &dev_syncArray, sizeof(bool*));
    device_vector_reduce_blocks<<<numBlocks, numThreads>>>(dev_numbers, sizeOfVector);
    cudaDeviceSynchronize(); // Ensure that kernel is done before copying result
    cudaMemcpy(&checkSum, dev_numbers, sizeof(double), cudaMemcpyDeviceToHost);
    std::cout << "GPU result: " << checkSum << std::endl;
    // Fix: release resources (everything was previously leaked).
    cudaFree(dev_syncArray);
    cudaFree(dev_numbers);
    delete[] host_numbers;
    return 0;
}
4,966 | #include <stdio.h>
#include <stdlib.h>
#include <sys/timeb.h>
#include <cuda_runtime.h>
#define N 1500000000
// Return 0 if `code` is cudaSuccess; otherwise print a diagnostic and
// return -1.
// Fix: the original printed cudaGetErrorString(cudaGetLastError()), which
// reads (and clears) the runtime's *last* error state — potentially a
// different error than the one passed in, or cudaSuccess. Describe the code
// the caller actually handed us.
int cudaCheck(cudaError_t code) {
    if(code == cudaSuccess) {
        //printf("cudaSuccess\n");
        return 0;
    } else {
        printf("cudaCheck(): %s\n", cudaGetErrorString(code));
        return -1;
    }
}
// Measure raw device->host and host->device cudaMemcpy bandwidth over an
// N-byte buffer using millisecond wall-clock timestamps from ftime().
int main() {
    void* host = malloc(N);
    if(host == NULL) {
        perror("malloc()");
        return 1;
    }
    void* device = NULL;
    cudaError_t ret = cudaMalloc(&device, N);
    if(cudaCheck(ret) < 0) {
        free(host);
        return 1;
    }
    unsigned long int diff = 0;
    struct timeb t_begin = {0}, t_end = {0};
    // Device -> host. (cudaMemcpy is blocking, so the timestamps bracket the
    // whole transfer.)
    ftime(&t_begin);
    ret = cudaMemcpy(host, device, N, cudaMemcpyDeviceToHost);
    ftime(&t_end);
    if(cudaCheck(ret) < 0) {     // fix: transfer errors were silently ignored
        free(host);
        cudaFree(device);
        return 1;
    }
    diff = (t_end.time * 1000 + t_end.millitm) - (t_begin.time * 1000 + t_begin.millitm);
    printf("bytes transferred from device to host: %lu\n", (unsigned long int)N);
    printf("time elapsed: %lu ms\n", diff);
    if (diff > 0)                // fix: avoid division by zero on sub-ms transfers
        printf("speed: %f MB/s\n", ((unsigned long int)N) / (1000.0 * diff));
    putchar('\n');
    // Host -> device.
    ftime(&t_begin);
    ret = cudaMemcpy(device, host, N, cudaMemcpyHostToDevice);
    ftime(&t_end);
    if(cudaCheck(ret) < 0) {
        free(host);
        cudaFree(device);
        return 1;
    }
    diff = (t_end.time * 1000 + t_end.millitm) - (t_begin.time * 1000 + t_begin.millitm);
    printf("bytes transferred from host to device: %lu\n", (unsigned long int)N);
    printf("time elapsed: %lu ms\n", diff);
    if (diff > 0)
        printf("speed: %f MB/s\n", ((unsigned long int)N) / (1000.0 * diff));
    free(host);
    cudaFree(device);
    return 0;
}
4,967 | // Multiplicação de matrizes em CUDA
// Disciplina: OPRP001 - Programação Paralela
// Prof.: Mauricio Pillon
// Aluno: Renato Tanaka
#include <cuda.h>
#include <stdio.h>
#include <math.h>
// Matriz Quadrada (nro_linhas = nro_colunas)
#define N 4 // Número de linhas
// Número de colunas
// GPU: multiply NxN matrices (a) x (b) into (c); one thread per output element.
// Fixes: added a bounds guard so the kernel is safe for any launch
// configuration (not only one covering exactly NxN threads), and the dot
// product now accumulates in a register with a single final store instead of
// read-modify-writing global memory every iteration.
__global__ void matMult (int *da, int *db, int *dc) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= N || j >= N) return;
    int acc = 0;
    for(int k=0; k<N; k++)
        acc += da[i*N+k] * db[k*N+j];
    dc[i*N+j] = acc;
}
// GPU: print, for every thread, the flat matrix index (i*N+j) it maps to,
// together with its thread/block indices and block dimensions in x and y
// (debug aid for visualising the launch geometry).
__global__ void printIndex (void) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y);
}
// GPU: fill vectors (a), (b) and (c) in global memory with sentinel values
// (-1/-2/-3) so the "before" printout proves the data came from the device.
// Launched as <<<N, N>>> from main, i.e. one thread per element of an N*N
// array; there is no bounds guard, so the launch must match exactly.
__global__ void dirtyMem (int *da, int *db, int *dc) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
da[i] = -1;
db[i] = -2;
dc[i] = -3;
}
// CPU: initialize matrices (a) and (b). b receives an increasing pattern
// (i+j) + (N-1)*i and a is its complement, so every a/b pair sums to N*N.
__host__ void initvet(int *host_a, int *host_b) {
    for (int idx = 0; idx < N * N; idx++) {
        int row = idx / N;
        int col = idx % N;
        host_b[idx] = (row + col) + ((N - 1) * row);
        host_a[idx] = (N * N) - host_b[idx];
    }
}
// CPU: pretty-print an NxN matrix with (col) headers and (row) labels,
// tab-separated.
__host__ void printMat (int *mat){
    for (int col = 0; col < N; col++)
        printf("\t(%d)", col);
    printf("\n");
    for (int row = 0; row < N; row++) {
        printf("(%d)", row);
        for (int col = 0; col < N; col++)
            printf("\t%d", mat[row*N+col]);
        printf("\n");
    }
}
// CPU: program entry point — allocates pinned host and device buffers,
// shows device-initialized sentinel values, runs the multiplication on the
// GPU and prints the result.
int main(int argc, char const *argv[]) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size;
// Square-matrix allocation size
size = N * N * sizeof(int);
// Host memory allocation (pinned, for faster transfers)
cudaMallocHost((void **) &a, size);
cudaMallocHost((void **) &b, size);
cudaMallocHost((void **) &c, size);
// GPU memory allocation for vectors (a, b and c)
cudaMalloc ((void **) &dev_a, size);
cudaMalloc ((void **) &dev_b, size);
cudaMalloc ((void **) &dev_c, size);
// Assign initial sentinel values to the vectors on the GPU
dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c);
// Copy GPU to CPU
cudaMemcpy (a, dev_a, size, cudaMemcpyDeviceToHost);
cudaMemcpy (b, dev_b, size, cudaMemcpyDeviceToHost);
cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost);
// Print the vector values (should show the GPU sentinels)
printf ("\t ### Valores Inicializados na GPU ###\n");
printf ("\t ### Matriz (a) ### \n");
printMat(a);
printf ("\t ### Matriz (b) ### \n");
printMat(b);
printf ("\t ### Matriz (c) ### \n");
printMat(c);
// Initialize vectors (a) and (b) on the host
initvet(a,b);
// Copy the CPU-generated vectors to GPU memory
cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice);
// Number of blocks and threads for dimensions (x,y): one block of NxN threads
dim3 dimBlock (1, 1);
dim3 dimThreads(N, N);
// Print the positions accessed under dimBlock/dimThreads
printIndex<<< dimBlock, dimThreads>>>();
// Run the matMult kernel on the GPU
matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c);
cudaDeviceSynchronize();
// Copy vector (c) from GPU global memory back to the CPU
cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost);
// Print the vector values after GPU processing
printf ("\t ### Valores após processamento em GPU ###\n");
printf ("\t ### Matriz (a) ### \n");
printMat(a);
printf ("\t ### Matriz (b) ### \n");
printMat(b);
printf ("\t ### Matriz (c) ### \n");
printMat(c);
// Free global (GPU) memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// Free pinned host (CPU) memory
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
return 0;
}
|
// Device port of Numerical Recipes' lubksb(): solve A*x = b by forward and
// back substitution, given the LU decomposition of A (as produced by ludcmp)
// in `a` and the row-permutation vector `indx`. `b` is overwritten with the
// solution x. The system size is fixed at n = 5.
// Note: the original 2-D indexing a[i][j] was flattened to a[i*n+j]
// (that is what the cryptic "][ -> *n+" comment recorded).
__device__ void lubksb(float* a, int* indx, float* b)
{
int i,ii=0,ip,j;
float sum;
int n = 5;
// Forward substitution, unscrambling the row permutation as we go.
// ii marks the first non-vanishing element of b (NR's trick to skip
// leading zeros); 0 means "not found yet".
for (i=0;i<n;i++) {
ip=indx[i];
sum=b[ip];
b[ip]=b[i];
if (ii != 0)
for (j=ii-1;j<i;j++) sum -= a[i*n+j]*b[j];
else if (sum != 0.0)
ii=i+1;
b[i]=sum;
}
// Back substitution using the upper-triangular factor.
for (i=n-1;i>=0;i--) {
sum=b[i];
for (j=i+1;j<n;j++) sum -= a[i*n+j]*b[j];
b[i]=sum/a[i*n+i];
}
}
|
4,969 | #include <stdio.h>
// Single-thread benchmark kernel: re-reads the first three ints of dataD one
// million times and accumulates their sum into *sumD (launched <<<1,1>>>).
// NOTE(review): *sumD is never initialized here and main() does not zero the
// allocation either, so the printed result starts from whatever the device
// memory happened to contain.
__global__ void test(int* dataD, int* sumD) {
for (int i = 0; i < 1000000; i++) {
int x = dataD[0];
int y = dataD[1];
int z = dataD[2];
int sum = x+y+z;
*sumD += sum;
}
}
// Host driver: upload ten ints, run the single-thread accumulation kernel
// and print the result (expected 1,000,000 * (0+1+2) = 3,000,000).
int main() {
    int* dataH = (int*)malloc(sizeof(int)*10);
    for (int i = 0; i < 10; i++) {
        dataH[i] = i;
    }
    int* dataD;
    cudaMalloc((void**)&dataD, sizeof(int)*10);
    cudaMemcpy(dataD, dataH, sizeof(int)*10, cudaMemcpyHostToDevice);
    int* sumH = (int*)malloc(sizeof(int));
    int* sumD;
    cudaMalloc((void**)&sumD, sizeof(int));
    // Fix: *sumD was never initialized, so the kernel accumulated onto
    // whatever garbage the allocation contained.
    cudaMemset(sumD, 0, sizeof(int));
    test<<<1,1>>>(dataD, sumD);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(sumH, sumD, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", *sumH);
    // Fix: release host and device allocations (previously leaked).
    cudaFree(sumD);
    cudaFree(dataD);
    free(sumH);
    free(dataH);
}
|
4,970 | #include <thrust/device_vector.h>
#include <stdio.h>
/*
 * Function: load_char
 * --------------------
 * copies a subset u * m of data.dat into output vector
 *
 * output_vector: destination output vector (size at least u * m)
 * u: number of users to copy
 * m: number of movies to copy per user
 *
 * returns: Nothing. On a missing or truncated file, prints an error to
 * stderr and returns early (previously an unchecked fopen crashed in fscanf
 * and a short file silently reused stale values).
 */
void load_char(thrust::device_vector<char> & output_vector, const int u, const int m) {
    int num_of_movies = 3952;   // movies per row in the sequential file
    FILE *file = fopen("data.dat", "r");
    if (file == NULL) {
        fprintf(stderr, "load_char: cannot open data.dat\n");
        return;
    }
    int temp;
    //For each user in subset
    for(int k = 0; k < u; k++) {
        //Read an entire row of movies of size num_of_movies
        for(int i = 0; i < num_of_movies; i++) {
            if (fscanf(file, "%d", &temp) != 1) {
                fprintf(stderr, "load_char: unexpected end of data.dat\n");
                fclose(file);
                return;
            }
            //As is a sequential file we need to read the rest of movie
            //ratings but we only care of the ones which id < m
            //that means the size of our movie subset
            if(i < m) {
                output_vector[i+(k*m)] = (char)temp;
            }
        }
    }
    fclose(file);
    return;
}
/*
 * Function: load_char (client overload)
 * --------------------
 * generates a dataset of size u * m in which every row is a copy of the
 * client's movie ratings.
 *
 * output_vector: destination output vector (size at least u * m)
 * u: number of rows (copies) to generate
 * m: number of movies to copy
 * client_id: user_id of the client whose row is replicated
 *
 * returns: Nothing. On a missing or truncated file, prints an error to
 * stderr and returns early (previously an unchecked fopen crashed in fscanf).
 */
void load_char(thrust::device_vector<char> & output_vector, const int u, const int m, const int client_id) {
    int num_of_movies = 3952;
    thrust::device_vector<int> client_ratings(m);
    FILE *file = fopen("data.dat", "r");
    if (file == NULL) {
        fprintf(stderr, "load_char: cannot open data.dat\n");
        return;
    }
    int temp;
    //Skip ahead in the sequential file until the client_id row
    for(int k = 0; k < client_id * num_of_movies; k++) {
        if (fscanf(file, "%d", &temp) != 1) {
            fprintf(stderr, "load_char: unexpected end of data.dat\n");
            fclose(file);
            return;
        }
    }
    //Read the client_id movie ratings
    for(int i = 0; i < m; i++) {
        if (fscanf(file, "%d", &temp) != 1) {
            fprintf(stderr, "load_char: unexpected end of data.dat\n");
            fclose(file);
            return;
        }
        client_ratings[i] = (char)temp;
    }
    fclose(file);
    //Replicate client_ratings to form a u-row matrix
    for(int i = 0; i < u; i++) {
        thrust::copy(client_ratings.begin(), client_ratings.end(), output_vector.begin()+(i*m));
    }
    return;
}
/*
 * Function: load_char_from
 * --------------------
 * copies a subset u * m of data.dat into output vector, starting at a given
 * user row.
 *
 * output_vector: destination output vector (size at least u * m)
 * u: number of users to copy
 * m: number of movies to copy per user
 * offset: start reading from this user_id
 *
 * returns: Nothing. On a missing or truncated file, prints an error to
 * stderr and returns early (previously an unchecked fopen crashed in fscanf).
 */
void load_char_from(thrust::device_vector<char> & output_vector, const int u, const int m, const int offset) {
    int num_of_movies = 3952;
    FILE *file = fopen("data.dat", "r");
    if (file == NULL) {
        fprintf(stderr, "load_char_from: cannot open data.dat\n");
        return;
    }
    int temp;
    //Skip ahead in the sequential file until the offset row
    for(int k = 0; k < offset * num_of_movies; k++) {
        if (fscanf(file, "%d", &temp) != 1) {
            fprintf(stderr, "load_char_from: unexpected end of data.dat\n");
            fclose(file);
            return;
        }
    }
    //For each user in subset
    for(int k = 0; k < u; k++) {
        //Read an entire row of movies of size num_of_movies
        for(int i = 0; i < num_of_movies; i++) {
            if (fscanf(file, "%d", &temp) != 1) {
                fprintf(stderr, "load_char_from: unexpected end of data.dat\n");
                fclose(file);
                return;
            }
            //Keep only the first m movies of each row
            if(i < m) {
                output_vector[i+(k*m)] = (char)temp;
            }
        }
    }
    fclose(file);
    return;
}
|
4,971 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/host_vector.h>
#define THREADS_PER_BLOCK 256
using namespace std;
// Build a binary mask of "source" pixels: 1 where the source pixel is not
// pure white in all three channels, 0 elsewhere.
// Fix: the original executed mask[id]=0 BEFORE the id < size bounds check,
// which is an out-of-bounds write for any surplus threads in the last block.
__global__ void maskCompute(uchar4 *sourceImg,bool *mask,int cols,int rows)
{
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    int size=cols*rows;
    if(id<size)
    {
        mask[id] = (sourceImg[id].x!=255 && sourceImg[id].y!=255 && sourceImg[id].z!=255);
    }
}
// Classify each pixel by how many of its 4-neighbours are inside the mask:
//   0 -> outside (no masked neighbour), 1 -> border, 2 -> strict interior.
// Fixes: added an id < cols*rows guard, and excluded the last row/column from
// the neighbour test — the original condition (r < rows && c < cols) is
// always true, so mask[x+cols] and mask[x+1] were read out of bounds for
// pixels on the bottom row / right column. Such edge pixels now get cnt==0,
// i.e. border[id]==0.
__global__ void borderMark(unsigned char *border,bool *mask,int cols,int rows)
{
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    if (id >= cols*rows) return;
    int r=id/cols;
    int c=id%cols;
    int cnt=0;
    int x=r*cols+c;
    if(r>0 && r<rows-1 && c>0 && c<cols-1){
        if(mask[x-cols]==1)
            cnt++;
        if(mask[x+cols]==1)
            cnt++;
        if(mask[x+1]==1)
            cnt++;
        if(mask[x-1]==1)
            cnt++;
    }
    if(cnt==0)
        border[id]=0;
    else if(cnt==4)
        border[id]=2;
    else
        border[id]=1;
}
// Split an interleaved RGBA image into three planar channel buffers
// (one thread per pixel; surplus threads exit early).
__global__ void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= numRows * numCols)
        return;
    const uchar4 rgba = inputImageRGBA[pixel];
    redChannel[pixel] = rgba.x;
    greenChannel[pixel] = rgba.y;
    blueChannel[pixel] = rgba.z;
}
// Seed both Jacobi ping-pong buffers with the source channel's values.
// Fix: the original read channel[id] BEFORE the id < sz bounds check — an
// out-of-bounds read for surplus threads in the last block.
__global__ void init_guess(unsigned char *channel,float* buf1,float* buf2,int sz)
{
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    if(id<sz)
    {
        unsigned char x=channel[id];
        buf1[id]=x;
        buf2[id]=x;
    }
}
// One Jacobi relaxation step of the Poisson blend for a single channel.
// For each strict-interior pixel (border == 2), sum over its four
// neighbours: the previous-iteration guess (pbuf) for interior neighbours or
// the fixed destination value otherwise (sum1), plus the source-image
// gradient term (sum2); the clamped average goes to cbuf. pbuf and cbuf
// ping-pong between successive calls.
// NOTE(review): neighbour indices (row-1, row+1, col+-1) are not clamped;
// this is safe only if border never marks an image-edge pixel as interior —
// verify against borderMark.
__global__ void blender(unsigned char *sourceChannel,
unsigned char* destChannel,
float *pbuf,
float *cbuf,
int numCols,
int numRows,
unsigned char* border)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
float sum1=0,sum2=0;
int row=id/numCols;
int col=id%numCols;
if(row<numRows && col<numCols)
{
if(border[id]==2)
{
unsigned char sc=sourceChannel[id];
int n1=(row-1)*numCols+col;   // neighbour above
if(border[n1]==2)
sum1+=pbuf[n1];
else
sum1+=destChannel[n1];
sum2+=sc-sourceChannel[n1];
n1=(row)*numCols+col-1;       // neighbour to the left
if(border[n1]==2)
sum1+=pbuf[n1];
else
sum1+=destChannel[n1];
sum2+=sc-sourceChannel[n1];
n1=(row)*numCols+col+1;       // neighbour to the right
if(border[n1]==2)
sum1+=pbuf[n1];
else
sum1+=destChannel[n1];
sum2+=sc-sourceChannel[n1];
n1=(row+1)*numCols+col;       // neighbour below
if(border[n1]==2)
sum1+=pbuf[n1];
else
sum1+=destChannel[n1];
sum2+=sc-sourceChannel[n1];
// Jacobi update: average of neighbour values plus gradient, clamped
// to the valid 8-bit range.
float newVal=(sum1+sum2)/4.f;
cbuf[id]=min(255.f, max(0.f, newVal));
}
}
}
// Write the converged per-channel values back into the destination image,
// but only for strict-interior ("2") pixels; every other pixel keeps the
// destination's original colour.
__global__ void final_merge(uchar4 *blendedImg,
int numCols,
int numRows,
float *green,
float *blue,
float *red,
unsigned char *border)
{
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= numRows * numCols || border[pixel] != 2)
        return;
    blendedImg[pixel].x = red[pixel];
    blendedImg[pixel].y = green[pixel];
    blendedImg[pixel].z = blue[pixel];
}
// Host driver for Poisson (seamless) image blending: reads hard-coded
// 540x960 source/destination images as whitespace-separated ints from
// s_im.txt / d_im.txt (via stdin redirection), classifies mask/border
// pixels, separates channels, runs 800 Jacobi iterations per channel on
// three CUDA streams, merges interior pixels into the destination and
// writes the result to b_out.txt.
int main()
{
int numRowsSource, numColsSource;
numRowsSource=540;//Hard Coded values
numColsSource=960;
uchar4* h_sourceImg,*h_destImg,*h_blendedImg; //IN
int totalSize=numColsSource*numRowsSource;
freopen("s_im.txt","r",stdin);
h_sourceImg=(uchar4*)malloc(sizeof(uchar4)*totalSize);
h_destImg=(uchar4*)malloc(sizeof(uchar4)*totalSize);
h_blendedImg=(uchar4*)malloc(sizeof(uchar4)*totalSize);
int t1;
// Source image: three ints per pixel, stored into .z/.y/.x in that order.
for(int i=0;i<numRowsSource;i++)
{
for(int j=0;j<numColsSource;j++)
{
cin>>t1;
h_sourceImg[i*numColsSource+j].z=t1;
cin>>t1;
h_sourceImg[i*numColsSource+j].y=t1;
cin>>t1;
h_sourceImg[i*numColsSource+j].x=t1;
}
}
freopen("d_im.txt","r",stdin);
for(int i=0;i<numRowsSource;i++)
{
for(int j=0;j<numColsSource;j++)
{
cin>>t1;
h_destImg[i*numColsSource+j].z=t1;
cin>>t1;
h_destImg[i*numColsSource+j].y=t1;
cin>>t1;
h_destImg[i*numColsSource+j].x=t1;
}
}
cout<<"input taken"<<endl;
//freopen("d_out.txt","w",stdout);
uchar4 *d_sourceImg,*d_destImg;
bool *d_mask;
// One stream per colour channel so the per-channel kernels can overlap.
cudaStream_t rstream,gstream,bstream;
cudaStreamCreate(&rstream);
cudaStreamCreate(&gstream);
cudaStreamCreate(&bstream);
unsigned char *source_r,*source_g,*source_b,*dest_r,*dest_b,*dest_g,*d_border;
cudaMalloc((uchar4**)&d_sourceImg,sizeof(uchar4)*totalSize);
cudaMalloc((uchar4**)&d_destImg,sizeof(uchar4)*totalSize);
cudaMemcpy(d_destImg,h_destImg,sizeof(uchar4)*totalSize,cudaMemcpyHostToDevice);
cudaMemcpy(d_sourceImg,h_sourceImg,sizeof(uchar4)*totalSize,cudaMemcpyHostToDevice);
cudaMalloc((bool**)&d_mask,sizeof(bool)*totalSize);
cudaMalloc((unsigned char**)&d_border,sizeof(char)*totalSize);
// NOTE(review): integer division — assumes totalSize is a multiple of
// THREADS_PER_BLOCK (540*960/256 = 2025 exactly for these dimensions).
int blocks=(totalSize)/THREADS_PER_BLOCK;
//1
maskCompute<<<blocks,THREADS_PER_BLOCK>>>(d_sourceImg,d_mask,numColsSource,numRowsSource);
//2
borderMark<<<blocks,THREADS_PER_BLOCK>>>(d_border,d_mask,numColsSource,numRowsSource);
//3
//Channels for source image
cudaMalloc((unsigned char**)&source_r,sizeof(char)*totalSize);
cudaMalloc((unsigned char**)&source_b,sizeof(char)*totalSize);
cudaMalloc((unsigned char**)&source_g,sizeof(char)*totalSize);
//channels for destination image
cudaMalloc((unsigned char**)&dest_r,sizeof(char)*totalSize);
cudaMalloc((unsigned char**)&dest_b,sizeof(char)*totalSize);
cudaMalloc((unsigned char**)&dest_g,sizeof(char)*totalSize);
separateChannels<<<blocks,THREADS_PER_BLOCK,0,rstream>>>(d_sourceImg,numRowsSource,numColsSource,source_r,source_g,source_b);
separateChannels<<<blocks,THREADS_PER_BLOCK,0,gstream>>>(d_destImg,numRowsSource,numColsSource,dest_r,dest_g,dest_b);
//4
// Ping-pong Jacobi buffers (buf1 <-> buf2) per channel.
float *buf1_r,*buf1_g,*buf1_b,*buf2_r,*buf2_g,*buf2_b;
cudaMalloc((float**)&buf1_r,sizeof(float)*totalSize);
cudaMalloc((float**)&buf1_g,sizeof(float)*totalSize);
cudaMalloc((float**)&buf1_b,sizeof(float)*totalSize);
cudaMalloc((float**)&buf2_r,sizeof(float)*totalSize);
cudaMalloc((float**)&buf2_g,sizeof(float)*totalSize);
cudaMalloc((float**)&buf2_b,sizeof(float)*totalSize);
init_guess<<<blocks,THREADS_PER_BLOCK,0,rstream>>>(source_r,buf1_r,buf2_r,totalSize);
init_guess<<<blocks,THREADS_PER_BLOCK,0,gstream>>>(source_g,buf1_g,buf2_g,totalSize);
init_guess<<<blocks,THREADS_PER_BLOCK,0,bstream>>>(source_b,buf1_b,buf2_b,totalSize);
//5
//Call the kernel 800 times for each color channel
for(int i=0;i<400;i++)
{
blender<<<blocks,THREADS_PER_BLOCK,0,rstream>>>(source_r,dest_r,buf1_r,buf2_r,numColsSource,numRowsSource,d_border);
blender<<<blocks,THREADS_PER_BLOCK,0,bstream>>>(source_b,dest_b,buf1_b,buf2_b,numColsSource,numRowsSource,d_border);
blender<<<blocks,THREADS_PER_BLOCK,0,gstream>>>(source_g,dest_g,buf1_g,buf2_g,numColsSource,numRowsSource,d_border);
blender<<<blocks,THREADS_PER_BLOCK,0,rstream>>>(source_r,dest_r,buf2_r,buf1_r,numColsSource,numRowsSource,d_border);
blender<<<blocks,THREADS_PER_BLOCK,0,bstream>>>(source_b,dest_b,buf2_b,buf1_b,numColsSource,numRowsSource,d_border);
blender<<<blocks,THREADS_PER_BLOCK,0,gstream>>>(source_g,dest_g,buf2_g,buf1_g,numColsSource,numRowsSource,d_border);
}
// final_merge runs on the default stream, which serializes with the
// per-channel streams here.
final_merge<<<blocks,THREADS_PER_BLOCK>>>(d_destImg,numColsSource,numRowsSource,buf1_g,buf1_b,buf1_r,d_border);
cudaMemcpy(h_blendedImg,d_destImg,sizeof(uchar4)*totalSize,cudaMemcpyDeviceToHost);
int sz=numColsSource*numRowsSource;
freopen("b_out.txt","w",stdout);
for(int i=0;i<sz;i++)
{
cout<<int(h_blendedImg[i].z)<<" "<<int(h_blendedImg[i].y)<<" "<<int(h_blendedImg[i].x)<<" ";
}
// NOTE(review): device buffers/streams are reclaimed by cudaDeviceReset;
// the host mallocs leak (harmless at process exit).
cudaDeviceReset();
return 0;
}
|
4,972 | #include "includes.h"
// Add a fixed amount of density (200) to every grid cell within `redius` of
// the centre (centerX, centerY). Grid layout: the cell's y comes from the
// block index and its x from the thread index, so the dense array is indexed
// row-major with blockDim.x columns. No bounds guard — the launch must cover
// the field exactly.
__global__ void mAddDrip(float *dense, int centerX, int centerY, float redius) {
    const int cellX = threadIdx.x;
    const int cellY = blockIdx.x;
    const int cell = blockIdx.x * blockDim.x + threadIdx.x;
    const float distance = sqrt((float)((cellX-centerX)*(cellX-centerX)) + (float)((cellY-centerY)*(cellY-centerY)));
    if (distance < redius) {
        dense[cell] += 200;
    }
}
4,973 | #include "includes.h"
// Gather the last element of every length-B segment of S into Aux:
// Aux[t] = S[(t+1)*B - 1]. B is a compile-time constant expected from
// includes.h; presumably this is the "collect per-segment totals" step of a
// scan — confirm against the companion kernels.
// NOTE(review): no bounds guard — must be launched with exactly len(S)/B
// threads in a single block.
__global__ void k2(int *Aux,int *S){
Aux[threadIdx.x]=S[(threadIdx.x+1)*B-1];
}
4,974 | #include "includes.h"
// Device-side fill: set the first n 64-bit words of p to the value v.
// One thread per element; surplus threads do nothing.
// Fix: the global index was computed in 32-bit arithmetic (blockIdx.x *
// blockDim.x wraps for grids addressing >= 2^32 elements) even though n is
// 64-bit; promote before multiplying.
__global__ void myset(unsigned long long *p, unsigned long long v, long long n) {
    const long long tid = (long long)blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        p[tid] = v;
    }
}
4,975 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// Rotate the input texture by `theta` radians about the image centre and
// write the bilinearly-sampled result to outputData (normalized, wrapped
// texture coordinates).
// Fix: added a bounds guard — the original wrote out of bounds whenever the
// grid over-covered width x height.
__global__ void transformKernel(float *outputData, int width, int height, float theta, cudaTextureObject_t tex){
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x >= (unsigned int)width || y >= (unsigned int)height) return;
    // calculate normalized texture coordinates, centred on the image middle
    float u = (float)x - (float)width/2;
    float v = (float)y - (float)height/2;
    float tu = u*cosf(theta) - v*sinf(theta);
    float tv = v*cosf(theta) + u*sinf(theta);
    tu /= (float)width;
    tv /= (float)height;
    // read from texture and write to global memory
    outputData[y*width + x] = tex2D<float>(tex, tu+0.5f, tv+0.5f);
}
// Stub image loader: ignores _imagePath and synthesizes a 16x16 ramp image
// in which pixel k has value k (0..255). Reports the fixed dimensions via
// the width/height out-parameters.
void loadPGM(float* _imageData, const char* _imagePath, int* width, int* height){
    (void)_imagePath;   // placeholder — real file loading not implemented
    for(int k = 0; k < 256; k++){
        _imageData[k] = (float)k;
    }
    *width = 16;
    *height = 16;
}
// Rotate a synthetic 16x16 image by 0.5 rad through the texture hardware and
// print the sampled result.
int main(){
    /**
    1. load image
    2. allocate device memory for result
    3. copy image into a cudaArray and create a texture object over it
    4. issue kernel
    5. copy result from device to host
    6. print result
    */
    int width = 0;
    int height = 0;
    int size = 256*sizeof(float);
    const float angle = 0.5f;
    // load image (stub fills a 16x16 ramp and sets width/height)
    float* h_imageData = (float*) malloc(256*sizeof(float));
    const char* imagePath = "./test.pgm";
    loadPGM(h_imageData, imagePath, &width, &height);
    float* h_resData = (float*) malloc(256*sizeof(float));
    // device memory for the result
    float* d_resData;
    cudaMalloc((void **) &d_resData, size);
    // channel descriptor + cudaArray backing the texture
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); //C++ style, using template
    cudaArray* cuArray;
    cudaMallocArray(&cuArray, &channelDesc, width, height);
    // Fix: cudaMemcpyToArray is deprecated; use cudaMemcpy2DToArray with an
    // explicit source pitch instead.
    cudaMemcpy2DToArray(cuArray, 0, 0, h_imageData, width * sizeof(float),
                        width * sizeof(float), height, cudaMemcpyHostToDevice);
    // resource descriptor (points at the array) and texture descriptor
    cudaResourceDesc texRes;
    memset(&texRes, 0, sizeof(cudaResourceDesc));
    texRes.resType = cudaResourceTypeArray;
    texRes.res.array.array = cuArray;
    cudaTextureDesc texDescr;
    memset(&texDescr, 0, sizeof(cudaTextureDesc));
    texDescr.normalizedCoords = true;
    texDescr.filterMode = cudaFilterModeLinear;
    texDescr.addressMode[0] = cudaAddressModeWrap;
    texDescr.addressMode[1] = cudaAddressModeWrap;
    texDescr.readMode = cudaReadModeElementType;
    // create the texture object bound to the image array
    cudaTextureObject_t tex;
    cudaCreateTextureObject(&tex, &texRes, &texDescr, NULL);
    // launch: 16x16 image in 8x8 blocks -> 2x2 grid
    dim3 threadsPerBlock(8, 8, 1);
    dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y, 1);
    transformKernel<<<numBlocks,threadsPerBlock>>>(d_resData, width, height, angle, tex);
    // copy result from gpu to host (blocking copy also synchronizes)
    cudaMemcpy(h_resData, d_resData, size, cudaMemcpyDeviceToHost);
    // check result
    for(int i=0;i<256;i++){
        printf("%lf ", h_resData[i]);
    }
    // Fix: release all resources (previously leaked).
    cudaDestroyTextureObject(tex);
    cudaFreeArray(cuArray);
    cudaFree(d_resData);
    free(h_imageData);
    free(h_resData);
    return 0;
}
4,976 | #include <cuda_runtime_api.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
// Thin wrapper around cudaSetDeviceFlags(cudaDeviceMapHost): requests that
// host allocations be mappable into the device address space (zero-copy).
void cuSetDeviceFlags(){
cudaSetDeviceFlags(cudaDeviceMapHost);
}
// Allocate a unified-memory (managed) buffer of r*c unsigned chars;
// the pointer is returned through *h_img.
void cuMallocManaged(void** h_img, int r, int c){
cudaMallocManaged(h_img,sizeof(unsigned char)*r*c);
}
// Allocate a device-only buffer of r*c floats; the pointer is returned
// through *h_img.
// NOTE(review): element type differs from cuMallocManaged above (float vs
// unsigned char) — confirm the asymmetry is intentional at the call sites.
void cuMalloc(void** h_img, int r, int c){
cudaMalloc(h_img, sizeof(float)*r*c);
}
// Thin wrapper around cudaDeviceSynchronize(): blocks the host until all
// previously issued device work has completed.
void cuDeviceSynchronize(){
cudaDeviceSynchronize();
}
|
4,977 | #include "includes.h"
// Convert a packed (3 floats per pixel) RGB image to single-channel
// grayscale; one thread per pixel, x indexing rows and y indexing columns.
// Fix: the original weights (0.21, 0.71, 0.07) sum to 0.99 and deviate from
// the standard luma coefficients, slightly darkening the output; replaced
// with the ITU-R BT.709 weights 0.2126 / 0.7152 / 0.0722.
__global__ void rgb2gray (float * input, float *output, int height, int width)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // row
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // column
    if(x<height && y<width)
    {
        unsigned int idx = x* width + y;
        float r = input[3 * idx];       // red value for pixel
        float g = input[3 * idx + 1];   // green value for pixel
        float b = input[3 * idx + 2];   // blue value for pixel
        output[idx] = (0.2126f * r + 0.7152f * g + 0.0722f * b);
    }
}
4,978 | #include "includes.h"
// For each of the n particles, compute the SQUARED magnitude of its velocity
// (vel.w is ignored) and store it in velmod.
// NOTE(review): despite the name "velmod", no sqrtf is applied — presumably
// callers take the square root later or compare against squared thresholds;
// confirm before changing.
__global__ void KerComputeVelMod(unsigned n,const float4 *vel,float *velmod)
{
unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Number of particle.
if(p<n){
const float4 r=vel[p];
velmod[p]=r.x*r.x+r.y*r.y+r.z*r.z;
}
}
4,979 | /*
Compiling with nvcc:
nvcc mat_mul.cu -o mat_mul -std=c++11
./mat_mul
Sample Output:
[Enter size of square matrix]
100
[matrix multiplication of 100 elements]
Time taken for matrix multiplication without shared memory : 20 microseconds
Time taken for matrix multiplication with shared memory : 9 microseconds
*/
// Matrix multiplication with and without shared memory
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
using namespace std;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;   // number of columns
int height;  // number of rows
int stride; // row pitch in elements (== width for a dense matrix; sub-matrix views from GetSubMatrix inherit the parent's stride)
float* elements;  // first element; may point to host or device memory
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Get a matrix element
__device__ float GetElement(const Matrix mat, int row, int col)
{
    // Read one element from a (possibly strided) row-major matrix view.
    return *(mat.elements + row * mat.stride + col);
}
// Set mat matrix element
__device__ void SetElement(Matrix mat, int row, int col,
float value)
{
    // Write one element into a (possibly strided) row-major matrix view.
    *(mat.elements + row * mat.stride + col) = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix mat, int row, int col)
{
    // Build a BLOCK_SIZE x BLOCK_SIZE *view* (no copy) of the tile located
    // `col` tiles right and `row` tiles down from mat's top-left corner.
    Matrix tile;
    tile.width    = BLOCK_SIZE;
    tile.height   = BLOCK_SIZE;
    tile.stride   = mat.stride;  // the tile shares the parent's row pitch
    tile.elements = mat.elements + mat.stride * BLOCK_SIZE * row + BLOCK_SIZE * col;
    return tile;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernelSharedMemory(const Matrix, const Matrix, Matrix);
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix multiplication - Host code.
// Copies A and B to the device, runs both the naive and the shared-memory
// kernels (timing each), and copies the result back into C.
// Preconditions: all matrices are dense (stride == width) and the dimensions
// are exact multiples of BLOCK_SIZE for the shared-memory kernel.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A to device memory.
    // BUG FIX: stride must be initialized — GetElement/GetSubMatrix in the
    // shared-memory kernel address elements through it, and it was previously
    // left uninitialized on every device-side Matrix struct.
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height; d_A.stride = A.width;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size,
               cudaMemcpyHostToDevice);
    // Load B to device memory.
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height; d_B.stride = B.width;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size,
               cudaMemcpyHostToDevice);
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height; d_C.stride = C.width;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Invoke kernels.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    auto start = high_resolution_clock::now();
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // BUG FIX: kernel launches are asynchronous — without a synchronization
    // the timestamps below measured only the launch overhead.
    cudaDeviceSynchronize();
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    cout << "Time taken for matrix multiplication without shared memory : "<< duration.count() << " microseconds"<<"\n";
    auto start1 = high_resolution_clock::now();
    MatMulKernelSharedMemory<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();  // see BUG FIX note above
    auto stop1 = high_resolution_clock::now();
    auto duration1 = duration_cast<microseconds>(stop1 - start1);
    cout << "Time taken for matrix multiplication with shared memory : "<< duration1.count() << " microseconds"<<"\n";
    // Read C from device memory (blocking copy also synchronizes).
    cudaMemcpy(C.elements, d_C.elements, size,
               cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Naive matrix multiplication kernel called by MatMul().
// Each thread computes one element of C = A * B straight from global memory.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard added: when the matrix dimensions are not an exact multiple of the
    // block size, threads past the edge must not read or write out of bounds.
    if (row >= C.height || col >= C.width)
        return;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e]
                * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}
// Matrix multiplication kernel using shared memory()
// The following code sample is an implementation of
// matrix multiplication that does take advantage of
// shared memory. In this implementation, each thread
// block is responsible for computing one square sub-matrix
// Csub of C and each thread within the block is responsible
// for computing one element of Csub.
/* Tiled matrix multiplication C = A * B using shared memory.
 * Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE); A.width, A.height and
 * B.width are exact multiples of BLOCK_SIZE; and the .stride field of every
 * Matrix argument is initialized — GetElement/GetSubMatrix address through it.
 * NOTE(review): the host-side MatMul as written never sets stride on the
 * device Matrix structs; verify before trusting this kernel's results. */
__global__ void MatMulKernelSharedMemory(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation (barrier is block-wide)
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
// Entry point: reads a square-matrix size from stdin, fills two matrices with
// random values in [0,1), and runs both multiplication kernels via MatMul.
int main(void)
{
    int matSize;
    printf("[Enter size of square matrix]\n");
    // BUG FIX: validate the read — garbage input previously left matSize
    // uninitialized and the program proceeded with it.
    if (scanf("%d",&matSize) != 1 || matSize <= 0)
    {
        fprintf(stderr, "Invalid matrix size\n");
        exit(EXIT_FAILURE);
    }
    // The kernels assume dimensions are exact multiples of BLOCK_SIZE.
    if (matSize % BLOCK_SIZE != 0)
        fprintf(stderr, "Warning: size is not a multiple of %d; the kernels assume it is\n", BLOCK_SIZE);
    Matrix h_A,h_B,h_C;
    h_A.width = h_B.width = h_C.width = matSize;
    h_A.height = h_B.height = h_C.height = matSize;
    h_A.stride = h_B.stride = h_C.stride = matSize; // keep the structs fully initialized
    size_t size = matSize * matSize * sizeof(float);
    printf("[matrix multiplication of %d elements]\n", matSize);
    // Allocate the host matrices.
    h_A.elements = (float *)malloc(size);
    h_B.elements = (float *)malloc(size);
    h_C.elements = (float *)malloc(size);
    // Verify that allocations succeeded.
    if (h_A.elements == NULL || h_B.elements == NULL || h_C.elements == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize the host input matrices with uniform random values.
    for (int i = 0; i < matSize; ++i)
        for (int j = 0; j < matSize; ++j)
        {
            h_A.elements[i * matSize + j] = rand()/(float)RAND_MAX;
            h_B.elements[i * matSize + j] = rand()/(float)RAND_MAX;
        }
    MatMul(h_A,h_B,h_C);
    // Release host buffers (previously leaked).
    free(h_A.elements);
    free(h_B.elements);
    free(h_C.elements);
    return 0;
}
|
4,980 | #include<stdio.h>
#include<stdlib.h>
__device__ int gpuHistogram[10];
/* Kernel: 10-bin histogram of arr[0..noOfElements-1] (values expected 0..9),
 * accumulated into the device-global gpuHistogram array.
 * Each thread tallies privately into ten register counters over a grid-stride
 * loop, then folds them into the global bins with one atomicAdd per bin.
 * NOTE(review): the zero-initialisation below is performed only by block 0,
 * and __syncthreads() is block-local — threads of OTHER blocks can reach the
 * atomicAdd section before block 0 has cleared the bins. That is a cross-block
 * race; clearing gpuHistogram from the host (cudaMemset / cudaMemcpyToSymbol)
 * before the launch would be the safe fix. */
__global__ void computeGpuHistogram(int *arr, int noOfElements)
{
//clear the global gpu Histogram array (first 10 threads of block 0 only)
if(blockIdx.x == 0 && threadIdx.x < 10)
gpuHistogram[threadIdx.x] = 0;
//block-local barrier: only orders threads within this block (see NOTE above)
__syncthreads();
//initialize the counter variables.
//NOTE: These variables will be allocated to registers.
int count0 = 0,
count1 = 0,
count2 = 0,
count3 = 0,
count4 = 0,
count5 = 0,
count6 = 0,
count7 = 0,
count8 = 0,
count9 = 0;
// Grid-stride loop: each thread handles every noOfThreads-th element.
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int noOfThreads = blockDim.x * gridDim.x;
while(tid < noOfElements)
{
if(arr[tid] == 0)
++count0;
else if(arr[tid] == 1)
++count1;
else if(arr[tid] == 2)
++count2;
else if(arr[tid] == 3)
++count3;
else if(arr[tid] == 4)
++count4;
else if(arr[tid] == 5)
++count5;
else if(arr[tid] == 6)
++count6;
else if(arr[tid] == 7)
++count7;
else if(arr[tid] == 8)
++count8;
else
++count9;  // anything not in 0..8 lands here, including out-of-range values
tid += noOfThreads;
}
//block-local barrier before merging the private counters
__syncthreads();
//fold the per-thread register counters into the global histogram.
atomicAdd(&gpuHistogram[0], count0);
atomicAdd(&gpuHistogram[1], count1);
atomicAdd(&gpuHistogram[2], count2);
atomicAdd(&gpuHistogram[3], count3);
atomicAdd(&gpuHistogram[4], count4);
atomicAdd(&gpuHistogram[5], count5);
atomicAdd(&gpuHistogram[6], count6);
atomicAdd(&gpuHistogram[7], count7);
atomicAdd(&gpuHistogram[8], count8);
atomicAdd(&gpuHistogram[9], count9);
}
// Abort the process with a diagnostic when a CUDA call did not succeed;
// `function` is a caller-supplied label identifying the failing call.
void checkError(cudaError_t error, char * function)
{
    if(error == cudaSuccess)
        return;
    printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error));
    exit(-1);
}
// Prompt repeatedly on stdout/stdin until the user supplies an integer inside
// the inclusive range [lowerBound, upperBound]; the result lands in *value.
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
    for(;;)
    {
        printf("%s(%d-%d): ", msg, lowerBound, upperBound);
        scanf("%d", value);
        if(*value >= lowerBound && *value <= upperBound)
            return;
        printf("Incorrect values. Enter again.\n");
    }
}
// Fill arr with noOfElements pseudo-random digits 0..9.
// A fixed seed makes every run operate on identical data; small arrays
// (< 20 elements) are echoed to stdout for inspection.
void fillArrayWithRandNos(int * arr, int noOfElements)
{
    srand(5); //for consistent numbers on every run.
    const bool echo = noOfElements < 20;
    for(int idx = 0; idx < noOfElements; ++idx)
    {
        arr[idx] = rand() % 10;
        if(echo)
            printf("%d ", arr[idx]);
    }
    if(echo)
        printf("\n");
}
// Tally each value of arr (expected range 0..9) into its histogram bucket;
// the caller provides a zero-initialized 10-slot histogram.
void computeHistogram(int *arr, int *histogram, int noOfElements)
{
    for(int idx = 0; idx < noOfElements; ++idx)
        histogram[arr[idx]] += 1;
}
// True when the two 10-bin histograms agree bucket-for-bucket.
bool cpuGpuResultsCompare(int *cpuResultsArray, int * histogramFromGPU)
{
    bool same = true;
    for(int bin = 0; bin < 10 && same; ++bin)
        same = (cpuResultsArray[bin] == histogramFromGPU[bin]);
    return same;
}
// Interactive driver: repeatedly builds a random digit array, histograms it on
// both CPU and GPU, compares the results and reports the timings.
int main()
{
    int noOfElements = -1, i;
    int *arr, *gpuArray;
    size_t size;
    // Launch-configuration variables (read interactively each round).
    int threadsPerBlock = 0, blocksInGrid = 0;
    // CUDA events used for both host- and device-side timing.
    cudaEvent_t hostStart, hostStop, deviceStart, deviceStop;
    float timeDifferenceOnHost, timeDifferenceOnDevice;
    cudaEventCreate(&hostStart);
    cudaEventCreate(&hostStop);
    cudaEventCreate(&deviceStart);
    cudaEventCreate(&deviceStop);
    while(1)
    {
        int histogram[10] = {0,0,0,0,0,0,0,0,0,0}, histogramFromGPU[10];
        printf("Enter the no. of elements to run test on: ");
        scanf("%d", &noOfElements);
        arr = (int *)malloc(noOfElements * sizeof(int));
        if(arr == NULL)  // robustness: bail out on failed host allocation
        {
            printf("Host allocation failed\n");
            return -1;
        }
        printf("Filling array with random numbers...\n");
        fillArrayWithRandNos(arr, noOfElements);
        printf("Computing histogram on CPU...\n");
        cudaEventRecord(hostStart, 0);
        computeHistogram(arr, histogram, noOfElements);
        cudaEventRecord(hostStop, 0);
        // BUG FIX: the stop event must have completed before its timestamp
        // can be read; cudaEventElapsedTime fails on an unrecorded event.
        cudaEventSynchronize(hostStop);
        cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
        size = noOfElements * sizeof(int);
        checkError(cudaMalloc((void**)&gpuArray, size), "Mallocing array on GPU");
        checkError(cudaMemcpy(gpuArray, arr, size, cudaMemcpyHostToDevice), "Input array copy");
        readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct a P threaded linear block)", 4,1024);
        readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct linear grid with P blocks)", 0, 65535/threadsPerBlock+1);
        cudaEventRecord(deviceStart, 0);
        computeGpuHistogram<<<blocksInGrid, threadsPerBlock>>>(gpuArray, noOfElements);
        cudaDeviceSynchronize(); // BUG FIX: cudaThreadSynchronize() is deprecated
        cudaEventRecord(deviceStop, 0);
        cudaEventSynchronize(deviceStop); // see BUG FIX note above
        cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
        // BUG FIX: string symbol names were removed in CUDA 5.0 — the symbol
        // itself must be passed, otherwise the copy fails at runtime.
        checkError(cudaMemcpyFromSymbol(histogramFromGPU, gpuHistogram, sizeof(histogramFromGPU), 0, cudaMemcpyDeviceToHost), "Histogram copy");
        if(cpuGpuResultsCompare(histogram, histogramFromGPU))
            printf("GPU and CPU results match\n");
        else
            printf("GPU and CPU results don't match\n");
        printf("CPU & GPU stats: \n");
        for(i = 0 ; i < 10 ; ++i)
            printf("No of %ds: %d %d\n", i, histogram[i], histogramFromGPU[i]);
        printf("Time on CPU : %5.5f, Time on GPU: %5.5f\n", timeDifferenceOnHost, timeDifferenceOnDevice);
        printf("-----------------------------------------------\n");
        printf("Speedup: %5.5f\n", timeDifferenceOnHost/timeDifferenceOnDevice);
        free(arr);
        cudaFree(gpuArray);
        char c = 'n';
        printf("Again?(y/n): ");
        while(true)
        {
            c = getchar();
            if(c == 'y' || c == 'n')
                break;
        }
        if(c == 'n')
            break;
    }
    printf("\n");
    cudaEventDestroy(deviceStop);
    cudaEventDestroy(deviceStart);
    cudaEventDestroy(hostStart);
    cudaEventDestroy(hostStop);
    return 0;
}
|
4,981 | #include <iostream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#define ITERATIONS 100
#define ARR_SIZE 1000000
#define ARR_SIZE_PRINT_LIMIT 100
//CUDA values
#define NUM_BLOCKS 4096
#define NUM_THREADS 1
//Random values range
const int MIN_RAND_NUM = -100;
const int MAX_RAND_NUM = 100;
// Echo the array to stdout, space-separated. Arrays larger than
// ARR_SIZE_PRINT_LIMIT are silently skipped to keep output manageable.
void print_array(int *arr, int size){
    if(size > ARR_SIZE_PRINT_LIMIT){
        return;
    }
    for(int idx = 0; idx < size; ++idx){
        printf("%d ", arr[idx]);
    }
    printf("\n");
}
// Classic merge step: combine the already-sorted runs from[start_idx..middle_idx]
// and from[middle_idx+1..end_idx] into to[start_idx..end_idx], ascending.
// (Ties take the right-hand element first, matching the < comparison.)
__device__ void sort_2_preordered_segments(const int &start_idx, const int &middle_idx, const int &end_idx, int* from, int* to){
    int l = start_idx;        // cursor in the left run
    int r = middle_idx + 1;   // cursor in the right run
    int out = start_idx;      // write cursor in the destination
    // Merge while both runs still have elements.
    while(l <= middle_idx && r <= end_idx){
        to[out++] = (from[l] < from[r]) ? from[l++] : from[r++];
    }
    // Drain whichever run is left over.
    while(l <= middle_idx){
        to[out++] = from[l++];
    }
    while(r <= end_idx){
        to[out++] = from[r++];
    }
}
/* One bottom-up merge-sort layer over the fixed ARR_SIZE-element array.
 * [from] holds sorted runs of size layer_size/2; each adjacent pair is merged
 * into a sorted run of size layer_size in [to].
 * Launch: 1-D grid and blocks; each thread owns segments_per_block consecutive
 * segments. Threads whose first segment index is past num_segments exit.
 * NOTE(review): operates on the compile-time constant ARR_SIZE rather than a
 * runtime length. */
__global__ void process_layer(int *from, int *to, int layer_size, int num_segments, int segments_per_block){
int index = blockIdx.x*blockDim.x + threadIdx.x;
int range_size = index*segments_per_block;
//End process if blockId surpasses the number of segments to process
if(range_size >= num_segments){
return;
}
//Iterate over each segment in charge of the current Block in the array [FROM] of size "layer_size" and
//sort it as 2 separated ordered lists into the new array [TO]
for(int range_idx = range_size;
range_idx < range_size+segments_per_block && range_idx < num_segments; ++range_idx){
int segment_idx = range_idx*layer_size;
//Current segment is complete (it has a size of at least "layer_size")
if( segment_idx+layer_size-1 < ARR_SIZE ){
//Get start, end and middle indexes of the current segment to sort it as 2 previously ordered segments
//The first segment from start_idx to middle_idx
//The second segment from middle_idx+1 to end_idx
int start_idx = segment_idx;
int end_idx = segment_idx+layer_size-1;
int middle_idx = (start_idx+end_idx)>>1;
//Sort the preordered segments into a new combined ordered segment
sort_2_preordered_segments(start_idx, middle_idx, end_idx, from, to);
} else if( ARR_SIZE-segment_idx > (layer_size>>1) ){ //Last segment in layer is not complete but is greater than the previous "layer_size"
//Get start, end and middle index of the leftmost and rightmost segments
int start_idx = segment_idx;
int end_idx = ARR_SIZE-1;
int middle_idx = segment_idx+(layer_size>>1)-1;
//Sort the preordered segments into a new combined ordered segment
sort_2_preordered_segments(start_idx, middle_idx, end_idx, from, to);
} else{ //Last segment in layer is not complete but is smaller than the previous "layer_size"
//Copy the last segment directly from [FROM] to [TO]
for(int i = segment_idx; i < ARR_SIZE; ++i){
to[i] = from[i];
}
}
}
}
/* Final merge pass used when ARR_SIZE is not an exact power of two: merges the
 * sorted run [0, layer_size/2 - 1] with the sorted tail up to ARR_SIZE-1.
 * Intended to be launched <<<1,1>>> — a single thread performs the merge. */
__global__ void process_last_layer(int *from, int *to, int layer_size){
//Get start, end and middle index of the leftmost and rightmost segments
int start_idx = 0;
int end_idx = ARR_SIZE-1;
int middle_idx = (layer_size>>1)-1;
//Sort the preordered segments into a new combined ordered segment
sort_2_preordered_segments(start_idx, middle_idx, end_idx, from, to);
}
/* Bottom-up (iterative) merge sort over the two device arrays arrA and arrB
 * (both length ARR_SIZE and holding identical unsorted data on entry).
 * Returns whichever device pointer ends up holding the fully sorted data.
 * NOTE(review): all kernels are launched on the default stream, so successive
 * layers serialize without explicit synchronization; the caller's blocking
 * cudaMemcpy provides the final host/device sync. */
int* iterative_merge_sort(int *arrA, int *arrB){
//Auxiliary pointers to know to which array the segments to sort are being passed during each layer
//Each layer will always try to get the elements to sort from the [FROM] array and put the sorted elements to the [TO] array
int *from = arrA;
int *to = arrB;
//The size of the segments in each processed layer
int layer_size;
//Iterate over all the segment sizes that are powers of two (2, 4, 8, 16, ...) to sort the array by layers of that sizes
for(layer_size = 2; layer_size <= ARR_SIZE; layer_size <<= 1){
//Each layer swaps the [FROM] and [TO] pointers so the array that was previously the target to store the newly ordered segments
//is now the one were the ordered segments are extracted from
std::swap(from,to);
//Number of segments of the total array size to be processed in the current layer (ceiling division)
int num_segments = ARR_SIZE/layer_size+(ARR_SIZE%layer_size != 0);
//Number of segments to be processed by each block (ceiling division)
int segments_per_block = num_segments/(NUM_BLOCKS*NUM_THREADS)+
(num_segments%(NUM_BLOCKS*NUM_THREADS) != 0);
//CUDA Kernel
process_layer<<<NUM_BLOCKS,NUM_THREADS>>>(from, to, layer_size, num_segments, segments_per_block);
//cudaDeviceSynchronize();
}
//An extra layer has to be processed due to the fact that ARR_SIZE is not a perfect power of 2:
//the loop above exits with one full power-of-two run plus an unmerged remainder.
if(layer_size > ARR_SIZE){
std::swap(from, to);
//CUDA Kernel (single thread merges the final two runs)
process_last_layer<<<1,1>>>(from, to, layer_size);
//cudaDeviceSynchronize();
}
//Return pointer to final sorted values of the array
return to;
}
// Driver: repeatedly fills two mirrored arrays with random values, sorts them
// on the GPU with the iterative merge sort, and copies the result back.
// NOTE: with ARR_SIZE (1e6) > ARR_SIZE_PRINT_LIMIT, print_array is a no-op.
int main(){
//Random Seed
srand(time(NULL));
//Allocate array's A memory
int *arrA = (int*)malloc(sizeof(int)*ARR_SIZE);
//Allocate array's B memory
int *arrB = (int*)malloc(sizeof(int)*ARR_SIZE);
//Device arrays reference pointers
int *deviceA, *deviceB;
//Allocate space for device copies of array A, B
cudaMalloc((void**) &deviceA, sizeof(int)*ARR_SIZE);
cudaMalloc((void**) &deviceB, sizeof(int)*ARR_SIZE);
for(int iteration = 0; iteration < ITERATIONS; ++iteration){
//Initialize array A and array B with identical random values
for(int i = 0; i < ARR_SIZE; ++i){
arrA[i] = arrB[i] = (rand()%(MAX_RAND_NUM-MIN_RAND_NUM+1))+MIN_RAND_NUM;
}
//Copy initialized arrays to device
cudaMemcpy(deviceA, arrA, sizeof(int)*ARR_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, arrB, sizeof(int)*ARR_SIZE, cudaMemcpyHostToDevice);
//Print array A
print_array(arrA, ARR_SIZE);
//Iterative merge sort; returns the device buffer holding the sorted data
int *sorted_array = iterative_merge_sort(deviceA, deviceB);
//Copy sorted array from device to host (blocking copy also synchronizes)
cudaMemcpy(arrB, sorted_array, sizeof(int)*ARR_SIZE, cudaMemcpyDeviceToHost);
//Print any error regarding the GPU
//printf("%s\n", cudaGetErrorString(cudaGetLastError()));
//Print whatever array the pointer "to" ended up pointing to (this one stores the final sorted values)
print_array(arrB, ARR_SIZE);
}
//Free allocated memory
free(arrA);
free(arrB);
//Free device allocated memory
cudaFree(deviceA);
cudaFree(deviceB);
return 0;
}
4,982 | #include "includes.h"
// One thread per element: clears the flag F, then for elements still marked in
// U whose d value has fallen below the threshold del[0], removes them from U
// and raises F. (NOTE(review): resembles the frontier-update step of an SSSP
// algorithm — confirm against the calling code.)
__global__ void update(int* U, int* F, int* d, int* del, size_t gSize) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= gSize) {
        return;
    }
    F[tid] = 0;
    if (U[tid] && d[tid] < del[0]) {
        U[tid] = 0;
        F[tid] = 1;
    }
}
4,983 | #include <cuda.h>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdlib.h>
#include <stdio.h>
// Fill a host vector with 100 uniform random floats in [0,1], sort it on the
// device with Thrust, and print the sorted values.
int main(){
    thrust::host_vector<float> h;
    for(int i = 0; i < 100; ++i){
        float value = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        h.push_back(value);
    }
    thrust::device_vector<float> d = h;   // host -> device copy
    thrust::sort(d.begin(), d.end());     // sort on the GPU
    h = d;                                // device -> host copy
    for(int i = 0; i < 100; ++i){
        printf("%f ", h[i]);
    }
    return 0;
}
|
4,984 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Element-wise c[i] = a[i] + b[i], one thread per element.
// No bounds guard: the launch configuration must supply at least one thread
// per element the caller wants computed, and all three buffers must be at
// least gridDim.x * blockDim.x elements long (the host pads them).
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
// Element-wise vector addition computed on both CPU and GPU, with timing and a
// final host-side verification of the device result. Optional argv[1] sets the
// vector length (default 4194304).
int main(int argc, char *argv[]){
    unsigned int length = 4194304;
    int i, Size;
    float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
    time_t seed;
    cudaEvent_t start;
    cudaEvent_t stop;
    float msecTotal;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (argc>1)
        sscanf(argv[1],"%u",&length); /* BUG FIX: %u matches unsigned int (was %d) */
    Size = sizeof(float)*length;
    /* Device buffers are padded to a whole multiple of 512*32 elements so a
       guard-less kernel covering the padded range never goes out of bounds. */
    unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
    a = (float *)calloc(length, sizeof(float));
    b = (float *)calloc(length, sizeof(float));
    c = (float *)calloc(length, sizeof(float));
    copyC = (float *)calloc(length, sizeof(float));
    if (!a || !b || !c || !copyC) {  /* robustness: check host allocations */
        printf("oops, %d, host allocation failed\n", __LINE__);
        exit(EXIT_FAILURE);
    }
    time(&seed);
    srand48(seed);
    for (i=0; i<length; i++)
        a[i] = drand48(), b[i] = drand48();
    cudaSetDevice(0);
    cudaError_t error;
    error = cudaMalloc((void**)&gpuA, padded_length*sizeof(float));
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    error = cudaMemset(gpuA, 0, padded_length*sizeof(float));
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    error = cudaMalloc((void**)&gpuB, padded_length*sizeof(float));
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    error = cudaMemset(gpuB, 0, padded_length*sizeof(float));
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    error = cudaMalloc((void**)&gpuC, padded_length*sizeof(float));
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    error = cudaMemset(gpuC, 0, padded_length*sizeof(float));
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    /* CPU reference, timed with the same CUDA events as the GPU run. */
    cudaEventRecord(start, NULL);
    for (i=0; i<length; i++)
        c[i] = a[i] + b[i];
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("cpu time: %.3f ms\n", msecTotal);
    error = cudaMemcpy(gpuA, a, Size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    error = cudaMemcpy(gpuB, b, Size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    dim3 numThreads(512, 1);
    /* BUG FIX: the grid was hard-coded to 32 blocks (16384 threads), so only
       the first 16384 elements were summed on the GPU and the verification
       below failed for the default 4M-element vectors. Launch one thread per
       padded element instead. */
    dim3 numBlocks((unsigned int)(padded_length / 512), 1);
    cudaEventRecord(start, NULL);
    vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    error = cudaMemcpy(copyC, gpuC, Size, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); }
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("gpu time: %.3f ms\n", msecTotal);
    /* Verify the device result against the CPU reference. */
    for (i=0; i<length; i++)
        if (fabs(c[i]-copyC[i]) > 0.000001){
            printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
            return 1;
        }
    /* Release resources (previously leaked). */
    free(a); free(b); free(c); free(copyC);
    cudaFree(gpuA); cudaFree(gpuB); cudaFree(gpuC);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
4,985 | extern "C" {
// Grayscale image as flat 1-D data — access pattern #1.
__global__ void image_add_gray_1(int* img1, int* img2, int* imgres, int length){
// Fully general flat thread index: collapses a 3-D grid of 3-D blocks into a
// single linear id, so any launch shape can be used.
int tid = blockIdx.z * (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z) \
+ blockIdx.y * gridDim.x * (blockDim.x * blockDim.y * blockDim.z) \
+ blockIdx.x * (blockDim.x * blockDim.y * blockDim.z) \
+ threadIdx.z * (blockDim.x * blockDim.y) \
+ threadIdx.y * blockDim.x \
+ threadIdx.x;
if (tid < length) {
// Pixel-wise average of the two images.
imgres[tid] = (img1[tid] + img2[tid]) / 2;
}
}
// Grayscale image as flat 1-D data — access pattern #2 (2-D launch).
__global__ void image_add_gray_2(int * img1,int * img2,int * imgres,int width,int height){
// Column index within the grid (x direction).
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
// Row index within the grid (y direction).
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
// Pixel-wise average of the two images.
imgres[idx] = (img1[idx] + img2[idx]) / 2;
}
}
// Weighted sum of two grayscale images: imgres = alpha*img1 + beta*img2.
__global__ void image_add_gray_weighted(float * img1,float* img2,float * imgres,float alpha,float beta,int width,int height){
// Column index within the grid (x direction).
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
// Row index within the grid (y direction).
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height){
imgres[idx] = alpha * img1[idx] + beta * img2[idx];
}
}
// Element-wise sum of two RGB images (one int3 per pixel).
__global__ void image_add_rgb(int3 * img1,int3 * img2,int3 * imgres,int width,int height){
// Column index within the grid (x direction).
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
// Row index within the grid (y direction).
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
int3 rgb1 = img1[idx];
int3 rgb2 = img2[idx];
// Per-channel addition. NOTE(review): no clamping/saturation is applied.
imgres[idx].x = rgb1.x + rgb2.x;
imgres[idx].y = rgb1.y + rgb2.y;
imgres[idx].z = rgb1.z + rgb2.z;
}
}
// Weighted sum of two RGB images: imgres = alpha*img1 + beta*img2, per channel.
__global__ void image_add_rgb_weighted(float3 * img1,float3* img2,float3* imgres,float alpha,float beta,int width,int height){
// Column index within the grid (x direction).
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
// Row index within the grid (y direction).
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
float3 rgb1 = img1[idx];
float3 rgb2 = img2[idx];
imgres[idx].x = alpha * rgb1.x + beta * rgb2.x;
imgres[idx].y = alpha * rgb1.y + beta * rgb2.y;
imgres[idx].z = alpha * rgb1.z + beta * rgb2.z;
}
}
//https://blog.csdn.net/hujingshuang/article/details/53115572
} |
4,986 | /*
* Ejercicio 4 Práctica 4: CUDA
* Mariana Hernández
* Alan Córdova
*/
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
# define NPOINTS 2000
# define MAXITER 2000
#define ARRAY_SIZE 256
#define NUM_BLOCKS 1
#define THREADS_PER_BLOCK 256
struct complex{
double real;
double imag;
};
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
/* Kernel para sumar generar puntos muestra */
/* Kernel: Mandelbrot-set membership test over an NPOINTS x NPOINTS grid of
 * sample points c in [-2, 0.5] x [0, 1.125]. Each point's orbit z <- z^2 + c
 * is iterated up to MAXITER times; numout_out[i] is incremented once if the
 * orbit escapes (|z|^2 > 4). Grid-stride loop: any launch config covers all
 * NPOINTS*NPOINTS points. */
__global__
void maldel_gen_points(complex *d_out, int *numout_out)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < NPOINTS*NPOINTS; i += stride) {
        // BUG FIX: the row/column must be decoded from the flat index;
        // previously BOTH coordinates used i directly, producing sample points
        // far outside the intended window (i runs up to NPOINTS^2).
        int row = i / NPOINTS;
        int col = i % NPOINTS;
        d_out[i].real = -2.0+2.5*(double)(row)/(double)(NPOINTS)+1.0e-7;
        d_out[i].imag = 1.125*(double)(col)/(double)(NPOINTS)+1.0e-7;
        // BUG FIX: z must persist across iterations; it was previously reset
        // to c at the top of every iteration, so the orbit never advanced.
        struct complex z = d_out[i];
        for (int iter=0; iter<MAXITER; iter++){
            double ztemp=(z.real*z.real)-(z.imag*z.imag)+d_out[i].real;
            z.imag=z.real*z.imag*2+d_out[i].imag;
            z.real=ztemp;
            if ((z.real*z.real+z.imag*z.imag)>4.0e0) {
                numout_out[i]++;  // point escaped: outside the set
                break;
            }
        }
    }
}
/*
*
*/
// Driver: launches the Mandelbrot sampling kernel, times it with CUDA events,
// sums the per-point escape counts and prints the estimated set area.
int main(int argc, char** argv) {
complex *cnumbers;
complex *d_cnumbers;
int *numout;
int *d_numout;
size_t sz1 = NPOINTS*NPOINTS * sizeof(complex);
size_t sz2 = NPOINTS*NPOINTS * sizeof(int);
cnumbers = (complex *) malloc (sz1);
cudaMalloc(&d_cnumbers, sz1);
numout = (int *) malloc (sz2);
cudaMalloc(&d_numout, sz2);
// initialization: escape counters start at zero
for (int i = 0; i < NPOINTS*NPOINTS; i++) {
numout[i] = 0;
}
// copy host to device
cudaMemcpy(d_numout, numout, sz2, cudaMemcpyHostToDevice);
// invoke kernel (d_cnumbers needs no init: the kernel writes every element)
int blockSize = 256;
int numBlocks = (NPOINTS*NPOINTS + blockSize - 1) / blockSize;
// CUDA events to time the kernel execution
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
maldel_gen_points<<< numBlocks, blockSize >>>(d_cnumbers, d_numout);
cudaDeviceSynchronize();
checkCUDAError("kernel invocation");
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Tiempo de maldel_gen_points: \t %f \n", milliseconds);
cudaMemcpy(cnumbers, d_cnumbers, sz1, cudaMemcpyDeviceToHost);
cudaMemcpy(numout, d_numout, sz2, cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
// res = number of sample points that escaped (are outside the set)
int res = 0;
for(int i = 0; i < NPOINTS*NPOINTS; i++){
res += numout[i];
}
/* print out the result */
double area=2.0*2.5*1.125*(double)(NPOINTS*NPOINTS-res)/(double)(NPOINTS*NPOINTS);
double error=area/(double)NPOINTS;
printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n",area,error);
//printf("Tiempo de ejecución: %f segundos \n",difftime(t2,t1));
cudaFree(d_cnumbers);
free(cnumbers);
cudaFree(d_numout);
free(numout);
return 0;
}
// Pull the sticky CUDA error state; abort with context when anything failed.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
}
|
4,987 | /* CUDA version of the DBNN code for classification of stars, galaxies. The code was originally written by Prof. Sajeeth
Author: Ajay Vibhute
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <iostream>
using namespace std;
#include <stdlib.h>
#include<sys/times.h> // times() fun. is here.
#include <time.h>
#include <vector>
#define classes 500
#define max_resol 1600
#define features 100
#include"kernel.cu"
using std::vector;
/**************************
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "vsipc.h"
***************************/
//#define oneround 100 // Memory size.
//#define fst_gain 1.0 Moved to be a floating variable
static float bgain,gain,dmyclass[classes+2],classval[classes+2],cmax,c2max,c3max,c4max,tmp2_wts,totprob,oldj;
static float LoC=0.65;
static float nLoC=0.0;
static int jx=0, resol=100,nresol=0,nerror=0,nLoCcnt=1,skpchk=0,MissingDat=-9999;
static float omax,omin,rslt,rslt2,orslt,orslt2,prslt,nrslt,fst_gain;
clock_t start,stop;
static int argfnd, oneround=100,kmax,k2max,k3max,k4max,ans1,tcnt,rnn,rnd,i,j,k,l,n,m,p,c1cnt,c2cnt,pcnt,pocnt,invcnt,innodes=100,outnodes=100;
char fln[256],fltmp[256],urchoice,urchoicex,bcchoice,savedpar;
FILE *fl1,*fl2,*fl3,*fl4,*fl5,*fl6,*fl7,*fl8,*fl9,*fl10;
int main(int argv, char *argp[256])
/*
Important note for revision in ver. 4.02
Compute the bin center of gravities and the Kernel fit that holds the probability to
find them. The slope of it should be used as bgain for each bin.
*/
/*
You can now run dbnn in automated mode by specifying the parameters in 0.par and 1.par
files. Also dbnn can now use the bin values from the saved apf file.
*/
{
if(argv > 3)
{
argfnd=1;
cout << "The selected option is " << *argp[3] <<"\n";
switch(*argp[3])
{
case '0':
ans1=0;
if((fl2=fopen("0.par","r"))!=NULL)
{
fscanf(fl2,"%c\n",&bcchoice); //Handle missing or out of range values? Y if yes. NEW in Ver 7
fscanf(fl2,"%c\n",&urchoice);
fscanf(fl2,"%c\n",&savedpar);
fscanf(fl2,"%c\n",&urchoicex);
if(bcchoice == 'Y'||bcchoice =='y')
{
fscanf(fl2,"%d\n",&skpchk);
if(skpchk <0) MissingDat=skpchk;
cout << "System is configured for handling missing data with missing data indicator" << MissingDat <<"\n";
}
fclose(fl2);
}
else
{
cout << "No Parameter File... existing..";
exit(1);
}
break;
case '1':
ans1=1;
if((fl2=fopen("0.par","r"))!=NULL)
{
fscanf(fl2,"%c\n",&bcchoice); //Handle missing or out of range values? Y if yes. NEW in Ver 7
fscanf(fl2,"%c\n",&urchoice);
fscanf(fl2,"%c\n",&savedpar);
fscanf(fl2,"%c\n",&urchoicex);
if(bcchoice == 'Y'||bcchoice =='y')
{
fscanf(fl2,"%d\n",&skpchk);
if(skpchk <0) MissingDat=skpchk;
cout << "System is configured for handling missing data with missing data indicator" << MissingDat <<"\n";
}
fclose(fl2);
}
else
{
cout << "No Parameter File... existing..";
exit(1);
}
if((fl2=fopen("1.par","r"))!=NULL)
{
fscanf(fl2,"%f",&gain);
fscanf(fl2,"%d",&oneround);
fclose(fl2);
}
else
{
cout << "No Parameter File... existing..";
exit(1);
}
break;
case '2':
ans1=2;
if((fl2=fopen("0.par","r"))!=NULL)
{
fscanf(fl2,"%c\n",&bcchoice); //Handle missing or out of range values? Y if yes. NEW in Ver 7
fscanf(fl2,"%c\n",&urchoice);
fscanf(fl2,"%c\n",&savedpar);
fscanf(fl2,"%c\n",&urchoicex);
if(bcchoice == 'Y'||bcchoice =='y')
{
fscanf(fl2,"%d\n",&skpchk);
if(skpchk <0) MissingDat=skpchk;
cout << "System is configured for handling missing data with missing data indicator" << MissingDat <<"\n";
}
fclose(fl2);
}
else
{
cout << "No Parameter File... existing..";
exit(1);
}
break;
case '3':
ans1=3;
if((fl2=fopen("0.par","r"))!=NULL)
{
fscanf(fl2,"%c\n",&bcchoice); //Handle missing or out of range values? Y if yes. NEW in Ver 7
fscanf(fl2,"%c\n",&urchoice);
fscanf(fl2,"%c\n",&savedpar);
fscanf(fl2,"%c\n",&urchoicex);
if(bcchoice == 'Y'||bcchoice =='y')
{
fscanf(fl2,"%d\n",&skpchk);
if(skpchk <0) MissingDat=skpchk;
cout << "System is configured for handling missing data with missing data indicator" << MissingDat <<"\n";
}
fclose(fl2);
}
else
{
cout << "No Parameter File... existing..";
exit(1);
}
break;
default:
cout << "Create the APF file(0) or Create the Weights file (1) or Classify Data(2,3) ?";
cin >> ans1;
break;
}
}
else
{
argfnd=0;
cout << "Create the APF file(0) or Create the Weights file (1) or Classify Data(2,3) ?";
cin >> ans1;
}
if(ans1 == 2)
{
if(argfnd==1)
bgain=0.0;
else
{
cout << "Allowed relaxation on the boundary (in % use 0 for default from training data) :";
cin >> bgain;
bgain=bgain*1.0;
}
}
else
bgain= 0; // During training we are strict on boundary constraints.
if(argv < 3)
{
cout << "Enter the name of the input file without extension (dat) :";
cin >> fln;
}
else
{
strcpy(fln,argp[1]);
}
strcpy(fltmp,fln);
strcat(fltmp,".dat");
/*
The structure of the data file is:
Feature1 Feature2 Feature3 ....(etc upto innodes) ActualClass
Feature1 Feature2 Feature3 ....(etc upto innodes) ActualClass
Feature1 Feature2 Feature3 ....(etc upto innodes) ActualClass
The delimiters are spaces and not tabs!!
ActualClass should be a numeric > 0
*/
if((fl1=fopen(fltmp,"r"))!=NULL)
{
strcpy(fltmp,fln);
strcat(fltmp,".inf");
/*
The format of the info file is: (in each line enter)
innodes
outnodes
margin <- This addition is required for regression problems.
1.0 <- You can give any real positive value here. It is just a label.
2.0
... (etc. upto no of classes)
0.65 <- The Margin or Line of Control for marginal values.
100 <- By default, the maximum bin size is set to 100. You can change this if required.
0,1,2 <- no error bars, uniform error bar, upper lower separate error values per entry.
*/
if((fl2=fopen(fltmp,"r"))!=NULL)
{
i=0;
fscanf(fl2,"%d",&innodes);
fscanf(fl2,"%d",&outnodes);
for (i=0;i<=outnodes;i++) // dmyclass[0] contains margin others are expected values.
fscanf(fl2,"%f",&dmyclass[i]);
fscanf(fl2,"%f",&LoC); // New parameter to specify the Line Of Control
fscanf(fl2,"%d",&nresol);
fscanf(fl2,"%d",&nerror);
cout <<"You have "<< innodes << " input nodes and " << outnodes <<" Output nodes with " << "margin set to " << LoC << " and error levels set to "<< nerror <<"\n";
cout << "The target outputs are\n";
for (i=0;i<=outnodes;i++) cout << dmyclass[i] <<"\n";
if(nresol >0)
{
resol=nresol;cout << "The maximum binsize is: " << resol <<"\n";
}
else
{
cout << "The maximum binsize is: " << resol<<"\n";
}
fst_gain*=1.0/outnodes;
}
else
{
cout << "Unable to find the Info file. Exiting !!";
exit(1);
}
} // program ends.
else // data file read error.
{
cout << "Unable to open the data file";
exit(1);
}
cout << "Going to initialise the arrays\n";
/**************** Let us Define the Network Structure *********************************/
//float mask_disp_maxres; // Space to save max resol for normalisation of mask_dist
strcpy(fltmp,fln);
strcat(fltmp,".dat");
int numlines=getNumlines(fltmp);
printf("NUMLINES:%d\n",numlines);
float vectso[innodes+outnodes+2],tmpv,max[innodes+2],min[innodes+2],vects[innodes+outnodes+2];
float err1vects[innodes+2], err2vects[innodes+2];
//float arr_vects[numlines][innodes+outnodes+2];
float *arr_tmpv=(float*)malloc(sizeof(float)*(numlines+2));
float *arr_vects=(float*)malloc(sizeof(float)*(numlines+2)*(innodes+outnodes+2));
float *arr_err1vects=(float*)malloc(sizeof(float)*(numlines+2)*(innodes+2));
float *arr_err2vects=(float*)malloc(sizeof(float)*(numlines+2)*(innodes+2));
int totsize=(innodes+2)*(resol+2)*(innodes+2)*(resol+4)*(outnodes+2);
int totsendreceivesize=(innodes+1)*(resol+2)*(innodes+1)*(resol+1)*(outnodes+1);
float *arr_anti_wts=(float*) malloc(totsize*sizeof(float));
int *arr_anti_net=(int*)malloc(sizeof(int)*totsize);
int ik=innodes+1,jk=resol+1,lk=innodes+1,mk=resol+1,kk=outnodes+1;
int resolution[innodes+8];
float classtot[innodes+2][resol+2]; // Total Prob. computed
if(classtot==NULL){cout << "Out of Memory to Run Code at classtot.. Exiting\n";exit(1);}
//float binloc[innodes+4][resol+8];
float *arr_binloc=(float*)malloc(sizeof(float)*(innodes+4)*(resol+8));
int rn=resol+1;
int iin=innodes+4;
/***************************Let us put up the Network***********************************/
// Start the counter for case 2 here.................
start = times(NULL);
if (ans1==0)
{
n=0;
omax=-400;
omin=400;
while (!feof(fl1))
{
skpchk=0;
for(i=1;i<=innodes;i++)
if (n==0)
{
fscanf(fl1,"%f",&vects[i]);
if(nerror ==2){fscanf(fl1,"%f",&err1vects[i]);fscanf(fl1,"%f",&err2vects[i]);}else
if(nerror ==1){fscanf(fl1,"%f",&err1vects[i]); err2vects[i]=err1vects[i];}
if(vects[i] != MissingDat)
{
min[i]=vects[i];
max[i]=vects[i];
}
else max[i]=MissingDat;
}
else
{
fscanf(fl1,"%f",&vects[i]);
if(vects[i] != MissingDat)
{
if( vects[i]> max[i]) max[i]=vects[i];
if (min[i] > vects[i]) min[i]=vects[i];
}
}
fscanf(fl1,"%f\n",&tmpv);
if(tmpv>omax) omax = tmpv;
if(tmpv<omin) omin =tmpv;
k=1;
j=1;
n++;
}
cout << "No of vectors =" << n <<" and i/n is= " << 1.0/n << "\n";
for(i=1;i<=innodes;i++)
{
if(min[i]==max[i])if(min[i]!=0){min[i]= -1.0*max[i];}else{min[i]=0.0; max[i]=1.0;}
}
if(argfnd==0)
{
cout <<"Do you want to use the saved parameters (Y/N)? ";
cin >>savedpar;
}
if (savedpar == 'y') savedpar='Y';
else
if(savedpar == 'n') savedpar='N';
if((savedpar == 'Y') || (savedpar=='y'))
{
strcpy(fltmp,fln);
strcat(fltmp,".apf");
fl2=NULL;
if((fl2=fopen(fltmp,"r"))!=NULL)
{
cout << "Reading from the saved information\n";
for (i=1;i<=innodes;i++)
{
fscanf(fl2,"%d",&resolution[i]);
for(j=0;j<=resolution[i];j++) arr_binloc[(i*rn)+(j+1)]=j*1.0;
}
cout << innodes << " items read from " << fltmp <<"\n";
}
else
{
cout << "ERROR: File " << fltmp << " not found" << "\n";
exit(1);
}
}
else
for(i=1;i<=innodes;i++)
{
if(min[i]==max[i])if(min[i]!=0){min[i]= -1.0*max[i];}else{min[i]=0.0; max[i]=1.0;}
cin >> resolution[i];
for(j=0;j<=resolution[i];j++) arr_binloc[(i*rn)+(j+1)]=j*1.0;
}
for(k=1;k<=outnodes;k++)
for(i=1;i<=innodes;i++)
for(j=0;j<=resolution[i];j++)
for(l=1;l<=innodes;l++)
for(m=0;m<=resolution[l];m++)
{
//anti_net[i][j][l][m][k]=1;
//anti_wts[i][j][l][m][k]=(float)(1.0);
arr_anti_wts[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]=(double)(1.0);
arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]=1;
}
// Start the counter now...............
start = times(NULL);
rewind(fl1);
tcnt=0;
while (!feof(fl1))
{
tcnt++;
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&vects[i]);
if(nerror ==2){fscanf(fl1,"%f",&err1vects[i]);fscanf(fl1,"%f",&err2vects[i]);}else
if(nerror ==1){fscanf(fl1,"%f",&err1vects[i]);err2vects[i]=err1vects[i];}
}
fscanf(fl1,"%f\n",&tmpv);
for(i=1;i<=innodes;i++)
{
if((vects[i] != MissingDat)&&(max[i] !=MissingDat))
{
vectso[i]=vects[i];
vects[i]=round((vects[i]-min[i])/(max[i]-min[i])*resolution[i]);
err1vects[i]=round((err1vects[i])/(max[i]-min[i])*resolution[i]);
err2vects[i]=round((err2vects[i])/(max[i]-min[i])*resolution[i]);
}
}
for (i=1;i<=innodes;i++)
{
j=0;
if(vects[i] != MissingDat)
{
// oldj=(float)2*resolution[i];
while ((fabs(vects[i]-arr_binloc[(i*rn)+(j+1)]) >=1.0 )&& (j<= resolution[i]))
{
// oldj=fabs(vects[i]-binloc[i][j+1]);
j++;
}
for (l=1;l<=innodes;l++)
{
m=0;
if(i!=l)
{
// oldj=(float)2*resolution[l];
while ((fabs(vects[l]-arr_binloc[(l*rn)+(m+1)]) >=1.0)&& (m<= resolution[l]))
{
// oldj=fabs(vects[l]-binloc[l][m+1]);
m++;
}
k=1;
while ((k<=outnodes)&&(fabs(tmpv - dmyclass[k])) > dmyclass[0]) k++;
//(anti_net[i][j][l][m][k])++;
//(anti_net[i][j][l][m][0])++;
(arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)])++;
(arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)])++;
}
}
}
}
}//end of while
fclose(fl1);
fclose(fl2);
stop = times(NULL);
cout << "The computation took " << fabs(start - stop)*10000/(CLOCKS_PER_SEC) << " Secs.\n";
/*
The conditional Probability,
P(A|B) = P(A intersection B)/P(B) is the
probability for the occurance of A(k) if B(ij) has happened =
Share of B(ij) that is held by A(k) / Probability of total B(ij)
in that particular feature i with resolution j.
*/
strcpy(fltmp,fln);
strcat(fltmp,".awf"); // This file holds the weights
fl6=fopen(fltmp,"w+");
strcpy(fltmp,fln);
strcat(fltmp,".apf"); // This file holds the estimated probability
if((fl1=fopen(fltmp,"w+"))!=NULL)
{
for(i=1;i<=innodes;i++) fprintf(fl1,"%d ",resolution[i]);
fprintf(fl1,"\n%f %f \n",omax,omin);
for(i=1;i<=innodes;i++) fprintf(fl1,"%f ",max[i]);
fprintf(fl1,"\n");
for(i=1;i<=innodes;i++) fprintf(fl1,"%f ",min[i]);
fprintf(fl1,"\n");
for(k=1;k<=outnodes;k++)
{
for(i=1;i<=innodes;i++)
for(j=0;j<=resolution[i];j++)
{
for(l=1;l<=innodes;l++)
if(i!=l)
{
for(m=0;m<=resolution[l];m++)
{
//fprintf(fl1,"%d ",anti_net[i][j][l][m][k]);
//fprintf(fl6,"%f ",(float)anti_wts[i][j][l][m][k]);
fprintf(fl1,"%d ",arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
fprintf(fl6,"%f ",(float)arr_anti_wts[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
}
fprintf(fl6,"\n");
fprintf(fl1,"\n");
}
}
fprintf(fl6,"\n");
fprintf(fl1,"\n");
}
fprintf(fl6,"\n");
fprintf(fl1,"\n");
}
else
{
cout << "Unable to create file for output\n";
exit(1);
}
for(i=1;i<=innodes;i++)
for(j=1;j<=resolution[i];j++)
fprintf(fl6,"%f\n", (float)arr_binloc[(i*rn)+(j)]); /// Let us print the bins.
fclose(fl1);
fclose(fl6);
fflush(NULL);
cout << "Creating the Anticipated Weights data file\n";
}
/**********************************End of Case 0 ******************************/
if(ans1==1)
{
start = times(NULL);
pcnt=0;
pocnt=0;
rslt=0.0;
rslt2=0.0;
orslt=rslt;
orslt2=rslt2;
for(i=0;i<totsize;i++)
arr_anti_wts[i]=0;
cout << "The programe will now modify the compensatory weights\n";
if(argfnd==0)
{
cout << "Please enter the gain:";
cin >> gain;
cout << "Please enter the number of training epochs:";
cin >> oneround;
}
// Start the counter in this round here...................
start = times(NULL);
strcpy(fltmp,fln);
strcat(fltmp,".awf");
if((fl6=fopen(fltmp,"r"))!=NULL)
{
strcpy(fltmp,fln);
strcat(fltmp,".apf");
fl2=NULL;
if((fl2=fopen(fltmp,"r"))!=NULL)
{
for (i=1;i<=innodes;i++)
{
fscanf(fl2,"%d",&resolution[i]);
for(j=0;j<=resolution[i];j++) arr_binloc[(i*rn)+(j+1)]=j*1.0;
}
fscanf(fl2,"\n%f",&omax);
fscanf(fl2,"%f",&omin);
fscanf(fl2,"\n");
for(i=1;i<=innodes;i++) fscanf(fl2,"%f",&max[i]);
fscanf(fl2,"\n");
for(i=1;i<=innodes;i++) fscanf(fl2,"%f",&min[i]);
fscanf(fl2,"\n");
for(i=1;i<=innodes;i++)for(j=0;j<=resolution[i];j++)
//for(l=1;l<=innodes;l++)for(m=0;m<=resolution[l];m++) anti_net[i][j][l][m][0] =0;
for(l=1;l<=innodes;l++)for(m=0;m<=resolution[l];m++) arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)] =0;
int ijk=0;
for(k=1;k<=outnodes;k++)
{
for(i=1;i<=innodes;i++)
for(j=0;j<=resolution[i];j++)
{
for(l=1;l<=innodes;l++)
if(i!=l)
{
for(m=0;m<=resolution[l];m++)
{
ijk++;
//fscanf(fl2,"%d",&anti_net[i][j][l][m][k]);
//anti_net[i][j][l][m][0]+=anti_net[i][j][l][m][k];
//fscanf(fl6,"%f",&anti_wts[i][j][l][m][k]);
fscanf(fl2,"%d",&arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)]+=arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)];
fscanf(fl6,"%f",&arr_anti_wts[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
}
fscanf(fl2,"\n");
fscanf(fl6,"\n");
}
}
fscanf(fl2,"\n");
fscanf(fl6,"\n");
}
for(i=1;i<=innodes;i++)
for(j=1;j<=resolution[i];j++)
fscanf(fl6,"%f\n", &arr_binloc[(i*rn)+(j)]); /// Let us print the bins.
}
else
{
cout << "Unable to Open the APF information file\n";
exit(1);
}
fclose(fl2);
}
else
{
cout << "Unable to Open the AWF information file\n";
exit(1);
}
fclose(fl6);
/*GPU Memory allocation*/
int *d_arr_anti_net,*d_resolution;
float *d_arr_anti_wts,*d_arr_tmpv,*d_arr_vects,*d_arr_err1vects,*d_arr_err2vects,*d_min,*d_max,*d_arr_binloc,*d_dmyclass;
float *d_rslt,*d_rslt2,*tmp_rslt,*tmp_rslt2;
int *d_pcnt,*tmp_pcnt,chunksize=0;
cudaError_t status ;
tmp_pcnt=(int*)malloc(sizeof(int)*numlines);
tmp_rslt=(float*)malloc(sizeof(float)*numlines);
tmp_rslt2=(float*)malloc(sizeof(float)*numlines);
for(i=0;i<numlines;i++)
tmp_pcnt[i]=0;
//allocate memory on GPU
status=cudaMalloc((void **)&d_pcnt,sizeof(int)*(numlines+10));
status=cudaMemset(d_pcnt, 0, sizeof(int)*(numlines+10));
status=cudaMalloc((void **)&d_rslt,sizeof(float)*(numlines+10));
status=cudaMemset(d_rslt, 0, sizeof(float)*(numlines+10));
status=cudaMalloc((void **)&d_rslt2,sizeof(float)*(numlines+10));
status=cudaMemset(d_rslt2, 0, sizeof(float)*(numlines+10));
status=cudaMalloc((void **)&d_arr_anti_net,sizeof(int)*totsize);
status=cudaMemset(d_arr_anti_net, 0, sizeof(int)*totsize);
status=cudaMalloc((void **)&d_resolution,sizeof(int)*(innodes+8));
status=cudaMemset(d_resolution, 0, sizeof(int)*(innodes+8));
status=cudaMalloc((void **)&d_arr_anti_wts,sizeof(float)*totsize);
status=cudaMemset(d_arr_anti_wts, 0, sizeof(int)*totsize);
status=cudaMalloc((void **)&d_arr_tmpv,sizeof(float)*(numlines+2));
status=cudaMemset(d_arr_tmpv, 0, sizeof(float)*(numlines+2));
status=cudaMalloc((void **)&d_arr_vects,sizeof(float)*(numlines+2)*(innodes+outnodes+2));
status=cudaMemset(d_arr_vects, 0, sizeof(float)*(numlines+2)*(innodes+outnodes+2));
status=cudaMalloc((void **)&d_arr_err1vects,sizeof(float)*(numlines+2)*(innodes+2));
status=cudaMemset(d_arr_err1vects, 0, sizeof(float)*(numlines+2)*(innodes+2));
status=cudaMalloc((void **)&d_arr_err2vects,sizeof(float)*(numlines+2)*(innodes+2));
status=cudaMemset(d_arr_err2vects, 0, sizeof(float)*(numlines+2)*(innodes+2));
status=cudaMalloc((void **)&d_min,sizeof(float)*(innodes+2));
status=cudaMemset(d_min, 0, sizeof(float)*(innodes+2));
status=cudaMalloc((void **)&d_max,sizeof(float)*(innodes+2));
status=cudaMemset(d_max, 0,sizeof(float)*(innodes+2));
status=cudaMalloc((void **)&d_arr_binloc,sizeof(float)*(innodes+4)*(resol+8));
status=cudaMemset(d_arr_binloc, 0, sizeof(float)*(innodes+4)*(resol+8));
status=cudaMalloc((void **)&d_dmyclass,sizeof(float)*(classes+2));
status=cudaMemset(d_dmyclass, 0, sizeof(float)*(classes+2));
if (status != cudaSuccess)
printf("Error in cuda memory allocation\n");
for(rnd=0;rnd<=oneround;rnd++) // Training round starts here....
{
if((n==pocnt)&& (n>0)){ printf("breaking\n"); break;}
strcpy(fltmp,fln);
strcat(fltmp,".dat");
fl1=fopen(fltmp,"r");
n=0;
rslt=0.0;
rslt2=0.0;
pcnt=0;
int cindex=0;
for(cindex=0;cindex<numlines;cindex++)
{
for(k=1;k<=outnodes;k++) classval[k]=1.0;
n++;
if(ans1==3)
{
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&arr_vects[(cindex*innodes)+i]);
if(nerror ==2){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);fscanf(fl1,"%f",&err2vects[(cindex*innodes)+i]);}else
if(nerror ==1){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);err2vects[(cindex*innodes)+i]=arr_err1vects[(cindex*innodes)+i];}
}
fscanf(fl1,"\n");
}
else
{
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&arr_vects[(cindex*innodes)+i]);
if(nerror ==2){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);fscanf(fl1,"%f",&arr_err2vects[(cindex*innodes)+i]);}else
if(nerror ==1){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);arr_err2vects[(cindex*innodes)+i]=arr_err1vects[(cindex*innodes)+i];}
}
fscanf(fl1,"%f\n",&arr_tmpv[cindex]);
}
}
fclose(fl1);
cudaMemcpy(d_arr_anti_net,arr_anti_net,sizeof(int)*totsize,cudaMemcpyHostToDevice);
cudaMemcpy(d_resolution,resolution,sizeof(int)*(innodes+8),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_anti_wts,arr_anti_wts,sizeof(float)*totsize,cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_tmpv,arr_tmpv,sizeof(float)*(numlines+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_vects,arr_vects,sizeof(float)*(numlines+2)*(innodes+outnodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_err1vects,arr_err1vects,sizeof(float)*(numlines+2)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_err2vects,arr_err2vects,sizeof(float)*(numlines+2)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_min,min,sizeof(float)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_max,max,sizeof(float)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_binloc,arr_binloc,sizeof(float)*(innodes+4)*(resol+8),cudaMemcpyHostToDevice);
cudaMemcpy(d_dmyclass,dmyclass,sizeof(float)*(classes+2),cudaMemcpyHostToDevice);
int numblocks=ceil(numlines/512.0);
kernel1(numlines,arr_tmpv,min,max,resolution,arr_vects,arr_err1vects,arr_err2vects,arr_binloc,rn,arr_anti_net,arr_anti_wts,dmyclass,gain,innodes,resol,outnodes,nerror,rnd);
strcpy(fltmp,fln);
strcat(fltmp,".dat");
fl1=fopen(fltmp,"r");
m=n;
n=0;
rslt=0.0;
rslt2=0.0;
pcnt=0;
// while (!feof(fl1)) // Test round...
for(cindex=0;cindex<numlines;cindex++)
{
n++;
if(ans1==3)
{
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&arr_vects[(cindex*innodes)+i]);
if(nerror ==2){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);fscanf(fl1,"%f",&arr_err2vects[(cindex*innodes)+i]);}else
if(nerror ==1){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);arr_err2vects[(cindex*innodes)+i]=arr_err1vects[(cindex*innodes)+i];}
}
fscanf(fl1,"\n");
}
else
{
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&arr_vects[(cindex*innodes)+i]);
if(nerror ==2){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);fscanf(fl1,"%f",&arr_err2vects[(cindex*innodes)+i]);}else
if(nerror ==1){fscanf(fl1,"%f",&arr_err1vects[(cindex*innodes)+i]);arr_err2vects[(cindex*innodes)+i]=arr_err1vects[(cindex*innodes)+i];}
}
fscanf(fl1,"%f\n",&arr_tmpv[cindex]);
}
}
fclose(fl1);
i=0;
status=cudaMemset(d_pcnt, 0, sizeof(int)*(numlines+10));
status=cudaMemset(d_rslt, 0, sizeof(float)*(numlines+10));
status=cudaMemset(d_rslt2, 0, sizeof(float)*(numlines+10));
//copy the results
cudaMemcpy(d_resolution,resolution,sizeof(int)*(innodes+8),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_tmpv,arr_tmpv,sizeof(float)*(numlines+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_vects,arr_vects,sizeof(float)*(numlines+2)*(innodes+outnodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_err1vects,arr_err1vects,sizeof(float)*(numlines+2)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_err2vects,arr_err2vects,sizeof(float)*(numlines+2)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_min,min,sizeof(float)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_max,max,sizeof(float)*(innodes+2),cudaMemcpyHostToDevice);
cudaMemcpy(d_arr_binloc,arr_binloc,sizeof(float)*(innodes+4)*(resol+8),cudaMemcpyHostToDevice);
cudaMemcpy(d_dmyclass,dmyclass,sizeof(float)*(classes+2),cudaMemcpyHostToDevice);
if((status = cudaGetLastError()) != cudaSuccess)
{
printf("Error(%s:%d) %s\n",__FILE__,__LINE__,cudaGetErrorString(status));
}
numblocks=ceil(numlines/512.0);
kernel2<<<numblocks,512>>>(numlines,d_arr_tmpv,d_min,d_max,d_resolution,d_arr_vects,d_arr_err1vects,d_arr_err2vects,d_arr_binloc,rn,d_arr_anti_net,d_arr_anti_wts,d_dmyclass,gain,innodes,resol,outnodes,nerror,rnd,d_pcnt,d_rslt,d_rslt2);
if((status = cudaGetLastError()) != cudaSuccess)
{
printf("%s\n",cudaGetErrorString(status));
}
cudaMemcpy(tmp_pcnt,d_pcnt,sizeof(int)*numlines,cudaMemcpyDeviceToHost);
if((status = cudaGetLastError()) != cudaSuccess)
{
printf("%s\n",cudaGetErrorString(status));
}
cudaMemcpy(tmp_rslt,d_rslt,sizeof(float)*numlines,cudaMemcpyDeviceToHost);
if((status = cudaGetLastError()) != cudaSuccess)
{
printf("%s\n",cudaGetErrorString(status));
}
cudaMemcpy(tmp_rslt2,d_rslt2,sizeof(float)*numlines,cudaMemcpyDeviceToHost);
if((status = cudaGetLastError()) != cudaSuccess)
{
printf("%s\n",cudaGetErrorString(status));
}
pcnt=0;
rslt=0;
rslt2=0;
for(i=0;i<numlines;i++)
{
pcnt+=tmp_pcnt[i];
rslt+=tmp_rslt[i];
rslt2+=tmp_rslt2[i];
}
printf("rnd:%d\trslt:%f\tRslt2:%f\tOrslt2:%f\tpcnt:%d\n",rnd,rslt,rslt2,orslt2,pcnt);
kmax=1;
if(orslt2==0) orslt2=rslt2;
if(orslt==0) orslt=rslt;
prslt=(rslt2-orslt2);
if(rslt > 0)
nrslt=(orslt/rslt);
if(pcnt>pocnt)
{
rnn=rnd;
pocnt=pcnt; // The best result is now saved in pocnt
strcpy(fltmp,fln);
strcat(fltmp,".awf");
fl6=fopen(fltmp,"w+");
kmax=1;
for(k=1;k<=outnodes;k++)
{
for(i=1;i<=innodes;i++)
for(j=0;j<=resolution[i];j++)
{
for(l=1;l<=innodes;l++)
if(i!=l)
{
for(m=0;m<=resolution[l];m++)
{
//fprintf(fl6,"%f ",anti_wts[i][j][l][m][k]);
fprintf(fl6,"%f ",arr_anti_wts[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
}
fprintf(fl6,"\n");
}
}
fprintf(fl6,"\n");
}
fprintf(fl6,"\n");
for(i=1;i<=innodes;i++)
for(j=1;j<=resolution[i];j++)
fprintf(fl6,"%f\n", arr_binloc[(i*rn)+(j)]); /// Let us print the bins.
fflush(fl6);
fclose(fl6);
cout << "Round:" << rnn << "| TProb["<<prslt<<"," <<nrslt<<"] | Passed count:" << pocnt << endl;
if(orslt2 <rslt2) orslt2=rslt2;
if(rslt < orslt) orslt=rslt;
}
n=m;
} //rnd inc.
fl6=NULL;
cout << "Best result at round " << rnn<< endl;
} // ans <> 1
/***********************************End of Case 1*******************************/
strcpy(fltmp,fln);
strcat(fltmp,".dat");
fl1=fopen(fltmp,"r");
strcpy(fltmp,fln);
strcat(fltmp,".awf");
fl6=NULL;
fl6=fopen(fltmp,"r");
strcpy(fltmp,fln);
strcat(fltmp,".apf");
fl2=NULL;
if((fl2=fopen(fltmp,"r"))!=NULL)
{
cout << "Creating the Anticipated Network outputs\n";
for (i=1;i<=innodes;i++)
{
fscanf(fl2,"%d",&resolution[i]);
for(j=0;j<=resolution[i];j++) arr_binloc[(i*rn)+(j+1)]=j*1.0;
}
fscanf(fl2,"%f",&omax);
fscanf(fl2,"%f",&omin);
fscanf(fl2,"\n");
for(i=1;i<=innodes;i++) fscanf(fl2,"%f",&max[i]);
fscanf(fl2,"\n");
for(i=1;i<=innodes;i++) fscanf(fl2,"%f",&min[i]);
fscanf(fl2,"\n");
for(i=1;i<=innodes;i++)for(j=0;j<=resolution[i];j++)
for(l=1;l<=innodes;l++)for(m=0;m<=resolution[l];m++) arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)] =0;
for(k=1;k<=outnodes;k++)
{
for(i=1;i<=innodes;i++)
for(j=0;j<=resolution[i];j++)
{
for(l=1;l<=innodes;l++)
if(i!=l)
{
for(m=0;m<=resolution[l];m++)
{
fscanf(fl2,"%d",&arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
fscanf(fl6,"%f",&arr_anti_wts[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)]+=(float)(arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]);
}
fscanf(fl2,"\n");
fscanf(fl6,"\n");
}
}
fscanf(fl2,"\n");
fscanf(fl6,"\n");
}
}
else
{
cout << "Unable to Open the APF information file";
exit(1);
}
for(i=1;i<=innodes;i++)
for(j=1;j<=resolution[i];j++)
{
fscanf(fl6,"%f\n",&arr_binloc[(i*rn)+(j)]); /// Let us print the bins.
}
fclose(fl6);
fl4=fopen("output.dat","w+"); // Network Output values
cout << "Read all input parameters\n";
// *********** case 3 ***********************************************
if (ans1 !=3)
{
fl5=fopen("actual.dat","w+"); // Expected Output Values
strcpy(fltmp,fln);
strcat(fltmp,argp[2]);
strcpy(fltmp,fln);
strcat(fltmp,argp[2]);
strcat(fltmp,".cmp"); // Lets see how well the classification went.
fl7=fopen(fltmp,"w+");
fprintf(fl7,"Sample Predicted Actual Prediction \n");
fprintf(fl7," No. Ist 2nd 3rd 4th item Confidence\n");
c1cnt=0;
c2cnt=0;
invcnt=0;
n=0;
}
// Create classtot values ***********************
while (!feof(fl1))
{
n++;
cmax= 0.0;
c2max=0.0;
c3max=0.0;
c4max=0.0;
kmax=0;
k2max=0;
k3max=0;
k4max=0;
classval[0]=0.0;
if(ans1==3)
{
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&vects[i]);
if(nerror ==2){fscanf(fl1,"%f",&err1vects[i]);fscanf(fl1,"%f",&err2vects[i]);}else
if(nerror ==1){fscanf(fl1,"%f",&err1vects[i]);err2vects[i]=err1vects[i];}
}
fscanf(fl1,"\n");
}
else
{
for (i=1;i<=innodes;i++)
{
fscanf(fl1,"%f",&vects[i]);
if(nerror ==2){fscanf(fl1,"%f",&err1vects[i]);fscanf(fl1,"%f",&err2vects[i]);}else
if(nerror ==1){fscanf(fl1,"%f",&err1vects[i]);err2vects[i]=err1vects[i];}
}
fscanf(fl1,"%f\n",&tmpv);
}
skpchk=0;
for(i=1;i<=innodes;i++)
{
vectso[i]=vects[i];
if((((max[i]-min[i]) >0)&& (vects[i] !=MissingDat))&&(max[i] !=MissingDat))
{
vects[i]=round(((vects[i]-min[i])/(max[i]-min[i]))*resolution[i]);
err1vects[i]=round((err1vects[i])/(max[i]-min[i])*resolution[i]);
err2vects[i]=round((err2vects[i])/(max[i]-min[i])*resolution[i]);
skpchk=0;
}
else
skpchk=1;
}
for(k=1;k<=outnodes;k++) classval[k]=1.0; tmp2_wts=1.0;
for (i=1;i<=innodes;i++)
{
j=0;
if(vects[i]==MissingDat)
skpchk=1;
else
skpchk=0;
if ((resolution[i] >= vects[i]) &&(skpchk==0))
{
while ((fabs(vects[i]-arr_binloc[(i*rn)+(j+1)]) >=1.0)&& (j<= resolution[i]))
{
j++;
}
jx=0;
}
else
{
//NSP_added jx=-1;
jx=1;
}
for (l=1;l<=innodes;l++)
{
if((i!=l) && (jx==0))
{
m=0;
if((vects[l]==MissingDat)||(vects[i]==MissingDat))
skpchk=1;
else
skpchk=0;
if ((resolution[l] >= vects[l]) &&(skpchk==0))
{
while ((fabs(vects[l]-arr_binloc[(l*rn)+(m+1)]) >=1.0)&& (m<= resolution[l]))
{
m++;
}
}
for (k=1;k<=outnodes;k++)
{
if(jx==0){tmp2_wts=(float)arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)];}else{tmp2_wts=1.0/outnodes;}
if(nerror ==2)
{
for(p=(m-(int)err1vects[l]);p<=(m+(int)err2vects[l]);p++)
{
if(p<0) p=0; if(p>resolution[l]) break;
if ((float)arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(p*kk)+k)] > tmp2_wts)
m=p;
}
}
if(nerror ==1)
{
for(p=(m-(int)err1vects[l]);p<=(m+(int)err1vects[l]);p++)
{
if(p<0) p=0; if(p>resolution[l]) break;
if ((float)arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(p*kk)+k)] > tmp2_wts)
m=p;
}
}
if((arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)] > 0) && (resolution[i]>= vects[i])&& (resolution[l]>= vects[l])&&(skpchk==0))
{
if(jx==0){tmp2_wts=(float)arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]*arr_anti_wts[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+k)]*1.0/(arr_anti_net[((i*jk*lk*mk*kk)+(j*lk*mk*kk)+(l*mk*kk)+(m*kk)+0)]);}
else{tmp2_wts=1.0/outnodes;}
}
else
if(skpchk == 1) // || bcchoice == 'y')
{
tmp2_wts= 1.0; //(float)1.0/outnodes; //1.0; //
}
else
{
tmp2_wts=(float)1.0/outnodes;
}
if((resolution[i] >= vects[i])&& (resolution[l]>= vects[l])&&(skpchk==0))
{
classval[k]*=(float)tmp2_wts;
}
}
totprob=0;
for(k=1;k<=outnodes;k++) totprob+=classval[k];
if (totprob==0) {totprob=innodes*outnodes; cout <<"Caution!! Item did not have known types\n";}
for(k=1;k<=outnodes;k++) classval[k]=classval[k]/totprob;
}
}
}
cmax=0.0;
c2max=0.0;
c3max=0.0;
k3max=0.0;
kmax=0.0;
k2max=0.0;
totprob=0.0;
for (k=1;k<=outnodes;k++)
{
if (classval[k] > cmax)
{
c4max=c3max;
k4max=k3max;
c3max=c2max;
k3max=k2max;
c2max=classval[kmax];
k2max=kmax;
cmax=classval[k];
kmax=k;
}
else
if (classval[k]>c2max)
{
c4max=c3max;
k4max=k3max;
c3max=c2max;
k3max=k2max;
c2max=classval[k];
k2max=k;
}
else
if (classval[k]>c3max)
{
c4max=c3max;
k4max=k3max;
c3max=classval[k];
k4max=k;
}
else
if (classval[k]>c4max)
{
c4max=classval[k];
k4max=k;
}
totprob += (float)classval[k];
}
if(totprob <=0.0) totprob=innodes*outnodes;
if(ans1 ==3)
{
if (dmyclass[(int)kmax]- (int)dmyclass[(int)kmax] ==0.0)
{
fprintf(fl4,"%d %d %-5.2f %d %-5.2f %d %-5.2f %d %-5.2f",n, (int)dmyclass[(int)kmax],100.0*((classval[kmax])/totprob),(int)dmyclass[(int)k2max],100.0*((classval[k2max])/totprob),(int)dmyclass[(int)k3max],100.0*((classval[k3max])/totprob),(int)dmyclass[(int)k4max],100.0*((classval[k4max])/totprob));
}
else
{
fprintf(fl4,"%d %f %-5.2f %f %-5.2f %f %-5.2f %f %-5.2f",n, dmyclass[(int)kmax],100.0*((classval[kmax])/totprob),dmyclass[(int)k2max],100.0*((classval[k2max])/totprob),dmyclass[(int)k3max],100.0*((classval[k3max])/totprob),dmyclass[(int)k4max],100.0*((classval[k4max])/totprob));
}
if((fabs(classval[kmax]-classval[k2max]))<0.01*classval[kmax]) //classval[kmax])
{
nLoC+=classval[kmax]/totprob;
nLoCcnt++;
if(classval[kmax]>totprob*LoC) //LoC)
{
fprintf(fl4, " <-- Either of it");
}
else
{
fprintf(fl4, " <-- Rejected");
}
}
else
{
if(classval[kmax]>totprob*LoC) //LoC)
{
fprintf(fl4, " <-- confident");
}
else
{
fprintf(fl4, " <-- Rejected");
}
}
fprintf(fl4,"\n");
}
if(ans1 !=3)
{
if (dmyclass[(int)kmax]- (int)dmyclass[(int)kmax] ==0.0)
{
fprintf(fl4,"%d %d\n",n, (int)dmyclass[(int)kmax]);
fprintf(fl7, "%-8d %d %d %d %d %d ",n,(int)dmyclass[(int)kmax],(int)dmyclass[(int)k2max],(int)dmyclass[(int)k3max],(int)dmyclass[(int)k4max],(int)tmpv);
}
else
{
fprintf(fl4,"%d %f\n",n, dmyclass[(int)kmax]);
fprintf(fl7, "%-8d %f %f %f %f %f ",n,dmyclass[(int)kmax],dmyclass[(int)k2max],dmyclass[(int)k3max],dmyclass[(int)k4max],tmpv);
}
if(fabs(dmyclass[kmax]-tmpv) >= dmyclass[0])
{
if (classval[kmax]==0.0)
{
invcnt++;
fprintf(fl7, "%-5.2f %% <-Out of range %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob));
}
else
{
if (fabs(dmyclass[k2max]-tmpv) < dmyclass[0])
{
if((fabs(classval[kmax]-classval[k2max]))<0.01*classval[k2max]) //classval[kmax])
{
nLoC+=classval[kmax]/totprob;
nLoCcnt++;
if (classval[kmax]>totprob*LoC) // LoC)
{
c2cnt++; // No more differences. NSP (OCT 2001)
fprintf(fl7, "%-5.2f %% <-F(1)P(2) %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
}
else
{
fprintf(fl7, "%-5.2f %% <-FMC %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
invcnt++;
}
}
else
{
if (classval[kmax]>totprob*LoC) // LoC)
{
fprintf(fl7, "%-5.2f %% <-Failed %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
}
else
{
fprintf(fl7, "%-5.2f %% <-FMC %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
invcnt++;
}
}
}
else
{
if (classval[kmax]>totprob*LoC) // LoC)
{
fprintf(fl7, "%-5.2f %% <-Failed %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
}
else
{
fprintf(fl7, "%-5.2f %% <-FMC %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
invcnt++;
}
}
}
}
else
{
if((fabs(classval[kmax]-classval[k2max]))<0.01*classval[kmax])
{
nLoC+=classval[kmax]/totprob;
nLoCcnt++;
if (classval[kmax]>totprob*LoC) // LoC)
{
fprintf(fl7, "%-5.2f %% <-P(1)F(2) %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
c1cnt++;
}
else
{
invcnt++;
fprintf(fl7, "%-5.2f %% <-PMC %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
}
}
else
{
if (classval[kmax]>totprob*LoC) // LoC)
{
fprintf(fl7, "%-5.2f %% <-Passed %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
c1cnt++;
}
else
{
invcnt++;
fprintf(fl7, "%-5.2f %% <-PMC %-5.2f %% %-5.2f %% %-5.2f %% \n",100.0*((classval[kmax])/totprob),100.0*((classval[k2max])/totprob),100.0*((classval[k3max])/totprob),100.0*((classval[k4max])/totprob));
}
}
}
fprintf(fl5,"%d %e \n",n,(float) tmpv);
} // ans1 != 3 ends here ******************
}
cout << "The suggested LoC is " << nLoC/nLoCcnt << "\n";
fclose(fl1);
fclose(fl2);
fclose(fl4);
if(ans1 < 3)
{
strcpy(fltmp,fln);
// tmp2_wts=0.0;
fclose(fl5);
fprintf(fl7,"*________________________________________________________________________\n");
fprintf(fl7,"*Total Success in Success in Non classified Real success in \n");
cout << "*________________________________________________________________________\n";
cout << "*Total Success in Success in Non classified Real success in \n";
if (outnodes > 3)
{
fprintf(fl7,"* No. Ist Choice 2nd Choice items two chances \n");
fprintf(fl7,"* %d %d %d %d %-5.2f %% \n",n,c1cnt,c2cnt,invcnt,(float)100.0*(c1cnt+c2cnt)/(n-invcnt));
cout << "* No. Ist Choice 2nd Choice items two chances \n";
printf("* %d %d %d %d %-5.2f %% \n",n,c1cnt,c2cnt,invcnt,(float)100.0*(c1cnt+c2cnt)/(n-invcnt));
}
else
{
fprintf(fl7,"* No. Ist Choice 2nd Choice items First chance \n");
fprintf(fl7,"* %d %d %d %d %-5.2f %% \n",n,c1cnt,c2cnt,invcnt,(float)100.0*(c1cnt)/(n-invcnt));
cout << "* No. Ist Choice 2nd Choice items First chance \n";
printf("* %d %d %d %d %-5.2f %% \n",n,c1cnt,c2cnt,invcnt,(float)100.0*(c1cnt)/(n-invcnt));
}
fprintf(fl7,"*________________________________________________________________________\n");
printf("*________________________________________________________________________\n");
fclose(fl7);
} // ******** ans1!=3 ends here *************
cout << "Done.\n";
stop = times(NULL);
cout << "The computation took " << fabs(start - stop)*10000/(CLOCKS_PER_SEC) << " Secs.\n";
} //end main
|
4,988 | // cuda_example3.cu : Defines the entry point for the console application.
//
#include <stdio.h>
#include <string.h>
#include <cuda.h>
const int N = 64;
// Kernel: set every element of an N x N matrix to 1.
// `a` must be a DEVICE array of N device row pointers (a[row] is itself
// a valid device pointer).  Launch with a 2D grid/block covering N x N.
__global__ void foo( float **a, int N )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard the grid tail: only threads inside the matrix write.
    if ( row < N && col < N )
        a[row][col] = 1;
}
// main routine that executes on the host
// main routine that executes on the host.
//
// Fixes relative to the original:
//  * The kernel takes `float **` (an array of row pointers), but the host
//    passed a single flat cudaMalloc'd buffer; dereferencing a[i] on the
//    device read garbage pointers.  We now build a device array of device
//    row pointers backed by one flat data buffer.
//  * The launch used a scalar grid (n_blocks) with a 2D block, so
//    blockIdx.y was always 0 and only the first 4 columns were written.
//    A 2D grid dim3(n_blocks, n_blocks) covers the whole N x N matrix.
int main( void )
{
    const int block_size = 4;
    int n_blocks;
    dim3 dimblock(block_size, block_size);
    float a_h[N][N];                  // host matrix
    float *data_d = NULL;             // flat N*N device buffer with the elements
    float **a_d = NULL;               // device array of N row pointers into data_d
    size_t size = N * N * sizeof( float );

    cudaMalloc( (void **)&data_d, size );            // element storage
    cudaMalloc( (void **)&a_d, N * sizeof(float *) ); // row-pointer table

    // Build the row-pointer table on the host (pointers into device
    // memory), then copy it to the device so a[i][j] works in the kernel.
    float *rows_h[N];
    for ( int i = 0; i < N; i++ )
        rows_h[i] = data_d + i * N;
    cudaMemcpy( a_d, rows_h, N * sizeof(float *), cudaMemcpyHostToDevice );

    // Initialize host array and copy it to CUDA device
    for ( int i = 0; i < N; i++ ){
        for ( int j = 0; j < N; j++ ){
            a_h[i][j] = (float)i;
        }
    }
    for ( int i = 0; i < N; i++ ){
        for ( int j = 0; j < N; j++ ){
            printf("%d ", (int)a_h[i][j]);
        }
    }
    puts("\n");
    /*
    invocando o kernel
    */
    n_blocks = N / block_size + ( N % block_size == 0 ? 0 : 1 );
    dim3 dimgrid(n_blocks, n_blocks);   // 2D grid: cover both i and j
    cudaMemcpy( data_d, a_h, size, cudaMemcpyHostToDevice );
    foo<<< dimgrid, dimblock >>>( a_d, N );
    cudaMemcpy( a_h, data_d, size, cudaMemcpyDeviceToHost );
    puts("\nDEVICE - HOST:\n");
    for ( int i = 0; i < N; i++ ){
        for ( int j = 0; j < N; j++ ){
            printf("%d ", (int)a_h[i][j]);
        }
    }
    puts("\n");
    cudaFree( data_d );
    cudaFree( a_d );
    return 0;
}
|
4,989 | //pass
//--gridDim=8 --blockDim=512
// Dummy kernel: element-wise integer division of src[] by num into dst[].
// The GPUVerify directives above pin the launch at gridDim=8, blockDim=512,
// so the global id ranges over [0, 4096); buffers are assumed at least that
// large (no explicit bounds guard, matching the verification harness).
__global__ void simpleKernel(int *dst, int *src, int num)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    dst[gid] = src[gid] / num;
}
4,990 | #include<stdio.h>
#include<assert.h>
#include<cuda.h>
#include<errno.h>
#include<math.h>
#include<sys/time.h>
#define MAX_VAL 10
#define BLOCK_WIDTH 256
#define MAX_SIZE 2048*2048*2
cudaError_t cuerr;
/*
 * Allocate an uninitialized host array of `size` floats.
 *
 * Returns the new buffer, or NULL on failure (a diagnostic is printed).
 * Fix: the original only inspected errno, which malloc is not required
 * to set on every platform; testing the returned pointer is the
 * reliable failure check.
 */
float* createArray(int size)
{
    float *temp;
    errno = 0;
    temp = (float*) malloc (sizeof(float)*size);
    if (temp == NULL)
        printf("Error: %s, %d, %s", __FILE__, __LINE__,
               errno ? strerror(errno) : "malloc failed");
    return temp;
}
/* Release a host buffer obtained from createArray (free(NULL) is a no-op). */
void destroyArray(float* p)
{
    free(p);
}
/* Fill p[0..size-1] with pseudo-random integer values in [0, MAX_VAL). */
void initArray(float* p, int size)
{
    int idx = 0;
    while (idx < size)
    {
        p[idx] = rand() % MAX_VAL;
        ++idx;
    }
}
/* Copy `size` floats from p into q (note: destination is the SECOND arg). */
void copyArray(float *p, float *q, int size)
{
    const float *src = p;
    float *dst = q;
    int remaining = size;
    while (remaining-- > 0)
        *dst++ = *src++;
}
/* Print the first `size` elements of p, preceded by a newline (debug aid). */
void printArray(float *p, int size)
{
    int idx;
    printf("\n");
    for (idx = 0; idx < size; ++idx)
        printf("%4.2f ", p[idx]);
}
/*
 * In-place interleaved-addressing tree reduction on the host:
 * after the call p[0] holds the sum of p[0..size-1] for power-of-two
 * `size` (the classic CUDA reduction reference, run serially).
 *
 * Fixes relative to the original:
 *  * the pair index is now bounds-checked (ind + j < size), so sizes
 *    that are not powers of two no longer read past the buffer end;
 *  * the per-iteration printArray(p, 10) debug call is removed — it
 *    read p[0..9] unconditionally, an out-of-bounds access whenever
 *    size < 10, and spammed stdout on every pass.
 */
void reduce(float* p, int size)
{
    for( size_t j=1;j<=size/2;j*=2)
    {
        for( size_t i=0;i<size/2;i++)
        {
            size_t ind = 2*i*j;
            /* Both operands must be in range before combining. */
            if(ind < (size_t)size && ind + j < (size_t)size)
                p[ind] = p[ind]+p[ind+j];
        }
    }
}
// In-place pairwise reduction (sequential addressing): after the call
// p[0] holds the sum of the original p[0..size).
// `size` is assumed to be a power of two.
void reduce2(float* p, int size)
{
    for (size_t j = (size_t)size / 2; j > 0; j /= 2)
    {
        // Only the first j slots still hold live partial sums. The
        // original iterated i < size/2 with a bounds guard, performing
        // extra updates on dead slots every pass after the first.
        for (size_t i = 0; i < j; i++)
            p[i] = p[i] + p[i + j];
    }
}
// Allocate `size` floats on the device into *p.
// Aborts the process with a file/line diagnostic on failure.
void createArrayDevice(float **p, int size)
{
cuerr = cudaSuccess;
cuerr = cudaMalloc(p, sizeof(float)*size);
if (cuerr != cudaSuccess)
{
fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
exit(EXIT_FAILURE);
}
}
// Copy `size` floats from host memory to device memory (blocking).
// Aborts the process with a file/line diagnostic on failure.
void transferToDevice(float *hostptr, float *deviceptr, int size)
{
cuerr = cudaSuccess;
cuerr = cudaMemcpy(deviceptr, hostptr, sizeof(float)*size, cudaMemcpyHostToDevice);
if (cuerr != cudaSuccess)
{
fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
exit(EXIT_FAILURE);
}
}
// Copy `size` floats from device memory back to host memory (blocking).
// Aborts the process with a file/line diagnostic on failure.
void transferFromDevice(float *hostptr, float *deviceptr, int size)
{
cuerr = cudaSuccess;
cuerr = cudaMemcpy(hostptr, deviceptr, sizeof(float)*size, cudaMemcpyDeviceToHost);
if (cuerr != cudaSuccess)
{
fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
exit(EXIT_FAILURE);
}
}
// Per-block shared-memory reduction with interleaved addressing.
// Each thread loads two consecutive elements; after the loop localblock[0]
// holds the block's partial sum, which lane 0 publishes to con[blockIdx.x].
// NOTE(review): there is no bounds guard — assumes the grid exactly covers
// `size` elements (size a multiple of BLOCK_WIDTH*2); confirm at call sites.
__global__
void reduceKernel(float *p, int size, float *con)
{
__shared__ float localblock[BLOCK_WIDTH*2];
int tx = threadIdx.x;
int in = blockIdx.x * blockDim.x + tx;
// Two global loads per thread into block-local scratch.
localblock[tx*2] = p[in*2];
localblock[tx*2+1] = p[in*2+1];
__syncthreads();
for( int it=1; it<= (BLOCK_WIDTH*2)/2; it*=2)
{
int ind = 2 * tx * it;
if(ind < blockDim.x*2)
localblock[ind] = localblock[ind]+localblock[ind+it];
__syncthreads();
}
// NOTE(review): this writes an intermediate (meaningless) partial back
// into p; only the con[] values are consumed by the host afterwards.
p[in]=localblock[tx];
if(tx == 0) con[blockIdx.x] = localblock[tx];
}
// Launch reduceKernel over `size` elements (each thread consumes two),
// leaving one partial sum per block in `con`.
void preduce(float *p, int size, float *con)
{
    // Integer ceiling division. The original wrote ceil(size/(BLOCK_WIDTH*2)),
    // but the integer division inside ceil() has already truncated, so the
    // ceil was a no-op and a partially-filled tail block was never launched.
    int nblocks = (size + BLOCK_WIDTH*2 - 1) / (BLOCK_WIDTH*2);
    dim3 gridProp(nblocks, 1, 1);
    dim3 blockProp(BLOCK_WIDTH, 1, 1);
    printf("\nRunnig Kernel with %d thpb, %d bpg\n", BLOCK_WIDTH, nblocks);
    reduceKernel<<<gridProp,blockProp>>>(p, size, con);
    // A kernel launch returns no status: the original re-checked the
    // cuerr it had just set to cudaSuccess, so the test could never fire.
    // Fetch the actual launch error instead.
    cuerr = cudaGetLastError();
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
        exit(EXIT_FAILURE);
    }
}
// Sequential-addressing reduction variant; p[0] receives the final sum.
// NOTE(review): `tx` is a *global* thread id but indexes the block-local
// shared array, so this kernel is only correct when launched with exactly
// one block (as preduce2 does). With gridDim.x > 1 it would index shared
// memory out of bounds.
// NOTE(review): the loop bound uses `size`, not blockDim.x — for
// size > 2*BLOCK_WIDTH the early iterations are no-ops; confirm intent.
__global__
void reduceKernel2(float *p, int size)
{
__shared__ float localblock[BLOCK_WIDTH*2];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
localblock[tx*2] = p[tx*2];
localblock[tx*2+1] = p[tx*2+1];
__syncthreads();
for( int it=size/2; it>0; it/=2)
{
if(tx+it < blockDim.x*2)
localblock[tx] = localblock[tx]+localblock[tx+it];
__syncthreads();
}
p[tx] = localblock[tx];
}
// Launch the single-block reduction variant. reduceKernel2 indexes shared
// memory with a global thread id, so the grid must stay at one block.
void preduce2(float *p, int size)
{
    dim3 gridProp(1,1,1);
    dim3 blockProp(BLOCK_WIDTH,1,1);
    printf("Runnig Kernel with %d thpb, %d bpg\n", BLOCK_WIDTH, (int)ceil(size/(BLOCK_WIDTH*2)));
    reduceKernel2<<<gridProp,blockProp>>>(p, size);
    // A kernel launch returns no status; the original checked cuerr
    // without ever assigning it after the launch, so the check was dead.
    cuerr = cudaGetLastError();
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "%s, %d.\n %s.", __FILE__, __LINE__, cudaGetErrorString(cuerr));
        exit(EXIT_FAILURE);
    }
}
// Benchmark two CPU reductions (reduce, reduce2) and a GPU shared-memory
// reduction on the same MAX_SIZE input, printing the elapsed time of each.
int main()
{
    float *A, *B, *C;
    int Asize = MAX_SIZE;
    struct timeval as, ae, bs, be, ad, bd;
    cudaEvent_t custart, cuend;
    float timeelap;
    cudaEventCreate(&custart);
    cudaEventCreate(&cuend);
    // `container` gathers one partial sum per GPU block.
    float *container;
    int csize = (MAX_SIZE/(BLOCK_WIDTH*2));
    A = createArray(Asize);
    assert(A != NULL);
    B = createArray(Asize);
    assert(B != NULL);
    C = createArray(Asize);
    assert(C != NULL);
    container = createArray(csize);
    assert(container != NULL);
    for (int i = 0; i < csize; i++)
        container[i] = 0;
    initArray(A, Asize);
    copyArray(A, B, Asize);
    copyArray(A, C, Asize);
    gettimeofday(&as, NULL);
    reduce(A, Asize);
    gettimeofday(&ae, NULL);
    gettimeofday(&bs, NULL);
    reduce2(B, Asize);
    gettimeofday(&be, NULL);
    // Device buffers: dA holds the input, dcont the per-block partials.
    // (The unused dB declaration from the original was removed.)
    float *dA, *dcont;
    createArrayDevice(&dA, Asize);
    createArrayDevice(&dcont, csize);
    printArray(C, 10);
    cudaEventRecord(custart);
    transferToDevice(C, dA, Asize);
    preduce(dA, Asize, dcont);
    cudaEventRecord(cuend);
    cudaEventSynchronize(cuend);
    cudaDeviceSynchronize();
    cudaEventElapsedTime(&timeelap, custart, cuend);
    transferFromDevice(C, dA, Asize);
    transferFromDevice(container, dcont, csize);
    // Final combine of the per-block partial sums on the CPU.
    printf("\n[");
    float finsum = 0;
    for (int i = 0; i < csize; i++)
        finsum += (float)container[i];
    printf("%f", finsum);
    printf("]\n");
    printArray(C, 10);
    printf("\nC[0] is %f\n", C[0]);
    timersub(&ae, &as, &ad);
    timersub(&be, &bs, &bd);
    printf(" %f reduce\n", (float)(ad.tv_sec*1000)+(ad.tv_usec/1000));
    printf(" %f reduce2\n", (float)(bd.tv_sec*1000)+(bd.tv_usec/1000));
    printf(" %f preduce2\n", timeelap);
    // Release everything. The original leaked C, container and both device
    // buffers, and never destroyed the CUDA events.
    destroyArray(A);
    destroyArray(B);
    destroyArray(C);
    destroyArray(container);
    cudaFree(dA);
    cudaFree(dcont);
    cudaEventDestroy(custart);
    cudaEventDestroy(cuend);
    return 0;
}
/*
If the array size spans multiple blocks
create MAX_SIZE/BLOCK_WIDTH array
This array will store value from each block
rerun reduction on this array or use CPU to calcualte the final value
*/
|
4,991 | #include <stdio.h>
#include <stdlib.h>
// Store the constant 7 through ptr. The caller deliberately passes a NULL
// device pointer so the resulting fault exercises the host error check.
__global__ void foo(int *ptr){
    *ptr = 7;
}
// Launch foo with a NULL device pointer on purpose: the write faults, the
// error surfaces at the next synchronization point, and cudaGetLastError
// reports it.
int main(){
    foo<<<1,1>>>(0);
    // cudaThreadSynchronize() has been deprecated since CUDA 4.0;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("Cuda error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    return 0;
}
4,992 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <thrust/generate.h>
#include <thrust/detail/type_traits.h>
#include <algorithm>
#include <time.h>
#include <limits.h>
#include <math.h>
// Sort numElements random doubles on the GPU with thrust::sort, averaging
// the measured sort time over numIterations runs. Returns true when the
// final output is verified sorted on the host.
bool thrustSort (int numElements, int numIterations) {
thrust::host_vector<double> h_keys(numElements);
thrust::host_vector<double> h_keysSorted(numElements);
// Fill up with some random data
thrust::default_random_engine rng(clock());
thrust::uniform_real_distribution<double> u01(0, 1);
for (int i = 0; i < (int)numElements; i++)
h_keys[i] = u01(rng);
// Copy data onto the GPU
thrust::device_vector<double> d_keys = h_keys;
// run multiple iterations to compute an average sort time
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
float totalTime = 0;
// NOTE(review): signed/unsigned comparison — numIterations is an int.
for (unsigned int i = 0; i < numIterations; i++)
{
// reset data before sort
d_keys = h_keys;
cudaEventRecord(start_event, 0);
thrust::sort(d_keys.begin(), d_keys.end());
cudaEventRecord(stop_event, 0);
// Block until the recorded stop event (and thus the sort) completes,
// so the elapsed time below is meaningful.
cudaEventSynchronize(stop_event);
float time = 0;
cudaEventElapsedTime(&time, start_event, stop_event);
totalTime += time;
}
printf("Sorting %d elements\n. Average time of %d runs is: %.5f ms\n",
numElements, numIterations, (totalTime) / numIterations);
// Get results back to host for correctness checking
thrust::copy(d_keys.begin(), d_keys.end(), h_keysSorted.begin());
// Check results
bool bTestResult = thrust::is_sorted(h_keysSorted.begin(), h_keysSorted.end());
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return bTestResult;
}
// Sweep problem sizes 2^10 .. 2^30, timing five sorts at each size.
// NOTE(review): at size=30 each host vector holds 2^30 doubles (~8 GiB),
// plus a device copy — the largest sizes will exhaust memory on most
// machines; confirm the intended upper bound.
int main() {
bool bTestResult = false;
for (int size = 10; size <= 30; size++)
{
int elems = pow(2,size);
bTestResult = thrustSort(elems, 5);
printf(bTestResult ? "Test passed\n" : "Test failed!\n");
}
return 0;
}
|
4,993 | #include <iostream>
#include "../include/matrixMultiplication.cuh"
namespace blas3{
namespace cudaBlas {
// Naive GEMM: one thread per output element.
// A is m x n (row stride n), B is n x k (row stride k), result is m x k.
__global__ void
naiveMatrixMultiplication(float *MatA, float *MatB, float *result, size_t m, size_t n, size_t k) {
    unsigned int column_id = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row_id = blockIdx.y * blockDim.y + threadIdx.y;
    float sum = 0.0f;
    if (row_id < m && column_id < k) {
        // The dot product runs over the shared inner dimension n. The
        // original looped to k, which is only correct for square n == k.
        for (size_t i = 0; i < n; i++) {
            sum += MatA[n * row_id + i] * MatB[i * k + column_id];
        }
        // result is m x k, so its row stride is k (the original used n).
        result[row_id * k + column_id] = sum;
    }
}
// Tiled GEMM staging 32x32 tiles of A and B in shared memory.
// A is (M x K) with row stride K, B has row stride N; output tile origin is
// computed from the block indices.
// NOTE(review): there are no edge guards — assumes blockDim = (32, 32) and
// M, N, K exact multiples of BLOCK_SIZE; confirm at launch sites.
__global__ void
sharedMatrixMultiplication(float *MatA, float *MatB, float *result, size_t M, size_t K, size_t N) {
/*
* Matrix Multiplication with
* Tiling
* Coalesced Access
* No Bank Conflict
* */
#define BLOCK_SIZE 32
unsigned int block_x_id = blockIdx.x;
unsigned int block_y_id = blockIdx.y;
unsigned int thread_x_id = threadIdx.x;
unsigned int thread_y_id = threadIdx.y;
__shared__ float A_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float B_shared[BLOCK_SIZE][BLOCK_SIZE];
// First/last A tile of this block row, and the per-iteration strides.
auto a_start_index = K * BLOCK_SIZE * block_y_id;
auto a_end = a_start_index + K - 1;
int a_step = BLOCK_SIZE;
auto b_start_index = BLOCK_SIZE * block_x_id;
auto b_step = BLOCK_SIZE * N;
float sum = 0.0f;
for (int a = a_start_index, b = b_start_index; a <= a_end; a += a_step, b += b_step) {
// Stage one tile of each operand, then barrier before use.
A_shared[thread_y_id][thread_x_id] = MatA[a + K * thread_y_id + thread_x_id];
B_shared[thread_y_id][thread_x_id] = MatB[b + N * thread_y_id + thread_x_id];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; k++) {
sum += A_shared[thread_y_id][k] * B_shared[k][thread_x_id];
}
// Barrier again so no thread overwrites a tile still being read.
__syncthreads();
}
auto result_index = N * BLOCK_SIZE * block_y_id + BLOCK_SIZE * block_x_id;
result[result_index + N * thread_y_id + thread_x_id] = sum;
}
// Register-blocked GEMM: each thread accumulates a 32-long strip of the
// output in registers (result_vector) while A tiles are staged in shared
// memory; B is streamed straight from global memory.
// NOTE(review): the manually unrolled body implies blockDim =
// (TILE_SIZE, VECTOR_SIZE) and dimensions that are exact multiples of the
// tile sizes — there are no edge guards; confirm at launch sites.
__global__ void
MatrixMultiplication_reducedOps32(float *MatA, float *MatB, float *result, size_t M, size_t N, size_t K) {
const int TILE_SIZE = 32;
const int VECTOR_SIZE = 4;
unsigned int block_x = blockIdx.x;
unsigned int thread_x = threadIdx.x;
unsigned int block_y = blockIdx.y;
unsigned int thread_y = threadIdx.y;
__shared__ float A_shared[TILE_SIZE * TILE_SIZE];
float result_vector[TILE_SIZE] = {0};
auto a_start_index = K * TILE_SIZE * block_y;
auto a_end = a_start_index + K - 1;
auto a_step = TILE_SIZE;
auto b_start_index = TILE_SIZE * VECTOR_SIZE * block_x;
auto b_step = TILE_SIZE * N;
for (int a = a_start_index, b = b_start_index; a <= a_end; a += a_step, b += b_step) {
// Cooperative, transposed staging of the A tile into shared memory.
#pragma unroll
for (int i = 0; i < TILE_SIZE / VECTOR_SIZE; ++i) {
A_shared[(i * VECTOR_SIZE + thread_y) + TILE_SIZE * thread_x] =
MatA[a + K * (i * VECTOR_SIZE + thread_y) + thread_x];
}
__syncthreads();
float *a_shared_base = A_shared;
float *b_base = MatB + b + TILE_SIZE * thread_y + thread_x;
// One B value per step is broadcast against a 32-wide A column.
#pragma unroll
for (int i = 0; i < TILE_SIZE; i++) {
float b_value = *b_base;
result_vector[0] += a_shared_base[0] * b_value;
result_vector[1] += a_shared_base[1] * b_value;
result_vector[2] += a_shared_base[2] * b_value;
result_vector[3] += a_shared_base[3] * b_value;
result_vector[4] += a_shared_base[4] * b_value;
result_vector[5] += a_shared_base[5] * b_value;
result_vector[6] += a_shared_base[6] * b_value;
result_vector[7] += a_shared_base[7] * b_value;
result_vector[8] += a_shared_base[8] * b_value;
result_vector[9] += a_shared_base[9] * b_value;
result_vector[10] += a_shared_base[10] * b_value;
result_vector[11] += a_shared_base[11] * b_value;
result_vector[12] += a_shared_base[12] * b_value;
result_vector[13] += a_shared_base[13] * b_value;
result_vector[14] += a_shared_base[14] * b_value;
result_vector[15] += a_shared_base[15] * b_value;
result_vector[16] += a_shared_base[16] * b_value;
result_vector[17] += a_shared_base[17] * b_value;
result_vector[18] += a_shared_base[18] * b_value;
result_vector[19] += a_shared_base[19] * b_value;
result_vector[20] += a_shared_base[20] * b_value;
result_vector[21] += a_shared_base[21] * b_value;
result_vector[22] += a_shared_base[22] * b_value;
result_vector[23] += a_shared_base[23] * b_value;
result_vector[24] += a_shared_base[24] * b_value;
result_vector[25] += a_shared_base[25] * b_value;
result_vector[26] += a_shared_base[26] * b_value;
result_vector[27] += a_shared_base[27] * b_value;
result_vector[28] += a_shared_base[28] * b_value;
result_vector[29] += a_shared_base[29] * b_value;
result_vector[30] += a_shared_base[30] * b_value;
result_vector[31] += a_shared_base[31] * b_value;
a_shared_base += TILE_SIZE;
b_base += N;
}
__syncthreads();
}
// Scatter the register strip to its column of the output tile.
auto c_ptr = N * TILE_SIZE * block_y + TILE_SIZE * VECTOR_SIZE * block_x;
c_ptr += TILE_SIZE * thread_y + thread_x;
#pragma unroll
for (int i = 0; i < TILE_SIZE; ++i) {
result[c_ptr] = result_vector[i];
c_ptr += N;
}
}
// 16-wide variant of the register-blocked GEMM above: each thread keeps a
// 16-long strip of the output in registers while A tiles sit in shared
// memory and B streams from global memory.
// NOTE(review): same preconditions as the 32-wide version — blockDim =
// (TILE_SIZE_16, VECTOR_SIZE) and dimensions that are exact multiples of
// the tile sizes; no edge guards.
__global__ void
MatrixMultiplication_reducedOps16(float *MatA, float *MatB, float *result, size_t M, size_t N, size_t K) {
const int TILE_SIZE_16 = 16;
const int VECTOR_SIZE = 4;
unsigned int block_x = blockIdx.x;
unsigned int thread_x = threadIdx.x;
unsigned int block_y = blockIdx.y;
unsigned int thread_y = threadIdx.y;
__shared__ float A_shared[TILE_SIZE_16 * TILE_SIZE_16];
float result_vector[TILE_SIZE_16] = {0};
auto a_start_index = K * TILE_SIZE_16 * block_y;
auto a_end = a_start_index + K - 1;
auto a_step = TILE_SIZE_16;
auto b_start_index = TILE_SIZE_16 * VECTOR_SIZE * block_x;
auto b_step = TILE_SIZE_16 * N;
for (int a = a_start_index, b = b_start_index; a <= a_end; a += a_step, b += b_step) {
// Cooperative, transposed staging of the A tile into shared memory.
#pragma unroll
for (int i = 0; i < TILE_SIZE_16 / VECTOR_SIZE; ++i) {
A_shared[(i * VECTOR_SIZE + thread_y) + TILE_SIZE_16 * thread_x] =
MatA[a + K * (i * VECTOR_SIZE + thread_y) + thread_x];
}
__syncthreads();
float *a_shared_base = A_shared;
float *b_base = MatB + b + TILE_SIZE_16 * thread_y + thread_x;
#pragma unroll
for (int i = 0; i < TILE_SIZE_16; i++) {
float b_value = *b_base;
result_vector[0] += a_shared_base[0] * b_value;
result_vector[1] += a_shared_base[1] * b_value;
result_vector[2] += a_shared_base[2] * b_value;
result_vector[3] += a_shared_base[3] * b_value;
result_vector[4] += a_shared_base[4] * b_value;
result_vector[5] += a_shared_base[5] * b_value;
result_vector[6] += a_shared_base[6] * b_value;
result_vector[7] += a_shared_base[7] * b_value;
result_vector[8] += a_shared_base[8] * b_value;
result_vector[9] += a_shared_base[9] * b_value;
result_vector[10] += a_shared_base[10] * b_value;
result_vector[11] += a_shared_base[11] * b_value;
result_vector[12] += a_shared_base[12] * b_value;
result_vector[13] += a_shared_base[13] * b_value;
result_vector[14] += a_shared_base[14] * b_value;
result_vector[15] += a_shared_base[15] * b_value;
a_shared_base += TILE_SIZE_16;
b_base += N;
}
__syncthreads();
}
// Scatter the register strip to its column of the output tile.
auto c_ptr = N * TILE_SIZE_16 * block_y + TILE_SIZE_16 * VECTOR_SIZE * block_x;
c_ptr += TILE_SIZE_16 * thread_y + thread_x;
#pragma unroll
for (int i = 0; i < TILE_SIZE_16; ++i) {
result[c_ptr] = result_vector[i];
c_ptr += N;
}
}
// 32x32 tiled GEMM with zero-padded edge tiles, so M, N and K need not be
// multiples of the tile size. A is M x N, B is N x K, Result is M x K.
// Expects blockDim = (32, 32).
__global__ void sharedGEMM(float *MatA, float *MatB, float *Result, size_t M, size_t N, size_t K) {
const unsigned int TILE_SIZE = 32;
unsigned int row_id = blockIdx.y * TILE_SIZE + threadIdx.y;
unsigned int column_id = blockIdx.x * TILE_SIZE + threadIdx.x;
__shared__ float A_shared[TILE_SIZE][TILE_SIZE];
__shared__ float B_shared[TILE_SIZE][TILE_SIZE];
float sum = 0.0f;
// March over the shared dimension N one tile at a time (ceil division).
for (size_t m = 0; m < (TILE_SIZE + N - 1) / TILE_SIZE; m++) {
// Out-of-range lanes stage zeros so the inner product stays correct.
if (m * TILE_SIZE + threadIdx.x < N and row_id < M)
A_shared[threadIdx.y][threadIdx.x] = MatA[row_id * N + m * TILE_SIZE + threadIdx.x];
else
A_shared[threadIdx.y][threadIdx.x] = 0.0f;
if (m * TILE_SIZE + threadIdx.y < N && column_id < K) {
B_shared[threadIdx.y][threadIdx.x] = MatB[(m * TILE_SIZE + threadIdx.y) * K +
column_id]; //Coalesced ??
} else
B_shared[threadIdx.y][threadIdx.x] = 0.0f;
__syncthreads();
for (int n = 0; n < TILE_SIZE; n++) {
sum += A_shared[threadIdx.y][n] * B_shared[n][threadIdx.x];
}
__syncthreads();
}
// Guarded write-back: only in-range elements of the M x K output.
if (row_id < M and column_id < K) {
Result[((blockIdx.y * blockDim.y + threadIdx.y) * K) +
blockIdx.x * blockDim.x + threadIdx.x] = sum;
}
}
}
}
/*
int main(){
size_t m = 102;
size_t n = 604;
size_t k = 366;
float* h_a, *h_b, *h_c;
float* d_a, *d_b, *d_c;
h_a = (float*) malloc(m * n * sizeof(float ));
h_b = (float*) malloc(n * k * sizeof(float ));
h_c = (float*) malloc(m * k * sizeof(float ));
cudaMalloc(&d_a, m * n * sizeof(float ));
cudaMalloc(&d_b, n * k * sizeof(float ));
cudaMalloc(&d_c, m * k * sizeof(float ));
for(int i = 0; i < m * n;i++){
h_a[i] = 1.0f;
}
for(int i=0; i< n*k; i++){
h_b[i] = 1.0f;
}
cudaMemcpy(d_a, h_a, m * n * sizeof(float ), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, n * k * sizeof(float ), cudaMemcpyHostToDevice);
dim3 threads(32, 32);
dim3 blocksPerGrid((n - 1) / threads.x + 1, (n - 1) / threads.y + 1);
blas3::sharedGEMM<<<blocksPerGrid, threads>>>(d_a, d_b, d_c, m ,n, k);
cudaDeviceSynchronize();
cudaMemcpy(h_c, d_c, m * k * sizeof(float ), cudaMemcpyDeviceToHost);
for(int i=0; i<500; i++){
std::cout<<h_c[i]<<" ";
}
std::cout<<std::endl;
}
*/ |
4,994 | #include "includes.h"
// Fill data[0..ndata) with its own index, using a grid-stride loop so any
// launch configuration covers the whole buffer.
__global__ void setTensorCheckPatternKernel(unsigned int* data, unsigned int ndata) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < ndata; idx += stride) {
        data[idx] = idx;
    }
}
4,995 | #include <stdio.h>
#include <float.h>
// Write a mask to results: 1.0 where array_device holds NaN or +/-Inf,
// 0.0 otherwise. One thread per (row, col) element; the source matrix has
// row stride totalCols while results is densely packed with stride
// colArrayLength.
// NOTE(review): rowArray, colArray and totalRows are accepted but never
// read — presumably kept for interface parity with kernel_nan2num.
void __global__ kernel_isnan(float* array_device, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, float* results)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
int m = blockIdx.y * blockDim.y + threadIdx.y;
if (n < rowArrayLength && m < colArrayLength)
{
int arrayInd = n*totalCols + m;
int resultsInd = n*colArrayLength + m;
if (isnan(array_device[arrayInd]) || isinf(array_device[arrayInd])) // I think this is all we need to do
results[resultsInd] = 1;
else
results[resultsInd] = 0;
}
}
// Replace non-finite values: NaN -> 0, +Inf -> FLT_MAX, -Inf -> -FLT_MAX.
// With inPlace set the input matrix is patched directly; otherwise the
// sanitized copy goes to results (finite values pass through unchanged).
// The source matrix has row stride totalCols; results uses colArrayLength.
// NOTE(review): rowArray, colArray and totalRows are accepted but unused —
// presumably kept for interface parity with kernel_isnan.
void __global__ kernel_nan2num(float* array_device, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, bool inPlace, float* results)
{
    int n = blockIdx.x * blockDim.x + threadIdx.x;
    int m = blockIdx.y * blockDim.y + threadIdx.y;
    if (n < rowArrayLength && m < colArrayLength)
    {
        int arrayInd = n*totalCols + m;
        int resultsInd = n*colArrayLength + m;
        float v = array_device[arrayInd];
        if (inPlace)
        {
            if (isnan(v))
                array_device[arrayInd] = 0;
            else if (isinf(v))
            {
                // isinf() is true for both signs; the sign test picks the
                // saturation value. Braces added: the original relied on a
                // dangling-else binding here.
                array_device[arrayInd] = (v > 0) ? FLT_MAX : -FLT_MAX;
            }
        }
        else
        {
            if (isnan(v))
                results[resultsInd] = 0;
            else if (isinf(v))
                // FLT_MIN is the smallest positive float (E-38), NOT -E38,
                // hence -FLT_MAX for negative infinity.
                results[resultsInd] = (v > 0) ? FLT_MAX : -FLT_MAX;
            else
                results[resultsInd] = v; // finite: copy through
        }
    }
    // The trailing __syncthreads() was removed: the kernel uses no shared
    // memory and no thread reads another thread's output, so the barrier
    // had no effect.
}
|
4,996 | #include "cuda_runtime.h"
#include <iostream>
#include <fstream>
#include <chrono>
#include <string>
// Interleaved (modulo-based) block reduction into output[blockIdx.x].
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory.
// NOTE(review): no bounds guard — every launched thread must map to a
// valid element of data; `size` is never consulted. The `% (2*i)` test
// diverges heavily within warps; optimized_reduction below is the
// sequential-addressing replacement.
__global__ void reduction(const int* data, const int size, int* output) {
extern __shared__ int shared_data[];
int indx = blockIdx.x * blockDim.x + threadIdx.x;
shared_data[threadIdx.x] = data[indx];
__syncthreads();
for(int i = 1; i < blockDim.x; i *= 2) {
if (threadIdx.x % (2*i) == 0)
shared_data[threadIdx.x] += shared_data[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0)
output[blockIdx.x] = shared_data[0];
}
// Sequential-addressing block reduction: each thread first sums a pair of
// elements one block-width apart, then the block folds shared memory in
// halves. Each block consumes 2*blockDim.x consecutive input elements and
// needs blockDim.x * sizeof(int) bytes of dynamic shared memory.
__global__ void optimized_reduction(const int* data, const int size, int* output) {
    extern __shared__ int shared_data[];
    int indx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // This block covers [blockIdx.x*2*blockDim.x, +2*blockDim.x), so the
    // partner element sits blockDim.x away. The original added
    // blockDim.x*2, which reached into the next block's range and skipped
    // this block's own second half entirely.
    shared_data[threadIdx.x] = data[indx] + data[indx + blockDim.x];
    __syncthreads();
    for (int i = blockDim.x / 2; i > 0; i /= 2) {
        if (threadIdx.x < i)
            shared_data[threadIdx.x] += shared_data[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        output[blockIdx.x] = shared_data[0];
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Set every element of data[0..size) to 1 with a grid-stride loop.
__global__ void initialize(int* data, const int size) {
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < size; i += step) {
        data[i] = 1;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Benchmark the reduction kernels on 2^26 ones in managed memory and
// cross-check the partial-sum total against a CPU loop.
int main() {
int* data;
int* result;
const int size = 1024 * std::pow(2, 16);
size_t data_size = size * sizeof(int);
int ID;
int SM;
cudaGetDevice(&ID);
cudaDeviceGetAttribute(&SM, cudaDevAttrMultiProcessorCount, ID);
cudaMallocManaged(&data, data_size);
cudaMallocManaged(&result, 32*SM*sizeof(int));
// Move the managed allocations to the GPU before the first kernel.
cudaMemPrefetchAsync(data, data_size, ID);
cudaMemPrefetchAsync(result, 32*SM*sizeof(int), ID);
initialize<<<32*SM, 256>>>(data, size);
auto start_first = std::chrono::high_resolution_clock::now();
// NOTE(review): the dynamic shared memory size here should be
// 256*sizeof(int) (one int per thread); SM*64 is unrelated to the block
// size and under-allocates on most GPUs. Also, only the first
// 32*SM*256 elements are reduced — the grid does not cover `size`.
reduction<<<SM * 32, 256, SM*64>>>(data, size, result);
cudaDeviceSynchronize();
auto stop_first = std::chrono::high_resolution_clock::now();
auto start_optimized = std::chrono::high_resolution_clock::now();
// NOTE(review): this "optimized" timing launches `reduction` again (not
// optimized_reduction) and passes NO dynamic shared memory, so the
// extern shared array has size zero — confirm the intended kernel and
// pass 256*sizeof(int) here as well.
reduction<<<SM * 32, 256>>>(data, size, result);
cudaDeviceSynchronize();
auto stop_optimized = std::chrono::high_resolution_clock::now();
int sum = 0;
int control = 0;
// CPU reference total and GPU partial-sum total for comparison.
for(int i = 0; i < size; i++)
control += data[i];
for(int i = 0; i < 32*SM; i++)
sum += result[i];
if( sum == size )
std::cout << "result good" << std::endl;
std::cout << sum << std::endl;
std::cout << control << std::endl;
std::ofstream save;
save.open("time.txt");
// save << std::chrono::duration_cast<std::chrono::nanoseconds>(stop_first-start_first).count()
// << std::endl << std::chrono::duration_cast<std::chrono::nanoseconds>(stop_optimized-start_optimized).count();
save.close();
return 0;
}
|
4,997 | #include "includes.h"
// Convert an RGBA image (float4 per pixel) to grayscale in place using a
// 0.3/0.6/0.1 RGB weighting; alpha is set to 0. One thread per pixel,
// flattened from a 2-D block layout.
__global__ void grayscale(float4* imagem, int width, int height)
{
    const int i = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
    if(i < width * height)
    {
        // Float literals (0.3f, ...) keep the arithmetic in single
        // precision; the original's bare 0.3/0.6/0.1 promoted every
        // operation to double.
        float v = 0.3f * imagem[i].x + 0.6f * imagem[i].y + 0.1f * imagem[i].z;
        imagem[i] = make_float4(v, v, v, 0);
    }
}
4,998 | #include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
//#include <helper_functions.h>
//#include <helper_cuda.h>
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Print one "[block, thread]: value" line from every launched thread.
// The block id is flattened from the 2-D grid and the thread id from the
// 3-D block layout.
__global__ void testKernel(int val)
{
printf("[%d, %d]:\t\tValue is:%d\n", blockIdx.y*gridDim.x+blockIdx.x, \
threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x, val);
}
// Launch testKernel on a 2x2 grid of 2x2x2 blocks (32 threads total) to
// demonstrate device-side printf; output order between threads is
// unspecified. The helper_cuda device-query calls are left commented out.
int main(int argc, char **argv)
{
int devID;
cudaDeviceProp props;
// devID = findCudaDevice(argc, (const char **) argv);
//checkCudaErrors(cudaGetDevice(&devID));
//checkCudaErrors(cudaGetDeviceProperties(&props, devID));
//printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
printf("printf() is called. Output:\n\n");
dim3 dimGrid(2,2);
dim3 dimBlock(2,2,2);
testKernel<<<dimGrid, dimBlock>>>(10);
// Block until the kernel (and its buffered printf output) completes.
cudaDeviceSynchronize();
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
4,999 |
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
// #include "cudaDefines.h"
// BMP image metadata captured by ReadBMPlin and reused by WriteBMPlin.
struct ImgProp {
	uint32_t Hpixels;        // image width in pixels
	uint32_t Vpixels;        // image height in pixels
	uint8_t HeaderInfo[14];  // raw 14-byte BMP file header
	uint8_t* HeaderMeta;     // remaining header bytes up to the pixel data
	uint16_t HeaderMetaSize; // length of HeaderMeta (pixel-data offset - 14)
	uint32_t Hbytes;         // bytes per row as stored here (Hpixels * 3)
	uint32_t IMAGESIZE;      // total pixel-data bytes (Hbytes * Vpixels)
};
// Globals shared between ReadBMPlin/WriteBMPlin and main.
struct ImgProp ip;
uint8_t* TheImg, * CpyImg;       // host-side image buffers
uint8_t* GPUImg, * GPUCopyImg;   // device-side image buffers
// Read a BMP file into one linear buffer, filling the global `ip` with its
// geometry; rows are stored WITHOUT 4-byte padding (Hbytes = Hpixels * 3).
// Exits on a missing file; returns NULL if the pixel buffer cannot be
// allocated. NOTE(review): fread return values are unchecked, and the
// returned pointer is a `static` local overwritten by the next call.
uint8_t* ReadBMPlin(char* fn) {
	static uint8_t* Img;
	FILE* f = fopen(fn, "rb");
	if (f == NULL) { printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
	uint8_t HeaderInfo[14];
	// 14-byte BMP file header; bytes 10..13 give the pixel-data offset.
	fread(HeaderInfo, sizeof(uint8_t), 14, f); // read the 54-byte header
	ip.HeaderMetaSize = *(int*)&HeaderInfo[10];
	ip.HeaderMeta = (uint8_t*)malloc(ip.HeaderMetaSize * sizeof(uint8_t));
	fread(ip.HeaderMeta, sizeof(uint8_t), ip.HeaderMetaSize, f); // read the 54-byte header
	// extract image height and width from header
	int width = *(int*)&(ip.HeaderMeta[4]); ip.Hpixels = width;
	int height = *(int*)&(ip.HeaderMeta[8]); ip.Vpixels = height;
	//int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
	// Deliberately unpadded row stride; device code must use the same.
	int RowBytes = ip.Hpixels * 3; ip.Hbytes = RowBytes;
	ip.IMAGESIZE = ip.Hbytes * ip.Vpixels;
	memcpy(ip.HeaderInfo, HeaderInfo, 14); //save header for re-use
	printf("\n Input File name: %17s\n\nHeaderMetaSize: %u, Hb: %u, Hp: %u, Vp: %u, File Size=%u\n\n", fn,
		ip.HeaderMetaSize, ip.Hbytes, ip.Hpixels, ip.Vpixels, ip.IMAGESIZE);
	// allocate memory to store the main image (1 Dimensional array)
	Img = (uint8_t*)malloc(ip.IMAGESIZE);
	if (Img == NULL) return Img; // Cannot allocate memory
	// read the image from disk
	fread(Img, sizeof(uint8_t), ip.IMAGESIZE, f); fclose(f); return Img;
}
// Write the 1D linear-memory stored image into file.
// Write the 1D linear-memory stored image into file.
// Re-emits the header bytes captured by ReadBMPlin, then the pixel data;
// exits the process if the file cannot be created.
void WriteBMPlin(uint8_t* Img, char* fn) {
	FILE* f = fopen(fn, "wb");
	if (f == NULL) { printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
	fwrite(ip.HeaderInfo, sizeof(uint8_t), 14, f); //write header
	fwrite(ip.HeaderMeta, sizeof(uint8_t), ip.HeaderMetaSize, f); //write header
	fwrite(Img, sizeof(uint8_t), ip.IMAGESIZE, f); //write data
	printf("\nOutput File name: %17s (%u x %u) File Size=%u\n\n", fn, ip.Hpixels,
		ip.Vpixels, ip.IMAGESIZE);
	fclose(f);
}
// Flip the image vertically: each thread copies one RGB pixel from its row
// into the mirrored row. Launched with BlkPerRow blocks per image row.
__global__
void Vflip(uint8_t* ImgDst, uint8_t* ImgSrc, uint32_t Hpixels, uint32_t Vpixels) {
	uint32_t ThrPerBlk = blockDim.x;
	uint32_t MYbid = blockIdx.x;
	uint32_t MYtid = threadIdx.x;
	uint32_t MYgtid = ThrPerBlk * MYbid + MYtid;
	uint32_t BlkPerRow = (Hpixels + ThrPerBlk - 1) / ThrPerBlk; // ceil
	// Row stride must match the host-side layout: ReadBMPlin stores rows
	// UNPADDED (ip.Hbytes = Hpixels * 3) and the device buffers are sized
	// from that. The original used the 4-byte padded stride here, which
	// shifts every row and walks past the buffer whenever Hpixels*3 is
	// not a multiple of 4.
	uint32_t RowBytes = Hpixels * 3;
	uint32_t MYrow = MYbid / BlkPerRow;
	uint32_t MYcol = MYgtid - MYrow * BlkPerRow * ThrPerBlk;
	if (MYcol >= Hpixels) return; // col out of range
	uint32_t MYmirrorrow = Vpixels - 1 - MYrow;
	uint32_t MYsrcOffset = MYrow * RowBytes;
	uint32_t MYdstOffset = MYmirrorrow * RowBytes;
	uint32_t MYsrcIndex = MYsrcOffset + 3 * MYcol;
	uint32_t MYdstIndex = MYdstOffset + 3 * MYcol;
	// copy the three colour bytes of pixel @MYcol to the mirrored row
	ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
	ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
	ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}
// Load a BMP, vertically flip it on the GPU, write the result, and report
// transfer/kernel timings measured with CUDA events.
int main() {
	cudaError_t cudaStatus, cudaStatus2;
	// time1..time4 bracket: H2D copy, kernel, D2H copy.
	cudaEvent_t time1, time2, time3, time4;
	cudaDeviceProp GPUprop;
	uint32_t SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk;
	char SupportedBlocks[100];
	uint32_t BlkPerRow, ThrPerBlk = 128, NumBlocks, GPUDataTransfer;
	float totalTime, tfrCPUtoGPU, kernelExecutionTime, tfrGPUtoCPU;
	char InputFileName[] = "../img/img.bmp";
	char OutputFileName[] = "../img/flip.bmp";
	TheImg = ReadBMPlin(InputFileName);
	CpyImg = (uint8_t*)malloc(ip.IMAGESIZE);
	//WriteBMPlin(TheImg, OutputFileName);
	//return 0;
	int NumGPUs = 0; cudaGetDeviceCount(&NumGPUs);
	if (NumGPUs == 0) {
		std::cout << "\nNo CUDA Device is available\n";
		exit(EXIT_FAILURE);
	}
	cudaGetDeviceProperties(&GPUprop, 0);
	// Summarize the device's maximum grid capacity for the report below.
	SupportedKBlocks = ((uint32_t)GPUprop.maxGridSize[0] * (uint32_t)GPUprop.maxGridSize[1] *
		(uint32_t)GPUprop.maxGridSize[2]) / 1024;
	SupportedMBlocks = SupportedKBlocks / 1024;
	sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks,
		(SupportedMBlocks >= 5) ? 'M' : 'K');
	MaxThrPerBlk = (uint32_t)GPUprop.maxThreadsPerBlock;
	cudaEventCreate(&time1); cudaEventCreate(&time2);
	cudaEventCreate(&time3); cudaEventCreate(&time4);
	// One block row-segment of ThrPerBlk pixels; BlkPerRow blocks per row.
	BlkPerRow = (ip.Hpixels + ThrPerBlk - 1) / ThrPerBlk;
	NumBlocks = ip.Vpixels * BlkPerRow;
	GPUDataTransfer = 2 * ip.IMAGESIZE; // kernel reads + writes the image
	cudaEventRecord(time1, 0);
	cudaStatus = cudaMalloc((void**)&GPUImg, ip.IMAGESIZE);
	cudaStatus2 = cudaMalloc((void**)&GPUCopyImg, ip.IMAGESIZE);
	if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)) {
		std::cout << "cudaMalloc failed! Can't allocate GPU memory";
		exit(EXIT_FAILURE);
	}
	cudaStatus = cudaMemcpy(GPUImg, TheImg, ip.IMAGESIZE, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		std::cout << "cudaMemCpy CPU to GPU failed!";
		exit(EXIT_FAILURE);
	}
	cudaEventRecord(time2, 0);
	Vflip <<<NumBlocks, ThrPerBlk>>> (GPUCopyImg, GPUImg, ip.Hpixels, ip.Vpixels);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		std::cout << "cudaDeviceSynchronize error code " << cudaStatus << " ...\n";
		exit(EXIT_FAILURE);
	}
	cudaEventRecord(time3, 0);
	cudaStatus = cudaMemcpy(CpyImg, GPUCopyImg, ip.IMAGESIZE, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		std::cout << "cudaMemCpy GPU to CPU failed!" << cudaStatus;
		exit(EXIT_FAILURE);
	}
	cudaEventRecord(time4, 0);
	cudaEventSynchronize(time1); cudaEventSynchronize(time2);
	cudaEventSynchronize(time3); cudaEventSynchronize(time4);
	cudaEventElapsedTime(&totalTime, time1, time4);
	cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
	cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
	cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		std::cout << "Program failed afeter cudaDeviceSyncronize()";
		free(TheImg); free(CpyImg);
		exit(EXIT_FAILURE);
	}
	WriteBMPlin(CpyImg, OutputFileName);
	printf("--...--\n"); printf("%s ComputeCapab=%d.%d [supports max %s blocks]\n",
		GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks); printf("...\n");
	printf("maxTrPerBlk: %d\n", MaxThrPerBlk);
	printf("%s\n %s\n\n ThrPerBlock: %u, Blocks: %u, BlkPerRow: %u\n", InputFileName,
		OutputFileName, ThrPerBlk, NumBlocks, BlkPerRow);
	printf("-------------------- ... ----------------------------\n");
	printf("CPU->GPU Transfer = %5.2f ms ... %4d MB ... %6.2f GB/s\n",
		tfrCPUtoGPU, ip.IMAGESIZE / 1024 / 1024, (float)ip.IMAGESIZE / (tfrCPUtoGPU *
			1024.0 * 1024.0));
	printf("Kernel Execution = %5.2f ms ... %4d MB ... %6.2f GB/s\n",
		kernelExecutionTime, GPUDataTransfer / 1024 / 1024, (float)GPUDataTransfer /
		(kernelExecutionTime * 1024.0 * 1024.0));
	printf("GPU->CPU Transfer = %5.2f ms ... %4d MB ... %6.2f GB/s\n",
		tfrGPUtoCPU, ip.IMAGESIZE / 1024 / 1024, (float)ip.IMAGESIZE / (tfrGPUtoCPU *
			1024.0 * 1024.0));
	printf("Total time elapsed = %5.2f ms\n", totalTime);
	printf("-------------------- ... ----------------------------\n");
	cudaFree(GPUImg); cudaFree(GPUCopyImg);
	cudaEventDestroy(time1); cudaEventDestroy(time2);
	cudaEventDestroy(time3); cudaEventDestroy(time4);
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		std::cout << "cudaDeviceReset failed!";
		free(TheImg); free(CpyImg); exit(EXIT_FAILURE);
	}
	free(TheImg); free(CpyImg);
	getchar();
	//getchar();
	return(EXIT_SUCCESS);
}
5,000 | #include "includes.h"
// err[i] = aFourth[i] minus a one-hot target that is 1 where the 1-based
// class index (i + 1) equals expectedOutput and 0 elsewhere.
// One thread per output element; single-block launch (threadIdx.x only).
__global__ void calculateError(float *aFourth, float *err, int expectedOutput)
{
    const int idx = threadIdx.x;
    const float target = (idx + 1 == expectedOutput) ? 1.0f : 0.0f;
    err[idx] = aFourth[idx] - target;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.