serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,701 | #include <stdio.h>
/*
 * Warp-cooperative accumulation of `As` source vectors into an output vector.
 *
 * param1 points at a packed float buffer laid out as:
 *   mem[0] = size (number of elements per vector, stored as float)
 *   mem[1] = As   (number of source vectors, stored as float)
 *   mem[2 .. 2+As*size)        = the As source vectors, back to back
 *   mem[2+As*size .. +size)    = the output vector C (accumulated into, not cleared here)
 *
 * NOTE(review): tid is derived from threadIdx.x % 32, so every warp of a block
 * computes the same lane indices. If this is launched/called with more than one
 * warp per block, each warp repeats the same `C[tid] += ...` updates and the
 * result is duplicated/raced — presumably the caller invokes this with a single
 * warp per output vector; confirm at call sites.
 */
__device__ void VecAdd ( void* param1)
{
// warp hard coded
int warp_size = 32;
// unbox params: header floats are reinterpreted as ints
float* mem = (float*)param1;
int size = (int)mem[0];
int As = (int)mem[1];
float *A = mem+2;
float* C = A + As*size;
// Effect: C[tid] += A1[tid] + A2[tid] + A3[tid] + ... (C is NOT zeroed first)
int i;
for(i=0; i<As; i++){
float * cur = A + i*size;
// each lane strides through the vector in steps of the warp size
int tid = threadIdx.x%warp_size;
while(tid<size){
C[tid] += cur[tid];
tid += warp_size;
}
}
/* Dead earlier draft kept by the original author:
 while (tid < size)
{
int i, temp;
temp=0;
for(i=0; i<As; i++) temp += [tid]);
C[tid]=temp;
tid = tid + warp_size;
}*/
}
|
20,702 | #include "mat-rvect-add.hh"
#include "graph.hh"
#include "../runtime/graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Op that adds a row vector (right) to every row of a matrix (left).
// The output shape is taken from the matrix operand.
MatRvectAdd::MatRvectAdd(Op* left, Op* right)
: Op("mat_rvect_add", left->shape_get(), {left, right})
{}
// Compile this op into a runtime node: fetch the compiled predecessors,
// allocate the output tensor, and register the runtime mat+rvect-add node
// with the graph. Assumes the left operand is a rank-2 (n x p) tensor.
void MatRvectAdd::compile()
{
auto& g = Graph::instance();
// compiled forms of the matrix (preds()[0]) and row vector (preds()[1])
auto& cleft = g.compiled(preds()[0]);
auto& cright = g.compiled(preds()[1]);
std::size_t n = cleft.out_shape[0];
std::size_t p = cleft.out_shape[1];
Shape out_shape({int(n), int(p)});
// output buffer owned by the graph after add_compiled
dbl_t* out_data = tensor_alloc(out_shape.total());
auto out_node = rt::Node::op_mat_rvect_add(cleft.out_data, cright.out_data, out_data,
n, p, {cleft.out_node, cright.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
20,703 | //#include <stdio.h>
//#include "Cublas.h"
//
//
//// Allocates a matrix with random float entries.
//void randomInit(float *data, int size)
//{
// for (int i = 0; i < size; ++i)
// data[i] = rand() / (float)RAND_MAX;
//}
//
//
//////////////////////////////////////////////////////////////////////////////////
//// Program main
//////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char **argv)
//{
// int seed = 123;
// int arrayLen = 10;
// printf("Hi");
// curandGenerator_t *g1 = (curandGenerator_t *)malloc(sizeof(curandGenerator_t));
// BSTR res = DllMakeCublasHandle((void **)&g1);
//
// int[] aa = new int
//
//
//
// res = DllDestroyCublasHandle((void *)g1);
//} |
20,704 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
__global__ void mul( float *Ad, float *Bd, float *Cd, int msize, int tile, int task);
/*
 * Tiled matrix-multiply driver.
 * argv[1]: matrix dimension (msize), argv[2]: tile width per block,
 * argv[3]: tasks per thread, argv[4] (optional): enable verification mode
 * (all-ones inputs, so every C entry should equal msize).
 */
int main( int argc, char **argv){
    clock_t start = clock();
    int i, j;
    /* FIX: the original dereferenced argv[1..3] without checking argc. */
    if (argc < 4) {
        fprintf(stderr, "usage: %s msize tile_width task_per_thread [verify]\n", argv[0]);
        return 1;
    }
    int msize = atoi(argv[1]);
    int tile = atoi(argv[2]);
    int task = atoi(argv[3]);
    if (msize <= 0 || tile <= 0 || task <= 0) {
        fprintf(stderr, "msize, tile and task must be positive integers\n");
        return 1;
    }
    float *A, *B, *C;
    float *Ad, *Bd, *Cd;
    A = (float*)malloc(msize * msize * sizeof(float));
    B = (float*)malloc(msize * msize * sizeof(float));
    C = (float*)malloc(msize * msize * sizeof(float));
    if (!A || !B || !C) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    if (argc == 5) {
        /* verification mode: all-ones inputs */
        for (i = 0; i < msize; i++) {
            for (j = 0; j < msize; j++) {
                A[i * msize + j] = (float)1;
                B[i * msize + j] = (float)1;
            }
        }
    } else {
        /* FIX: seed the RNG exactly once. The original called
         * srand(time(NULL)) before every rand(); within the same second that
         * restarts the sequence, so every entry got the same value. */
        srand(time(NULL));
        for (i = 0; i < msize; i++) {
            for (j = 0; j < msize; j++) {
                A[i * msize + j] = (float)(rand() % 2);
                B[i * msize + j] = (float)(rand() % 2);
            }
        }
    }
    cudaMalloc((void**)&Ad, msize * msize * sizeof(float));
    cudaMalloc((void**)&Bd, msize * msize * sizeof(float));
    cudaMalloc((void**)&Cd, msize * msize * sizeof(float));
    cudaMemcpy(Ad, A, msize * msize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, B, msize * msize * sizeof(float), cudaMemcpyHostToDevice);
    /* one block per tile; each thread computes a task x task sub-tile */
    dim3 dimGrid((msize/tile), (msize/tile));
    dim3 dimBlock((tile/task), (tile/task));
    mul<<<dimGrid, dimBlock>>> (Ad, Bd, Cd, msize, tile, task);
    cudaMemcpy(C, Cd, msize * msize * sizeof(float), cudaMemcpyDeviceToHost);
    /* print out the verification result */
    int hoosh = 0;
    if (argc == 5) {
        printf("\n=================== V =========================\n");
        for (i = 0; i < msize; i++) {
            printf("\n");
            for (j = 0; j < msize; j++) {
                printf("%.2f ", C[i * msize + j]);
                if (C[i * msize + j] != msize)
                    hoosh++;
            }
        }
        printf("\n hoosh = %d \n", hoosh);
        if (hoosh != 0)
            printf("\n YOU JUST DRIVING ME CRAZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZY\n");
        else
            printf("\n GOOD! ONLY ONE MORE TO GO!!! BIG BIG : ) \n");
    }
    free(A); free(B); free(C);
    cudaFree(Ad); cudaFree(Bd); cudaFree(Cd);
    printf( " \n msize: %d\t tilewidth: %d\t task: %d\t timeElapsed: %f\n", msize, tile, task, ((double)(clock()-start)/CLOCKS_PER_SEC));
    /* FIX: the original returned 1 on success. */
    return 0;
}
/*
 * Tiled square matrix multiply: Cd = Ad * Bd (all msize x msize, row-major).
 * Each block covers a tile x tile region; each thread computes a task x task
 * sub-block of C, one full dot product per output element.
 *
 * NOTE(review): there is no bounds check on r/c, so correctness assumes
 * msize % tile == 0 and tile % task == 0 (matching the launch in main) —
 * confirm before reusing with other configurations.
 */
__global__ void mul( float *Ad, float *Bd, float *Cd, int msize, int tile, int task){
int tx, ty;
int r, c;
float Cv;
int m;
// iterate over this thread's task x task patch of outputs
for ( tx = 0; tx < task; tx++){
for ( ty = 0; ty < task; ty++){
// global row/column of the output element handled this iteration
r = blockIdx.y * tile + threadIdx.y * task + ty;
c = blockIdx.x * tile + threadIdx.x * task + tx;
Cv = (float)0;
// full-length dot product of row r of A with column c of B
for ( m = 0; m < msize; m++){
Cv += Ad[ r * msize + m] * Bd[ m * msize + c];
}
Cd[ r * msize + c] = Cv;
}
}
}
|
20,705 | #include "distance_matrix.cuh"
/**
* @brief Get the value of the (`i`, `j`) element in the distance matrix.
* @param i The row of the element.
* @param j The column of the element.
* @return The value of the (`i`, `j`) element in the distance matrix.
*/
/**
 * @brief Get the value of the (`i`, `j`) element in the distance matrix.
 * @param i The row of the element.
 * @param j The column of the element.
 * @return The value of the (`i`, `j`) element in the distance matrix.
 * @throws std::out_of_range if either index is >= the number of observations.
 */
template<class T>
T DistanceMatrix<T>::at(uint32_t i, uint32_t j) const {
    if (i >= n_observations_ || j >= n_observations_) {
        throw std::out_of_range("Index is out of bounds for this size matrix.");
    }
    // Diagonal is not stored: distance of an observation to itself is 0.
    if (i == j) {
        return (T) 0;
    }
    // Off-diagonal values live in the packed triangular `data` vector.
    auto idx = index_at(i, j);
    return data.at(idx);
}
/**
* @brief Get indices of the `n` values closest to observation `i`.
* @param i The index of the observation.
* @param n The number of observations to take.
* @return The indices of the `n` values closest to observation `i`.
*/
/**
 * @brief Get indices of the `n` values closest to observation `i`.
 * @param i The index of the observation.
 * @param n The number of observations to take (clamped to the matrix width).
 * @return The indices of the `n` values closest to observation `i`.
 *
 * Note: the self-distance at(i, i) == 0 participates in the sort, so `i`
 * itself is normally the first returned index (unchanged from the original
 * behavior).
 */
template<class T>
std::vector<uint32_t> DistanceMatrix<T>::closest(uint32_t i, uint32_t n) {
    n = std::min(n, n_cols());
    // FIX: distances must be held as T. The original used
    // std::vector<uint32_t>, which truncated fractional distances and
    // corrupted the ordering for any non-integer metric.
    std::vector<T> dists(n_cols());
    for (uint32_t j = 0; j < n_cols(); ++j) {
        dists.at(j) = at(i, j);
    }
    // argsort gives column indices ordered by increasing distance;
    // assumes utilities::argsort is generic over the element type — confirm.
    auto as = utilities::argsort(dists);
    std::vector<uint32_t> indices(as.begin(), as.begin() + n);
    return indices;
}
/**
* @brief Set the value at the (`i`, `j`) coordinate to `val`.
* @param i The row in the distance matrix.
* @param j The column in the distance matrix.
* @param val The value to insert.
*/
/**
 * @brief Set the value at the (`i`, `j`) coordinate to `val`.
 * @param i The row in the distance matrix.
 * @param j The column in the distance matrix.
 * @param val The value to insert.
 * @throws std::out_of_range if either index is >= the number of observations.
 * @throws std::domain_error if i == j (the diagonal is implicitly zero and
 *         not stored, so it cannot be written).
 */
template<class T>
void DistanceMatrix<T>::set_at(uint32_t i, uint32_t j, T val) {
    if (i >= n_observations_ || j >= n_observations_) {
        throw std::out_of_range("Index is out of bounds for this size matrix.");
    }
    if (i == j) {
        throw std::domain_error("Setting a diagonal element is forbidden.");
    }
    // Symmetric storage: (i, j) and (j, i) map to the same packed slot.
    auto idx = index_at(i, j);
    data.at(idx) = val;
}
/**
* @brief Get the index into the data vector of the (`i`, `j`) element in the
* distance matrix.
* @param i The row of the element.
* @param j The column of the element.
* @return The index into the data vector of the (`i`, `j`) element.
*/
/**
 * @brief Map the (`i`, `j`) coordinate to its offset in the packed data
 * vector (upper triangle, diagonal excluded).
 * @param i The row of the element.
 * @param j The column of the element.
 * @return The index into the data vector of the (`i`, `j`) element.
 */
template<class T>
uint32_t DistanceMatrix<T>::index_at(uint32_t i, uint32_t j) const {
    // Bounds are validated by the callers (at / set_at), so none here.
    // The matrix is symmetric: canonicalize to i <= j with a plain swap
    // rather than recursing.
    if (j < i) {
        uint32_t tmp = i;
        i = j;
        j = tmp;
    }
    // Offset of row i's slice in the condensed store, plus the column offset.
    return i * n_observations_ - (i + 1) * (i + 2) / 2 + j;
}
template
class DistanceMatrix<float>;
template
class DistanceMatrix<double>; |
20,706 | #include "includes.h"
using namespace std;
void KNearestNeighborsCPU(float3 *dataArray, int *result, int cnt);
// cpu algorithm
/*
 * Brute-force 1-nearest-neighbor: one thread per point. Thread `id` scans
 * every other point, tracking the smallest squared Euclidean distance, and
 * writes the index of its closest neighbor into result[id].
 */
__global__ void KNearestNeighborsGPU(float3 *dataArray, int *result, int cnt)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= cnt) return;
    const float3 query = dataArray[id];
    // sentinel: largest finite float, so any real distance beats it
    float best = 3.4028234664e38f;
    for (int j = 0; j < cnt; j++)
    {
        if (j == id) continue;  // skip self
        const float3 other = dataArray[j];
        const float dx = query.x - other.x;
        const float dy = query.y - other.y;
        const float dz = query.z - other.z;
        const float d2 = dx * dx + dy * dy + dz * dz;
        if (d2 < best)
        {
            best = d2;
            result[id] = j;
        }
    }
}
20,707 | #include <stdio.h>
#include<sys/time.h>
#include<math.h>
#define N 8192
#define nth 1024
/*
 * Shared-memory tiled transpose of an N x N matrix: B = A^T.
 * Each block loads a (blockDim.x x blockDim.y) tile of A into shared memory,
 * transposes it in shared memory, then writes the tile to the mirrored block
 * position in B. Requires blockDim.x * blockDim.y == nth (shared arrays are
 * sized nth) and N divisible by the block dimensions (no bounds checks).
 *
 * NOTE(review): Bblock is indexed with stride dimy on the write and read side
 * with no padding, so shared-memory bank conflicts are likely — acceptable
 * here, just flagging it.
 */
__global__ void fast_transpose(size_t* A, size_t* B){
__shared__ size_t Ablock[nth];
__shared__ size_t Bblock[nth];
size_t dimx=blockDim.x;
size_t dimy=blockDim.y;
//dimx=linear dimension in x of a submatrix block
size_t th=threadIdx.x+threadIdx.y*dimx;
size_t thx=threadIdx.x;
size_t thy=threadIdx.y;
// top-left corner (linear offset) of this block's tile in A
size_t starty=blockIdx.y*N*dimy;
size_t startx=blockIdx.x*dimx;
size_t start= startx+starty;
//Ablock is different for every block, so I can index it with th
Ablock[th]= A[start+thx+(thy)*(N)];
//creation of A completed for each block
__syncthreads();
//transpose into B block: element (thx, thy) moves to (thy, thx)
Bblock[dimy*thx + thy] = Ablock[th];
__syncthreads();
//put Bblock in B
start=blockIdx.y*dimy+dimx*N*blockIdx.x; //the x block index of the original matrix becomes y index of transpose, so skip N
B[ start+thy+(thx)*(N) ]=Bblock[dimy*thx + thy];
}
/*
 * Naive transpose: B = A^T for an N x N matrix. One block per column of A;
 * the block's threads stride down the rows.
 */
__global__ void transpose(size_t* A, size_t *B){
    const size_t col = blockIdx.x;
    for (size_t row = threadIdx.x; row < N; row += blockDim.x) {
        B[col + row * N] = A[row + col * N];
    }
}
/////////////////////C utilites//////////////////////////////
/* Return 1 iff At is the transpose of A (both N x N, column/row layout as
 * used throughout this file); 0 on the first mismatch found. */
int transposed(size_t *A, size_t* At){
    size_t r, c;
    for (r = 0; r < N; r++) {
        for (c = 0; c < N; c++) {
            if (A[r + c * N] != At[c + r * N]) {
                return 0;
            }
        }
    }
    return 1;
}
/* Wall-clock time in seconds with microsecond resolution (gettimeofday). */
double seconds()
{
    struct timeval now;
    gettimeofday( &now, (struct timezone *)0 );
    return (double)now.tv_sec + ((double)now.tv_usec) / 1000000.0;
}
////////////////////////////////////main
/*
 * Benchmark driver: fills an N x N matrix (row i holds values i%N ... pattern),
 * times the naive `transpose` kernel and the shared-memory `fast_transpose`
 * kernel, and verifies each result with the CPU-side `transposed` check.
 */
int main(){
size_t elements=N*N;
size_t space=N*N*sizeof(size_t);
size_t*A=(size_t*)malloc(space);
size_t*dev_A;
size_t*B=(size_t*)malloc(space);
size_t*dev_B;
size_t i;
// deterministic fill: element i gets i mod N
for(i=0;i<elements;i++){
A[i]=i%N;
}
cudaMalloc( (void**)&dev_A, space );
cudaMalloc( (void**)&dev_B, space );
cudaMemcpy( dev_A, A, space, cudaMemcpyHostToDevice );
// --- naive kernel: one block per column, nth threads striding rows ---
double tstart=seconds();
transpose<<< N, nth >>>(dev_A, dev_B);
cudaDeviceSynchronize();
double duration=seconds()-tstart;
printf("transp time: %lf\n",duration);
cudaMemcpy( B, dev_B, space, cudaMemcpyDeviceToHost );
printf("correct? %d\n\n",transposed(A,B));
// --- tiled kernel: choose a 2D block shape with nth threads total ---
size_t block_side= (size_t)sqrt(nth);
dim3 grid,block;
if(block_side*block_side==nth){
grid.x=grid.y=N/block_side; //number of orizontal blocks=number of vertical blocks
block.x=block.y=block_side; //block linear length
}
else{
grid.x=N/32; //ideally, we should have an algorithm that given nth finds (a,b) integers such that nth=a*b and (a,b) closest to each other
grid.y=N/16; //to be preferred a>b, so that we read more often on x (continous in memory)
block.x=32;
block.y=16;
}
tstart=seconds();
fast_transpose<<< grid, block >>>(dev_A, dev_B);
cudaDeviceSynchronize();
duration=seconds()-tstart;
printf("fast times: %lf\n",duration);
cudaMemcpy( B, dev_B, space, cudaMemcpyDeviceToHost );
/*for(i=0;i<elements;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", A[i]);
}
printf("\n");
for(i=0;i<elements;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", B[i]);
}
printf("\n"); */
printf("correct? %d\n\n",transposed(A,B));
free(A);free(B);
cudaFree(dev_A);cudaFree(dev_B);
}
|
20,708 | #include "includes.h"
/*
 * 9-point weighted stencil over the interior of a width x width grid, staged
 * through dynamically-sized shared memory (3 rows of blockDim.x + 2 floats).
 * Reads from g_dataA (pitch = floatpitch elements per row), writes g_dataB.
 *
 * NOTE(review): several things here look suspect and should be confirmed
 * against the reference kernel before relying on this one:
 *  - `mid_row` mixes a column term (blockIdx.x * blockDim.x + 1) with a
 *    row term (floatpitch * blockIdx.y), and is then multiplied by
 *    floatpitch again when forming g_index0/1/2.
 *  - the final stencil sum labels (N/NE/E/...) do not match the s_index rows
 *    being read.
 * Left byte-identical; comments only.
 */
__global__ void k1( float* g_dataA, float* g_dataB, int floatpitch, int width)
{
extern __shared__ float s_data[];
// TODO, implement this kernel below
// global thread(data) row index, shifted off the unprocessed border
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
y = y + 1; //because the edge of the data is not processed
// global thread(data) column index
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
x = x + 1; //because the edge of the data is not processed
if( y >=width - 1|| x >= width - 1 || y < 1 || x < 1 )// this is to check to make sure that the thread is within the array.
return;
int startRow = blockIdx.y;
int startCol = blockDim.x * blockIdx.x;
// shared rows are blockDim.x + 2 wide to hold the left/right halo columns
int s_rowwidth = blockDim.x +2; // because the blocks have to overlap on the right side that is why you add 2
int s_index0 = threadIdx.x +1; //row zero in s_data. you add one because you don't deal with the outer edge
int s_index1 = threadIdx.x + s_rowwidth + 1; //row one in s_data.so this goes to the other side
int s_index2 = threadIdx.x + 2 * s_rowwidth +1; //this is to get the last
//int s_index_result = threadIdx.x + 3 * s_rowwidth + 1;
// global offsets of the row above / at / below this thread's element
int mid_row = blockIdx.x * blockDim.x + 1 + floatpitch * blockIdx.y;
int g_index0 = (mid_row -1) * floatpitch + startCol + 1+ threadIdx.x;
int g_index1 = (mid_row) * floatpitch + startCol + 1 + threadIdx.x;
int g_index2 = (mid_row +1) * floatpitch +startCol + 1 + threadIdx.x;
if(startCol + startRow + 1 < width -1)
{
//copy the data from gobal mem to shared mem
s_data[s_index0] = g_dataA[g_index0];
s_data[s_index1] = g_dataA[g_index1];
s_data[s_index2] = g_dataA[g_index2];
}//end of if statement to populate the middle row of the current block
if(startRow == 0)
{
//copy the extra two columns in the globabl mem
s_data[s_index0 -1] = g_dataA[g_index0 - 1];
s_data[s_index1 -1] = g_dataA[g_index1 -1];
s_data[s_index2 -1] = g_dataA[g_index2 -1];
}//end of if statement to populate the edge row
if(threadIdx.x == width -3 - startCol || threadIdx.x == blockDim.x-1)
{
s_data[s_index0 + 1] = g_dataA[g_index0 +1];
s_data[s_index1 + 1] = g_dataA[g_index1 +1];
s_data[s_index2 +1] = g_dataA[g_index2 + 1];
}//end of if statement to populate the row below the middle row
__syncthreads();
//if( x >= width - 1|| y >= width - 1 || x < 1 || y < 1 )// this is to check to make sure that the thread is within the array.
// return;
//this is copied from the other kernel
// weighted 9-point sum scaled by 0.95 (weights: 0.2 center, 0.1 neighbors)
g_dataB[y * width + x] = (
0.2f * s_data[s_index1] + //itself s_ind_1
0.1f * s_data[s_index0 -1] + //N s_ind_0
0.1f * s_data[s_index0 +1] + //NE s_ind_0
0.1f * s_data[s_index0 ] + //E s_ind1
0.1f * s_data[s_index1 +1] + //SE s_ind2
0.1f * s_data[s_index1 -1] + //S s_ind2
0.1f * s_data[s_index2 ] + //SW
0.1f * s_data[s_index2 -1] + //W
0.1f * s_data[s_index2 +1] //NW
) * 0.95f;//*/
}
20,709 | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
const long BOX_SIZE = 23000; /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry {
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
struct debuginfo {
int idx;
int ran;
int i;
int j;
float dist;
int which_bucket;
};
bucket * histogram; /* list of all buckets in the histogram */
unsigned long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
/*
 * Euclidean distance between atoms ind1 and ind2 of the global atom_list.
 */
double p2p_distance(int ind1, int ind2) {
	double dx = atom_list[ind1].x_pos - atom_list[ind2].x_pos;
	double dy = atom_list[ind1].y_pos - atom_list[ind2].y_pos;
	double dz = atom_list[ind1].z_pos - atom_list[ind2].z_pos;
	return sqrt(dx*dx + dy*dy + dz*dz);
}
/*
brute-force SDH solution in a single CPU thread
*/
/*
 * Single-threaded CPU reference: histogram all C(n,2) pairwise distances of
 * the global atom_list into the global histogram, bucket width PDH_res.
 */
int PDH_baseline() {
	int i, j;
	for (i = 0; i < PDH_acnt; i++) {
		for (j = i + 1; j < PDH_acnt; j++) {
			double dist = p2p_distance(i, j);
			int h_pos = (int)(dist / PDH_res);
			if (h_pos >= 0 && h_pos < num_buckets)
				histogram[h_pos].d_cnt++;
			else
				printf("Warning: value %lf falls outside histogram", dist);
		}
	}
	return 0;
}
/*
 * One thread per unordered pair (i, j), i > j. The flat thread index is mapped
 * back to the pair via the inverse of the triangular-number formula, the pair
 * distance is computed, and the matching histogram bucket is incremented.
 * d_buckets must hold exactly n_buckets entries.
 */
__global__
void PDH_kernel(long n_threads, bucket *d_buckets, int n_buckets, const atom *d_atoms, double w
#ifdef DEBUG
, struct debuginfo *d_dinfo
#endif
) {
	int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx >= n_threads)
		return;
	// Invert idx -> (i, j): idx = i*(i+1)/2 + j with 0 <= j <= i, then shift i
	// by one so the pair is (i+1, j) with i+1 > j.
	int i = (sqrt(8.0*idx + 1.0) - 1.0)/2;
	int j = idx - i*(i + 1)/2;
	i++;
	double deltax = d_atoms[i].x_pos - d_atoms[j].x_pos;
	double deltay = d_atoms[i].y_pos - d_atoms[j].y_pos;
	double deltaz = d_atoms[i].z_pos - d_atoms[j].z_pos;
	double dist = sqrt(deltax*deltax + deltay*deltay + deltaz*deltaz);
	int h_pos = (int) (dist / w);
	if (h_pos >= 0 && h_pos < n_buckets)
		// FIX: the original wrote d_buckets[h_pos*n_threads + idx], indexing
		// far beyond the n_buckets-element allocation made by PDH_gpu — an
		// out-of-bounds write for every pair. Use an atomic increment on the
		// real bucket instead (64-bit atomicAdd; requires SM35+).
		atomicAdd(&d_buckets[h_pos].d_cnt, 1ULL);
#ifdef DEBUG
	d_dinfo[idx].idx = idx;
	d_dinfo[idx].i = i;
	d_dinfo[idx].j = j;
	d_dinfo[idx].ran = 1;
	d_dinfo[idx].dist = dist;
	d_dinfo[idx].which_bucket = (int) (dist / w);
#endif
}
/*
 * GPU histogram driver: allocates and zeroes a device copy of the histogram,
 * uploads the atom list, launches one thread per unordered pair (256 threads
 * per block), then copies the histogram back into the global `histogram`.
 */
void PDH_gpu() {
	// number of unordered pairs = n*(n-1)/2, one thread each
	unsigned long num_threads = PDH_acnt*(PDH_acnt - 1)/2;
	// allocate histogram
	bucket *d_buckets;
	cudaMalloc(&d_buckets, sizeof(*histogram) * num_buckets);
	cudaMemset(d_buckets, 0, sizeof(*histogram) * num_buckets);
#ifdef DEBUG
	// allocate debuginfo
	struct debuginfo *d_dinfo;
	cudaMalloc(&d_dinfo, sizeof(*d_dinfo) * num_threads);
	cudaMemset(d_dinfo, 0, sizeof(*d_dinfo) * num_threads);
#endif
	// Copy atoms to device
	atom *d_atoms;
	cudaMalloc(&d_atoms, sizeof(*atom_list) * PDH_acnt);
	cudaMemcpy(d_atoms, atom_list, sizeof(*atom_list) * PDH_acnt, cudaMemcpyHostToDevice);
	PDH_kernel<<<(num_threads + 255)/256, 256>>>(num_threads, d_buckets, num_buckets, d_atoms, PDH_res
//	PDH_kernel<<<1, num_threads>>>(num_threads, d_buckets, num_buckets, d_atoms, PDH_res
#ifdef DEBUG
	, d_dinfo
#endif
	);
//	cudaDeviceSynchronize();
	// launch-configuration errors surface here; execution errors surface at
	// the blocking cudaMemcpy below
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("CUDA ERROR: %s\n", cudaGetErrorString(err));
		puts("This is probably due to a too-large block count");
	}
	// Copy histogram from device and cleanup
	cudaFree(d_atoms);
	cudaMemcpy(histogram, d_buckets, sizeof(*histogram) * num_buckets, cudaMemcpyDeviceToHost);
	cudaFree(d_buckets);
#ifdef DEBUG
	// Copy debuginfo from device and cleanup
	struct debuginfo *h_dinfo = (struct debuginfo *) malloc(sizeof(*h_dinfo) * num_threads);
	cudaMemcpy(h_dinfo, d_dinfo, sizeof(*h_dinfo) * num_threads, cudaMemcpyDeviceToHost);
	cudaFree(d_dinfo);
	for (unsigned long long i=0; i<num_threads; i++) {
		printf("%llu: idx=%d, ran=%d, i=%d, j=%d, dist=%f, bucket=%d\n",
			i, h_dinfo[i].idx, h_dinfo[i].ran, h_dinfo[i].i, h_dinfo[i].j,
			h_dinfo[i].dist, h_dinfo[i].which_bucket);
	}
#endif
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
/*
 * Stop the stopwatch started by gettimeofday(&startTime, ...), print the
 * elapsed time labeled with `type`, and return it in (fractional) seconds.
 */
double report_running_time(const char *type) {
	gettimeofday(&endTime, &Idunno);
	long sec_diff = endTime.tv_sec - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	// borrow a second when the microsecond part underflows
	if (usec_diff < 0) {
		sec_diff -= 1;
		usec_diff += 1000000;
	}
	printf("Running time for %s version: %ld.%06ld\n", type, sec_diff, usec_diff);
	return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
/*
 * Print the histogram, five buckets per row, followed by the total count
 * (which should equal n*(n-1)/2 when every distance landed in a bucket).
 */
void output_histogram() {
	int i;
	unsigned long long total_cnt = 0;
	for(i=0; i<num_buckets; i++) {
		if (i%5 == 0) /* we print 5 buckets in a row */
			printf("\n%02d: ", i);
		/* FIX: d_cnt is unsigned long long, so the conversion specifier must
		 * be %llu; the original's %lld is undefined behavior for values above
		 * LLONG_MAX and misleading in any case. */
		printf("%15llu ", histogram[i].d_cnt);
		total_cnt += histogram[i].d_cnt;
		/* we also want to make sure the total distance count is correct */
		if (i == num_buckets - 1)
			printf("\n T:%llu \n", total_cnt);
		else printf("| ");
	}
}
/*
 * Entry point: argv[1] = number of points, argv[2] = bucket width w.
 * Generates uniform random points in the BOX_SIZE cube, runs the CPU and GPU
 * histogram implementations, and prints both results with timings.
 */
int main(int argc, char **argv)
{
	int i;
	/* FIX: the original read argv[1]/argv[2] without checking argc. */
	if (argc < 3) {
		fprintf(stderr, "usage: %s {#of_points} {bucket_width}\n", argv[0]);
		return 1;
	}
	PDH_acnt = atoi(argv[1]);
	PDH_res = atof(argv[2]);
	if (PDH_acnt == 0 || PDH_res <= 0) {
		fprintf(stderr, "point count and bucket width must be positive\n");
		return 1;
	}
	/* 1.732 ~ sqrt(3): the box diagonal bounds the largest distance */
	num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
	histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
	atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
	if (!histogram || !atom_list) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	srand(1); /* fixed seed: runs are reproducible */
	/* generate data following a uniform distribution */
	for(i = 0; i < PDH_acnt; i++) {
		atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
		atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
		atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
	}
	/* CPU implementation */
	puts("Running CPU version...");
	memset(histogram, 0, sizeof(*histogram) * num_buckets);
	gettimeofday(&startTime, &Idunno);
	PDH_baseline();
	report_running_time("CPU");
	output_histogram();
	/* GPU implementation */
	puts("\nRunning GPU version...");
	memset(histogram, 0, sizeof(*histogram) * num_buckets);
	gettimeofday(&startTime, &Idunno);
	PDH_gpu();
	report_running_time("GPU");
	output_histogram();
	/* FIX: release the host buffers (the original leaked both). */
	free(atom_list);
	free(histogram);
	return 0;
}
|
20,710 | #include <stdio.h>
// Minimal kernel: each thread prints its thread/block coordinates via
// device-side printf (output is flushed at the next synchronizing call).
__global__ void hello() {
  printf("Hello, CUDA! Thread [%d] in block [%d]\n", threadIdx.x, blockIdx.x);
}
int main( int argc, char** argv ) {
    // Kernel launches are asynchronous: fire a single-thread launch, then
    // block until the device finishes so its printf output actually appears.
    hello<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
20,711 | #include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#define TILE_SIZE 64
#define WARP_SIZE 32
extern "C" void CSR_matvec(int N, int nnz, int* start, int* indices, float* data, float* x, float *y, bool bVectorized);
extern "C" void CSR_create(int N, int nnz, int* start, int * indices, float * data , float * x , float * y, int** start_d, int **indices_d, float **data_d, float **x_d, float **y_d);
extern "C" void CSR_kernel(int N, int nnz, int* start_d, int * indices_d, float * data_d , float * x_d , float * y_d, bool bVectorized);
extern "C" void CSR_destroy(int* start_d, int* indices_d, float* data_d, float* x_d, float* y_d);
extern "C" void ELL_create(int N, int num_cols_per_row, int * indices, float * data , float * x , float * y, int **indices_d, float **data_d, float **x_d, float **y_d);
extern "C" void ELL_kernel(int N, int num_cols_per_row , int * indices_d, float * data_d , float * x_d , float * y_d);
extern "C" void ELL_destroy(int* indices_d, float* data_d, float* x_d, float* y_d);
extern "C" void band_create(int N, int num_cols_per_row, float * data , float * x , float * y, float **data_d, float **x_d, float **y_d);
extern "C" void band_kernel(int N, int num_cols_per_row , float * data_d , float * x_d , float * y_d);
extern "C" void band_destroy(float* data_d, float* x_d, float* y_d);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
cudaError_t error = cudaGetLastError(); \
if (error != cudaSuccess) { \
printf("(CUDA) %s", cudaGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
/**
 * Cuda kernel for: CSR_s(A)x = y — scalar variant, one thread per matrix row.
 */
__global__ void k_csr_mat_vec_mm(const int N, int *start, int* indices, float *data, float *x, float* y) {
	const int row = blockDim.x * blockIdx.x + threadIdx.x;
	if (row >= N) return;  // guard the grid tail
	float acc = 0;
	// walk this row's nonzeros [start[row], start[row+1])
	for (int k = start[row]; k < start[row + 1]; ++k) {
		acc += data[k] * x[indices[k]];
	}
	y[row] = acc;
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
/**
 * Cuda kernel for: CSR_v(A)x = y — vectorized variant, one warp per row.
 * Each lane accumulates a strided partial sum in shared memory, then the warp
 * tree-reduces its 32 partials and lane 0 writes the row result.
 * Requires blockDim.x == TILE_SIZE (shared array size) and a whole number of
 * warps per block.
 */
__global__ void k_csr2_mat_vec_mm(const int N, int *start, int* indices, float *data, float *x, float* y) {
	__shared__ float vals[TILE_SIZE];
	int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
	int warp_id = thread_id / WARP_SIZE;
	int lane = thread_id & (WARP_SIZE - 1);
	int row = warp_id;  // one warp per row, so row is uniform across the warp
	if (row < N) {
		int row_start = start[row];
		int row_end = start[row + 1];
		// compute running sum per thread
		vals[threadIdx.x] = 0;
		for (int jj = row_start + lane; jj < row_end; jj += WARP_SIZE) {
			vals[threadIdx.x] += data[jj] * x[indices[jj]];
		}
		// parallel reduction in shared memory.
		// FIX: the original relied on implicit warp-lockstep between the
		// shared-memory write of one step and the read of the next, which is
		// not guaranteed under independent thread scheduling (Volta+).
		// __syncwarp() is safe here because `row` is warp-uniform, so either
		// all 32 lanes reach it or none do.
		for (int d = WARP_SIZE >> 1; d >= 1; d >>= 1) {
			__syncwarp();
			if (lane < d) vals[threadIdx.x] += vals[threadIdx.x + d];
		}
		// first thread in a warp writes the result
		if (lane == 0) {
			y[row] = vals[threadIdx.x];
		}
	}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
/**
 * Cuda kernel for: ELL(A)x = y — one thread per row; ELL storage is
 * column-major (entry c of row r lives at N*c + r), zeros mark padding.
 */
__global__ void k_ell_mat_vec_mm ( const int N, const int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
	const int row = blockDim.x * blockIdx.x + threadIdx.x;
	if (row >= N) return;
	float acc = 0;
	for (int c = 0; c < num_cols_per_row; ++c) {
		const float v = data[N * c + row];
		// padded slots hold 0 and are skipped
		if (v != 0)
			acc += v * x[indices[N * c + row]];
	}
	y[row] = acc;
}
/**
* Cuda kernel for: Band(A)x = y
*/
/**
 * Cuda kernel for: Band(A)x = y — one thread per row. Diagonal k of the band
 * (k in [0, 2*k_max]) is stored column-major at a[N*k + i]; the matching x
 * entry is x[i + k - k_max].
 */
__global__ void band_matvec(int N, int k_max,
                            float* a, float* x, float* y) {
	// FIX: index from blockDim.x rather than assuming blockDim.x == TILE_SIZE
	// (identical under the current launches, safe under any launch).
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < N) {
		float dot = 0;
		for (int k = 0; k < 2 * k_max + 1; k++) {
			int j = i + k - k_max;
			// FIX: near the top/bottom rows j runs off either end of x; the
			// original read x[j] out of bounds whenever the padded band value
			// happened to be nonzero. Clamp explicitly.
			if (j < 0 || j >= N) continue;
			float val = a[N * k + i];
			if (val != 0) dot += val * x[j];
		}
		y[i] = dot;
	}
}
/**
* Perform: CSR(A)x = y
*/
/**
 * End-to-end CSR(A)x = y: upload the matrix and vectors, run the scalar or
 * vectorized kernel, copy the result back into y, and free device memory.
 */
void CSR_matvec(const int N, const int nnz, int* start, int * indices, float * data , float * x , float * y, const bool bVectorized) {
	int *start_d = 0, *indices_d = 0;
	float *data_d = 0, *x_d = 0, *y_d = 0;
	CSR_create(N, nnz, start, indices, data, x, y, &start_d, &indices_d, &data_d, &x_d, &y_d);
	CSR_kernel(N, nnz, start_d, indices_d, data_d, x_d, y_d, bVectorized);
	// blocking copy: also synchronizes with the kernel launch
	cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
	checkCUDAError();
	CSR_destroy(start_d, indices_d, data_d, x_d, y_d);
}
/**
* Create CSR matrix
*/
/**
 * Allocate device buffers for a CSR matrix plus the x/y vectors and upload
 * the host data. The five out-parameters receive the device pointers.
 */
void CSR_create(const int N, const int nnz,
int * start, int * indices, float * data , float * x , float * y,
int ** start_d, int ** indices_d, float **data_d, float **x_d, float **y_d) {
	const size_t rowBytes = (N + 1) * sizeof(int);   // row-pointer array
	const size_t idxBytes = nnz * sizeof(int);       // column indices
	const size_t valBytes = nnz * sizeof(float);     // nonzero values
	const size_t vecBytes = N * sizeof(float);       // dense vectors
	cudaMalloc((void **) start_d, rowBytes);
	checkCUDAError();
	cudaMemcpy(*start_d, start, rowBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) indices_d, idxBytes);
	checkCUDAError();
	cudaMemcpy(*indices_d, indices, idxBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) data_d, valBytes);
	checkCUDAError();
	cudaMemcpy(*data_d, data, valBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) x_d, vecBytes);
	checkCUDAError();
	cudaMemcpy(*x_d, x, vecBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) y_d, vecBytes);
	checkCUDAError();
	cudaMemcpy(*y_d, y, vecBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
}
/**
* Perform: CSR(A)x = y
*/
/**
 * Launch the CSR mat-vec kernel on already-uploaded device buffers.
 * Vectorized: one warp per row (N * WARP_SIZE threads total);
 * scalar: one thread per row (N threads total).
 */
void CSR_kernel(const int N, const int nnz, int * start_d , int * indices_d, float * data_d , float * x_d , float * y_d, const bool bVectorized) {
	dim3 block(TILE_SIZE, 1, 1);
	// total thread count depends on the variant; round blocks up
	const int totalThreads = bVectorized ? N * WARP_SIZE : N;
	dim3 grid((totalThreads + TILE_SIZE - 1) / TILE_SIZE, 1, 1);
	if (bVectorized) {
		k_csr2_mat_vec_mm <<< grid, block >>> (N, start_d, indices_d, data_d, x_d, y_d);
	} else {
		k_csr_mat_vec_mm <<< grid, block >>> (N, start_d, indices_d, data_d, x_d, y_d);
	}
	checkCUDAError();
}
/**
* Destroy CSR matrix
*/
/**
 * Release all device-side CSR buffers created by CSR_create.
 */
void CSR_destroy(int* start_d, int* indices_d, float* data_d, float* x_d, float* y_d) {
	cudaFree(y_d);
	cudaFree(x_d);
	cudaFree(data_d);
	cudaFree(indices_d);
	cudaFree(start_d);
}
/**
* Create band matrix
*/
/**
 * Allocate device buffers for a band matrix (N rows, num_cols_per_row stored
 * diagonals) plus the x/y vectors, and upload the host data.
 */
void band_create(const int N, const int num_cols_per_row,
float * data , float * x , float * y,
float **data_d, float **x_d, float **y_d) {
	const size_t bandBytes = N * num_cols_per_row * sizeof(float);
	const size_t vecBytes = N * sizeof(float);
	cudaMalloc((void **) data_d, bandBytes);
	checkCUDAError();
	cudaMemcpy(*data_d, data, bandBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) x_d, vecBytes);
	checkCUDAError();
	cudaMemcpy(*x_d, x, vecBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) y_d, vecBytes);
	checkCUDAError();
	cudaMemcpy(*y_d, y, vecBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
}
/**
* Perform: band(A)x = y
*/
/**
 * Launch band(A)x = y on device buffers: one thread per row, grid rounded up.
 */
void band_kernel(int N, int k_max , float * data_d , float * x_d , float * y_d) {
	dim3 block(TILE_SIZE, 1, 1);
	dim3 grid((N + TILE_SIZE - 1) / TILE_SIZE, 1, 1);
	band_matvec <<< grid, block >>> (N, k_max, data_d , x_d, y_d);
	checkCUDAError();
}
/**
* Destroy ELL matrix
*/
/**
 * Release the device buffers created by band_create.
 * (The original header comment said "ELL matrix"; this is the band variant.)
 */
void band_destroy(float* data_d, float* x_d, float* y_d) {
	cudaFree(y_d);
	cudaFree(x_d);
	cudaFree(data_d);
}
/**
* Create ELL matrix
*/
/**
 * Allocate device buffers for an ELL matrix (N rows, num_cols_per_row slots
 * per row, column-major) plus the x/y vectors, and upload the host data.
 */
void ELL_create(const int N, const int num_cols_per_row,
int * indices, float * data , float * x , float * y,
int ** indices_d, float **data_d, float **x_d, float **y_d) {
	const size_t slotCount = (size_t)N * num_cols_per_row;
	const size_t vecBytes = N * sizeof(float);
	cudaMalloc((void **) indices_d, slotCount * sizeof(int));
	checkCUDAError();
	cudaMemcpy(*indices_d, indices, slotCount * sizeof(int), cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) data_d, slotCount * sizeof(float));
	checkCUDAError();
	cudaMemcpy(*data_d, data, slotCount * sizeof(float), cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) x_d, vecBytes);
	checkCUDAError();
	cudaMemcpy(*x_d, x, vecBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
	cudaMalloc((void **) y_d, vecBytes);
	checkCUDAError();
	cudaMemcpy(*y_d, y, vecBytes, cudaMemcpyHostToDevice);
	checkCUDAError();
}
/**
* Perform: ELL(A)x = y
*/
/**
 * Launch ELL(A)x = y on device buffers: one thread per row, grid rounded up.
 */
void ELL_kernel(int N, int num_cols_per_row , int * indices_d, float * data_d , float * x_d , float * y_d) {
	dim3 block(TILE_SIZE, 1, 1);
	dim3 grid((N + TILE_SIZE - 1) / TILE_SIZE, 1, 1);
	k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
	checkCUDAError();
}
/**
* Destroy ELL matrix
*/
/**
 * Release the device buffers created by ELL_create.
 */
void ELL_destroy(int* indices_d, float* data_d, float* x_d, float* y_d) {
	cudaFree(y_d);
	cudaFree(x_d);
	cudaFree(data_d);
	cudaFree(indices_d);
}
|
20,712 | #include "includes.h"
__device__ double dnorm(float x, float mu, float sigma)
{
float std = (x - mu)/sigma;
float e = exp( - 0.5 * std * std);
return(e / ( sigma * sqrt(2 * 3.141592653589793)));
}
/*
 * In-place normal density: vals[i] <- density of Normal(mu, sigma) at vals[i].
 * One thread per element; launch with at least N total threads.
 */
__global__ void dnorm_kernel(float *vals, int N, float mu, float sigma)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < N) {
		// FIX: the original stored `sigma` itself instead of the density.
		// Same arithmetic as the dnorm() helper, inlined here.
		float z = (vals[idx] - mu) / sigma;
		float e = exp( - 0.5 * z * z);
		vals[idx] = e / ( sigma * sqrt(2 * 3.141592653589793));
	}
}
20,713 | #include <iostream>
#include <stdio.h>
#include <algorithm>
#include <cmath>
__global__
// Mish activation (x * tanh(softplus(x))) with float4-vectorized grid-stride
// loop. FIX: the original iterated `i < n` while loading a full float4 at i,
// reading/writing up to 3 elements past the end whenever n % 4 != 0 (as it is
// in this file's benchmark, N = 2^28 + 3). The vector loop now stops at the
// last complete group and a scalar pass handles the tail.
// Assumes tx/aten_mul are 16-byte aligned (true for cudaMalloc allocations).
void mish_gridstride(int n, float* tx, float* aten_mul) {
  int stride = gridDim.x * blockDim.x * 4;
  for (int i = (threadIdx.x + blockDim.x * blockIdx.x) * 4; i + 3 < n; i += stride) {
    float4 tx4 = __ldg(reinterpret_cast<float4*>(tx + i));
    tx4.x = tx4.x * tanh(log1p(exp(tx4.x)));
    tx4.y = tx4.y * tanh(log1p(exp(tx4.y)));
    tx4.z = tx4.z * tanh(log1p(exp(tx4.z)));
    tx4.w = tx4.w * tanh(log1p(exp(tx4.w)));
    *reinterpret_cast<float4*>(aten_mul + i) = tx4;
  }
  // scalar tail: the final n % 4 elements
  int tail_start = n - (n % 4);
  for (int i = tail_start + threadIdx.x + blockDim.x * blockIdx.x; i < n;
       i += gridDim.x * blockDim.x) {
    float elt = tx[i];
    aten_mul[i] = elt * tanh(log1p(exp(elt)));
  }
}
__global__
// Mish activation, one thread per float4 group — kept as the deliberately
// naive benchmark variant: when n % 4 != 0 the last in-range group still
// loads/stores a full float4 and touches up to 3 elements past the end
// (the `_fix` variant below handles that tail correctly).
void mish_threadper(int n, float* tx, float* aten_mul) {
// element index of this thread's 4-wide group
int i = (threadIdx.x + blockDim.x * blockIdx.x) * 4;
if (i < n) {
float4 tx4 = __ldg(reinterpret_cast<float4*>(tx + i));
// mish(x) = x * tanh(softplus(x)), softplus via log1p(exp(x))
tx4.x = tx4.x * tanh(log1p(exp(tx4.x)));
tx4.y = tx4.y * tanh(log1p(exp(tx4.y)));
tx4.z = tx4.z * tanh(log1p(exp(tx4.z)));
tx4.w = tx4.w * tanh(log1p(exp(tx4.w)));
*reinterpret_cast<float4*>(aten_mul + i) = tx4;
}
}
__global__
// Mish activation, one thread per float4 group, with a correct scalar tail:
// the thread whose index equals the number of complete groups processes the
// trailing n % 4 elements one at a time.
void mish_threadper_fix(int n, float* tx, float* aten_mul) {
  const int gid = threadIdx.x + blockDim.x * blockIdx.x;
  const int groups = n / 4;
  if (gid < groups) {
    float4 v = __ldg(reinterpret_cast<float4*>(tx) + gid);
    v.x = v.x * tanh(log1p(exp(v.x)));
    v.y = v.y * tanh(log1p(exp(v.y)));
    v.z = v.z * tanh(log1p(exp(v.z)));
    v.w = v.w * tanh(log1p(exp(v.w)));
    reinterpret_cast<float4*>(aten_mul)[gid] = v;
  }
  const int leftover = n % 4;
  if (leftover && gid == groups) {
    for (int idx = n - leftover; idx < n; ++idx) {
      float elt = tx[idx];
      aten_mul[idx] = elt * tanh(log1p(exp(elt)));
    }
  }
}
// Integer ceiling division: smallest integer >= num/den for positive operands.
template<typename T, typename U>
constexpr T ceildiv(T num, U den) {
  return (num + den - 1) / den;
}
/*
 * Mish-kernel benchmark driver: allocates N = 2^28 + 3 floats (the +3 makes
 * n % 4 != 0, exercising the float4 tail handling), then times the selected
 * kernel variants over 500 launches each, reporting max error against the
 * expected mish(3.0) value, mean duration, and effective bandwidth.
 */
int main() {
constexpr int N = (1 << 28) + 3;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N * sizeof(float));
y = (float*)malloc(N * sizeof(float));
cudaMalloc(&d_x, N * sizeof(float));
cudaMalloc(&d_y, N * sizeof(float));
// constant inputs so the expected output is a single known value
for (int i = 0; i < N; i++) {
x[i] = 3.0f;
y[i] = 2.0f;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
// Block-count sweep for the grid-stride kernel. Disabled: maxBlocks is 0,
// so this loop body never runs (kept by the author for quick re-enabling).
{
constexpr int blockSize = 512;
constexpr int maxBlocks = 0; //ceildiv(N / 4, blockSize);
for (int numBlocks = 512; numBlocks <= maxBlocks; numBlocks <<= 1) {
std::cout << "numBlocks: " << numBlocks << "\n";
float millis = 0.0f;
float temp = 0.0f;
for (int i = 0; i < 500; i++) {
cudaEventRecord(start);
mish_gridstride<<<numBlocks, blockSize>>>(N, d_x, d_y);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&temp, start, stop);
millis += temp;
}
millis = millis / 500;
cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
float mv = 3.0f * tanhf(std::log1p(std::exp(3.0)));
maxError = std::max(maxError, std::abs(mv - y[i]));
}
printf("max error: %f\n", maxError);
printf("duration (ms): %f\n", millis);
// factor 3: the gridstride variant was measured as read+write+reread here
printf("effective bandwidth (gb/s): %f\n", (float)N * sizeof(float) * 3 / millis / 1e6);
}
}
// Main comparison loop. Currently restricted to algo == 2 (thread-per-element
// with vector tail); widen the range to re-run the other variants.
for (int algo = 2; algo < 3; algo++) {
switch (algo) {
case 0:
std::cout << "algorithm: grid stride loop\n";
break;
case 1:
std::cout << "algorithm: thread per element\n";
break;
case 2:
std::cout << "algorithm: thread per element with vector tail\n";
break;
}
constexpr int blockSize = 512;
// one thread per float4 group
int nBlocks = ceildiv(N, blockSize) / 4;
if (algo == 0) {
nBlocks = 8192;
}
float millis = 0.0f;
float temp = 0.0f;
for (int i = 0; i < 500; i++) {
cudaEventRecord(start);
switch (algo) {
case 0:
mish_gridstride<<<nBlocks, blockSize>>>(N, d_x, d_y);
break;
case 1:
mish_threadper<<<nBlocks, blockSize>>>(N, d_x, d_y);
break;
case 2:
mish_threadper_fix<<<nBlocks, blockSize>>>(N, d_x, d_y);
break;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&temp, start, stop);
millis += temp;
}
millis = millis / 500;
cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
float mv = 3.0f * tanhf(std::log1p(std::exp(3.0)));
maxError = std::max(maxError, std::abs(mv - y[i]));
}
printf("max error: %f\n", maxError);
printf("duration (ms): %f\n", millis);
// factor 2: one read + one write per element
printf("effective bandwidth (gb/s): %f\n", (float)N * sizeof(float) * 2 / millis / 1e6);
}
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
return 0;
}
|
20,714 | /**
* CUDA organizes execution into grids. Each device contains grids. Each grid
* contains blocks. Each block contains threads.
* Device[id]->Grid[id]->Block[id]->Thread[id].
*/
// Didactic kernel: demonstrates the CUDA execution-hierarchy built-ins
// (gridDim, blockDim, blockIdx, threadIdx) and how a 2-D global coordinate is
// derived from them. None of the computed values are used or written back —
// `in`, `out` and `size` are intentionally ignored.
__global__ void OrgKernel(void * in, void * out, int size) {
// block and grid dimensions describe how large the execution grid/block is
// thread/block indexes specify the index into the block
// grid dimensions have an x and y coordinate
// after CUDA 4 can have z coordinate
int gridDimX = gridDim.x;
// block dimensions have an x, y and z coordinate
int blockDimX = blockDim.x;
// block indexes tell you where in the block you are executing and have an x
// and y coordinate. Ranges from 0 to gridDim.x - 1
int blockIdX = blockIdx.x;
// thread indexes mirror block dimensions and have an x, y and z coordinate
// x*y*z <= totalNumberOfThreadsAvailable
int threadIdX = threadIdx.x;
// standard global coordinate = block offset + thread offset, per axis
int xCoord = blockIdX*blockDimX + threadIdX;
int yCoord = blockIdx.y*blockDim.y + threadIdx.y;
}
int main(void) {
    // Grid sizes cannot be chosen directly; you pick threads-per-block and
    // blocks-per-grid. Example: a 512x512 image with 64 threads per block.
    // (layout taken from the stackoverflow answer cited in the original:
    // http://stackoverflow.com/questions/2392250)
    int imageWidth = 512;
    int imageHeight = 512;
    int desiredThreadsPerBlock = 64;
    // 512*512 / 64 = 4096 blocks in total (kept for the worked example).
    int neededBlocks = (imageWidth*imageHeight)/desiredThreadsPerBlock;
    // 8 x 8 threads = 64 threads per block.
    dim3 threadsPerBlock(8, 8);
    // 512/8 = 64 blocks along each axis; 64*64 = 4096 blocks = neededBlocks.
    dim3 numBlocks(imageWidth/threadsPerBlock.x,
                   imageHeight/threadsPerBlock.y);
    // <<<grid, block>>>: first arg sizes the grid (64x64 blocks), second
    // sizes each block (8x8 threads). The kernel ignores its arguments, so
    // NULL buffers are safe here.
    OrgKernel <<<numBlocks,threadsPerBlock>>>((void*)NULL, (void*)NULL, imageWidth*imageHeight);
}
|
20,715 | // matrix vector multiplecation
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <vector>
using namespace std::chrono;
#define NUM_THREADS_PERBLOCK 128
// the macro to check the cudaAPI return code
#define cudaCheck(error) \
if (error != cudaSuccess) { \
printf("Fatal error: %s at %s:%d\n", cudaGetErrorString(error), __FILE__, \
__LINE__); \
exit(1); \
}
// the kernel operation executed by each thread
// the m and n is the size of input
// Matrix-vector multiply kernel for an m x n row-major matrix.
// Each thread walks the m*n flat element grid in strides of
// gridDim.x * blockDim.x, multiplies one matrix element by the matching
// vector entry, and accumulates into its row's slot of dAns via atomicAdd
// (dAns must be zeroed by the caller before launch).
// NOTE(review): float atomicAdd ordering is nondeterministic, so results may
// differ in the low bits between runs.
__global__ void MVfunc(float* dMatrix, float* dVector, float* dAns, size_t m,
                       size_t n) {
  // for each thread
  size_t threadIndex = threadIdx.x + (blockIdx.x * blockDim.x);
  // elem num is more than thread number in all blocks
  size_t elemid = 0;
  size_t loopCount = 0;
  // it becomes to the naive case if there is enough numbers of block
  // and every thread access one data point
  //if (threadIndex < 10) {
  //  printf("loopCount1 %ld", loopCount);
  //}
  // total number of threads in the launch = stride of the element loop
  size_t elemAllBlocks = gridDim.x * blockDim.x;
  while (true) {
    //if (threadIndex < 10) {
    //  printf("loopCount2 %ld", loopCount);
    //}
    // add parathesis explicitly
    elemid = (loopCount * elemAllBlocks) + threadIndex;
    //if (threadIndex < 10) {
    //  printf(
    //      "threadIndex %ld elemid %ld loopCount %ld gridDim.x %d blockDim.x "
    //      "%d\n",
    //      threadIndex, elemid, loopCount, gridDim.x, blockDim.x);
    //}
    // all elements consumed -> this thread is done
    if (elemid >= m * n) {
      break;
    }
    // decompose flat index into (row, column) of the row-major matrix
    size_t rowIndex = elemid / n;
    size_t columnIndx = elemid % n;
    // float a1 = dMatrix[rowIndex * n + columnIndx];
    float a1 = dMatrix[elemid];
    float a2 = dVector[columnIndx];
    float v = a1 * a2;
    // printf("n gindex %ld rowindex %ld columnIndx %ld a1 %lf a2 %lf\n",
    // gindex,
    //        rowindex, columnIndx, a1, a2);
    // put it into the dAns vector
    // using atomicAdd to avoid the race condition
    atomicAdd(&dAns[rowIndex], v);
    loopCount++;
  }
  return;
}
// do the memory copy and the data partition
// Host driver: copies the m x n matrix and n-vector to the GPU, launches
// MVfunc, and copies the m-element result back into `ans`. Also prints the
// three phases' durations (microseconds) to stdout.
void gpuMV(std::vector<float>& matrix, std::vector<float>& vector,
           std::vector<float>& ans, size_t m, size_t n) {
  float* dMatrix;
  float* dVector;
  float* dAns;
  // set the device id
  cudaSetDevice(0);
  // allocate memory on gpu
  cudaCheck(cudaMalloc((void**)&dMatrix, m * n * sizeof(float)));
  cudaCheck(cudaMalloc((void**)&dVector, n * sizeof(float)));
  cudaCheck(cudaMalloc((void**)&dAns, m * sizeof(float)));
  // the kernel accumulates with atomicAdd, so dAns must start at zero
  cudaCheck(cudaMemset(dAns, 0, m * sizeof(float)));
  // copy the input data from the host into the device
  auto memcpy1start = high_resolution_clock::now();
  cudaCheck(cudaMemcpy(dMatrix, matrix.data(), m * n * sizeof(float),
                       cudaMemcpyHostToDevice));
  cudaCheck(cudaMemcpy(dVector, vector.data(), n * sizeof(float),
                       cudaMemcpyHostToDevice));
  auto memcpy1end = high_resolution_clock::now();
  auto memcpy1DurationMicro =
      duration_cast<microseconds>(memcpy1end - memcpy1start);
  std::cout << "memcpy to device time " << memcpy1DurationMicro.count()
            << std::endl;
  // do computation
  auto kernelstart = high_resolution_clock::now();
  size_t blockSize = 64;
  MVfunc<<<blockSize, NUM_THREADS_PERBLOCK>>>(dMatrix, dVector, dAns, m, n);
  // FIX: kernel launches are asynchronous — without these two lines the
  // "kernel time" only measured launch overhead, and launch/runtime errors
  // went unnoticed.
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  auto kernelstop = high_resolution_clock::now();
  auto kernelDurationMicro =
      duration_cast<microseconds>(kernelstop - kernelstart);
  std::cout << "kernel time " << kernelDurationMicro.count() << std::endl;
  auto memcpy2start = high_resolution_clock::now();
  // copy results back, set it into the vector direactly
  cudaCheck(cudaMemcpy((float*)ans.data(), dAns, m * sizeof(float),
                       cudaMemcpyDeviceToHost));
  auto memcpy2end = high_resolution_clock::now();
  auto memcpy2Macro = duration_cast<microseconds>(memcpy2end - memcpy2start);
  std::cout << "memcpy to host time " << memcpy2Macro.count() << std::endl;
  // FIX: the three device buffers were leaked by the original
  cudaCheck(cudaFree(dMatrix));
  cudaCheck(cudaFree(dVector));
  cudaCheck(cudaFree(dAns));
}
// CPU reference: returns ans where ans[i] = sum_j matrix[i*n+j] * vector[j]
// (matrix is row-major m x n; the (0,0) element is the top-left corner).
std::vector<float> cpuMV(std::vector<float>& matrix, std::vector<float>& vector,
                         size_t m, size_t n) {
  std::vector<float> ans;
  ans.reserve(m);
  for (size_t row = 0; row < m; row++) {
    float acc = 0;
    const float* rowPtr = matrix.data() + row * n;
    for (size_t col = 0; col < n; col++) {
      acc += rowPtr[col] * vector[col];
    }
    ans.push_back(acc);
  }
  return ans;
}
// Driver: builds a deterministic 100x200 test matrix and 200-vector, runs the
// CPU reference and the GPU path, times both, and reports any element whose
// CPU/GPU difference exceeds a loose epsilon (float atomicAdd ordering on the
// GPU makes small differences expected).
int main(int argc, char** argv) {
  // int matrix size m*n
  size_t m = 100;
  size_t n = 200;
  // matrix (it is unnecessary to use the insert vector here)
  std::vector<float> matrix(m * n);
  // vector is n*1
  std::vector<float> vector(n);
  // init matrix and vector
  srand(static_cast<unsigned>(time(0)));
  for (size_t i = 0; i < m; i++) {
    for (size_t j = 0; j < n; j++) {
      // matrix[i * m + j] =
      //     static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
      // deterministic fill (the random fill above is kept for reference)
      matrix[i * n + j] = (i * 10 + j) * 0.1;
    }
  }
  // init vector
  for (size_t j = 0; j < n; j++) {
    // vector[j] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    vector[j] = j * 0.1;
  }
  // std::cout << "debug input 1 " << matrix[1 * n + 1] << std::endl;
  // ans is m*1
  std::vector<float> ansCPU;
  std::vector<float> ansGPU(m);
  auto cpustart = high_resolution_clock::now();
  ansCPU = cpuMV(matrix, vector, m, n);
  auto cpuend = high_resolution_clock::now();
  auto cputime = duration_cast<microseconds>(cpuend - cpustart);
  std::cout << "cputime time " << cputime.count() << std::endl;
  std::cout << "gputime time: " << std::endl;
  gpuMV(matrix, vector, ansGPU, m, n);
  // compare the difference
  // the max diff is around 0.25 in this case
  float epsilon = 0.1;
  for (int j = 0; j < m; j++) {
    float diff = fabs(ansCPU[j] - ansGPU[j]);
    if (diff > epsilon) {
      std::cout << "error " << j << " " << ansCPU[j] << ", " << ansGPU[j]
                << " diff " << diff << std::endl;
    }
  }
  return 0;
}
20,716 | /* Andrew Miller <amiller@dappervision.com>
*
* Cuda 512*512*512*4bytes test
*
* According to the KinectFusion UIST 2011 paper, it's possible
* to do a sweep of 512^3 voxels, 32-bits each, in ~2ms on a GTX470.
*
* This code is a simple benchmark accessing 512^3*2 short ints.
* voxel has two 16-bit components. In this benchmark kernel, we
* simply increment these values by a constant K. More than anything
* it's a test of the memory bandwidth.
*
* On my GTX470 card, this kernel takes 10.7ms instead of ~2ms. Is there
* a faster way to do this?
*
* Citation: http://dl.acm.org/citation.cfm?id=2047270
* Public gdocs pdf link: http://tinyurl.com/6xlznbx
*/
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#include <assert.h>
const int N_DATA = (512*512*512);
const int N_BYTES = (N_DATA*2);
const int N_GRID = 512*4;
const int N_BLOCK = 512;
const int N_CHUNK = 8;
const int N_FAN = N_DATA/N_GRID/N_BLOCK/N_CHUNK;
const int K = 13;
const int N_LOOPS = 10;
// Eight packed 16-bit values, 16-byte aligned so a whole short8 can be moved
// with a single 128-bit (int4) load/store in the benchmark kernels.
struct __align__(16) short8 {
  short s0, s1, s2, s3, s4, s5, s6, s7;
};
/*
Each kernel processes several adjacent elements
N_DATA = (N_GRID) * (N_FAN) * (N_BLOCK) * (N_CHUNK) = 512*512*512*2
*/
// Add the global constant K to one 16-bit component, in place.
__device__ void _incr_short(short int &s) {
  s = s + K;
}
// Reads the entire buffer as float4s; every thread performs the same full
// sweep. The loaded value `d` is never used, so the compiler is free to
// eliminate these loads entirely — NOTE(review): as a read-bandwidth probe
// this kernel is unreliable for that reason; inspect the SASS before trusting
// its timings.
__global__ void copy_data_float(float4 *data) {
  for (int i = 0; i < N_BYTES / sizeof(float4); i ++) {
    float4 d = data[i];
  }
}
// Increments every short in the buffer by K using one 16-byte (int4) load and
// store per N_CHUNK=8 shorts. Index layout: each block owns a contiguous
// N_FAN*N_BLOCK*N_CHUNK region; within it, iteration i strides by
// N_BLOCK*N_CHUNK and each thread handles one 8-short chunk.
__global__ void incr_data3(short int *data) {
  // Outer loop skips by strides of N_BLOCK*N_CHUNK
  for (int i = 0; i < N_FAN; i++) {
    int idx = blockIdx.x*(N_FAN*N_BLOCK*N_CHUNK) + i*(N_BLOCK*N_CHUNK) + threadIdx.x*(N_CHUNK);
    // load 16 bytes at once, reinterpret as eight shorts
    int4 *dd = (int4 *) &data[idx];
    int4 d_ = *dd;
    short8 d = *((short8 *) &d_);
    _incr_short(d.s0);
    _incr_short(d.s1);
    _incr_short(d.s2);
    _incr_short(d.s3);
    _incr_short(d.s4);
    _incr_short(d.s5);
    _incr_short(d.s6);
    _incr_short(d.s7);
    // write the 16-byte chunk back in one store
    *dd = *((int4 *) &d);
  }
}
/*
__global__ void incr_data1(short int *data) {
// Outer loop skips by strides of N_BLOCK*N_CHUNK
for (int i = 0; i < N_FAN; i++) {
int idx = blockIdx.x*(N_FAN*N_BLOCK*N_CHUNK) + i*(N_BLOCK*N_CHUNK) + threadIdx.x*(N_CHUNK);
// Inner loop processes 16 bytes (8 short ints) at once (a chunk)
#pragma unroll
for (int j = 0; j < N_CHUNK; j+=8, idx++) {
short4 *d = (short4 *) data;
d[(idx+j)/4].x += K;
d[(idx+j)/4].y += K;
d[(idx+j)/4].z += K;
d[(idx+j)/4].w += K;
}
}
}
__global__ void copy_data1(short int *data, short int *out) {
// Outer loop skips by strides of N_BLOCK*N_CHUNK
for (int i = 0; i < N_FAN; i++) {
int idx = blockIdx.x*(N_FAN*N_BLOCK*N_CHUNK) + i*(N_BLOCK*N_CHUNK) + threadIdx.x*(N_CHUNK);
// Inner loop processes 16 bytes (8 short ints) at once (a chunk)
#pragma unroll
for (int j = 0; j < N_CHUNK; j+=8, idx++) {
short4 *d = (short4 *) data;
short4 *o = (short4 *) out;
o[(idx+j)/4].x = d[(idx+j)/4].x;
o[(idx+j)/4].y = d[(idx+j)/4].y;
o[(idx+j)/4].z = d[(idx+j)/4].z;
o[(idx+j)/4].w = d[(idx+j)/4].w;
}
}
}
__global__ void copy_data2(short int *data, short int *out) {
// Outer loop skips by strides of N_BLOCK*N_CHUNK
for (int i = 0; i < N_FAN; i++) {
int idx = blockIdx.x*(N_FAN*N_BLOCK*N_CHUNK) + i*(N_BLOCK*N_CHUNK) + threadIdx.x*(N_CHUNK);
int4 *dd = (int4 *) &data[idx];
int4 *oo = (int4 *) &out[idx];
short8 d = *((short8 *) dd);
*oo = *((int4 *) &d);
}
}
__global__ void incr_data2(short int *data) {
// Outer loop skips by strides of N_BLOCK*N_CHUNK
for (int i = 0; i < N_FAN; i++) {
int idx = blockIdx.x*(N_FAN*N_BLOCK*N_CHUNK) + i*(N_BLOCK*N_CHUNK) + threadIdx.x*(N_CHUNK);
// Inner loop processes 16 bytes (8 short ints) at once (a chunk)
for (int j = 0; j < N_CHUNK; j+=8, idx++) {
short8 *d = (short8 *) data;
d[(idx+j)/8].s0 += K;
d[(idx+j)/8].s1 += K;
d[(idx+j)/8].s2 += K;
d[(idx+j)/8].s3 += K;
d[(idx+j)/8].s4 += K;
d[(idx+j)/8].s5 += K;
d[(idx+j)/8].s6 += K;
d[(idx+j)/8].s7 += K;
}
}
}
*/
// Benchmark driver: zero-fills a 512^3 x 2-short buffer, runs incr_data3
// N_LOOPS times under CUDA event timing, verifies every element equals
// N_LOOPS*K, and prints the per-sweep time.
int main(void) {
  short int *data_gpu;
  short int *data_gpuA;
  short int *data_cpu;
  cudaMalloc((void **) &data_gpu, N_BYTES);
  cudaMalloc((void **) &data_gpuA, N_BYTES);
  // calloc -> buffer starts at zero, so the expected value is N_LOOPS*K
  data_cpu = (short int *) calloc(N_BYTES, 1);
  cudaMemcpy(data_gpu, data_cpu, N_BYTES, cudaMemcpyHostToDevice);
  dim3 dimBlock(N_BLOCK,1,1);
  dim3 dimGrid(N_GRID,1,1);
  cudaEvent_t e_start, e_stop;
  cudaEventCreate(&e_start);
  cudaEventCreate(&e_stop);
  cudaEventRecord(e_start);
  // Run the kernel several times
  for (int i = 0; i < N_LOOPS; i++) {
    incr_data3<<<dimGrid, dimBlock>>>(data_gpu);
  }
  cudaEventRecord(e_stop);
  cudaEventSynchronize(e_stop);
  // Copy back to the host and check we have what we expect
  cudaMemcpy(data_cpu, data_gpu, N_BYTES, cudaMemcpyDeviceToHost);
  for (int i = 0; i < N_DATA; i++) {
    assert(data_cpu[i] == (short)N_LOOPS*K);
  }
  // Timing information
  float ms;
  cudaEventElapsedTime(&ms, e_start, e_stop);
  printf("%d sweeps of %.1f megabytes in %.1fms (avg %.1fms)\n",
         N_LOOPS, N_BYTES/1000.0/1000.0, ms, ms/N_LOOPS);
  // FIX: the events and data_gpuA were leaked by the original
  cudaEventDestroy(e_start);
  cudaEventDestroy(e_stop);
  cudaFree(data_gpu);
  cudaFree(data_gpuA);
  free(data_cpu);
  return 0;
}
|
20,717 |
// Set the first m entries of h_v to `value`.
inline void fill_host(int *h_v, int value, int m){
    for (int idx = 0; idx != m; ++idx) {
        h_v[idx] = value;
    }
}
|
20,718 | #include <stdio.h>
#include <stdint.h>
// Round-trips a heap pointer through an integer and back, printing all three.
int main(){
    int *a = (int *) malloc(sizeof(int));
    // FIX: store the full pointer value. The original stuffed it into a
    // 32-bit int, which truncates on 64-bit platforms (the round-tripped
    // pointer c was then wrong, and converting it back was UB to use).
    uintptr_t b = reinterpret_cast<uintptr_t>(a);
    int *c = reinterpret_cast<int *>(b);
    // cast to unsigned long long so the integer prints in full width without
    // needing <inttypes.h>
    printf("%p %llx %p\n", (void *)a, (unsigned long long)b, (void *)c);
    free(a);
    return 0;
}
|
20,719 | #include "includes.h"
// Sequential-write benchmark kernel: stores zero over `elements` ints in a
// grid-stride sweep, and repeats the whole sweep `reps` times.
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
    const size_t first = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t stride = blockDim.x * gridDim.x;
    for (size_t r = 0; r < reps; ++r) {
        for (size_t idx = first; idx < elements; idx += stride) {
            buffer[idx] = 0;
        }
    }
}
// Logistic sigmoid: 1 / (1 + e^-x).
// FIX: use float literals so the whole expression stays in fp32 — the
// original `1.0` double literals promoted the division and addition to fp64.
__device__ float sigmoid (float x)
{
    return 1.0f / (1.0f + expf (-x));
}
extern "C"
// Element-wise sigmoid: destination[i] = sigmoid(source[i]) for i < length.
// One element per thread; the guard handles the ragged final block.
__global__ void sigmoidKernel (int length, float *source, float *destination)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= length) {
        return;
    }
    destination[gid] = sigmoid(source[gid]);
}
20,721 | #include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA API call with file/line context; exits with the error
// code unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
const int blocksize = 1024;
const int factor = 16;
const int arraysize = blocksize / factor;
// Contended-add probe: each of the launch's threads maps onto one of the
// `arraysize` slots (tid % arraysize), so `factor` threads hit each slot.
// NOTE(review): the plain `sum[idx] += a[idx]` is a data race between those
// contending threads — apparently deliberate, to contrast with the atomicAdd
// on the next line — so its contribution to the final value is undefined.
template <typename T>
__global__ void addition_test_kernel(T * a, T * sum) {
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int idx = (tid) % arraysize;
	sum[idx] += a[idx];
	atomicAdd(&sum[idx], a[idx]);
	// debug trace for one slot; printf ordering across threads is arbitrary
	if (idx == 2)
		printf("%d %d %d %d -> ", idx, tid, sum[idx], a[idx]);
}
// Runs addition_test_kernel on one block of `blocksize` threads over
// `arraysize` slots initialised to a[i]=1, sum[i]=0, then prints the sums.
// FIX: the original allocated b/answer/bd/answerd without ever using them and
// leaked every host and device allocation; only the buffers actually used are
// kept, and everything is released on exit.
template <typename T>
void test_atomic_int() {
	dim3 dimBlock(blocksize, 1);
	dim3 dimGrid(1, 1);
	T *a, *sum, *ad, *sumd;
	a = (T*)malloc(arraysize * sizeof(T));
	sum = (T*)malloc(arraysize * sizeof(T));
	for (int i = 0; i < arraysize; ++i) {
		a[i] = 1;
		sum[i] = 0;
	}
	cudaMalloc((void**)&ad, arraysize * sizeof(T));
	gpuErrchk(cudaPeekAtLastError());
	cudaMalloc((void**)&sumd, arraysize * sizeof(T));
	gpuErrchk(cudaPeekAtLastError());
	cudaMemcpy(ad, a, arraysize * sizeof(T), cudaMemcpyHostToDevice);
	gpuErrchk(cudaGetLastError());
	cudaMemcpy(sumd, sum, arraysize * sizeof(T), cudaMemcpyHostToDevice);
	gpuErrchk(cudaPeekAtLastError());
	printf("addition kernel;\n");
	addition_test_kernel<<<dimGrid, dimBlock>>>(ad, sumd);
	gpuErrchk(cudaPeekAtLastError());
	// synchronize before reading results (cudaMemcpy also synchronizes, but
	// an explicit sync surfaces kernel-execution errors here)
	cudaDeviceSynchronize();
	cudaMemcpy(sum, sumd, arraysize * sizeof(T), cudaMemcpyDeviceToHost);
	for (int i = 0; i < arraysize; ++i) {
		printf("sum[%d]: %d\n", i, sum[i]);
	}
	cudaFree(ad);
	cudaFree(sumd);
	free(a);
	free(sum);
}
// Entry point: run the atomic-add contention test for 32-bit ints.
int main(int argc, char *argv[])
{
	test_atomic_int<int32_t>();
	return 0;
}
|
20,722 | #include <iostream>
#include <vector>
#include <string.h>
//#include <stdio.h>
//#include <sys/types.h>
//#include <unistd.h>
using namespace std;
/*string* word(string s)
{
string[] word_array = new string[20];
for(auto x: s)
{
if(x == ' ')
{
}
}
}*/
// Reads one line from stdin, splits it on spaces, and prints the words in
// reverse order.
int main()
{
	cout << "Hello" << endl;
	vector<string> user_in;
	char s[200];
	if (fgets(s, sizeof(s), stdin) == NULL)
		return 0;
	// FIX: scan only the characters actually read (the original iterated over
	// all sizeof(s)=200 bytes, including uninitialised garbage past the
	// terminator), and strip the trailing newline fgets keeps.
	size_t len = strlen(s);
	if (len > 0 && s[len - 1] == '\n')
		s[--len] = '\0';
	string temp;
	for (size_t i = 0; i < len; i++)
	{
		if (s[i] == ' ')
		{
			if (!temp.empty())
				user_in.push_back(temp);
			temp = "";
		}
		else { temp += s[i]; }
	}
	// FIX: the final token has no trailing space, so the original never
	// pushed it.
	if (!temp.empty())
		user_in.push_back(temp);
	// FIX: the original started at i = size(), reading one element past the
	// end of the vector (undefined behaviour).
	for (int i = (int)user_in.size() - 1; i >= 0; i--)
	{
		cout << user_in[i] << endl;
	}
	return 0;
}
|
20,723 | #include <cuda.h>
#include <stdio.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <time.h>
// One curandState per thread: same seed, per-thread subsequence id, zero
// offset — curand_init with distinct subsequences gives each thread an
// independent stream. rngState must hold one state per launched thread.
__global__ void initPRNG(int seed, curandState *rngState)
{
  unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
  curand_init(seed, tid, 0, &rngState[tid]);
}
// Fills data[0..n) with integers in [0, q) via a grid-stride loop. The RNG
// state is staged through a register-local copy and written back at the end
// so the next launch continues the same stream.
// NOTE(review): curand()%q is exactly uniform only when q divides 2^32
// (true for the q=4 used by main; other q introduce modulo bias).
__global__ void generate_uniform_int(int n, int *data, int q, curandState *rngState)
{
  unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
  unsigned nGrid = blockDim.x*gridDim.x;
  curandState localState = rngState[tid];
  for(int i=tid; i<n; i+= nGrid)
    data[i] = curand(&localState)%q;
  rngState[tid] = localState;
}
// Generates 2^28 integers in [0, q) on the GPU (the last of 10 launches is
// kept), then histograms them on the host to eyeball uniformity.
int main()
{
  int nBlocks = 128, nThreads = 128;
  int seed = 12345;
  int q=4;
  // NOTE(review): variable-length array — fine with gcc/nvcc, not standard C++
  int hist[q];
  // default PRNG
  curandState *rngState_dev;
  // one RNG state per thread in the (128 x 128)-thread launch
  cudaMalloc(&rngState_dev, sizeof(curandState)*nBlocks*nThreads);
  initPRNG<<<nBlocks, nThreads>>>(seed, rngState_dev);
  int n=1<<28;
  int *hostData, *devData;
  size_t memSize=sizeof(int)*n;
  // host and device memory allocation
  hostData = (int *)malloc(memSize);
  cudaMalloc(&devData, memSize);
  // each launch overwrites devData; 10 launches advance the RNG streams
  for(int i=0; i<10; i++)
    generate_uniform_int<<<nBlocks, nThreads>>>(n, devData, q, rngState_dev);
  cudaMemcpy(hostData, devData, memSize, cudaMemcpyDeviceToHost);
  // histogram
  for(int i=0; i<q; i++) hist[i] = 0;
  for(int i=0; i<n; i++) {
    hist[hostData[i]] ++;
  }
  for(int i=0; i<q; i++) printf("%d %d\n", i, hist[i]);
  cudaFree(rngState_dev);
  cudaFree(devData); free(hostData);
}
|
20,724 | #include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
// Dump an n x n matrix (row-major flat array) to stdout, one row per line.
void print_matrix(int* states, int n)
{
    std::cout << "matrix:" << std::endl;
    for (int r = 0; r < n; ++r)
    {
        for (int c = 0; c < n; ++c)
        {
            std::cout << states[r * n + c] << " ";
        }
        std::cout << std::endl;
    }
}
//code do main the job on GPU
// Counts the live neighbours (8-neighbourhood, non-wrapping borders) of the
// cell this thread owns and stores the count into active_neigb_d[i].
// Launched 1-D by main (blockIdx.y == 0), so i reduces to the flat thread id.
__global__ void countActiveNeigb(int* states_d, int* active_neigb_d, int n)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int i = row * blockDim.x + col;
	int peso_i = i / n;
	int peso_j = i % n;
	// FIX: accumulate in a register and overwrite the output slot. The
	// original `+=`-ed straight into active_neigb_d, so counts from earlier
	// generations (and the initial garbage copied from an uninitialised host
	// buffer) were never cleared, corrupting every generation after the first.
	int count = 0;
	for (int x = peso_i - 1; x < peso_i + 2; ++x)
	{
		for (int y = peso_j - 1; y < peso_j + 2; ++y)
		{
			if (x >= 0 && y >= 0 && x < n && y < n)
			{
				count += states_d[x * n + y];
			}
		}
	}
	// the 3x3 window includes the cell itself; remove its contribution
	count -= states_d[peso_i * n + peso_j];
	active_neigb_d[i] = count;
}
// Applies Game-of-Life style rules to the cell this thread owns: a live cell
// survives with 2 or 3 live neighbours, a dead cell is born with exactly 3;
// everything else dies. Launched 1-D by main (blockIdx.y == 0).
__global__ void updateStates(int* states_d, int* active_neigb_d)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int cell = row * blockDim.x + col;
	int alive = states_d[cell];
	int nbrs = active_neigb_d[cell];
	bool survives = (alive == 1) && (nbrs == 2 || nbrs == 3);
	bool born = (alive == 0) && (nbrs == 3);
	states_d[cell] = (survives || born) ? 1 : 0;
}
// Cellular-automaton driver: random n x n board, num_gen generations, each
// generation = neighbour count kernel + state update kernel.
// Precondition: num_block must divide n*n and n*n/num_block <= 1024.
int main(int argc, char** argv)
{
	struct timeval start, end;
	gettimeofday(&start, NULL);
	if (argc != 4)
	{
		std::cout << "USAGE: cellular_automata <dim of matrix> <numGen> <num of blocks>" << std::endl;
		return -1;
	}
	int n = atoi(argv[1]);
	int num_gen = atoi(argv[2]);
	int num_block = atoi(argv[3]);
	int num_thread = n * n / num_block;
	cudaSetDevice(0);
	int* states = new int[n*n];
	int* states_d = NULL;
	int* active_neigb = new int[n*n];
	int* active_neigb_d = NULL;
	srand (time(NULL));
	for (int i = 0; i < n*n; ++i)
	{
		states[i] = rand() % 2;
	}
	cudaMalloc((void**)&states_d, n * n * sizeof(int));
	cudaMemcpy(states_d, states, n * n * sizeof(int), cudaMemcpyHostToDevice);
	cudaMalloc((void**)&active_neigb_d, n * n * sizeof(int));
	// FIX: the original copied the *uninitialised* host buffer to the device
	// here; zero the counts instead.
	cudaMemset(active_neigb_d, 0, n * n * sizeof(int));
	printf("n: %d, num_block: %d, num_thread: %d\n", n, num_block, num_thread);
	// print_matrix(states, n);
	for (int i = 0; i < num_gen; ++i)
	{
		// FIX: reset the neighbour counts every generation (the count kernel
		// historically accumulated into stale values).
		cudaMemset(active_neigb_d, 0, n * n * sizeof(int));
		countActiveNeigb<<< dim3(num_block), dim3(num_thread)>>> (states_d, active_neigb_d, n);
		// cudaThreadSynchronize is deprecated; cudaDeviceSynchronize replaces it
		cudaDeviceSynchronize();
		cudaMemcpy(active_neigb, active_neigb_d, n * n * sizeof(int), cudaMemcpyDeviceToHost);
		//update states
		updateStates<<< dim3(num_block), dim3(num_thread)>>> (states_d, active_neigb_d);
		cudaDeviceSynchronize();
		cudaMemcpy(states, states_d, n * n * sizeof(int), cudaMemcpyDeviceToHost);
		// print_matrix(states, n);
	}
	cudaFree(states_d);
	cudaFree(active_neigb_d);
	delete[] states;
	delete[] active_neigb;
	gettimeofday(&end, NULL);
	double time_gap = (end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec;
	// FIX: time_gap is in microseconds; divide by 1e6 (not 1e5) for seconds
	printf("Time cost: %.2lf s.\n", time_gap / 1000000);
	return 0;
}
|
20,725 | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <tiffio.h>
#include <stdint.h>
// Converts interleaved RGB bytes to greyscale using BT.601-style weights
// (0.299 R + 0.587 G + 0.114 B). One thread per byte: `base` is the index of
// the owning pixel's R channel, so all three channel-threads of a pixel write
// the same grey value into their own channel slot.
__global__ void greyscale(uint8_t *d_out, uint8_t *d_in){
	int id = blockIdx.x*blockDim.x+threadIdx.x;
	int base = id - (id % 3);
	d_out[id] = 0.299f * d_in[base] + 0.587f * d_in[base+1] + 0.114f * d_in[base+2];
}
// Usage: prog <input.tif> <output.tif>
// Reads an 8-bit RGB TIFF, converts it to greyscale on the GPU (each channel
// of the output holds the pixel's grey value), and writes the result as a
// single-strip TIFF. Timing for the kernel is reported via CUDA events.
// NOTE(review): the launch uses `width` as the block size — assumes
// width <= 1024 threads per block; confirm for large images.
int main(int argc, char **argv){
	uint32_t width, length;
	TIFF *iimage;
	uint16_t bits_per_sample, photometric;
	uint16_t planar_config;
	uint16_t samples_per_pixel;
	int size;
	assert(argc == 3);
	// open and validate the input: 8-bit, RGB, 3 samples per pixel
	iimage = TIFFOpen(argv[1], "r");
	assert(iimage);
	assert(TIFFGetField(iimage, TIFFTAG_IMAGEWIDTH, &width));
	assert(width > 0);
	assert(TIFFGetField(iimage, TIFFTAG_IMAGELENGTH, &length));
	assert(length > 0);
	assert(TIFFGetField(iimage, TIFFTAG_BITSPERSAMPLE, &bits_per_sample) != 0);
	assert(bits_per_sample == 8);
	assert(TIFFGetField(iimage, TIFFTAG_PHOTOMETRIC, &photometric));
	assert(photometric == PHOTOMETRIC_RGB);
	assert(TIFFGetField(iimage, TIFFTAG_PLANARCONFIG, &planar_config) != 0);
	assert(TIFFGetField(iimage, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel));
	assert(samples_per_pixel == 3);
	size = width * length * samples_per_pixel * sizeof(char);
	printf("size is %d\n",size);
	printf("spp is %d\n",samples_per_pixel);
	// read every strip into one contiguous interleaved-RGB buffer
	char *idata = (char *) malloc(size);
	assert(idata != NULL);
	char *curr = idata;
	int count = TIFFNumberOfStrips(iimage);
	size_t in;
	for (int i = 0; i < count; ++i) {
		in = TIFFReadEncodedStrip(iimage, i, curr, -1);
		// assert(in != -1);
		// printf("%li\n", in);
		curr += in;
	}
	TIFFClose(iimage);
	char *odata = (char *) malloc(size);
	uint8_t* d_in;
	cudaMalloc((void**) &d_in, size);
	cudaMemcpy(d_in, idata, size, cudaMemcpyHostToDevice);
	uint8_t* d_out;
	cudaMalloc((void**) &d_out, size);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	// one thread per byte: size/width blocks of `width` threads covers `size`
	greyscale<<<size/width, width>>>(d_out, d_in);
	cudaEventRecord(stop);
	cudaMemcpy(odata, d_out, size, cudaMemcpyDeviceToHost);
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("kernel time is %fms\n", milliseconds);
	assert(odata != NULL);
	// write the result, mirroring the input's format tags, as a single strip
	TIFF *oimage = TIFFOpen(argv[2], "w");
	assert(oimage);
	assert(TIFFSetField(oimage, TIFFTAG_IMAGEWIDTH, width));
	assert(TIFFSetField(oimage, TIFFTAG_IMAGELENGTH, length));
	assert(TIFFSetField(oimage, TIFFTAG_BITSPERSAMPLE, bits_per_sample));
	assert(TIFFSetField(oimage, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE));
	assert(TIFFSetField(oimage, TIFFTAG_PHOTOMETRIC, photometric));
	assert(TIFFSetField(oimage, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel));
	assert(TIFFSetField(oimage, TIFFTAG_PLANARCONFIG, planar_config));
	assert(TIFFSetField(oimage, TIFFTAG_ROWSPERSTRIP, length));
	size_t on = size;
	assert(TIFFWriteEncodedStrip(oimage, 0, odata, on) == on);
	TIFFClose(oimage);
	free(idata);
	free(odata);
	cudaFree(d_in);
	cudaFree(d_out);
	return 0;
}
|
20,726 | #include "includes.h"
using namespace std;
long long remaining_N2(int , int ,long long );
long long remaining_N(int , int ,int );
// Packs the strictly-upper-triangular entries (i < j) of an n1-row slice of
// an n x n matrix into the 1-D array `upper`. The source is read
// column-major (indexi = j*n1 + i); the destination index tmp_2-1 is the
// row-major packed position i*n - i*(i+1)/2 + (j - i) - 1.
// NOTE(review): upper_size, N, i_so_far and M1 are unused in this kernel
// (N appears only in the commented-out guard) — presumably used by a sibling
// kernel; verify before removing.
__global__ void ker2(float * cormat, float * upper,int n1,int n,long long upper_size,int N,int i_so_far,long long M1)
{
  // flat global thread id, built up in long long to avoid 32-bit overflow
  long long idx = blockDim.x;
  idx*=blockIdx.x;
  idx+=threadIdx.x;
  // (i, j) coordinates of the element this thread owns
  long i = idx/n;
  long j = idx%n;
  if(i<j && i<n1 && j<n)// &&i<N &&j<N && idx<(n1*n))
  {
    // tmp = i*(i+1)/2 : number of packed slots skipped by the triangle above row i
    long long tmp=i;
    tmp*=(i+1);
    tmp/=2;
    // tmp_2 = i*n - tmp + (j - i) : 1-based packed position of (i, j)
    long long tmp_2=i;
    tmp_2*=n;
    tmp_2=tmp_2-tmp;
    tmp_2+=j;
    tmp_2-=i;
    // column-major source index j*n1 + i
    long long indexi=n1;
    indexi*=j;
    indexi=indexi+i;
    upper[tmp_2-1]=cormat[indexi];
    //if((i==39001 &&j == 69999)||(i==1 && j==2))
    // printf("\n\n\n thread: %f ",upper[tmp_2-1]," ",cormat[indexi]);
  }
}
20,727 | #include <stdio.h>
#include <stdlib.h>
#define N 16
extern __global__
void cudaMatMul(int C[N][N], int A[N][N], int B[N][N], int n);
// Multiplies two N x N int matrices on the GPU and prints the result.
// FIX: the original stored each matrix as an array of per-row device
// pointers and cast that pointer table to int(*)[N] for the kernel — the
// kernel then indexed host-side pointer values as matrix data (undefined
// behaviour), and the final cudaMemcpy2D copied from the host pointer table
// rather than device memory. The matrices are now stored contiguously
// (row-major N*N), which is what the int[N][N] kernel signature requires.
int main(int argc, char** argv)
{
    size_t bytes = N * N * sizeof(int);
    // contiguous host buffers
    int* A = (int*) malloc(bytes);
    int* B = (int*) malloc(bytes);
    int* ret = (int*) malloc(bytes);
    // contiguous device buffers
    int *A_c, *B_c, *C_c;
    cudaMalloc((void**) &A_c, bytes);
    cudaMalloc((void**) &B_c, bytes);
    cudaMalloc((void**) &C_c, bytes);
    // init data: A[i][j] = i+j, B[i][j] = i*j, C starts at zero because the
    // kernel only accumulates (c[i][j] += ...)
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            A[i * N + j] = i + j;
            B[i * N + j] = i * j;
            ret[i * N + j] = 0;
        }
    }
    // copy inputs (and the zeroed C) to device memory in single transfers
    cudaMemcpy(A_c, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B_c, B, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(C_c, ret, bytes, cudaMemcpyHostToDevice);
    // single-thread launch: the kernel itself loops over the whole product
    cudaMatMul<<<1, 1>>>((int (*) [N])C_c, (int (*) [N])A_c, (int (*) [N])B_c, N);
    cudaMemcpy(ret, C_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            printf("%d ", ret[i * N + j]);
        printf("\n");
    }
    fflush(stdout);
    free(A);
    free(B);
    free(ret);
    cudaFree(A_c);
    cudaFree(B_c);
    cudaFree(C_c);
    return 0;
}
// Naive triple-loop matrix multiply: c += a * b over n x n int matrices.
// Assumes c was zeroed by the caller (only accumulates) and that all three
// arguments point to contiguous row-major N x N storage.
// NOTE(review): launched <<<1,1>>> by main — one GPU thread does all the
// work; this is a correctness demo, not a performance kernel.
extern __global__
void cudaMatMul(int c[N][N], int a[N][N], int b[N][N], int n)
{
	int i = 0;
	int j = 0;
	int k = 0;
	// mat mul
	for(i = 0; i < n; i++)
		for(j = 0; j < n; j++)
			for(k = 0; k < n; k++)
				c[i][j] += a[i][k] * b[k][j];
}
|
20,728 | #include "includes.h"
// Broadcasts per-plane bias values into an output tensor: thread (x) carries
// one plane's bias, grid y/z select the line/image, and the loop writes the
// bias into all size2 interleaved-plane positions of that line.
// NOTE(review): size1 is unused here — confirm against callers before removal.
__global__ void copyBiasToOutputs(float *ptrbias, float *ptroutput, const int size1, const int size2, const int nOutputPlane, const int linestride, const int imstride)
{
	// each thread has a value to manage...
	const int tidx=blockDim.x*blockIdx.x + threadIdx.x;
	const int tidy=blockIdx.y;
	const int tidz=blockIdx.z;
	// FIX: guard BEFORE touching ptrbias — the original read ptrbias[tidx]
	// unconditionally, an out-of-bounds read whenever tidx >= nOutputPlane.
	// Early return also hoists the per-iteration branch out of the loop.
	if(tidx >= nOutputPlane)
		return;
	float val = ptrbias[tidx];
	ptroutput+= tidz*imstride + tidy*linestride;
	for(int k=0; k<size2; k++)
	{
		ptroutput[k*nOutputPlane+tidx]=val;
	}
}
20,729 | #include <stdio.h>
#include <math.h>
#include <time.h>
// In-place element-wise add: y[i] += x[i] for i in [0, n).
void add(int n, float* x, float* y) {
    for (int idx = 0; idx != n; ++idx) {
        y[idx] = y[idx] + x[idx];
    }
}
// In-place element-wise add over a jagged 3-D tensor: t1[x][y][z] += t2[x][y][z].
void add(int x_size, int y_size, int z_size, float*** t1, float*** t2) {
    for (int i = 0; i < x_size; ++i) {
        for (int j = 0; j < y_size; ++j) {
            float* dst = t1[i][j];
            float* src = t2[i][j];
            for (int k = 0; k < z_size; ++k) {
                dst[k] += src[k];
            }
        }
    }
}
__global__
// Grid-stride element-wise add: y[i] += x[i] for all i in [0, n), valid for
// any launch configuration.
void add_gpu(int n, float* x, float* y) {
  const int first = blockIdx.x * blockDim.x + threadIdx.x;
  const int step = blockDim.x * gridDim.x;
  for (int idx = first; idx < n; idx += step) {
    y[idx] += x[idx];
  }
}
// Compares three ways of adding two 500^3-element tensors: flat CPU loop,
// jagged-pointer CPU loop, and a grid-stride GPU kernel over managed memory.
int main(void) {
  int X = 500;
  int Y = 500;
  int Z = 500;
  int N = X*Y*Z;
  float *v1, *v2;
  cudaMallocManaged(&v1, N*sizeof(float));
  cudaMallocManaged(&v2, N*sizeof(float));
  // jagged host tensors for the triple-pointer variant
  float ***t1, ***t2;
  t1 = new float**[X];
  t2 = new float**[X];
  for(int x = 0; x < X; ++x) {
    t1[x] = new float*[Y];
    t2[x] = new float*[Y];
    for(int y = 0; y < Y; ++y) {
      t1[x][y] = new float[Z];
      t2[x][y] = new float[Z];
    }
  }
  for(int x = 0; x < X; ++x)
    for(int y = 0; y < Y; ++y)
      for(int z = 0; z < Z; ++z) {
        v1[x + y*X + z*X*Y] = 1.0f;
        v2[x + y*X + z*X*Y] = 2.0f;
        t1[x][y][z] = 1.0f;
        t2[x][y][z] = 2.0f;
      }
  // sum
  clock_t t;
  t = clock();
  add(X*Y*Z, v1, v2);
  t = clock() - t;
  // FIX: clock_t is not int; print it through a long with %ld
  printf ("It took me %ld clicks (%f seconds).\n",(long)t,((float)t)/CLOCKS_PER_SEC);
  t = clock();
  add(X, Y, Z, t1, t2);
  t = clock() - t;
  printf ("It took me %ld clicks (%f seconds).\n",(long)t,((float)t)/CLOCKS_PER_SEC);
  int blockSize = 256;
  int numBlocks = (N + blockSize-1) / blockSize;
  t = clock();
  add_gpu<<< numBlocks, blockSize >>>(N, v1, v2);
  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  t = clock() - t;
  printf ("It took me %ld clicks (%f seconds).\n",(long)t,((float)t)/CLOCKS_PER_SEC);
  // FIX: the jagged tensors were leaked by the original
  for(int x = 0; x < X; ++x) {
    for(int y = 0; y < Y; ++y) {
      delete[] t1[x][y];
      delete[] t2[x][y];
    }
    delete[] t1[x];
    delete[] t2[x];
  }
  delete[] t1;
  delete[] t2;
  cudaFree(v1);
  cudaFree(v2);
  return 0;
}
|
20,730 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
__host__ void init_vects(int vect_len,float *h_vect1,float *h_vect2);
__global__ void vec_add(int vect_len, float *d_vect1, float *d_vect2, float *d_sum);
// Adds two one-million-element random vectors on the GPU and reports the
// kernel time plus a spot check of the first element.
int main(int argc,char **argv)
{
	cudaEvent_t start=0;
	cudaEvent_t stop=0;
	float time=0;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	int vect_len=1e6;
	// FIX: the original used three ~4 MB stack VLAs (12 MB total), which
	// overflows the typical 8 MB stack; allocate on the heap instead.
	float *h_vect1 = (float *)malloc(vect_len*sizeof(float));
	float *h_vect2 = (float *)malloc(vect_len*sizeof(float));
	float *h_sum   = (float *)malloc(vect_len*sizeof(float));
	float *d_vect1, *d_vect2, *d_sum;
	// initialization
	init_vects(vect_len, h_vect1, h_vect2);
	// tranfer vectors to global memory
	cudaMalloc((void **)&d_vect1 , vect_len*sizeof(float) );
	cudaMalloc((void **)&d_vect2 , vect_len*sizeof(float) );
	cudaMalloc((void **)&d_sum   , vect_len*sizeof(float) );
	cudaMemcpy (d_vect1 , h_vect1 , vect_len*sizeof(float) , cudaMemcpyHostToDevice);
	cudaMemcpy (d_vect2 , h_vect2 , vect_len*sizeof(float) , cudaMemcpyHostToDevice);
	// determine block and grid size.
	// FIX: ceil(vect_len/1024) rounded an already-truncated INTEGER quotient
	// (976), so the grid covered only 999424 elements and the last 576 were
	// never summed; use integer ceil-division instead.
	dim3 DimGrid((vect_len + 1023) / 1024, 1, 1);
	dim3 DimBlock(1024,1,1);
	cudaEventRecord(start,0);
	vec_add<<<DimGrid,DimBlock>>>(vect_len, d_vect1 ,d_vect2 ,d_sum);
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaMemcpy(h_sum , d_sum , vect_len*sizeof(float) , cudaMemcpyDeviceToHost);
	//Free the Device array
	cudaFree (d_vect1);
	cudaFree (d_vect2);
	cudaFree (d_sum);
	cudaEventElapsedTime(&time,start,stop);
	printf("time of the Kernel %f \n",time );
	printf("v1=%f ,, v2 =%f ,, sum=%f \n",h_vect1[0],h_vect2[0],h_sum[0]);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	free(h_vect1);
	free(h_vect2);
	free(h_sum);
	return 0;
}
// Element-wise sum d_sum = d_vect1 + d_vect2; one element per thread, with a
// guard for the ragged final block.
__global__ void vec_add(int vect_len, float *d_vect1, float *d_vect2, float *d_sum){
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= vect_len)
		return;
	d_sum[gid] = d_vect1[gid] + d_vect2[gid];
}
// Fill both vectors with pseudo-random values in [0, RAND_MAX], seeding the
// C PRNG from the wall clock (so runs are not reproducible).
__host__ void init_vects(int vect_len,float *h_vect1,float *h_vect2){
	srand(time(NULL));
	for (int idx = 0; idx < vect_len; ++idx){
		h_vect1[idx] = rand();
		h_vect2[idx] = rand();
	}
}
|
20,731 | // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gq58x4_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gq58x4_BLOCKS 128
#define gq58x4_THREADS 128
#define gq58x4_ARRAY_SECTIONS (gq58x4_BLOCKS*gq58x4_THREADS/8)
#define gq58x4_k 8
#define gq58x4_q 48
#define gq58x4_g 288230374541099008ULL
#define gq58x4_gdiv16 18014398408818688ULL
typedef unsigned long long lt;
typedef struct{
lt xN[8] __attribute__ ((aligned(16))),
xP[8] __attribute__ ((aligned(16)));
} gq58x4_state;
typedef gq58x4_state gq58x4_sse_state;
lt gq58x4_sse_Consts[8] __attribute__ ((aligned(16))) =
{13835057977972752384ULL,13835057977972752384ULL,1610612736ULL,1610612736ULL,
288230371923853311ULL,288230371923853311ULL,288230374541099008ULL,288230374541099008ULL};
// Host-only SSE2 implementation of one output step of the gq58x4 generator.
// Advances all eight lanes of the recurrence held in state->xN/xP (two 64-bit
// lanes per XMM register, four stanzas of nearly identical code below) and
// packs the high bits of each lane into one 32-bit output word, mirroring the
// portable gq58x4_generate_ further down in this file.
// NOTE(review): the register-level constant choices follow the PRAND paper
// referenced in the file header — verify there before modifying; also, the
// missing trailing backslash on the "pxor" line is harmless, since adjacent
// string literals concatenate regardless of line splicing.
__host__ unsigned int gq58x4_sse_generate_(gq58x4_sse_state* state){
unsigned output;
// %1 = xN, %2 = xP, %3 = gq58x4_sse_Consts; result returned in %0.
asm volatile("movaps (%3),%%xmm0\n" \
"movaps (%2),%%xmm1\n" \
"movaps (%1),%%xmm4\n" \
"movaps %%xmm4,(%2)\n" \
"psllq $3,%%xmm4\n" \
"paddq %%xmm0,%%xmm4\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"paddq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,(%1)\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm4\n" \
"movaps 16(%2),%%xmm1\n" \
"movaps 16(%1),%%xmm5\n" \
"movaps %%xmm5,16(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,16(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 32(%2),%%xmm1\n" \
"movaps 32(%1),%%xmm6\n" \
"movaps %%xmm6,32(%2)\n" \
"psllq $3,%%xmm6\n" \
"paddq %%xmm0,%%xmm6\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"paddq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,32(%1)\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm6\n" \
"movaps 48(%2),%%xmm1\n" \
"movaps 48(%1),%%xmm7\n" \
"movaps %%xmm7,48(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,48(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $54,%%xmm4\n" \
"psrlq $54,%%xmm5\n" \
"psrlq $54,%%xmm6\n" \
"psrlq $54,%%xmm7\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packssdw %%xmm7,%%xmm6\n" \
"packssdw %%xmm6,%%xmm4\n" \
"packsswb %%xmm4,%%xmm4\n" \
"movaps %%xmm4,%%xmm0\n" \
"psrldq $4,%%xmm0\n" \
"pslld $4,%%xmm0\n" \
"pxor %%xmm0,%%xmm4\n"
"movd %%xmm4,%0\n" \
"":"=&r"(output):"r"(state->xN),"r"(state->xP),"r"(gq58x4_sse_Consts));
return output;
}
// Copy the 8-lane xN/xP state vectors from the plain state into the
// SSE-aligned state structure used by gq58x4_sse_generate_.
__device__ __host__ void gq58x4_get_sse_state_(gq58x4_state* state,gq58x4_sse_state* sse_state){
    for(int j = 0; j < 8; ++j){
        sse_state->xN[j] = state->xN[j];
        sse_state->xP[j] = state->xP[j];
    }
}
// Reduce x modulo g = 2^58 - 2^30 - 2^29: fold the bits above position 58
// back in using 2^58 ≡ 2^29 + 2^30 (mod g), then subtract g once if needed.
__device__ __host__ lt gq58x4_mod_g(lt x){ // returns x (mod g)
    lt hi = x >> 58;                                        // overflow beyond 58 bits
    lt folded = x - (hi << 58) + (hi << 29) + (hi << 30);   // fold back into range
    return (folded >= gq58x4_g) ? (folded - gq58x4_g) : folded;
}
// Modular multiplication: A*B (mod gq58x4_g), callers guarantee A,B < gq58x4_g.
// The operands are split into 32-bit halves and the partial products folded
// through gq58x4_mod_g. The "+12" corrections and the 26/28-bit splits come
// from the structure of g = 2^58 - 2^30 - 2^29 (presumably derived in the
// PRAND paper cited in the header — verify there before changing anything);
// the exact order of operations matters, do not reorder.
__device__ __host__ lt gq58x4_MyMult(lt A,lt B){ // returns AB (mod gq58x4_g), where it is implied that A,B<gq58x4_g;
lt A1,A0,B1,B0,curr,x,m;
// Split into high/low 32-bit halves with the +12 base correction.
A1=A>>32; B1=B>>32; A0=A-(A1<<32)+(12*A1); B0=B-(B1<<32)+(12*B1);
// Re-normalize if the corrected low halves overflowed 32 bits.
if(A0>>32) {A0-=4294967284ULL; A1++;}
if(B0>>32) {B0-=4294967284ULL; B1++;}
// Cross terms, split at 26 bits, then recombined and reduced mod g.
curr=A1*B0+B1*A0; m=curr>>26; x=curr-(m<<26);
curr=((3*m+(x<<4))<<28)+(gq58x4_g-12*x)+(144*A1*B1)+(gq58x4_mod_g(A0*B0));
return gq58x4_mod_g(curr);
}
// General recurrence step: (myk*N - myq*P) mod gq58x4_g, kept non-negative.
__device__ __host__ lt gq58x4_CNext2(lt N,lt P,lt myk,lt myq){ // returns (myk*N-myq*P) (mod gq58x4_g)
    lt a = gq58x4_MyMult(myk, N);
    lt b = gq58x4_MyMult(myq, P);
    return (a >= b) ? (a - b) : (gq58x4_g + a - b);
}
// Specialized fast step for the fixed coefficients k=8, q=48:
// (8N - 48P) mod gq58x4_g.
__device__ __host__ lt gq58x4_CNext(lt N,lt P){ // returns (8N-48P) (mod gq58x4_g)
    lt t = N + 6*(gq58x4_g - P);   // N - 6P, kept non-negative via +6g
    return gq58x4_mod_g(t << 3);   // multiply by 8, then reduce
}
// Skip-ahead by a power of two: given states x0 and x1 of the recurrence,
// returns the state 2^n steps past x1. Each loop iteration advances x one
// doubling step against the fixed x0 while squaring the coefficient pair:
// (k,q) -> (k^2 - 2q, q^2) (mod g). Cost: O(n) modular multiplications.
// NOTE(review): x is always combined with the original x0, per the PRAND
// skip-ahead construction — consult the paper before restructuring.
__device__ __host__ lt gq58x4_GetNextN(lt x0,lt x1,unsigned int n){ //returns x_{2^n}
lt myk=gq58x4_k,myq=gq58x4_q,i,x=x1;
for(i=0;i<n;i++){
x=gq58x4_CNext2(x,x0,myk,myq);     // advance x by the current 2^i step
myk=gq58x4_CNext2(myk,2,myk,myq);  // k <- k*k - 2q
myq=gq58x4_CNext2(myq,0,myq,0);    // q <- q*q
}
return x;
}
// Skip-ahead by an arbitrary 128-bit count: returns x_N, N = 2^64*N64 + N0 + 1,
// given two consecutive states x0, x1. Classic binary decomposition: for each
// set bit of the count, jump the state pair (xp,xn) forward by 2^shift steps
// using gq58x4_GetNextN; gq58x4_CNext supplies the "one past xn" state needed
// to jump xn itself. The low word N0 is consumed first (shift 0..63), then the
// high word N64 continues with shift starting at 64.
__device__ __host__ lt gq58x4_GetNextAny(lt x0,lt x1,lt N64,lt N0){ //N=2^64*N64+N0+1
lt i,xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N0; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x4_GetNextN(xp,xn,shift);
xnnew=gq58x4_GetNextN(xn,gq58x4_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
i=N64; shift=64; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x4_GetNextN(xp,xn,shift);
xnnew=gq58x4_GetNextN(xn,gq58x4_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp; // returns x_N, where N=2^64*N64+N0+1
}
// Advance every one of the 8 lanes of `state` by offset64*2^64 + offset0 + 1
// steps of the recurrence. Both new values are computed from the old pair
// before the lane is overwritten.
__device__ __host__ void gq58x4_skipahead_(gq58x4_state* state, lt offset64, lt offset0){ // offset=offset64*2^64+offset0+1
    for(lt lane = 0; lane < 8; lane++){
        lt prev = gq58x4_GetNextAny(state->xP[lane], state->xN[lane], offset64, offset0);
        lt next = gq58x4_GetNextAny(state->xP[lane], state->xN[lane], offset64, offset0+1);
        state->xP[lane] = prev;
        state->xN[lane] = next;
    }
}
// Initialize all 8 lanes from fixed seed constants. Each lane is derived
// from the previous lane's pair by jumping the same fixed offset
// (35048736516210784 steps), so consecutive lanes sit far apart on the orbit.
__device__ __host__ void gq58x4_init_(gq58x4_state* state){
    lt x0 = 100152853817629549ULL;
    lt x1 = 132388305121829306ULL;
    for(lt lane = 0; lane < 8; lane++){
        lt xp = gq58x4_GetNextAny(x0, x1, 0, 35048736516210783ULL);
        lt xn = gq58x4_GetNextAny(x0, x1, 0, 35048736516210784ULL);
        state->xP[lane] = xp;
        state->xN[lane] = xn;
        x0 = xp;   // next lane starts where this one landed
        x1 = xn;
    }
}
// Seed the generator at the start of short sequence #SequenceNumber.
// Valid range: 0 <= SequenceNumber < 3*10^8; each sequence holds <= 8*10^7 values.
__device__ __host__ void gq58x4_init_short_sequence_(gq58x4_state* state,unsigned SequenceNumber){
    gq58x4_init_(state);
    gq58x4_skipahead_(state, 0, 82927047ULL * (lt)SequenceNumber);
}
// Seed the generator at the start of medium sequence #SequenceNumber.
// Valid range: 0 <= SequenceNumber < 3*10^6; each sequence holds <= 8*10^9 values.
__device__ __host__ void gq58x4_init_medium_sequence_(gq58x4_state* state,unsigned SequenceNumber){
    gq58x4_init_(state);
    gq58x4_skipahead_(state, 0, 8799201913ULL * (lt)SequenceNumber);
}
// Seed the generator at the start of long sequence #SequenceNumber.
// Valid range: 0 <= SequenceNumber < 3*10^4; each sequence holds <= 8*10^11 values.
__device__ __host__ void gq58x4_init_long_sequence_(gq58x4_state* state,unsigned SequenceNumber){
    gq58x4_init_(state);
    gq58x4_skipahead_(state, 0, 828317697521ULL * (lt)SequenceNumber);
}
// Produce one 32-bit output word: advance each of the 8 lanes a single step
// of x_{n+1} = 8*x_n - 48*x_{n-1} (mod g), extract 4 bits per lane
// (temp/gq58x4_gdiv16 is in [0,15] since g/gdiv16 < 16), and place the eight
// nibbles at disjoint positions 0,8,16,24 (lanes 0-3) and 4,12,20,28
// (lanes 4-7) — so the += additions never carry across lanes.
__device__ __host__ unsigned int gq58x4_generate_(gq58x4_state* state){
unsigned int sum=0; int i; lt temp;
for(i=0;i<8;i++){
temp=gq58x4_mod_g((state->xN[i]+6*(gq58x4_g-state->xP[i]))<<3);   // one recurrence step
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+= ((temp/gq58x4_gdiv16)<<((i<4)?(8*i):(8*i-28)));             // pack this lane's nibble
}
return sum;
}
// Same lane-stepping and nibble-packing as gq58x4_generate_, but scales the
// 32-bit word into [0,1) by multiplying with 2^-32.
// NOTE(review): the scale factor is a double literal, so the multiply runs in
// double precision before narrowing to float — on GPUs an `f`-suffixed
// constant would be faster; left unchanged to preserve exact results.
__device__ __host__ float gq58x4_generate_uniform_float_(gq58x4_state* state){
unsigned int sum=0; int i; lt temp;
for(i=0;i<8;i++){
temp=gq58x4_mod_g((state->xN[i]+6*(gq58x4_g-state->xP[i]))<<3);   // one recurrence step
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+= ((temp/gq58x4_gdiv16)<<((i<4)?(8*i):(8*i-28)));             // pack this lane's nibble
}
return ((float) sum) * 2.3283064365386963e-10;                    // * 2^-32
}
// Dump the generator state to stdout: the eight xN words, then the eight xP
// words, each reduced mod g and comma-separated inside braces.
__host__ void gq58x4_print_state_(gq58x4_state* state){
    int i;
    printf("Generator State:\nxN={");
    for(i=0;i<8;i++){
        printf("%llu",state->xN[i]%gq58x4_g);
        printf((i<7)?",":"}\nxP={");
    }
    for(i=0;i<8;i++){
        printf("%llu",state->xP[i]%gq58x4_g);
        printf((i<7)?",":"}\n\n");
    }
}
// Same dump as gq58x4_print_state_, but for the SSE-aligned state layout.
__host__ void gq58x4_print_sse_state_(gq58x4_sse_state* state){
    int i;
    printf("Generator State:\nxN={");
    for(i=0;i<8;i++){
        printf("%llu",state->xN[i]%gq58x4_g);
        printf((i<7)?",":"}\nxP={");
    }
    for(i=0;i<8;i++){
        printf("%llu",state->xP[i]%gq58x4_g);
        printf((i<7)?",":"}\n\n");
    }
}
// One generator per group of 8 threads ("orbit" = lane index 0..7). Each
// group skips its lanes ahead to its private section of the output, then
// steps the recurrence *length times; after each step, lane 0 of the group
// packs the eight 4-bit contributions from shared array a[] into one word.
__global__ void gq58x4_kernel_generate_array(gq58x4_state* state, unsigned int* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift; long offset; lt temp;
__shared__ lt xP[gq58x4_THREADS]; // one generator per s=8 threads, i.e. one orbit
__shared__ lt xN[gq58x4_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x4_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 8;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>3; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x4_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x4_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
shift=((orbit<4)?(8*orbit):(8*orbit-28));
for(i=0;i<(*length);i++){
temp = gq58x4_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((temp/gq58x4_gdiv16)<<shift);
__syncthreads(); // each s=8 threads result in "length" values in the output array
if(orbit==0){ sum=0; for(j=0;j<8;j++) sum+=a[threadIdx.x+j]; out[offset+i]=sum; }
// BUG FIX: barrier before the next iteration overwrites a[] — without it,
// threads with orbit!=0 could race ahead and clobber values that the
// orbit==0 threads are still reading above.
__syncthreads();
}
}
// Fill the device buffer dev_out (already allocated by the caller, holding at
// least `length` unsigned ints) with random words. The array is split into
// gq58x4_ARRAY_SECTIONS independent sections of ceil(length/SECTIONS) values.
__host__ void gq58x4_generate_gpu_array_(gq58x4_state* state, unsigned int* dev_out, long length){
long mylength = length/gq58x4_ARRAY_SECTIONS;
gq58x4_state* dev_state;
long* dev_length;
if((mylength*gq58x4_ARRAY_SECTIONS)<length) mylength++;   // ceil
gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x4_state)));
gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x4_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x4_state),cudaMemcpyHostToDevice));
gq58x4_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x4_kernel_generate_array<<<gq58x4_BLOCKS,gq58x4_THREADS>>>(dev_state,dev_out,dev_length);
gq58x4_CUDA_CALL(cudaGetLastError());
// BUG FIX: the launch is asynchronous — wait for the kernel before freeing
// the buffers it reads; this also surfaces execution errors here instead of
// at some unrelated later API call.
gq58x4_CUDA_CALL(cudaDeviceSynchronize());
gq58x4_CUDA_CALL(cudaFree(dev_state)); gq58x4_CUDA_CALL(cudaFree(dev_length));
}
// Float variant of gq58x4_kernel_generate_array: identical lane stepping and
// packing, with the packed 32-bit word scaled by 2^-32 into [0,1).
__global__ void gq58x4_kernel_generate_array_float(gq58x4_state* state, float* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift; long offset; lt temp;
__shared__ lt xP[gq58x4_THREADS]; // one generator per s=8 threads, i.e. one orbit
__shared__ lt xN[gq58x4_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x4_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 8;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>3; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x4_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x4_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
shift=((orbit<4)?(8*orbit):(8*orbit-28));
for(i=0;i<(*length);i++){
temp = gq58x4_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((temp/gq58x4_gdiv16)<<shift);
__syncthreads(); // each s=8 threads result in "length" values in the output array
if(orbit==0){ sum=0; for(j=0;j<8;j++) sum+=a[threadIdx.x+j]; out[offset+i]=((float)sum)* 2.3283064365386963e-10; }
// BUG FIX: barrier before the next iteration overwrites a[] — without it,
// threads with orbit!=0 could race ahead and clobber values that the
// orbit==0 threads are still reading above.
__syncthreads();
}
}
// Float variant of gq58x4_generate_gpu_array_: fills the caller-allocated
// device buffer with uniform floats in [0,1).
__host__ void gq58x4_generate_gpu_array_float_(gq58x4_state* state, float* dev_out, long length){
long mylength = length/gq58x4_ARRAY_SECTIONS;
gq58x4_state* dev_state;
long* dev_length;
if((mylength*gq58x4_ARRAY_SECTIONS)<length) mylength++;   // ceil
gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x4_state)));
gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x4_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x4_state),cudaMemcpyHostToDevice));
gq58x4_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x4_kernel_generate_array_float<<<gq58x4_BLOCKS,gq58x4_THREADS>>>(dev_state,dev_out,dev_length);
gq58x4_CUDA_CALL(cudaGetLastError());
// BUG FIX: wait for the asynchronous kernel before freeing its argument
// buffers; also surfaces execution errors at this call site.
gq58x4_CUDA_CALL(cudaDeviceSynchronize());
gq58x4_CUDA_CALL(cudaFree(dev_state)); gq58x4_CUDA_CALL(cudaFree(dev_length));
}
// Double variant of gq58x4_kernel_generate_array: identical lane stepping and
// packing, with the packed 32-bit word scaled by 2^-32 into [0,1).
__global__ void gq58x4_kernel_generate_array_double(gq58x4_state* state, double* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift; long offset; lt temp;
__shared__ lt xP[gq58x4_THREADS]; // one generator per s=8 threads, i.e. one orbit
__shared__ lt xN[gq58x4_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x4_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 8;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>3; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x4_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x4_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
shift=((orbit<4)?(8*orbit):(8*orbit-28));
for(i=0;i<(*length);i++){
temp = gq58x4_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((temp/gq58x4_gdiv16)<<shift);
__syncthreads(); // each s=8 threads result in "length" values in the output array
if(orbit==0){ sum=0; for(j=0;j<8;j++) sum+=a[threadIdx.x+j]; out[offset+i]=((double)sum)* 2.3283064365386963e-10; }
// BUG FIX: barrier before the next iteration overwrites a[] — without it,
// threads with orbit!=0 could race ahead and clobber values that the
// orbit==0 threads are still reading above.
__syncthreads();
}
}
// Double variant of gq58x4_generate_gpu_array_: fills the caller-allocated
// device buffer with uniform doubles in [0,1).
__host__ void gq58x4_generate_gpu_array_double_(gq58x4_state* state, double* dev_out, long length){
long mylength = length/gq58x4_ARRAY_SECTIONS;
gq58x4_state* dev_state;
long* dev_length;
if((mylength*gq58x4_ARRAY_SECTIONS)<length) mylength++;   // ceil
gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x4_state)));
gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x4_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x4_state),cudaMemcpyHostToDevice));
gq58x4_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x4_kernel_generate_array_double<<<gq58x4_BLOCKS,gq58x4_THREADS>>>(dev_state,dev_out,dev_length);
gq58x4_CUDA_CALL(cudaGetLastError());
// BUG FIX: wait for the asynchronous kernel before freeing its argument
// buffers; also surfaces execution errors at this call site.
gq58x4_CUDA_CALL(cudaDeviceSynchronize());
gq58x4_CUDA_CALL(cudaFree(dev_state)); gq58x4_CUDA_CALL(cudaFree(dev_length));
}
// Convenience wrapper: allocates a device buffer, runs the generation kernel,
// and copies the first `length` words back into the host array `out`. The
// final blocking cudaMemcpy also synchronizes with the kernel.
__host__ void gq58x4_generate_array_(gq58x4_state* state, unsigned int* out, long length){
    long perSection = length/gq58x4_ARRAY_SECTIONS;
    if(perSection*gq58x4_ARRAY_SECTIONS < length) perSection++;   // ceil
    gq58x4_state* dev_state;
    unsigned int* dev_out;
    long* dev_length;
    // Device buffer is rounded up to a whole number of sections.
    gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x4_state)));
    gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_out,perSection*gq58x4_ARRAY_SECTIONS*sizeof(unsigned int)));
    gq58x4_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
    gq58x4_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x4_state),cudaMemcpyHostToDevice));
    gq58x4_CUDA_CALL(cudaMemcpy(dev_length,&perSection,sizeof(long),cudaMemcpyHostToDevice));
    gq58x4_kernel_generate_array<<<gq58x4_BLOCKS,gq58x4_THREADS>>>(dev_state,dev_out,dev_length);
    gq58x4_CUDA_CALL(cudaGetLastError());
    // Only the requested `length` words are copied out.
    gq58x4_CUDA_CALL(cudaMemcpy(out,dev_out,length*sizeof(unsigned int),cudaMemcpyDeviceToHost));
    gq58x4_CUDA_CALL(cudaFree(dev_state));
    gq58x4_CUDA_CALL(cudaFree(dev_out));
    gq58x4_CUDA_CALL(cudaFree(dev_length));
}
|
20,732 | #include <iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Print each thread's linearized global index for an arbitrary 3-D grid of
// 3-D blocks. The parameters `a` and `N` are accepted but not used.
__global__ void index_kernel( int* a, int N){
    int blocksBefore = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int threadInBlock = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int globalId = blocksBefore * (blockDim.x * blockDim.y * blockDim.z) + threadInBlock;
    printf("%d\n", globalId);
}
// Write the first N entries of `a` to `file_name`, one integer per line.
// Prints a message to stdout if the file cannot be opened.
void print_to_file(const char* file_name, const int* a, int N){
    std::ofstream fout(file_name);
    if (!fout.is_open()){
        std::cout << "Unable to open file\n";
        return;
    }
    for (int i = 0; i < N; ++i)
        fout << a[i] << "\n";
    fout.close();
}
// Launch index_kernel on a 2x2x2 grid of 4x2x2 blocks (128 threads total) and
// dump the host buffer before and after the round trip through the device.
int main(){
    int N = 128;
    size_t size = N * sizeof(int);
    // Zero-initialize: the original printed uninitialized heap memory to
    // input.txt, which is undefined behavior.
    int *a = new int[N]();
    print_to_file("input.txt", a, N);
    int *dev_a;
    cudaMalloc((void **)&dev_a, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    index_kernel<<<dim3(2, 2, 2), dim3(4, 2, 2)>>> (dev_a, N);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    print_to_file("output1.txt", a, N);
    /*
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    vector_add_kernel<<<dim3(2, 1, 1), dim3(64, 1, 1)>>> (dev_a, N);
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    print_to_file("output2.txt", a, N);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    vector_add_kernel<<<dim3(2, 2, 1), dim3(32, 1, 1)>>> (dev_a, N);
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    print_to_file("output3.txt", a, N);
    */
    cudaFree(dev_a);
    delete[] a;
    return 0;
}
20,733 | #include "stdio.h"
#include <limits>
#include <iostream>
#include <chrono>
// SAXPY on the device: y[i] <- a*x[i] + y[i], one thread per element.
// Threads whose index falls past n do nothing.
__global__ void GPU_SAXPY(int n, float *x, float a, float* y) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n) return;
    y[i] = a * x[i] + y[i];
}
// Reference SAXPY on the host: y[i] <- a*x[i] + y[i] for i in [0, n).
void CPU_SAXPY(int n, float *x, float a, float * y) {
    float *yEnd = y + n;
    while (y != yEnd) {
        *y = a * (*x) + (*y);
        ++y;
        ++x;
    }
}
// Populate SAXPY inputs: y is filled with 2.0, x with uniform pseudo-random
// values in [0,1] drawn from the current rand() state, and *a is set to 2.0.
void initialize(int N, float *x, float *a, float * y) {
    a[0] = 2.0;
    for (int i = 0; i < N; ++i) {
        y[i] = 2.0;
        x[i] = (float)rand() / (float)RAND_MAX;
    }
}
// Compare CPU and GPU SAXPY on identical random inputs and report timings
// plus the mean squared error between the two results.
// Usage: prog <N> <threads_per_block>
int main(int argc, char **argv) {
    // Usage guard: the original dereferenced argv[1]/argv[2] unconditionally
    // and crashed when run without arguments.
    if (argc < 3) {
        std::cerr << "usage: " << argv[0] << " <N> <threads_per_block>" << std::endl;
        return 1;
    }
    int N, threads, seed = 12;
    N = std::atoi(argv[1]);
    threads = std::atoi(argv[2]);
    float *x_host, *y_host, *x_device, *y_device, a, *y_cpu_result, *y_gpu_result;
    // Cuda errs
    cudaError_t err;
    // Allocate in cpu memory
    x_host = (float*)malloc(sizeof(float) * N);
    y_host = (float*)malloc(sizeof(float) * N);
    srand(seed);
    initialize(N, x_host, &a, y_host);
    auto start = std::chrono::steady_clock::now();
    CPU_SAXPY(N, x_host, a, y_host);
    auto end = std::chrono::steady_clock::now();
    std::cout << "CPU " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << std::endl;
    // store result cpu
    y_cpu_result = (float*)malloc(sizeof(float) * N);
    memcpy(y_cpu_result, y_host, sizeof(float) * N);
    // Reset the seed and reinitialize so the GPU sees identical inputs.
    srand(seed);
    initialize(N, x_host, &a, y_host);
    // Allocate memory on GPU
    cudaMalloc(&x_device, sizeof(float) * N);
    cudaMalloc(&y_device, sizeof(float) * N);
    // Copy to device
    cudaMemcpy(x_device, x_host, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(y_device, y_host, sizeof(float) * N, cudaMemcpyHostToDevice);
    // Synchronize Copy
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("Error %s", cudaGetErrorString(err));
    }
    start = std::chrono::steady_clock::now();
    // +1 block to cover the tail when N is not a multiple of `threads`.
    GPU_SAXPY<<<(N / threads) + 1, threads>>>(N, x_device, a, y_device);
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("Error %s", cudaGetErrorString(err));
    }
    end = std::chrono::steady_clock::now();
    std::cout << "GPU " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << std::endl;
    // store gpu result
    y_gpu_result = (float*)malloc(sizeof(float) * N);
    cudaMemcpy(y_gpu_result , y_device, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // BUG FIX: the original accumulated sqrt(diff*diff) == |diff| (a sum of
    // absolute errors) while labelling it "mean squared error". Compute the
    // actual mean of squared differences.
    float mse = 0;
    for (int i = 0; i < N; i++) {
        float diff = y_cpu_result[i] - y_gpu_result[i];
        mse += diff * diff;
    }
    if (N > 0) mse /= N;
    std::cout << "mean squared error between GPU and CPU is: " << mse << std::endl;
    // Free all memory
    free(y_gpu_result);
    free(y_cpu_result);
    free(x_host);
    free(y_host);
    cudaFree(x_device);
    cudaFree(y_device);
    return 0;
}
|
20,734 | #include <stdio.h>
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
//------------------------------------------------------------------------------
// Print `size` ints from `array` to stdout in "{ a b c  }" form.
void print_array(int *array, int size) {
    printf("{ ");
    int i = 0;
    while (i < size) {
        printf("%d ", array[i]);
        ++i;
    }
    printf(" }");
}
//------------------------------------------------------------------------------
// Deliberately racy increment, kept for contrast with increment_atomic below:
// many threads map onto the same ARRAY_SIZE slots and perform an
// unsynchronized read-modify-write, so increments are lost. Do not "fix".
__global__ void increment_naive(int *g) {
// determine thread
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Wrap thread to array size
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;  // non-atomic: concurrent loads/stores interleave
}
//------------------------------------------------------------------------------
// Race-free counterpart of increment_naive: atomicAdd serializes concurrent
// updates to each slot, so every increment is counted.
__global__ void increment_atomic(int *g) {
    // Global thread id, wrapped onto the ARRAY_SIZE slots.
    int slot = (blockIdx.x * blockDim.x + threadIdx.x) % ARRAY_SIZE;
    atomicAdd(&g[slot], 1);
}
//------------------------------------------------------------------------------
// Driver: zero a small device array, hammer it with NUM_THREADS increments
// (atomic version by default), and print the resulting counts.
int main(int argc, char **argv) {
    printf("%d threads in %d blocks writing %d elements\n",
           NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
    // Host-side result buffer.
    int h_array[ARRAY_SIZE];
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    // Device buffer, zeroed before the kernel runs.
    int *d_array;
    cudaMalloc((void **) &d_array, ARRAY_BYTES);
    cudaMemset((void *) d_array, 0, ARRAY_BYTES);
    // Switch to increment_naive to observe lost updates.
    //increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
    increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
    // Blocking copy back: also synchronizes with the kernel.
    cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    print_array(h_array, ARRAY_SIZE);
    // free GPU memory
    cudaFree(d_array);
    return 0;
}
|
20,735 | #include <random>
#include <cuda.h>
#include <stdio.h>
#include <curand.h>
#include <time.h>
// cuRAND host-API demo: generate 2^24 uniform floats on the device with the
// default pseudo-random generator and print the first ten.
int main()
{
    curandGenerator_t gen;
    // Default (XORWOW) generator; the MT19937 alternative is left commented.
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    // curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937);
    // Seed from the wall clock so every run differs.
    unsigned int seed = time(0);
    printf("seed = %u\n", seed);
    curandSetPseudoRandomGeneratorSeed(gen, seed);
    int n = 1 << 24;
    size_t memSize = sizeof(float) * n;
    // Host and device buffers.
    float *hostData = (float *)malloc(memSize);
    float *devData;
    cudaMalloc(&devData, memSize);
    // Generate n uniform values in (0,1] straight into device memory, then
    // bring them back for inspection.
    curandGenerateUniform(gen, devData, n);
    cudaMemcpy(hostData, devData, memSize, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++)
        printf("%d %e\n", i, hostData[i]);
    curandDestroyGenerator(gen);
    cudaFree(devData);
    free(hostData);
}
|
20,736 | #define TILE_DIM 8
// Tiled matrix multiply: result = matrixA (rowsA x colsA) * matrixB
// (rowsB x colsB), all dense row-major. Each thread computes one output
// element, staging TILE_DIM x TILE_DIM tiles of A and B through shared memory.
// Assumptions (not checked here — confirm at call sites):
//   * colsA == rowsB, and `result` has room for rowsA x colsB elements;
//   * blockDim.x == blockDim.y == TILE_DIM, since the tile indexing uses
//     threadIdx/blockDim interchangeably with TILE_DIM.
// NOTE(review): declared __device__, so it must be invoked from a kernel
// running with the grid/block shape above.
template<typename T>
__device__ void matrixDotMatrix(const T* matrixA, const T* matrixB, T* result,
const int rowsA, const int colsA, const int rowsB, const int colsB) {
__shared__ T tileA[TILE_DIM][TILE_DIM];
__shared__ T tileB[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;   // output row this thread produces
int col = bx * blockDim.x + tx;   // output column this thread produces
T value = 0;
#pragma unroll
// March over the shared dimension one tile at a time (ceil division).
for (int t = 0; t < (colsA - 1) / TILE_DIM + 1; t++) {
// Load this thread's element of the A tile; pad with 0 out of range so
// the inner product below needs no bounds checks.
if (row < rowsA && t * TILE_DIM + tx < colsA) {
tileA[ty][tx] = matrixA[row * colsA + t * TILE_DIM + tx];
} else {
tileA[ty][tx] = 0;
}
// Same for the B tile.
if (t * TILE_DIM + ty < rowsB && col < colsB) {
tileB[ty][tx] = matrixB[(t * TILE_DIM + ty) * colsB + col];
} else {
tileB[ty][tx] = 0;
}
__syncthreads();   // tiles fully loaded before any thread reads them
#pragma unroll
for (int i = 0; i < TILE_DIM; i++) {
value += tileA[ty][i] * tileB[i][tx];
}
__syncthreads();   // all reads done before the next iteration overwrites
}
// Only threads mapped to a real output element store their result.
if (row < rowsA && col < colsB) {
result[row * colsB + col] = value;
}
}
|
20,737 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
// Adds 7 to every slot of row i of the Npart x Npart matrix totalNpairs
// (one row per thread), then attempts a grid-wide reduction into npairs.
// NOTE(review): as the original author's comment states, the reduction below
// is incorrect and kept only for demonstration — __syncthreads() is a
// block-level barrier, so thread 0 of block 0 may read rows that threads in
// OTHER blocks have not finished writing. The correct flow is to disable this
// reduction and run kernel1 (commented out below and in main) as a second
// launch, which gives an implicit grid-wide ordering.
void __global__ kernel0(int64_t Npart,int64_t* totalNpairs, int64_t* npairs){
int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i< Npart) {
for(int64_t j = 0;j < Npart;j++) {
totalNpairs[(i*Npart)+j]+=7;
}
}
__syncthreads();
// the code below for reduction is incorrect and should be commented, while the commented code in the main function should be uncommented
if(i==0){
for(int64_t k=0; k<Npart; k++){
for (int64_t j=0; j<Npart; j++){
npairs[j] += totalNpairs[(k*Npart)+ j];
}
}
}
}
//uncomment the code below for getting correct results
/*
void __global__ kernel1(int64_t *npairs, int64_t* totalNpairs, int64_t Npart){
int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if(i==0){
for(int64_t k=0; k<Npart; k++){
for (int64_t j=0; j<Npart; j++){
npairs[j] += totalNpairs[(k*Npart)+ j];
}
}
}
}
*/
// Driver for the kernel0/kernel1 reduction demo: initializes npairs to 5 and
// totalNpairs to 0, runs kernel0, and prints the first 100 npairs values.
// Note: with the kernel1 launch and the npairs copy-back left commented out,
// the printed values are the host-initialized 5s (see the comments below).
int main(int argc,char **argv){
int64_t *device_npairs;
int64_t *device_totalNpairs;
int64_t Npart=10000;
int64_t *npairs = (int64_t *) (calloc(Npart,sizeof((*npairs))));
int64_t *totalNpairs = (int64_t *) (calloc((Npart*Npart),sizeof(( *npairs))));
// 1-D launch: enough 1024-thread blocks to cover Npart threads.
int D_rows = (Npart > 1024 ) ? Npart/1024 : Npart;
int D_cols = (Npart > 1024 ) ? 1024 : 1;
if ( Npart % 1024 && Npart > 1024){
D_rows++;
}
printf("\nD_rows:%d, D_cols:%d\n",D_rows, D_cols);
dim3 dimGrid(D_rows,1);
dim3 dimBlock(D_cols,1);
for (int64_t j=0; j<Npart; j++){
npairs[j] = 5;
}
cudaMalloc((void **) &device_npairs,(Npart)*sizeof(int64_t));
cudaMemcpy(device_npairs,npairs,(Npart)*sizeof(int64_t),cudaMemcpyHostToDevice);
for (int64_t j=0; j<Npart*Npart; j++){
totalNpairs[j] = 0;
}
cudaMalloc((void **) &device_totalNpairs,(Npart*Npart)*sizeof(int64_t));
cudaMemcpy(device_totalNpairs,totalNpairs,(Npart*Npart)*sizeof(int64_t),cudaMemcpyHostToDevice);
kernel0<<<dimGrid,dimBlock>>>(Npart, device_totalNpairs, device_npairs);
cudaMemcpy(totalNpairs,device_totalNpairs,(Npart*Npart)*sizeof(int64_t), cudaMemcpyDeviceToHost);
//uncomment the code below to get correct results
//kernel1<<<dimGrid,dimBlock>>>(device_npairs,device_totalNpairs, Npart);
//cudaMemcpy(npairs,device_npairs,(Npart)*sizeof(int64_t), cudaMemcpyDeviceToHost);
cudaFree(device_npairs);
cudaFree(device_totalNpairs);
//only printing the first 100 elements
for (int i = 0; i < 100; i++) {
// BUG FIX: %lu is wrong for int64_t on LP32/LLP64 platforms; use the
// portable PRId64 macro from the already-included <inttypes.h>.
fprintf(stdout,"%" PRId64 "\t ",npairs[i]);
}
free(npairs);
free(totalNpairs);
return 0;
}
|
20,738 | #include <stdio.h>
#include <stdlib.h>
#include <cstdio>
// Fill a 1024-element shared buffer with the constant 2, then have each of
// the 32 threads copy its own contiguous 32-element slice to global memory.
__global__ void input( int *output)
{
    __shared__ int s_data[1024];
    // Every thread redundantly writes the whole buffer; harmless, since all
    // of them store the same value.
    for (int i = 0; i < 1024; ++i)
        s_data[i] = 2;
    __syncthreads();  // buffer fully written before anyone copies from it
    const int base = threadIdx.x * 32;
    for (int i = 0; i < 32; ++i)
        output[base + i] = s_data[base + i];
}
// Run the `input` kernel with a single 32-thread block and print a few of the
// 1024 copied values.
int main(void)
{
    // Device buffer for the kernel's 1024 outputs.
    int *ary;
    cudaMalloc((void**)&ary, 1024*sizeof(int));
    input<<<1,32>>>(ary);
    // Host-side copy (the blocking cudaMemcpy synchronizes with the kernel).
    int *ary2;
    ary2= (int *)malloc(sizeof(int)*1024);
    cudaMemcpy(ary2,ary,sizeof(int)*1024,cudaMemcpyDeviceToHost);
    printf("final result : %d %d %d",ary2[0],ary2[1],ary2[1023]);
    // BUG FIX: release the device and host buffers the original leaked.
    cudaFree(ary);
    free(ary2);
    return 0;
}
|
20,739 | #include<stdio.h>
#include <cuda.h>
#include <sys/time.h>
// One step of a Game-of-Life-style automaton on an n x n grid (row-major).
// x = current generation (0 dead / 1 alive), y = next generation.
// Rules, as documented by the original author:
//   - a live cell with 0 or 1 live neighbors dies from loneliness;
//   - a live cell with 4 or more live neighbors dies from overpopulation;
//   - a DEAD cell with 2 or 3 live neighbors becomes alive;
//   - otherwise the state is unchanged.
__global__ void compute(int* x,int* y,int n){
    int col=threadIdx.x+blockIdx.x*blockDim.x;
    int row=threadIdx.y+blockIdx.y*blockDim.y;
    if(col>=n || row>=n) return;
    int num=col+row*n;
    // Count live neighbors with a generic bounds-checked 8-neighborhood walk.
    // BUG FIX: this replaces the original branch-per-region code, which
    // (a) had no branch for the top-right corner (col==n-1, row==0), so that
    // cell fell through to the "middle" case and read out of bounds, and
    // (b) contained a duplicated, unreachable right-edge branch.
    int neighbor=0;
    for(int dr=-1; dr<=1; dr++){
        for(int dc=-1; dc<=1; dc++){
            if(dr==0 && dc==0) continue;     // skip the cell itself
            int r=row+dr, c=col+dc;
            if(r>=0 && r<n && c>=0 && c<n)
                neighbor += x[c + r*n];
        }
    }
    //Die : 0
    //Live: 1
    //A live cell with zero or one live neighbor dies from loneliness.
    if(x[num]==1 && (neighbor==0 || neighbor==1))
        y[num]=0;
    //A live cell with four or more live neighbors dies due to overpopulation.
    else if(x[num]==1 && neighbor>=4)
        y[num]=0;
    //A dead cell with two or three live neighbors becomes alive.
    // BUG FIX: the original tested x[num]==1 here, so dead cells could never
    // be born and live cells with 2-3 neighbors were redundantly re-set.
    else if(x[num]==0 && (neighbor==2 || neighbor==3))
        y[num]=1;
    //Otherwise, a cell's state stays unchanged
    else
        y[num] = x[num];
}
// Driver for the cellular-automaton kernel: builds a random board, runs a few
// generations on the GPU, and prints each round with its event timing.
int main(void){
    int i,j,k;
    // The kernel indexes an n x n board with a single dimension, so the board
    // must be square. (The original mixed a 6x4 board with the square kernel
    // and passed only `row` as n, which mis-indexed every row.)
    const int n = 6;
    int start[n][n];
    int Round[n][n];
    dim3 threadsPerBlock(32,32);
    // BUG FIX: ceiling division so the board is covered even when n < 32;
    // the original computed 6/32 == 0 blocks — an invalid (empty) launch.
    dim3 numBlocks((n + threadsPerBlock.x - 1)/threadsPerBlock.x,
                   (n + threadsPerBlock.y - 1)/threadsPerBlock.y);
    int* x;
    int* y;
    int generation =1;   // maximum generation/iteration
    float milliseconds=0;
    cudaEvent_t t_start,t_stop;
    cudaEventCreate(&t_start);
    cudaEventCreate(&t_stop);
    // Random initial generation; scratch buffer zeroed.
    for(i=0;i<n;i++)
        for(j=0;j<n;j++)
            start[i][j]=rand()%2;
    for(i=0;i<n;i++)
        for(j=0;j<n;j++)
            Round[i][j]=0;
    // BUG FIX: allocate BOTH device buffers before any launch — the original
    // launched the kernel with `y` before ever calling cudaMalloc on it.
    cudaMalloc((void **) &x,sizeof(int)*n*n);
    cudaMalloc((void **) &y,sizeof(int)*n*n);
    cudaMemcpy(x,start,sizeof(int)*n*n,cudaMemcpyHostToDevice);
    printf("Start\n");
    printf("-------\n");
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
            printf(start[i][j] ? " 0" : " 1");   // original prints "0" for live cells
        printf("\n");
    }
    for(k=0;k<= generation;k++)
    {
        cudaEventRecord(t_start);
        // BUG FIX: read from x, write to y — the original passed (y,y).
        compute<<<numBlocks,threadsPerBlock>>>(x,y,n);
        cudaEventRecord(t_stop);
        cudaEventSynchronize(t_stop);   // required before ElapsedTime
        cudaMemcpy(Round,y,sizeof(int)*n*n,cudaMemcpyDeviceToHost);
        printf("\n Round %d \n",k);
        printf("-------\n");
        for(i=0;i<n;i++)
        {
            for(j=0;j<n;j++)
                printf(Round[i][j] ? " 0" : " 1");
            printf("\n");
        }
        cudaEventElapsedTime(&milliseconds,t_start,t_stop);
        printf("Time taken for this computation = %f milliseconds\n\n",milliseconds);
        // Next generation reads this round's output: swap the device buffers.
        int* tmp = x; x = y; y = tmp;
    }
    cudaFree(x);
    cudaFree(y);
    cudaEventDestroy(t_start);
    cudaEventDestroy(t_stop);
    return 0;
}
|
20,740 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cmath>
typedef unsigned int histogram_t;
typedef unsigned vector_t;
#define MIL 1000
#define MILLON MIL*MIL
#define N 20*MILLON
#define M 8 //Tamaño histograma
#define P 10 //Nº sub-histogramas
#define Q (int)ceil((float)N/(float)P) //Elementos por histograma
#define R 101 //Repeticiones para medir tiempo
// Abort with a diagnostic if a CUDA runtime call did not return cudaSuccess.
// Invoked via the CUDA_CHECK_RETURN macro, which supplies file, line, and the
// stringified statement.
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
        exit (1);
    }
}
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// Zero-fill the first n bins of h: one thread per bin, tail threads exit early.
__global__ void inicializar_histograma(histogram_t * h, size_t n){
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx >= n)
		return;
	h[idx] = 0;
}
// Builds P partial histograms: one thread per input element; element i feeds
// sub-histogram i/q, so atomics only contend within one chunk of q elements.
__global__ void p_histogramas(histogram_t * h, vector_t * v,
		size_t m, size_t n, size_t q){
	size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
	if(thread_id < n){
		size_t histogram_id = thread_id/q;        // which sub-histogram this element feeds
		size_t histogram_pos = v[thread_id] % m;  // bin inside that sub-histogram
		histogram_t * addr = &h[histogram_id*m+histogram_pos];
		atomicAdd(addr, 1);
	}
}
// Atomic reduction variant: folds sub-histograms 1..p-1 into sub-histogram 0
// with one atomicAdd per bin. (The launch path currently uses
// reduccion_paralela instead; this launch is commented out below.)
__global__ void reduccion_atomic(histogram_t * h, size_t m, size_t p){
	size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
	if(thread_id >= m && thread_id < m*p){
		size_t pos = thread_id%m;
		atomicAdd(&h[pos], h[thread_id]);
	}
}
//Hay un hilo por cada dos posiciones del histograma m*(p/2)
// One tree-reduction step: adds the upper half of the p sub-histograms onto
// the lower half; when p is odd the leftover sub-histogram is folded into
// h[0..m). The host calls this repeatedly while halving p.
__global__ void reduccion_paralela(histogram_t * h, size_t m, size_t p){
	size_t thread_id = threadIdx.x + blockDim.x * blockIdx.x;
	if(thread_id >= m*(p/2)) return;
	h[thread_id] += h[thread_id + m*(p/2)];
	if((p%2) && thread_id < m)
		h[thread_id] += h[thread_id + 2*m*(p/2)];
}
float tiempo_kernel(histogram_t * h, vector_t * v, unsigned kernel);
// Host driver: fills a random input vector, times the P-sub-histogram
// pipeline on the GPU, then prints the aggregated M-bin histogram and a
// sanity sum of all bins.
int main (void){
	int sizeh = P*M*sizeof(histogram_t);
	histogram_t * h = (histogram_t*)malloc(sizeh);
	int sizev = N*sizeof(vector_t);
	vector_t * v = (vector_t*)malloc(sizev);
	for(int i = 0; i < N; i++) v[i] = rand();
	histogram_t * h_device;
	CUDA_CHECK_RETURN(cudaMalloc(&h_device, sizeh));
	vector_t * v_device;
	CUDA_CHECK_RETURN(cudaMalloc(&v_device, sizev));
	CUDA_CHECK_RETURN(cudaMemcpy(v_device, v, sizev, cudaMemcpyHostToDevice));
	printf("Llamando kernel con M %i N %i Q %i\n", M, N, Q);
	float elapsedTime = tiempo_kernel(h_device, v_device, 1);
	printf("Tiempo transcurrido: %f ms\n", elapsedTime);
	CUDA_CHECK_RETURN(cudaMemcpy(h, h_device, sizeh, cudaMemcpyDeviceToHost));
	printf("Resultado: ");
	long long unsigned n_resultado = 0;
	for(int i = 0; i < M; i++){
		n_resultado+=h[i];
		// h[i] is an unsigned int (histogram_t); the previous "%llu"
		// specifier read 8 bytes from a 4-byte argument (undefined behavior).
		printf("%u + ", h[i]);
	}
	printf("= %llu\n", n_resultado);
	printf("Nº de elementos del vector: %i\n", N);
	free(h);
	free(v);
	cudaFree(v_device);
	cudaFree(h_device);
	return 0;
}
// Runs the selected kernel pipeline R times and returns the mean elapsed
// time in milliseconds, discarding the first (warm-up) iteration.
float tiempo_kernel(histogram_t * h, vector_t * v, unsigned kernel){
	size_t threadsPerBlock = 1024;
	size_t blocksPerGridM = ((unsigned)M + threadsPerBlock - 1) / threadsPerBlock;
	size_t blocksPerGridN = ((unsigned)N + threadsPerBlock - 1) / threadsPerBlock;
	size_t blocksPerGridPM = ((unsigned)(P*M) + threadsPerBlock - 1) / threadsPerBlock;
	size_t blocksReduccion;
	cudaEvent_t start,stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	float totalTime = 0;
	for(int i = 0; i < R; i++){
		size_t p = P;
		cudaEventRecord(start, 0);
		switch(kernel){
		case 1:
			inicializar_histograma <<<blocksPerGridPM, threadsPerBlock>>>(h, M*P);
			p_histogramas <<<blocksPerGridN, threadsPerBlock>>>(h, v, M, N, Q);
			//reduccion_atomic <<<blocksPerGridPM, threadsPerBlock>>>(h, M, P);
			// Pairwise reduction until a single m-bin histogram remains.
			// NOTE(review): blocksReduccion is the number of *threads* needed,
			// yet it is used as the grid size with 1024 threads per block, so
			// the launch is heavily over-provisioned (the kernel's guard keeps
			// it correct) — confirm the intended launch geometry.
			while(p > 1){
				blocksReduccion = M*(p/2);
				reduccion_paralela<<<blocksReduccion, threadsPerBlock>>>(h, M, p);
				p/=2;
			}
			if(cudaPeekAtLastError() != cudaSuccess) printf("Falla para %lu hilos/bloque y %lu bloques\n", threadsPerBlock, blocksPerGridPM);
			break;
		default:
			printf("Cuidado! No se selecciona ningún kernel\n");
			break;
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		CUDA_CHECK_RETURN(cudaPeekAtLastError());
		CUDA_CHECK_RETURN(cudaGetLastError());
		float elapsedTime;
		cudaEventElapsedTime(&elapsedTime, start, stop);
		if(i != 0) totalTime += elapsedTime;  // skip iteration 0 (warm-up)
	}
	return totalTime / (R-1);
}
|
20,741 | #include <cuda_runtime.h>
#include <stdio.h>
// Empty kernel: exists only to demonstrate a minimal launch.
__global__ void helloKernel() {
}
// Launches the no-op kernel and prints a greeting from the host.
int main(int argc, char **argv) {
	// Fire-and-forget launch; the process exits without synchronizing, which
	// is harmless here because the kernel has no observable effect.
	helloKernel<<<1,1>>>();
	printf("Host: Hello World!!!\n");
	return (0);
}
|
20,742 | // #######################################################
//
// Exemplo (template) de multiplicação de matrizes em CUDA
// Disciplina: OPRP001 - Programação Paralela
// Prof.: Mauricio Pillon
//
// #######################################################
#include <cuda.h>
#include <math.h>
#include <stdio.h>
// Matriz Quadrada (nro_linhas = nro_colunas)
#define N \
4 // Número de linhas
// Número de colunas
// GPU: Multiplicação das matrizes (a) e (b), resultado em (c)
// GPU: multiplies N x N matrices (a) and (b), writing the product to (c).
// One thread computes one output element; out-of-range threads do nothing.
__global__ void matMult(int *da, int *db, int *dc) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int sum = 0;
  // (The unused local `k` from the original has been removed.)
  if (col < N && row < N) {
    // Dot product of row `row` of (a) with column `col` of (b).
    for (int i = 0; i < N; i++) {
      sum += da[row * N + i] * db[i * N + col];
    }
    dc[row * N + col] = sum;
  }
}
// GPU: Imprime índices na matriz
// GPU: prints, for every thread, its computed linear index together with the
// raw x/y thread, block, and dim values — a launch-geometry debugging aid.
__global__ void printIndex(void) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  printf("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n", i, j, (i * N + j),
         threadIdx.x, blockIdx.x, blockDim.x, threadIdx.y, blockIdx.y,
         blockDim.y);
}
// GPU: Inicializa os vetores (a), (b) e (c) na Memória Global
// GPU: fills the three buffers with sentinel values (-1, -2, -3) so later
// copies make it obvious whether kernel writes actually happened.
// NOTE(review): no bounds guard — relies on being launched with exactly
// N blocks of N threads (N*N total).
__global__ void dirtyMem(int *da, int *db, int *dc) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  da[i] = -1;
  db[i] = -2;
  dc[i] = -3;
}
// CPU: Inicializa os vetores (a) e (b)
// CPU: initializes the input matrices so that a[i][j] + b[i][j] == N*N for
// every element, giving an easily-checked multiplication input.
__host__ void initvet(int *host_a, int *host_b) {
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      host_b[i * N + j] = (i + j) + ((N - 1) * i);
      host_a[i * N + j] = (N * N) - host_b[i * N + j];
    }
  }
}
// CPU: Imprime matriz
// CPU: prints an N x N row-major matrix with column and row labels.
__host__ void printMat(int *mat) {
  for (int j = 0; j < N; j++)
    printf("\t(%d)", j);
  printf("\n");
  for (int i = 0; i < N; i++) {
    printf("(%d)", i);
    for (int j = 0; j < N; j++) {
      printf("\t%d", mat[i * N + j]);
    }
    printf("\n");
  }
}
// CPU: função principal
// CPU entry point: allocates pinned host and device buffers, shows the
// sentinel-filled device memory, then multiplies a x b on the GPU and prints
// the result.
int main(int argc, char const *argv[]) {
  int *a, *b, *c;
  int *dev_a, *dev_b, *dev_c;
  int size;
  // Square-matrix allocation size in bytes
  size = N * N * sizeof(int);
  // Host allocations (pinned memory)
  cudaMallocHost((void **)&a, size);
  cudaMallocHost((void **)&b, size);
  cudaMallocHost((void **)&c, size);
  // Device allocations for matrices a, b and c
  cudaMalloc((void **)&dev_a, size);
  cudaMalloc((void **)&dev_b, size);
  cudaMalloc((void **)&dev_c, size);
  // Write sentinel values into the device buffers
  dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c);
  // Copy GPU -> CPU
  cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
  // Print the sentinel-initialized matrices
  printf("\t ### Valores Inicializados na GPU ###\n");
  printf("\t ### Matriz (a) ### \n");
  printMat(a);
  printf("\t ### Matriz (b) ### \n");
  printMat(b);
  printf("\t ### Matriz (c) ### \n");
  printMat(c);
  // Initialize matrices (a) and (b) on the host
  initvet(a, b);
  // Copy the CPU-generated matrices to device memory
  cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
  // Launch geometry for the (x, y) dimensions: one block of N x N threads
  dim3 dimBlock(1, 1);
  dim3 dimThreads(N, N);
  // Print the positions visited with this dimBlock/dimThreads layout
  printIndex<<<dimBlock, dimThreads>>>();
  // Run the matMult kernel on the GPU
  matMult<<<dimBlock, dimThreads>>>(dev_a, dev_b, dev_c);
  cudaDeviceSynchronize();
  // Copy matrix (c) from GPU global memory back to the CPU
  cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
  // Print the matrices after GPU processing
  printf("\t ### Valores após processamento em GPU ###\n");
  printf("\t ### Matriz (a) ### \n");
  printMat(a);
  printf("\t ### Matriz (b) ### \n");
  printMat(b);
  printf("\t ### Matriz (c) ### \n");
  printMat(c);
  // Free GPU global memory
  cudaFree(dev_a);
  cudaFree(dev_b);
  cudaFree(dev_c);
  // Free pinned host memory
  cudaFreeHost(a);
  cudaFreeHost(b);
  cudaFreeHost(c);
  return 0;
}
|
20,743 | #include <cstdio>
#include <cstdlib>
#include <time.h>
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the CUDA error with its source location and (optionally) aborts.
// `file` is const-qualified because __FILE__ yields a string literal, which
// may not bind to a non-const char* in modern C++. Adding const is
// backward-compatible for every caller.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code != cudaSuccess)
	{
		fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
		if (abort) exit(code);
	}
}
#define THREADS_PER_BLOCK 1
#define NUM_BLOCKS 5
#define ARR_LEN 5 //matrix size
#define ITERS 1
#define PRINT_TIME 1
//typedef float float;
/************************************/
/* SOR */
// ORs row b of the ARR_LEN x ARR_LEN matrix into row a, then mirrors the
// updated row into column a to keep the matrix symmetric.
// NOTE(review): the index folds the x and y coordinates with `+=`, so
// distinct (x, y) threads can alias the same i — confirm the launch is
// effectively one-dimensional before reusing this kernel.
__global__ void MMM_global( int* A, int a, int b )
{
	long int i = blockDim.x * blockIdx.x + threadIdx.x;
	i += blockDim.y * blockIdx.y + threadIdx.y;
	int len = ARR_LEN;
	if( i < len )
	{
		*(A+ a*len + i ) = ( *(A+ a*len + i) ) | ( *(A+ b*len + i ) );
		//then reflect
		*(A + i*len + a ) = *(A + a*len + i );
	}
}
//----------------------------------------------------------------------------
// Host driver: ORs row/column b of a random 0/1 matrix into row/column a on
// the GPU, printing the matrices and the elapsed time for each iteration.
int main( int argc, char **argv )
{
	srand( time( NULL ));
	size_t mem_size = sizeof( int ) * ARR_LEN * ARR_LEN;
	// Forward declarations of the host helpers defined below
	void initRandArray( int* array, int length );
	void PrintMat( int*, int );
	//GPU dimensions
	dim3 dimGrid( NUM_BLOCKS, NUM_BLOCKS );
	dim3 dimBlock( THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1 );
	// GPU Timing variables
	cudaEvent_t start, stop;
	float elapsed_gpu;
	//host array
	int* h_a;
	int* h_result;
	//allocate
	h_a = (int*) malloc( mem_size );
	h_result = (int*) malloc( mem_size );
	//gpu arrays
	int* d_a;
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_a, mem_size ));
	CUDA_SAFE_CALL(cudaSetDevice(0)); //set to correct device
	//------------
	//MMM test
	//------------
	int i;
	int a = 0;
	int b = 1;
	printf("OR test\n----------\n");
	for( i = 0; i < ITERS; i++ )
	{
		//initialize host array
		initRandArray( h_a, (int)ARR_LEN );
		printf("Initial Array\n");
		PrintMat( h_a, ARR_LEN );
#if PRINT_TIME
		// Create the cuda events
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		// Record event on the default stream
		cudaEventRecord(start, 0);
#endif
		// Transfer the arrays to the GPU memory
		CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, mem_size, cudaMemcpyHostToDevice));
		// Launch the kernel
		MMM_global<<< dimGrid, dimBlock >>>( d_a, a, b );
		// Transfer the results back to the host
		CUDA_SAFE_CALL(cudaMemcpy(h_result, d_a, mem_size, cudaMemcpyDeviceToHost));
		printf("GPU result\n");
		PrintMat( h_result, ARR_LEN );
#if PRINT_TIME
		// Stop and destroy the timer
		cudaEventRecord(stop,0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsed_gpu, start, stop);
		printf("time: %f (msec)\n", elapsed_gpu);
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
#endif
	}
	return 0;
}
//----------------------------------------------------------------------------
//intialize an array to random values
// Fill a len x len row-major matrix with random 0/1 values from rand().
void initRandArray ( int *arr, int len) {
	for (int r = 0; r < len; r++)
		for (int c = 0; c < len; c++)
			arr[r*len + c] = rand()&1;
}
// Print a V x V row-major matrix, one row per line.
void PrintMat( int* A, int V )
{
	for( int i = 0; i < V; i++ )
	{
		for( int j = 0; j < V; j++ )
			printf("%i ", A[i*V + j] );
		printf( "\n" );
	}
}
|
20,744 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
// Integer ceiling division: smallest k such that k*b >= a (for non-negative a, positive b).
int iDivUp(const int a, const int b) {
    if (a % b == 0)
        return a / b;
    return a / b + 1;
}
// Computes row and column intensity projections of the binarized image:
// Y[r] = sum of row r, X[c] = sum of column c. One thread handles one row
// index and/or one column index.
__global__ static void KernelRect(unsigned char *imgdst,long *X,long *Y, int imgWidth, int imgHeight)
{
	unsigned long index = threadIdx.x + blockIdx.x * blockDim.x;
	int sum = 0;
	// Row sums (Y projection)
	if(index < imgHeight)
	{
		for(int i = 0; i < imgWidth;i++)
			sum = sum + imgdst[index * imgWidth + i];
		Y[index] = sum;
	}
	sum = 0;
	// Column sums (X projection)
	if(index < imgWidth)
	{
		for(int i = 0; i < imgHeight; i++)
			sum = sum + imgdst[index + imgWidth * i];
		X[index] = sum;
	}
}
// Thresholds the absolute difference of two images: pixels whose difference
// is below K become 0, all others 255.
// NOTE(review): no bounds guard on index — correctness relies on the caller
// launching exactly imagesize threads (imagesize divisible by the block size).
__global__ static void KernelSobel(unsigned char *imgscr,unsigned char *imgscr2, unsigned char *imgdst,int imgWidth,int imgHeight,unsigned char K)
{
	unsigned long index;
	unsigned char pValue;
	index = threadIdx.x + blockIdx.x * blockDim.x;
	pValue = abs(imgscr[index] - imgscr2[index]);
	if(pValue < K)
		imgdst[index] = 0;
	else
		imgdst[index] =255;
}
// Host wrapper: uploads both source images, runs the difference-threshold
// kernel followed by the row/column projection kernel, then copies the
// binarized image and the X/Y projection vectors back to the host.
extern "C" void Mykernelfunc(unsigned char *c_imgsrc, unsigned char *c_imgsrc2, unsigned char *c_imgdst, long *X, long *Y, int imageWidth, int imageHeight, unsigned long imagesize, unsigned char K)
{
	// Device-side buffers (the unused `tempsex` local has been removed)
	unsigned char *gpu_imgsrc = 0;
	unsigned char *gpu_imgsrc2 = 0;
	unsigned char *gpu_imgdst = 0;
	long *gpu_X = 0;
	long *gpu_Y = 0;
	// Launch geometry
	dim3 dimBlock = 16;
	dim3 dimGrid = imagesize / dimBlock.x;
	dim3 dimGridsum = 0;
	// Grabbing the highest value of both imageheight and imagewidth
	if(imageHeight > imageWidth)
		dimGridsum = iDivUp(imageHeight, dimBlock.x);
	else
		dimGridsum = iDivUp(imageWidth, dimBlock.x);
	// Finding the memory size of the image
	size_t memsize = imagesize * sizeof(unsigned char);
	// Allocating device memory and uploading inputs
	cudaMalloc((void**)&gpu_imgsrc, memsize);
	cudaMemcpy(gpu_imgsrc, c_imgsrc, memsize, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&gpu_imgsrc2, memsize);
	cudaMemcpy(gpu_imgsrc2, c_imgsrc2, memsize, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&gpu_imgdst, memsize);
	cudaMalloc((void**)&gpu_X,imageWidth * sizeof(long));
	cudaMemcpy(gpu_X, X, imageWidth * sizeof(long), cudaMemcpyHostToDevice);
	cudaMalloc((void**)&gpu_Y,imageHeight * sizeof(long));
	cudaMemcpy(gpu_Y, Y, imageHeight * sizeof(long), cudaMemcpyHostToDevice);
	// Launching kernel functions.
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported equivalent.
	KernelSobel<<<dimGrid.x, dimBlock.x>>>(gpu_imgsrc, gpu_imgsrc2, gpu_imgdst, imageWidth, imageHeight, K);
	cudaDeviceSynchronize();
	KernelRect<<<dimGridsum.x,dimBlock.x>>>(gpu_imgdst, gpu_X, gpu_Y, imageWidth, imageHeight);
	cudaDeviceSynchronize();
	// Grabbing data from gpu variables
	cudaMemcpy(c_imgdst,gpu_imgdst, memsize, cudaMemcpyDeviceToHost);
	cudaMemcpy(X,gpu_X,imageWidth * sizeof(long), cudaMemcpyDeviceToHost);
	cudaMemcpy(Y,gpu_Y,imageHeight * sizeof(long), cudaMemcpyDeviceToHost);
	// Freeing memory
	cudaFree(gpu_imgdst);
	cudaFree(gpu_imgsrc);
	cudaFree(gpu_imgsrc2);
	cudaFree(gpu_X);
	cudaFree(gpu_Y);
}
|
20,745 | /*
* Demonstration of 2-dimensional block- and thread-indices
* mostly the same as vec_addition example.
* adds up a square matrix with height and with N (Means N^2 calculations)
* kernel is divided in block with THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_X threads per block
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define N 4
#define THREADS_PER_BLOCK_X 2
// Adds two N x N matrices element-wise; each thread handles exactly one
// (x, y) cell. There is no bounds guard, so the launch geometry must tile
// the matrix exactly.
__global__ void add(int (*a)[N], int (*b)[N], int (*c)[N]) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	c[x][y] = a[x][y] + b[x][y];
	printf("%3d %3d %14d %14d %14d \n",x,y, a[x][y], b[x][y], c[x][y]);
}
// Populate num entries of start with pseudo-random values from rand().
void random_ints(int *start, int num) {
	unsigned int i = 0;
	// The unsigned counter is compared against the signed count exactly as
	// the original did, preserving its conversion semantics.
	while (i < num) {
		start[i] = rand();
		++i;
	}
}
// Demonstrates a 2-D launch: adds two random N x N matrices on the GPU and
// prints the result.
int main(void) {
	int (*a)[N], (*b)[N], (*c)[N]; // host copies of a, b, c
	int (*d_a)[N], (*d_b)[N], (*d_c)[N]; // device copies of a, b, c
	//matrix size in bytes
	int size = N *N*sizeof(int);
	//allocate memory on the host
	a = (int (*)[N]) malloc(size);
	b = (int (*)[N]) malloc(size);
	c = (int (*)[N]) malloc(size);
	//allocate memory on device
	cudaMalloc((void ***)&d_a, size);
	cudaMalloc((void ***)&d_b, size);
	cudaMalloc((void ***)&d_c, size);
	//fill with random ints
	for (unsigned int i = 0; i < N; i++) {
		random_ints(a[i], N);
		random_ints(b[i], N);
	}
	// Copy the input matrices to the device
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	//calculate number of blocks
	int num_blocks_x = (N / THREADS_PER_BLOCK_X);
	// Launch add() kernel on GPU with a 2-D grid of 2-D blocks
	add<<< dim3(num_blocks_x,num_blocks_x) , dim3(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_X) >>>(d_a, d_b, d_c);
	cudaDeviceSynchronize();
	cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
	printf("kernel successfull finished \n");
	for (unsigned int i = 0; i < N; i++) {
		for (unsigned int j = 0; j < N; j++) {
			printf("%d %d %d \n", i, j, c[i][j]);
		}
	}
	//cleanup
	free(a);
	free(b);
	free(c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
|
20,746 | #include<stdio.h>
#include<cuda.h>
#define N 10
// Element-wise add of two N-vectors; launched with one thread per block,
// so the block index selects the element.
__global__ void vecAdd(int *a, int *b, int *c)
{
	int slot = blockIdx.x;
	if(slot >= N)
		return;
	c[slot] = a[slot] + b[slot];
}
// Aborts with a descriptive message when a CUDA API call fails.
// `function` is const-qualified: every call site passes a string literal,
// which may not bind to a non-const char* in modern C++. Adding const is
// backward-compatible for all callers.
void checkError(cudaError_t error, const char * function)
{
	if(error != cudaSuccess)
	{
		printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error));
		exit(-1);
	}
}
// Host driver: builds two N-element vectors, adds them on the GPU (one
// block per element) and prints the result.
int main()
{
	int a[N], b[N], c[N];
	int *deviceA, *deviceB, *deviceC;
	int i; //a variable for use in loops.
	size_t size = N * sizeof(int);
	//put some value in the 'a' & 'b' arrays
	for(i = 0 ; i < N ; i ++)
	{
		a[i] = i;
		b[i] = i;
	}
	//initialize the memory on GPU
	checkError(cudaMalloc((void**)&deviceA, size), "Cuda Malloc for deviceA");
	checkError(cudaMalloc((void**)&deviceB, size), "Cuda Malloc for deviceB");
	checkError(cudaMalloc((void**)&deviceC, size), "Cuda Malloc for deviceC");
	checkError(cudaMemcpy(deviceA, a, size, cudaMemcpyHostToDevice), "Cuda MemCpy for DeviceA");
	checkError(cudaMemcpy(deviceB, b, size, cudaMemcpyHostToDevice), "Cuda MemCpy for DeviceB");
	vecAdd<<<N , 1>>>(deviceA, deviceB, deviceC);
	// The blocking device-to-host copy also synchronizes with the kernel.
	checkError(cudaMemcpy(c, deviceC, size, cudaMemcpyDeviceToHost), "Cuda MemCpy for DeviceC");
	for(i = 0 ; i < N ; i ++)
		printf("c[%d] = %d\n", i , c[i]);
	cudaFree(deviceA);
	cudaFree(deviceB);
	cudaFree(deviceC);
	return 0;
}
|
20,747 | #include "includes.h"
// Masked 3x3 median filter: pixels flagged in keep_mask pass through
// unchanged; every other pixel is replaced, per channel, by the median of
// the masked ("trusted") pixels in its clamped 3x3 neighbourhood (0 if none).
__global__ void MedianFilterWithMask3x3_Kernel(float* output, const float* input, const int width, const int height, const int nChannels, const bool* keep_mask)
{
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	if (x >= width || y >= height)
		return;
	int offset = y*width + x;
	// Trusted pixels are copied verbatim.
	if (keep_mask[offset])
	{
		for (int c = 0; c < nChannels; c++)
			output[offset*nChannels + c] = input[offset*nChannels + c];
		return;
	}
	float vals[9] = { 0 };
	int count = 0;
	for (int c = 0; c < nChannels; c++)
	{
		count = 0;
		// Clamp the 3x3 window to the image borders.
		int start_x = ((x - 1) >= 0) ? (x - 1) : 0;
		int end_x = ((x + 1) <= (width - 1)) ? (x + 1) : (width - 1);
		int start_y = ((y - 1) >= 0) ? (y - 1) : 0;
		int end_y = ((y + 1) <= (height - 1)) ? (y + 1) : (height - 1);
		// Collect this channel's values from masked neighbours only.
		for (int ii = start_y; ii <= end_y; ii++)
		{
			for (int jj = start_x; jj <= end_x; jj++)
			{
				int cur_offset = ii*width + jj;
				if (keep_mask[cur_offset])
				{
					vals[count++] = input[cur_offset*nChannels + c];
				}
			}
		}
		if (count == 0)
		{
			// No trusted neighbour: fall back to 0.
			output[offset*nChannels + c] = 0;
		}
		else
		{
			// Partial selection sort (descending): after `mid` passes the
			// largest `mid` values occupy vals[0..mid-1] in order.
			int mid = (count + 1) / 2;
			for (int pass = 0; pass < mid; pass++)
			{
				float max_val = vals[pass];
				int max_id = pass;
				for (int id = pass + 1; id < count; id++)
				{
					if (max_val < vals[id])
					{
						max_val = vals[id];
						max_id = id;
					}
				}
				vals[max_id] = vals[pass];
				vals[pass] = max_val;
			}
			// Bug fix: the median is the mid-th largest value, i.e.
			// vals[mid-1]. The original read vals[mid], which lies past the
			// sorted prefix (arbitrary leftover; 0 when count == 1).
			output[offset*nChannels + c] = vals[mid - 1];
		}
	}
} |
20,748 | extern "C" // ensure function name will be left alone rather than mangled like a C++ function
{
// Compute the standard normal density at an array of n points (x) and stores output in y.
__global__ void std_normal_pdf_double(const double *x, double *y, unsigned int n)
{
	// assumes a 2-d grid of 1-d blocks
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	// phi(x) = exp(-x^2/2) / sqrt(2*pi), evaluated fully in double precision
	const double ONE_OVER_ROOT_TWOPI = 1.0/sqrt(2.0*M_PI);
	if(i<n) y[i] = exp(-0.5*x[i]*x[i])*ONE_OVER_ROOT_TWOPI;
}
// Compute the standard normal density at an array of n points (x) and stores output in y.
// Single-precision standard normal density at n points x, stored in y.
__global__ void std_normal_pdf_float(const float *x, float *y, unsigned int n)
{
	// assumes a 2-d grid of 1-d blocks
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	// Use the explicit single-precision functions (rsqrtf/expf) so the whole
	// computation provably stays in float instead of relying on overload
	// resolution of the double-named functions.
	const float ONE_OVER_ROOT_TWOPI_F = rsqrtf(2.0f*3.14159265358979f);
	if(i<n) y[i] = expf(-0.5f*x[i]*x[i])*ONE_OVER_ROOT_TWOPI_F;
}
// Compute the normal density N(mu, sig) at an array of n points (x) and stores output in y.
// Normal density N(mu[i], sig[i]) at n points x, stored in y.
__global__ void normal_pdf_double(const double *x, const double *mu, const double *sig, double *y, unsigned int n)
{
	// assumes a 2-d grid of 1-d blocks
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	// Constant kept in double: it was previously declared float, silently
	// truncating 1/sqrt(2*pi) for every double-precision evaluation.
	const double ONE_OVER_ROOT_TWOPI = 1.0/sqrt(2.0*M_PI);
	if(i<n)
	{
		// Standardize first: the density is exp(-0.5*((x-mu)/sig)^2) /
		// (sig*sqrt(2*pi)); the original omitted the division by sig inside
		// the exponent.
		double dx = (x[i] - mu[i]) / sig[i];
		y[i] = exp(-0.5*dx*dx)*ONE_OVER_ROOT_TWOPI/sig[i];
	}
}
// Compute the normal density N(mu, sig) at an array of n points (x) and stores output in y (single precision).
// Single-precision normal density N(mu[i], sig[i]) at n points x, stored in y.
__global__ void normal_pdf_float(const float *x, const float *mu, const float *sig, float *y, unsigned int n)
{
	// assumes a 2-d grid of 1-d blocks
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	// Explicit single-precision functions keep the computation in float.
	const float ONE_OVER_ROOT_TWOPI_F = rsqrtf(2.0f*3.14159265358979f);
	if(i<n)
	{
		// Standardize first: the original omitted the division by sig inside
		// the exponent, so the density shape ignored the standard deviation.
		float dx = (x[i] - mu[i]) / sig[i];
		y[i] = expf(-0.5f*dx*dx)*ONE_OVER_ROOT_TWOPI_F/sig[i];
	}
}
// Reference sum: the single thread with global index 0 serially accumulates
// input[0..n) into output[0].
__global__ void sum_simplistic_double(const double *input, double *output, unsigned int n)
{
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	if (i==0)
	{
		double sum = 0.0;
		for(int j=0;j<n;++j)
			sum += input[j];
		// Only thread 0 writes. Previously every launched thread stored its
		// (mostly zero) local sum to output[0], racing with the real result.
		output[0] = sum;
	}
}
// Reference sum (float): the single thread with global index 0 serially
// accumulates input[0..n) into output[0].
__global__ void sum_simplistic_float(const float *input, float *output, unsigned int n)
{
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	if (i==0)
	{
		float sum = 0.0;
		for(int j=0;j<n;++j)
			sum += input[j];
		// Only thread 0 writes. Previously every launched thread stored its
		// (mostly zero) local sum to output[0], racing with the real result.
		output[0] = sum;
	}
}
// Adopted from https://code.google.com/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/sum_reduction.cu
// this kernel computes, per-block, the sum
// of a block-sized portion of the input
// using a block-wide reduction
__global__ void block_sum_double(const double *input,
		double *per_block_results,
		unsigned int n)
{
	// Dynamic shared memory: the launch must pass blockDim.x * sizeof(double).
	extern __shared__ double sdata[];
	unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	// load input into __shared__ memory (0 pads the tail of the last block)
	double x = 0.0;
	if(i < n)
	{
		x = input[i];
	}
	sdata[threadIdx.x] = x;
	__syncthreads();
	// contiguous range pattern; the halving offset assumes blockDim.x is a
	// power of two, otherwise some elements are dropped from the reduction
	for(unsigned int offset = blockDim.x / 2;
			offset > 0;
			offset >>= 1)
	{
		if(threadIdx.x < offset)
		{
			// add a partial sum upstream to our own
			sdata[threadIdx.x] += sdata[threadIdx.x + offset];
		}
		// wait until all threads in the block have
		// updated their partial sums
		__syncthreads();
	}
	// thread 0 writes the final result for this block
	if(threadIdx.x == 0)
	{
		unsigned int block_id_1d = (blockIdx.y * gridDim.x + blockIdx.x);
		per_block_results[block_id_1d] = sdata[0];
	}
}
}
|
20,749 | #include "includes.h"
// Computes len[i] = sqrt(x[i]^2 + y[i]^2) for the first *size elements; one
// thread per element, tail threads do nothing.
__global__ void vectorLength(int *size, const double *x, const double *y, double *len) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < *size) {
		len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
	}
} |
20,750 | /**************************************************************************
* This file contains implementation of pqp (parallel quadratic programming)
* GPU version optimised with TILE and shared memory for MPC Term Project of HP3 Course.
* Group 7 CSE Dept. IIT KGP
* Objective function: 1/2 U'QpU + Fp'U + 1/2 Mp
* Constraints: GpU <= Kp
**************************************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define NUM_ITER 1000
#define pHorizon 1
#define nState 29
#define nInput 7
#define nOutput 7
#define nDis 1
#define erc 1e-6
#define eac 1e-6
#define eaj 1e-6
#define erj 1e-6
#define TILE_DIM 32
#define BLOCK_ROWS 8
#define BLOCK_SIZE 16
#define BLK_ROWS 32
#define BLK_COLS 32
//size of the share memory tile in the device
#define TILE_SIZE BLK_ROWS
// Debug kernel: serially prints an N x M matrix; intended to be launched
// with a single thread (the loops are not parallelized).
__global__ void printMat(float *mat, int N, int M)
{
	printf("printing mat\n");
	for(int i=0;i<N;i++)
	{
		for(int j=0;j<M;j++)
		{
			printf("%f ",mat[i*M+j]);
		}
		printf("\n");
	}
	printf("\n");
}
// Coarsened fill kernel: each thread sets the element pair (2*id, 2*id+1)
// of mat to val; mat holds N floats in total.
__global__ void initMatCuda(float *mat, float val, int N) //coarsened // parallel
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	// Guard each element individually: the former `id < N` guard let threads
	// write up to index 2*N+1, an out-of-bounds store whenever N is not a
	// multiple of the launch coverage.
	int id1 = 2*id;
	int id2 = 2*id + 1;
	if(id1 < N)
		mat[id1] = val;
	if(id2 < N)
		mat[id2] = val;
}
/**************************************************************************
* This is utility function initialize the matrix
* 1. Parameter is float type matrix pointer (*mat), float val,
* size of matrix
* 2. Return type void
**************************************************************************/
// Launcher for the const-fill kernel. The kernel is coarsened (two elements
// per thread), which is why 512-thread blocks are paired with a grid sized
// as if the blocks held 1024 threads.
void initMat(float *mat, float val, int N) // parallel
{
	dim3 block = 512; //1024;
	dim3 grid = (N+1024-1)/1024;
	initMatCuda<<<grid, block>>>(mat, val, N);
}
// Allocates an n x m float matrix on the device, aborts the process on
// allocation failure, and zero-initializes the memory via initMat.
float *newMatrixCUDA(int n, int m)
{
	float *tmp = NULL;
	cudaError_t err = cudaMalloc((void **)&tmp, n*m*sizeof(float));
	if ( err != cudaSuccess )
	{
		printf (" Failed to allocate device matrix! %s\n", cudaGetErrorString(err));
		exit ( EXIT_FAILURE ) ;
	}
	initMat(tmp, 0, n*m);
	return tmp;
}
/**************************************************************************
* This is utility function for create new matrix
* 1. Parameter is (int n, int m) dimension of (n X m matrix) ,
* 2. Return pointer of new matrix
* 3. This function create dynamic size matrix using malloc
**************************************************************************/
// Allocates a zero-initialized n x m float matrix on the host.
// Aborts on allocation failure (mirroring newMatrixCUDA) instead of
// returning NULL for the caller to dereference.
float *newMatrix(int n, int m)
{
	// calloc zero-fills the buffer, replacing the explicit init loop.
	float *tmp = (float *)calloc((size_t)n*m, sizeof(float));
	if(tmp == NULL)
	{
		printf("Failed to allocate host matrix!\n");
		exit(EXIT_FAILURE);
	}
	return tmp;
}
// Blocking host-to-device copy of an n x m float matrix.
void copyToDevice(float *dM, float *hM, int n, int m)
{
	int size = n*m;
	cudaMemcpy (dM ,hM, size * sizeof ( float ) , cudaMemcpyHostToDevice );
}
// Blocking device-to-host copy of an n x m float matrix.
void copyToHost(float *hM, float *dM, int n, int m)
{
	int size = n*m;
	cudaMemcpy (hM ,dM, size * sizeof ( float ) , cudaMemcpyDeviceToHost );
}
// Coarsened copy kernel: each thread copies the element pair (2*id, 2*id+1)
// of the a*b-element matrix mat into output.
__global__ void copyMatrixCuda(float *output, float *mat, int a, int b) // parallel //coarsened
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	// Guard each element individually: the former `id < a*b` guard allowed
	// accesses up to index 2*a*b+1 — out of bounds whenever a*b is not a
	// multiple of the launch coverage.
	int id1 = 2*id;
	int id2 = 2*id + 1;
	if(id1 < a*b)
		output[id1] = mat[id1];
	if(id2 < a*b)
		output[id2] = mat[id2];
}
/**************************************************************************
 * This is a utility function that copies matrix `mat` (a x b) into
 * `output` using the coarsened copy kernel above.
 * 1. Parameters: destination pointer, source pointer, dimensions (a, b)
 * 2. Return type void
 * (The previous comment described matrix addition/subtraction and did not
 *  match this function.)
 **************************************************************************/
// Launcher for the coarsened device-to-device matrix copy kernel.
void copyMatrix(float *output, float *mat, int a, int b) // parallel
{
	dim3 block = 512; //1024;
	dim3 grid = (a*b+1024-1)/1024;
	copyMatrixCuda<<<grid,block>>>(output, mat, a, b);
}
// Tiled out-of-place transpose: odata (m x n) = idata (n x m) transposed,
// staged through a padded shared-memory tile (the +1 avoids bank conflicts).
__global__ void transposeCuda(float *odata, float *idata, int n, int m)
{
	__shared__ float tile[TILE_DIM][TILE_DIM+1];
	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	//int width = gridDim.x * TILE_DIM;
	// NOTE(review): with TILE_DIM x TILE_DIM blocks each thread owns exactly
	// one element, so this j-loop repeats the same guarded load — it looks
	// vestigial from a strided-row (BLOCK_ROWS) variant of this kernel.
	for(int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
	{
		if(x<m && y<n)
		{
			tile[threadIdx.x][threadIdx.y] = idata[y*m+x];
		}
	}
	__syncthreads();
	x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
	y = blockIdx.x * TILE_DIM + threadIdx.y;
	for(int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
	{
		if(y<m && x<n){
			odata[(y*n) + x] = tile[threadIdx.y][threadIdx.x];
		}
	}
}
// Launches the tiled transpose of the n x m matrix idata into the m x n odata.
void transpose(float *odata, float *idata, int n, int m)
{
	// grid.x must cover the m columns (the kernel checks x < m) and grid.y
	// the n rows; the previous code swapped the two, under-covering any
	// non-square input.
	dim3 grid((m+TILE_DIM-1)/TILE_DIM, (n+TILE_DIM-1)/TILE_DIM, 1);
	dim3 block(TILE_DIM, TILE_DIM, 1);
	transposeCuda<<<grid,block>>>(odata,idata,n,m);
}
// Tiled matrix multiply: output (a x c) = matrix1 (a x b) * matrix2 (b x c).
// The tile edge degenerates to 1 when either output dimension is 1.
__global__ void matrixMultiplyCuda(float *output, float *matrix1, float *matrix2, int a, int b, int c)
{
	//declare shared memory matrices for matrix1 and matrix2 matrices
	__shared__ float shared_mat1_tile[TILE_SIZE][TILE_SIZE];
	__shared__ float shared_mat2_tile[TILE_SIZE][TILE_SIZE];
	int tsize;
	if(a!=1 && c!=1){
		tsize=TILE_SIZE;
	}
	else{
		tsize=1;
	}
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	//check if thread directly maps to the dimensions of the resulting matrix
	// NOTE(review): the tile loads below are not clamped to the matrix
	// extents (the k-guard only skips the *use* of stale values), so the
	// last phase can read past the end of matrix1/matrix2; additionally,
	// __syncthreads() sits inside this guard, so threads outside the output
	// skip the barrier on partial tiles — both points deserve confirmation.
	if (row < a && col < c)
	{
		float result = 0.0;
		int k;
		int phase;
		//calculate output matrix indexes in phases. Each phase shares
		//TILE_SIZE * TILE_SIZE data copied to the shared matrix mat1
		//and matrix mat2.
		for (phase = 0; phase <= b/tsize; phase++)
		{
			shared_mat1_tile[ty][tx] = matrix1[row * b + phase * tsize + tx];
			shared_mat2_tile[ty][tx] = matrix2[(phase * tsize + ty) * c + col];
			__syncthreads();
			for (k = 0; k < tsize; k++)
			{
				if (k + (phase * tsize) < b)
				{
					result += (shared_mat1_tile[ty][k] * shared_mat2_tile[k][tx]);
				}
			}
			__syncthreads();
		}
		output[row * c + col] = result;
	}
}
// Computes output (a x c) = op(mat1) (a x b) * op(mat2) (b x c) on the GPU,
// optionally transposing either operand into a device scratch buffer first.
// transpose1/transpose2 act as booleans; operands with a dimension of 1
// (vectors) are never explicitly transposed.
void matrixMultiply(float *output, float *mat1, int transpose1, float *mat2, int transpose2, int a, int b, int c) //mat1-a*b mat2-b*c // parallel
{
	float *matrix1;
	float *matrix2;
	if(transpose1 && a!=1 && b!=1)
	{
		matrix1 = newMatrixCUDA(a,b);
		transpose(matrix1, mat1, b,a);
	}
	else
	{
		matrix1 = mat1;
	}
	if(transpose2 && b!=1 && c!=1)
	{
		matrix2 = newMatrixCUDA(b,c);
		// Bug fix: transpose the *second* operand. The original transposed
		// mat1 here, so every transposed right-hand side was garbage.
		transpose(matrix2, mat2, c,b);
	}
	else
	{
		matrix2 = mat2;
	}
	// Degenerate (vector) outputs use 1x1 blocks to match the kernel's tsize.
	int B_C, B_R;
	if(a!=1 && c!=1)
	{
		B_C=BLK_COLS;
		B_R=BLK_ROWS;
	}
	else{
		B_C=1;
		B_R=1;
	}
	dim3 block(B_C,B_R);
	dim3 grid((c+B_C-1)/B_C,(a+B_R-1)/B_R);
	matrixMultiplyCuda<<<grid, block>>>(output, matrix1, matrix2, a, b, c);
	// Release only the scratch transposes. (The unused a x c temporary the
	// original allocated and freed has been removed.)
	if(transpose1 && a!=1 && b!=1)
	{
		cudaFree(matrix1);
	}
	if(transpose2 && b!=1 && c!=1)
	{
		cudaFree(matrix2);
	}
}
// Coarsened element-wise update: A += sign * B over the a*b-element
// matrices; each thread handles the pair (2*id, 2*id+1).
__global__ void matrixAddCuda(float *A, float *B, float sign, int a, int b) // adds b to a // parallel //coarsened
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	// Guard each element individually: the former `id < a*b` guard allowed
	// accesses up to index 2*a*b+1 — out of bounds whenever a*b is not a
	// multiple of the launch coverage.
	int id1 = 2*id;
	int id2 = 2*id + 1;
	if(id1 < a*b)
		A[id1] += sign * B[id1];
	if(id2 < a*b)
		A[id2] += sign * B[id2];
}
// Launcher for the coarsened A += sign * B kernel over an a x b matrix.
void matrixAdd(float *A, float *B, float sign, int a, int b) // adds b to a // parallel
{
	dim3 block = 512; //1024;
	dim3 grid = (a*b+1024-1)/1024;
	matrixAddCuda<<<grid,block>>>(A,B,sign,a,b);
}
// In-place element-wise negation of an n x m matrix; one thread per element.
__global__ void negateMatrixCuda(float *mat, int n, int m) // parallel
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	if(id<n*m)
	{
		mat[id] = -mat[id];
	}
}
// Launcher for in-place negation of an n x m matrix.
void negateMatrix(float *mat, int n, int m) // parallel
{
	dim3 block = 1024;
	dim3 grid = (n*m+1024-1)/1024;
	negateMatrixCuda<<<grid,block>>>(mat,n,m);
}
// Element-wise positive part: mat1[id] = max(0, mat2[id]).
__global__ void matrixPosCuda(float *mat1, float *mat2, int n, int m) // parallel
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	if(id<n*m)
	{
		mat1[id] = fmaxf(0.0, mat2[id]);
	}
}
// Launcher for the element-wise positive-part kernel.
void matrixPos(float *mat1, float *mat2, int n, int m) // parallel
{
	dim3 block = 1024;
	dim3 grid = (n*m+1024-1)/1024;
	matrixPosCuda<<<grid,block>>>(mat1,mat2,n,m);
}
// Element-wise negative part: mat1[id] = max(0, -mat2[id]).
__global__ void matrixNegCuda(float *mat1, float *mat2, int n, int m) // parallel
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	if(id<n*m)
	{
		mat1[id] = fmaxf(0.0, -mat2[id]);
	}
}
// Launcher for the element-wise negative-part kernel.
void matrixNeg(float *mat1, float *mat2, int n, int m) // parallel
{
	dim3 block = 1024;
	dim3 grid = (n*m+1024-1)/1024;
	matrixNegCuda<<<grid,block>>>(mat1,mat2,n,m);
}
// Writes max(tmp[id], 5.0) onto the diagonal of the N x N matrix theta;
// one thread per diagonal entry.
__global__ void diagonalAddCuda(float *theta, float *tmp, int N) // parallel
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	if(id<N)
	{
		// printf("tmp %f\n",tmp[i]);
		theta[id*N+id] = fmaxf(tmp[id],5.0);
	}
}
// Launcher for the diagonal-write kernel (N diagonal entries).
void diagonalAdd(float *theta, float *tmp, int N) // parallel
{
	dim3 block = 1024;
	dim3 grid = (N+1024-1)/1024;
	diagonalAddCuda<<<grid,block>>>(theta,tmp,N);
}
// Constraint feasibility check: clears *re when any row violates
// GpU <= Kp beyond the relative (erc) / absolute (eac) tolerances.
// *re is only ever written with 0, so concurrent writes are benign.
__global__ void compareCuda(float *GpU, float *Kp, int *re, int N) // parallel
{
	int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
	int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
	if(id<N)
	{
		if(GpU[id] > Kp[id]+fmaxf(erc*Kp[id], eac))
		{
			*re = 0;
		}
	}
}
// Host wrapper: launch compareCuda over N components (1024 threads per
// block, ceil-divided grid). NOTE(review): `re` must be writable from the
// kernel, i.e. device-accessible memory.
void compare(float *GpU, float *Kp, int *re, int N) // parallel
{
    const int threadsPerBlock = 1024;
    dim3 block(threadsPerBlock);
    dim3 grid((N + threadsPerBlock - 1) / threadsPerBlock);
    compareCuda<<<grid, block>>>(GpU, Kp, re, N);
}
// Multiplicative dual-variable update:
//   Y_next[id] = (numerator[id] / denominator[id]) * Y[id]
// for each of the N components. One thread per component. No guard against
// denominator[id] == 0 -- caller must ensure the denominator is nonzero.
__global__ void updYCuda(float *Y_next, float *numerator, float *denominator, float *Y, int N) // parallel
{
// Flatten 3D grid/block coordinates into one global thread id.
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if(id<N)
{
Y_next[id] = numerator[id]/denominator[id]*Y[id];
}
}
// Host wrapper: launch updYCuda over N components (1024 threads per block,
// ceil-divided grid).
void updY(float *Y_next, float *numerator, float *denominator, float *Y, int N) // parallel
{
    const int threadsPerBlock = 1024;
    dim3 block(threadsPerBlock);
    dim3 grid((N + threadsPerBlock - 1) / threadsPerBlock);
    updYCuda<<<grid, block>>>(Y_next, numerator, denominator, Y, N);
}
// Host-side matrix inversion by Gauss-Jordan elimination.
//   A   = N x N input matrix (flat, row index i, column index j at A[i*N+j])
//   res = output, receives the inverse of A
// Builds the augmented matrix [A | I] (N x 2N) and row-reduces it to
// [I | A^-1].
// NOTE(review): there is no zero-pivot / singularity check, and the only
// row reordering is a single pass comparing column-0 entries, so a singular
// or poorly ordered A can produce inf/nan -- confirm inputs are well
// conditioned before relying on this.
void Gauss_Jordan(float *A,float *res, int N)
{
/*
size=Size of input matrix
A=input matrix
res= inverted matrix
*/
float temp;
// newMatrix is a project-local host allocator (N rows, 2N columns).
float *matrix = newMatrix(N, 2*N);
// Initialize the right half of the augmented matrix to the identity.
for (int i = 0; i < N; i++)
{
for (int j = 0; j < 2 * N; j++)
{
matrix[i*2*N+j]=0;
if (j == (i + N))
matrix[i*2*N+j] = 1;
}
}
// Copy A into the left half.
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
matrix[i*2*N+j]=A[i*N+j];
}
}
// Partial reordering: bubble rows so larger column-0 entries move up.
for (int i = N - 1; i > 0; i--)
{
if (matrix[(i - 1)*2*N+0] < matrix[i*2*N+0])
for (int j = 0; j < 2 * N; j++)
{
temp = matrix[i*2*N+j];
matrix[i*2*N+j] = matrix[(i - 1)*2*N+j];
matrix[(i - 1)*2*N+j] = temp;
}
}
// Eliminate every off-diagonal entry (reduce left half to diagonal form).
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
if (j != i)
{
// Divides by the pivot matrix[i][i] -- no zero check (see note above).
temp = matrix[j*2*N+i] / matrix[i*2*N+i];
for (int k = 0; k < 2 * N; k++)
{
matrix[j*2*N+k] -= matrix[i*2*N+k] * temp;
}
}
}
}
// Normalize each row so the left half becomes the identity.
for (int i = 0; i < N; i++)
{
temp = matrix[i*2*N+i];
for (int j = 0; j < 2 * N; j++)
{
matrix[i*2*N+j] = matrix[i*2*N+j] / temp;
}
}
// The right half now holds A^-1; copy it into res.
for (int i = 0; i < N; i++)
{
for (int j = N; j <2*N; j++)
{
res[i*N+j-N]=matrix[i*2*N+j];
}
}
free(matrix);
}
// Recover the primal solution from the dual variables:
//   U = -Qp_inv * (Gp' * Y + Fp)
// N = number of constraints (rows of Gp), M = number of primal variables.
// All pointers are device buffers; matrixMultiply / matrixAdd /
// negateMatrix are project helpers (the second/fourth matrixMultiply args
// appear to be transpose flags -- TODO confirm against their definition).
void computeUfromY(float *U, float *Y, float *Fp, float *Gp, float *Qp_inv, int N, int M)
{
float *tmp = newMatrixCUDA(M,1);
// tmp = Gp' * Y  (M x 1)
matrixMultiply(tmp, Gp, 1, Y, 0, M, N, 1);
// tmp += Fp
matrixAdd(tmp, Fp, 1, M, 1);
// U = Qp_inv * tmp
matrixMultiply(U, Qp_inv, 0, tmp, 0, M, M, 1);
// U = -U
negateMatrix(U, M, 1);
cudaFree(tmp);
}
// Assemble the primal linear-cost vector:
//   Fp = Fp1*D + Fp2*x - Fp3
// where D is the disturbance sequence and x the current state.
// All pointers are device buffers (nInput, pHorizon, nDis, nState are
// file-level dimension constants defined outside this chunk).
void computeFp(float *Fp, float *Fp1, float *Fp2, float *Fp3, float *D, float *x)
{
// Fp = Fp1 * D
matrixMultiply(Fp, Fp1, 0, D, 0, nInput*pHorizon, nDis*pHorizon, 1);
float *Fp2x = newMatrixCUDA(nInput*pHorizon,1);
// Fp2x = Fp2 * x
matrixMultiply(Fp2x, Fp2, 0, x, 0, nInput*pHorizon, nState, 1);
// Fp += Fp2x
matrixAdd(Fp, Fp2x, 1, nInput*pHorizon, 1);
// Fp -= Fp3
matrixAdd(Fp, Fp3, -1, nInput*pHorizon, 1);
cudaFree(Fp2x);
// for(int i=0;i<nInput*pHorizon;i++)
// {
// printf("%f\n", Fp[i]);
// }
// printf("\n");
// printf("%d\n", Fp);
}
// Assemble the scalar constant term of the primal cost:
//   Mp = 0.5*(x'Mp1 x + D'Mp2 x + Mp4'x + D'Mp3 D + Mp5'D + Mp6)
// accumulated term by term into the 1x1 device buffer Mp.
// NOTE(review): several matrixMultiply calls alias output and input
// (tmp appears as both destination and operand) -- this assumes the
// project's matrixMultiply tolerates in-place use; verify.
void computeMp(float *Mp, float *Mp1, float *Mp2, float *Mp3, float *Mp4, float *Mp5, float *Mp6, float *D, float *x)
{
// Mp = 0
initMat(Mp, 0, 1);
float *tmp = newMatrixCUDA(1,nState);
// tmp = x' * Mp1 ; tmp = tmp * x ; Mp += 0.5*tmp   (quadratic state term)
matrixMultiply(tmp, x, 1, Mp1, 0, 1, nState, nState);
matrixMultiply(tmp, tmp, 0, x, 0, 1, nState, 1);
matrixAdd(Mp, tmp, 0.5, 1,1);
// printMat<<<1,1>>>(Mp, 1, 1);
// Mp += 0.5 * D' * Mp2 * x   (disturbance/state cross term)
matrixMultiply(tmp, D, 1, Mp2, 0, 1, nDis*pHorizon, nState);
matrixMultiply(tmp, tmp, 0, x, 0, 1, nState, 1);
matrixAdd(Mp, tmp, 0.5, 1,1);
// Mp += 0.5 * Mp4' * x   (linear state term)
matrixMultiply(tmp, Mp4, 1, x, 0, 1, nState, 1);
matrixAdd(Mp, tmp, 0.5, 1,1);
cudaFree(tmp);
// Re-allocate tmp wide enough for the disturbance terms.
tmp = newMatrixCUDA(1, nDis*pHorizon);
// Mp += 0.5 * D' * Mp3 * D   (quadratic disturbance term)
matrixMultiply(tmp, D, 1, Mp3, 0, 1, nDis*pHorizon, nDis*pHorizon);
matrixMultiply(tmp, tmp, 0, D, 0, 1, nDis*pHorizon, 1);
matrixAdd(Mp, tmp, 0.5, 1,1);
// Mp += 0.5 * Mp5' * D   (linear disturbance term)
matrixMultiply(tmp, Mp5, 1, D, 0, 1, nDis*pHorizon, 1);
matrixAdd(Mp, tmp, 0.5, 1,1);
// Mp += 0.5 * Mp6   (constant term)
matrixAdd(Mp, Mp6, 0.5, 1,1);
cudaFree(tmp);
}
// Dual Hessian: Qd = (Gp * Qp_inv) * Gp'  (N x N).
// Gp_Qp_inv is the precomputed product Gp * Qp_inv (N x M).
void computeQd(float *Qd, float *Gp_Qp_inv, float *Gp, int N, int M)
{
matrixMultiply(Qd, Gp_Qp_inv, 0, Gp, 1, N, M, N);
}
// Dual linear term: Fd = (Gp * Qp_inv) * Fp + Kp  (N x 1).
// Gp_Qp_inv is the precomputed product Gp * Qp_inv (N x M).
void computeFd(float *Fd, float *Gp_Qp_inv, float *Fp, float *Kp, int N, int M)
{
matrixMultiply(Fd, Gp_Qp_inv, 0, Fp, 0, N, M, 1);
matrixAdd(Fd, Kp, 1, N, 1);
}
// Dual constant term: Md = Fp' * Qp_inv * Fp - Mp  (scalar, 1x1 buffer).
void computeMd(float *Md, float *Fp, float* Qp_inv, float* Mp, int N, int M)
{
float *tmp = newMatrixCUDA(1,M);
// tmp = Fp' * Qp_inv
matrixMultiply(tmp, Fp, 1, Qp_inv, 0, 1, M, M);
// Md = tmp * Fp
matrixMultiply(Md, tmp, 0, Fp, 0, 1, M, 1);
// Md -= Mp
matrixAdd(Md, Mp, -1, 1, 1);
cudaFree(tmp);
}
// Convert the primal QP (Qp, Fp, Mp with constraints Gp*U <= Kp) into its
// dual form (Qd, Fd, Md). The shared product Gp * Qp_inv is computed once
// and reused by the three component routines.
void convertToDual(float *Qd, float *Fd, float *Md, float *Qp_inv, float *Gp, float *Kp, float *Fp, float *Mp, int N, int M)
{
float *Gp_Qp_inv = newMatrixCUDA(N,M);
matrixMultiply(Gp_Qp_inv, Gp, 0, Qp_inv, 0, N, M, M);
computeQd(Qd, Gp_Qp_inv, Gp, N, M);
computeFd(Fd, Gp_Qp_inv, Fp, Kp, N, M);
computeMd(Md, Fp, Qp_inv, Mp, N, M);
cudaFree(Gp_Qp_inv);
}
// Build the diagonal scaling matrix theta used by the multiplicative dual
// update: tmp = max(0,-Qd) * ones(N), and theta's diagonal is set to
// max(tmp[i], 5.0) by diagonalAdd. Off-diagonal entries of theta are not
// written here -- caller supplies the buffer (see solveQuadraticDual).
void computeTheta(float *theta, float *Qd, int N)
{
// Qdn = element-wise negative part of Qd: max(0, -Qd)
float *Qdn = newMatrixCUDA(N,N);
matrixNeg(Qdn, Qd, N, N);
// one = vector of ones
float *one = newMatrixCUDA(N,1);
initMat(one, 1, N);
// tmp = Qdn * one  (row sums of the negative part)
float *tmp = newMatrixCUDA(N,1);
matrixMultiply(tmp, Qdn, 0, one, 0, N,N,1);
diagonalAdd(theta, tmp, N);
cudaFree(Qdn);
cudaFree(one);
cudaFree(tmp);
}
// Qdp_theta = max(0, Qd) + theta  (positive part of Qd plus scaling matrix).
void computeQdp_theta(float *Qdp_theta, float *Qd, float *theta, int N)
{
matrixPos(Qdp_theta, Qd, N, N);
matrixAdd(Qdp_theta, theta, 1, N, N);
}
// Qdn_theta = max(0, -Qd) + theta  (negative part of Qd plus scaling matrix).
void computeQdn_theta(float *Qdn_theta, float *Qd, float *theta, int N)
{
matrixNeg(Qdn_theta, Qd, N, N);
matrixAdd(Qdn_theta, theta, 1, N, N);
}
// Exact line-search step size along direction ph:
//   alphaY = -(Y'Qd + Fd')ph / (ph'Qd ph)   if the curvature ph'Qd ph > 0,
//   alphaY = 0                              otherwise.
// Fixes two defects in the original:
//   1) `alphaY = 0;` nulled the POINTER instead of writing *alphaY = 0.
//   2) temp[0]/temp2[0] dereferenced newMatrixCUDA (device) buffers
//      directly on the host; the scalars are now copied back with
//      copyToHost first, matching how computeCost reads its 1x1 result.
void computealphaY(float *alphaY, float *ph, float *Qd, float *Y, float *Fd, int N)
{
// curvature = ph' * Qd * ph  (1x1, on device)
float *temp = newMatrixCUDA(1,N);
matrixMultiply(temp, ph, 1, Qd, 0, 1, N, N);
matrixMultiply(temp, temp, 0, ph, 0, 1, N, 1);
float curvature;
copyToHost(&curvature, temp, 1, 1);
if(curvature > 0)
{
// slope = (Y'Qd + Fd') * ph  (directional derivative of the dual cost)
float *temp2 = newMatrixCUDA(1,N);
matrixMultiply(temp2, Y, 1, Qd, 0, 1, N, N);
matrixAdd(temp2, Fd, 1, 1, N);
matrixMultiply(temp2, temp2, 0, ph, 0, 1, N, 1);
float slope;
copyToHost(&slope, temp2, 1, 1);
*alphaY = -slope/curvature;
cudaFree(temp2);
}
else
{
// Non-positive curvature: take no step.
*alphaY = 0;
}
cudaFree(temp);
}
// Line-search update: Y_next = Y + alphaY * ph.
void updateY1(float *Y_next, float *Y, float alphaY, float *ph, int N)
{
copyMatrix(Y_next, Y, N, 1);
matrixAdd(Y_next, ph, alphaY, N, 1);
}
// Multiplicative (fixed-point) dual update:
//   Y_next = (Qdn_theta*Y + Fdn) / (Qdp_theta*Y + Fdp) .* Y   (element-wise)
// Fdp/Fdn are the positive/negative parts of Fd (precomputed by the caller);
// the Fd parameter itself is unused here but kept for interface symmetry.
void updateY2(float *Y_next, float *Y, float *Qdp_theta, float *Qdn_theta, float *Fd, float *Fdp, float *Fdn, int N)
{
float *numerator = newMatrixCUDA(N,1);
float *denominator = newMatrixCUDA(N,1);
// numerator = Qdn_theta*Y + Fdn ; denominator = Qdp_theta*Y + Fdp
matrixMultiply(numerator, Qdn_theta, 0, Y, 0, N, N, 1);
matrixMultiply(denominator, Qdp_theta, 0, Y, 0, N, N, 1);
matrixAdd(numerator, Fdn, 1, N, 1);
matrixAdd(denominator, Fdp, 1, N, 1);
updY(Y_next, numerator, denominator, Y, N);
cudaFree(numerator);
cudaFree(denominator);
}
// Steepest-descent direction for the dual cost: ph = -(Qd*Y + Fd).
// Bug fix: the original called matrixAdd(ph, ph, ...), which doubled
// Qd*Y and never used the Fd parameter at all; the gradient of
// 0.5*Y'Qd Y + Fd'Y is Qd*Y + Fd, which also matches the slope expression
// used by computealphaY.
void computeph(float *ph, float *Qd, float *Y, float *Fd, int N)
{
// ph = Qd * Y
matrixMultiply(ph, Qd, 0, Y, 0, N, N, 1);
// ph += Fd
matrixAdd(ph, Fd, 1, N, 1);
// ph = -ph  (descend, not ascend)
matrixNeg(ph, ph, N, 1);
}
// Returns 1 when U satisfies Gp*U <= Kp (within the erc/eac tolerances
// applied by compareCuda), 0 otherwise.
// Bug fix: the original passed the ADDRESS OF A HOST local (&re) through
// compare() into a kernel that writes *re on the device; the flag now lives
// in device memory and is copied back explicitly.
int checkFeas(float *U, float *Gp, float *Kp, int N, int M)
{
// tmp = Gp * U  (N x 1)
float *tmp = newMatrixCUDA(N,1);
matrixMultiply(tmp, Gp, 0, U, 0, N, M, 1);
int re = 1;
// Device-resident feasibility flag, initialized to 1 (feasible).
int *d_re;
cudaMalloc((void**)&d_re, sizeof(int));
cudaMemcpy(d_re, &re, sizeof(int), cudaMemcpyHostToDevice);
compare(tmp, Kp, d_re, N);
// Blocking copy also synchronizes with the kernel before reading the flag.
cudaMemcpy(&re, d_re, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_re);
cudaFree(tmp);
return re;
}
// Evaluate the quadratic cost J = 0.5*Z'QZ + F'Z + 0.5*M for an N-vector Z
// and return it as a host float.
// NOTE(review): J is accumulated with matrixAdd without an explicit
// initMat(J, 0, ...) first -- this assumes newMatrixCUDA returns
// zero-initialized device memory; verify against its definition.
float computeCost(float *Z, float *Q, float *F, float *M, int N)
{
float *J=newMatrixCUDA(1,1);
float *tmp = newMatrixCUDA(1,N);
// J += 0.5 * Z' * Q * Z
matrixMultiply(tmp, Z, 1, Q, 0, 1, N, N);
matrixMultiply(tmp, tmp, 0, Z, 0, 1, N, 1);
matrixAdd(J, tmp, 0.5, 1,1);
// J += F' * Z
matrixMultiply(tmp, F, 1, Z, 0, 1, N, 1);
matrixAdd(J, tmp, 1, 1,1);
// printMat<<<1,1>>>(J,1,1);
// printMat<<<1,1>>>(M,1,1);
// J += 0.5 * M  (constant term, 1x1 device buffer)
matrixAdd(J, M, 0.5, 1,1);
// Copy the 1x1 result back to the host before reading it.
float *hJ = newMatrix(1,1);
copyToHost(hJ,J,1,1);
float cost = hJ[0];
free(hJ);
cudaFree(J);
cudaFree(tmp);
return cost;
}
// Convergence test for the dual iteration. Recovers the primal U from the
// current dual Y, then returns 1 only when:
//   - U is feasible (Gp*U <= Kp within tolerance),
//   - the duality gap Jp + Jd is bracketed: Jp > -Jd,
//   - the gap is within the absolute tolerance eaj and relative tolerance
//     erj (file-level constants defined outside this chunk).
// Returns 0 (keep iterating) otherwise.
int terminate(float *Y, float *Qd, float *Fd, float *Md, float *U, float *Qp, float *Qp_inv, float *Fp, float *Mp, float *Gp, float *Kp, int N, int M)
{
computeUfromY(U, Y, Fp, Gp, Qp_inv, N, M);
if(!checkFeas(U, Gp, Kp, N, M)) return 0;
float Jd = computeCost(Y, Qd, Fd, Md, N);
float Jp = computeCost(U, Qp, Fp, Mp, M);
if(Jp>-Jd) return 0;
if(Jp+Jd>eaj) return 0;
if((Jp+Jd)/fabs(Jd)>erj) return 0;
return 1;
}
// Solve the dual QP by iterating the multiplicative update (updateY2)
// until terminate() reports convergence. The acceleration branch
// (computeph/computealphaY/updateY1) is currently disabled by the
// hard-coded if(1). Outputs: Y (dual solution) and, via terminate's last
// call to computeUfromY, U (primal solution).
void solveQuadraticDual(float *Y, float *Qd, float *Fd, float *Md, float *U, float *Qp, float *Qp_inv, float *Fp, float *Mp, float *Gp, float *Kp, int N, int M)
{
// Scaling matrix and the shifted positive/negative splittings of Qd and Fd
// needed by the multiplicative update.
float *theta = newMatrixCUDA(N,N);
float *Qdp_theta = newMatrixCUDA(N,N);
float *Qdn_theta = newMatrixCUDA(N,N);
float *Y_next = newMatrixCUDA(N,1);
float *Fdn = newMatrixCUDA(N,1);
float *Fdp = newMatrixCUDA(N,1);
matrixPos(Fdp, Fd, N, 1);
matrixNeg(Fdn, Fd, N, 1);
computeTheta(theta, Qd, N);
computeQdp_theta(Qdp_theta, Qd, theta, N);
computeQdn_theta(Qdn_theta, Qd, theta, N);
// Start from a strictly positive dual point (the multiplicative update
// preserves positivity).
initMat(Y, 1000.0, N);
// for(int i=0;i<N;i++) Y[i] = i+1;
float *ph = newMatrixCUDA(N,1);
long int h=1;
float alphaY=0;
// while(h<NUM_ITER)
while(!terminate(Y, Qd, Fd, Md, U, Qp, Qp_inv, Fp, Mp, Gp, Kp, N, M))
{
// if(h>100000) break;
// printf("h %ld\n",h);
if(1)
{
//update
// printf("here\n");
updateY2(Y_next, Y, Qdp_theta, Qdn_theta, Fd, Fdp, Fdn, N);
// printf("there\n");
}
else
{
// printf("accelerating\n");
// accelerate
computeph(ph, Qd, Y, Fd, N);
computealphaY(&alphaY, ph, Qd, Y, Fd, N);
// printf("alpha %f\n", alphaY);
updateY1(Y_next, Y, alphaY/10, ph, N);
}
copyMatrix(Y, Y_next, N, 1);
// for(int i=0;i<N;i++)
// {
// printf("%f ",Y[i]);
// }
// printf("\n\n");
h++;
}
printf("Printing number of iterations = %ld\n",h);
cudaFree(theta);
cudaFree(Qdp_theta);
cudaFree(Qdn_theta);
cudaFree(Y_next);
cudaFree(ph);
cudaFree(Fdp);
cudaFree(Fdn);
}
// Read one whitespace-separated matrix from `path` into `dst`.
// The text file is laid out as `rows` x `cols` row-major; elements are
// stored transposed into dst[j*rows + i] (same indexing the original
// sixteen hand-unrolled loops used). A vector of length n is just
// (rows=n, cols=1). Exits with a message on a missing or malformed file
// (the original segfaulted on fopen returning NULL and silently reused
// stale data on short files).
static void readMatrixColMajor(const char *path, float *dst, int rows, int cols)
{
FILE *fptr = fopen(path, "r");
if (fptr == NULL)
{
fprintf(stderr, "Cannot open input file %s\n", path);
exit(EXIT_FAILURE);
}
float num;
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
if (fscanf(fptr, "%f", &num) != 1)
{
fprintf(stderr, "Malformed data in %s (row %d, col %d)\n", path, i, j);
fclose(fptr);
exit(EXIT_FAILURE);
}
dst[j*rows + i] = num;
}
}
fclose(fptr);
}
// Load every problem matrix/vector of the example MPC instance from
// ./example/*.txt into the caller-allocated host buffers. Dimensions come
// from the file-level constants pHorizon, nInput, nState, nDis, nOutput.
// Each call preserves the exact element layout of the original code.
void input(float* qp_inv, float* Fp1, float* Fp2, float * Fp3, float * Mp1, float * Mp2, float * Mp3, float* Mp4, float* Mp5, float* Mp6, float* Gp, float* Kp, float* x, float* D, float* theta, float* Z)
{
readMatrixColMajor("./example/Qp_inv.txt", qp_inv, pHorizon*nInput, pHorizon*nInput);
readMatrixColMajor("./example/Fp1.txt", Fp1, nDis*pHorizon, nInput*pHorizon);
readMatrixColMajor("./example/Fp2.txt", Fp2, nState, nInput*pHorizon);
readMatrixColMajor("./example/Fp3.txt", Fp3, nInput*pHorizon, 1);
readMatrixColMajor("./example/Mp1.txt", Mp1, nState, nState);
readMatrixColMajor("./example/Mp2.txt", Mp2, nState, nDis*pHorizon);
readMatrixColMajor("./example/Mp3.txt", Mp3, nDis*pHorizon, nDis*pHorizon);
readMatrixColMajor("./example/Mp4.txt", Mp4, nState, 1);
readMatrixColMajor("./example/Mp5.txt", Mp5, nDis*pHorizon, 1);
readMatrixColMajor("./example/Mp6.txt", Mp6, 1, 1);
readMatrixColMajor("./example/Gp.txt", Gp, pHorizon*nInput, 4*pHorizon*nInput);
readMatrixColMajor("./example/Kp.txt", Kp, 4*pHorizon*nInput, 1);
readMatrixColMajor("./example/Z.txt", Z, nState, nOutput*pHorizon);
readMatrixColMajor("./example/Theta.txt", theta, nDis*pHorizon, nOutput*pHorizon);
readMatrixColMajor("./example/D.txt", D, nDis*pHorizon, 1);
readMatrixColMajor("./example/x.txt", x, nState, 1);
}
// Driver: load the example MPC problem, invert Qp on the host, assemble the
// primal QP on the device, convert it to its dual, solve the dual, recover
// the primal U, and print both costs and U.
int main()
{
// QP is of parametric form
// J(U) = min U 1/2*U'QpU + Fp'U + 1/2*Mp
// st GpU <= Kp
cudaDeviceReset();
// float *mat = newMatrixCUDA(32,32);
// float *trp = newMatrixCUDA(32,32);
// float *hmat= newMatrix(32,32);
// float *htrp = newMatrix(32,32);
//
// int cnt=1;
//
// for(int i=0;i<10;i++)
// {
// for(int j=0;j<5;j++)
// {
// hmat[5*i+j] = cnt++;
// }
// }
//
// copyToDevice(mat, hmat, 10,5);
// transpose(trp, mat, 10,5);
//
// copyToHost(htrp, trp, 5,10);
//
// for(int i=0;i<5;i++)
// {
// for(int j=0;j<10;j++)
// {
// printf("%f ",htrp[10*i+j]);
// }
// printf("\n");
// }
// return 0;
// M = number of primal variables, N = number of inequality constraints.
int N, M;
M = pHorizon*nInput;
N = 4*pHorizon*nInput;
// --- host-side buffers ---
float *hQp_inv = newMatrix(M,M);
float *hQp = newMatrix(M,M);
float *hFp1;
float *hFp2;
float *hFp3;
float *hMp1;
float *hMp2;
float *hMp3;
float *hMp4;
float *hMp5;
float *hMp6;
float *hFp = newMatrix(nInput*pHorizon,1);
float *hMp = newMatrix(1,1);
float *hGp;
float *hKp;
float *hx;
float *hD;
float *htheta;
float *hZ;
hFp1 = newMatrix(nInput*pHorizon, nDis*pHorizon);
hFp2 = newMatrix(nInput*pHorizon, nState);
hFp3 = newMatrix(1, nInput*pHorizon);
hMp1 = newMatrix(nState, nState);
hMp2 = newMatrix(nDis*pHorizon, nState);
hMp3 = newMatrix(nDis*pHorizon, nDis*pHorizon);
hMp4 = newMatrix(1, nState);
hMp5 = newMatrix(1, nDis*pHorizon);
hMp6 = newMatrix(1,1);
hGp = newMatrix(4*pHorizon*nInput, nInput*pHorizon);
hKp = newMatrix(1,4*pHorizon*nInput);
hZ = newMatrix(nOutput*pHorizon, nState);
htheta = newMatrix(nOutput*pHorizon, nDis*pHorizon);
hD = newMatrix(nDis*pHorizon,1);
hx = newMatrix(nState, 1);
// --- device-side buffers (same shapes) ---
float *Qp_inv = newMatrixCUDA(M,M);
float *Qp = newMatrixCUDA(M,M);
float *Fp1;
float *Fp2;
float *Fp3;
float *Mp1;
float *Mp2;
float *Mp3;
float *Mp4;
float *Mp5;
float *Mp6;
float *Fp = newMatrixCUDA(nInput*pHorizon,1);
float *Mp = newMatrixCUDA(1,1);
float *Gp;
float *Kp;
float *x;
float *D;
float *theta;
float *Z;
Fp1 = newMatrixCUDA(nInput*pHorizon, nDis*pHorizon);
Fp2 = newMatrixCUDA(nInput*pHorizon, nState);
Fp3 = newMatrixCUDA(1, nInput*pHorizon);
Mp1 = newMatrixCUDA(nState, nState);
Mp2 = newMatrixCUDA(nDis*pHorizon, nState);
Mp3 = newMatrixCUDA(nDis*pHorizon, nDis*pHorizon);
Mp4 = newMatrixCUDA(1, nState);
Mp5 = newMatrixCUDA(1, nDis*pHorizon);
Mp6 = newMatrixCUDA(1,1);
Gp = newMatrixCUDA(4*pHorizon*nInput, nInput*pHorizon);
Kp = newMatrixCUDA(1,4*pHorizon*nInput);
Z = newMatrixCUDA(nOutput*pHorizon, nState);
theta = newMatrixCUDA(nOutput*pHorizon, nDis*pHorizon);
D = newMatrixCUDA(nDis*pHorizon,1);
x = newMatrixCUDA(nState, 1);
// Load problem data from ./example/*.txt and invert Qp on the host.
input(hQp_inv, hFp1, hFp2, hFp3, hMp1, hMp2, hMp3, hMp4, hMp5, hMp6, hGp, hKp, hx, hD, htheta, hZ);
Gauss_Jordan(hQp_inv, hQp, M);
// Upload everything to the device.
copyToDevice(Qp_inv, hQp_inv, M, M);
copyToDevice(Qp, hQp, M, M);
copyToDevice(Fp1, hFp1, nInput*pHorizon, nDis*pHorizon);
copyToDevice(Fp2, hFp2, nInput*pHorizon, nState);
copyToDevice(Fp3, hFp3, 1, nInput*pHorizon);
copyToDevice(Mp1, hMp1, nState, nState);
copyToDevice(Mp2, hMp2, nDis*pHorizon, nState);
copyToDevice(Mp3, hMp3, nDis*pHorizon, nDis*pHorizon);
copyToDevice(Mp4, hMp4, 1, nState);
copyToDevice(Mp5, hMp5, 1, nDis*pHorizon);
copyToDevice(Mp6, hMp6, 1,1);
copyToDevice(Gp, hGp, 4*pHorizon*nInput, nInput*pHorizon);
copyToDevice(Kp, hKp, 1,4*pHorizon*nInput);
copyToDevice(Z, hZ, nOutput*pHorizon, nState);
copyToDevice(D, hD, nDis*pHorizon,1);
copyToDevice(theta, htheta, nOutput*pHorizon, nDis*pHorizon);
copyToDevice(x, hx, nState, 1);
// Assemble the state/disturbance-dependent primal terms.
computeFp(Fp, Fp1, Fp2, Fp3, D, x);
computeMp(Mp, Mp1, Mp2, Mp3, Mp4, Mp5, Mp6, D, x);
// printf("Mp %f\n", Mp[0]);
// printf("er\n");
// printMat<<<1,1>>>(Mp,1,1);
// matrices and vectors required for dual form of QP
float *Qd = newMatrixCUDA(N,N);
float *Fd = newMatrixCUDA(N,1);
float *Md = newMatrixCUDA(1,1);
float *Y = newMatrixCUDA(N,1);
float *U = newMatrixCUDA(M,1);
// printf("er\n");
convertToDual(Qd, Fd, Md, Qp_inv, Gp, Kp, Fp, Mp, N, M);
// printf("Qd\n");
// for(int i=0;i<N;i++)
// {
// for(int j=0;j<N;j++)
// {
// printf("%f ", Qd[i*N+j]);
// }
// printf("\n");
// }
// printf("Fd\n");
// printf("%f\n", Md[0]);
// for(int i=0;i<N;i++)
// {
// printf("%f ", Fp[i]);
// }
// printf("\n");
// Solve the dual QP, then recover the primal solution U.
solveQuadraticDual(Y, Qd, Fd, Md, U, Qp, Qp_inv, Fp, Mp, Gp, Kp, N, M);
// printf("erer\n");
computeUfromY(U, Y, Fp, Gp, Qp_inv, N, M);
// U[0] = -6.399018;
// U[1] = -10.648726;
// U[2] = -4.792378;
// U[3] = -7.033428;
// U[4] = -4.792378;
// U[5] = -10.648726;
// U[6] = -6.399018;
// U[0] = -6.398985;
// U[1] = -10.646729;
// U[2] = -4.792132;
// U[3] = -7.027614;
// U[4] = -4.792255;
// U[5] = -10.643004;
// U[6] = -6.398996;
// Report primal and dual optimal costs (should bracket each other).
float Jp = computeCost(U, Qp, Fp, Mp, M);
float Jd = computeCost(Y, Qd, Fd, Md, N);
printf("Jp = %f\n", Jp);
printf("Jd = %f\n", Jd);
float *hU = newMatrix(M,1);
float *hY = newMatrix(N,1);
copyToHost(hU,U,M,1);
copyToHost(hY,Y,N,1);
// printf("Printing Y*\n");
// for(int i=0;i<N;i++)
// {
// printf("%f\n", hY[i]);
// }
printf("Printing U*\n");
for(int i=0;i<M;i++)
{
printf("\t%f\n", hU[i]);
}
// --- cleanup ---
free(hQp_inv);
free(hQp);
free(hFp1);
free(hFp2);
free(hFp3);
free(hMp1);
free(hMp2);
free(hMp3);
free(hMp4);
free(hMp5);
free(hMp6);
free(hFp);
free(hMp);
free(hGp);
free(hKp);
free(hx);
free(hD);
free(htheta);
free(hZ);
cudaFree(Qp_inv);
cudaFree(Qp);
cudaFree(Fp1);
cudaFree(Fp2);
cudaFree(Fp3);
cudaFree(Mp1);
cudaFree(Mp2);
cudaFree(Mp3);
cudaFree(Mp4);
cudaFree(Mp5);
cudaFree(Mp6);
cudaFree(Fp);
cudaFree(Mp);
cudaFree(Gp);
cudaFree(Kp);
cudaFree(x);
cudaFree(D);
cudaFree(theta);
cudaFree(Z);
cudaFree(Qd);
cudaFree(Fd);
cudaFree(Md);
cudaFree(Y);
cudaFree(U);
}
|
20,751 | #include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Abort with a diagnostic if a CUDA runtime call failed; use via the
// CheckCudaError macro below, which supplies the call site automatically.
void _CheckCudaError(const cudaError_t cudaError, const char* file, const int line)
{
    if (cudaError == cudaSuccess)
        return;
    std::cout << "[CUDA ERROR] " << cudaGetErrorString(cudaError)
              << " (" << file << ":" << line << ")\n";
    exit(EXIT_FAILURE);
}
#define CheckCudaError(call) _CheckCudaError((call), __FILE__, __LINE__)
// c[i] = a[i] + reversed(b)[i]: each block (one thread per block in this
// example) adds one element of a to the mirrored element of b.
__global__ void add(const int *a, const int *b, int *c)
{
    const int i = blockIdx.x;
    if (i >= gridDim.x)
        return;
    c[i] = a[i] + b[(gridDim.x - 1) - i];
}
// Demo driver: add a 6-element vector to the reverse of another on the GPU
// (6 blocks of 1 thread each) and print the result.
int main()
{
    const int count = 6;
    const size_t bytes = count * sizeof(int);
    int h_a[count] = { 1, 2, 3, 4, 5, 6 };
    int h_b[count] = { 10, 20, 30, 40, 50, 60 };
    int h_c[count];
    // Allocate device buffers and upload the inputs.
    int *d_a, *d_b, *d_c;
    CheckCudaError(cudaMalloc(&d_a, bytes));
    CheckCudaError(cudaMalloc(&d_b, bytes));
    CheckCudaError(cudaMalloc(&d_c, bytes));
    CheckCudaError(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice));
    CheckCudaError(cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice));
    // One element per block.
    add<<<count, 1>>>(d_a, d_b, d_c);
    CheckCudaError(cudaDeviceSynchronize());
    CheckCudaError(cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost));
    for (int i = 0; i < count; i++)
        printf("%d ", h_c[i]);
    printf("\n");
    CheckCudaError(cudaFree(d_a));
    CheckCudaError(cudaFree(d_b));
    CheckCudaError(cudaFree(d_c));
}
|
20,752 | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
#define nPerThread 32
/* Simple Cuda Program: Shared memory
* - Use dynamic shared memory
* - bank conflicts
* - synchronization
*/
// no bank conflicts
// Increment every element of `data` by 1, staging nPerThread elements per
// thread through dynamically-sized shared memory. Each thread touches only
// its own shared-memory slots (smem[j*nt + t] for fixed t), so no
// __syncthreads() is needed here; accesses are stride-1 across the warp in
// both shared and global memory (conflict/coalescing friendly).
// Launch requires sharedMem = blockDim.x * nPerThread * sizeof(double).
__global__ void addOneShared(const int n, double *data) {
extern __shared__ double smem[];
int nt = blockDim.x;
int t = threadIdx.x;
int b = blockIdx.x;
// First global element handled by this block.
int i = b*(nt*nPerThread);
// Stage from global to shared.
for (int j=0; j<nPerThread; j++)
smem[j*nt + t] = data[i + j*nt + t];
// Increment in shared memory.
for (int j=0; j<nPerThread; j++)
smem[j*nt + t]++;
// Write back to global.
for (int j=0; j<nPerThread; j++)
data[i + j*nt + t] = smem[j*nt + t];
}
// bank conflicts
// Same computation as addOneShared, but the increment phase indexes shared
// memory transposed (smem[t*nPerThread + j]): consecutive threads then hit
// addresses nPerThread*8 bytes apart, demonstrating shared-memory bank
// conflicts. Because a thread increments slots written by OTHER threads,
// the __syncthreads() barriers around the increment phase are required.
// Launch requires sharedMem = blockDim.x * nPerThread * sizeof(double).
__global__ void addOneShared_bankConflits(const int n, double *data) {
extern __shared__ double smem[];
int nt = blockDim.x;
int t = threadIdx.x;
int b = blockIdx.x;
// First global element handled by this block.
int i = b*(nt*nPerThread);
// Stage from global to shared (coalesced layout).
for (int j=0; j<nPerThread; j++)
smem[j*nt + t] = data[i + j*nt + t];
__syncthreads();
// Increment with transposed indexing -> bank conflicts.
for (int j=0; j<nPerThread; j++)
smem[t*nPerThread + j]++;
__syncthreads();
// Write back to global (coalesced layout again).
for (int j=0; j<nPerThread; j++)
data[i + j*nt + t] = smem[j*nt + t];
}
// Benchmark driver: fill an array, run the shared-memory increment kernel,
// time it with gettimeofday, and print the wall time plus the last element
// (expected: initial value + 1).
// Fixes: replaced the long-deprecated cudaThreadSynchronize() with
// cudaDeviceSynchronize(), and removed the unused sTime local.
int main() {
    struct timeval tt1, tt2;
    int ms;
    double fms;
    // Launch geometry: each thread handles nPerThread elements.
    int nBlocks = 256;
    int nThreads = 128;
    int n = nPerThread*nThreads*nBlocks;
    double *data = (double*) malloc(n * sizeof(double));
    for (int i=0; i<n; i++) {
        data[i] = i;
    }
    double *data_dev;
    cudaMalloc((void**) &data_dev, n * sizeof(double));
    cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    cout << "copy to device = " << error << " : " << cudaGetErrorString(error) << endl;
    // Make sure the copy is complete before starting the timer.
    cudaDeviceSynchronize();
    gettimeofday( &tt1, NULL );
    // Dynamic shared memory: nPerThread doubles per thread.
    int sharedMem = nThreads * nPerThread * sizeof(double);
    // (*) Add shared memory size to execution configuration parameters
    //cudaFuncSetCacheConfig(addOneShared, cudaFuncCachePreferL1);
    addOneShared <<< nBlocks, nThreads, sharedMem >>>(n, data_dev);
    //addOneShared_bankConflits <<< nBlocks, nThreads, sharedMem >>>(n, data_dev);
    error = cudaGetLastError();
    cout << "run kernel = " << error << " : " << cudaGetErrorString(error) << endl;
    // Kernel launches are asynchronous: wait before stopping the timer.
    cudaDeviceSynchronize();
    gettimeofday( &tt2, NULL );
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms)/1000000.0;
    cout << "Comp time = " << fms << endl;
    cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
    error = cudaGetLastError();
    cout << "copy from device = " << error << " : " << cudaGetErrorString(error) << endl;
    cudaFree(data_dev);
    cout << "data[n-1] = " << data[n-1] << endl;
    free(data);
}
|
20,753 | #include <stdio.h>
// Print the number of CUDA-capable devices on this machine.
// Fix: the original ignored the cudaGetDeviceCount return status, so a
// driver/runtime failure would print an uninitialized count.
int main() {
    int num_dev = 0;
    cudaError_t err = cudaGetDeviceCount(&num_dev);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("%d\n", num_dev);
    return 0;
}
|
20,754 | //nvcc -o lab5_3_1 lab5_3_1.cu
/*Author:
Pedro Silva
*/
/*3. Implemente um programa em CUDA que devolva a transposta de uma matriz*/
/*3.1. Implemente uma versão simples (sem recorrer a optimizações).*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Naive N x N matrix transpose: d_out = d_matrix^T, one thread per element.
// Launched on a 2D grid of 2D blocks; threads outside the matrix exit early.
__global__ void transposta(int *d_matrix, int *d_out, int N){
    const int x = threadIdx.x + blockIdx.x * blockDim.x;  // column
    const int y = threadIdx.y + blockIdx.y * blockDim.y;  // row
    if (x >= N || y >= N)
        return;
    // Element (y, x) of the input lands at (x, y) of the output.
    d_out[x * N + y] = d_matrix[y * N + x];
}
// Driver: transpose square matrices of sizes 64..512 on the GPU and print a
// 5x5 corner of each result. (Comments translated from Portuguese.)
// NOTE(review): the first cudaMemcpy check uses `if((error = ...))` with no
// explicit cudaSuccess comparison -- it works only because cudaSuccess is 0;
// the kernel launch itself is never error-checked.
int main(int argc, char const *argv[])
{
printf("Exercicio 3, Lab 5 de CHAD. Efectua a transposta duma matriz.\n");
int *d_matrix, *d_out, *h_matrix, error, M;
//Start simple. N = M
for(int N = 64; N <= 512; N = N * 2){
M = N;
printf("Transposta duma matriz %i * %i.\n", N, M);
// allocate device memory for the input
if(cudaMalloc(&d_matrix, sizeof(int) * N * M) != cudaSuccess){
fprintf(stderr, "Error allocating memory on device.\n");
return(-1);
}
// allocate device memory for the output
if(cudaMalloc(&d_out, sizeof(int) * N * M) != cudaSuccess){
fprintf(stderr, "Error allocating memory on device.\n");
return(-1);
}
// allocate host memory for the matrix
h_matrix = (int *) malloc(N * M * sizeof(int));
// initialize the matrix
for(int i = 0; i < N * M; i++)
h_matrix[i] = i;
// choose grid and block sizes (32x32 threads per block)
dim3 BlockSize(32, 32, 1);
dim3 GridSize(N / 32 + 1, M / 32 + 1, 1);
// copy the matrix to the device
if((error = cudaMemcpy(d_matrix, h_matrix, N * M * sizeof(int), cudaMemcpyHostToDevice)))
fprintf(stderr, "Erro a transferir matriz para device. Error code: %i.\n", error);
transposta<<<GridSize, BlockSize>>>(d_matrix, d_out, N);
if((error = cudaMemcpy(h_matrix, d_out, N * M * sizeof(int), cudaMemcpyDeviceToHost)) != cudaSuccess)
fprintf(stderr, "Erro a transferir matriz do device para host. Error code: %i.\n", error);
// print a 5x5 submatrix of the (transposed) result
for(int i = 0; i < 5; i++){
for(int j = 0; j < 5; j++)
printf(" %i ", h_matrix[i * N + j]);
printf("\n");
}
if((error = cudaFree(d_matrix)) != cudaSuccess)
printf("Erro a libertar memoria no device. Error code: %i.\n", error);
if((error = cudaFree(d_out)) != cudaSuccess)
printf("Erro a libertar memoria no device. Error code: %i.\n", error);
free(h_matrix);
}
return 0;
}
20,755 | /* Hello Cuda example */
/* Intro to GPU tutorial */
/* SCV group */
#include <stdio.h>
#define NUM_BLOCKS 4
#define BLOCK_WIDTH 8
/* Function executed on device (GPU */
/* Kernel: every thread prints its own thread and block index. */
__global__ void hello( void) {
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    printf("\tHello from GPU: thread %d and block %d\n", tid, bid);
}
/* Main function, executed on host (CPU) */
/* Host entry point: greet from the CPU, launch the hello kernel on
 * NUM_BLOCKS x BLOCK_WIDTH threads, wait for it, and greet again. */
int main( void) {
    printf( "Hello Cuda!\n" );            /* message from the CPU */
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); /* run on the device */
    cudaDeviceSynchronize();              /* wait for all GPU threads */
    printf( "Welcome back to CPU!\n" );   /* message from the CPU */
    return (0);
}
|
20,756 | #include <stdio.h>
// C = A * B where A is row x col, B is col x row, C is row x row (all flat
// arrays); one thread per output element, 2D thread indexing.
// Fix: the original accumulated with `+=` directly into matrix_c_dev, which
// the host allocates with cudaMalloc and never zeroes -- the result started
// from garbage. The dot product now accumulates in a register and is stored
// with a single write.
__global__ void matrixs_1D_multiplication(int *matrix_a_dev,int *matrix_b_dev,int *matrix_c_dev,int row,int col)
{
int j = threadIdx.x+blockIdx.x * blockDim.x; // output column
int i = threadIdx.y+blockIdx.y * blockDim.y; // output row
if(i< row &&j < row)
{
int sum = 0;
for(int k = 0; k < col; k++)
{
sum += matrix_a_dev[i* col + k] * matrix_b_dev[row*k + j];
}
matrix_c_dev[row *i + j] = sum;
}
}
// Driver: build a 4x5 matrix A and 5x4 matrix B on the host, multiply them
// on the GPU into a 4x4 matrix C, and print A and C.
// (Comments translated from Chinese.)
int main()
{
int row = 4;
int col = 5;
int *matrix_a_host;
int *matrix_b_host;
int *matrix_c_host;
matrix_a_host = (int *)malloc(row*col*sizeof(int));
matrix_b_host = (int *)malloc(row*col*sizeof(int));
matrix_c_host = (int *)malloc(row*row*sizeof(int));
// A[i][j] = i + j
for(int i = 0; i<row; i++)
{
for(int j = 0; j < col; j++)
{
matrix_a_host[i*col +j] = i+j;
}
}
printf("\n-------------Matrix a-----------------\n");
for(int i = 0; i < row*col; i++)
{
printf("%d ",*(matrix_a_host + i));
if(i%col==col-1) printf("\n"); // newline after every `col` values
}
// B[i][j] = i + j (B is col x row)
for(int i = 0; i<col; i++)
{
for(int j = 0; j < row; j++)
{
matrix_b_host[i*row +j] = i+j;
}
}
// ------------------GPU--------------------------
int *matrix_a_dev;
int *matrix_b_dev;
int *matrix_c_dev;
cudaMalloc((void**) &matrix_a_dev, row*col*sizeof(int));
cudaMalloc((void**) &matrix_b_dev, row*col*sizeof(int));
cudaMalloc((void**) &matrix_c_dev, row*row*sizeof(int));
cudaMemcpy(matrix_a_dev, matrix_a_host, row*col*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matrix_b_dev, matrix_b_host, row*col*sizeof(int), cudaMemcpyHostToDevice);
// Block dimensions must cover the row/col values passed to the kernel.
dim3 dimGrid(1, 2, 1);
dim3 dimBlock(4, 5, 1);
matrixs_1D_multiplication<<<dimGrid, dimBlock>>>(matrix_a_dev, matrix_b_dev, matrix_c_dev, row,col);
// Blocking copy-back also synchronizes with the kernel.
cudaMemcpy(matrix_c_host, matrix_c_dev, row*row*sizeof(int), cudaMemcpyDeviceToHost);
printf("\n-------------Matrix c-----------------\n");
for(int i = 0; i < row*row; i++)
{
printf("%d ",*(matrix_c_host + i));
if(i%row==row-1) printf("\n"); // newline after every `row` values
}
free(matrix_a_host);
free(matrix_b_host);
free(matrix_c_host);
cudaFree(matrix_a_dev);
cudaFree(matrix_b_dev);
cudaFree(matrix_c_dev);
return 1;
}
|
20,757 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include <cuda.h>
#include <math.h>
#define CEIL(a,b) ((a+b-1)/b)
#define SWAP(a,b,t) t=b; b=a; a=t;
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define PI 3.14159265
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
uch *GPUImg, *GPUCopyImg, *GPUResult; // Where images are stored in GPU
struct ImgProp{
int Hpixels;
int Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that flips the given image horizontally
// each thread only flips a single pixel (R,G,B)
// Rotate a 24-bpp BMP image (stored as one linear byte array, RowBytes per
// padded row) by the angle whose cosine/sine are passed in, scaling so the
// whole rotated image fits inside the original box. Each thread moves one
// RGB pixel, staging it through shared memory.
// NOTE(review): this is a scatter -- several source pixels can map to the
// same destination and some destinations receive nothing, so overlaps/holes
// are possible. The 48 KB static shared buffer far exceeds the 3 bytes per
// thread actually used, and the __syncthreads() is not strictly required
// since each thread reads back only its own 3 shared bytes -- confirm
// before relying on either.
__global__
void imrotate(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels, ui BlkPerRow, ui RowBytes, double cosRot, double sinRot)
{
__shared__ uch PixBuffer[3072*16];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
// BlkPerRow blocks cover one image row; recover (row, col) of this thread.
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcOffset = MYrow * RowBytes;
ui MYsrcIndex = MYsrcOffset + 3 * MYcol;
////////////// find destination index
int c, h, v, X, Y, NewCol, NewRow;
double newX, newY, H, V, Diagonal, ScaleFactor;
// Shift to image-centered Cartesian coordinates.
c=MYcol; h=Hpixels/2; v=Vpixels/2; // integer div
X=(double)c-(double)h;
Y=(double)v-(double)MYrow;
// pixel rotation matrix
newX=cosRot*X-sinRot*Y;
newY=sinRot*X+cosRot*Y;
// Scale to fit everything in the image box
H=(double)Hpixels;
V=(double)Vpixels;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(Hpixels>Vpixels) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY=newY*ScaleFactor;
// convert back from Cartesian to image coordinates
NewCol=((int) newX+h);
NewRow=v-(int)newY;
ui MYdstOffset = NewRow*RowBytes;
ui MYdstIndex = MYdstOffset + 3 * NewCol;
///////////////
// Stage this thread's RGB triple through shared memory, then scatter it
// to the rotated location.
ui Mytid3 = MYtid*3;
PixBuffer[Mytid3] = ImgSrc[MYsrcIndex];
PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1];
PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2];
__syncthreads();
// Write the pixel to its rotated destination.
ImgDst[MYdstIndex] = PixBuffer[Mytid3];
ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1];
ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2];
}
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Fills the global `ip` (dimensions, row bytes, saved 54-byte header) as a
// side effect, allocates the pixel buffer with malloc, and returns it
// (NULL if allocation fails; exits if the file cannot be opened).
// NOTE(review): fread return values are unchecked (a truncated file goes
// unnoticed), and the `static` Img pointer means a second call leaks the
// first buffer -- confirm callers only invoke this once.
uch *ReadBMPlin(char* fn)
{
static uch *Img;
FILE* f = fopen(fn, "rb");
if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
uch HeaderInfo[54];
fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
// BMP rows are padded to a multiple of 4 bytes.
int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
//save header for re-use
memcpy(ip.HeaderInfo, HeaderInfo,54);
printf("\n Input File name: %17s (%d x %d) File Size=%lu", fn,
ip.Hpixels, ip.Vpixels, IMAGESIZE);
// allocate memory to store the main image (1 Dimensional array)
Img = (uch *)malloc(IMAGESIZE);
if (Img == NULL) return Img; // Cannot allocate memory
// read the image from disk
fread(Img, sizeof(uch), IMAGESIZE, f);
fclose(f);
return Img;
}
// Write the 1D linear-memory stored image into file.
// Write the 1D linear-memory stored image into file `fn`, using the BMP
// header and dimensions previously captured in the global `ip` by
// ReadBMPlin. Exits on file-creation failure; fwrite returns are unchecked.
void WriteBMPlin(uch *Img, char* fn)
{
FILE* f = fopen(fn, "wb");
if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
//write the saved 54-byte header
fwrite(ip.HeaderInfo, sizeof(uch), 54, f);
//write pixel data
fwrite(Img, sizeof(uch), IMAGESIZE, f);
printf("\nOutput File name: %17s (%u x %u) File Size=%lu", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
fclose(f);
}
// Program entry point: reads a BMP, rotates it N times (angles spaced 2*pi/N)
// on the GPU, and writes each rotated frame to its own numbered output file.
// Usage: ./imrotateG infile outfile N   (1 <= N <= 30)
int main(int argc, char **argv)
{
	float tmpKernelExcutionTime;                 // per-iteration kernel time (ms)
	float totalKernelExecutionTime = 0.0f;       // FIX: was accumulated with += while uninitialized
	cudaError_t cudaStatus, cudaStatus2;
	cudaEvent_t time1, time2;
	char InputFileName[255], OutputFileName[255], ProgName[255];
	ui BlkPerRow;
	ui ThrPerBlk = 128, NumBlocks;
	ui RowBytes;
	cudaDeviceProp GPUprop;
	ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk;
	char SupportedBlocks[100];
	char KernelName[255];
	double RotAngle, deltaAngle;                 // rotation angle and per-frame increment
	int RotIter;
	int TotalIters;
	double cosRot, sinRot;
	strcpy(ProgName, "imrotateG");
	if (argc != 4) {
		printf("\n\nUsage: ./imrotateG infile outfile N");
		return 0;
	}
	strcpy(InputFileName, argv[1]);
	strcpy(OutputFileName, argv[2]);
	// Read the input image into CPU memory.
	TheImg = ReadBMPlin(InputFileName);
	if (TheImg == NULL) {
		printf("Cannot allocate memory for the input image...\n");
		exit(EXIT_FAILURE);
	}
	CopyImg = (uch *)malloc(IMAGESIZE);
	if (CopyImg == NULL) {
		free(TheImg);
		printf("Cannot allocate memory for the input image...\n");
		exit(EXIT_FAILURE);
	}
	// Choose which GPU to run on; change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		exit(EXIT_FAILURE);
	}
	cudaGetDeviceProperties(&GPUprop, 0);
	SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024;
	SupportedMBlocks = SupportedKBlocks / 1024;
	sprintf(SupportedBlocks, "%lu %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K');
	MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
	// Allocate GPU buffers for the input and output images.
	cudaStatus = cudaMalloc((void**)&GPUImg, IMAGESIZE);
	cudaStatus2 = cudaMalloc((void**)&GPUCopyImg, IMAGESIZE);
	if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)) {
		fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
		exit(EXIT_FAILURE);
	}
	// Copy the input image from host memory to the GPU buffer.
	cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy CPU to GPU failed!");
		exit(EXIT_FAILURE);
	}
	RowBytes = (IPH * 3 + 3) & (~3);     // bytes per BMP row, padded to a multiple of 4 (duplicate line removed)
	BlkPerRow = CEIL(IPH, ThrPerBlk);
	NumBlocks = IPV * BlkPerRow;
	printf("\nNum blocks: %d\n", NumBlocks);
	printf("\nThread per block: %d\n", ThrPerBlk);
	TotalIters = atoi(argv[3]);
	// FIX: the original printed a warning for N > 30 but ran anyway; also guard
	// N < 1, which would divide by zero when computing deltaAngle.
	if (TotalIters > 30) {
		printf("\nN is too large, should be less or equal to 30\n");
		exit(EXIT_FAILURE);
	}
	if (TotalIters < 1) {
		printf("\nN must be a positive integer\n");
		exit(EXIT_FAILURE);
	}
	deltaAngle = 2 * PI / float(TotalIters);
	printf("\nTotal iterations: %d\n", TotalIters);
	// Derive the output base name / extension once, outside the loop.
	strcpy(OutputFileName, argv[2]);
	char* token = strtok(OutputFileName, ".");
	char* OutputFirstName = token;
	token = strtok(NULL, ".");
	char* OutputLastName = token;
	// FIX: events were created inside the loop on every iteration and never
	// destroyed; create them once and release them after the loop.
	cudaEventCreate(&time1);
	cudaEventCreate(&time2);
	for (RotIter = 1; RotIter <= TotalIters; RotIter++) {
		// Build "<base><iter>.<ext>" for this frame.
		char outName[128] = "";
		char tmp[10];
		sprintf(tmp, "%d", RotIter);
		strcat(outName, OutputFirstName);
		strcat(outName, tmp);
		strcat(outName, ".");
		strcat(outName, OutputLastName);
		cudaEventRecord(time1, 0);               // kernel timing: start
		RotAngle = (double)(RotIter - 1) * deltaAngle;
		cosRot = cos(RotAngle);
		sinRot = sin(RotAngle);
		printf("\nRotation angle = %lf\n", RotAngle);
		imrotate <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPV, IPH, BlkPerRow, RowBytes, cosRot, sinRot);
		cudaEventRecord(time2, 0);               // kernel timing: stop
		cudaEventSynchronize(time1);
		cudaEventSynchronize(time2);
		cudaEventElapsedTime(&tmpKernelExcutionTime, time1, time2);
		totalKernelExecutionTime += tmpKernelExcutionTime;
		strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n");
		cudaStatus = cudaDeviceSynchronize();
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
			exit(EXIT_FAILURE);
		}
		GPUResult = GPUCopyImg;
		cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
			exit(EXIT_FAILURE);
		}
		cudaStatus = cudaDeviceSynchronize();
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
			free(TheImg);
			free(CopyImg);
			exit(EXIT_FAILURE);
		}
		WriteBMPlin(CopyImg, outName);           // write this rotated frame to disk
		// Clear both buffers so the next frame starts from a black canvas.
		memset(CopyImg, 0, IMAGESIZE);
		cudaMemset(GPUCopyImg, 0, IMAGESIZE);
	}
	cudaEventDestroy(time1);
	cudaEventDestroy(time2);
	printf("\nTotal Kernel Execution =%7.2f ms\n", totalKernelExecutionTime);
	cudaFree(GPUImg);
	cudaFree(GPUCopyImg);
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		free(TheImg);
		free(CopyImg);
		exit(EXIT_FAILURE);
	}
	free(TheImg);
	free(CopyImg);
	return(EXIT_SUCCESS);
}
|
20,758 | #include "includes.h"
__device__ int position; //index of the largest value
__device__ int largest; //value of the largest value
int lenString = 593;
int maxNumStrings = 1000000;
int threshold = 2;
// Marks every string within `threshold` Hamming distance of the reference
// string at device-global `position` as a duplicate (d_c[i] = 1) and folds its
// count into d_b[position]; the reference string itself is tagged d_c = 2.
// d_a holds `size` strings of `lenString` chars laid out back-to-back.
__global__ void compare(char *d_a, int *d_b, int *d_c, int size, int lenString, int threshold) {
	int my_id = blockDim.x * blockIdx.x + threadIdx.x;
	if (my_id == position)
		d_c[my_id] = 2;
	if ((my_id < size) && (d_c[my_id] == 0) && (my_id != position)) {
		int x, diffs = 0;
		// Count mismatching characters; bail out early once over the threshold.
		for (x = 0; x < lenString; x++) {
			diffs += (bool)(d_a[(lenString*position)+x] ^ d_a[(my_id*lenString)+x]);
			if (diffs > threshold)
				break;
		}
		if (diffs <= threshold) {
			// FIX: many threads can reach this line concurrently, so the
			// original plain `d_b[position] +=` was a data race; accumulate
			// atomically instead.
			atomicAdd(&d_b[position], d_b[my_id]);
			d_c[my_id] = 1;
		}
	}
}
20,759 | #include "../image_headers/hough.cuh"
#include <iostream>
#include <cmath>
#include <cstdio>
// Hough transform voting kernel: one thread per pixel. Every white pixel
// (value 255) votes once per theta in [0, 360) for the line
// r = i*cos(theta) + j*sin(theta), accumulating into line_matrix[r][theta].
__global__ void hough_kernel(int* line_matrix, int* image, int width, int height, int diag) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i = idx / width;
    int j = idx % width;
    if (idx < width * height) {
        // Only edge pixels vote; hoisting this check also removes the original
        // accidental O(diag*360) scan (r can be computed directly from theta).
        if (image[i * width + j] != 255) {
            return;
        }
        for (int theta = 0; theta < 360; theta++) {
            // FIX: theta iterates in degrees, but cosf/sinf expect radians;
            // the original passed degrees straight through.
            float rad = theta * 0.017453292519943295f;  // pi / 180
            int r = (int)(i * cosf(rad) + j * sinf(rad));
            if (r >= 0 && r <= diag) {
                atomicAdd(line_matrix + r * 360 + theta, 1);
            }
        }
    }
}
// Host wrapper: launch one thread per pixel (grid rounded up) and block until
// the accumulator is fully populated.
void hough(int* line_matrix, int* image, int width, int height, int diag, unsigned int threads_per_block) {
    const size_t total_pixels = (size_t)width * height;
    const size_t blocks = (total_pixels + threads_per_block - 1) / threads_per_block;
    hough_kernel<<<blocks, threads_per_block>>>(line_matrix, image, width, height, diag);
    cudaDeviceSynchronize();
}
20,760 | #include<iostream>
#include<vector>
const int SHARED_MEM = 256;
// Elementwise absolute value: abs_a[i] = |a[i]| for every i in [0, N).
__global__ void absoluteKernel(int *a, int *abs_a, int N){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;                       // tail guard: grid may overshoot N
    const int v = a[i];
    abs_a[i] = (v < 0) ? -v : v;
}
// Block-level max reduction over `a` in shared memory; thread 0 of each block
// writes its block's maximum to *res.
// NOTE(review): with more than one block, *res ends up holding whichever block
// wrote last (cross-block race), not necessarily the global maximum — confirm
// the kernel is intended to be trusted only for a single block, or switch the
// final write to atomicMax with a host-initialized *res.
__global__ void findmaxnorm(int *a, int *res, int N){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // FIX: was declared with SHARED_MEM*sizeof(int) elements — 4x larger than
    // intended; SHARED_MEM ints is the correct size.
    __shared__ int sh[SHARED_MEM];
    // Guard the load; out-of-range threads contribute 0, a neutral value for
    // the absolute-value inputs this kernel is fed (see absoluteKernel).
    sh[threadIdx.x] = (index < N) ? a[index] : 0;
    __syncthreads();
    // Standard halving tree reduction.
    for (int dist = blockDim.x / 2; dist > 0; dist >>= 1) {
        if (threadIdx.x < dist) {
            if (sh[threadIdx.x] < sh[threadIdx.x + dist])
                sh[threadIdx.x] = sh[threadIdx.x + dist];
        }
        // FIX: the original never synchronized inside the loop, so threads
        // raced on partially-updated shared values.
        __syncthreads();
    }
    if (threadIdx.x == 0) { *res = sh[0]; }
}
// Fills a vector with small random values of random sign, computes |.| on the
// GPU, then reduces to the max-norm and prints it.
int main(){
    const int N = 1024;
    size_t size = N * sizeof(int);
    std::vector<int> arr(N);
    std::vector<int> absarr(N, 0);
    int norm = 0;
    // FIX: the original wrote `(-1)^(rand()%3)` — but `^` is XOR in C++, not
    // exponentiation — so the sign logic was garbage. Pick the sign explicitly.
    for (auto& v : arr) {
        int magnitude = rand() % 10;
        v = (rand() % 2 == 0) ? magnitude : -magnitude;
    }
    int *d_arr, *d_absarr, *d_norm;
    cudaMalloc((void **)&d_arr, size);
    cudaMalloc((void **)&d_absarr, size);
    cudaMalloc((void **)&d_norm, sizeof(int));
    cudaMemcpy(d_arr, arr.data(), size, cudaMemcpyHostToDevice);
    // FIX: d_norm was read back without ever being initialized; zero it so the
    // result is defined even if the kernel writes nothing.
    cudaMemset(d_norm, 0, sizeof(int));
    int threadsPerBlock = 32;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    absoluteKernel<<<blocksPerGrid, threadsPerBlock>>>(d_arr, d_absarr, N);
    cudaMemcpy(absarr.data(), d_absarr, size, cudaMemcpyDeviceToHost);
    findmaxnorm<<<blocksPerGrid, threadsPerBlock>>>(d_absarr, d_norm, N);
    cudaMemcpy(&norm, d_norm, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "norm is: " << norm << std::endl;
    cudaFree(d_arr);
    cudaFree(d_absarr);
    cudaFree(d_norm);
    return 0;
}
|
20,761 | #include "MurMurHash3.cuh"
// Rotate the 64-bit value left by r bits (valid for r in 1..63): the bits
// shifted off the top wrap around to the bottom.
__host__ __device__ inline uint64_t rotl64(uint64_t x, int8_t r)
{
    const uint64_t hi = x << r;
    const uint64_t lo = x >> (64 - r);
    return hi | lo;
}
// Fetch the i-th 64-bit block from the buffer (a plain indexed read; this is
// the customary Murmur3 hook where big-endian byte-swapping would go).
__host__ __device__ inline uint64_t getblock64(const uint64_t *p, int i)
{
    return *(p + i);
}
// Murmur3 64-bit finalization mix: alternating xorshift and multiply steps
// that avalanche all input bits into the result.
__host__ __device__ inline uint64_t fmix64(uint64_t k)
{
    k = (k ^ (k >> 33)) * 0xff51afd7ed558ccd;
    k = (k ^ (k >> 33)) * 0xc4ceb9fe1a85ec53;
    return k ^ (k >> 33);
}
// MurmurHash3 x64 128-bit variant: hashes `len` bytes at `key` with the given
// seed and writes two 64-bit words of digest to `out` (out must hold 16 bytes).
// Processes the input as 16-byte blocks, then a byte-wise tail, then a
// finalization mix.  Usable from both host and device code.
__host__ __device__ void MurmurHash3_128(const void *key, int len, uint32_t seed, void *out)
{
const uint8_t *data = (const uint8_t *)key;
const int nblocks = len / 16;
// Two running 64-bit hash states, both seeded identically.
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = 0x87c37b91114253d5;
const uint64_t c2 = 0x4cf5ad432745937f;
//----------
// body: consume the input 16 bytes (two 64-bit lanes) at a time
const uint64_t *blocks = (const uint64_t *)(data);
for (int i = 0; i < nblocks; i++)
{
uint64_t k1 = getblock64(blocks, i * 2 + 0);
uint64_t k2 = getblock64(blocks, i * 2 + 1);
k1 *= c1;
k1 = rotl64(k1, 31);
k1 *= c2;
h1 ^= k1;
h1 = rotl64(h1, 27);
h1 += h2;
h1 = h1 * 5 + 0x52dce729;
k2 *= c2;
k2 = rotl64(k2, 33);
k2 *= c1;
h2 ^= k2;
h2 = rotl64(h2, 31);
h2 += h1;
h2 = h2 * 5 + 0x38495ab5;
}
//----------
// tail: the remaining 0..15 bytes.  NOTE: the case fall-through (no breaks)
// is intentional and part of the reference algorithm — each case accumulates
// one more byte into k1/k2 before the shared mixing steps.
const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15)
{
case 15:
k2 ^= ((uint64_t)tail[14]) << 48;
case 14:
k2 ^= ((uint64_t)tail[13]) << 40;
case 13:
k2 ^= ((uint64_t)tail[12]) << 32;
case 12:
k2 ^= ((uint64_t)tail[11]) << 24;
case 11:
k2 ^= ((uint64_t)tail[10]) << 16;
case 10:
k2 ^= ((uint64_t)tail[9]) << 8;
case 9:
k2 ^= ((uint64_t)tail[8]) << 0;
k2 *= c2;
k2 = rotl64(k2, 33);
k2 *= c1;
h2 ^= k2;
case 8:
k1 ^= ((uint64_t)tail[7]) << 56;
case 7:
k1 ^= ((uint64_t)tail[6]) << 48;
case 6:
k1 ^= ((uint64_t)tail[5]) << 40;
case 5:
k1 ^= ((uint64_t)tail[4]) << 32;
case 4:
k1 ^= ((uint64_t)tail[3]) << 24;
case 3:
k1 ^= ((uint64_t)tail[2]) << 16;
case 2:
k1 ^= ((uint64_t)tail[1]) << 8;
case 1:
k1 ^= ((uint64_t)tail[0]) << 0;
k1 *= c1;
k1 = rotl64(k1, 31);
k1 *= c2;
h1 ^= k1;
};
//----------
// finalization: fold in the length, cross-mix the states, avalanche each.
h1 ^= len;
h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t *)out)[0] = h1;
((uint64_t *)out)[1] = h2;
}
|
20,762 | #include "includes.h"
// Only the very first thread of the grid writes the two scalars; all other
// threads do nothing.
__global__ void kernel(float *F, double *D)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid != 0)
        return;
    *F = 12.1;
    *D = 12.1;
}
20,763 | /*************************************************************************************************
* File: matrixmath.cu
* Date: 11/06/2018
*
* Compiling: Requires a Nvidia CUDA capable graphics card and the Nvidia GPU Computing Toolkit.
* Linux & Windows: nvcc -Wno-deprecated-gpu-targets -O3 -o prog2 matrixmath.cu
*
* Usage: Linux: >> prog2
* Windows: PS > ./prog2.exe
*
* Description: This file runs a parallel program using CUDA to find the sum of squares. The first
* part of the program asks whether you would like to run the optimized completely parallel
* solution or an equivalent sequential solution. Both solutions use CUDA, but 1 is optimized
* to be ran on many cores using atomic addition while the other runs the entire calculation
* on a single pass-through, similar to how a sequential CPU program would run. Once the type
* of kernel to run has been decided the user is asked how large they would like the sum of
* squares to calculate. This calculation is done by creating an NxN matrix and a N sized vector.
* The matrix (A) and the vector (B) create a new vector C that satisfies the following formula:
* C[i] += A[i][j] * B[j]
*
*************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <fstream>
#define GRIDVAL 20.0
/**
* __global__ void matrixSum(int, int, int, int, int)
* - Function is a __global__ function meaning it is accessible for GPGPU processing.
* The function takes in a NxN matrix as *a, and a N length vector *b and an empty
* N length vector *c along with the N value (both l and w are N in this case). The
* function calculates c[x] += a[x][y] * b[y] and performs an atomicAdd function when
* adding into c[x]. This function is meant to be highly parallelized.
**/
__global__ void matrixSum(int *a, int *b, int *c, int l, int w) {
// grab x position on grid (output row of c)
int x = threadIdx.x + blockIdx.x * blockDim.x;
// grab y position on grid (column within row x)
int y = threadIdx.y + blockIdx.y * blockDim.y;
// bounds check before touching memory (grid may overshoot the matrix)
if( x >= 0 && y >= 0 && x < w && y < l) {
// perform c[x] += a[x][y] * b[y] using an atomic add; a is row-major with
// row length w, so a[(x*w)+y] reads row x, column y (the previous comment
// said a[y][x], which did not match the index expression)
atomicAdd(&c[x], a[(x*w)+y] * b[y]);
}
}
/**
* __global__ void singleSum(int, int, int ,int ,int)
* - Function is a __global__ function meaning it is accessible for GPGPU processing.
* The function takes in a NxN matrix as *a, and a N length vector *b and an empty
* N length vector *c along with the N value (both l and w are N in this case). The
* function loops through each y value and each x value calculating
* c[x] += a[x][y] * b[y]. The function is meant to run on a single CUDA core and is
* meant to represent a sequential run of the matrixSum function
**/
// Single-thread reference version of the matrix-vector product: each output
// element accumulates the dot product of one row of `a` (row-major, width w)
// with vector `b`.  Meant to be launched <<<1, 1>>> as a sequential baseline.
__global__ void singleSum(int *a, int *b, int *c, int l, int w) {
    for (int row = 0; row < w; ++row) {
        int acc = c[row];
        for (int col = 0; col < l; ++col)
            acc += a[row * w + col] * b[col];
        c[row] = acc;
    }
}
/**
* int main(int, char*[])
* - Function is the entry point for the program. Welcomes the user, then asks the user
* whether they want to run a sequential or parallel calculation for the sum of squares.
* Once a selection is made the program asks the user for the max square to use (also
* known as the size N for the NxN matrix and N length vectors). When both these values
* have been entered then the NxN matrix and N length vectors are allocated and initialized
* with their starting values, the function then calls the external __global__ function
* with the appropriate grid/block set-up and returns the result out.
*/
/**
 * Entry point: prompts for sequential vs. parallel mode and the matrix size N,
 * builds the NxN matrix A and vectors B/C, runs the chosen kernel, and prints
 * the resulting vector C where C[i] += A[i][j] * B[j].
 */
int main(int argc, char* argv[]) {
    int size, sequential;
    std::cout << "Sum of Squares using CUDA." << std::endl;
    std::cout << "Enter 1 for Sequential calculation or enter 0 for Parallel calculation: ";
    std::cin >> sequential;
    if (sequential == 1)
        std::cout << "SEQUENTIAL calculation is ON." << std::endl << std::endl;
    else
        std::cout << "PARALLEL calculation is ON." << std::endl << std::endl;
    std::cout << "Enter in the maximum square to calculate: ";
    std::cin >> size;
    // Host-side NxN matrix and N-length vectors, populated with 1..N patterns.
    int *a = new int[size*size];
    int *b = new int[size];
    int *c = new int[size];
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            a[(i*size)+j] = j+1;
        }
        b[i] = i+1;
        c[i] = 0;
    }
    // Device-side copies.
    int *gpu_a, *gpu_b, *gpu_c;
    cudaMalloc( (void**)&gpu_a, (size * size)*sizeof(int));
    cudaMalloc( (void**)&gpu_b, (size)*sizeof(int));
    cudaMalloc( (void**)&gpu_c, (size)*sizeof(int));
    cudaMemcpy(gpu_a, a, size*size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(gpu_c, 0, size*sizeof(int));
    // Launch geometry: GRIDVAL x GRIDVAL threads per block, grid rounded up.
    dim3 threadsPerBlock(GRIDVAL, GRIDVAL, 1);
    dim3 numBlocks(ceil(size/GRIDVAL), ceil(size/GRIDVAL), 1);
    if (sequential == 1)
        singleSum<<<1, 1>>>(gpu_a, gpu_b, gpu_c, size, size);
    else
        matrixSum<<<numBlocks, threadsPerBlock>>>(gpu_a, gpu_b, gpu_c, size, size);
    // Blocking copy back — also synchronizes with the kernel.
    cudaMemcpy(c, gpu_c, size*sizeof(int), cudaMemcpyDeviceToHost);
    printf("Resulting values of the vector C:\n");
    for(int i = 0; i < size; i++) {
        printf("%d | ", c[i]);
    }
    printf("\n");
    // FIX: the original leaked every allocation — release device and host memory.
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
20,764 | #include <stdio.h>
#include <math.h>
#define N 3000000
#define BLOCKSIZE 256
// Sliding 3-point mean: out[i] = (in[i] + in[i+1] + in[i+2]) / 3 for the first
// N-2 positions; threads past the valid range exit immediately.
__global__ void moving_average(float *in, float *out) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N - 2)
        return;
    const float window_sum = in[i] + in[i + 1] + in[i + 2];
    out[i] = window_sum / 3.0;
}
// Host driver: builds an N-element input, runs the 3-point moving average on
// the GPU, copies the result back, and releases all buffers.
int main() {
    float *in, *out;
    float *d_in, *d_out;
    size_t vecSize = N * sizeof(float);
    size_t outSize = (N - 2) * sizeof(float);
    in = (float*)malloc(vecSize);
    out = (float*)malloc(outSize);
    // FIX: allocations were unchecked, and `in` was copied to the device
    // without ever being written (uninitialized reads); give it defined data.
    if (in == NULL || out == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int i = 0; i < N; i++) {
        in[i] = (float)i;
    }
    // Allocate device memory for input and output
    cudaMalloc((void**)&d_in, vecSize);
    cudaMalloc((void**)&d_out, outSize);
    // Transfer data from host to device
    cudaMemcpy(d_in, in, vecSize, cudaMemcpyHostToDevice);
    // Launch: one thread per valid output element, grid rounded up
    int threadsPerBlock = BLOCKSIZE;
    int numBlocks = ceil((N-2) * 1.0 / threadsPerBlock);
    moving_average<<<numBlocks, threadsPerBlock>>>(d_in, d_out);
    // Transfer data from device to host (blocking copy synchronizes)
    cudaMemcpy(out, d_out, outSize, cudaMemcpyDeviceToHost);
    // Deallocate device and host memory
    cudaFree(d_in);
    cudaFree(d_out);
    free(in); free(out);
    return 0;
}
20,765 | #include <stdio.h>
#include <cuda_runtime.h>
// One explicit time step of a two-species reaction-diffusion system on an
// N x N grid with periodic boundaries in both directions.  u1/v1 are the two
// concentration fields, updated in place; a, b, eta parameterize the reaction
// terms, d_u1/d_v1/D scale diffusion, dt is the time step.
// NOTE(review): the fields are updated in place, so neighbor reads can observe
// values already overwritten by threads in other blocks during the same launch
// — confirm that callers double-buffer or accept that ordering.
__global__ void Kernel
(
double* u1,
double* v1,
double a,
double b,
double eta,
double d_u1,
double d_v1,
double dt,
double D,
int N
)
{
// Flattened grid coordinates: (x, y) -> id, row-major with row length N.
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int bidx = blockIdx.x;
int bidy = blockIdx.y;
int bdimx = blockDim.x;
int bdimy = blockDim.y;
int x = tidx+bidx*bdimx;
int y = tidy+bidy*bdimy;
int id = x + y*N;
// Cache this cell's current values before any neighbor reads.
double u1_c,v1_c;
u1_c=u1[id];
v1_c=v1[id];
// 5-point Laplacian of u1 with periodic wrap-around (left/right/up/down).
double u1_l,u1_r,u1_u,u1_d,lap_Du1;
if(x==0) u1_l=u1[id+(N-1)];
else u1_l=u1[id-1];
if(x==N-1) u1_r=u1[id-(N-1)];
else u1_r=u1[id+1];
if(y==0) u1_u=u1[id+N*(N-1)];
else u1_u=u1[id-N];
if(y==N-1) u1_d=u1[id-N*(N-1)];
else u1_d=u1[id+N];
lap_Du1=u1_l+u1_r+u1_u+u1_d-u1_c*4.0;
// Same 5-point periodic Laplacian for v1.
double v1_l,v1_r,v1_u,v1_d,lap_Dv1;
if(x==0) v1_l=v1[id+(N-1)];
else v1_l=v1[id-1];
if(x==N-1) v1_r=v1[id-(N-1)];
else v1_r=v1[id+1];
if(y==0) v1_u=v1[id+N*(N-1)];
else v1_u=v1[id-N];
if(y==N-1) v1_d=v1[id-N*(N-1)];
else v1_d=v1[id+N];
lap_Dv1=v1_l+v1_r+v1_u+v1_d-v1_c*4.0;
// Reaction terms (Brusselator-like form: a - (b+1)u + u^2 v, and bu - u^2 v).
double react_u1,react_v1;
react_u1=eta*( a-(b+1.0)*u1_c + u1_c*u1_c*v1_c );
react_v1=eta*( b*u1_c - u1_c*u1_c*v1_c );
// Explicit Euler update: new = old + dt*reaction + diffusion.
u1[id] = u1_c+dt*react_u1+d_u1*D*lap_Du1;
v1[id] = v1_c+dt*react_v1+d_v1*D*lap_Dv1;
}
|
20,766 | // Taken from the NVIDIA "2_Graphics\simpleGL" sample:
// A kernel that modifies the z-coordinates of a rectangular
// grid of vertices, based on a time value, so that they
// form an animated sine wave
// From the NVIDIA "simpleGL" sample: writes an animated sine-wave surface over
// a width x height vertex grid, one thread per vertex.
extern "C"
__global__ void simple_vbo_kernel(
    float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // FIX: guard against a grid that overshoots the vertex grid — without this
    // check, excess threads wrote past the end of `pos`.
    if (x >= width || y >= height)
        return;
    // uv coordinates mapped from pixel position into [-1, 1]
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;
    // simple sine wave pattern animated by `time`
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
    // write output vertex (w is the animated height coordinate)
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
|
// Elementwise sin(input1) + cos(input2) over a fully 3-D grid/block launch;
// the 3-D coordinates are flattened into one linear element index.
__global__ void process_kernel1(const float *input1,const float *input2, float *output, int datasize)
{
    const int block_id  = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int idx = block_id * (blockDim.x * blockDim.y * blockDim.z) + thread_id;
    if (idx < datasize)
    {
        output[idx] = sin(input1[idx]) + cos(input2[idx]);
    }
}
// Elementwise natural logarithm of `input`, same flattened 3-D indexing as
// process_kernel1.
__global__ void process_kernel2(const float *input, float *output, int datasize)
{
    const int block_id  = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int idx = block_id * (blockDim.x * blockDim.y * blockDim.z) + thread_id;
    if (idx < datasize)
    {
        output[idx] = log(input[idx]);
    }
}
// Elementwise square root of `input`, same flattened 3-D indexing as
// process_kernel1.
__global__ void process_kernel3(const float *input, float *output, int datasize)
{
    const int block_id  = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int idx = block_id * (blockDim.x * blockDim.y * blockDim.z) + thread_id;
    if (idx < datasize)
    {
        output[idx] = sqrt(input[idx]);
    }
}
20,768 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (the file header says this test
// must not be modified): repeatedly overwrites `comp` with expressions of the
// inputs for var_1 iterations, then prints the final value from the device.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) {
for (int i=0; i < var_1; ++i) {
comp = coshf(var_2 - (-1.7378E23f + (var_3 / (+1.8335E-42f + var_4))));
comp = (+1.5395E-41f / sinf((var_5 - floorf(-1.4379E-35f - (+1.5941E-43f - -1.0407E-24f - -1.5079E-36f - -0.0f)))));
float tmp_1 = +0.0f;
comp = tmp_1 * (-1.4513E-44f / var_6);
if (comp == var_7 - var_8 - +1.9090E-37f / +1.6882E36f) {
comp = var_9 - var_10;
comp += var_11 * (-1.3228E-35f / (+1.1695E20f * (-1.5240E35f * (+1.6842E-18f / +1.3278E-44f))));
}
}
printf("%.17g\n", comp);
}
/**
 * Allocates a float array of `n` elements (default 10, matching the original
 * fixed size) with every entry set to `v`.  Returns NULL on allocation failure
 * instead of writing through an unchecked pointer.  Caller owns the buffer and
 * must free() it.
 */
float* initPointer(float v, int n = 10) {
    float *ret = (float*) malloc(sizeof(float) * n);
    if (ret == NULL)            // FIX: malloc result was used unchecked
        return NULL;
    for (int i = 0; i < n; ++i)
        ret[i] = v;
    return ret;
}
// Entry point: parses twelve numeric command-line arguments, launches the
// generated `compute` kernel single-threaded, and waits for its printf output.
int main(int argc, char** argv) {
    /* Program variables */
    // FIX: argv[1..12] were read without checking argc, so a short command
    // line dereferenced NULL; require all 12 arguments up front.
    if (argc < 13) {
        fprintf(stderr, "usage: %s v1 v2 ... v12\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12);
    cudaDeviceSynchronize();   // flush device-side printf before exiting
    return 0;
}
|
20,769 | /* A toy example that adds two numbers on the device. */
#include <stdio.h>
// Single-thread kernel: store the sum of the two scalar arguments into *c.
__global__ void add(int *c, int a, int b) {
    const int sum = a + b;
    *c = sum;
}
// Host driver for the toy kernel: add 5 + 6 on the device and print the result.
int main(void) {
    int *result_dev;
    cudaMalloc(&result_dev, sizeof(int));
    // One block of one thread is all a scalar addition needs.
    add<<<1, 1>>>(result_dev, 5, 6);
    int result;
    cudaMemcpy(&result, result_dev, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(result_dev);
    printf("%d\n", result);
    return 0;
}
|
20,770 | /*
# compile
$ nvcc -o sigmoid sigmoid.cu
# numpy counterpart
import numpy as np
m = np.array(((0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)))
s = 1/(1+np.exp(-m))
sd = s*(1-s)
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
// kernel of device sigmoid function
// kernel of device sigmoid function
__global__
void kSigmoid(const int nThreads, float const *input, float *output){
    /* Computes the sigmoid f(x) = 1/(1 + e^-x) elementwise.
       Grid-stride loop: any launch shape covers all nThreads elements.
       Inputs:
           input: array
           output: array, the results of the computation are stored here
    */
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < nThreads;
         i += blockDim.x * gridDim.x)
    {
        // FIX: `1.0 / (1.0 + std::exp(...))` promoted every element to double
        // inside a float kernel; use single-precision literals and expf.
        output[i] = 1.0f / (1.0f + expf(-input[i]));
    }
}
// cuda version (device-side) of sigmoid function
// Host-side launcher for the sigmoid kernel: one block per row, one thread per
// column; blocks until the result is ready.
void dSigmoid(float const *input, float *output, const int height, const int width){
    const int element_count = height * width;
    kSigmoid <<< height, width >>> (element_count, input, output);
    cudaDeviceSynchronize();
}
// kernel of derivative of sigmoid function
// Derivative of the sigmoid, f'(x) = f(x)*(1 - f(x)), where `input` already
// holds f(x).  Grid-stride loop covers all nThreads elements.
__global__
void kSigmoid_d(const int nThreads, float const *input, float *output) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += stride)
    {
        const float s = input[i];
        output[i] = s * (1 - s);
    }
}
// derivative of sigmoid function (d: device, d: derivative)
// Host-side launcher for the sigmoid-derivative kernel; returns `output` so
// calls can be chained.
float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){
    kSigmoid_d <<< rows, columns >>> (rows * columns, input, output);
    cudaDeviceSynchronize();
    return output;
}
// Demo driver: computes the sigmoid of a 4x3 matrix (values 0..11) on the GPU,
// prints it, then computes and prints the sigmoid derivative of that result.
int main(void)
{
    // Host-side input: 4x3 matrix flattened into 12 floats, values 0..11.
    const int M1_SIZE = 12;
    const int M1_BYTES = M1_SIZE * sizeof(float);
    float h_m1[M1_SIZE];
    float h_out[M1_SIZE];
    for (int i = 0; i < M1_SIZE; ++i)
        h_m1[i] = float(i);

    // Device buffers for input and result.
    float *d_m1 = NULL;
    float *d_out = NULL;
    cudaMalloc((void**)&d_m1, M1_BYTES);
    cudaMalloc((void**)&d_out, M1_BYTES);

    // Pass 1: sigmoid of the raw values.
    cudaMemcpy(d_m1, h_m1, M1_BYTES, cudaMemcpyHostToDevice);
    dSigmoid(d_m1, d_out, 4, 3);
    cudaMemcpy(h_out, d_out, M1_BYTES, cudaMemcpyDeviceToHost);
    printf("sigmoid\n");
    for (int i = 0; i < M1_SIZE; ++i)
        printf("h_out[%d] = %f\n", i, h_out[i]);

    // Pass 2: derivative, fed with the sigmoid values just produced.
    cudaMemcpy(d_m1, h_out, M1_BYTES, cudaMemcpyHostToDevice);
    dSigmoid_d(d_m1, d_out, 4, 3);
    cudaMemcpy(h_out, d_out, M1_BYTES, cudaMemcpyDeviceToHost);
    printf("sigmoid derivative\n");
    for (int i = 0; i < M1_SIZE; ++i)
        printf("h_out[%d] = %f\n", i, h_out[i]);

    // Release device buffers (host arrays are stack-allocated).
    cudaFree(d_m1);
    cudaFree(d_out);
}
20,771 | #include "includes.h"
// cuDEBYE SOURCE CODE VERSION 1.5
// TO DO:
// - REWRITE TO DOUBLE PRECISION DISTANCE CALCULATIONS FOR BENCHMARKING
// - CONSIDER NOT CALLING SQRT (HISTOGRAM OF VALUE UNDER SQUARE -> problem with memory, no solution jet) IN KERNEL TO SAVE COMPUTATION TIME
// - USE INTEGER VALUES INSTEAD OF FLOAT AND CALCULATE IN FEMTO METERS INSTEAD OF ANGSTROM -> INTEGER OPERATIONS SHOULD REPLACE ROUND AND SINGLE PRECISION OPERATIONS WITH ACCEPTABLE ERROR
// - IMPLEMENT A CLEVER ALGORYTHM TO SET GRID AND BLOCK SIZE AUTOMATICALLY
// - BINARY FILE SUPPORT FOR FASTER INFORMATION EXCHANGE AND LESS MEMORY CONSUMPTION OR/AND PYTHON7MATLAB INTERFACE TO GET ARRAYS DIRECTLY
// - CREATE INTERFACE TO DISCUS (READ DISCUS STRUCTURES)
// - IMPLEMENT USAGE OF MORE GPU'S
// - MULTIPLE EMPTY LINES IN ASCII CAN CAUSE A CRASH DURING READING
// - HOST AND THRUST OPERATIONS ARE VERY INEFFICIENT (BUT FAST ENOUGH) -> MAYBE REWRITE THEM
// - ELIMINATE COMPILER WARNINGS FOR A MORE STABLE PROGRAM
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PREAMBLE: LIBARIES AND USEFULL BASIC FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Include cuda libaries for parallel computing
// Thrust libaries from the cuda toolkit for optimized vector operations
// Libaries for input and output streams for display results and read and write files.
// Better than the old printf shit
using namespace std; // Normally all stream functions have to called via prefix std:: -> So functions can called withaout prefix (Example: std::cout -> cout)
// Libary for measuring calculation time
// define the mathematical constant pi
# define PI 3.14159265358979323846
// Function to check if input file parsed via commandline exists
// Computes, for one pair of atom types (type1, type2) and every scattering
// vector K[Idx], the Debye prefactor ffoobb = occ1*occ2 * exp(-B-factor term)
// * f1*f2, where f1/f2 come from the 11-parameter atomic scattering factor
// approximation (5 Gaussians a_i*exp(b_i*s^2) plus constant c).
// One-dimensional grid: one thread per K value; size_K guards the tail.
__global__ void atomicScatter(int type1, int type2, int size_K, double *occ, double *beq, double *K, double *a, double *b, double *c, double *ffoobb) {
// Kernel is executed for each K/TwoTheta (one dimensional grid)
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
// Only execute if K/TwoTheta exists and is no phantom value, caused by discrete grid and block size.
if (Idx < size_K) {
double rp16pi2 = -0.006332573977646; // = (-1) * 1/(16*pi^2)
double negativeHalfSquaredS = K[Idx] * K[Idx] * rp16pi2; // = -sin^2(theta)/lambda^2, s = 2*sin(theta)/lambda = 1/d
// Occupancy and Debye-Waller (thermal) part of the prefactor.
ffoobb[Idx] = occ[type1] * occ[type2];
ffoobb[Idx] = ffoobb[Idx] * exp(negativeHalfSquaredS*(beq[type1] + beq[type2]));
// Atomic scattering factors from the 11-parameter approximation:
// f = c + sum_{i=0..4} a_i * exp(b_i * negativeHalfSquaredS), with each
// type's 5 (a, b) pairs stored contiguously at offset type*5.
double f1 = c[type1];
double f2 = c[type2];
for (int i = 0; i < 5; i++) {
f1 += a[type1 * 5 + i] * exp(b[type1 * 5 + i] * negativeHalfSquaredS);
f2 += a[type2 * 5 + i] * exp(b[type2 * 5 + i] * negativeHalfSquaredS);
}
// Complete the prefactor with both scattering factors.
ffoobb[Idx] = ffoobb[Idx] * f1*f2;
}
}
20,772 | #include<stdio.h>
// Shift-left-by-one inside a 128-element shared array: array[i] takes the old
// value of array[i+1]; the last slot keeps its own value.  Result copied to g.
// Launch with exactly one block of 128 threads.
__global__ void shift(int * g){
    int i = threadIdx.x;
    __shared__ int array[128];
    array[i] = i;
    __syncthreads();
    // FIX: the original placed __syncthreads() inside `if (i < 127)`, a
    // divergent branch thread 127 never reaches — undefined behavior.  Read
    // the neighbor first, barrier with ALL threads, then write back.
    int temp = (i < 127) ? array[i + 1] : array[i];
    __syncthreads();
    array[i] = temp;
    __syncthreads();
    g[i] = array[i];
}
// helper function
// Dump all 128 entries of the host array on one line, space separated.
void print_array(int * h){
    int idx = 0;
    while (idx < 128) {
        printf("%d ", h[idx]);
        ++idx;
    }
    printf("\n");
}
// Host driver: run the shared-memory shift kernel on 128 ints and print the
// shifted array.
int main(){
    const int ARRAY_BYTES = sizeof(int) * 128;
    int h_array[128];            // destination buffer on the host
    int *d_array = NULL;         // working buffer in device global memory
    cudaMalloc((void **)&d_array, ARRAY_BYTES);
    // One block of 128 threads — one thread per element.
    shift<<<1, 128>>>(d_array);
    // Blocking copy back (also synchronizes with the kernel).
    cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    print_array(h_array);
    cudaFree(d_array);
    return 0;
}
20,773 | #include <stdio.h>
#include <stdlib.h>
#define NUM_ELEMENTS 8192
#define MAX_THREADS_PER_BLOCK 1024
#define KERNEL_LOOP 100000
// Fill the NUM_ELEMENTS-long host buffer with pseudo-random values from rand().
__host__ void generate_rand_data(unsigned int * host_data_ptr)
{
    for (unsigned int idx = 0u; idx < NUM_ELEMENTS; ++idx)
        host_data_ptr[idx] = (unsigned int) rand();
}
// Benchmark kernel: accumulate by re-reading GLOBAL memory every iteration
// (compare against the shared/register variants below).
// NOTE(review): the `loop_iter` parameter is accepted but the loop uses the
// KERNEL_LOOP macro instead — confirm which was intended.
// NOTE(review): data[] is read (neighbor averaging) and later written by
// different threads with no synchronization between them, so results are
// run-dependent; presumably acceptable for a timing micro-benchmark.
__global__ void test_gpu_global(unsigned int * const data,
const unsigned int num_elements,
const unsigned int loop_iter)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
// compute the average of this thread's left and right neighbors (wrapping)
// Place value directly in register
float tmp = (data[tid > 0 ? tid - 1 : NUM_ELEMENTS-1] + data[tid < NUM_ELEMENTS-1 ? tid + 1 : 0]) * 0.5f;
// hot loop: one global-memory load per iteration
for(int i = 0; i < KERNEL_LOOP; i++) {
tmp += data[tid];
}
data[tid] = tmp;
__syncthreads();
}
}
// Benchmark kernel: accumulate entirely in a REGISTER — `tmp += tmp` doubles
// the value each iteration with no memory traffic inside the hot loop.
// NOTE(review): `loop_iter` is accepted but the loop uses KERNEL_LOOP; and the
// repeated doubling will overflow float to inf for large KERNEL_LOOP —
// presumably irrelevant for a pure timing comparison, but confirm.
__global__ void test_gpu_register(unsigned int * const data,
const unsigned int num_elements,
const unsigned int loop_iter)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
// compute the average of this thread's left and right neighbors (wrapping)
// Place value directly in register
float tmp = (data[tid > 0 ? tid - 1 : NUM_ELEMENTS-1] + data[tid < NUM_ELEMENTS-1 ? tid + 1 : 0]) * 0.5f;
// hot loop: register-only arithmetic, no loads
for(int i = 0; i < KERNEL_LOOP; i++) {
tmp += tmp;
}
data[tid] = tmp;
__syncthreads();
}
}
// Benchmark kernel: the value read in the hot loop lives in SHARED memory
// (compare against the global/register variants above).
// NOTE(review): like its siblings, this ignores `loop_iter` in favor of the
// KERNEL_LOOP macro, and neighbors are read while other threads overwrite
// data[] — confirm both are intended for this micro-benchmark.
__global__ void test_gpu_shared(unsigned int * const data,
                                const unsigned int num_elements,
                                const unsigned int loop_iter)
{
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __shared__ unsigned int tmp_0[NUM_ELEMENTS];
    if(tid < num_elements)
    {
        // Average of the wrapped left/right neighbors, staged in shared memory.
        tmp_0[tid] = (data[tid > 0 ? tid - 1 : NUM_ELEMENTS-1] + data[tid < NUM_ELEMENTS-1 ? tid + 1 : 0]) * 0.5f;
        // FIX: `tmp` was accumulated with += before ever being initialized,
        // so the stored result was undefined.
        float tmp = 0.0f;
        for(int i = 0; i < KERNEL_LOOP; i++) {
            tmp += tmp_0[tid];
        }
        data[tid] = tmp;
        __syncthreads();
    }
}
// Create the pair of timing events and stamp the starting marker on stream 0.
__host__ void start_measure(cudaEvent_t * start, cudaEvent_t *stop){
    cudaEventCreate(start, 0);
    cudaEventCreate(stop, 0);
    cudaEventRecord(*start, 0);
}
// Stamp the stop marker, wait for it to complete, then report the elapsed
// time between start and stop in milliseconds via `time`.
__host__ void stop_measure(cudaEvent_t* start, cudaEvent_t * stop, float &time) {
    cudaEventRecord(*stop, 0);
    cudaEventSynchronize(*stop);
    cudaEventElapsedTime(&time, *start, *stop);
}
// Create display function to ensure output is the same for both kernels
// Print the first 20 input/output pairs so the kernels' results can be
// compared by eye.
__host__ void display_data(const unsigned int * const in_data, const unsigned int * const out_data) {
    for (int idx = 0; idx < 20; ++idx)
        printf("i=%i, input_data = %u, output_data = %u\n", idx, in_data[idx], out_data[idx]);
}
// Runs the three accumulation micro-benchmarks (global / shared / register)
// on the same random input, printing each kernel's timing and a sample of its
// input/output data.
__host__ void gpu_kernel(void)
{
    const unsigned int num_elements = NUM_ELEMENTS;
    const unsigned int num_threads = MAX_THREADS_PER_BLOCK;
    const unsigned int num_blocks = num_elements/num_threads;
    const unsigned int num_bytes = num_elements * sizeof(unsigned int);
    const unsigned int kernel_loop = KERNEL_LOOP;
    // Pinned host buffers for faster transfers.
    unsigned int * host_pinned;
    unsigned int * host_pinned_final;
    cudaMallocHost((void**)&host_pinned, num_bytes);
    cudaMallocHost((void**)&host_pinned_final, num_bytes);
    generate_rand_data(host_pinned);
    // Device working buffer shared by all three tests.
    unsigned int * data_gpu;
    cudaMalloc(&data_gpu, num_bytes);
    float time;
    cudaEvent_t kernel_start, kernel_stop;
    cudaEvent_t kernel_start1, kernel_stop1;
    cudaEvent_t kernel_start2, kernel_stop2;
    // Test 1: global-memory accumulation.  (FIX: the original section comments
    // mislabeled which kernel each block of code actually ran.)
    cudaMemcpy(data_gpu, host_pinned, num_bytes, cudaMemcpyHostToDevice);
    start_measure(&kernel_start, &kernel_stop);
    test_gpu_global<<<num_blocks, num_threads>>>(data_gpu, num_elements, kernel_loop);
    stop_measure(&kernel_start, &kernel_stop, time);
    printf("test_gpu_global took %f\n", time);
    cudaMemcpy(host_pinned_final, data_gpu, num_bytes,cudaMemcpyDeviceToHost);
    display_data(host_pinned, host_pinned_final);
    // Test 2: shared-memory accumulation (refresh the input first).
    cudaMemcpy(data_gpu, host_pinned, num_bytes, cudaMemcpyHostToDevice);
    start_measure(&kernel_start1, &kernel_stop1);
    test_gpu_shared <<<num_blocks, num_threads>>>(data_gpu, num_elements, kernel_loop);
    stop_measure(&kernel_start1, &kernel_stop1, time);
    printf("test_gpu_shared took %f\n", time);
    cudaMemcpy(host_pinned_final, data_gpu, num_bytes,cudaMemcpyDeviceToHost);
    display_data(host_pinned, host_pinned_final);
    // Test 3: register accumulation (refresh the input first).
    cudaMemcpy(data_gpu, host_pinned, num_bytes, cudaMemcpyHostToDevice);
    start_measure(&kernel_start2, &kernel_stop2);
    test_gpu_register <<<num_blocks, num_threads>>>(data_gpu, num_elements, kernel_loop);
    stop_measure(&kernel_start2, &kernel_stop2, time);
    printf("test_gpu_register took %f\n", time);
    cudaMemcpy(host_pinned_final, data_gpu, num_bytes,cudaMemcpyDeviceToHost);
    display_data(host_pinned, host_pinned_final);
    // FIX: the events created by start_measure were never destroyed — release them.
    cudaEventDestroy(kernel_start);  cudaEventDestroy(kernel_stop);
    cudaEventDestroy(kernel_start1); cudaEventDestroy(kernel_stop1);
    cudaEventDestroy(kernel_start2); cudaEventDestroy(kernel_stop2);
    cudaFree((void* ) data_gpu);
    cudaFreeHost(host_pinned_final);
    cudaFreeHost(host_pinned);
    cudaDeviceReset();
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
// Entry point: report maxThreadsPerBlock for every visible CUDA device,
// then run the kernel-comparison driver.
int main(void) {
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("maxThreadsPerBlock: %d\n", props.maxThreadsPerBlock);
    }
    gpu_kernel();
    return EXIT_SUCCESS;
}
20,774 | /* Molecular dynamics simulation linear code for binary Lennard-Jones liquid
under NVE ensemble; Author: You-Liang Zhu, Email: youliangzhu@ciac.ac.cn
Copyright: You-Liang Zhu
This code is free: you can redistribute it and/or modify it under the terms
of the GNU General Public License.*/
#include <ctype.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// check CUDA error
// Abort the program if the most recent CUDA API call or kernel launch
// recorded an error; `msg` identifies the call site in the diagnostic.
void checkCUDAError(const char *msg) {
  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
  }
}
// matrix multiplication C=A*B by device
// matrix multiplication C=A*B by device.
// One thread computes one element of C. Launched on a 2D grid where x
// indexes rows of A (ha of them) and y indexes columns of B (wb of them);
// wa is the shared inner dimension. All matrices are row-major.
extern "C" __global__ void matrix_kernel(float *d_c, float *d_a, float *d_b,
                                         int wa, int ha, int wb) {
  float sum = 0;
  // Bind this thread to the element at row `row`, column `col` of C.
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard the grid tail when dimensions are not multiples of the block size.
  if (row < ha && col < wb) {
    for (int i = 0; i < wa; i++) {
      sum += d_a[row * wa + i] * d_b[i * wb + col];
    }
    d_c[row * wb + col] = sum;
  }
}
// matrix multiplication C=A*B by host
// matrix multiplication C=A*B by host (reference implementation).
// h_a is ha x wa, h_b is wa x wb, result h_d is ha x wb, all row-major.
void MatrixMulCPU(float *h_d, float *h_a, float *h_b, int wa, int ha, int wb) {
  for (int row = 0; row < ha; row++) {
    const float *a_row = h_a + row * wa;
    float *out_row = h_d + row * wb;
    for (int col = 0; col < wb; col++) {
      float acc = 0;
      for (int k = 0; k < wa; k++) {
        acc += a_row[k] * h_b[k * wb + col];
      }
      out_row[col] = acc;
    }
  }
}
// Free all device and host buffers; shared by the failure and success paths.
static void release_buffers(float *d_a, float *d_b, float *d_c,
                            float *h_a, float *h_b, float *h_c, float *h_d) {
  // free device memory
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  // free host memory
  free(h_a);
  free(h_b);
  free(h_c);
  free(h_d);
}
// Multiply two random matrices on the device, verify element-by-element
// against the host reference, and report Success / Failed.
int main(int argc, char **argv) {
  int wa = 128;    // the width of matrix A
  int ha = 64;     // the hight of matrix A
  int wb = ha;     // the width of matrix B
  int N = wa * ha; // the number of elements
  float *h_a, *h_b, *h_c, *h_d; // pointer for host memory
  float *d_a, *d_b, *d_c;       // pointer for device memory
  cudaSetDevice(0); // set GPU ID for computation
  int numThreadsPerBlock = 16; // define block size
  int numBlocks_x =
      (int)ceil((float)ha / (float)numThreadsPerBlock); // define grid size
  int numBlocks_y =
      (int)ceil((float)wb / (float)numThreadsPerBlock); // define grid size
  // Part 1 of 5: allocate host and device memory
  size_t memSize = N * sizeof(float);
  h_a = (float *)malloc(memSize);
  h_b = (float *)malloc(memSize);
  h_c = (float *)malloc(ha * wb * sizeof(float));
  h_d = (float *)malloc(ha * wb * sizeof(float));
  cudaMalloc((void **)&d_a, memSize);
  cudaMalloc((void **)&d_b, memSize);
  cudaMalloc((void **)&d_c, ha * wb * sizeof(float));
  // Part 2 of 5: initiate host arrays with uniform random values in [0,1]
  // (same rand() call order as before: one draw for A, then one for B).
  for (unsigned int i = 0; i < N; i++) {
    h_a[i] = (float)rand() / (float)RAND_MAX;
    h_b[i] = (float)rand() / (float)RAND_MAX;
  }
  // matrix multiplication by host — reference result lands in h_d
  MatrixMulCPU(h_d, h_a, h_b, wa, ha, wb);
  // Part 3 of 5: copy data from host to device
  cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
  checkCUDAError("cudaMemcpy");
  // Part 4 of 5: launch kernel
  dim3 dimGrid(numBlocks_x, numBlocks_y);
  dim3 dimBlock(numThreadsPerBlock, numThreadsPerBlock);
  matrix_kernel<<<dimGrid, dimBlock>>>(d_c, d_a, d_b, wa, ha, wb);
  // check if kernel execution generated an error
  checkCUDAError("kernel execution");
  // Part 5 of 5: device to host copy (synchronizes with the kernel)
  cudaMemcpy(h_c, d_c, ha * wb * sizeof(float), cudaMemcpyDeviceToHost);
  // Check for any CUDA errors
  checkCUDAError("cudaMemcpy");
  // check the results from device.
  // BUG FIX: this comparison used abs(), which can resolve to the integer
  // overload and truncate every sub-1.0 difference to 0, silently passing
  // wrong results; fabsf compares the float magnitudes.
  for (unsigned int i = 0; i < ha * wb; i++) {
    if (fabsf(h_c[i] - h_d[i]) > 0.00001) {
      fprintf(stderr, "Failed!!! %d %f %f\n", i, h_c[i], h_d[i]);
      release_buffers(d_a, d_b, d_c, h_a, h_b, h_c, h_d);
      exit(-1);
    }
  }
  release_buffers(d_a, d_b, d_c, h_a, h_b, h_c, h_d);
  // If the program makes it this far, then the results are correct and
  // there are no run-time errors. Good work!
  printf("Success!\n");
  return 0;
}
|
20,775 | #include "utils.cu"
|
20,776 | #include <stdio.h>
// Convert planar YUV 4:2:0 to planar RGB, one thread per pixel.
// The Y plane is width*height samples; U and V are quarter-size
// (width/2 x height/2), shared by each 2x2 pixel block. Coefficients are
// the classic YCbCr->RGB constants with a 128 chroma offset.
// NOTE(review): output is not clamped to [0,255], and srcT is presumably
// an 8-bit sample type — confirm with the instantiations below.
template<typename srcT, typename dstT>
__global__
void yuv2rgb_kernel(srcT *src, dstT *dst, int width, int height)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard the grid tail.
    if (i >= width || j >= height)
        return;
    int yIdx = j * width + i;
    // Each 2x2 pixel block maps to one chroma sample.
    int uvIdx = (j/2) * (width/2) + i/2;
    // Plane base pointers inside the packed planar buffers.
    srcT *Y = src;
    srcT *U = Y + width * height;
    srcT *V = U + width * height / 4;
    dstT *R = dst;
    dstT *G = R + width * height;
    dstT *B = G + width * height;
    R[yIdx] = Y[yIdx] + 1.370705 * (V[uvIdx] - 128.0);
    G[yIdx] = Y[yIdx] - 0.698001 * (V[uvIdx] - 128.0) - 0.337633 * (U[uvIdx] - 128.0);
    B[yIdx] = Y[yIdx] + 1.732446 * (U[uvIdx] - 128.0);
}
// Launch the planar YUV->RGB conversion over the full image and wait for it
// to finish. Always returns 0; kernel/launch errors are not checked here.
template<typename srcT, typename dstT>
int yuv2rgb(srcT *src, dstT *dst, int width, int height)
{
    const dim3 threads(32, 12);
    const dim3 blocks((width  + threads.x - 1) / threads.x,
                      (height + threads.y - 1) / threads.y);
    yuv2rgb_kernel<<<blocks, threads>>>(src, dst, width, height);
    cudaDeviceSynchronize();
    return 0;
}
template
int yuv2rgb(unsigned char *src, float *dst, int width, int height);
|
20,777 | /*
* usage: nvcc ./stream_test_v3.cu -o ./stream_v3
* nvvp ./stream_v3 ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v3 )
*
* purpose: just see what commenting out the final call to the default
* stream would cause our concurrency profile to look like
*
* result: essentially concurrent by default as long as execution configurations
* not specifying a particular stream aren't among the kernel calls
*
*/
#include <stdio.h>
const int N = 1 << 20;
// Fill x[0..n) with sqrt(pi^i) using a single-block, block-stride loop.
// Intended to be launched as <<<1, T, 0, stream>>>.
__global__ void kernel(float *x, int n)
{
    for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
        x[idx] = sqrt(pow(3.14159, idx));
    }
}
// Launch one independent worker kernel per stream; with nothing issued to
// the legacy default stream, the eight kernels can execute concurrently.
// Then synchronize each stream and print the first values, which should be
// identical across streams since every kernel does the same computation.
int main()
{
    const int num_streams = 8;
    // BUG FIX: removed `float localx[N]` — an unused 4 MB stack array
    // (its only uses were commented out) that risked a stack overflow.
    cudaStream_t streams[num_streams];
    float *data[num_streams];
    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&streams[i]);
        cudaMallocManaged(&data[i], N * sizeof(float));
        // launch one worker kernel per stream
        kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
    }
    // and a quick check of results because individual streams
    // should have done identical calculations !
    for (int i = 0; i < num_streams; i++) {
        cudaStreamSynchronize(streams[i]);
        printf("*** %d %12.6lf%12.6lf%12.6lf\n", i, data[i][0], data[i][1], data[i][2]);
    }
    // Release per-stream resources (previously leaked until process exit).
    for (int i = 0; i < num_streams; i++) {
        cudaFree(data[i]);
        cudaStreamDestroy(streams[i]);
    }
    cudaDeviceReset();
    return 0;
}
|
20,778 | //***************************************************************************
// Broday Walker
// Dr. Eduardo Colmenares
//
//
//***************************************************************************
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include <queue>
using namespace std;
const int GRID_X = 1; // Number of blocks in the grid on the x-axis
const int BLOCK_X = 7; // Number of threads on the x-axis per block
const int SIZE = 49;
// Hold the adjacency matrix in constant memory on the device as it will not be
// modified over the lifetime of the program
__constant__ int adjMat_d[SIZE];
// Kernel stub for Dijkstra's algorithm: reads the usual thread/block
// indices but performs no work yet.
// NOTE(review): dist_d/parent_d/visited_d are never written here, so main
// prints back exactly what it copied in.
__global__ void dijkstra(int *dist_d, int *parent_d, int *visited_d, int s, int width)
{
    int tid_x = threadIdx.x;
    int block_x = blockIdx.x;
    int grid_x = gridDim.x;
}
// Test kernel verifying the adjacency matrix reached device constant
// memory: each thread copies its column of adjMat_d into `test`.
__global__ void printAdjMat(int *test, int width)
{
    int col = threadIdx.x;
    for (int row = 0; row < width; row++) {
        test[row * width + col] = adjMat_d[row * width + col];
    }
}
// Read graphs from stdin until a vertex count of 0: for each graph, build a
// linearized adjacency matrix, copy it to device constant memory, launch
// the (currently stub) dijkstra kernel, and print distances and parents.
// NOTE(review): adj_d is declared but never allocated or used; `end` is
// read but unused; INT_MAX needs <climits> on some toolchains; and
// `vertices*vertices` is not checked against the SIZE (49) capacity of the
// constant-memory array — confirm inputs are always <= 7 vertices.
int main()
{
    int vertices;
    cin >> vertices;
    while(vertices != 0)
    {
        // Host Declarations
        int adj_size, dist_size, parent_size, visited_size, start, end;
        int *adjMat, *dist, *parent, *visited;
        // Device declarations
        int *adj_d, *dist_d, *parent_d, *visited_d;
        // For error checking
        cudaError_t cudaErr;
        // This is a linearized adjacency matrix
        adjMat = new int[vertices * vertices];
        dist = new int[vertices];
        parent = new int[vertices];
        visited = new int[vertices];
        // Find size of arrays in bytes
        adj_size = vertices * vertices * sizeof(int);
        dist_size = vertices * sizeof(int); // Equal to the number of vertices
        parent_size = vertices * sizeof(int);
        visited_size = vertices * sizeof(int);
        // Fill the adjacency-matrix with -1 (meaning "no edge")
        for(int i = 0; i < vertices * vertices; i++)
            adjMat[i] = -1;
        // A vertex does not have a parent if its value is -1 (after running
        // Dijkstra's algorithm, this will only be true for the starting vertex).
        for(int i = 0; i < vertices; i++)
        {
            dist[i] = INT_MAX;
            parent[i] = -1;
            visited[i] = 0;
        }
        // Fill the adjacency matrix with data
        for(int i = 0; i < vertices; i++)
        {
            // Temporary storage for adjacent vertices and the weight of the edge
            int num_edges, u, w;
            // Read in the number of adjacent vertices for the ith vertex
            cin >> num_edges;
            // Add the adjacent vertices to the linearized adjacency-matrix for the ith vertex
            for(int j = 0; j < num_edges; j++)
            {
                cin >> u >> w;
                int offset = i * vertices + u;
                // Keep only the lightest edge between a pair of vertices.
                if(adjMat[offset] == -1 || w < adjMat[offset])
                    adjMat[offset] = w;
            }
        }
        cin >> start >> end;
        dist[start] = 0;
        // Print weights for testing
        printf("Test print of all weights: \n");
        printf("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
        for(int i = 0; i < vertices; i++)
        {
            cout << "Row " << i << ": ";
            for(int j = 0; j < vertices; j++)
                cout << "{" << adjMat[i * vertices + j] << "} ";
            cout << '\n';
        }
        printf("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
        cout << "\n\n";
        // Copy the adjacency-matrix to constant memory on the device
        cudaErr = cudaMemcpyToSymbol(adjMat_d, adjMat, adj_size);
        if(cudaErr != cudaSuccess)
        {
            printf("Error copying from host to device symbol\n");
            return 1;
        }
        else
            printf("Successful copy from host to device symbol\n");
        // Set the dimensions of the grid and blocks
        dim3 gridDim(GRID_X, 1);
        dim3 blockDim(BLOCK_X, 1);
        // Allocate memory on the device and copy the host arrays over
        cudaMalloc((void **)&dist_d, dist_size);
        cudaMemcpy(dist_d, dist, dist_size, cudaMemcpyHostToDevice);
        cudaMalloc((void **)&parent_d, parent_size);
        cudaMemcpy(parent_d, parent, parent_size, cudaMemcpyHostToDevice);
        cudaMalloc((void**)&visited_d, visited_size);
        cudaMemcpy(visited_d, visited, visited_size, cudaMemcpyHostToDevice);
        // Invoke the kernel
        dijkstra<<<gridDim, blockDim>>>(dist_d, parent_d, visited_d, start, vertices);
        // Copy the results back
        cudaMemcpy(dist, dist_d, dist_size, cudaMemcpyDeviceToHost);
        cudaMemcpy(parent, parent_d, parent_size, cudaMemcpyDeviceToHost);
        // Free the device memory
        cudaFree(dist_d);
        cudaFree(parent_d);
        cudaFree(visited_d);
        // Print the distances from start vertex s
        for(int i = 0; i < vertices; i++)
            printf("%d to %d: %d\n", i, start, dist[i]);
        printf("\n\n");
        // Print the parent array
        for(int i = 0; i < vertices; i++)
            printf("%d: %d\n", i, parent[i]);
        printf("\n\n");
        // Free the host memory
        delete [] adjMat;
        delete [] dist;
        delete [] parent;
        delete [] visited;
        cin >> vertices;
    }
    return 0;
}
20,779 | #include <iostream>
#include "vector_summation.cuh"
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <cuda.h>
// Allocate a managed copy of the host vector on the GPU.
// BUG FIX: length_ (read by sum()) was never initialized in this
// constructor, so the kernel iterated over a garbage element count; record
// it here. (Header not visible — confirm length_ has no default init.)
GpuVector::GpuVector(int* vec_cpu,int nbytes){
    /* allocate GPU mem */
    cudaMallocManaged(&vec_gpu,nbytes);
    cudaMemcpy(vec_gpu, vec_cpu, nbytes, cudaMemcpyHostToDevice);
    length_ = nbytes / sizeof(int);
}
// Launch the single-thread summation kernel; the result is written to
// vec_sum. NOTE(review): no synchronization here — callers must sync
// before reading vec_sum.
void GpuVector::sum()
{vector_sum_kernel<<<1, 1>>>(vec_gpu,length_,vec_sum);}
// Release the managed device buffer.
GpuVector::~GpuVector(){cudaFree(vec_gpu);}
// Build a host vector of 100 tens, sum it on the GPU, and compare against
// the same sum computed on the CPU.
int main()
{   /*Declare a vector on the host*/
    int* vec_cpu;
    int N=100;
    int nbytes = N * sizeof(int);
    vec_cpu = (int *) malloc(nbytes);
    for (int i = 0; i < N; ++i)vec_cpu[i]=10;
    // Copy the data to the GPU and sum it there.
    GpuVector vec_gpu(vec_cpu,nbytes);
    std::cout<<vec_gpu.vec_sum<<std::endl;  // value before the kernel runs
    vec_gpu.sum();
    // BUG FIX: the kernel runs asynchronously, and vec_sum was previously
    // read before any synchronization — a race on managed memory. Wait for
    // the kernel before touching the result. (Also replaces the deprecated
    // cudaThreadSynchronize that ran before any kernel was even launched.)
    cudaDeviceSynchronize();
    std::cout<<vec_gpu.vec_sum<<std::endl;
    // CPU reference sum for comparison.
    int vec_cpu_sum=0;
    for( size_t i = 0 ; i < N ; i++ )
    {vec_cpu_sum+=vec_cpu[i];}
    std::cout<<vec_cpu_sum<<std::endl;
    free(vec_cpu);
}
|
20,780 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <time.h>
#include <cfloat>
#define min(a, b) (a < b ? a : b)
#define max(a, b) (a > b ? a : b)
#define abs(a) (a > 0 ? a : -1 * a)
#define MAX_BLOCKS 50000
// Finalization step of one k-means iteration. Launched as a single block of
// (n_clusters x d) threads: each thread owns one (cluster, dimension) pair,
// divides the accumulated coordinate sum by the cluster's member count, and
// clears converged[0] if that coordinate moved by more than 0.01.
__global__ void kMeansStep2(int *d_counts, float *d_new_clusters, float *d_prev_clusters, int *converged, int n_clusters, int d){
    int cluster = threadIdx.x;
    int dim = threadIdx.y;
    // Debug trace: one line per cluster, printed by its dim-0 thread.
    if (dim == 0){
        printf("cluster number %d, count %d\n", cluster, d_counts[cluster]);
    }
    // Clamp to 1 so empty clusters do not divide by zero.
    int count = max(1, d_counts[cluster]);
    d_new_clusters[cluster * d + dim] /= (float)count;
    // NOTE(review): abs on a float relies on CUDA's device overload here —
    // fabsf would be unambiguous.
    if (abs(d_new_clusters[cluster * d + dim] - d_prev_clusters[cluster * d + dim]) > 0.01)
        atomicAnd(&converged[0], 0);
}
// Assignment step of one k-means iteration. One block per data point
// (block-striding by MAX_BLOCKS when n_data is larger), with a
// (n_clusters x d) thread layout per block.
// Dynamic shared memory layout (sized by sharedMemSize1 in main):
//   shared_dist[n_clusters] | shared_data[d] | shared_prev_clusters[n_clusters*d]
// Each block computes the squared distance from its point to every cluster,
// picks the nearest, bumps that cluster's member count, and accumulates the
// point's coordinates into d_new_clusters for later averaging.
__global__ void kMeansStep1(float *d_data, float *d_prev_clusters, float *d_new_clusters, int *d_counts, int n_data, int n_clusters, int d){
    int data = blockIdx.x;
    int cluster = threadIdx.x;
    int dim = threadIdx.y;
    // if (blockIdx.x > 5000)
    //     printf("data number %d\n", blockIdx.x);
    // `data` advances uniformly for all threads of the block, so the
    // __syncthreads() calls inside the loop are reached by every thread.
    while(data < n_data){
        extern __shared__ float s[];
        float *shared_dist = s;
        float *shared_data = (float*)&shared_dist[n_clusters];
        float *shared_prev_clusters = (float*)&shared_data[d];
        shared_dist[cluster] = 0.0;
        shared_prev_clusters[cluster * d + dim] = d_prev_clusters[cluster * d + dim];
        if (cluster == 0)
            shared_data[dim] = d_data[data * d + dim];
        __syncthreads();
        // Per-dimension squared difference, reduced into shared_dist[cluster].
        float tmp_dist = shared_prev_clusters[cluster * d + dim] - shared_data[dim];
        float dist_data_cluster_dim = tmp_dist * tmp_dist;
        atomicAdd(&shared_dist[cluster], dist_data_cluster_dim);
        __syncthreads();
        // Thread (0,0) selects the nearest cluster for this point.
        __shared__ int best_cluster;
        if (cluster == 0 && dim == 0){
            float best_distance = FLT_MAX;
            best_cluster = -1;
            for (int j=0; j<n_clusters; j++)
                if (shared_dist[j] < best_distance){
                    best_distance = shared_dist[j];
                    best_cluster = j;
                }
            printf("data point number %d assigned to cluster %d\n", data, best_cluster);
            atomicAdd(&d_counts[best_cluster], 1);
        }
        __syncthreads();
        // One thread per dimension accumulates the point into the new centroid.
        if (cluster == 0){
            atomicAdd(&d_new_clusters[best_cluster * d + dim], shared_data[dim]);
            // printf("%f is added to new clusters %d , %d\n", shared_data[dim], best_cluster, dim);
        }
        data += MAX_BLOCKS;
        __syncthreads();
    }
}
// K-means on a fixed 30-point, 2-D data set with 4 randomly initialized
// centroids. The host drives the two-kernel loop (assignment, then average
// + convergence test), ping-ponging the prev/new centroid buffers d1/d2.
// NOTE(review): cudaThreadSynchronize is deprecated (cudaDeviceSynchronize),
// and cudaMemset takes an int value — the 0.0 literal only works because it
// converts to 0. Device buffers are never freed before exit.
int main(){
    srand((unsigned int)time(NULL));
    int n_data = 30;
    int n_clusters = 4;
    int d = 2;
    int size_data = sizeof(float) * n_data * d;
    int size_clusters = sizeof(float) * n_clusters * d;
    int *h_converged = (int *)malloc(1 * sizeof(int));
    float *h_data = (float *)malloc(size_data);
    float *h_clusters = (float *)malloc(size_clusters);
    // Fixed data set: x/y pairs are interleaved into h_data.
    int data_x[30] = {25,34,22,27,33,33,31,22,35,34,67,54,57,43,50,57,59,52,65,47,49,48,35,33,44,45,38,43,51,46};
    int data_y[30] = {79,51,53,78,59,74,73,57,69,75,51,32,40,47,53,36,35,58,59,50,25,20,14,12,20,5,29,27,8,7};
    for (int i=0; i<n_data*d; i++){
        // h_data[i] = ((float)rand()/(float)(RAND_MAX)) * 100.0;
        if (i % 2 == 0)
            h_data[i] = data_x[i / 2];
        else
            h_data[i] = data_y[i / 2];
        printf("%f ", h_data[i]);
        if ((i+1) % d == 0)
            printf("\n");
    }
    // Random initial centroids in [0, 100).
    printf("\ninitial clusters:\n");
    for (int i=0; i<n_clusters*d; i++){
        h_clusters[i] = ((float)rand()/(float)(RAND_MAX)) * 100.0;
        printf("%f ", h_clusters[i]);
        if ((i+1) % d == 0)
            printf("\n");
    }
    float *d_data, *d_new_clusters, *d_prev_clusters;
    int *d_converged, *d_counts;
    cudaMalloc((void **)&d_data, size_data);
    cudaMalloc((void **)&d_new_clusters, size_clusters);
    cudaMalloc((void **)&d_prev_clusters, size_clusters);
    cudaMalloc((void **)&d_counts, n_clusters * sizeof(int));
    cudaMalloc((void **)&d_converged, sizeof(int));
    cudaMemcpy(d_data, h_data, size_data, cudaMemcpyHostToDevice);
    cudaMemcpy(d_prev_clusters, h_clusters, size_clusters, cudaMemcpyHostToDevice);
    // d1 = previous centroids, d2 = new centroids; swapped each iteration.
    float *d1 = d_prev_clusters;
    float *d2 = d_new_clusters;
    dim3 bd(n_clusters, d);
    int n_data_blocks = min(n_data, MAX_BLOCKS);
    // Shared memory: dist[n_clusters] + data point[d] + centroids[n_clusters*d].
    int sharedMemSize1 = (n_clusters + d + n_clusters * d) * sizeof(float);
    int iteration = 1;
    clock_t start_time = clock();
    while(1){
        cudaMemset(d2, 0.0, size_clusters);
        cudaMemset(d_counts, 0, n_clusters * sizeof(int));
        kMeansStep1 <<<n_data_blocks, bd, sharedMemSize1>>> (d_data, d1, d2, d_counts, n_data, n_clusters, d);
        cudaThreadSynchronize();
        // Assume converged; kMeansStep2 clears the flag if any centroid moved.
        h_converged[0] = 1;
        cudaMemcpy(d_converged, h_converged, sizeof(int), cudaMemcpyHostToDevice);
        kMeansStep2 <<<1, bd>>> (d_counts, d2, d1, d_converged, n_clusters, d);
        cudaThreadSynchronize();
        cudaMemcpy(h_clusters, d1, size_clusters, cudaMemcpyDeviceToHost);
        printf("\niteration %d prev cluster:\n", iteration);
        for(int i=0; i<n_clusters*d; i++){
            printf("%f ", h_clusters[i]);
            if ((i+1) % d == 0)
                printf("\n");
        }
        cudaMemcpy(h_clusters, d2, size_clusters, cudaMemcpyDeviceToHost);
        printf("\niteration %d new cluster:\n", iteration);
        for(int i=0; i<n_clusters*d; i++){
            printf("%f ", h_clusters[i]);
            if ((i+1) % d == 0)
                printf("\n");
        }
        cudaMemcpy(h_converged, d_converged, sizeof(int), cudaMemcpyDeviceToHost);
        if (h_converged[0] == 1){
            cudaMemcpy(h_clusters, d2, size_clusters, cudaMemcpyDeviceToHost);
            break;
        }
        // Swap roles of the two centroid buffers for the next iteration.
        d1 = d1 == d_prev_clusters ? d_new_clusters : d_prev_clusters;
        d2 = d2 == d_prev_clusters ? d_new_clusters : d_prev_clusters;
        iteration += 1;
        // Hard cap to guarantee termination.
        if (iteration > 10)
            break;
    }
    clock_t end_time = clock();
    printf("\nFinished!!\n");
    printf("Final clusters:\n");
    for (int i=0; i<n_clusters*d; i++){
        printf("%f ", h_clusters[i]);
        if ((i+1) % d == 0)
            printf("\n");
    }
    printf("number of iterations is %d \n", iteration);
    double total_time = ((double) (end_time - start_time)) / CLOCKS_PER_SEC;
    printf("total time: %f\n", total_time);
    return 0;
}
|
20,781 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define PREFIX_LENGTH 4
#define MAX_PASSWORD_LENGTH 6
#define ALPHABET_SIZE 26
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
/*
 * Compute the MD5 digest of `data` (`length` bytes) and return the four
 * digest words through a1..d1. Single-block implementation: the message
 * plus the 0x80 pad byte must fit in vals[14] (56 bytes), so it requires
 * length <= 55. Constants and shift amounts follow RFC 1321.
 */
__host__ __device__ void md5_vfy(unsigned char* data, unsigned int length, unsigned int *a1, unsigned int *b1, unsigned int *c1, unsigned int *d1)
{
	// Standard MD5 initialization vector.
	const unsigned int a0 = 0x67452301;
	const unsigned int b0 = 0xEFCDAB89;
	const unsigned int c0 = 0x98BADCFE;
	const unsigned int d0 = 0x10325476;
	unsigned int a = 0;
	unsigned int b = 0;
	unsigned int c = 0;
	unsigned int d = 0;
	// Pack the message little-endian into 32-bit words.
	unsigned int vals[14] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
	int i = 0;
	for (i = 0; i < length; i++)
	{
		vals[i / 4] |= data[i] << ((i % 4) * 8);
	}
	// Append the mandatory 0x80 padding byte right after the message.
	vals[i / 4] |= 0x80 << ((i % 4) * 8);
	unsigned int bitlen = length * 8;
	// Aliases mapping the block words (and bit length) to the round inputs.
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
	//Initialize hash value for this chunk:
	a = a0;
	b = b0;
	c = c0;
	d = d0;
	/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
	FF(a, b, c, d, in0, S11, 3614090360); /* 1 */
	FF(d, a, b, c, in1, S12, 3905402710); /* 2 */
	FF(c, d, a, b, in2, S13, 606105819); /* 3 */
	FF(b, c, d, a, in3, S14, 3250441966); /* 4 */
	FF(a, b, c, d, in4, S11, 4118548399); /* 5 */
	FF(d, a, b, c, in5, S12, 1200080426); /* 6 */
	FF(c, d, a, b, in6, S13, 2821735955); /* 7 */
	FF(b, c, d, a, in7, S14, 4249261313); /* 8 */
	FF(a, b, c, d, in8, S11, 1770035416); /* 9 */
	FF(d, a, b, c, in9, S12, 2336552879); /* 10 */
	FF(c, d, a, b, in10, S13, 4294925233); /* 11 */
	FF(b, c, d, a, in11, S14, 2304563134); /* 12 */
	FF(a, b, c, d, in12, S11, 1804603682); /* 13 */
	FF(d, a, b, c, in13, S12, 4254626195); /* 14 */
	FF(c, d, a, b, in14, S13, 2792965006); /* 15 */
	FF(b, c, d, a, in15, S14, 1236535329); /* 16 */
	/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
	GG(a, b, c, d, in1, S21, 4129170786); /* 17 */
	GG(d, a, b, c, in6, S22, 3225465664); /* 18 */
	GG(c, d, a, b, in11, S23, 643717713); /* 19 */
	GG(b, c, d, a, in0, S24, 3921069994); /* 20 */
	GG(a, b, c, d, in5, S21, 3593408605); /* 21 */
	GG(d, a, b, c, in10, S22, 38016083); /* 22 */
	GG(c, d, a, b, in15, S23, 3634488961); /* 23 */
	GG(b, c, d, a, in4, S24, 3889429448); /* 24 */
	GG(a, b, c, d, in9, S21, 568446438); /* 25 */
	GG(d, a, b, c, in14, S22, 3275163606); /* 26 */
	GG(c, d, a, b, in3, S23, 4107603335); /* 27 */
	GG(b, c, d, a, in8, S24, 1163531501); /* 28 */
	GG(a, b, c, d, in13, S21, 2850285829); /* 29 */
	GG(d, a, b, c, in2, S22, 4243563512); /* 30 */
	GG(c, d, a, b, in7, S23, 1735328473); /* 31 */
	GG(b, c, d, a, in12, S24, 2368359562); /* 32 */
	/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
	HH(a, b, c, d, in5, S31, 4294588738); /* 33 */
	HH(d, a, b, c, in8, S32, 2272392833); /* 34 */
	HH(c, d, a, b, in11, S33, 1839030562); /* 35 */
	HH(b, c, d, a, in14, S34, 4259657740); /* 36 */
	HH(a, b, c, d, in1, S31, 2763975236); /* 37 */
	HH(d, a, b, c, in4, S32, 1272893353); /* 38 */
	HH(c, d, a, b, in7, S33, 4139469664); /* 39 */
	HH(b, c, d, a, in10, S34, 3200236656); /* 40 */
	HH(a, b, c, d, in13, S31, 681279174); /* 41 */
	HH(d, a, b, c, in0, S32, 3936430074); /* 42 */
	HH(c, d, a, b, in3, S33, 3572445317); /* 43 */
	HH(b, c, d, a, in6, S34, 76029189); /* 44 */
	HH(a, b, c, d, in9, S31, 3654602809); /* 45 */
	HH(d, a, b, c, in12, S32, 3873151461); /* 46 */
	HH(c, d, a, b, in15, S33, 530742520); /* 47 */
	HH(b, c, d, a, in2, S34, 3299628645); /* 48 */
	/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
	II(a, b, c, d, in0, S41, 4096336452); /* 49 */
	II(d, a, b, c, in7, S42, 1126891415); /* 50 */
	II(c, d, a, b, in14, S43, 2878612391); /* 51 */
	II(b, c, d, a, in5, S44, 4237533241); /* 52 */
	II(a, b, c, d, in12, S41, 1700485571); /* 53 */
	II(d, a, b, c, in3, S42, 2399980690); /* 54 */
	II(c, d, a, b, in10, S43, 4293915773); /* 55 */
	II(b, c, d, a, in1, S44, 2240044497); /* 56 */
	II(a, b, c, d, in8, S41, 1873313359); /* 57 */
	II(d, a, b, c, in15, S42, 4264355552); /* 58 */
	II(c, d, a, b, in6, S43, 2734768916); /* 59 */
	II(b, c, d, a, in13, S44, 1309151649); /* 60 */
	II(a, b, c, d, in4, S41, 4149444226); /* 61 */
	II(d, a, b, c, in11, S42, 3174756917); /* 62 */
	II(c, d, a, b, in2, S43, 718787259); /* 63 */
	II(b, c, d, a, in9, S44, 3951481745); /* 64 */
	// Add this chunk's result to the initialization vector and emit.
	a += a0;
	b += b0;
	c += c0;
	d += d0;
	*a1 = a;
	*b1 = b;
	*c1 = c;
	*d1 = d;
}
// Convert a single hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its value
// 0-15. Any other character maps to 0.
unsigned int unhex(unsigned char x)
{
	if (x >= 'A' && x <= 'F')
		return (unsigned int)(x - 'A' + 10);
	if (x >= 'a' && x <= 'f')
		return (unsigned int)(x - 'a' + 10);
	if (x >= '0' && x <= '9')
		return (unsigned int)(x - '0');
	return 0;
}
// Parse a 32-character hex MD5 digest string into four little-endian
// 32-bit words (the same layout md5_vfy produces in a/b/c/d).
void md5_to_ints(unsigned char* md5, unsigned int *r0, unsigned int *r1, unsigned int *r2, unsigned int *r3)
{
	unsigned int words[4] = { 0, 0, 0, 0 };
	for (int i = 0; i < 32; i += 2)
	{
		unsigned int byte = unhex(md5[i]) * 16 + unhex(md5[i + 1]);
		// Each byte shifts in from the top, so earlier hex pairs end up in
		// the low-order positions (little-endian packing, 8 hex chars/word).
		words[i / 8] = (words[i / 8] >> 8) | (byte << 24);
	}
	*r0 = words[0];
	*r1 = words[1];
	*r2 = words[2];
	*r3 = words[3];
}
/*
 * Hash `test` (`length` bytes) with MD5 and report whether it matches the
 * target digest given as the four words a,b,c,d.
 * Returns 1 on a match, 0 otherwise.
 */
__host__ __device__ int myencrypt(unsigned char * test, unsigned int length, unsigned int a, unsigned int b, unsigned int c, unsigned int d)
{
	unsigned int h0 = 0, h1 = 0, h2 = 0, h3 = 0;
	md5_vfy(test, length, &h0, &h1, &h2, &h3);
	return (h0 == a && h1 == b && h2 == c && h3 == d) ? 1 : 0;
}//end of crypt method
// True when `guess` (MAX_PASSWORD_LENGTH bytes) hashes to the target
// digest `hash` (four 32-bit words).
__device__ bool Compare(unsigned char* guess, unsigned int* hash) {
	return myencrypt(guess, MAX_PASSWORD_LENGTH, hash[0], hash[1], hash[2], hash[3]);
}
/*
 * Brute-force kernel: each thread owns one PREFIX_LENGTH-letter lowercase
 * prefix (decoded from its global index) and enumerates every lowercase
 * suffix of length MAX_PASSWORD_LENGTH - PREFIX_LENGTH. On a digest match
 * the plaintext is written to `a`.
 */
__global__ void square_array(char* a, unsigned int* hash) {
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	// BUG FIX: threads beyond the prefix space previously fell through and
	// searched with an uninitialized prefix; bail out instead.
	if (idx >= (int)pow((double)ALPHABET_SIZE, (double)PREFIX_LENGTH))
		return;
	unsigned char guess[MAX_PASSWORD_LENGTH];
	// Decode the thread index into a base-26 prefix, most significant
	// letter first.
	for (unsigned int i = 0; i < PREFIX_LENGTH; i++) {
		if (i == PREFIX_LENGTH - 1) {
			guess[i] = 'a' + idx % ALPHABET_SIZE;
		}
		else {
			unsigned int offset = PREFIX_LENGTH - i - 1;
			unsigned int t = idx % (int)(pow((double)ALPHABET_SIZE, (double)(offset + 1)));
			guess[i] = 'a' + t / (unsigned long long int)(pow((double)ALPHABET_SIZE, (double)offset));
		}
	}
	// Start the suffix at "aaa..." and walk it like an odometer.
	for (int pos = PREFIX_LENGTH; pos < MAX_PASSWORD_LENGTH; pos++) {
		guess[pos] = 'a';
	}
	while (true) {
		// BUG FIX: test the current candidate *before* incrementing, so the
		// all-'a' suffix (the very first candidate) is no longer skipped.
		if (Compare(guess, hash)) {
			for (unsigned int jj = 0; jj < MAX_PASSWORD_LENGTH; jj++)
				a[jj] = guess[jj];
			break;
		}
		// Advance to the next suffix; stop once "zzz..." has been tested.
		int tail;
		for (tail = MAX_PASSWORD_LENGTH - 1; tail >= PREFIX_LENGTH && guess[tail] == 'z'; tail--);
		if (tail < PREFIX_LENGTH) {
			break;
		}
		for (int j = MAX_PASSWORD_LENGTH - 1; j > tail; j--) {
			guess[j] = 'a';
		}
		guess[tail] += 1;
	}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its source location; exits the process
// unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
	if (code == cudaSuccess)
		return;
	fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
	if (abort)
		exit(code);
}
// Hash the 6-letter password "passwo" on the host, then brute-force it on
// the GPU and report the recovered plaintext and elapsed time.
// NOTE(review): hash_h is never freed, hash_d never cudaFree'd, the events
// never destroyed, and kernel-launch errors are unchecked (gpuErrchk exists
// but is unused) — confirm acceptable for this demo.
int main() {
	unsigned char password[MAX_PASSWORD_LENGTH] = { 'p', 'a', 's', 's', 'w', 'o' };
	char* a_h, *a_d;
	size_t size = MAX_PASSWORD_LENGTH * sizeof(char);
	unsigned int* hash_h, *hash_d;
	hash_h = (unsigned int*)malloc(4 * sizeof(unsigned int));
	// calculate hash and verify it works
	md5_vfy(password, MAX_PASSWORD_LENGTH, &hash_h[0], &hash_h[1], &hash_h[2], &hash_h[3]);
	printf("hash: 0x%08X 0x%08X 0x%08X 0x%08X\n", hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
	// Only the first MAX_PASSWORD_LENGTH (6) bytes of "password" are hashed,
	// so this matches the "passwo" digest above and should print 1.
	int res = myencrypt((unsigned char*)"password", MAX_PASSWORD_LENGTH, hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
	printf("%d\n", res);
	// set array to a default value ('a' - 1, i.e. visibly "not found")
	a_h = (char*)malloc(size + 1);
	for (unsigned int i = 0; i < size; i++) {
		a_h[i] = 'a' - 1;
	}
	a_h[size] = '\0';
	cudaMalloc((void**)&a_d, size);
	cudaMalloc((void**)&hash_d, 4 * sizeof(unsigned int));
	cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
	cudaMemcpy(hash_d, hash_h, 4 * sizeof(unsigned int), cudaMemcpyHostToDevice);
	// One thread per 4-letter prefix: 26^4 threads in 1024-thread blocks.
	int threadsPerBlock = 1024;
	int numBlocks = ceil(pow(ALPHABET_SIZE, PREFIX_LENGTH) / threadsPerBlock);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	square_array<<<numBlocks, threadsPerBlock>>>(a_d, hash_d);
	cudaEventRecord(stop);
	// Blocking copy on the default stream — also waits for the kernel.
	cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
	printf("password: %s\n", a_h);
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("Done in: %f milliseconds\n", milliseconds);
	free(a_h);
	cudaFree(a_d);
}
|
20,782 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* This example demonstrates how to use the CUDA Direct3D bindings with the
* runtime API.
* Device code.
*/
// includes, C string library
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param pos  vertex buffer in global memory, width*height float4 entries
//! Expects a 2D launch covering exactly width x height threads: there is no
//! bounds check, so width/height must be multiples of the block dimensions
//! (the launcher below uses 8x8 blocks).
///////////////////////////////////////////////////////////////////////////////
__global__ void cudaKernelMesh(float4* pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // calculate uv coordinates, remapped from [0,1] to [-1,1]
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;
    // calculate simple sine wave pattern for the height (w) component
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
    // write output vertex; the packed constant is the vertex color
    pos[y*width+x] = make_float4(u, w, v, __int_as_float(0xff00ff00)); //Color : DirectX ARGB, OpenGL ABGR
}
// Host-side launcher: refresh the width x height vertex grid for time t.
// Assumes width and height are multiples of 8 (the block dimensions), since
// the grid is sized by exact division.
extern "C" void cudaMeshUpdate(void* deviceMesh, unsigned int width, unsigned int height, float t)
{
    const dim3 threads(8, 8, 1);
    const dim3 blocks(width / threads.x, height / threads.y, 1);
    cudaKernelMesh<<<blocks, threads>>>((float4*)deviceMesh, width, height, t);
}
|
20,783 | #include <iostream>
// Atomic add built from a compare-and-swap retry loop: keep proposing
// (expected -> expected + incr) until no other thread changed *address in
// between. Returns the value held before our update, like atomicAdd.
__device__ int myAtomicAdd(int *address, int incr)
{
    int observed = *address;
    int expected;
    do
    {
        expected = observed;
        observed = atomicCAS(address, expected, expected + incr);
    } while (observed != expected);
    return observed;
}
// Atomically add `inc` to *x and return the previous value.
// NOTE(review): this delegates to the int-only myAtomicAdd, so despite the
// template it is only correct for T = int (the sole instantiation below).
template<typename T>
__device__ T fetch_and_add(T *x, T inc)
{
    T orig_val = myAtomicAdd(x,inc);
    return orig_val;
}
// Each thread atomically increments the shared counter exactly once.
__global__ void kernel(int *sharedInteger)
{
    myAtomicAdd(sharedInteger, 1);
}
//template __device__ long fetch_and_add<long>(long*, long);
//template __device__ float fetch_and_add<float>(float*, float);
//template __device__ double fetch_and_add<double>(double*, double);
template __device__ int fetch_and_add<int>(int*, int);
|
20,784 | /**
* bicg.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//Error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 4096
#define NY 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Fills the NX x NY matrix A and the input vectors r (length NX) and
// p (length NY) with deterministic test data.
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
    for (int row = 0; row < NX; row++)
    {
        r[row] = row * M_PI;
        for (int col = 0; col < NY; col++)
        {
            // Cast binds to row only, matching the reference initializer.
            A[row*NY + col] = ((DATA_TYPE) row*col) / NX;
        }
    }
    for (int k = 0; k < NY; k++)
    {
        p[k] = k * M_PI;
    }
}
// Counts GPU result entries that differ from the CPU reference by more than
// PERCENT_DIFF_ERROR_THRESHOLD percent (for both q, length NX, and s,
// length NY) and prints the total mismatch count.
void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
    int fail = 0;
    // q vector (length NX)
    for (int i = 0; i < NX; i++)
    {
        if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
            fail++;
    }
    // s vector (length NY)
    for (int i = 0; i < NY; i++)
    {
        if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
            fail++;
    }
    printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Reports the chosen CUDA device by name and makes it current.
void GPU_argv_init()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, GPU_DEVICE);
    printf("setting device %d with name %s\n",GPU_DEVICE,prop.name);
    cudaSetDevice( GPU_DEVICE );
}
//Distributed (split) from initial loop and permuted into reverse order to allow parallelism...
// Computes s = A^T * r: thread j walks column j of A.
// Accumulates in a register and writes once, instead of the original
// read-modify-write of s[j] in global memory per iteration (same result).
__global__ void bicg_kernel1(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j >= NY)
        return;
    DATA_TYPE acc = 0.0f;
    for (int i = 0; i < NX; i++)
    {
        acc += A[i * NY + j] * r[i];
    }
    s[j] = acc;
}
//Distributed (split) from initial loop to allow parallelism
// Computes q = A * p: thread i walks row i of A (coalesced-in-j reads).
// Register accumulator replaces the original global-memory accumulation.
__global__ void bicg_kernel2(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= NX)
        return;
    DATA_TYPE acc = 0.0f;
    for (int j = 0; j < NY; j++)
    {
        acc += A[i * NY + j] * p[j];
    }
    q[i] = acc;
}
// CPU reference: one fused pass computing both s = A^T r and q = A p.
void bicg_cpu(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
    // s accumulates across rows, so it must be zeroed up front.
    for (int j = 0; j < NY; j++)
        s[j] = 0.0;
    for (int i = 0; i < NX; i++)
    {
        q[i] = 0.0;
        for (int j = 0; j < NY; j++)
        {
            s[j] += r[i] * A[i*NY + j];
            q[i] += A[i*NY + j] * p[j];
        }
    }
}
// Allocates device buffers, runs the two BiCG kernels, times them, and
// copies s and q back into the *_outputFromGpu host buffers.
// FIX: cudaThreadSynchronize() is deprecated (and removed in newer CUDA
// toolkits); replaced with the equivalent cudaDeviceSynchronize().
void bicgCuda(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q,
              DATA_TYPE* s_outputFromGpu, DATA_TYPE* q_outputFromGpu)
{
    double t_start, t_end;
    DATA_TYPE *A_gpu;
    DATA_TYPE *q_gpu;
    DATA_TYPE *p_gpu;
    DATA_TYPE *r_gpu;
    DATA_TYPE *s_gpu;
    cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
    cudaMalloc((void **)&r_gpu, sizeof(DATA_TYPE) * NX);
    cudaMalloc((void **)&s_gpu, sizeof(DATA_TYPE) * NY);
    cudaMalloc((void **)&p_gpu, sizeof(DATA_TYPE) * NY);
    cudaMalloc((void **)&q_gpu, sizeof(DATA_TYPE) * NX);
    cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(r_gpu, r, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
    cudaMemcpy(s_gpu, s, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(p_gpu, p, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(q_gpu, q, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
    dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
    // Ceil-divide so the last partial block is still launched.
    dim3 grid1((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
    dim3 grid2((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
    t_start = rtclock();
    bicg_kernel1<<< grid1, block >>>(A_gpu, r_gpu, s_gpu);
    cudaDeviceSynchronize();
    bicg_kernel2<<< grid2, block >>>(A_gpu, p_gpu, q_gpu);
    cudaDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
    cudaMemcpy(s_outputFromGpu, s_gpu, sizeof(DATA_TYPE) * NY, cudaMemcpyDeviceToHost);
    cudaMemcpy(q_outputFromGpu, q_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
    cudaFree(A_gpu);
    cudaFree(r_gpu);
    cudaFree(s_gpu);
    cudaFree(p_gpu);
    cudaFree(q_gpu);
}
// Driver: run the BiCG kernels on the GPU, then the CPU reference, and
// compare the two result vectors within the percent-diff threshold.
int main(int argc, char** argv)
{
    DATA_TYPE* matA      = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
    DATA_TYPE* vecR      = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    DATA_TYPE* vecS      = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    DATA_TYPE* vecP      = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    DATA_TYPE* vecQ      = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    DATA_TYPE* sFromGpu  = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
    DATA_TYPE* qFromGpu  = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
    init_array(matA, vecP, vecR);
    GPU_argv_init();
    // GPU pass (times itself internally), then timed CPU reference pass.
    bicgCuda(matA, vecR, vecS, vecP, vecQ, sFromGpu, qFromGpu);
    double cpuStart = rtclock();
    bicg_cpu(matA, vecR, vecS, vecP, vecQ);
    double cpuEnd = rtclock();
    fprintf(stdout, "CPU Runtime: %0.6lfs\n", cpuEnd - cpuStart);
    compareResults(vecS, sFromGpu, vecQ, qFromGpu);
    free(matA);
    free(vecR);
    free(vecS);
    free(vecP);
    free(vecQ);
    free(sFromGpu);
    free(qFromGpu);
    return 0;
}
|
20,785 |
// One Perona-Malik-style anisotropic diffusion update per pixel.
// Image layout (from the indexing): index = j + i*M, with M the fast
// dimension — TODO confirm against the caller.
// FIXES vs. original:
//  - deltaN/S/W/E were read uninitialized at image borders; now default 0
//    so a missing neighbour contributes nothing.
//  - border guards were off by one (i>1 / j>1 skipped valid row/col 1).
//  - the east-neighbour check (indexE < len) allowed wrap-around into the
//    next row; replaced with j < M-1.
//  - added a global bounds guard for overshooting threads, and dropped the
//    trailing __syncthreads() (no shared memory; it synchronized nothing).
__global__ void anisotropy_kernel(float1* imInD, int M,int N, float k, float lambda, short type) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = j+i*M;
    int len = N*M;
    if (j >= M || index >= len)
        return;
    // Neighbour differences; zero when the neighbour is outside the image.
    float deltaN = 0.0f;
    float deltaS = 0.0f;
    float deltaW = 0.0f;
    float deltaE = 0.0f;
    int indexN = (j)+(i-1)*(M);
    int indexS = (j)+(i+1)*(M);
    int indexW = (j-1)+(i)*(M);
    int indexE = (j+1)+(i)*(M);
    if (i>0)
        deltaN = imInD[indexN].x-imInD[index].x;
    if (indexS < len)
        deltaS = imInD[indexS].x-imInD[index].x;
    if (j>0)
        deltaW = imInD[indexW].x-imInD[index].x;
    if (j < M-1)
        deltaE = imInD[indexE].x-imInD[index].x;
    // Conduction coefficients: type 1 = exponential, else rational.
    float cN, cS, cW, cE;
    if (type==1) {
        cN = expf(-((deltaN/k)*(deltaN/k)));
        cS = expf(-((deltaS/k)*(deltaS/k)));
        cW = expf(-((deltaW/k)*(deltaW/k)));
        cE = expf(-((deltaE/k)*(deltaE/k)));
    } else {
        cN = 1.0f/(1.0f+(deltaN/k)*(deltaN/k));
        cS = 1.0f/(1.0f+(deltaS/k)*(deltaS/k));
        cW = 1.0f/(1.0f+(deltaW/k)*(deltaW/k));
        cE = 1.0f/(1.0f+(deltaE/k)*(deltaE/k));
    }
    float val = (lambda*(cN*deltaN + cS*deltaS + cW*deltaW + cE*deltaE));
    imInD[index].x += val;
}
|
20,786 | #include <stdio.h>
// Each thread prints a greeting tagged with its global linear index.
__global__ void helloKernel()
{
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    printf("Hello World! My threadId is %d \n", tid);
}
int main()
{
    // One block of 256 threads; synchronize so device-side printf output
    // is flushed before the process exits.
    helloKernel<<<1, 256>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
20,787 | #include "includes.h"
// Dequantize: one thread per element, scaling each signed 8-bit value by
// `multipler` into a float.
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    output_f32[idx] = input_int8[idx] * multipler;
}
20,788 | #include "includes.h"
// Element-wise |dSup - dLow| using a grid-stride loop so any launch
// geometry covers all dSize elements.
__global__ void absDifference(double *dDifference, double *dSup, double *dLow, int dSize){
    int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < dSize; idx += stride) {
        double hi = dSup[idx];
        double lo = dLow[idx];
        dDifference[idx] = (hi > lo) ? (hi - lo) : (lo - hi);
    }
}
20,789 | #include "includes.h"
// Intentionally empty kernel: launching it pays the one-time CUDA context
// creation / module-load cost so later kernels are timed fairly.
__global__ void warmUpGPU()
{
// do nothing
}
20,790 | #include <fstream>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <cstring>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BLOCK_DIM 8
// Wave speed at point (x, y). Currently uniform (1.0); the commented-out
// branches are alternative heterogeneous-medium profiles.
__device__ double c(const double x, const double y) {
//if ((y > 1.0) && (y <= 1.2)) return 0.8;
//if ((y > 0.5) && (y <= 0.8) && (x > 0.2) && (x <= 0.5)) return 1.0;
return 1.0;
}
// Boundary forcing value at position x, time t. Currently constant (1.0);
// the commented-out line is a pulsed-source alternative.
__device__ double v(const double x, const double t) {
//if (5*t < 2*M_PI) return sin(5*t) * exp(-30*(x-0.5)*(x-0.5));
return 1.0;
}
// Straightforward (global-memory-only) one-time-step update of the 2D wave
// field on an (N+1) x (M+1) grid. prev/curr/next are the three time layers;
// h_x/h_y are grid spacings, tau the time step.
// Boundary handling: j==M is driven by v(), j==0 / i==0 / i==N use one-sided
// advection updates; interior nodes use the explicit second-order scheme.
__global__ void calcNodeSimple(double *prev, double *curr, double *next,
const double h_x, const double h_y,
const double tau, const double time,
const int N, const int M) {
unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if ( (i <= N) && (j <= M) ) {
unsigned int ind = j*(N+1) + i; // 1D-index
if (j == M)
// top boundary: prescribed source value
next[ind] = v(i*h_x, time);
else if (j == 0)
// bottom boundary: one-sided update toward j+1
next[ind] = curr[ind]
+ c(i*h_x, j*h_y) * tau / h_y * (curr[(j+1)*(N+1) + i] - curr[ind]);
else if (i == 0)
// left boundary: one-sided update toward i+1
next[ind] = curr[ind]
+ c(i*h_x, j*h_y) * tau / h_x * (curr[j*(N+1) + i+1] - curr[ind]);
else if (i == N)
// right boundary: one-sided update toward i-1
next[ind] = curr[ind]
- c(i*h_x, j*h_y) * tau / h_x * (curr[ind] - curr[j*(N+1) + i-1]);
else
// interior: explicit 5-point wave-equation stencil
next[ind] = 2 * curr[ind] - prev[ind]
+ c(i*h_x, j*h_y)*c(i*h_x, j*h_y) * tau*tau / (h_x*h_x) *
(curr[j*(N+1) + i+1] - 2*curr[ind] + curr[j*(N+1) + i-1])
+ c(i*h_x, j*h_y)*c(i*h_x, j*h_y) * tau*tau / (h_y*h_y) *
(curr[(j+1)*(N+1) + i] - 2*curr[ind] + curr[(j-1)*(N+1) + i]);
}
}
// Shared-memory (tiled) variant of calcNodeSimple: each block stages its
// BLOCK_DIM x BLOCK_DIM tile of `curr` plus a one-cell halo, then applies
// the same boundary/interior updates as the simple kernel.
// FIX: the original placed __syncthreads() inside the (i <= N && j <= M)
// bounds check. For edge blocks where some threads are out of range this is
// a barrier in divergent control flow — undefined behaviour. The barrier is
// now reached unconditionally; out-of-range threads just skip the loads and
// the final computation.
__global__ void calcNode(double *prev, double *curr, double *next,
const double h_x, const double h_y,
const double tau, const double time,
const int N, const int M) {
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    unsigned int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    __shared__ double sc[BLOCK_DIM+2][BLOCK_DIM+2];
    const bool inside = (i <= N) && (j <= M);
    unsigned int ind = j*(N+1) + i; // 1D-index (only used when inside)
    double p = 0.0, u = 0.0;
    if (inside) {
        p = prev[ind];
        // Copy this block's values to shared memory
        u = sc[threadIdx.x+1][threadIdx.y+1] = curr[ind];
        // Copy necessary halo values from neighbour cells
        if (threadIdx.x == 0) {
            sc[0][threadIdx.y+1] = 0;
            if (i != 0) sc[0][threadIdx.y+1] = curr[j*(N+1) + i-1];
        }
        if (threadIdx.x == blockDim.x-1) {
            sc[blockDim.x+1][threadIdx.y+1] = 0;
            if (i != N) sc[blockDim.x+1][threadIdx.y+1] = curr[j*(N+1) + i+1];
        }
        if (threadIdx.y == 0) {
            sc[threadIdx.x+1][0] = 0;
            if (j != 0) sc[threadIdx.x+1][0] = curr[(j-1)*(N+1) + i];
        }
        if (threadIdx.y == blockDim.y-1) {
            sc[threadIdx.x+1][blockDim.y+1] = 0;
            if (j != M) sc[threadIdx.x+1][blockDim.y+1] = curr[(j+1)*(N+1) + i];
        }
    }
    // Barrier must be reached by ALL threads of the block.
    __syncthreads();
    if (!inside)
        return;
    double n; // value on the next time layer in this node
    // Calculate next time step (same scheme as calcNodeSimple)
    if (j == M)
        n = v(i*h_x, time);
    else if (j == 0)
        n = u + c(i*h_x, j*h_y) * tau / h_y *
            (sc[threadIdx.x+1][threadIdx.y+2] - u);
    else if (i == 0)
        n = u + c(i*h_x, j*h_y) * tau / h_x *
            (sc[threadIdx.x+2][threadIdx.y+1] - u);
    else if (i == N)
        n = u - c(i*h_x, j*h_y) * tau / h_x *
            (u - sc[threadIdx.x][threadIdx.y+1]);
    else
        n = 2 * u - p
            + c(i*h_x, j*h_y)*c(i*h_x, j*h_y) * tau*tau / (h_x*h_x) *
            (sc[threadIdx.x+2][threadIdx.y+1] - 2*u + sc[threadIdx.x][threadIdx.y+1])
            + c(i*h_x, j*h_y)*c(i*h_x, j*h_y) * tau*tau / (h_y*h_y) *
            (sc[threadIdx.x+1][threadIdx.y+2] - 2*u + sc[threadIdx.x+1][threadIdx.y]);
    // Copy calculated value to global memory
    next[ind] = n;
}
// Writes `value` to `f` with its native byte order reversed (big-endian on
// little-endian hosts — the byte order the VTK BINARY format expects).
template<typename T>
static void put(std::fstream &f, const T value) {
    char bytes[sizeof(T)];
    std::memcpy(bytes, &value, sizeof(T));
    std::reverse(bytes, bytes + sizeof(T));
    f.write(bytes, sizeof(T));
}
// Dumps the (N+1) x (M+1) field `a` as a legacy-format binary VTK
// structured-points file named "<prefix>.<step 0-padded to 5>.vtk".
// Values are written big-endian via put() as VTK BINARY requires.
void save(const char *prefix, int step, double *a,
const double &h_x, const double &h_y,
const int N, const int M) {
char buffer[50];
sprintf(buffer, "%s.%05d.vtk", prefix, step);
std::fstream f(buffer, std::ios::out);
if (!f) {
std::cerr << "Unable to open file " << buffer << std::endl;
return;
}
// Legacy VTK header describing a uniform grid of point data.
f << "# vtk DataFile Version 3.0" << std::endl;
f << "U data" << std::endl;
f << "BINARY" << std::endl;
f << "DATASET STRUCTURED_POINTS" << std::endl;
f << "DIMENSIONS " << N+1 << " " << M+1 << " 1" << std::endl;
f << "SPACING " << h_x << " " << h_y << " 1" << std::endl;
f << "ORIGIN 0 0 0" << std::endl;
f << "POINT_DATA " << (N+1) * (M+1) << std::endl;
f << "SCALARS u double" << std::endl;
f << "LOOKUP_TABLE default" << std::endl;
// Row-major (j outer, i inner), matching the kernel's 1D indexing.
for (int j = 0 ; j < M+1; j++){
for (int i = 0; i < N+1; i++)
put(f, a[j*(N+1) + i]);
}
f.close();
}
/**
* Calculate the process, save results.
*/
/**
 * Time-stepping driver: repeatedly launches calcNode, rotates the three
 * time-layer buffers, and about every T/100 of simulated time copies the
 * field back to the host and writes a VTK snapshot. Also collects
 * min/avg/max per-launch kernel timings via CUDA events.
 * FIXES:
 *  - the failure fprintf put its %s specifier in a SECOND string that was
 *    consumed as an unused vararg, so the error code was never printed;
 *  - cudaEventCreate was called every iteration with no matching
 *    cudaEventDestroy, leaking event handles for the whole run.
 */
void calculate(double *prev, double *curr, double *next,
double *hostData, const int N, const int M) {
    const double h_x = 0.01;
    const double h_y = 0.005;
    const double tau = h_y / 2.0;   // step tied to the finer spacing
    const double T = 5.0;
    double curTime = 0.0;
    int counter = 0;
    double visualisationStep = T / 100;
    float maxTime = 0;
    float avgTime = 0;
    float minTime = 9e+9;
    std::cout << "Max time of kernel execution:\nstep\ttime\n";
    while (curTime <= T) {
        dim3 threadsPerBlock(BLOCK_DIM, BLOCK_DIM);
        dim3 numBlocks(N/threadsPerBlock.x + 1, M/threadsPerBlock.y + 1);
        // CUDA events for measuring kernel execution time
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start);
        calcNode <<<numBlocks, threadsPerBlock>>>
            (prev, curr, next, h_x, h_y, tau, curTime, N, M);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        cudaEventDestroy(start);   // release per-iteration event handles
        cudaEventDestroy(stop);
        // Print only new running maxima.
        if (maxTime < milliseconds)
            std::cout << curTime/tau << "\t" << milliseconds << "\n";
        maxTime = (maxTime > milliseconds) ? maxTime : milliseconds;
        minTime = (minTime < milliseconds) ? minTime : milliseconds;
        avgTime += milliseconds / (T / tau);
        // Rotate the three time layers.
        double *tmp = prev;
        prev = curr;
        curr = next;
        next = tmp;
        if ( fabs(curTime - counter * visualisationStep) < 1e-5 ) {
            cudaError_t err = cudaMemcpy(hostData, prev,
                                         (N+1) * (M+1) * sizeof (double),
                                         cudaMemcpyDeviceToHost);
            if (err != cudaSuccess) {
                fprintf(stderr, "Failed to copy data from device to host (error code %s)!\n",
                        cudaGetErrorString(err));
                exit(EXIT_FAILURE);
            }
            save("output", counter, hostData, h_x, h_y, N, M);
            counter++;
        }
        curTime += tau;
    }
    std::cout << "Kernel execution in milliseconds:\nmax time = " << maxTime <<
        "\navg time = " << avgTime << "\nmin time = " << minTime << "\n";
}
// Wall-clock time in milliseconds (seconds * 1000 + microseconds / 1000).
double mtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec * 1000 + (double) now.tv_usec / 1000;
}
// Entry point: allocates the three device time layers plus a host staging
// buffer, zeroes everything, runs the simulation, and reports wall time.
// FIX: all three allocation-failure fprintf calls passed
// cudaGetErrorString(err) as an argument but had no %s in the format
// string, so the CUDA error was silently dropped.
int main(int argc, char **argv) {
    const int N = 1000;
    const int M = 4000;
    size_t size = (N+1) * (M+1) * sizeof (double);
    cudaError_t err = cudaSuccess;
    double *prev = NULL;
    err = cudaMalloc((void **) &prev, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device memory (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    double *curr = NULL;
    err = cudaMalloc((void **) &curr, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device memory (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    double *next = NULL;
    err = cudaMalloc((void **) &next, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device memory (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    double *hostData = new double[(N+1)*(M+1)];
    // Zero device layers (cudaMemset) and the host buffer (memset).
    cudaMemset(prev, 0, size);
    cudaMemset(curr, 0, size);
    cudaMemset(next, 0, size);
    memset(hostData, 0, size);
    double t1 = mtime();
    calculate(prev, curr, next, hostData, N, M);
    double t2 = mtime();
    std::cout << "Time of calculate() execution = " << t2 - t1 << std::endl;
    cudaFree(prev);
    cudaFree(curr);
    cudaFree(next);
    delete [] hostData;
}
|
20,791 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include<stdio.h>
// BGR -> grayscale, one pixel per thread, using the ITU-R BT.601 luma
// weights (0.114 B + 0.587 G + 0.299 R); the +0.5 rounds to nearest before
// the truncating cast.
__global__ void cuda_gray_kernel(unsigned char *b, unsigned char *g, unsigned char *r, unsigned char *gray, size_t size)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        gray[i] = (unsigned char)(0.114f*b[i] + 0.587f*g[i] + 0.299f*r[i] + 0.5);
    }
}
extern "C" {
// Host wrapper: copies the three channel planes (a=B, b=G, c=R) to the
// device, runs cuda_gray_kernel, and copies the grayscale result back into
// d, printing the kernel time.
// FIX: the two CUDA events were created but never destroyed (handle leak).
// NOTE(review): copying d host->device is unnecessary (the kernel
// overwrites every idx < size); kept to preserve exact behaviour.
void cuda_gray(unsigned char *a, unsigned char *b, unsigned char *c, unsigned char *d, size_t size)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    unsigned char *d_a, *d_b, *d_c, *d_d;
    cudaMalloc((void **)&d_a, size * sizeof(char));
    cudaMalloc((void **)&d_b, size * sizeof(char));
    cudaMalloc((void **)&d_c, size * sizeof(char));
    cudaMalloc((void **)&d_d, size * sizeof(char));
    cudaMemcpy(d_a, a, size * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, size * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_d, d, size * sizeof(char), cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    cuda_gray_kernel <<< ceil(size / 1024.0), 1024 >>> (d_a, d_b, d_c, d_d, size);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time on GPU : %f msec\n", milliseconds);
    cudaMemcpy(d, d_d, size * sizeof(char), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_d);
}
}
|
20,792 | //===- transpose.cu -------------------------------------------*--- C++ -*-===//
//
// Copyright 2022 ByteDance Ltd. and/or its affiliates. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//===----------------------------------------------------------------------===//
#include <cuda_fp16.h>
namespace brt {
namespace cuda {
namespace kernel {
template <typename T>
__global__ void transpose_naive_2d_kernel(const T *input, T *output, int m,
int n) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (iy < m && ix < n) {
int in_idx = iy * n + ix;
int out_idx = ix * m + iy;
output[out_idx] = input[in_idx];
}
}
// Host launcher for the naive transpose; caller supplies grid/block
// geometry and the stream to enqueue on.
template <typename T>
void transpose_naive_2d(const T *input, T *output, int m, int n, dim3 grid,
                        dim3 block, cudaStream_t stream) {
  transpose_naive_2d_kernel<T>
      <<<grid, block, 0, stream>>>(input, output, m, n);
}
// instantiate
template void transpose_naive_2d<float>(const float *, float *, int, int, dim3,
dim3, cudaStream_t);
template void transpose_naive_2d<__half>(const __half *, __half *, int, int,
dim3, dim3, cudaStream_t);
} // namespace kernel
} // namespace cuda
} // namespace brt
|
20,793 | #include <stdio.h>
#include <sys/time.h>
#include <cuda_runtime.h>
const float step = 0.001;
enum {
BLOCK_SIZE = 32,
N = 1024
};
// CPU reference: tab[i] = sin(sqrt(step * i)) for i in [0, n).
void tabfun_host(float *tab, float step, int n)
{
    for (int i = 0; i < n; i++)
        tab[i] = sinf(sqrtf(step * i));
}
// GPU version of tabfun_host: tab[i] = sin(sqrt(step * i)), one element
// per thread; out-of-range threads do nothing.
__global__ void tabfun(float *tab, float step, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    tab[i] = sinf(sqrtf(step * i));
}
// Wall-clock time in seconds with microsecond resolution.
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
// Benchmark driver: tabulate sin(sqrt(x)) on CPU and GPU, compare the two
// tables element-wise, and report CPU/GPU/memcpy timings and speedups.
int main()
{
double tcpu = 0, tgpu = 0, tmem = 0;
cudaError_t err;
/* Allocate memory on host */
size_t size = sizeof(float) * N;
float *hT = (float *)malloc(size);
float *hRes = (float *)malloc(size);
if (hT == NULL || hRes == NULL) {
fprintf(stderr, "Allocation error.\n");
exit(EXIT_FAILURE);
}
// Timed CPU reference pass.
tcpu = -wtime();
tabfun_host(hT, step, N);
tcpu += wtime();
/* Allocate vectors on device */
float *dT = NULL;
if (cudaMalloc((void **)&dT, size) != cudaSuccess) {
fprintf(stderr, "Allocation error\n");
exit(EXIT_FAILURE);
}
/* Launch the kernel */
int threadsPerBlock = 1024;
// Ceil-divide so a partial last block still covers the tail.
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n",
blocksPerGrid, threadsPerBlock);
tgpu = -wtime();
tabfun<<<blocksPerGrid, threadsPerBlock>>>(dT, step, N);
// Kernel launch is asynchronous; synchronize so tgpu measures execution.
cudaDeviceSynchronize();
tgpu += wtime();
if ( (err = cudaGetLastError()) != cudaSuccess) {
fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy the device vectors to host */
tmem -= wtime();
if (cudaMemcpy(hRes, dT, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
fprintf(stderr, "Device to host copying failed\n");
exit(EXIT_FAILURE);
}
tmem += wtime();
// Verify that the result vector is correct
// (prints the absolute CPU/GPU difference for every element).
for (int i = 0; i < N; i++) {
float d = fabs(hT[i] - hRes[i]);
printf("%d: %f\n", i, d);
}
printf("CPU version (sec.): %.6f\n", tcpu);
printf("GPU version (sec.): %.6f\n", tgpu);
printf("Memory ops. (sec.): %.6f\n", tmem);
printf("Speedup: %.2f\n", tcpu / tgpu);
printf("Speedup (with mem ops.): %.2f\n", tcpu / (tgpu + tmem));
cudaFree(dT);
free(hT);
free(hRes);
cudaDeviceReset();
return 0;
}
|
20,794 | #define N_W 128
#define N_H 128
#define N_D 128
extern "C" // ensure function name to be exactly "vadd"
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One block-level step of a min-reduction: each thread first folds two
// input elements (i and i + blockDim*gridDim), then the block tree-reduces
// in shared memory; thread 0 writes the block minimum to output[blockIdx].
// Assumptions implied by the code: blockDim.x <= 256 (shared array size),
// blockDim.x is a power of two (halving loop), and input holds at least
// 2 * blockDim.x * gridDim.x elements — TODO confirm at call sites.
__global__ void reduction_min_kernel( double *input, double *output ){
__shared__ double sh_data[256];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// First fold: pairwise min of the two grid-sized halves of the input.
sh_data[tid] = min( input[i], input[i + blockDim.x*gridDim.x ] ) ;
__syncthreads();
// Standard shared-memory tree reduction; barrier is outside the `if`
// so every thread of the block reaches it each round.
for( unsigned int s = blockDim.x/2; s>0; s >>= 1){
if ( tid < s ) sh_data[tid] = min( sh_data[tid], sh_data[tid+s] );
__syncthreads();
}
if ( tid == 0 ) output[ blockIdx.x ] = sh_data[0];
}
// Copies the five conserved fields of cell `tid` into the 2D boundary
// arrays for one face of the domain. boundAxis selects which face-plane
// index layout is used: 1 = X faces (i,k), 2 = Y faces (j,k), 3 = Z faces
// (j,i). Caller guarantees boundAxis is 1, 2 or 3 — boundId is
// uninitialized otherwise.
__device__ void writeBound( const int boundAxis,
double *cnsv_1, double *cnsv_2, double *cnsv_3, double *cnsv_4, double *cnsv_5,
double *bound_1, double *bound_2, double *bound_3, double *bound_4, double *bound_5,
const int t_j, const int t_i, const int t_k, const int tid ){
int boundId;
if ( boundAxis == 1 ) boundId = t_i + t_k*N_H; //X BOUNDERIES
if ( boundAxis == 2 ) boundId = t_j + t_k*N_W; //Y BOUNDERIES
if ( boundAxis == 3 ) boundId = t_j + t_i*N_W; //Z BOUNDERIES
bound_1[boundId] = cnsv_1[tid];
bound_2[boundId] = cnsv_2[tid];
bound_3[boundId] = cnsv_3[tid];
bound_4[boundId] = cnsv_4[tid];
bound_5[boundId] = cnsv_5[tid];
}
// Snapshots the conserved fields on all six domain faces (left/right,
// down/up, back/top) into the per-face boundary arrays via writeBound.
// Threads in strictly interior blocks return early; only threads actually
// sitting on a face write anything.
__global__ void setBounderies(
double* cnsv_1, double* cnsv_2, double* cnsv_3, double* cnsv_4, double* cnsv_5,
double* bound_1_l, double* bound_1_r, double* bound_1_d, double* bound_1_u, double* bound_1_b, double *bound_1_t,
double* bound_2_l, double* bound_2_r, double* bound_2_d, double* bound_2_u, double* bound_2_b, double *bound_2_t,
double* bound_3_l, double* bound_3_r, double* bound_3_d, double* bound_3_u, double* bound_3_b, double *bound_3_t,
double* bound_4_l, double* bound_4_r, double* bound_4_d, double* bound_4_u, double* bound_4_b, double *bound_4_t,
double* bound_5_l, double* bound_5_r, double* bound_5_d, double* bound_5_u, double* bound_5_b, double *bound_5_t ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
// Flat cell index assuming the launch covers the full N_W x N_H x N_D grid.
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
// Early-out for blocks that touch no face of the domain.
bool boundBlock = false;
if ( blockIdx.x==0 || blockIdx.y==0 || blockIdx.z==0 ) boundBlock = true;
if ( blockIdx.x==(gridDim.x-1) || blockIdx.y==(gridDim.y-1) || blockIdx.z==(gridDim.z-1) ) boundBlock = true;
if ( !boundBlock ) return;
// X faces (left / right)
if ( t_j==0 )
writeBound( 1, cnsv_1, cnsv_2, cnsv_3, cnsv_4, cnsv_5,
bound_1_l, bound_2_l, bound_3_l, bound_4_l, bound_5_l,
t_j, t_i, t_k, tid );
if ( t_j==(N_W-1) )
writeBound( 1, cnsv_1, cnsv_2, cnsv_3, cnsv_4, cnsv_5,
bound_1_r, bound_2_r, bound_3_r, bound_4_r, bound_5_r,
t_j, t_i, t_k, tid );
// Y faces (down / up)
if ( t_i==0 )
writeBound( 2, cnsv_1, cnsv_2, cnsv_3, cnsv_4, cnsv_5,
bound_1_d, bound_2_d, bound_3_d, bound_4_d, bound_5_d,
t_j, t_i, t_k, tid );
if ( t_i==(N_H-1) )
writeBound( 2, cnsv_1, cnsv_2, cnsv_3, cnsv_4, cnsv_5,
bound_1_u, bound_2_u, bound_3_u, bound_4_u, bound_5_u,
t_j, t_i, t_k, tid );
// Z faces (back / top)
if ( t_k==0 )
writeBound( 3, cnsv_1, cnsv_2, cnsv_3, cnsv_4, cnsv_5,
bound_1_b, bound_2_b, bound_3_b, bound_4_b, bound_5_b,
t_j, t_i, t_k, tid );
if ( t_k==(N_D-1) )
writeBound( 3, cnsv_1, cnsv_2, cnsv_3, cnsv_4, cnsv_5,
bound_1_t, bound_2_t, bound_3_t, bound_4_t, bound_5_t,
t_j, t_i, t_k, tid );
}
// HLL approximate Riemann flux for one conserved quantity.
// val_l/val_r are the left/right states, F_l/F_r their physical fluxes,
// s_l/s_r the slowest/fastest signal speed estimates.
__device__ double hll_interFlux( double val_l, double val_r, double F_l, double F_r, double s_l, double s_r ){
// All waves move right: upwind fully from the left state.
if ( s_l > 0 ) return F_l;
// All waves move left: upwind fully from the right state.
if ( s_r < 0 ) return F_r;
// Subsonic fan: standard HLL blend of the two fluxes.
return ( s_r*F_l - s_l*F_r + s_l*s_r*( val_r - val_l ) ) / ( s_r - s_l );
}
// Computes the five HLL interface fluxes (mass, x/y/z momentum, energy)
// between a left state (*_l) and a right state (*_r) and stores them at
// slot `tid` of the iFlx arrays. `coord` selects the face normal:
// 1 = x, 2 = y, 3 = z; it must be one of these or F_l/F_r stay
// uninitialized.
__device__ void writeInterFlux(const int coord, int tid,
double rho_l, double rho_r, double vx_l, double vx_r, double vy_l, double vy_r, double vz_l, double vz_r, double E_l, double E_r,
double p_l, double p_r, double s_l, double s_r, double *iFlx_1, double *iFlx_2, double *iFlx_3, double *iFlx_4, double *iFlx_5 ){
// Adjacent fluxes from left and center cell
double F_l, F_r;
//iFlx rho (mass flux: rho * v_normal)
if ( coord == 1 ){
F_l = rho_l * vx_l;
F_r = rho_r * vx_r;
}
else if ( coord == 2 ){
F_l = rho_l * vy_l;
F_r = rho_r * vy_r;
}
else if ( coord == 3 ){
F_l = rho_l * vz_l;
F_r = rho_r * vz_r;
}
iFlx_1[tid] = hll_interFlux( rho_l, rho_r, F_l, F_r, s_l, s_r );
//iFlx rho * vx (x-momentum; pressure term appears only on the normal axis)
if ( coord == 1 ){
F_l = rho_l * vx_l * vx_l + p_l;
F_r = rho_r * vx_r * vx_r + p_r;
}
else if ( coord == 2 ){
F_l = rho_l * vx_l * vy_l;
F_r = rho_r * vx_r * vy_r;
}
else if ( coord == 3 ){
F_l = rho_l * vx_l * vz_l;
F_r = rho_r * vx_r * vz_r;
}
iFlx_2[tid] = hll_interFlux( rho_l*vx_l, rho_r*vx_r, F_l, F_r, s_l, s_r );
//iFlx rho * vy (y-momentum)
if ( coord == 1 ){
F_l = rho_l * vy_l * vx_l ;
F_r = rho_r * vy_r * vx_r ;
}
else if ( coord == 2 ){
F_l = rho_l * vy_l * vy_l + p_l;
F_r = rho_r * vy_r * vy_r + p_r;
}
else if ( coord == 3 ){
F_l = rho_l * vy_l * vz_l;
F_r = rho_r * vy_r * vz_r;
}
iFlx_3[tid] = hll_interFlux( rho_l*vy_l, rho_r*vy_r, F_l, F_r, s_l, s_r );
//iFlx rho * vz (z-momentum)
if ( coord == 1 ){
F_l = rho_l * vz_l * vx_l ;
F_r = rho_r * vz_r * vx_r ;
}
else if ( coord == 2 ){
F_l = rho_l * vz_l * vy_l ;
F_r = rho_r * vz_r * vy_r ;
}
else if ( coord == 3 ){
F_l = rho_l * vz_l * vz_l + p_l ;
F_r = rho_r * vz_r * vz_r + p_r ;
}
iFlx_4[tid] = hll_interFlux( rho_l*vz_l, rho_r*vz_r, F_l, F_r, s_l, s_r );
//iFlx E (energy flux: v_normal * (E + p))
if ( coord == 1 ){
F_l = vx_l * ( E_l + p_l ) ;
F_r = vx_r * ( E_r + p_r ) ;
}
else if ( coord == 2 ){
F_l = vy_l * ( E_l + p_l ) ;
F_r = vy_r * ( E_r + p_r ) ;
}
else if ( coord == 3 ){
F_l = vz_l * ( E_l + p_l ) ;
F_r = vz_r * ( E_r + p_r ) ;
}
iFlx_5[tid] = hll_interFlux( E_l, E_r, F_l, F_r, s_l, s_r );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Computes HLL fluxes on the "left" face of every cell along axis `coord`
// (1=x, 2=y, 3=z). Loads the left-neighbour and centre conserved states,
// substitutes the face boundary arrays on the domain's first slice, and for
// coord==1 also records a per-cell CFL time-step candidate in `times`.
// Threads on the last slice additionally compute the flux against the
// right-face boundary state into the iFlx_*_bnd arrays.
__global__ void setInterFlux_hll( const int coord, const double gamma, const double dx, const double dy, const double dz,
double* cnsv_1, double* cnsv_2, double* cnsv_3, double* cnsv_4, double* cnsv_5,
double* iFlx_1, double* iFlx_2, double* iFlx_3, double* iFlx_4, double* iFlx_5,
double* bound_1_l, double* bound_2_l, double* bound_3_l, double* bound_4_l, double* bound_5_l,
double* bound_1_r, double* bound_2_r, double* bound_3_r, double* bound_4_r, double* bound_5_r,
double* iFlx_1_bnd, double* iFlx_2_bnd, double* iFlx_3_bnd, double* iFlx_4_bnd, double* iFlx_5_bnd,
double* times ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
int tid_adj, boundId;
double v2;
double rho_l, vx_l, vy_l, vz_l, E_l, p_l;
double rho_c, vx_c, vy_c, vz_c, E_c, p_c;
//Set adjacent id (index of the neighbour one step "left" along `coord`;
//on the first slice it points at the cell itself and is later overridden
//by the boundary arrays)
if ( coord == 1 ){
if ( t_j == 0) tid_adj = (t_j) + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
else tid_adj = (t_j-1) + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
}
if ( coord == 2 ){
if ( t_i == 0) tid_adj = t_j + (t_i)*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
else tid_adj = t_j + (t_i-1)*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
}
if ( coord == 3 ){
if ( t_k == 0) tid_adj = t_j + t_i*blockDim.x*gridDim.x + (t_k)*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
else tid_adj = t_j + t_i*blockDim.x*gridDim.x + (t_k-1)*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
}
//Read adjacent and center conservatives (primitive velocities recovered
//by dividing momentum by density)
rho_l = cnsv_1[ tid_adj ];
rho_c = cnsv_1[ tid ];
vx_l = cnsv_2[ tid_adj ] / rho_l;
vx_c = cnsv_2[ tid ] / rho_c;
vy_l = cnsv_3[ tid_adj ] / rho_l;
vy_c = cnsv_3[ tid ] / rho_c;
vz_l = cnsv_4[ tid_adj ] / rho_l;
vz_c = cnsv_4[ tid ] / rho_c;
E_l = cnsv_5[ tid_adj ];
E_c = cnsv_5[ tid ];
//Load and apply boundery conditions (replace the left state on the
//domain's first slice with the stored face values)
if ( coord == 1 ){
boundId = t_i + t_k*N_H;
if ( t_j == 0) {
rho_l = bound_1_l[boundId];
vx_l = bound_2_l[boundId] / rho_l;
vy_l = bound_3_l[boundId] / rho_l;
vz_l = bound_4_l[boundId] / rho_l;
E_l = bound_5_l[boundId];
}
}
if ( coord == 2 ){
boundId = t_j + t_k*N_W;
if ( t_i == 0) {
rho_l = bound_1_l[boundId];
vx_l = bound_2_l[boundId] / rho_l;
vy_l = bound_3_l[boundId] / rho_l;
vz_l = bound_4_l[boundId] / rho_l;
E_l = bound_5_l[boundId];
}
}
if ( coord == 3 ){
boundId = t_j + t_i*N_W;
if ( t_k == 0) {
rho_l = bound_1_l[boundId];
vx_l = bound_2_l[boundId] / rho_l;
vy_l = bound_3_l[boundId] / rho_l;
vz_l = bound_4_l[boundId] / rho_l;
E_l = bound_5_l[boundId];
}
}
// //Boundary bounce condition
// if ( t_j == 0 ) vx_l = -vx_c;
// //Boundary bounce condition
// if ( t_i == 0 ) vy_l = -vy_c;
// //Boundary bounce condition
// if ( t_k == 0 ) vz_l = -vz_c;
//Ideal-gas pressures from total energy: p = (E - rho*v^2/2)*(gamma-1)
v2 = vx_l*vx_l + vy_l*vy_l + vz_l*vz_l;
p_l = ( E_l - rho_l*v2/2 ) * (gamma-1);
v2 = vx_c*vx_c + vy_c*vy_c + vz_c*vz_c;
p_c = ( E_c - rho_c*v2/2 ) * (gamma-1);
//Sound speeds and HLL signal-speed estimates for this face
double cs_l, cs_c, s_l, s_c;
cs_l = sqrt( p_l * gamma / rho_l );
cs_c = sqrt( p_c * gamma / rho_c );
if ( coord == 1 ){
s_l = min( vx_l - cs_l, vx_c - cs_c );
s_c = max( vx_l + cs_l, vx_c + cs_c );
//Use v2 to save time minimum (per-cell CFL dt candidate, written once
//during the x pass only)
v2 = dx / ( abs( vx_c ) + cs_c );
v2 = min( v2, dy / ( abs( vy_c ) + cs_c ) );
v2 = min( v2, dz / ( abs( vz_c ) + cs_c ) );
times[ tid ] = v2;
}
else if ( coord == 2 ){
s_l = min( vy_l - cs_l, vy_c - cs_c );
s_c = max( vy_l + cs_l, vy_c + cs_c );
}
else if ( coord == 3 ){
s_l = min( vz_l - cs_l, vz_c - cs_c );
s_c = max( vz_l + cs_l, vz_c + cs_c );
}
writeInterFlux( coord, tid, rho_l, rho_c, vx_l, vx_c, vy_l, vy_c, vz_l, vz_c, E_l, E_c,
p_l, p_c, s_l, s_c, iFlx_1, iFlx_2, iFlx_3, iFlx_4, iFlx_5 );
//Get iFlux_r for most right cell: only the last slice along `coord`
//continues past this point.
// if ( blockIdx.x!=(gridDim.x-1) || blockIdx.y!=(gridDim.y-1) || blockIdx.z!=(gridDim.z-1) ) return;
if ( coord == 1 ){
if ( t_j != (N_W-1) ) return;
}
if ( coord == 2 ){
if ( t_i != (N_H-1) ) return;
}
if ( coord == 3 ){
if ( t_k != (N_D-1) ) return;
}
//The centre cell becomes the left state of the domain's right face...
rho_l = rho_c;
vx_l = vx_c;
vy_l = vy_c;
vz_l = vz_c;
E_l = E_c;
p_l = p_c;
cs_l = cs_c;
//...and the stored right-face boundary becomes the right state.
rho_c = bound_1_r[boundId];
vx_c = bound_2_r[boundId] / rho_c;
vy_c = bound_3_r[boundId] / rho_c;
vz_c = bound_4_r[boundId] / rho_c;
E_c = bound_5_r[boundId];
v2 = vx_c*vx_c + vy_c*vy_c + vz_c*vz_c;
p_c = ( E_c - rho_c*v2/2 ) * (gamma-1);
cs_c = sqrt( p_c * gamma / rho_c );
if ( coord == 1 ){
s_l = min( vx_l - cs_l, vx_c - cs_c );
s_c = max( vx_l + cs_l, vx_c + cs_c );
}
else if ( coord == 2 ){
s_l = min( vy_l - cs_l, vy_c - cs_c );
s_c = max( vy_l + cs_l, vy_c + cs_c );
}
else if ( coord == 3 ){
s_l = min( vz_l - cs_l, vz_c - cs_c );
s_c = max( vz_l + cs_l, vz_c + cs_c );
}
writeInterFlux( coord, boundId, rho_l, rho_c, vx_l, vx_c, vy_l, vy_c, vz_l, vz_c, E_l, E_c,
p_l, p_c, s_l, s_c, iFlx_1_bnd, iFlx_2_bnd, iFlx_3_bnd, iFlx_4_bnd, iFlx_5_bnd );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Conservative update step: combines the left (this cell) and right (adjacent
// cell) HLL inter-cell fluxes along direction `coord` into advance terms for
// the five conserved fields (stored in cnsv_adv_1..5).
//
// coord: 1 = x-sweep, 2 = y-sweep, 3 = z-sweep. NOTE(review): if coord is ever
// outside {1,2,3}, `tid_adj` and `delta` are read uninitialized — confirm the
// host only launches with 1/2/3.
// N_W, N_H, N_D: presumably compile-time domain dimensions matching the launch
// grid (tid assumes one thread per cell of an N_W x N_H x N_D volume) — TODO
// confirm against the host launch configuration.
// iFlx_1..5: per-cell inter-cell fluxes for the five conserved quantities.
// iFlx_*_bnd: fluxes for the far boundary face of the sweep direction, indexed
// by the 2D position in the boundary plane.
// `gamma`, `dx`/`dy`/`dz` for the non-swept directions are unused here.
__global__ void getInterFlux_hll( const int coord, const double dt, const double gamma,
const double dx, const double dy, const double dz,
double* cnsv_adv_1, double* cnsv_adv_2, double* cnsv_adv_3, double* cnsv_adv_4, double* cnsv_adv_5,
double* iFlx_1, double* iFlx_2, double* iFlx_3, double* iFlx_4, double* iFlx_5,
double* iFlx_1_bnd, double* iFlx_2_bnd, double* iFlx_3_bnd, double* iFlx_4_bnd, double* iFlx_5_bnd ){
// double* gForceX, double* gForceY, double* gForceZ, double* gravWork ){
// Flat 3D index: t_j fastest (x), then t_i (y), then t_k (z).
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
int tid_adj, boundId;
double iFlx1_l, iFlx2_l, iFlx3_l, iFlx4_l, iFlx5_l;
double iFlx1_r, iFlx2_r, iFlx3_r, iFlx4_r, iFlx5_r;
double delta;
//Set adjacent id
// For the last cell along the sweep, tid_adj is clamped to the cell itself;
// the clamped read is discarded below when the boundary flux overwrites
// iFlx*_r. delta is the CFL-style factor dt / (cell width in sweep direction).
if ( coord == 1 ){
if ( t_j == N_W-1 ) tid_adj = (t_j) + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
else tid_adj = (t_j+1) + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
delta = dt / dx;
}
if ( coord == 2 ){
if ( t_i == N_H-1 ) tid_adj = t_j + (t_i)*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
else tid_adj = t_j + (t_i+1)*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
delta = dt / dy;
}
if ( coord == 3 ){
if ( t_k == N_D-1) tid_adj = t_j + t_i*blockDim.x*gridDim.x + (t_k)*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
else tid_adj = t_j + t_i*blockDim.x*gridDim.x + (t_k+1)*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
delta = dt / dz;
}
//Read inter-cell fluxes
// Left flux belongs to this cell, right flux to the next cell along the sweep.
iFlx1_l = iFlx_1[ tid ];
iFlx1_r = iFlx_1[ tid_adj ];
iFlx2_l = iFlx_2[ tid ];
iFlx2_r = iFlx_2[ tid_adj ];
iFlx3_l = iFlx_3[ tid ];
iFlx3_r = iFlx_3[ tid_adj ];
iFlx4_l = iFlx_4[ tid ];
iFlx4_r = iFlx_4[ tid_adj ];
iFlx5_l = iFlx_5[ tid ];
iFlx5_r = iFlx_5[ tid_adj ];
// On the far boundary face, replace the (clamped) right flux with the value
// stored in the boundary-plane arrays. boundId flattens the two non-swept
// coordinates.
if ( coord == 1 ){
boundId = t_i + t_k*N_H;
if ( t_j == (N_W-1) ) {
iFlx1_r = iFlx_1_bnd[boundId];
iFlx2_r = iFlx_2_bnd[boundId];
iFlx3_r = iFlx_3_bnd[boundId];
iFlx4_r = iFlx_4_bnd[boundId];
iFlx5_r = iFlx_5_bnd[boundId];
}
}
if ( coord == 2 ){
boundId = t_j + t_k*N_W;
if ( t_i == (N_H-1) ) {
iFlx1_r = iFlx_1_bnd[boundId];
iFlx2_r = iFlx_2_bnd[boundId];
iFlx3_r = iFlx_3_bnd[boundId];
iFlx4_r = iFlx_4_bnd[boundId];
iFlx5_r = iFlx_5_bnd[boundId];
}
}
if ( coord == 3 ){
boundId = t_j + t_i*N_W;
if ( t_k == (N_D-1) ) {
iFlx1_r = iFlx_1_bnd[boundId];
iFlx2_r = iFlx_2_bnd[boundId];
iFlx3_r = iFlx_3_bnd[boundId];
iFlx4_r = iFlx_4_bnd[boundId];
iFlx5_r = iFlx_5_bnd[boundId];
}
}
//Load and apply boundery conditions
// if ( coord == 1 ){
// boundId = t_i + t_k*N_H;
// if ( t_j == (N_W-1) ) {
// iFlx1_r = iFlx_1_bnd[boundId];
// iFlx2_r = iFlx_2_bnd[boundId];
// iFlx3_r = iFlx_3_bnd[boundId];
// iFlx4_r = iFlx_4_bnd[boundId];
// iFlx5_r = iFlx_5_bnd[boundId];
// }
// }
// if ( coord == 2 ){
// boundId = t_j + t_k*N_W;
// if ( t_i == (N_H-1) ) {
// iFlx1_r = iFlx_1_bnd[boundId];
// iFlx2_r = iFlx_2_bnd[boundId];
// iFlx3_r = iFlx_3_bnd[boundId];
// iFlx4_r = iFlx_4_bnd[boundId];
// iFlx5_r = iFlx_5_bnd[boundId];
// }
// }
// if ( coord == 3 ){
// boundId = t_j + t_i*N_W;
// if ( t_k == (N_D-1) ) {
// iFlx1_r = iFlx_1_bnd[boundId];
// iFlx2_r = iFlx_2_bnd[boundId];
// iFlx3_r = iFlx_3_bnd[boundId];
// iFlx4_r = iFlx_4_bnd[boundId];
// iFlx5_r = iFlx_5_bnd[boundId];
// }
// }
//Advance the consv values
// cnsv_1[ tid ] = cnsv_1[ tid ] - delta*( iFlx1_r - iFlx1_l );
// cnsv_2[ tid ] = cnsv_2[ tid ] - delta*( iFlx2_r - iFlx2_l ) + dt*gForceX[tid]*50;
// cnsv_3[ tid ] = cnsv_3[ tid ] - delta*( iFlx3_r - iFlx3_l ) + dt*gForceY[tid]*50;
// cnsv_4[ tid ] = cnsv_4[ tid ] - delta*( iFlx4_r - iFlx4_l ) + dt*gForceZ[tid]*50;
// cnsv_5[ tid ] = cnsv_5[ tid ] - delta*( iFlx5_r - iFlx5_l ) + dt*gravWork[tid]*50;
// The x-sweep (coord==1) RESETS the accumulators; y/z sweeps add to them.
// NOTE(review): this implies the host must always call the coord==1 sweep
// first each step — confirm with the caller.
if ( coord == 1 ){
cnsv_adv_1[ tid ] = -delta*( iFlx1_r - iFlx1_l );
cnsv_adv_2[ tid ] = -delta*( iFlx2_r - iFlx2_l );
cnsv_adv_3[ tid ] = -delta*( iFlx3_r - iFlx3_l );
cnsv_adv_4[ tid ] = -delta*( iFlx4_r - iFlx4_l );
cnsv_adv_5[ tid ] = -delta*( iFlx5_r - iFlx5_l );
}
else{
cnsv_adv_1[ tid ] = cnsv_adv_1[ tid ] - delta*( iFlx1_r - iFlx1_l );
cnsv_adv_2[ tid ] = cnsv_adv_2[ tid ] - delta*( iFlx2_r - iFlx2_l );
cnsv_adv_3[ tid ] = cnsv_adv_3[ tid ] - delta*( iFlx3_r - iFlx3_l );
cnsv_adv_4[ tid ] = cnsv_adv_4[ tid ] - delta*( iFlx4_r - iFlx4_l );
cnsv_adv_5[ tid ] = cnsv_adv_5[ tid ] - delta*( iFlx5_r - iFlx5_l );
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Block-level sum reduction with first-add-during-load: each block folds two
// grid-spaced input elements per thread into shared memory, tree-reduces them,
// and writes one partial sum per block into output[blockIdx.x].
// NOTE(review): the shared buffer is fixed at 256 doubles, so this assumes
// blockDim.x <= 256 and a power-of-two block size — confirm launch config.
__global__ void reduction_kernel( double *input, double *output ){
  __shared__ double partial[256];

  const unsigned int lane   = threadIdx.x;
  const unsigned int globId = blockIdx.x * blockDim.x + threadIdx.x;
  const unsigned int span   = blockDim.x * gridDim.x;

  // First addition happens on load: fold element globId with its pair one
  // full grid-span away.
  partial[lane] = input[globId] + input[globId + span];
  __syncthreads();

  // Shared-memory tree reduction, halving the active range each step.
  unsigned int stride = blockDim.x / 2;
  while (stride > 0) {
    if (lane < stride)
      partial[lane] += partial[lane + stride];
    __syncthreads();
    stride >>= 1;
  }

  // Lane 0 publishes this block's partial sum.
  if (lane == 0)
    output[blockIdx.x] = partial[0];
}
// Element-wise device-to-device copy over a 3D thread grid:
// dst[cell] = src[cell], one thread per cell, x fastest.
__global__ void copyDtoD( double *src, double *dst ){
  const int ix = blockIdx.x*blockDim.x + threadIdx.x;
  const int iy = blockIdx.y*blockDim.y + threadIdx.y;
  const int iz = blockIdx.z*blockDim.z + threadIdx.z;
  const int nx = blockDim.x*gridDim.x;   // full grid extent in x
  const int ny = blockDim.y*gridDim.y;   // full grid extent in y
  const int cell = ix + nx*( iy + ny*iz );
  dst[cell] = src[cell];
}
// In-place element-wise accumulation of five field arrays over a 3D grid:
// dst_k[cell] += sum_k[cell] for k = 1..5, one thread per cell.
__global__ void addDtoD(
double *dst_1, double *dst_2, double *dst_3, double *dst_4, double *dst_5,
double *sum_1, double *sum_2, double *sum_3, double *sum_4, double *sum_5 ){
  const int ix = blockIdx.x*blockDim.x + threadIdx.x;
  const int iy = blockIdx.y*blockDim.y + threadIdx.y;
  const int iz = blockIdx.z*blockDim.z + threadIdx.z;
  const int nx = blockDim.x*gridDim.x;   // full grid extent in x
  const int ny = blockDim.y*gridDim.y;   // full grid extent in y
  const int cell = ix + nx*( iy + ny*iz );
  dst_1[cell] += sum_1[cell];
  dst_2[cell] += sum_2[cell];
  dst_3[cell] += sum_3[cell];
  dst_4[cell] += sum_4[cell];
  dst_5[cell] += sum_5[cell];
}
}
|
20,795 | #include "includes.h"
/*
#define N 512
#define N 2048
#define THREADS_PER_BLOCK 512
*/
// Problem size and launch configuration (compile-time constants).
const int THREADS_PER_BLOCK = 32;
const int N = 2048;

// Element-wise product c = a * b over N ints; one thread per element, with a
// guard for threads past the end of the arrays.
__global__ void mult(int *a, int *b, int *c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        c[idx] = a[idx] * b[idx];
}
20,796 | #include "includes.h"
// Include files
// Parameters
#define N_ATOMS 343
#define MASS_ATOM 1.0f
#define time_step 0.01f
#define L 10.5f
#define T 0.728f
#define NUM_STEPS 10000
const int BLOCK_SIZE = 1024;
//const int L = ;
const int scheme = 1; // 0 for explicit, 1 for implicit
/*************************************************************************************************************/
/************* INITIALIZATION CODE **********/
/*************************************************************************************************************/
// Second half-kick of a velocity-Verlet step: v += (dt/2) * F / m, applied
// per force/velocity component. `len` guards the grid tail.
// force: per-component forces; vel: per-component velocities (updated in place).
// Fix: use a float literal (0.5f) so the whole expression stays in single
// precision — the original `0.5` (double) silently promoted every update to
// double arithmetic (MASS_ATOM and time_step are float macros: 1.0f, 0.01f).
__global__ void kinematics_phase2(float* force, float* vel, int len){
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int index = bx*blockDim.x + tx;
    if (index < len){
        // All operands are float, so no implicit double promotion.
        vel[index] += 0.5f * force[index] / MASS_ATOM * time_step;
    }
}
20,797 | /*
* UpdaterEz1D.cpp
*
 * Created on: Jan 25, 2016
* Author: aleksandr
*/
#include "UpdaterIntensityTM.h"
// Accumulates the squared Ez field into intensity[indx], sampling Ez on a
// strided sub-grid: grid point (firstX + m*stepX, firstY + n*stepX) for the
// (m, n) decoded from the flat index. Members (Ez, intensity, sizeY,
// gridSizeY, firstX/Y, stepX) are declared in the class header, not visible here.
__device__
void UpdaterIntensityTM::operator() (const int indx) {
// Row-major 2D view of the flat Ez array; row stride is gridSizeY.
// NOTE(review): this macro is never #undef'd, so it leaks past this function
// in the translation unit.
#define Ez(M, N) Ez[(M) * (gridSizeY) + (N)]
const int n = indx % sizeY;
const int m = indx / sizeY;
// NOTE(review): the column offset uses stepX (`firstY + n*stepX`); if the
// class defines a separate stepY this looks like a copy-paste bug — confirm
// against the class declaration.
intensity[indx] = intensity[indx] + Ez(firstX + m*stepX, firstY + n*stepX)*Ez(firstX + m*stepX, firstY + n*stepX);
}
|
20,798 | #include "includes.h"
// Element-wise vector sum c = a + b over n ints; one thread per element.
// Threads past the end of the arrays exit immediately.
__global__ void add(int *a, int *b, int *c, int n)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i];
}
20,799 | /**
* @file compare.cu
* @brief cuda arrayの比較の実装
* @author HIKARU KONDO
* @date 2021/07/19
*/
#include "compare.cuh"
#define BLOCKDIM 256
/**
* @def
* Macro to compare against arrays on the GPU
* @fn
* Macro to compare against arrays on the GPU
* @param (compareArrayA) Pointer to the beginning of the first array to compare
* @param (compareArrayB) Pointer to the beginning of the second array to compare
* @param (resArray) Pointer to an array to record the result of the comparison.
* @param (size) Number of elements in the array
* @detail
*/
// COMPARE(NAME, OP) expands to a templated __global__ kernel `NAME` that
// writes the element-wise result of (a[i] OP b[i]) into res[i] for i < size.
// One thread per element; out-of-range threads write nothing.
#define COMPARE(NAME, OP) \
template<typename T> \
__global__ \
void NAME(T *lhs, T *rhs, T *res, int size) { \
  unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x; \
  if (gid < size) { \
    res[gid] = (lhs[gid] OP rhs[gid]); \
  } \
}
COMPARE(equal, ==)
COMPARE(negativeEqual, !=)
COMPARE(greater, >)
COMPARE(less, <)
COMPARE(greaterEqual, >=)
COMPARE(lessEqual, <=)
// IMLP_COMPARE_FN(NAME, KER, ELEM) expands to a host-side wrapper `NAME` that
// launches kernel `KER` over `size` elements of type ELEM, using BLOCKDIM
// threads per block and a ceil-divided grid so every element is covered.
#define IMLP_COMPARE_FN(NAME, KER, ELEM) \
void NAME(ELEM *lhs, ELEM *rhs, ELEM *res, int size) { \
  dim3 block(BLOCKDIM); \
  dim3 grid((size + block.x - 1) / block.x); \
  KER <<< grid, block >>> (lhs, rhs, res, size); \
}
IMLP_COMPARE_FN(equalFloat, equal, float)
IMLP_COMPARE_FN(equalInt, equal, int)
IMLP_COMPARE_FN(negativeEqualFloat, negativeEqual, float)
IMLP_COMPARE_FN(negativeEqualInt, negativeEqual, int)
IMLP_COMPARE_FN(lessFloat, less, float)
IMLP_COMPARE_FN(lessInt, less, int)
IMLP_COMPARE_FN(greaterFloat, greater, float)
IMLP_COMPARE_FN(greaterInt, greater, int)
IMLP_COMPARE_FN(lessEqualFloat, lessEqual, float)
IMLP_COMPARE_FN(lessEqualInt, lessEqual, int)
IMLP_COMPARE_FN(greaterEqualFloat, greaterEqual, float)
IMLP_COMPARE_FN(greaterEqualInt, greaterEqual, int)
|
20,800 | //https://devblogs.nvidia.com/easy-introduction-cuda-c-and-c/
#include <stdio.h>
#include <cuda.h>
// Diagnostic program: prints the CUDA runtime version, driver version, and
// device count, together with the error status of each runtime API call.
int main(void)
{
    int rtVersion = -1;
    cudaError_t status = cudaRuntimeGetVersion(&rtVersion);
    printf("Runtime version %d; Cuda error: %x (%s)\n", rtVersion, status, cudaGetErrorString(status));

    int drvVersion = -1;
    status = cudaDriverGetVersion(&drvVersion);
    printf("Driver version %d; Cuda error: %x (%s)\n", drvVersion, status, cudaGetErrorString(status));

    int nDevices = -1;
    status = cudaGetDeviceCount(&nDevices);
    printf("Device count %d; Cuda error: %x (%s)\n", nDevices, status, cudaGetErrorString(status));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.